# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.apis.oapi_v1 import OapiV1
class TestOapiV1(unittest.TestCase):
""" OapiV1 unit test stubs """
def setUp(self):
self.api = lib_openshift.apis.oapi_v1.OapiV1()
def tearDown(self):
pass
def test_create_build(self):
"""
Test case for create_build
create a Build
"""
pass
def test_create_buildconfig(self):
"""
Test case for create_buildconfig
create a BuildConfig
"""
pass
def test_create_clusternetwork(self):
"""
Test case for create_clusternetwork
create a ClusterNetwork
"""
pass
def test_create_clusterpolicie(self):
"""
Test case for create_clusterpolicie
create a ClusterPolicy
"""
pass
def test_create_clusterpolicybinding(self):
"""
Test case for create_clusterpolicybinding
create a ClusterPolicyBinding
"""
pass
def test_create_clusterrole(self):
"""
Test case for create_clusterrole
create a ClusterRole
"""
pass
def test_create_clusterrolebinding(self):
"""
Test case for create_clusterrolebinding
create a ClusterRoleBinding
"""
pass
def test_create_deploymentconfig(self):
"""
Test case for create_deploymentconfig
create a DeploymentConfig
"""
pass
def test_create_deploymentconfigrollback(self):
"""
Test case for create_deploymentconfigrollback
create a DeploymentConfigRollback
"""
pass
def test_create_group(self):
"""
Test case for create_group
create a Group
"""
pass
def test_create_hostsubnet(self):
"""
Test case for create_hostsubnet
create a HostSubnet
"""
pass
def test_create_identitie(self):
"""
Test case for create_identitie
        create an Identity
"""
pass
def test_create_image(self):
"""
Test case for create_image
        create an Image
"""
pass
def test_create_imagestream(self):
"""
Test case for create_imagestream
        create an ImageStream
"""
pass
def test_create_imagestreamimport(self):
"""
Test case for create_imagestreamimport
        create an ImageStreamImport
"""
pass
def test_create_imagestreammapping(self):
"""
Test case for create_imagestreammapping
        create an ImageStreamMapping
"""
pass
def test_create_localresourceaccessreview(self):
"""
Test case for create_localresourceaccessreview
create a LocalResourceAccessReview
"""
pass
def test_create_localsubjectaccessreview(self):
"""
Test case for create_localsubjectaccessreview
create a LocalSubjectAccessReview
"""
pass
def test_create_namespaced_build(self):
"""
Test case for create_namespaced_build
create a Build
"""
pass
def test_create_namespaced_build_clone(self):
"""
Test case for create_namespaced_build_clone
create clone of a BuildRequest
"""
pass
def test_create_namespaced_buildconfig(self):
"""
Test case for create_namespaced_buildconfig
create a BuildConfig
"""
pass
def test_create_namespaced_buildconfig_instantiate(self):
"""
Test case for create_namespaced_buildconfig_instantiate
create instantiate of a BuildRequest
"""
pass
def test_create_namespaced_buildconfig_instantiatebinary(self):
"""
Test case for create_namespaced_buildconfig_instantiatebinary
connect POST requests to instantiatebinary of BinaryBuildRequestOptions
"""
pass
def test_create_namespaced_buildconfig_webhook(self):
"""
Test case for create_namespaced_buildconfig_webhook
connect POST requests to webhooks of Status
"""
pass
def test_create_namespaced_buildconfig_webhookspath(self):
"""
Test case for create_namespaced_buildconfig_webhookspath
connect POST requests to webhooks of Status
"""
pass
def test_create_namespaced_deploymentconfig(self):
"""
Test case for create_namespaced_deploymentconfig
create a DeploymentConfig
"""
pass
def test_create_namespaced_deploymentconfigrollback(self):
"""
Test case for create_namespaced_deploymentconfigrollback
create a DeploymentConfigRollback
"""
pass
def test_create_namespaced_imagestream(self):
"""
Test case for create_namespaced_imagestream
        create an ImageStream
"""
pass
def test_create_namespaced_imagestreamimport(self):
"""
Test case for create_namespaced_imagestreamimport
        create an ImageStreamImport
"""
pass
def test_create_namespaced_imagestreammapping(self):
"""
Test case for create_namespaced_imagestreammapping
        create an ImageStreamMapping
"""
pass
def test_create_namespaced_localresourceaccessreview(self):
"""
Test case for create_namespaced_localresourceaccessreview
create a LocalResourceAccessReview
"""
pass
def test_create_namespaced_localsubjectaccessreview(self):
"""
Test case for create_namespaced_localsubjectaccessreview
create a LocalSubjectAccessReview
"""
pass
def test_create_namespaced_policie(self):
"""
Test case for create_namespaced_policie
create a Policy
"""
pass
def test_create_namespaced_policybinding(self):
"""
Test case for create_namespaced_policybinding
create a PolicyBinding
"""
pass
def test_create_namespaced_processedtemplate(self):
"""
Test case for create_namespaced_processedtemplate
create a Template
"""
pass
def test_create_namespaced_resourceaccessreview(self):
"""
Test case for create_namespaced_resourceaccessreview
create a ResourceAccessReview
"""
pass
def test_create_namespaced_role(self):
"""
Test case for create_namespaced_role
create a Role
"""
pass
def test_create_namespaced_rolebinding(self):
"""
Test case for create_namespaced_rolebinding
create a RoleBinding
"""
pass
def test_create_namespaced_route(self):
"""
Test case for create_namespaced_route
create a Route
"""
pass
def test_create_namespaced_subjectaccessreview(self):
"""
Test case for create_namespaced_subjectaccessreview
create a SubjectAccessReview
"""
pass
def test_create_namespaced_template(self):
"""
Test case for create_namespaced_template
create a Template
"""
pass
def test_create_netnamespace(self):
"""
Test case for create_netnamespace
create a NetNamespace
"""
pass
def test_create_oauthaccesstoken(self):
"""
Test case for create_oauthaccesstoken
        create an OAuthAccessToken
"""
pass
def test_create_oauthauthorizetoken(self):
"""
Test case for create_oauthauthorizetoken
        create an OAuthAuthorizeToken
"""
pass
def test_create_oauthclient(self):
"""
Test case for create_oauthclient
        create an OAuthClient
"""
pass
def test_create_oauthclientauthorization(self):
"""
Test case for create_oauthclientauthorization
        create an OAuthClientAuthorization
"""
pass
def test_create_policie(self):
"""
Test case for create_policie
create a Policy
"""
pass
def test_create_policybinding(self):
"""
Test case for create_policybinding
create a PolicyBinding
"""
pass
def test_create_processedtemplate(self):
"""
Test case for create_processedtemplate
create a Template
"""
pass
def test_create_project(self):
"""
Test case for create_project
create a Project
"""
pass
def test_create_projectrequest(self):
"""
Test case for create_projectrequest
create a ProjectRequest
"""
pass
def test_create_resourceaccessreview(self):
"""
Test case for create_resourceaccessreview
create a ResourceAccessReview
"""
pass
def test_create_role(self):
"""
Test case for create_role
create a Role
"""
pass
def test_create_rolebinding(self):
"""
Test case for create_rolebinding
create a RoleBinding
"""
pass
def test_create_route(self):
"""
Test case for create_route
create a Route
"""
pass
def test_create_subjectaccessreview(self):
"""
Test case for create_subjectaccessreview
create a SubjectAccessReview
"""
pass
def test_create_template(self):
"""
Test case for create_template
create a Template
"""
pass
def test_create_user(self):
"""
Test case for create_user
create a User
"""
pass
def test_create_useridentitymapping(self):
"""
Test case for create_useridentitymapping
create a UserIdentityMapping
"""
pass
def test_delete_clusternetwork(self):
"""
Test case for delete_clusternetwork
delete a ClusterNetwork
"""
pass
def test_delete_clusternetworks(self):
"""
Test case for delete_clusternetworks
delete collection of ClusterNetwork
"""
pass
def test_delete_clusterpolicie(self):
"""
Test case for delete_clusterpolicie
delete a ClusterPolicy
"""
pass
def test_delete_clusterpolicies(self):
"""
Test case for delete_clusterpolicies
delete collection of ClusterPolicy
"""
pass
def test_delete_clusterpolicybinding(self):
"""
Test case for delete_clusterpolicybinding
delete a ClusterPolicyBinding
"""
pass
def test_delete_clusterpolicybindings(self):
"""
Test case for delete_clusterpolicybindings
delete collection of ClusterPolicyBinding
"""
pass
def test_delete_clusterrole(self):
"""
Test case for delete_clusterrole
delete a ClusterRole
"""
pass
def test_delete_clusterrolebinding(self):
"""
Test case for delete_clusterrolebinding
delete a ClusterRoleBinding
"""
pass
def test_delete_group(self):
"""
Test case for delete_group
delete a Group
"""
pass
def test_delete_groups(self):
"""
Test case for delete_groups
delete collection of Group
"""
pass
def test_delete_hostsubnet(self):
"""
Test case for delete_hostsubnet
delete a HostSubnet
"""
pass
def test_delete_hostsubnets(self):
"""
Test case for delete_hostsubnets
delete collection of HostSubnet
"""
pass
def test_delete_identitie(self):
"""
Test case for delete_identitie
        delete an Identity
"""
pass
def test_delete_identities(self):
"""
Test case for delete_identities
delete collection of Identity
"""
pass
def test_delete_image(self):
"""
Test case for delete_image
        delete an Image
"""
pass
def test_delete_images(self):
"""
Test case for delete_images
delete collection of Image
"""
pass
def test_delete_namespaced_build(self):
"""
Test case for delete_namespaced_build
delete a Build
"""
pass
def test_delete_namespaced_buildconfig(self):
"""
Test case for delete_namespaced_buildconfig
delete a BuildConfig
"""
pass
def test_delete_namespaced_buildconfigs(self):
"""
Test case for delete_namespaced_buildconfigs
delete collection of BuildConfig
"""
pass
def test_delete_namespaced_builds(self):
"""
Test case for delete_namespaced_builds
delete collection of Build
"""
pass
def test_delete_namespaced_deploymentconfig(self):
"""
Test case for delete_namespaced_deploymentconfig
delete a DeploymentConfig
"""
pass
def test_delete_namespaced_deploymentconfigs(self):
"""
Test case for delete_namespaced_deploymentconfigs
delete collection of DeploymentConfig
"""
pass
def test_delete_namespaced_imagestream(self):
"""
Test case for delete_namespaced_imagestream
        delete an ImageStream
"""
pass
def test_delete_namespaced_imagestreams(self):
"""
Test case for delete_namespaced_imagestreams
delete collection of ImageStream
"""
pass
def test_delete_namespaced_imagestreamtag(self):
"""
Test case for delete_namespaced_imagestreamtag
        delete an ImageStreamTag
"""
pass
def test_delete_namespaced_policie(self):
"""
Test case for delete_namespaced_policie
delete a Policy
"""
pass
def test_delete_namespaced_policies(self):
"""
Test case for delete_namespaced_policies
delete collection of Policy
"""
pass
def test_delete_namespaced_policybinding(self):
"""
Test case for delete_namespaced_policybinding
delete a PolicyBinding
"""
pass
def test_delete_namespaced_policybindings(self):
"""
Test case for delete_namespaced_policybindings
delete collection of PolicyBinding
"""
pass
def test_delete_namespaced_role(self):
"""
Test case for delete_namespaced_role
delete a Role
"""
pass
def test_delete_namespaced_rolebinding(self):
"""
Test case for delete_namespaced_rolebinding
delete a RoleBinding
"""
pass
def test_delete_namespaced_route(self):
"""
Test case for delete_namespaced_route
delete a Route
"""
pass
def test_delete_namespaced_routes(self):
"""
Test case for delete_namespaced_routes
delete collection of Route
"""
pass
def test_delete_namespaced_template(self):
"""
Test case for delete_namespaced_template
delete a Template
"""
pass
def test_delete_namespaced_templates(self):
"""
Test case for delete_namespaced_templates
delete collection of Template
"""
pass
def test_delete_netnamespace(self):
"""
Test case for delete_netnamespace
delete a NetNamespace
"""
pass
def test_delete_netnamespaces(self):
"""
Test case for delete_netnamespaces
delete collection of NetNamespace
"""
pass
def test_delete_oauthaccesstoken(self):
"""
Test case for delete_oauthaccesstoken
        delete an OAuthAccessToken
"""
pass
def test_delete_oauthauthorizetoken(self):
"""
Test case for delete_oauthauthorizetoken
        delete an OAuthAuthorizeToken
"""
pass
def test_delete_oauthclient(self):
"""
Test case for delete_oauthclient
        delete an OAuthClient
"""
pass
def test_delete_oauthclientauthorization(self):
"""
Test case for delete_oauthclientauthorization
        delete an OAuthClientAuthorization
"""
pass
def test_delete_oauthclientauthorizations(self):
"""
Test case for delete_oauthclientauthorizations
delete collection of OAuthClientAuthorization
"""
pass
def test_delete_oauthclients(self):
"""
Test case for delete_oauthclients
delete collection of OAuthClient
"""
pass
def test_delete_project(self):
"""
Test case for delete_project
delete a Project
"""
pass
def test_delete_user(self):
"""
Test case for delete_user
delete a User
"""
pass
def test_delete_useridentitymapping(self):
"""
Test case for delete_useridentitymapping
delete a UserIdentityMapping
"""
pass
def test_delete_users(self):
"""
Test case for delete_users
delete collection of User
"""
pass
def test_get_clusternetwork(self):
"""
Test case for get_clusternetwork
read the specified ClusterNetwork
"""
pass
def test_get_clusterpolicie(self):
"""
Test case for get_clusterpolicie
read the specified ClusterPolicy
"""
pass
def test_get_clusterpolicybinding(self):
"""
Test case for get_clusterpolicybinding
read the specified ClusterPolicyBinding
"""
pass
def test_get_clusterrole(self):
"""
Test case for get_clusterrole
read the specified ClusterRole
"""
pass
def test_get_clusterrolebinding(self):
"""
Test case for get_clusterrolebinding
read the specified ClusterRoleBinding
"""
pass
def test_get_group(self):
"""
Test case for get_group
read the specified Group
"""
pass
def test_get_hostsubnet(self):
"""
Test case for get_hostsubnet
read the specified HostSubnet
"""
pass
def test_get_identitie(self):
"""
Test case for get_identitie
read the specified Identity
"""
pass
def test_get_image(self):
"""
Test case for get_image
read the specified Image
"""
pass
def test_get_namespaced_build(self):
"""
Test case for get_namespaced_build
read the specified Build
"""
pass
def test_get_namespaced_build_log(self):
"""
Test case for get_namespaced_build_log
read log of the specified BuildLog
"""
pass
def test_get_namespaced_buildconfig(self):
"""
Test case for get_namespaced_buildconfig
read the specified BuildConfig
"""
pass
def test_get_namespaced_deploymentconfig(self):
"""
Test case for get_namespaced_deploymentconfig
read the specified DeploymentConfig
"""
pass
def test_get_namespaced_deploymentconfig_log(self):
"""
Test case for get_namespaced_deploymentconfig_log
read log of the specified DeploymentLog
"""
pass
def test_get_namespaced_deploymentconfig_scale(self):
"""
Test case for get_namespaced_deploymentconfig_scale
read scale of the specified Scale
"""
pass
def test_get_namespaced_generatedeploymentconfig(self):
"""
Test case for get_namespaced_generatedeploymentconfig
read the specified DeploymentConfig
"""
pass
def test_get_namespaced_imagestream(self):
"""
Test case for get_namespaced_imagestream
read the specified ImageStream
"""
pass
def test_get_namespaced_imagestream_secrets(self):
"""
Test case for get_namespaced_imagestream_secrets
read secrets of the specified SecretList
"""
pass
def test_get_namespaced_imagestreamimage(self):
"""
Test case for get_namespaced_imagestreamimage
read the specified ImageStreamImage
"""
pass
def test_get_namespaced_imagestreamtag(self):
"""
Test case for get_namespaced_imagestreamtag
read the specified ImageStreamTag
"""
pass
def test_get_namespaced_policie(self):
"""
Test case for get_namespaced_policie
read the specified Policy
"""
pass
def test_get_namespaced_policybinding(self):
"""
Test case for get_namespaced_policybinding
read the specified PolicyBinding
"""
pass
def test_get_namespaced_role(self):
"""
Test case for get_namespaced_role
read the specified Role
"""
pass
def test_get_namespaced_rolebinding(self):
"""
Test case for get_namespaced_rolebinding
read the specified RoleBinding
"""
pass
def test_get_namespaced_route(self):
"""
Test case for get_namespaced_route
read the specified Route
"""
pass
def test_get_namespaced_template(self):
"""
Test case for get_namespaced_template
read the specified Template
"""
pass
def test_get_netnamespace(self):
"""
Test case for get_netnamespace
read the specified NetNamespace
"""
pass
def test_get_oauthaccesstoken(self):
"""
Test case for get_oauthaccesstoken
read the specified OAuthAccessToken
"""
pass
def test_get_oauthauthorizetoken(self):
"""
Test case for get_oauthauthorizetoken
read the specified OAuthAuthorizeToken
"""
pass
def test_get_oauthclient(self):
"""
Test case for get_oauthclient
read the specified OAuthClient
"""
pass
def test_get_oauthclientauthorization(self):
"""
Test case for get_oauthclientauthorization
read the specified OAuthClientAuthorization
"""
pass
def test_get_project(self):
"""
Test case for get_project
read the specified Project
"""
pass
def test_get_user(self):
"""
Test case for get_user
read the specified User
"""
pass
def test_get_useridentitymapping(self):
"""
Test case for get_useridentitymapping
read the specified UserIdentityMapping
"""
pass
def test_list(self):
"""
Test case for list
get available resources
"""
pass
def test_list_buildconfigs(self):
"""
Test case for list_buildconfigs
list or watch objects of kind BuildConfig
"""
pass
def test_list_builds(self):
"""
Test case for list_builds
list or watch objects of kind Build
"""
pass
def test_list_clusternetworks(self):
"""
Test case for list_clusternetworks
list or watch objects of kind ClusterNetwork
"""
pass
def test_list_clusterpolicies(self):
"""
Test case for list_clusterpolicies
list or watch objects of kind ClusterPolicy
"""
pass
def test_list_clusterpolicybindings(self):
"""
Test case for list_clusterpolicybindings
list or watch objects of kind ClusterPolicyBinding
"""
pass
def test_list_clusterrolebindings(self):
"""
Test case for list_clusterrolebindings
list objects of kind ClusterRoleBinding
"""
pass
def test_list_clusterroles(self):
"""
Test case for list_clusterroles
list objects of kind ClusterRole
"""
pass
def test_list_deploymentconfigs(self):
"""
Test case for list_deploymentconfigs
list or watch objects of kind DeploymentConfig
"""
pass
def test_list_groups(self):
"""
Test case for list_groups
list or watch objects of kind Group
"""
pass
def test_list_hostsubnets(self):
"""
Test case for list_hostsubnets
list or watch objects of kind HostSubnet
"""
pass
def test_list_identities(self):
"""
Test case for list_identities
list or watch objects of kind Identity
"""
pass
def test_list_images(self):
"""
Test case for list_images
list or watch objects of kind Image
"""
pass
def test_list_imagestreams(self):
"""
Test case for list_imagestreams
list or watch objects of kind ImageStream
"""
pass
def test_list_imagestreamtags(self):
"""
Test case for list_imagestreamtags
list objects of kind ImageStreamTag
"""
pass
def test_list_namespaced_buildconfigs(self):
"""
Test case for list_namespaced_buildconfigs
list or watch objects of kind BuildConfig
"""
pass
def test_list_namespaced_builds(self):
"""
Test case for list_namespaced_builds
list or watch objects of kind Build
"""
pass
def test_list_namespaced_deploymentconfigs(self):
"""
Test case for list_namespaced_deploymentconfigs
list or watch objects of kind DeploymentConfig
"""
pass
def test_list_namespaced_imagestreams(self):
"""
Test case for list_namespaced_imagestreams
list or watch objects of kind ImageStream
"""
pass
def test_list_namespaced_imagestreamtags(self):
"""
Test case for list_namespaced_imagestreamtags
list objects of kind ImageStreamTag
"""
pass
def test_list_namespaced_policies(self):
"""
Test case for list_namespaced_policies
list or watch objects of kind Policy
"""
pass
def test_list_namespaced_policybindings(self):
"""
Test case for list_namespaced_policybindings
list or watch objects of kind PolicyBinding
"""
pass
def test_list_namespaced_rolebindings(self):
"""
Test case for list_namespaced_rolebindings
list objects of kind RoleBinding
"""
pass
def test_list_namespaced_roles(self):
"""
Test case for list_namespaced_roles
list objects of kind Role
"""
pass
def test_list_namespaced_routes(self):
"""
Test case for list_namespaced_routes
list or watch objects of kind Route
"""
pass
def test_list_namespaced_templates(self):
"""
Test case for list_namespaced_templates
list or watch objects of kind Template
"""
pass
def test_list_netnamespaces(self):
"""
Test case for list_netnamespaces
list or watch objects of kind NetNamespace
"""
pass
def test_list_oauthaccesstokens(self):
"""
Test case for list_oauthaccesstokens
list objects of kind OAuthAccessToken
"""
pass
def test_list_oauthauthorizetokens(self):
"""
Test case for list_oauthauthorizetokens
list objects of kind OAuthAuthorizeToken
"""
pass
def test_list_oauthclientauthorizations(self):
"""
Test case for list_oauthclientauthorizations
list or watch objects of kind OAuthClientAuthorization
"""
pass
def test_list_oauthclients(self):
"""
Test case for list_oauthclients
list or watch objects of kind OAuthClient
"""
pass
def test_list_policies(self):
"""
Test case for list_policies
list or watch objects of kind Policy
"""
pass
def test_list_policybindings(self):
"""
Test case for list_policybindings
list or watch objects of kind PolicyBinding
"""
pass
def test_list_projectrequests(self):
"""
Test case for list_projectrequests
list objects of kind ProjectRequest
"""
pass
def test_list_projects(self):
"""
Test case for list_projects
list objects of kind Project
"""
pass
def test_list_rolebindings(self):
"""
Test case for list_rolebindings
list objects of kind RoleBinding
"""
pass
def test_list_roles(self):
"""
Test case for list_roles
list objects of kind Role
"""
pass
def test_list_routes(self):
"""
Test case for list_routes
list or watch objects of kind Route
"""
pass
def test_list_templates(self):
"""
Test case for list_templates
list or watch objects of kind Template
"""
pass
def test_list_users(self):
"""
Test case for list_users
list or watch objects of kind User
"""
pass
def test_patch_clusternetwork(self):
"""
Test case for patch_clusternetwork
partially update the specified ClusterNetwork
"""
pass
def test_patch_clusterpolicie(self):
"""
Test case for patch_clusterpolicie
partially update the specified ClusterPolicy
"""
pass
def test_patch_clusterpolicybinding(self):
"""
Test case for patch_clusterpolicybinding
partially update the specified ClusterPolicyBinding
"""
pass
def test_patch_clusterrole(self):
"""
Test case for patch_clusterrole
partially update the specified ClusterRole
"""
pass
def test_patch_clusterrolebinding(self):
"""
Test case for patch_clusterrolebinding
partially update the specified ClusterRoleBinding
"""
pass
def test_patch_group(self):
"""
Test case for patch_group
partially update the specified Group
"""
pass
def test_patch_hostsubnet(self):
"""
Test case for patch_hostsubnet
partially update the specified HostSubnet
"""
pass
def test_patch_identitie(self):
"""
Test case for patch_identitie
partially update the specified Identity
"""
pass
def test_patch_image(self):
"""
Test case for patch_image
partially update the specified Image
"""
pass
def test_patch_namespaced_build(self):
"""
Test case for patch_namespaced_build
partially update the specified Build
"""
pass
def test_patch_namespaced_buildconfig(self):
"""
Test case for patch_namespaced_buildconfig
partially update the specified BuildConfig
"""
pass
def test_patch_namespaced_deploymentconfig(self):
"""
Test case for patch_namespaced_deploymentconfig
partially update the specified DeploymentConfig
"""
pass
def test_patch_namespaced_deploymentconfig_scale(self):
"""
Test case for patch_namespaced_deploymentconfig_scale
partially update scale of the specified Scale
"""
pass
def test_patch_namespaced_imagestream(self):
"""
Test case for patch_namespaced_imagestream
partially update the specified ImageStream
"""
pass
def test_patch_namespaced_imagestreamtag(self):
"""
Test case for patch_namespaced_imagestreamtag
partially update the specified ImageStreamTag
"""
pass
def test_patch_namespaced_policie(self):
"""
Test case for patch_namespaced_policie
partially update the specified Policy
"""
pass
def test_patch_namespaced_policybinding(self):
"""
Test case for patch_namespaced_policybinding
partially update the specified PolicyBinding
"""
pass
def test_patch_namespaced_role(self):
"""
Test case for patch_namespaced_role
partially update the specified Role
"""
pass
def test_patch_namespaced_rolebinding(self):
"""
Test case for patch_namespaced_rolebinding
partially update the specified RoleBinding
"""
pass
def test_patch_namespaced_route(self):
"""
Test case for patch_namespaced_route
partially update the specified Route
"""
pass
def test_patch_namespaced_template(self):
"""
Test case for patch_namespaced_template
partially update the specified Template
"""
pass
def test_patch_netnamespace(self):
"""
Test case for patch_netnamespace
partially update the specified NetNamespace
"""
pass
def test_patch_oauthclient(self):
"""
Test case for patch_oauthclient
partially update the specified OAuthClient
"""
pass
def test_patch_oauthclientauthorization(self):
"""
Test case for patch_oauthclientauthorization
partially update the specified OAuthClientAuthorization
"""
pass
def test_patch_project(self):
"""
Test case for patch_project
partially update the specified Project
"""
pass
def test_patch_user(self):
"""
Test case for patch_user
partially update the specified User
"""
pass
def test_patch_useridentitymapping(self):
"""
Test case for patch_useridentitymapping
partially update the specified UserIdentityMapping
"""
pass
def test_replace_clusternetwork(self):
"""
Test case for replace_clusternetwork
replace the specified ClusterNetwork
"""
pass
def test_replace_clusterpolicie(self):
"""
Test case for replace_clusterpolicie
replace the specified ClusterPolicy
"""
pass
def test_replace_clusterpolicybinding(self):
"""
Test case for replace_clusterpolicybinding
replace the specified ClusterPolicyBinding
"""
pass
def test_replace_clusterrole(self):
"""
Test case for replace_clusterrole
replace the specified ClusterRole
"""
pass
def test_replace_clusterrolebinding(self):
"""
Test case for replace_clusterrolebinding
replace the specified ClusterRoleBinding
"""
pass
def test_replace_group(self):
"""
Test case for replace_group
replace the specified Group
"""
pass
def test_replace_hostsubnet(self):
"""
Test case for replace_hostsubnet
replace the specified HostSubnet
"""
pass
def test_replace_identitie(self):
"""
Test case for replace_identitie
replace the specified Identity
"""
pass
def test_replace_image(self):
"""
Test case for replace_image
replace the specified Image
"""
pass
def test_replace_namespaced_build(self):
"""
Test case for replace_namespaced_build
replace the specified Build
"""
pass
def test_replace_namespaced_build_details(self):
"""
Test case for replace_namespaced_build_details
replace details of the specified Build
"""
pass
def test_replace_namespaced_buildconfig(self):
"""
Test case for replace_namespaced_buildconfig
replace the specified BuildConfig
"""
pass
def test_replace_namespaced_deploymentconfig(self):
"""
Test case for replace_namespaced_deploymentconfig
replace the specified DeploymentConfig
"""
pass
def test_replace_namespaced_deploymentconfig_scale(self):
"""
Test case for replace_namespaced_deploymentconfig_scale
replace scale of the specified Scale
"""
pass
def test_replace_namespaced_imagestream(self):
"""
Test case for replace_namespaced_imagestream
replace the specified ImageStream
"""
pass
def test_replace_namespaced_imagestream_status(self):
"""
Test case for replace_namespaced_imagestream_status
replace status of the specified ImageStream
"""
pass
def test_replace_namespaced_imagestreamtag(self):
"""
Test case for replace_namespaced_imagestreamtag
replace the specified ImageStreamTag
"""
pass
def test_replace_namespaced_policie(self):
"""
Test case for replace_namespaced_policie
replace the specified Policy
"""
pass
def test_replace_namespaced_policybinding(self):
"""
Test case for replace_namespaced_policybinding
replace the specified PolicyBinding
"""
pass
def test_replace_namespaced_role(self):
"""
Test case for replace_namespaced_role
replace the specified Role
"""
pass
def test_replace_namespaced_rolebinding(self):
"""
Test case for replace_namespaced_rolebinding
replace the specified RoleBinding
"""
pass
def test_replace_namespaced_route(self):
"""
Test case for replace_namespaced_route
replace the specified Route
"""
pass
def test_replace_namespaced_route_status(self):
"""
Test case for replace_namespaced_route_status
replace status of the specified Route
"""
pass
def test_replace_namespaced_template(self):
"""
Test case for replace_namespaced_template
replace the specified Template
"""
pass
def test_replace_netnamespace(self):
"""
Test case for replace_netnamespace
replace the specified NetNamespace
"""
pass
def test_replace_oauthclient(self):
"""
Test case for replace_oauthclient
replace the specified OAuthClient
"""
pass
def test_replace_oauthclientauthorization(self):
"""
Test case for replace_oauthclientauthorization
replace the specified OAuthClientAuthorization
"""
pass
def test_replace_project(self):
"""
Test case for replace_project
replace the specified Project
"""
pass
def test_replace_user(self):
"""
Test case for replace_user
replace the specified User
"""
pass
def test_replace_useridentitymapping(self):
"""
Test case for replace_useridentitymapping
replace the specified UserIdentityMapping
"""
pass
def test_watch_namespaced_watch_build(self):
"""
Test case for watch_namespaced_watch_build
watch changes to an object of kind Build
"""
pass
def test_watch_namespaced_watch_buildconfig(self):
"""
Test case for watch_namespaced_watch_buildconfig
watch changes to an object of kind BuildConfig
"""
pass
def test_watch_namespaced_watch_buildconfigs(self):
"""
Test case for watch_namespaced_watch_buildconfigs
watch individual changes to a list of BuildConfig
"""
pass
def test_watch_namespaced_watch_builds(self):
"""
Test case for watch_namespaced_watch_builds
watch individual changes to a list of Build
"""
pass
def test_watch_namespaced_watch_deploymentconfig(self):
"""
Test case for watch_namespaced_watch_deploymentconfig
watch changes to an object of kind DeploymentConfig
"""
pass
def test_watch_namespaced_watch_deploymentconfigs(self):
"""
Test case for watch_namespaced_watch_deploymentconfigs
watch individual changes to a list of DeploymentConfig
"""
pass
def test_watch_namespaced_watch_imagestream(self):
"""
Test case for watch_namespaced_watch_imagestream
watch changes to an object of kind ImageStream
"""
pass
def test_watch_namespaced_watch_imagestreams(self):
"""
Test case for watch_namespaced_watch_imagestreams
watch individual changes to a list of ImageStream
"""
pass
def test_watch_namespaced_watch_policie(self):
"""
Test case for watch_namespaced_watch_policie
watch changes to an object of kind Policy
"""
pass
def test_watch_namespaced_watch_policies(self):
"""
Test case for watch_namespaced_watch_policies
watch individual changes to a list of Policy
"""
pass
def test_watch_namespaced_watch_policybinding(self):
"""
Test case for watch_namespaced_watch_policybinding
watch changes to an object of kind PolicyBinding
"""
pass
def test_watch_namespaced_watch_policybindings(self):
"""
Test case for watch_namespaced_watch_policybindings
watch individual changes to a list of PolicyBinding
"""
pass
def test_watch_namespaced_watch_route(self):
"""
Test case for watch_namespaced_watch_route
watch changes to an object of kind Route
"""
pass
def test_watch_namespaced_watch_routes(self):
"""
Test case for watch_namespaced_watch_routes
watch individual changes to a list of Route
"""
pass
def test_watch_namespaced_watch_template(self):
"""
Test case for watch_namespaced_watch_template
watch changes to an object of kind Template
"""
pass
def test_watch_namespaced_watch_templates(self):
"""
Test case for watch_namespaced_watch_templates
watch individual changes to a list of Template
"""
pass
def test_watch_watch_buildconfigs(self):
"""
Test case for watch_watch_buildconfigs
watch individual changes to a list of BuildConfig
"""
pass
def test_watch_watch_builds(self):
"""
Test case for watch_watch_builds
watch individual changes to a list of Build
"""
pass
def test_watch_watch_clusternetwork(self):
"""
Test case for watch_watch_clusternetwork
watch changes to an object of kind ClusterNetwork
"""
pass
def test_watch_watch_clusternetworks(self):
"""
Test case for watch_watch_clusternetworks
watch individual changes to a list of ClusterNetwork
"""
pass
def test_watch_watch_clusterpolicie(self):
"""
Test case for watch_watch_clusterpolicie
watch changes to an object of kind ClusterPolicy
"""
pass
def test_watch_watch_clusterpolicies(self):
"""
Test case for watch_watch_clusterpolicies
watch individual changes to a list of ClusterPolicy
"""
pass
def test_watch_watch_clusterpolicybinding(self):
"""
Test case for watch_watch_clusterpolicybinding
watch changes to an object of kind ClusterPolicyBinding
"""
pass
def test_watch_watch_clusterpolicybindings(self):
"""
Test case for watch_watch_clusterpolicybindings
watch individual changes to a list of ClusterPolicyBinding
"""
pass
def test_watch_watch_deploymentconfigs(self):
"""
Test case for watch_watch_deploymentconfigs
watch individual changes to a list of DeploymentConfig
"""
pass
def test_watch_watch_group(self):
"""
Test case for watch_watch_group
watch changes to an object of kind Group
"""
pass
def test_watch_watch_groups(self):
"""
Test case for watch_watch_groups
watch individual changes to a list of Group
"""
pass
def test_watch_watch_hostsubnet(self):
"""
Test case for watch_watch_hostsubnet
watch changes to an object of kind HostSubnet
"""
pass
def test_watch_watch_hostsubnets(self):
"""
Test case for watch_watch_hostsubnets
watch individual changes to a list of HostSubnet
"""
pass
def test_watch_watch_identitie(self):
"""
Test case for watch_watch_identitie
watch changes to an object of kind Identity
"""
pass
def test_watch_watch_identities(self):
"""
Test case for watch_watch_identities
watch individual changes to a list of Identity
"""
pass
def test_watch_watch_image(self):
"""
Test case for watch_watch_image
watch changes to an object of kind Image
"""
pass
def test_watch_watch_images(self):
"""
Test case for watch_watch_images
watch individual changes to a list of Image
"""
pass
def test_watch_watch_imagestreams(self):
"""
Test case for watch_watch_imagestreams
watch individual changes to a list of ImageStream
"""
pass
def test_watch_watch_netnamespace(self):
"""
Test case for watch_watch_netnamespace
watch changes to an object of kind NetNamespace
"""
pass
def test_watch_watch_netnamespaces(self):
"""
Test case for watch_watch_netnamespaces
watch individual changes to a list of NetNamespace
"""
pass
def test_watch_watch_oauthclient(self):
"""
Test case for watch_watch_oauthclient
watch changes to an object of kind OAuthClient
"""
pass
def test_watch_watch_oauthclientauthorization(self):
"""
Test case for watch_watch_oauthclientauthorization
watch changes to an object of kind OAuthClientAuthorization
"""
pass
def test_watch_watch_oauthclientauthorizations(self):
"""
Test case for watch_watch_oauthclientauthorizations
watch individual changes to a list of OAuthClientAuthorization
"""
pass
def test_watch_watch_oauthclients(self):
"""
Test case for watch_watch_oauthclients
watch individual changes to a list of OAuthClient
"""
pass
def test_watch_watch_policies(self):
"""
Test case for watch_watch_policies
watch individual changes to a list of Policy
"""
pass
def test_watch_watch_policybindings(self):
"""
Test case for watch_watch_policybindings
watch individual changes to a list of PolicyBinding
"""
pass
def test_watch_watch_routes(self):
"""
Test case for watch_watch_routes
watch individual changes to a list of Route
"""
pass
def test_watch_watch_templates(self):
"""
Test case for watch_watch_templates
watch individual changes to a list of Template
"""
pass
def test_watch_watch_user(self):
"""
Test case for watch_watch_user
watch changes to an object of kind User
"""
pass
def test_watch_watch_users(self):
"""
Test case for watch_watch_users
watch individual changes to a list of User
"""
pass
if __name__ == '__main__':
unittest.main()
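# A minimal, illustrative sketch of how one of the generated stubs above could
# be filled in.  It assumes `api` is an already-configured OapiV1 client that
# can reach a cluster; the generated stubs themselves make no such assumption,
# and the shape of the assertion here is illustrative, not prescribed.
def _example_filled_in_list_check(api):
    """Call the discovery endpoint exercised by TestOapiV1.test_list."""
    try:
        # `list` is the OapiV1 method covered by test_list ("get available resources").
        resources = api.list()
    except ApiException as exc:
        # Surface the HTTP error rather than silently passing like the stubs do.
        raise AssertionError("oapi/v1 discovery call failed: %s" % exc)
    assert resources is not None
    return resources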
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines.stackoverflow import word_prediction_preprocessing
TEST_DATA = collections.OrderedDict(
creation_date=(['unused date']),
score=([tf.constant(0, dtype=tf.int64)]),
tags=(['unused test tag']),
title=(['unused title']),
tokens=(['one must imagine']),
type=(['unused type']),
)
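# Only the `tokens` field is consumed by the word-prediction preprocessing;
# the other columns mirror the Stack Overflow client-data schema and are
# deliberately marked unused.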
def _compute_length_of_dataset(ds):
return ds.reduce(0, lambda x, _: x + 1)
class SplitInputTest(tf.test.TestCase):
def test_split_input_returns_expected_result(self):
tokens = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.int64)
expected_input = [[0, 1, 2, 3]]
expected_target = [[1, 2, 3, 4]]
split = word_prediction_preprocessing.split_input_target(tokens)
self.assertAllEqual(self.evaluate(split[0]), expected_input)
self.assertAllEqual(self.evaluate(split[1]), expected_target)
class ToIDsFnTest(tf.test.TestCase):
def test_ids_fn_truncates_on_input_longer_than_sequence_length(self):
vocab = ['A', 'B', 'C']
max_seq_len = 1
bos = word_prediction_preprocessing.get_special_tokens(
len(vocab)).beginning_of_sentence
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len)
data = {'tokens': 'A B C'}
processed = to_ids_fn(data)
self.assertAllEqual(self.evaluate(processed), [bos, 1])
def test_build_to_ids_fn_embeds_all_vocab(self):
vocab = ['A', 'B', 'C']
max_seq_len = 5
special_tokens = word_prediction_preprocessing.get_special_tokens(
len(vocab))
bos = special_tokens.beginning_of_sentence
eos = special_tokens.end_of_sentence
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len)
data = {'tokens': 'A B C'}
processed = to_ids_fn(data)
self.assertAllEqual(self.evaluate(processed), [bos, 1, 2, 3, eos])
def test_pad_token_correct(self):
vocab = ['A', 'B', 'C']
max_seq_len = 5
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len)
special_tokens = word_prediction_preprocessing.get_special_tokens(
len(vocab))
pad = special_tokens.padding
bos = special_tokens.beginning_of_sentence
eos = special_tokens.end_of_sentence
data = {'tokens': 'A B C'}
processed = to_ids_fn(data)
batched_ds = tf.data.Dataset.from_tensor_slices([processed]).padded_batch(
1, padded_shapes=[6])
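    # padded_batch pads with zeros by default, which is exactly the pad token
    # id, so the sixth position below is expected to be `pad`.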
sample_elem = next(iter(batched_ds))
self.assertAllEqual(self.evaluate(sample_elem), [[bos, 1, 2, 3, eos, pad]])
def test_out_of_vocab_tokens_are_correct(self):
vocab = ['A', 'B', 'C']
max_seq_len = 5
num_out_of_vocab_buckets = 2
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len, num_out_of_vocab_buckets=num_out_of_vocab_buckets)
out_of_vocab_tokens = word_prediction_preprocessing.get_special_tokens(
len(vocab),
num_out_of_vocab_buckets=num_out_of_vocab_buckets).out_of_vocab
data = {'tokens': 'A B D'}
processed = to_ids_fn(data)
self.assertLen(out_of_vocab_tokens, num_out_of_vocab_buckets)
self.assertIn(self.evaluate(processed)[3], out_of_vocab_tokens)
class BatchAndSplitTest(tf.test.TestCase):
def test_batch_and_split_fn_returns_dataset_with_correct_type_spec(self):
token = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.int64)
ds = tf.data.Dataset.from_tensor_slices(token)
padded_and_batched = word_prediction_preprocessing.batch_and_split(
ds, sequence_length=6, batch_size=1)
self.assertIsInstance(padded_and_batched, tf.data.Dataset)
self.assertEqual(padded_and_batched.element_spec, (tf.TensorSpec(
[None, 6], dtype=tf.int64), tf.TensorSpec([None, 6], dtype=tf.int64)))
def test_batch_and_split_fn_returns_dataset_yielding_expected_elements(self):
token = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.int64)
ds = tf.data.Dataset.from_tensor_slices(token)
padded_and_batched = word_prediction_preprocessing.batch_and_split(
ds, sequence_length=6, batch_size=1)
num_elems = 0
for elem in padded_and_batched:
self.assertAllEqual(
self.evaluate(elem[0]),
tf.constant([[0, 1, 2, 3, 4, 0]], dtype=tf.int64))
self.assertAllEqual(
self.evaluate(elem[1]),
tf.constant([[1, 2, 3, 4, 0, 0]], dtype=tf.int64))
num_elems += 1
self.assertEqual(num_elems, 1)
class PreprocessFnTest(tf.test.TestCase, parameterized.TestCase):
def test_preprocess_fn_with_empty_vocab_raises(self):
preprocess_spec = client_spec.ClientSpec(num_epochs=1, batch_size=1)
with self.assertRaisesRegex(ValueError, 'vocab must be non-empty'):
word_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, vocab=[], sequence_length=10)
@parameterized.named_parameters(('zero_value', 0), ('negative_value1', -1),
('negative_value2', -2))
def test_nonpositive_sequence_length_raises(self, sequence_length):
preprocess_spec = client_spec.ClientSpec(num_epochs=1, batch_size=1)
with self.assertRaisesRegex(ValueError,
'sequence_length must be a positive integer'):
word_prediction_preprocessing.create_preprocess_fn(
          preprocess_spec, vocab=['A'], sequence_length=sequence_length)
@parameterized.named_parameters(('zero_value', 0), ('negative_value1', -1),
('negative_value2', -2))
def test_nonpositive_num_out_of_vocab_buckets_length_raises(
self, num_out_of_vocab_buckets):
preprocess_spec = client_spec.ClientSpec(num_epochs=1, batch_size=1)
with self.assertRaisesRegex(
ValueError, 'num_out_of_vocab_buckets must be a positive integer'):
word_prediction_preprocessing.create_preprocess_fn(
preprocess_spec,
vocab=['A'],
sequence_length=10,
num_out_of_vocab_buckets=num_out_of_vocab_buckets)
@parameterized.named_parameters(('param1', 1, 1), ('param2', 4, 2),
('param3', 100, 3))
def test_preprocess_fn_returns_correct_dataset_element_spec(
self, sequence_length, num_out_of_vocab_buckets):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=32, max_elements=100)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
preprocess_spec,
sequence_length=sequence_length,
vocab=['one', 'must'],
num_out_of_vocab_buckets=num_out_of_vocab_buckets)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
preprocessed_ds.element_spec,
(tf.TensorSpec(shape=[None, sequence_length], dtype=tf.int64),
tf.TensorSpec(shape=[None, sequence_length], dtype=tf.int64)))
def test_preprocess_fn_returns_correct_sequence_with_1_out_of_vocab_bucket(
self):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=32, max_elements=100)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
preprocess_spec,
sequence_length=6,
vocab=['one', 'must'],
num_out_of_vocab_buckets=1)
preprocessed_ds = preprocess_fn(ds)
element = next(iter(preprocessed_ds))
# BOS is len(vocab)+2, EOS is len(vocab)+3, pad is 0, OOV is len(vocab)+1
self.assertAllEqual(
self.evaluate(element[0]),
tf.constant([[4, 1, 2, 3, 5, 0]], dtype=tf.int64))
def test_preprocess_fn_returns_correct_sequence_with_3_out_of_vocab_buckets(
self):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=32, max_elements=100)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
preprocess_spec,
sequence_length=6,
vocab=['one', 'must'],
num_out_of_vocab_buckets=3)
preprocessed_ds = preprocess_fn(ds)
element = next(iter(preprocessed_ds))
# BOS is len(vocab)+3+1
self.assertEqual(self.evaluate(element[0])[0][0], 6)
self.assertEqual(self.evaluate(element[0])[0][1], 1)
self.assertEqual(self.evaluate(element[0])[0][2], 2)
# OOV is [len(vocab)+1, len(vocab)+2, len(vocab)+3]
self.assertIn(self.evaluate(element[0])[0][3], [3, 4, 5])
# EOS is len(vocab)+3+2
self.assertEqual(self.evaluate(element[0])[0][4], 7)
# pad is 0
self.assertEqual(self.evaluate(element[0])[0][5], 0)
@parameterized.named_parameters(
('num_epochs_1_batch_size_1', 1, 1),
('num_epochs_4_batch_size_2', 4, 2),
('num_epochs_9_batch_size_3', 9, 3),
('num_epochs_12_batch_size_1', 12, 1),
('num_epochs_3_batch_size_5', 3, 5),
('num_epochs_7_batch_size_2', 7, 2),
)
def test_ds_length_is_ceil_num_epochs_over_batch_size(self, num_epochs,
batch_size):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=num_epochs, batch_size=batch_size)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, vocab=['A'], sequence_length=10)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
_compute_length_of_dataset(preprocessed_ds),
tf.cast(tf.math.ceil(num_epochs / batch_size), tf.int32))
@parameterized.named_parameters(
('max_elements1', 1),
('max_elements3', 3),
('max_elements7', 7),
('max_elements11', 11),
('max_elements18', 18),
)
def test_ds_length_with_max_elements(self, max_elements):
repeat_size = 10
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=repeat_size, batch_size=1, max_elements=max_elements)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, vocab=['A'])
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
_compute_length_of_dataset(preprocessed_ds),
min(repeat_size, max_elements))
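# A minimal sketch (not part of word_prediction_preprocessing) of the id
# layout the assertions above rely on: pad is 0, vocabulary words get ids
# 1..len(vocab), the out-of-vocab buckets come next, and the beginning- and
# end-of-sentence tokens follow the out-of-vocab buckets.
def _expected_special_token_ids(vocab_size, num_out_of_vocab_buckets=1):
  pad = 0
  first_out_of_vocab = vocab_size + 1
  bos = vocab_size + num_out_of_vocab_buckets + 1
  eos = vocab_size + num_out_of_vocab_buckets + 2
  return pad, first_out_of_vocab, bos, eos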
if __name__ == '__main__':
execution_contexts.set_local_python_execution_context()
tf.test.main()
"""Base class for sparse matrices"""
from __future__ import division, print_function, absolute_import
__all__ = ['spmatrix', 'isspmatrix', 'issparse',
'SparseWarning','SparseEfficiencyWarning']
import sys
import numpy as np
from scipy._lib.six import xrange
from .sputils import isdense, isscalarlike, isintlike
class SparseWarning(Warning):
pass
class SparseFormatWarning(SparseWarning):
pass
class SparseEfficiencyWarning(SparseWarning):
pass
# The formats that we might potentially understand.
_formats = {'csc':[0, "Compressed Sparse Column"],
'csr':[1, "Compressed Sparse Row"],
'dok':[2, "Dictionary Of Keys"],
'lil':[3, "LInked List"],
'dod':[4, "Dictionary of Dictionaries"],
'sss':[5, "Symmetric Sparse Skyline"],
'coo':[6, "COOrdinate"],
'lba':[7, "Linpack BAnded"],
'egd':[8, "Ellpack-itpack Generalized Diagonal"],
'dia':[9, "DIAgonal"],
'bsr':[10, "Block Sparse Row"],
'msr':[11, "Modified compressed Sparse Row"],
'bsc':[12, "Block Sparse Column"],
'msc':[13, "Modified compressed Sparse Column"],
'ssk':[14, "Symmetric SKyline"],
'nsk':[15, "Nonsymmetric SKyline"],
'jad':[16, "JAgged Diagonal"],
'uss':[17, "Unsymmetric Sparse Skyline"],
'vbr':[18, "Variable Block Row"],
'und':[19, "Undefined"]
}
# These univariate ufuncs preserve zeros.
_ufuncs_with_fixed_point_at_zero = frozenset([
np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,
np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,
np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])
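# Because f(0) == 0 for each of these, such a ufunc can be applied to a sparse
# matrix by transforming only the stored (nonzero) entries in .data, without
# densifying the matrix.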
MAXPRINT = 50
class spmatrix(object):
""" This class provides a base class for all sparse matrices. It
cannot be instantiated. Most of the work is provided by subclasses.
"""
__array_priority__ = 10.1
ndim = 2
def __init__(self, maxprint=MAXPRINT):
self.format = self.__class__.__name__[:3]
self._shape = None
if self.format == 'spm':
raise ValueError("This class is not intended"
" to be instantiated directly.")
self.maxprint = maxprint
def set_shape(self,shape):
shape = tuple(shape)
if len(shape) != 2:
raise ValueError("Only two-dimensional sparse arrays "
"are supported.")
try:
shape = int(shape[0]),int(shape[1]) # floats, other weirdness
except:
raise TypeError('invalid shape')
if not (shape[0] >= 0 and shape[1] >= 0):
raise ValueError('invalid shape')
if (self._shape != shape) and (self._shape is not None):
try:
self = self.reshape(shape)
except NotImplementedError:
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__)
self._shape = shape
def get_shape(self):
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def reshape(self, shape):
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__)
def astype(self, t):
return self.tocsr().astype(t).asformat(self.format)
def asfptype(self):
"""Upcast matrix to a floating point format (if necessary)"""
fp_types = ['f','d','F','D']
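        # numpy type chars: 'f'/'d' are single/double precision real, 'F'/'D'
        # single/double precision complex.  The loop below returns the first
        # (i.e. smallest) of these that can represent self.dtype.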
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name)
def __iter__(self):
for r in xrange(self.shape[0]):
yield self[r,:]
def getmaxprint(self):
try:
maxprint = self.maxprint
except AttributeError:
maxprint = MAXPRINT
return maxprint
# def typecode(self):
# try:
# typ = self.dtype.char
# except AttributeError:
# typ = None
# return typ
def getnnz(self):
try:
return self.nnz
except AttributeError:
raise AttributeError("nnz not defined")
def getformat(self):
try:
format = self.format
except AttributeError:
format = 'und'
return format
def __repr__(self):
nnz = self.getnnz()
format = self.getformat()
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements in %s format>" % \
(self.shape + (self.dtype.type, nnz, _formats[format][1]))
def __str__(self):
maxprint = self.getmaxprint()
A = self.tocoo()
nnz = self.getnnz()
# helper function, outputs "(i,j) v"
def tostr(row,col,data):
triples = zip(list(zip(row,col)),data)
return '\n'.join([(' %s\t%s' % t) for t in triples])
if nnz > maxprint:
half = maxprint // 2
out = tostr(A.row[:half], A.col[:half], A.data[:half])
out += "\n :\t:\n"
half = maxprint - maxprint//2
out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
else:
out = tostr(A.row, A.col, A.data)
return out
def __bool__(self): # Simple -- other ideas?
if self.shape == (1, 1):
            return self.nnz == 1
else:
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all().")
__nonzero__ = __bool__
# What should len(sparse) return? For consistency with dense matrices,
# perhaps it should be the number of rows? But for some uses the number of
# non-zeros is more important. For now, raise an exception!
def __len__(self):
# return self.getnnz()
raise TypeError("sparse matrix length is ambiguous; use getnnz()"
" or shape[0]")
def asformat(self, format):
"""Return this matrix in a given sparse format
Parameters
----------
format : {string, None}
desired sparse matrix format
- None for no format conversion
- "csr" for csr_matrix format
- "csc" for csc_matrix format
- "lil" for lil_matrix format
- "dok" for dok_matrix format and so on
"""
if format is None or format == self.format:
return self
else:
return getattr(self,'to' + format)()
###################################################################
# NOTE: All arithmetic operations use csr_matrix by default.
# Therefore a new sparse matrix format just needs to define a
# .tocsr() method to provide arithmetic support. Any of these
# methods can be overridden for efficiency.
####################################################################
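    # For example (an illustrative sketch, not an existing scipy class): a
    # format that only stores a {(row, col): value} mapping could support all
    # of the arithmetic defined below just by providing a tocsr() conversion
    # along the lines of
    #
    #     def tocsr(self):
    #         from .coo import coo_matrix
    #         rows, cols = zip(*self._entries)
    #         data = list(self._entries.values())
    #         return coo_matrix((data, (rows, cols)), shape=self.shape).tocsr()
    #
    # every operator in this section then routes through that CSR copy.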
def multiply(self, other):
"""Point-wise multiplication by another matrix
"""
return self.tocsr().multiply(other)
def maximum(self, other):
return self.tocsr().maximum(other)
def minimum(self, other):
return self.tocsr().minimum(other)
def dot(self, other):
"""Ordinary dot product
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
"""
return self * other
def power(self, n, dtype=None):
return self.tocsr().power(n, dtype=dtype)
def __eq__(self, other):
return self.tocsr().__eq__(other)
def __ne__(self, other):
return self.tocsr().__ne__(other)
def __lt__(self,other):
return self.tocsr().__lt__(other)
def __gt__(self,other):
return self.tocsr().__gt__(other)
def __le__(self,other):
return self.tocsr().__le__(other)
def __ge__(self,other):
return self.tocsr().__ge__(other)
def __abs__(self):
return abs(self.tocsr())
def __add__(self, other): # self + other
return self.tocsr().__add__(other)
def __radd__(self, other): # other + self
return self.tocsr().__radd__(other)
def __sub__(self, other): # self - other
# note: this can't be replaced by self + (-other) for unsigned types
return self.tocsr().__sub__(other)
def __rsub__(self, other): # other - self
return self.tocsr().__rsub__(other)
def __mul__(self, other):
"""interpret other and call one of the following
self._mul_scalar()
self._mul_vector()
self._mul_multivector()
self._mul_sparse_matrix()
"""
M,N = self.shape
if other.__class__ is np.ndarray:
# Fast path for the most common case
if other.shape == (N,):
return self._mul_vector(other)
elif other.shape == (N, 1):
return self._mul_vector(other.ravel()).reshape(M, 1)
elif other.ndim == 2 and other.shape[0] == N:
return self._mul_multivector(other)
if isscalarlike(other):
# scalar value
return self._mul_scalar(other)
if issparse(other):
if self.shape[1] != other.shape[0]:
raise ValueError('dimension mismatch')
return self._mul_sparse_matrix(other)
try:
other.shape
except AttributeError:
# If it's a list or whatever, treat it like a matrix
other_a = np.asanyarray(other)
if other_a.ndim == 0 and other_a.dtype == np.object_:
# Not interpretable as an array; return NotImplemented so that
# other's __rmul__ can kick in if that's implemented.
return NotImplemented
other = other_a
if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
# dense row or column vector
if other.shape != (N,) and other.shape != (N,1):
raise ValueError('dimension mismatch')
result = self._mul_vector(np.ravel(other))
if isinstance(other, np.matrix):
result = np.asmatrix(result)
if other.ndim == 2 and other.shape[1] == 1:
# If 'other' was an (nx1) column vector, reshape the result
result = result.reshape(-1,1)
return result
elif other.ndim == 2:
##
# dense 2D array or matrix ("multivector")
if other.shape[0] != self.shape[1]:
raise ValueError('dimension mismatch')
result = self._mul_multivector(np.asarray(other))
if isinstance(other, np.matrix):
result = np.asmatrix(result)
return result
else:
raise ValueError('could not interpret dimensions')
# by default, use CSR for __mul__ handlers
def _mul_scalar(self, other):
return self.tocsr()._mul_scalar(other)
def _mul_vector(self, other):
return self.tocsr()._mul_vector(other)
def _mul_multivector(self, other):
return self.tocsr()._mul_multivector(other)
def _mul_sparse_matrix(self, other):
return self.tocsr()._mul_sparse_matrix(other)
def __rmul__(self, other): # other * self
if isscalarlike(other):
return self.__mul__(other)
else:
# Don't use asarray unless we have to
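            # other * self == (self.T * other.T).T, so transpose both
            # operands and reuse the left-multiplication path above.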
try:
tr = other.transpose()
except AttributeError:
tr = np.asarray(other).transpose()
return (self.transpose() * tr).transpose()
####################
# Other Arithmetic #
####################
def _divide(self, other, true_divide=False, rdivide=False):
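        # true_divide selects __truediv__ semantics (integer inputs produce
        # floating point results); rdivide flips the operands, computing
        # other / self instead of self / other.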
if isscalarlike(other):
if rdivide:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
if true_divide and np.can_cast(self.dtype, np.float_):
return self.astype(np.float_)._mul_scalar(1./other)
else:
r = self._mul_scalar(1./other)
scalar_dtype = np.asarray(other).dtype
if (np.issubdtype(self.dtype, np.integer)
and np.issubdtype(scalar_dtype, np.integer)):
return r.astype(self.dtype)
else:
return r
elif isdense(other):
if not rdivide:
if true_divide:
return np.true_divide(self.todense(), other)
else:
return np.divide(self.todense(), other)
else:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
elif isspmatrix(other):
if rdivide:
return other._divide(self, true_divide, rdivide=False)
self_csr = self.tocsr()
if true_divide and np.can_cast(self.dtype, np.float_):
return self_csr.astype(np.float_)._divide_sparse(other)
else:
return self_csr._divide_sparse(other)
else:
return NotImplemented
def __truediv__(self, other):
return self._divide(other, true_divide=True)
def __div__(self, other):
# Always do true division
return self._divide(other, true_divide=True)
def __rtruediv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __rdiv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __neg__(self):
return -self.tocsr()
def __iadd__(self, other):
return NotImplemented
def __isub__(self, other):
return NotImplemented
def __imul__(self, other):
return NotImplemented
def __idiv__(self, other):
return self.__itruediv__(other)
def __itruediv__(self, other):
return NotImplemented
def __pow__(self, other):
if self.shape[0] != self.shape[1]:
raise TypeError('matrix is not square')
if isintlike(other):
other = int(other)
if other < 0:
raise ValueError('exponent must be >= 0')
if other == 0:
from .construct import eye
return eye(self.shape[0], dtype=self.dtype)
elif other == 1:
return self.copy()
else:
tmp = self.__pow__(other//2)
if (other % 2):
return self * tmp * tmp
else:
return tmp * tmp
elif isscalarlike(other):
raise ValueError('exponent must be an integer')
else:
return NotImplemented
def __getattr__(self, attr):
if attr == 'A':
return self.toarray()
elif attr == 'T':
return self.transpose()
elif attr == 'H':
return self.getH()
elif attr == 'real':
return self._real()
elif attr == 'imag':
return self._imag()
elif attr == 'size':
return self.getnnz()
else:
raise AttributeError(attr + " not found")
def transpose(self):
return self.tocsr().transpose()
def conj(self):
return self.tocsr().conj()
def conjugate(self):
return self.conj()
# Renamed conjtranspose() -> getH() for compatibility with dense matrices
def getH(self):
return self.transpose().conj()
def _real(self):
return self.tocsr()._real()
def _imag(self):
return self.tocsr()._imag()
def nonzero(self):
"""nonzero indices
Returns a tuple of arrays (row,col) containing the indices
of the non-zero elements of the matrix.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
>>> A.nonzero()
(array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
"""
# convert to COOrdinate format
A = self.tocoo()
nz_mask = A.data != 0
return (A.row[nz_mask],A.col[nz_mask])
def getcol(self, j):
"""Returns a copy of column j of the matrix, as an (m x 1) sparse
matrix (column vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Post-multiply by a (n x 1) column vector 'a' containing all zeros
# except for a_j = 1
from .csc import csc_matrix
n = self.shape[1]
if j < 0:
j += n
if j < 0 or j >= n:
raise IndexError("index out of bounds")
col_selector = csc_matrix(([1], [[j], [0]]), shape=(n,1), dtype=self.dtype)
return self * col_selector
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n) sparse
matrix (row vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Pre-multiply by a (1 x m) row vector 'a' containing all zeros
# except for a_i = 1
from .csr import csr_matrix
m = self.shape[0]
if i < 0:
i += m
if i < 0 or i >= m:
raise IndexError("index out of bounds")
row_selector = csr_matrix(([1], [[0], [i]]), shape=(1,m), dtype=self.dtype)
return row_selector * self
# def __array__(self):
# return self.toarray()
def todense(self, order=None, out=None):
"""
Return a dense matrix representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array (or `numpy.matrix`) as the
output buffer instead of allocating a new array to
return. The provided array must have the same shape and
dtype as the sparse matrix on which you are calling the
method.
Returns
-------
arr : numpy.matrix, 2-dimensional
A NumPy matrix object with the same shape and containing
the same data represented by the sparse matrix, with the
requested memory order. If `out` was passed and was an
array (rather than a `numpy.matrix`), it will be filled
with the appropriate values and returned wrapped in a
`numpy.matrix` object that shares the same memory.
"""
return np.asmatrix(self.toarray(order=order, out=out))
def toarray(self, order=None, out=None):
"""
Return a dense ndarray representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array as the output buffer
instead of allocating a new array to return. The provided
array must have the same shape and dtype as the sparse
matrix on which you are calling the method. For most
sparse types, `out` is required to be memory contiguous
(either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-dimensional
An array with the same shape and containing the same
data represented by the sparse matrix, with the requested
memory order. If `out` was passed, the same object is
returned after being modified in-place to contain the
appropriate values.
"""
return self.tocoo().toarray(order=order, out=out)
def todok(self):
return self.tocoo().todok()
def tocoo(self):
return self.tocsr().tocoo()
def tolil(self):
return self.tocsr().tolil()
def todia(self):
return self.tocoo().todia()
def tobsr(self, blocksize=None):
return self.tocsr().tobsr(blocksize=blocksize)
def copy(self):
return self.__class__(self,copy=True)
def sum(self, axis=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# We use multiplication by an array of ones to achieve this.
# For some sparse matrix formats more efficient methods are
# possible -- these should override this function.
m, n = self.shape
# Mimic numpy's casting.
if np.issubdtype(self.dtype, np.float_):
res_dtype = np.float_
elif (np.issubdtype(self.dtype, np.int_) or
np.issubdtype(self.dtype, np.bool_)):
res_dtype = np.int_
elif np.issubdtype(self.dtype, np.complex_):
res_dtype = np.complex_
else:
res_dtype = self.dtype
if axis is None:
# sum over rows and columns
return (self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))).sum()
if axis < 0:
axis += 2
if axis == 0:
# sum over columns
return np.asmatrix(np.ones((1, m), dtype=res_dtype)) * self
elif axis == 1:
# sum over rows
return self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))
else:
raise ValueError("axis out of bounds")
def mean(self, axis=None):
"""Average the matrix over the given axis. If the axis is None,
average over both rows and columns, returning a scalar.
"""
# Mimic numpy's casting.
if (np.issubdtype(self.dtype, np.float_) or
np.issubdtype(self.dtype, np.integer) or
np.issubdtype(self.dtype, np.bool_)):
res_dtype = np.float_
elif np.issubdtype(self.dtype, np.complex_):
res_dtype = np.complex_
else:
res_dtype = self.dtype
if axis is None:
return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
if axis < 0:
axis += 2
if axis == 0:
mean = self.astype(res_dtype).sum(0)
mean *= 1.0 / self.shape[0]
return mean
elif axis == 1:
mean = self.astype(res_dtype).sum(1)
mean *= 1.0 / self.shape[1]
return mean
else:
raise ValueError("axis out of bounds")
def diagonal(self):
"""Returns the main diagonal of the matrix
"""
# TODO support k != 0
return self.tocsr().diagonal()
def setdiag(self, values, k=0):
"""
Set diagonal or off-diagonal elements of the array.
Parameters
----------
values : array_like
New values of the diagonal elements.
Values may have any length. If the diagonal is longer than values,
            then the remaining diagonal entries will not be set. If values is
longer than the diagonal, then the remaining values are ignored.
If a scalar value is given, all of the diagonal is set to it.
k : int, optional
Which off-diagonal to set, corresponding to elements a[i,i+k].
Default: 0 (the main diagonal).
"""
M, N = self.shape
if (k > 0 and k >= N) or (k < 0 and -k >= M):
raise ValueError("k exceeds matrix dimensions")
self._setdiag(np.asarray(values), k)
def _setdiag(self, values, k):
M, N = self.shape
if k < 0:
if values.ndim == 0:
# broadcast
max_index = min(M+k, N)
for i in xrange(max_index):
self[i - k, i] = values
else:
max_index = min(M+k, N, len(values))
if max_index <= 0:
return
for i,v in enumerate(values[:max_index]):
self[i - k, i] = v
else:
if values.ndim == 0:
# broadcast
max_index = min(M, N-k)
for i in xrange(max_index):
self[i, i + k] = values
else:
max_index = min(M, N-k, len(values))
if max_index <= 0:
return
for i,v in enumerate(values[:max_index]):
self[i, i + k] = v
def _process_toarray_args(self, order, out):
if out is not None:
if order is not None:
raise ValueError('order cannot be specified if out '
'is not None')
if out.shape != self.shape or out.dtype != self.dtype:
raise ValueError('out array must be same dtype and shape as '
'sparse matrix')
out[...] = 0.
return out
else:
return np.zeros(self.shape, dtype=self.dtype, order=order)
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
"""Method for compatibility with NumPy's ufuncs and dot
functions.
"""
if any(not isinstance(x, spmatrix) and np.asarray(x).dtype == object
for x in inputs):
# preserve previous behavior with object arrays
with_self = list(inputs)
with_self[pos] = np.asarray(self, dtype=object)
return getattr(func, method)(*with_self, **kwargs)
out = kwargs.pop('out', None)
if method != '__call__' or kwargs:
return NotImplemented
without_self = list(inputs)
del without_self[pos]
without_self = tuple(without_self)
if func is np.multiply:
result = self.multiply(*without_self)
elif func is np.add:
result = self.__add__(*without_self)
elif func is np.dot:
if pos == 0:
result = self.__mul__(inputs[1])
else:
result = self.__rmul__(inputs[0])
elif func is np.subtract:
if pos == 0:
result = self.__sub__(inputs[1])
else:
result = self.__rsub__(inputs[0])
elif func is np.divide:
true_divide = (sys.version_info[0] >= 3)
rdivide = (pos == 1)
result = self._divide(*without_self,
true_divide=true_divide,
rdivide=rdivide)
elif func is np.true_divide:
rdivide = (pos == 1)
result = self._divide(*without_self, true_divide=True, rdivide=rdivide)
elif func is np.maximum:
result = self.maximum(*without_self)
elif func is np.minimum:
result = self.minimum(*without_self)
elif func is np.absolute:
result = abs(self)
elif func in _ufuncs_with_fixed_point_at_zero:
func_name = func.__name__
if hasattr(self, func_name):
result = getattr(self, func_name)()
else:
result = getattr(self.tocsr(), func_name)()
else:
return NotImplemented
if out is not None:
if not isinstance(out, spmatrix) and isinstance(result, spmatrix):
out[...] = result.todense()
else:
out[...] = result
result = out
return result
def isspmatrix(x):
return isinstance(x, spmatrix)
issparse = isspmatrix
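# A minimal usage sketch (not part of the module): it exercises the generic
# arithmetic and extraction helpers defined above through the CSR subclass.
# Assumes scipy and numpy are installed; runs only when executed directly.
if __name__ == "__main__":
    from scipy.sparse import csr_matrix

    A = csr_matrix(np.array([[1, 0, 2],
                             [0, 0, 3],
                             [4, 5, 6]]))
    v = np.array([1, 0, -1])

    print(A * v)                   # dispatched to _mul_vector
    print((A * A.T).toarray())     # dispatched to _mul_sparse_matrix
    print(A.sum(axis=0))           # column sums as a 1 x n matrix
    print(A.getrow(1).toarray())   # built by pre-multiplying a unit row vector
    print((A ** 2).toarray())      # exponentiation by squaring in __pow__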
|
|
"""
Python client for BaseX.
Works with BaseX 7.x (but not with BaseX 8.0 and later)
Documentation: http://docs.basex.org/wiki/Clients
(C) BaseX Team 2005-12, Arjen van Elteren
BSD License
"""
import hashlib, socket, array
import threading
class SocketInputReader(object):
def __init__(self, sock):
self.__s = sock
self.__buf = array.array('B', chr(0) * 0x1000)
self.init()
def init(self):
self.__bpos = 0
self.__bsize = 0
# Returns a single byte from the socket.
def read(self):
# Cache next bytes
if self.__bpos >= self.__bsize:
self.__bsize = self.__s.recv_into(self.__buf)
self.__bpos = 0
b = self.__buf[self.__bpos]
self.__bpos += 1
return b
# Reads until byte is found.
def read_until(self, byte):
# Cache next bytes
if self.__bpos >= self.__bsize:
self.__bsize = self.__s.recv_into(self.__buf)
self.__bpos = 0
found = False
substr = ""
try:
pos = self.__buf[self.__bpos:self.__bsize].index(byte)
found = True
substr = self.__buf[self.__bpos:pos+self.__bpos].tostring()
self.__bpos = self.__bpos + pos + 1
except ValueError:
substr = self.__buf[self.__bpos:self.__bsize].tostring()
self.__bpos = self.__bsize
return (found, substr)
def readString(self):
strings = []
found = False
while not found:
found, substr = self.read_until(0)
strings.append(substr)
return ''.join(strings)
class Session(object):
def __init__(self, host, port, user, pw):
self.__info = None
# create server connection
self.__s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__s.connect((host, port))
self.__sreader = SocketInputReader(self.__s)
self.__event_socket = None
self.__event_host = host
self.__event_listening_thread = None
self.__event_callbacks = {}
# receive timestamp
ts = self.readString()
# send username and hashed password/timestamp
m = hashlib.md5()
m.update(hashlib.md5(pw).hexdigest())
m.update(ts)
self.send(user + chr(0) + m.hexdigest())
# evaluate success flag
if self.__s.recv(1) != chr(0):
raise IOError('Access Denied.')
def execute(self, com):
# send command to server
self.send(com)
# receive result
result = self.receive()
self.__info = self.readString()
if not self.ok():
raise IOError(self.__info)
return result
def query(self, q):
return Query(self, q)
def create(self, name, content):
self.sendInput(8, name, content)
def add(self, path, content):
self.sendInput(9, path, content)
def replace(self, path, content):
self.sendInput(12, path, content)
def store(self, path, content):
self.sendInput(13, path, content)
def info(self):
return self.__info
def close(self):
self.send('exit')
self.__s.close()
        if self.__event_socket is not None:
self.__event_socket.close()
def init(self):
"""Initialize byte transfer"""
self.__sreader.init()
def register_and_start_listener(self):
self.__s.sendall(chr(10))
event_port = int(self.readString())
self.__event_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__event_socket.settimeout(5000)
self.__event_socket.connect((self.__event_host, event_port))
token = self.readString()
self.__event_socket.sendall(token + chr(0))
        if self.__event_socket.recv(1) != chr(0):
raise IOError("Could not register event listener")
self.__event_listening_thread = threading.Thread(
target=self.event_listening_loop
)
self.__event_listening_thread.daemon = True
self.__event_listening_thread.start()
def event_listening_loop(self):
reader = SocketInputReader(self.__event_socket)
reader.init()
while True:
name = reader.readString()
data = reader.readString()
self.__event_callbacks[name](data)
def is_listening(self):
        return self.__event_socket is not None
def watch(self, name, callback):
if not self.is_listening():
self.register_and_start_listener()
else:
self.__s.sendall(chr(10))
self.send(name)
info = self.readString()
if not self.ok():
raise IOError(info)
self.__event_callbacks[name] = callback
def unwatch(self, name):
self.send(chr(11) + name)
info = self.readString()
if not self.ok():
raise IOError(info)
del self.__event_callbacks[name]
def readString(self):
"""Retrieve a string from the socket"""
return self.__sreader.readString()
def read(self):
"""Return a single byte from socket"""
return self.__sreader.read()
def read_until(self, byte):
"""Read until byte is found"""
return self.__sreader.read_until(byte)
def send(self, value):
"""Send the defined string"""
self.__s.sendall(value + chr(0))
def sendInput(self, code, arg, content):
self.__s.sendall(chr(code) + arg + chr(0) + content + chr(0))
self.__info = self.readString()
if not self.ok():
raise IOError(self.info())
def ok(self):
"""Return success check"""
return self.read() == 0
def receive(self):
"""Return received string"""
self.init()
return self.readString()
def iter_receive(self):
self.init()
typecode = self.read()
while typecode > 0:
string = self.readString()
yield string
typecode = self.read()
if not self.ok():
raise IOError(self.readString())
class Query():
def __init__(self, session, q):
self.__session = session
self.__id = self.exc(chr(0), q)
def bind(self, name, value, datatype=''):
self.exc(chr(3), self.__id + chr(0) + name + chr(0) + value + chr(0) + datatype)
def context(self, value, datatype=''):
self.exc(chr(14), self.__id + chr(0) + value + chr(0) + datatype)
def iter(self):
self.__session.send(chr(4) + self.__id)
return self.__session.iter_receive()
def execute(self):
return self.exc(chr(5), self.__id)
def info(self):
return self.exc(chr(6), self.__id)
def options(self):
return self.exc(chr(7), self.__id)
def close(self):
self.exc(chr(2), self.__id)
def exc(self, cmd, arg):
self.__session.send(cmd + arg)
s = self.__session.receive()
if not self.__session.ok():
raise IOError(self.__session.readString())
return s
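# A minimal usage sketch (not part of the client): it assumes a BaseX 7.x
# server is running on localhost:1984 with the default admin/admin account,
# and runs only when this file is executed directly.
if __name__ == '__main__':
    session = Session('localhost', 1984, 'admin', 'admin')
    try:
        # Plain command execution
        print session.execute("xquery 1 to 5")
        # Iterative query evaluation
        query = session.query("for $i in 1 to 3 return $i * 2")
        for item in query.iter():
            print item
        query.close()
    finally:
        session.close()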
|
|
from math import sqrt
import warnings
import numbers
import numpy as np
from sklearn.utils.extmath import randomized_svd, squared_norm
import scipy.sparse as sp
class Sparse_NMF():
def __init__(self, n_components=2, init=None,
tol=1e-4, max_iter=200,
alpha=0.01, beta=0.01):
self.n_components = n_components #number of features
        self.init = init #the initialization method for W and H
self.tol = tol #the error tolerance
self.max_iter = max_iter #the maximum iteration
self.alpha = alpha #learning rate
self.beta = beta #L2 coefficient
def fit(self, X):
#given X, return W and H
W, H, n_iter_ = matrix_factorization(
X=X, n_components=self.n_components,
init=self.init, update_H=True,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
beta=self.beta)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W, H
def transform(self, X):
        #given X, calculate W using the H obtained during the fit procedure
W, _, n_iter_ = matrix_factorization(
X=X, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
beta=self.beta)
self.n_iter_ = n_iter_
return W
def matrix_factorization(X, H=None, n_components=None,
init=None, update_H=True,
tol=1e-4, max_iter=200, alpha=0.01,
beta=0.02):
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
# check W and H, or initialize them
if not update_H:
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init, eps=1e-6)
print W
print H
n_iter = 0
e_before = 0
for step in xrange(max_iter):
n_iter = step + 1
print n_iter
xs, ys = X.nonzero() # the x index and y index of nonzero
W_temp = W
ER = X - np.dot(W,H) # the error matrix
for i in xrange(n_samples):
for k in xrange(n_components):
sum = 0
for j in ys[xs==i]:
sum += ER[i][j] * H[k][j]
t = W[i][k] + alpha * (sum - beta * W[i][k])
if t < 0:
a = alpha
for l in xrange(10):
a /= 2
t = W[i][k] + a * (sum - beta * W[i][k])
if t >= 0:
break
if t < 0:
t = W[i][k]
W[i][k] = t
if update_H:
for j in xrange(n_features):
for k in xrange(n_components):
sum = 0
for i in xs[ys==j]:
sum += ER[i][j] * W_temp[i][k]
t = H[k][j] + alpha * (sum - beta * H[k][j])
if t < 0:
a = alpha
for l in xrange(10):
a /= 2
t = H[k][j] + a * (sum - beta * H[k][j])
if t >= 0:
break
if t < 0:
t = H[k][j]
H[k][j] = t
E = (X - np.dot(W,H)) * (X>0)
e = squared_norm(E) + beta * ( squared_norm(W) + squared_norm(H) )
# if step > 0:
# if abs(e/e_before - 1) < tol:
# break
# e_before = e
print e
if e < tol:
break
if n_iter == max_iter:
        print ("Maximum number of iterations (%d) reached. Increase it to"
               " improve convergence." % max_iter)
return W, H, n_iter
# def matrix_factorization(X, H=None, n_components=None,
# init='random', update_H=True,
# tol=1e-4, max_iter=200, alpha=0.01,
# beta=0.02):
#
# n_samples, n_features = X.shape
# if n_components is None:
# n_components = n_features
#
# # check W and H, or initialize them
# if not update_H:
# W = np.zeros((n_samples, n_components))
# else:
# W, H = _initialize_nmf(X, n_components, init=init, eps=1e-6)
#
# n_iter = 0
# e_before = 0
# xs, ys = X.nonzero()
#
# for step in xrange(max_iter):
# n_iter = step + 1
#
# V = np.dot(W,H)
# W_temp = W
#
# for i in xrange(n_samples):
# for k in xrange(n_components):
# sum = 0
# whht = 0
# for j in ys[xs==i]:
# eij = X[i][j] - V[i][j]
# sum += eij * H[k][j]
# whht += V[i][j] * H[k][j]
# W[i][k] = W[i][k] + sum * W[i][k] / whht
# print W[i][k] / whht
#
# if update_H:
# for j in xrange(n_features):
# for k in xrange(n_components):
# sum = 0
# wtwh= 0
# for i in xs[ys==j]:
# eij = X[i][j] - V[i][j]
# sum += eij * W_temp[i][k]
# wtwh += W_temp[i][k] * V[i][j]
# H[k][j] = H[k][j] + sum * H[k][j] / wtwh
# print H[k][j] / wtwh
#
# e = 0
# for i in xrange(n_samples):
# for j in ys[xs==i]:
# e = e + (X[i][j] - V[i][j])**2 / 2
#
# # e = e + (beta/2) * ( (W*W).sum() + (H*H).sum() )
# if step > 0:
# if abs(e/e_before - 1) < tol:
# break
# e_before = e
#
# if n_iter == max_iter:
# print ("Maximum number of iteration %d reached. Increase it to"
# " improve convergence." % max_iter)
#
# return W, H, n_iter
# def matrix_factorization(X, H=None, n_components=None,
# init='random', update_H=True,
# tol=1e-4, max_iter=200, alpha=0.01,
# beta=0.02):
#
# n_samples, n_features = X.shape
# if n_components is None:
# n_components = n_features
#
# # check W and H, or initialize them
# if not update_H:
# W = np.zeros((n_samples, n_components))
# else:
# W, H = _initialize_nmf(X, n_components, init=init, eps=1e-6)
#
# n_iter = 0
# e_before = 0
# xs, ys = X.nonzero()
#
# for step in xrange(max_iter):
# n_iter = step + 1
#
# V = np.dot(W,H)
# W_temp = W
#
# for i in xrange(n_samples):
# for k in xrange(n_components):
# sum = 0
# whht = 0
# for j in ys[xs==i]:
# sum += X[i][j] * H[k][j]
# whht += V[i][j] * H[k][j]
# W[i][k] = sum * W[i][k] / whht
#
# if update_H:
# for j in xrange(n_features):
# for k in xrange(n_components):
# sum = 0
# wtwh= 0
# for i in xs[ys==j]:
# sum += W_temp[i][k] * X[i][j]
# wtwh += W_temp[i][k] * V[i][j]
# H[k][j] = sum * H[k][j] / wtwh
#
# e = 0
# for i in xrange(n_samples):
# for j in ys[xs==i]:
# e = e + (X[i][j] - V[i][j])**2 / 2
#
# # e = e + (beta/2) * ( (W*W).sum() + (H*H).sum() )
# if step > 0:
# if abs(e/e_before - 1) < tol:
# break
# e_before = e
#
# if n_iter == max_iter:
# print ("Maximum number of iteration %d reached. Increase it to"
# " improve convergence." % max_iter)
#
# return W, H, n_iter
# def matrix_factorization(X, H=None, n_components=None,
# init='random', update_H=True,
# tol=1e-4, max_iter=200, alpha=0.01,
# beta=0.02):
#
# n_samples, n_features = X.shape
# if n_components is None:
# n_components = n_features
#
# # check W and H, or initialize them
# if not update_H:
# W = np.zeros((n_samples, n_components))
# else:
# W, H = _initialize_nmf(X, n_components, init=init, eps=1e-6)
#
# n_iter = 0
# e_before = 0
# xs, ys = X.nonzero()
#
# for step in xrange(max_iter):
# n_iter = step + 1
#
# V = np.dot(W,H)
# W_temp = W
#
# for i in xrange(n_samples):
# for k in xrange(n_components):
# s1 = 0
# s2 = 0
# for j in xrange(n_features):
# s1 += X[i][j] * H[k][j] / V[i][j]
# s2 += H[k][j]
# W[i][k] = s1 * W[i][k] / s2
#
# if update_H:
# for j in xrange(n_features):
# for k in xrange(n_components):
# s1 = 0
# s2 = 0
# for i in xrange(n_samples):
# s1 += W[i][k] * X[i][j] / V[i][j]
# s2 += W_temp[i][k]
# H[k][j] = s1 * H[k][j] / s2
#
# e = 0
# for i in xrange(n_samples):
# for j in ys[xs==i]:
# e = e + (X[i][j] - V[i][j])**2 / 2
#
# # e = e + (beta/2) * ( (W*W).sum() + (H*H).sum() )
# if step > 0:
# if abs(e/e_before - 1) < tol:
# break
# e_before = e
#
# if n_iter == max_iter:
# print ("Maximum number of iteration %d reached. Increase it to"
# " improve convergence." % max_iter)
#
# return W, H, n_iter
def _initialize_nmf(X, n_components, init=None, eps=1e-6):
n_samples, n_features = X.shape
if init is None:
if n_components < n_features and n_components < n_samples:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
W = avg * np.random.randn(n_samples, n_components)
H = avg * np.random.randn(n_components, n_features)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
avg = X.mean()
W[W == 0] = abs(avg * np.random.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * np.random.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
def norm(x):
return sqrt(squared_norm(x))
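# A small, illustrative run (not part of the module): it factorizes a made-up
# 5 x 4 ratings matrix. Assumes NumPy and scikit-learn are installed and,
# like the rest of this module, Python 2 (print statements, xrange).
if __name__ == '__main__':
    R = np.array([[5., 3., 0., 1.],
                  [4., 0., 0., 1.],
                  [1., 1., 0., 5.],
                  [1., 0., 0., 4.],
                  [0., 1., 5., 4.]])
    model = Sparse_NMF(n_components=2, max_iter=50)
    W, H = model.fit(R)
    # Low-rank reconstruction of the ratings matrix
    print np.dot(W, H)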
|
|
"""Module that reads BGEN files."""
# This file is part of pybgen.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Louis-Philippe Lemieux Perreault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import os
import sys
import zlib
import logging
import sqlite3
from math import ceil
from struct import unpack
from io import UnsupportedOperation
import numpy as np
from six.moves import range
try:
import zstandard as zstd
HAS_ZSTD = True
except ImportError:
HAS_ZSTD = False
__author__ = "Louis-Philippe Lemieux Perreault"
__copyright__ = "Copyright 2017 Louis-Philippe Lemieux Perreault"
__license__ = "MIT"
__all__ = ["PyBGEN"]
# The logger
logger = logging.getLogger(__name__)
# The python version
PYTHON_VERSION = sys.version_info.major
class _Variant(object):
__slots__ = ("name", "chrom", "pos", "a1", "a2")
def __init__(self, name, chrom, pos, a1, a2):
self.name = name
self.chrom = chrom
self.pos = pos
self.a1 = a1
self.a2 = a2
def __repr__(self):
return "<Variant {} chr{}:{}_{}/{}>".format(
self.name, self.chrom, self.pos, self.a1, self.a2,
)
class PyBGEN(object):
    """Reads and stores a set of BGEN files.
Args:
fn (str): The name of the BGEN file.
mode (str): The open mode for the BGEN file.
prob_t (float): The probability threshold (optional).
probs_only (boolean): Return only the probabilities instead of dosage.
    Reads or writes BGEN files.
.. code-block:: python
from pybgen import PyBGEN
# Reading a BGEN file
with PyBGEN("bgen_file_name") as bgen:
pass
"""
def __init__(self, fn, mode="r", prob_t=0.9, _skip_index=False,
probs_only=False):
"""Initializes a new PyBGEN instance."""
# The mode
self._mode = mode
# What to return
self._return_probs = probs_only
if self._mode == "r":
# Parsing the file
self._bgen = open(fn, "rb")
self._parse_header()
            # Were the samples parsed?
if not self._has_sample:
self._samples = None
# Connecting to the index
self._skip_index = _skip_index
if not _skip_index:
if not os.path.isfile(fn + ".bgi"):
raise IOError("{}: no such file".format(fn + ".bgi"))
self._connect_index()
# The probability
self.prob_t = prob_t
# Seeking to the first variant of the file
self._bgen.seek(self._first_variant_block)
elif self._mode == "w":
raise NotImplementedError("'w' mode not yet implemented")
else:
raise ValueError("invalid mode: '{}'".format(self._mode))
def __repr__(self):
"""The representation of the PyBGEN object."""
if self._mode == "r":
return "PyBGEN({:,d} samples; {:,d} variants)".format(
self._nb_samples, self._nb_variants,
)
return 'PyBGEN(mode="w")'
def __iter__(self):
"""The __iter__ function."""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self
def __next__(self):
"""The __next__ function."""
return self.next()
def __enter__(self):
"""Entering the context manager."""
return self
def __exit__(self, *args):
"""Exiting the context manager."""
self.close()
def close(self):
"""Closes the BGEN object."""
# Closing the BGEN file
self._bgen.close()
# Closing the index file (if in read mode)
if self._mode == "r" and not self._skip_index:
self._bgen_db.close()
@property
def nb_variants(self):
"""Returns the number of markers.
Returns:
int: The number of markers in the dataset.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self._nb_variants
@property
def nb_samples(self):
"""Returns the number of samples.
Returns:
int: The number of samples in the dataset.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self._nb_samples
@property
def samples(self):
"""Returns the samples.
Returns:
tuple: The samples.
"""
return self._samples
def next(self):
"""Returns the next variant.
Returns:
tuple: The variant's information and its genotypes (dosage) as
:py:class:`numpy.ndarray`.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
if self._bgen.tell() > self._last_variant_block:
raise StopIteration()
return self._read_current_variant()
def iter_variants(self):
"""Iterates over variants from the beginning of the BGEN file.
Returns:
tuple: A variant and the dosage.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# Seeking back to the first variant block
self._bgen.seek(self._first_variant_block)
# Return itself (the generator)
return self
def iter_variants_in_region(self, chrom, start, end):
"""Iterates over variants in a specific region.
Args:
chrom (str): The name of the chromosome.
start (int): The starting position of the region.
end (int): The ending position of the region.
"""
# Getting the region from the index file
self._bgen_index.execute(
"SELECT file_start_position "
"FROM Variant "
"WHERE chromosome = ? AND position >= ? AND position <= ?",
(chrom, start, end),
)
# Fetching all the seek positions
seek_positions = [_[0] for _ in self._bgen_index.fetchall()]
return self._iter_seeks(seek_positions)
def iter_variants_by_names(self, names):
"""Iterates over variants using a list of names.
Args:
names (list): A list of names to extract specific variants.
"""
# Fetching all the seek positions
seek_positions = self._get_seeks_for_names(names)
return self._iter_seeks(seek_positions)
def get_specific_variant(self, chrom, pos, ref, alt):
"""Get specific variant with allele lookup
Args:
chrom (str): The name of the chromosome.
pos (int): The starting position of the region.
ref (str): The reference allele.
alt (str): The alternative allele.
Returns:
list: A list containing all the value for a given variant. The list
has more than one item if there are duplicated variants.
"""
# Getting the region from the index file
self._bgen_index.execute(
"SELECT file_start_position "
"FROM Variant "
"WHERE chromosome = ? AND position = ? AND allele1 = ? AND "
" allele2 = ?",
(chrom, pos, ref, alt),
)
# Fetching all the seek positions
seek_positions = [_[0] for _ in self._bgen_index.fetchall()]
# Fetching seek positions, we return the variant
results = list(self._iter_seeks(seek_positions))
if not results:
raise ValueError("{}:{} {}/{}: variant not found"
"".format(chrom, pos, ref, alt))
return results
def iter_variant_info(self):
"""Iterate over marker information."""
self._bgen_index.execute(
"SELECT chromosome, position, rsid, allele1, allele2 FROM Variant",
)
# The array size
array_size = 1000
# Fetching the results
results = self._bgen_index.fetchmany(array_size)
while results:
for chrom, pos, rsid, a1, a2 in results:
yield _Variant(rsid, chrom, pos, a1, a2)
results = self._bgen_index.fetchmany(array_size)
def _iter_seeks(self, seeks):
"""Iterate over seek positions."""
for seek in seeks:
self._bgen.seek(seek)
yield self._read_current_variant()
def _get_seeks_for_names(self, names):
"""Gets the seek values for each names."""
# Generating a temporary table that will contain the markers to extract
self._bgen_index.execute("CREATE TEMPORARY TABLE tnames (name text)")
self._bgen_index.executemany(
"INSERT INTO tnames VALUES (?)",
[(n, ) for n in names],
)
# Fetching the seek positions
self._bgen_index.execute(
"SELECT file_start_position "
"FROM Variant "
"WHERE rsid IN (SELECT name FROM tnames)",
)
return tuple(_[0] for _ in self._bgen_index.fetchall())
def get_variant(self, name):
"""Gets the values for a given variant.
Args:
name (str): The name of the variant.
Returns:
list: A list containing all the value for a given variant. The list
has more than one item if there are duplicated variants.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
# Fetching the variant
self._bgen_index.execute(
"SELECT file_start_position FROM Variant WHERE rsid = ?",
(name, )
)
# Fetching all the seek positions
seek_positions = [_[0] for _ in self._bgen_index.fetchall()]
# Constructing the results
results = list(self._iter_seeks(seek_positions))
if not results:
raise ValueError("{}: name not found".format(name))
return results
def _read_current_variant(self):
"""Reads the current variant."""
# Getting the variant's information
var_id, rs_id, chrom, pos, alleles = self._get_curr_variant_info()
# Getting the variant's dosage
dosage = self._get_curr_variant_data()
return _Variant(rs_id, chrom, pos, *alleles), dosage
def _get_curr_variant_info(self):
"""Gets the current variant's information."""
if self._layout == 1:
n = unpack("<I", self._bgen.read(4))[0]
if n != self._nb_samples:
raise ValueError(
"{}: invalid BGEN file".format(self._bgen.name),
)
# Reading the variant id
var_id = self._bgen.read(unpack("<H", self._bgen.read(2))[0]).decode()
# Reading the variant rsid
rs_id = self._bgen.read(unpack("<H", self._bgen.read(2))[0]).decode()
# Reading the chromosome
chrom = self._bgen.read(unpack("<H", self._bgen.read(2))[0]).decode()
# Reading the position
pos = unpack("<I", self._bgen.read(4))[0]
# Getting the number of alleles
nb_alleles = 2
if self._layout == 2:
nb_alleles = unpack("<H", self._bgen.read(2))[0]
# Getting the alleles
alleles = []
for _ in range(nb_alleles):
alleles.append(self._bgen.read(
unpack("<I", self._bgen.read(4))[0]
).decode())
return var_id, rs_id, chrom, pos, tuple(alleles)
def _get_curr_variant_probs_layout_1(self):
"""Gets the current variant's probabilities (layout 1)."""
        c = self._nb_samples * 6  # uncompressed layout 1: 3 probabilities of 2 bytes per sample
if self._is_compressed:
c = unpack("<I", self._bgen.read(4))[0]
# Getting the probabilities
probs = np.frombuffer(
self._decompress(self._bgen.read(c)),
dtype="u2",
) / 32768
probs.shape = (self._nb_samples, 3)
return probs
def _get_curr_variant_probs_layout_2(self):
"""Gets the current variant's probabilities (layout 2)."""
# The total length C of the rest of the data for this variant
c = unpack("<I", self._bgen.read(4))[0]
# The number of bytes to read
to_read = c
# D = C if no compression
d = c
if self._is_compressed:
# The total length D of the probability data after
# decompression
d = unpack("<I", self._bgen.read(4))[0]
to_read = c - 4
# Reading the data and checking
data = self._decompress(self._bgen.read(to_read))
if len(data) != d:
raise ValueError(
"{}: invalid BGEN file".format(self._bgen.name)
)
# Checking the number of samples
n = unpack("<I", data[:4])[0]
if n != self._nb_samples:
raise ValueError(
"{}: invalid BGEN file".format(self._bgen.name)
)
data = data[4:]
# Checking the number of alleles (we only accept 2 alleles)
nb_alleles = unpack("<H", data[:2])[0]
if nb_alleles != 2:
raise ValueError(
"{}: only two alleles are "
"supported".format(self._bgen.name)
)
data = data[2:]
        # TODO: Check ploidy for sex chromosomes
# The minimum and maximum for ploidy (we only accept ploidy of 2)
min_ploidy = _byte_to_int(data[0])
max_ploidy = _byte_to_int(data[1])
if min_ploidy != 2 or max_ploidy != 2:
raise ValueError(
"{}: only accepting ploidy of "
"2".format(self._bgen.name)
)
data = data[2:]
# Check the list of N bytes for missingness (since we assume only
# diploid values for each sample)
ploidy_info = np.frombuffer(data[:n], dtype=np.uint8)
ploidy_info = np.unpackbits(
ploidy_info.reshape(1, ploidy_info.shape[0]).T,
axis=1,
)
missing_data = ploidy_info[:, 0] == 1
data = data[n:]
# TODO: Permit phased data
# Is the data phased?
is_phased = data[0] == 1
if is_phased:
raise ValueError(
"{}: only accepting unphased data".format(self._bgen.name)
)
data = data[1:]
        # The number of bits used to encode each probability
b = _byte_to_int(data[0])
data = data[1:]
        # Reading the probabilities (remember that we only allow diploid
        # values)
probs = None
if b == 8:
probs = np.frombuffer(data, dtype=np.uint8)
elif b == 16:
probs = np.frombuffer(data, dtype=np.uint16)
elif b == 32:
probs = np.frombuffer(data, dtype=np.uint32)
else:
probs = _pack_bits(data, b)
# Changing shape and computing dosage
probs.shape = (self._nb_samples, 2)
return probs / (2**b - 1), missing_data
def _get_curr_variant_data(self):
"""Gets the current variant's dosage or probabilities."""
if self._layout == 1:
# Getting the probabilities
probs = self._get_curr_variant_probs_layout_1()
if self._return_probs:
# Returning the probabilities
return probs
else:
# Returning the dosage
return self._layout_1_probs_to_dosage(probs)
else:
# Getting the probabilities
probs, missing_data = self._get_curr_variant_probs_layout_2()
if self._return_probs:
# Getting the alternative allele homozygous probabilities
last_probs = self._get_layout_2_last_probs(probs)
# Stacking the probabilities
last_probs.shape = (last_probs.shape[0], 1)
full_probs = np.hstack((probs, last_probs))
# Setting the missing to NaN
full_probs[missing_data] = np.nan
# Returning the probabilities
return full_probs
else:
# Computing the dosage
dosage = self._layout_2_probs_to_dosage(probs)
# Setting the missing to NaN
dosage[missing_data] = np.nan
# Returning the dosage
return dosage
def _layout_1_probs_to_dosage(self, probs):
"""Transforms probability values to dosage (from layout 1)"""
# Constructing the dosage
dosage = 2 * probs[:, 2] + probs[:, 1]
if self.prob_t > 0:
dosage[~np.any(probs >= self.prob_t, axis=1)] = np.nan
return dosage
@staticmethod
def _get_layout_2_last_probs(probs):
"""Gets the layout 2 last probabilities (homo alternative)."""
return 1 - np.sum(probs, axis=1)
def _layout_2_probs_to_dosage(self, probs):
"""Transforms probability values to dosage (from layout 2)."""
# Computing the last genotype's probabilities
last_probs = self._get_layout_2_last_probs(probs)
# Constructing the dosage
dosage = 2 * last_probs + probs[:, 1]
# Setting low quality to NaN
if self.prob_t > 0:
good_probs = (
np.any(probs >= self.prob_t, axis=1) |
(last_probs >= self.prob_t)
)
dosage[~good_probs] = np.nan
return dosage
def _parse_header(self):
"""Parses the header (header and sample blocks)."""
# Parsing the header block
self._parse_header_block()
# Parsing the sample block (if any)
if self._has_sample:
self._parse_sample_block()
def _parse_header_block(self):
"""Parses the header block."""
        # Getting the data offset (the start point of the data)
self._offset = unpack("<I", self._bgen.read(4))[0]
self._first_variant_block = self._offset + 4
# Getting the header size
self._header_size = unpack("<I", self._bgen.read(4))[0]
# Getting the number of samples and variants
self._nb_variants = unpack("<I", self._bgen.read(4))[0]
self._nb_samples = unpack("<I", self._bgen.read(4))[0]
# Checking the magic number
magic = self._bgen.read(4)
if magic != b"bgen":
            # Older files may store 0 instead of the magic number
if unpack("<I", magic)[0] != 0:
raise ValueError(
"{}: invalid BGEN file.".format(self._bgen.name)
)
# Passing through the "free data area"
self._bgen.read(self._header_size - 20)
# Reading the flag
flag = np.frombuffer(self._bgen.read(4), dtype=np.uint8)
flag = np.unpackbits(flag.reshape(1, flag.shape[0]).T, axis=1)
# Getting the compression type from the layout
compression = _bits_to_int(flag[0, -2:])
self._is_compressed = False
if compression == 0:
# No decompression required
self._decompress = self._no_decompress
elif compression == 1:
# ZLIB decompression
self._decompress = zlib.decompress
self._is_compressed = True
elif compression == 2:
if not HAS_ZSTD:
raise ValueError("zstandard module is not installed")
            # ZSTANDARD decompression (needs to be checked)
self._decompress = zstd.ZstdDecompressor().decompress
self._is_compressed = True
# Getting the layout
layout = _bits_to_int(flag[0, -6:-2])
if layout == 0:
raise ValueError(
"{}: invalid BGEN file".format(self._bgen.name)
)
elif layout == 1:
self._layout = 1
elif layout == 2:
self._layout = 2
else:
raise ValueError(
"{}: {} invalid layout type".format(self._bgen.name, layout)
)
# Checking if samples are in the file
self._has_sample = flag[-1, 0] == 1
def _parse_sample_block(self):
"""Parses the sample block."""
# Getting the block size
block_size = unpack("<I", self._bgen.read(4))[0]
if block_size + self._header_size > self._offset:
raise ValueError(
"{}: invalid BGEN file".format(self._bgen.name)
)
# Checking the number of samples
n = unpack("<I", self._bgen.read(4))[0]
if n != self._nb_samples:
raise ValueError(
"{}: invalid BGEN file".format(self._bgen.name)
)
# Getting the sample information
samples = []
for i in range(self._nb_samples):
size = unpack("<H", self._bgen.read(2))[0]
samples.append(self._bgen.read(size).decode())
self._samples = tuple(samples)
# Just a check with the header
if len(self.samples) != self._nb_samples:
raise ValueError("{}: number of samples different between header "
"and sample block".format(self._bgen.name))
def _connect_index(self):
"""Connect to the index (which is an SQLITE database)."""
self._bgen_db = sqlite3.connect(self._bgen.name + ".bgi")
self._bgen_index = self._bgen_db.cursor()
# Fetching the number of variants and the first and last seek position
self._bgen_index.execute(
"SELECT COUNT (rsid), "
" MIN (file_start_position), "
" MAX (file_start_position) "
"FROM Variant"
)
result = self._bgen_index.fetchone()
nb_markers = result[0]
first_variant_block = result[1]
self._last_variant_block = result[2]
# Checking the number of markers
if nb_markers != self._nb_variants:
raise ValueError(
"{}: number of markers different between header ({:,d}) "
"and index file ({:,d})".format(
self._bgen.name, self._nb_variants, nb_markers,
)
)
# Checking the first variant seek position
if first_variant_block != self._first_variant_block:
raise ValueError("{}: invalid index".format(self._bgen.name))
@staticmethod
def _no_decompress(data):
return data
def _bits_to_int(bits):
"""Converts bits to int."""
result = 0
for bit in bits:
result = (result << 1) | bit
return result
def _byte_to_int_python3(byte):
"""Converts a byte to a int for python 3."""
return byte
def _byte_to_int_python2(byte):
"""Converts a byte to a int for python 2."""
return unpack("<B", byte)[0]
_byte_to_int = _byte_to_int_python3
if PYTHON_VERSION < 3:
_byte_to_int = _byte_to_int_python2
def _pack_bits(data, b):
    """Unpacks BGEN probabilities encoded on b bits and repacks them as integers."""
# Getting the data from the bytes
data = np.fromiter(
((_byte_to_int(byte) >> i) & 1 for byte in data for i in range(8)),
dtype=bool,
)
data.shape = (data.shape[0] // b, b)
# Finding the closest full bytes (if required)
# TODO: Improve this so that it is more efficient
full_bytes = data[:, ::-1]
if data.shape[1] % 8 != 0:
nb_bits = int(ceil(b / 8)) * 8
full_bytes = np.zeros((data.shape[0], nb_bits), dtype=bool)
full_bytes[:, -b:] += data[:, ::-1]
# Packing the bits
packed = np.packbits(full_bytes, axis=1)
# Left-shifting for final value
final = packed[:, 0]
for i in range(1, packed.shape[1]):
final = np.left_shift(final, 8, dtype=np.uint) | packed[:, i]
return final
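# A minimal usage sketch (not part of the module): "example.bgen" is a
# hypothetical file name, and a matching "example.bgen.bgi" index file is
# required when opening in read mode.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    with PyBGEN("example.bgen") as bgen:
        logger.info("%d samples, %d variants", bgen.nb_samples, bgen.nb_variants)
        # Dosage of the first variant only
        for variant, dosage in bgen.iter_variants():
            logger.info("%s: mean dosage = %.3f", variant, np.nanmean(dosage))
            break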
|
|
# orm/descriptor_props.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from .interfaces import MapperProperty, PropComparator
from .util import _none_set
from . import attributes
from .. import util, sql, exc as sa_exc, event, schema
from ..sql import expression
from . import properties
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
collection = False
def __init__(self, key):
self.key = key
if hasattr(prop, 'get_history'):
def get_history(self, state, dict_,
passive=attributes.PASSIVE_OFF):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(
fget=fget,
fset=fset,
fdel=fdel,
)
proxy_attr = attributes.\
create_proxied_attribute(self.descriptor)\
(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc,
original_property=self
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class CompositeProperty(DescriptorProperty):
"""Defines a "composite" mapped attribute, representing a collection
of columns as one attribute.
:class:`.CompositeProperty` is constructed using the :func:`.composite`
function.
See also:
:ref:`mapper_composite`
"""
def __init__(self, class_, *attrs, **kwargs):
"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a full
usage example.
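        A condensed, illustrative sketch (``Point``, ``Vertex`` and
        ``vertices_table`` below are placeholder names)::

            class Point(object):
                def __init__(self, x, y):
                    self.x = x
                    self.y = y

                def __composite_values__(self):
                    return self.x, self.y

            mapper(Vertex, vertices_table, properties={
                'start': composite(Point, vertices_table.c.x1,
                                   vertices_table.c.y1),
                'end': composite(Point, vertices_table.c.x2,
                                 vertices_table.c.y2)
            })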
The :class:`.MapperProperty` returned by :func:`.composite`
is the :class:`.CompositeProperty`.
:param class\_:
The "composite type" class.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
.. versionchanged:: 0.7
This flag specifically becomes meaningful
- previously it was a placeholder.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does not
load immediately, and is instead loaded when the attribute is first
accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the class.
**Deprecated.** Please see :class:`.AttributeEvents`.
"""
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get('active_history', False)
self.deferred = kwargs.get('deferred', False)
self.group = kwargs.get('group', None)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
if 'info' in kwargs:
self.info = kwargs.pop('info')
util.set_creation_order(self)
self._create_descriptor()
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._init_props()
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [
getattr(instance, key)
for key in self._attribute_keys
]
# current expected behavior here is that the composite is
# created on access if the object is persistent or if
                # col attributes have non-None values. This would be better
# if the composite were created unconditionally,
# but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or
not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys,
value.__composite_values__()):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
@util.memoized_property
def _comparable_elements(self):
return [
getattr(self.parent.class_, prop.key)
for prop in self.props
]
def _init_props(self):
self.props = props = []
for attr in self.attrs:
if isinstance(attr, str):
prop = self.parent.get_property(attr)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
props.append(prop)
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_class = prop._strategy_lookup(
deferred=True, instrument=True)
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
dict_ = state.dict
if self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
#assert self.key not in dict_
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in
self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
            defaults. Pop out the composite value here so that it
            gets recreated on the next access.
"""
state.dict.pop(self.key, None)
event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
event.listen(self.parent, 'load',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'refresh',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'expire',
expire_handler, raw=True, propagate=True)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [
prop.key for prop in self.props
]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)]
)
else:
return attributes.History(
(), [self.composite_class(*added)], ()
)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.CompositeProperty` attributes.
See the example in :ref:`composite_operations` for an overview
        of usage, as well as the documentation for :class:`.PropComparator`.
See also:
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
def __clause_element__(self):
return expression.ClauseList(group=False, *self._comparable_elements)
__hash__ = None
@util.memoized_property
def _comparable_elements(self):
if self._adapt_to_entity:
return [
getattr(
self._adapt_to_entity.entity,
prop.key
) for prop in self.prop._comparable_elements
]
else:
return self.prop._comparable_elements
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
comparisons = [
a == b
for a, b in zip(self.prop._comparable_elements, values)
]
if self._adapt_to_entity:
comparisons = [self.adapter(x) for x in comparisons]
return sql.and_(*comparisons)
def __ne__(self, other):
return sql.not_(self.__eq__(other))
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add this "
"property explicitly to %s." %
(self.parent, self.key, self.parent))
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class SynonymProperty(DescriptorProperty):
def __init__(self, name, map_column=None,
descriptor=None, comparator_factory=None,
doc=None):
"""Denote an attribute name as a synonym to a mapped property.
.. versionchanged:: 0.7
:func:`.synonym` is superseded by the :mod:`~sqlalchemy.ext.hybrid`
extension. See the documentation for hybrids
at :ref:`hybrids_toplevel`.
Used with the ``properties`` dictionary sent to
:func:`~sqlalchemy.orm.mapper`::
class MyClass(object):
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = value
status = property(_get_status, _set_status)
mapper(MyClass, sometable, properties={
"status":synonym("_status", map_column=True)
})
Above, the ``status`` attribute of MyClass will produce
expression behavior against the table column named ``status``,
using the Python attribute ``_status`` on the mapped class
to represent the underlying value.
:param name: the name of the existing mapped property, which can be
any other ``MapperProperty`` including column-based properties and
relationships.
:param map_column: if ``True``, an additional ``ColumnProperty`` is created
on the mapper automatically, using the synonym's name as the keyname of
the property, and the keyname of this ``synonym()`` as the name of the
column to map.
"""
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
# TODO: when initialized, check _proxied_property,
# emit a warning if its not a column-based property
@util.memoized_property
def _proxied_property(self):
return getattr(self.parent.class_, self.name).property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.mapped_table.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (self.name, parent.mapped_table.description, self.key))
elif parent.mapped_table.c[self.key] in \
parent._columntoproperty and \
parent._columntoproperty[
parent.mapped_table.c[self.key]
].key == self.name:
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" %
(self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(parent.mapped_table.c[self.key])
parent._configure_property(
self.name, p,
init=init,
setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(self, comparator_factory, descriptor=None, doc=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
.. versionchanged:: 0.7
:func:`.comparable_property` is superseded by
the :mod:`~sqlalchemy.ext.hybrid` extension. See the example
at :ref:`hybrid_custom_comparators`.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print SearchWord.word_insensitive == "Trucks"
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
"""
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
|
|
from mongoengine import *
import datetime
import urllib
import simplejson as json
import urllib.request as request
from time import mktime
from django.contrib.auth.models import User
from reader.learn import predict_articles
class Source(Document):
title = StringField(required=True)
alias = StringField(required=True, unique=True)
description = StringField()
def __str__(self):
return self.title
def update_articles(self):
pass
meta = {'allow_inheritance': True}
class Reader(Document):
user_id = IntField(required=True, unique=True)
brain = BinaryField()
subscriptions = ListField(ReferenceField(Source,
reverse_delete_rule=NULLIFY))
preferred_article_view = StringField(choices=('frame', 'article'),
default='frame')
reading_list = ListField(DictField())
def subscribe(self, source):
self.update(add_to_set__subscriptions=[source])
def unsubscribe(self, source):
self.update(pull_all__subscriptions=[source])
def is_subscribed(self, source):
return source in self.subscriptions
def extend_reading_list(self):
if len(self.reading_list) > 10:
return
if len(ReadRecord.objects(reader=self)) == 0:
self.update(add_to_set__reading_list=[{"score": 0, "article_id": x.id} for x in Article.objects[:5]])
return
predicted = predict_articles(self, Article.objects.filter(
source__in=self.subscriptions,
id__nin=[x.article.id for x in ReadRecord.objects(reader=self)] + [x['article_id'] for x in self.reading_list]))
self.update(add_to_set__reading_list=[{"score": x[1], "article_id": x[0].id} for x in predicted])
@staticmethod
def reader_for(user):
try:
return Reader.objects.get(user_id=user.id)
except Reader.DoesNotExist:
reader = Reader(user_id=user.id)
reader.save()
try:
hackernews = Source.objects.get(alias="hackernews")
except Source.DoesNotExist:
hackernews = HackerNewsSource(alias="hackernews", title="Hacker News")
hackernews.save()
reader.subscribe(hackernews)
reader.extend_reading_list()
return reader
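# Illustrative usage sketch (not part of the original module; ``request`` and
# ``some_source`` are hypothetical stand-ins for a Django HttpRequest with an
# authenticated user and an existing Source document):
#
#     reader = Reader.reader_for(request.user)
#     if not reader.is_subscribed(some_source):
#         reader.subscribe(some_source)
#     reader.extend_reading_list()
#     reader.reload()          # refresh after the raw update() calls above
#     next_up = reader.reading_list[:10]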
class Article(Document):
title = StringField(required=True)
content_text = StringField(required=True)
content_html = StringField(required=True)
url = StringField(required=True, unique=True)
date_published = DateTimeField(required=True,
default=datetime.datetime.now)
is_framing_allowed = BooleanField(default=True, required=True)
source = ReferenceField(Source, required=True, reverse_delete_rule=CASCADE)
meta = {'allow_inheritance': True}
def update_content_by_url(self):
from boilerpipe.extract import Extractor
extractor = Extractor(extractor='ArticleExtractor', url=self.url)
self.content_html = extractor.getHTML()
self.content_text = extractor.getText()
def check_framing_allowed(self):
from django.conf import settings
request = urllib.request.Request(self.url)
request.add_header("Referer", settings.BASE_URL)
try:
opener = urllib.request.urlopen(request)
except Exception:
self.is_framing_allowed = False
return
framing_allowed = not ('x-frame-options' in opener.headers)
self.is_framing_allowed = framing_allowed
def __str__(self):
return (self.title + " [" + self.url + "]")
@staticmethod
def get_or_new(**kwargs):
try:
return Article.objects.get(**kwargs), False
except Article.DoesNotExist:
return Article(**kwargs), True
# Additional Documents for Reader
class ReadRecord(Document):
reader = ReferenceField(Reader, required=True, unique_with="article")
article = ReferenceField(Article, required=True, unique_with="reader")
date_read = ListField(DateTimeField(default=datetime.datetime.now))
is_liked = BooleanField(default=False, required=True)
is_learned = BooleanField(default=False, required=True)
# Additional Documents for Articles
class HackerNewsArticle(Article):
hackernews_id = StringField(required=True)
hackernews_type = StringField(required=True)
hackernews_score = IntField(required=True)
hackernews_submitter = StringField(required=True)
@staticmethod
def get_or_new(**kwargs):
try:
return HackerNewsArticle.objects.get(**kwargs), False
except HackerNewsArticle.DoesNotExist:
return HackerNewsArticle(**kwargs), True
# Additional Documents for Source
class HackerNewsSource(Source):
def fetch_top_story_ids(self):
top_stories_url = "https://hacker-news.firebaseio.com/v0/topstories.json"
try:
story_ids_raw = request.urlopen(top_stories_url)
return json.loads(story_ids_raw.readlines()[0])
except Exception:
return []
def fetch_story(self, story_id):
story_url = "https://hacker-news.firebaseio.com/v0/item/{}.json".format(story_id)
try:
story_raw = request.urlopen(story_url)
s = json.loads(story_raw.readlines()[0])
return s if s['title'] and s['url'] and s['time'] and s['id'] and s['score'] and s['by'] and s['type'] else None
except Exception:
return None
def update_articles(self):
for story_id in self.fetch_top_story_ids():
story = self.fetch_story(story_id)
if not story:
continue
article, newed = HackerNewsArticle.get_or_new(url=story['url'])
article.title = story['title']
article.date_published = datetime.datetime.fromtimestamp(story['time'])
article.source = self
article.hackernews_id = str(story['id'])
article.hackernews_score = story['score']
article.hackernews_submitter = story['by']
article.hackernews_type = story['type']
try:
article.update_content_by_url()
except Exception:
continue
article.check_framing_allowed()
article.save()
class RSSSource(Source):
url = StringField(required=True)
categories = ListField(StringField())
def update_articles(self):
import feedparser
try:
rss = feedparser.parse(self.url)
except Exception:
return
self.title = rss.feed.title
if hasattr(rss.feed, 'description'):
self.description = rss.feed.description
self.save()
for entry in rss.entries:
article, newed = Article.get_or_new(url=entry.link)
article.title = entry.title
if hasattr(entry, 'published_parsed'):
article.date_published = datetime.datetime.fromtimestamp(mktime(entry.published_parsed))
article.source = self
try:
article.update_content_by_url()
except Exception:
continue
article.check_framing_allowed()
article.save()
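# Illustrative refresh loop (a sketch, not part of the original module). Because
# Source sets allow_inheritance, querying the base class also returns
# HackerNewsSource and RSSSource documents, and each one dispatches to its own
# update_articles() override:
#
#     def update_all_sources():
#         for source in Source.objects:
#             source.update_articles()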
|
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', '[email protected]'), ('Full Name', '[email protected]'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinean Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('mn', gettext_noop('Mongolian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('nb', gettext_noop('Norwegian Bokmal')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
# Legacy format
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# New format
DATABASES = {
}
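# An illustrative entry for the new format (define this in your project's
# settings module rather than here; the values below are placeholders):
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.sqlite3',
#             'NAME': 'example.db',
#         }
#     }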
# Classes used to implement db routing behaviour
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add a thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be grouped together when splitting them by THOUSAND_SEPARATOR
# 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'locmem://'
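# Other backend URIs follow the same scheme, for example (illustrative values):
#   'memcached://127.0.0.1:11211/', 'db://my_cache_table',
#   'file:///var/tmp/django_cache', or 'dummy://' for development.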
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ('asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook', 'nigger', 'shit')
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Name and domain for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
############
# MESSAGES #
############
# Class to use as the messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.user_messages.LegacyFallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
|
|
'''
Build a simple neural machine translation model
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import numpy
import copy
import os
import warnings
import sys
import time
from scipy import optimize, stats
from collections import OrderedDict
from sklearn.cross_validation import KFold
from data_iterator import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
state_before * trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),
state_before * 0.5)
return proj
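# Note on the convention above: while use_noise is set to 1 (training) the
# activations are multiplied by a Bernoulli(p=0.5) mask; while it is 0
# (evaluation) they are scaled by 0.5 instead, so the expected magnitude of the
# activations matches between the two modes. The training loop below toggles
# this with use_noise.set_value(1.) / use_noise.set_value(0.).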
# make prefix-appended name
def _p(pp, name):
return '%s_%s'%(pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive'%kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.01, ortho=True):
if nout == None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def tanh(x):
return tensor.tanh(x)
def linear(x):
return x
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
# batch preparation
def prepare_data(seqs_x, seqs_y, maxlen=None, n_words_src=30000, n_words=30000):
# x: a list of sentences
lengths_x = [len(s) for s in seqs_x]
lengths_y = [len(s) for s in seqs_y]
if maxlen != None:
new_seqs_x = []
new_seqs_y = []
new_lengths_x = []
new_lengths_y = []
for l_x, s_x, l_y, s_y in zip(lengths_x, seqs_x, lengths_y, seqs_y):
if l_x < maxlen and l_y < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
new_seqs_y.append(s_y)
new_lengths_y.append(l_y)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
lengths_y = new_lengths_y
seqs_y = new_seqs_y
if len(lengths_x) < 1 or len(lengths_y) < 1:
return None, None, None, None
n_samples = len(seqs_x)
maxlen_x = numpy.max(lengths_x) + 1
maxlen_y = numpy.max(lengths_y) + 1
x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
for idx, [s_x, s_y] in enumerate(zip(seqs_x,seqs_y)):
x[:lengths_x[idx],idx] = s_x
x_mask[:lengths_x[idx]+1,idx] = 1.
y[:lengths_y[idx],idx] = s_y
y_mask[:lengths_y[idx]+1,idx] = 1.
return x, x_mask, y, y_mask
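# Illustrative shapes (the batch below is hypothetical): for 16 sentence pairs
# whose longest source/target sentences have 20 and 25 tokens, prepare_data
# returns time-major int64 matrices x of shape (21, 16) and y of shape (26, 16)
# (one extra all-zero row acting as the end-of-sentence token), plus float32
# masks of the same shapes with ones over real tokens and the EOS position and
# zeros over padding.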
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True):
if nin == None:
nin = options['dim_proj']
if nout == None:
nout = options['dim_proj']
params[_p(prefix,'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix,'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None, hiero=False):
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
if not hiero:
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
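# For reference, the step function in gru_layer below implements the standard
# GRU recurrence (Cho et al., 2014) using the concatenated W/U matrices
# initialized above:
#   r_t  = sigmoid(W_r x_t + U_r h_{t-1} + b_r)
#   u_t  = sigmoid(W_u x_t + U_u h_{t-1} + b_u)
#   h~_t = tanh(Wx x_t + Ux (r_t * h_{t-1}) + bx)
#   h_t  = u_t * h_{t-1} + (1 - u_t) * h~_t
# Masked (padding) positions simply carry h_{t-1} forward unchanged.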
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[1]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
U = tparams[_p(prefix, 'U')]
Ux = tparams[_p(prefix, 'Ux')]
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h#, r, u, preact, preactx
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [tensor.alloc(0., n_samples, dim)],
#None, None, None, None],
non_sequences = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_gru(options, params, prefix, nin=nin, dim=dim)
# context to LSTM
Wc = norm_weight(dimctx,dim*2)
params[_p(prefix,'Wc')] = Wc
Wcx = norm_weight(dimctx,dim)
params[_p(prefix,'Wcx')] = Wcx
# attention: prev -> hidden
Wi_att = norm_weight(nin,dimctx)
params[_p(prefix,'Wi_att')] = Wi_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
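# For reference, gru_cond_layer below is a conditional GRU with soft attention
# (Bahdanau et al., 2015) built from the parameters initialized above. With
# decoder state s_{t-1}, previous target embedding y_{t-1} and encoder
# annotations h_j:
#   e_tj     = U_att^T tanh(Wc_att h_j + Wd_att s_{t-1} + Wi_att y_{t-1} + b_att) + c_tt
#   alpha_tj = softmax_j(e_tj)          (masked by context_mask when provided)
#   ctx_t    = sum_j alpha_tj * h_j
# ctx_t then enters the GRU gates through Wc and the candidate state through Wcx.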
def gru_cond_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Wcx')].shape[1]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowc = tensor.dot(state_below, tparams[_p(prefix, 'Wi_att')])
def _step_slice(m_, x_, xx_, xc_, h_, ctx_, alpha_, pctx_, cc_,
U, Wc, Wd_att, U_att, c_tt, Ux, Wcx):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (cc_ * alpha[:,:,None]).sum(0) # current context
preact = tensor.dot(h_, U)
preact += x_
preact += tensor.dot(ctx_, Wc)
preact = tensor.nnet.sigmoid(preact)
r = _slice(preact, 0, dim)
u = _slice(preact, 1, dim)
preactx = tensor.dot(h_, Ux)
preactx *= r
preactx += xx_
preactx += tensor.dot(ctx_, Wcx)
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, ctx_, alpha.T #, pstate_, preact, preactx, r, u
seqs = [mask, state_below_, state_belowx, state_belowc]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')]]
if one_step:
rval = _step(*(seqs+[init_state, None, None, pctx_, context]+shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0])],
non_sequences=[pctx_,
context]+shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
# initialize all parameters
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: bidirectional RNN
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'],
dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'])
ctxdim = 2 * options['dim']
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state',
nin=ctxdim, nout=options['dim'])
# decoder
params = get_layer(options['decoder'])[0](options, params, prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
nin=options['dim_word'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim_word'], nout=options['n_words'])
return params
# build a training model
def build_model(tparams, options):
opt_ret = dict()
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype='float32')
xr = x[::-1]
xr_mask = x_mask[::-1]
n_timesteps = x.shape[0]
n_timesteps_trg = y.shape[0]
n_samples = x.shape[1]
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder',
mask=x_mask)
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r',
mask=xr_mask)
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = (ctx * x_mask[:,:,None]).sum(0) / x_mask.sum(0)[:,None]
#ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)
# initial decoder state
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
# word embedding (target)
emb = tparams['Wemb_dec'][y.flatten()]
emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state)
proj_h = proj[0]
ctxs = proj[1]
opt_ret['dec_alphas'] = proj[2]
# compute word probabilities
logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1],
logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0],y.shape[1]])
cost = (cost * y_mask).sum(0)
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng):
x = tensor.matrix('x', dtype='int64')
xr = x[::-1]
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# word embedding (source)
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
# encoder
proj = get_layer(options['encoder'])[1](tparams, emb, options, prefix='encoder')
projr = get_layer(options['encoder'])[1](tparams, embr, options, prefix='encoder_r')
ctx = concatenate([proj[0],projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = ctx.mean(0)
#ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
print 'Building f_init...',
outs = [init_state, ctx]
f_init = theano.function([x], outs, name='f_init', profile=profile)
print 'Done'
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
init_state = tensor.matrix('init_state', dtype='float32')
# if it's the first word, emb should be all zero
emb = tensor.switch(y[:,None] < 0,
tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
tparams['Wemb_dec'][y])
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state)
next_state = proj[0]
ctxs = proj[1]
logit_lstm = get_layer('ff')[1](tparams, next_state, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
next_probs = tensor.nnet.softmax(logit)
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# next word probability
print 'Building f_next..',
inps = [y, ctx, init_state]
outs = [next_probs, next_sample, next_state]
f_next = theano.function(inps, outs, name='f_next', profile=profile)
print 'Done'
return f_init, f_next
# generate sample
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False):
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
ret = f_init(x)
next_state, ctx0 = ret[0], ret[1]
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
ctx = numpy.tile(ctx0, [live_k, 1])
inps = [next_w, ctx, next_state]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score += next_p[0,nw]
if nw == 0:
break
else:
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[ti])
new_hyp_states.append(copy.copy(next_state[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
probs = []
n_done = 0
for x, y in iterator:
n_done += len(x)
x, x_mask, y, y_mask = prepare_data(x, y,
n_words_src=options['n_words_src'],
n_words=options['n_words'])
pprobs = f_log_probs(x,x_mask,y,y_mask)
for pp in pprobs:
probs.append(pp)
if numpy.isnan(numpy.mean(probs)):
import ipdb; ipdb.set_trace()
if verbose:
print >>sys.stderr, '%d samples computed'%(n_done)
return numpy.array(probs)
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0.,
name='%s_grad'%k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
lr0 = 0.0002
b1 = 0.1
b2 = 0.001
e = 1e-8
updates = []
i = theano.shared(numpy.float32(0.))
i_t = i + 1.
fix1 = 1. - b1**(i_t)
fix2 = 1. - b2**(i_t)
lr_t = lr0 * (tensor.sqrt(fix2) / fix1)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_t = (b1 * g) + ((1. - b1) * m)
v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
g_t = m_t / (tensor.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((i, i_t))
f_update = theano.function([lr], [], updates=updates,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
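# Note on the hyperparameters above: b1 and b2 are the complements of the usual
# Adam decay rates, i.e. m_t = b1*g + (1-b1)*m and v_t = b2*g^2 + (1-b2)*v
# correspond to beta1 = 0.9 and beta2 = 0.999 in the standard notation, with a
# base learning rate lr0 = 2e-4 and epsilon = 1e-8. The lr argument passed to
# f_update is ignored by this optimizer (hence on_unused_input='ignore').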
def adadelta(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad'%k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rup2'%k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2'%k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up, profile=profile)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k) for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad'%k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up, profile=profile)
updir = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_updir'%k) for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4)) for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads, running_grads2)]
param_up = [(p, p + udn[1]) for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def sgd(lr, tparams, grads, x, mask, y, cost):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup, profile=profile)
pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update
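# Every optimizer above returns the same pair of compiled functions, which the
# training loop below uses roughly as follows (a sketch of the calling
# convention, not additional code to run here):
#
#     cost = f_grad_shared(x, x_mask, y, y_mask)   # forward pass + store grads
#     f_update(lrate)                              # apply the parameter update
#
# Note that sgd compiles f_grad_shared over (x, mask, y) rather than over the
# inps list, so its signature differs from adam/adadelta/rmsprop.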
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
encoder='gru',
decoder='gru_cond',
patience=10,
max_epochs=5000,
dispFreq=100,
decay_c=0.,
alpha_c=0.,
diag_c=0.,
clip_c=-1.,
lrate=0.01,
n_words_src=100000,
n_words=100000,
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size = 16,
valid_batch_size = 16,
saveto='model.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq updates
datasets=['/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
'/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'],
valid_datasets=['../data/dev/newstest2011.en.tok', '../data/dev/newstest2011.fr.tok'],
dictionaries=['/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
'/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'],
use_dropout=False,
reload_=False):
# Model options
model_options = locals().copy()
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
with open(dd, 'rb') as f:
worddicts[ii] = pkl.load(f)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
# reload options
if reload_ and os.path.exists(saveto):
with open('%s.pkl'%saveto, 'rb') as f:
model_options = pkl.load(f)
print 'Loading data'
train = TextIterator(datasets[0], datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, y, y_mask, \
opt_ret, \
cost = \
build_model(tparams, model_options)
inps = [x, x_mask, y, y_mask]
print 'Building sampler'
f_init, f_next = build_sampler(tparams, model_options, trng)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, cost, profile=profile)
print 'Done'
cost = cost.mean()
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
alpha_reg = alpha_c * ((tensor.cast(y_mask.sum(0)//x_mask.sum(0), 'float32')[:,None]-
opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
cost += alpha_reg
# after any regularizer
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
print 'Building f_grad...',
f_grad = theano.function(inps, grads, profile=profile)
print 'Done'
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c**2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
history_errs = list(numpy.load(saveto)['history_errs'])
best_p = None
bad_counter = 0
if validFreq == -1:
validFreq = len(train[0])/batch_size
if saveFreq == -1:
saveFreq = len(train[0])/batch_size
if sampleFreq == -1:
sampleFreq = len(train[0])/batch_size
uidx = 0
estop = False
for eidx in xrange(max_epochs):
n_samples = 0
for x, y in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen,
n_words_src=n_words_src,
n_words=n_words)
            if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
cost = f_grad_shared(x, x_mask, y, y_mask)
f_update(lrate)
ud = time.time() - ud_start
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
#import ipdb; ipdb.set_trace()
                if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
if numpy.mod(uidx, sampleFreq) == 0:
# FIXME: random selection?
for jj in xrange(numpy.minimum(5,x.shape[1])):
stochastic = True
sample, score = gen_sample(tparams, f_init, f_next, x[:,jj][:,None],
model_options, trng=trng, k=1, maxlen=30,
stochastic=stochastic, argmax=False)
print 'Source ',jj,': ',
for vv in x[:,jj]:
if vv == 0:
break
if vv in worddicts_r[0]:
print worddicts_r[0][vv],
else:
print 'UNK',
print
print 'Truth ',jj,' : ',
for vv in y[:,jj]:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
print 'Sample ', jj, ': ',
if stochastic:
ss = sample
else:
score = score / numpy.array([len(s) for s in sample])
ss = sample[score.argmin()]
for vv in ss:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
valid_errs = pred_probs(f_log_probs, prepare_data, model_options, valid)
valid_err = valid_errs.mean()
history_errs.append(valid_err)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= numpy.array(history_errs)[:-patience].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
estop = True
break
if numpy.isnan(valid_err):
import ipdb; ipdb.set_trace()
print 'Valid ', valid_err
print 'Seen %d samples'%n_samples
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
valid_err = pred_probs(f_log_probs, prepare_data, model_options, valid).mean()
print 'Valid ', valid_err
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p,
history_errs=history_errs,
**params)
return valid_err
if __name__ == '__main__':
pass
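    # Hypothetical invocation sketch (paths and frequencies are placeholders; the
    # keyword arguments correspond to the parameters of train() above):
    # train(saveto='model.npz',
    #       datasets=['train.en.tok', 'train.fr.tok'],
    #       valid_datasets=['dev.en.tok', 'dev.fr.tok'],
    #       dictionaries=['train.en.tok.pkl', 'train.fr.tok.pkl'],
    #       validFreq=1000, saveFreq=1000, sampleFreq=100,
    #       use_dropout=False, reload_=False)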
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
class Pooling1D(Layer):
"""Pooling layer for arbitrary pooling functions, for 1D inputs.
This class only exists for code reuse. It will never be an exposed API.
Arguments:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(Pooling1D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=3)
def call(self, inputs):
pad_axis = 2 if self.data_format == 'channels_last' else 3
inputs = array_ops.expand_dims(inputs, pad_axis)
outputs = self.pool_function(
inputs,
self.pool_size + (1,),
strides=self.strides + (1,),
padding=self.padding,
data_format=self.data_format)
return array_ops.squeeze(outputs, pad_axis)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
steps = input_shape[2]
features = input_shape[1]
else:
steps = input_shape[1]
features = input_shape[2]
length = conv_utils.conv_output_length(steps,
self.pool_size[0],
self.padding,
self.strides[0])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([input_shape[0], features, length])
else:
return tensor_shape.TensorShape([input_shape[0], length, features])
def get_config(self):
config = {
'strides': self.strides,
'pool_size': self.pool_size,
'padding': self.padding,
'data_format': self.data_format,
}
base_config = super(Pooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D')
class MaxPooling1D(Pooling1D):
"""Max pooling operation for temporal data.
Arguments:
pool_size: Integer, size of the max pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
"""
def __init__(self, pool_size=2, strides=None,
padding='valid', data_format='channels_last', **kwargs):
super(MaxPooling1D, self).__init__(
functools.partial(backend.pool2d, pool_mode='max'),
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
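# A minimal usage sketch (assumes TF 2.x eager execution; AveragePooling1D is used
# identically, and shapes follow the docstring above):
#   import tensorflow as tf
#   x = tf.random.normal((4, 10, 8))                    # (batch, steps, features)
#   y = tf.keras.layers.MaxPooling1D(pool_size=2)(x)    # -> shape (4, 5, 8)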
@keras_export('keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D')
class AveragePooling1D(Pooling1D):
"""Average pooling for temporal data.
Arguments:
    pool_size: Integer, size of the average pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
"""
def __init__(self, pool_size=2, strides=None,
padding='valid', data_format='channels_last', **kwargs):
super(AveragePooling1D, self).__init__(
functools.partial(backend.pool2d, pool_mode='avg'),
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
class Pooling2D(Layer):
"""Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images).
This class only exists for code reuse. It will never be an exposed API.
Arguments:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format=None,
name=None, **kwargs):
super(Pooling2D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def call(self, inputs):
if self.data_format == 'channels_last':
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
else:
pool_shape = (1, 1) + self.pool_size
strides = (1, 1) + self.strides
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
data_format=conv_utils.convert_data_format(self.data_format, 4))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
else:
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.pool_size[0], self.padding,
self.strides[0])
cols = conv_utils.conv_output_length(cols, self.pool_size[1], self.padding,
self.strides[1])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
else:
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(Pooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D')
class MaxPooling2D(Pooling2D):
"""Max pooling operation for spatial data.
Arguments:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
      `(2, 2)` will halve the input in both spatial dimensions.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling2D, self).__init__(
nn.max_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
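# A minimal usage sketch (assumes TF 2.x eager execution; AveragePooling2D is used
# identically):
#   import tensorflow as tf
#   x = tf.random.normal((2, 28, 28, 3))                    # (batch, rows, cols, channels)
#   y = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)   # -> shape (2, 14, 14, 3)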
@keras_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D')
class AveragePooling2D(Pooling2D):
"""Average pooling operation for spatial data.
Arguments:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
      `(2, 2)` will halve the input in both spatial dimensions.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling2D, self).__init__(
nn.avg_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
class Pooling3D(Layer):
"""Pooling layer for arbitrary pooling functions, for 3D inputs.
This class only exists for code reuse. It will never be an exposed API.
Arguments:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)`
while `channels_first` corresponds to
inputs with shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(Pooling3D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def call(self, inputs):
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
if self.data_format == 'channels_first':
# TF does not support `channels_first` with 3D pooling operations,
# so we must handle this case manually.
# TODO(fchollet): remove this when TF pooling is feature-complete.
inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper())
if self.data_format == 'channels_first':
outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
else:
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
len_dim1 = conv_utils.conv_output_length(len_dim1, self.pool_size[0],
self.padding, self.strides[0])
len_dim2 = conv_utils.conv_output_length(len_dim2, self.pool_size[1],
self.padding, self.strides[1])
len_dim3 = conv_utils.conv_output_length(len_dim3, self.pool_size[2],
self.padding, self.strides[2])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3])
else:
return tensor_shape.TensorShape(
[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]])
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(Pooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.MaxPool3D', 'keras.layers.MaxPooling3D')
class MaxPooling3D(Pooling3D):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Arguments:
pool_size: Tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling3D, self).__init__(
nn.max_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
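# A minimal usage sketch (assumes TF 2.x eager execution; AveragePooling3D is used
# identically):
#   import tensorflow as tf
#   x = tf.random.normal((1, 8, 16, 16, 4))                   # (batch, dim1, dim2, dim3, channels)
#   y = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2))(x)  # -> shape (1, 4, 8, 8, 4)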
@keras_export('keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D')
class AveragePooling3D(Pooling3D):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Arguments:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling3D, self).__init__(
nn.avg_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
class GlobalPooling1D(Layer):
"""Abstract class for different global pooling 1D layers."""
def __init__(self, data_format='channels_last', **kwargs):
super(GlobalPooling1D, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.data_format = conv_utils.normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[2]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(GlobalPooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling1D',
'keras.layers.GlobalAvgPool1D')
class GlobalAveragePooling1D(GlobalPooling1D):
"""Global average pooling operation for temporal data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Call arguments:
inputs: A 3D tensor.
    mask: Binary tensor of shape `(batch_size, steps)` indicating whether
      a given step is valid (True) or should be excluded from the average (False).
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
Output shape:
2D tensor with shape `(batch_size, features)`.
"""
def __init__(self, data_format='channels_last', **kwargs):
super(GlobalAveragePooling1D, self).__init__(data_format=data_format,
**kwargs)
self.supports_masking = True
def call(self, inputs, mask=None):
steps_axis = 1 if self.data_format == 'channels_last' else 2
if mask is not None:
mask = math_ops.cast(mask, backend.floatx())
input_shape = inputs.shape.as_list()
broadcast_shape = [-1, input_shape[steps_axis], 1]
mask = array_ops.reshape(mask, broadcast_shape)
inputs *= mask
return backend.sum(inputs, axis=steps_axis) / math_ops.reduce_sum(
mask, axis=steps_axis)
else:
return backend.mean(inputs, axis=steps_axis)
def compute_mask(self, inputs, mask=None):
return None
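# A minimal usage sketch of the masking behaviour implemented above (assumes TF 2.x
# and the standard Keras convention that True mask entries are kept; masked-out
# steps contribute neither to the sum nor to the count):
#   import tensorflow as tf
#   x = tf.ones((2, 4, 3))                                      # (batch, steps, features)
#   mask = tf.constant([[1, 1, 0, 0], [1, 1, 1, 1]]) > 0        # per-step mask
#   y = tf.keras.layers.GlobalAveragePooling1D()(x, mask=mask)  # -> shape (2, 3)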
@keras_export('keras.layers.GlobalMaxPool1D', 'keras.layers.GlobalMaxPooling1D')
class GlobalMaxPooling1D(GlobalPooling1D):
"""Global max pooling operation for temporal data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
Output shape:
2D tensor with shape `(batch_size, features)`.
"""
def call(self, inputs):
steps_axis = 1 if self.data_format == 'channels_last' else 2
return backend.max(inputs, axis=steps_axis)
class GlobalPooling2D(Layer):
"""Abstract class for different global pooling 2D layers.
"""
def __init__(self, data_format=None, **kwargs):
super(GlobalPooling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape([input_shape[0], input_shape[3]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(GlobalPooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling2D',
'keras.layers.GlobalAvgPool2D')
class GlobalAveragePooling2D(GlobalPooling2D):
"""Global average pooling operation for spatial data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2])
else:
return backend.mean(inputs, axis=[2, 3])
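# A minimal usage sketch (assumes TF 2.x; GlobalMaxPooling2D is used identically):
#   import tensorflow as tf
#   x = tf.random.normal((2, 7, 7, 64))               # (batch, rows, cols, channels)
#   y = tf.keras.layers.GlobalAveragePooling2D()(x)   # -> shape (2, 64)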
@keras_export('keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPooling2D')
class GlobalMaxPooling2D(GlobalPooling2D):
"""Global max pooling operation for spatial data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.max(inputs, axis=[1, 2])
else:
return backend.max(inputs, axis=[2, 3])
class GlobalPooling3D(Layer):
"""Abstract class for different global pooling 3D layers."""
def __init__(self, data_format=None, **kwargs):
super(GlobalPooling3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
return tensor_shape.TensorShape([input_shape[0], input_shape[4]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(GlobalPooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling3D',
'keras.layers.GlobalAvgPool3D')
class GlobalAveragePooling3D(GlobalPooling3D):
"""Global Average pooling operation for 3D data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2, 3])
else:
return backend.mean(inputs, axis=[2, 3, 4])
@keras_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D')
class GlobalMaxPooling3D(GlobalPooling3D):
"""Global Max pooling operation for 3D data.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape `(batch_size, channels)`.
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.max(inputs, axis=[1, 2, 3])
else:
return backend.max(inputs, axis=[2, 3, 4])
# Aliases
AvgPool1D = AveragePooling1D
MaxPool1D = MaxPooling1D
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
AvgPool3D = AveragePooling3D
MaxPool3D = MaxPooling3D
GlobalMaxPool1D = GlobalMaxPooling1D
GlobalMaxPool2D = GlobalMaxPooling2D
GlobalMaxPool3D = GlobalMaxPooling3D
GlobalAvgPool1D = GlobalAveragePooling1D
GlobalAvgPool2D = GlobalAveragePooling2D
GlobalAvgPool3D = GlobalAveragePooling3D
|
|
"""
fs.mountfs
==========
Contains the MountFS class, a virtual filesystem which can have other filesystems linked in as branch directories.
For example, let's say we have two filesystems containing config files and resources respectively::
[config_fs]
|-- config.cfg
`-- defaults.cfg
[resources_fs]
|-- images
| |-- logo.jpg
| `-- photo.jpg
`-- data.dat
We can combine these filesystems into a single filesystem with the following code::
from fs.mountfs import MountFS
    combined_fs = MountFS()
combined_fs.mountdir('config', config_fs)
combined_fs.mountdir('resources', resources_fs)
This will create a single filesystem where paths under `config` map to `config_fs`, and paths under `resources` map to `resources_fs`::
[combined_fs]
|-- config
| |-- config.cfg
| `-- defaults.cfg
`-- resources
|-- images
| |-- logo.jpg
| `-- photo.jpg
`-- data.dat
Now both filesystems can be accessed with the same path structure::
print combined_fs.getcontents('/config/defaults.cfg')
    read_jpg(combined_fs.open('/resources/images/logo.jpg'))
"""
from fs.base import *
from fs.errors import *
from fs.path import *
from fs import _thread_synchronize_default
class DirMount(object):
def __init__(self, path, fs):
self.path = path
self.fs = fs
def __str__(self):
return "Mount point: <%s,%s>" % (self.path,self.fs,)
__repr__ = __str__
def __unicode__(self):
return unicode(str(self))
class FileMount(object):
def __init__(self, path, open_callable, info_callable=None):
self.open_callable = open_callable
def no_info_callable(path):
return {}
self.info_callable = info_callable or no_info_callable
class MountFS(FS):
"""A filesystem that delegates to other filesystems."""
_meta = { 'virtual': True,
'read_only' : False,
'unicode_paths' : True,
'case_insensitive_paths' : False,
}
DirMount = DirMount
FileMount = FileMount
def __init__(self, thread_synchronize=_thread_synchronize_default):
super(MountFS, self).__init__(thread_synchronize=thread_synchronize)
self.mount_tree = PathMap()
def __str__(self):
return "<%s [%s]>" % (self.__class__.__name__,self.mount_tree.items(),)
__repr__ = __str__
def __unicode__(self):
return unicode(self.__str__())
def _delegate(self, path):
path = abspath(normpath(path))
object = None
head_path = "/"
tail_path = path
for prefix in recursepath(path):
try:
object = self.mount_tree[prefix]
except KeyError:
pass
else:
head_path = prefix
tail_path = path[len(head_path):]
if type(object) is MountFS.DirMount:
return object.fs, head_path, tail_path
if type(object) is MountFS.FileMount:
return self, "/", path
try:
self.mount_tree.iternames(path).next()
except StopIteration:
return None, None, None
else:
return self, "/", path
def getsyspath(self, path, allow_none=False):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
if allow_none:
return None
else:
raise NoSysPathError(path=path)
return fs.getsyspath(delegate_path, allow_none=allow_none)
def getpathurl(self, path, allow_none=False):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
if allow_none:
return None
else:
raise NoPathURLError(path=path)
return fs.getpathurl(delegate_path, allow_none=allow_none)
@synchronize
def desc(self, path):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self:
if fs.isdir(path):
return "Mount dir"
else:
return "Mounted file"
return "Mounted dir, maps to path %s on %s" % (delegate_path, str(fs))
@synchronize
def isdir(self, path):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
return False
if fs is self:
object = self.mount_tree.get(path, None)
return not isinstance(object,MountFS.FileMount)
return fs.isdir(delegate_path)
@synchronize
def isfile(self, path):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
return False
if fs is self:
object = self.mount_tree.get(path, None)
return isinstance(object,MountFS.FileMount)
return fs.isfile(delegate_path)
@synchronize
def exists(self, path):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
return False
if fs is self:
return True
return fs.exists(delegate_path)
@synchronize
def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
paths = self.mount_tree.names(path)
return self._listdir_helper(path,
paths,
wildcard,
full,
absolute,
dirs_only,
files_only)
else:
paths = fs.listdir(delegate_path,
wildcard=wildcard,
full=False,
absolute=False,
dirs_only=dirs_only,
files_only=files_only)
for nm in self.mount_tree.names(path):
if nm not in paths:
if dirs_only:
if self.isdir(pathjoin(path,nm)):
paths.append(nm)
elif files_only:
if self.isfile(pathjoin(path,nm)):
paths.append(nm)
else:
paths.append(nm)
if full or absolute:
if full:
path = relpath(normpath(path))
else:
path = abspath(normpath(path))
paths = [pathjoin(path, p) for p in paths]
return paths
@synchronize
def ilistdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
paths = self.mount_tree.names(path)
for path in self._listdir_helper(path,paths,wildcard,full,absolute,dirs_only,files_only):
yield path
else:
paths = fs.ilistdir(delegate_path,
wildcard=wildcard,
full=False,
absolute=False,
dirs_only=dirs_only)
extra_paths = set(self.mount_tree.names(path))
if full:
pathhead = relpath(normpath(path))
def mkpath(p):
return pathjoin(pathhead,p)
elif absolute:
pathhead = abspath(normpath(path))
def mkpath(p):
return pathjoin(pathhead,p)
else:
def mkpath(p):
return p
for p in paths:
if p not in extra_paths:
yield mkpath(p)
for p in extra_paths:
if dirs_only:
if self.isdir(pathjoin(path,p)):
yield mkpath(p)
elif files_only:
if self.isfile(pathjoin(path,p)):
yield mkpath(p)
else:
yield mkpath(p)
@synchronize
def makedir(self, path, recursive=False, allow_recreate=False):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise UnsupportedError("make directory", msg="Can only makedir for mounted paths" )
if not delegate_path:
if allow_recreate:
return
else:
raise DestinationExistsError(path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate)
@synchronize
def open(self, path, mode="r", **kwargs):
object = self.mount_tree.get(path, None)
if type(object) is MountFS.FileMount:
callable = object.open_callable
return callable(path, mode, **kwargs)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ResourceNotFoundError(path)
return fs.open(delegate_path, mode, **kwargs)
@synchronize
def setcontents(self, path, data, chunk_size=64*1024):
object = self.mount_tree.get(path, None)
if type(object) is MountFS.FileMount:
return super(MountFS,self).setcontents(path, data, chunk_size=chunk_size)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ParentDirectoryMissingError(path)
return fs.setcontents(delegate_path, data, chunk_size)
@synchronize
def createfile(self, path, wipe=False):
object = self.mount_tree.get(path, None)
if type(object) is MountFS.FileMount:
return super(MountFS,self).createfile(path, wipe=wipe)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ParentDirectoryMissingError(path)
return fs.createfile(delegate_path, wipe=wipe)
@synchronize
def remove(self, path):
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise UnsupportedError("remove file", msg="Can only remove paths within a mounted dir")
return fs.remove(delegate_path)
@synchronize
def removedir(self, path, recursive=False, force=False):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is self or fs is None:
raise ResourceInvalidError(path, msg="Can not removedir for an un-mounted path")
return fs.removedir(delegate_path, recursive, force)
@synchronize
def rename(self, src, dst):
fs1, _mount_path1, delegate_path1 = self._delegate(src)
fs2, _mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is not fs2:
raise OperationFailedError("rename resource", path=src)
if fs1 is not self:
return fs1.rename(delegate_path1, delegate_path2)
object = self.mount_tree.get(src, None)
_object2 = self.mount_tree.get(dst, None)
if object is None:
raise ResourceNotFoundError(src)
# TODO!
raise UnsupportedError("rename resource", path=src)
@synchronize
def move(self,src,dst,**kwds):
fs1, _mount_path1, delegate_path1 = self._delegate(src)
fs2, _mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.move(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).move(src,dst,**kwds)
@synchronize
def movedir(self,src,dst,**kwds):
fs1, _mount_path1, delegate_path1 = self._delegate(src)
fs2, _mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.movedir(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).movedir(src,dst,**kwds)
@synchronize
def copy(self,src,dst,**kwds):
fs1, _mount_path1, delegate_path1 = self._delegate(src)
fs2, _mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.copy(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).copy(src,dst,**kwds)
@synchronize
def copydir(self,src,dst,**kwds):
fs1, _mount_path1, delegate_path1 = self._delegate(src)
fs2, _mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is fs2 and fs1 is not self:
fs1.copydir(delegate_path1,delegate_path2,**kwds)
else:
super(MountFS,self).copydir(src,dst,**kwds)
@synchronize
def mountdir(self, path, fs):
"""Mounts a host FS object on a given path.
:param path: A path within the MountFS
:param fs: A filesystem object to mount
"""
self.mount_tree[path] = MountFS.DirMount(path, fs)
mount = mountdir
@synchronize
def mountfile(self, path, open_callable=None, info_callable=None):
"""Mounts a single file path.
:param path: A path within the MountFS
:param open_callable: A callable that returns a file-like object
:param info_callable: A callable that returns a dictionary with information regarding the file-like object
"""
        self.mount_tree[path] = MountFS.FileMount(path, open_callable, info_callable)
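        # A minimal usage sketch (hypothetical names; mirrors the mountdir example
        # in the module docstring, and assumes Python 2's StringIO is available):
        #   def open_version(path, mode='r', **kwargs):
        #       return StringIO('0.1.0')
        #   combined_fs.mountfile('/version.txt', open_callable=open_version)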
@synchronize
def unmount(self, path):
"""Unmounts a path.
:param path: Path to unmount
"""
del self.mount_tree[path]
@synchronize
def settimes(self, path, accessed_time=None, modified_time=None):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
raise UnsupportedError("settimes")
fs.settimes(delegate_path, accessed_time, modified_time)
@synchronize
def getinfo(self, path):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
if self.isfile(path):
return self.mount_tree[path].info_callable(path)
return {}
return fs.getinfo(delegate_path)
@synchronize
def getsize(self, path):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
object = self.mount_tree.get(path, None)
if object is None:
raise ResourceNotFoundError(path)
if not isinstance(object,MountFS.FileMount):
raise ResourceInvalidError(path)
size = object.info_callable(path).get("size", None)
return size
return fs.getinfo(delegate_path).get("size", None)
@synchronize
def getxattr(self,path,name,default=None):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
return default
return fs.getxattr(delegate_path,name,default)
@synchronize
def setxattr(self,path,name,value):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
raise UnsupportedError("setxattr")
return fs.setxattr(delegate_path,name,value)
@synchronize
def delxattr(self,path,name):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
return True
return fs.delxattr(delegate_path, name)
@synchronize
def listxattrs(self,path):
path = normpath(path)
fs, _mount_path, delegate_path = self._delegate(path)
if fs is None:
raise ResourceNotFoundError(path)
if fs is self:
return []
return fs.listxattrs(delegate_path)
|
|
#!/usr/bin/env python
# Solution to http://www.gchq.gov.uk/press_and_media/news_and_features/Pages/Directors-Christmas-puzzle-2015.aspx
from z3 import *
from PIL import Image
size=25
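# `rules` gives the black-run lengths for each of the 25 rows and `rules2` the
# run lengths for each of the 25 columns of the nonogram grid.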
rules = [
[7,3,1,1,7],
[1,1,2,2,1,1],
[1,3,1,3,1,1,3,1],
[1,3,1,1,6,1,3,1],
[1,3,1,5,2,1,3,1],
[1,1,2,1,1],
[7,1,1,1,1,1,7],
[3,3],
[1,2,3,1,1,3,1,1,2],
[1,1,3,2,1,1],
[4,1,4,2,1,2],
[1,1,1,1,1,4,1,3],
[2,1,1,1,2,5],
[3,2,2,6,3,1],
[1,9,1,1,2,1],
[2,1,2,2,3,1],
[3,1,1,1,1,5,1],
[1,2,2,5],
[7,1,2,1,1,1,3],
[1,1,2,1,2,2,1],
[1,3,1,4,5,1],
[1,3,1,3,10,2],
[1,3,1,1,6,6],
[1,1,2,1,1,2],
[7,2,1,2,5]
]
rules2 = [
[7,2,1,1,7],
[1,1,2,2,1,1],
[1,3,1,3,1,3,1,3,1],
[1,3,1,1,5,1,3,1],
[1,3,1,1,4,1,3,1],
[1,1,1,2,1,1],
[7,1,1,1,1,1,7],
[1,1,3],
[2,1,2,1,8,2,1],
[2,2,1,2,1,1,1,2],
[1,7,3,2,1],
[1,2,3,1,1,1,1,1],
[4,1,1,2,6],
[3,3,1,1,1,3,1],
[1,2,5,2,2],
[2,2,1,1,1,1,1,2,1],
[1,3,3,2,1,8,1],
[6,2,1],
[7,1,4,1,1,3],
[1,1,1,1,4],
[1,3,1,3,7,1],
[1,3,1,1,1,2,1,1,4],
[1,3,1,4,3,3],
[1,1,2,2,2,6,1],
[7,1,3,2,1,1]
]
black = [
(3,3),(3,4), (3,12),(3,13), (3,21),
(8,6),(8,7), (8,10), (8,14),(8,15), (8,18),
(16,6),(16,11),(16,16),(16,20),
(21,3),(21,4),(21,9),(21,10),(21,15),(21,20),(21,21)
]
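# `black` lists the (row, col) cells the puzzle provides as already filled in;
# they are pinned to 1 below under "Force blacks".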
# Print solution
def solve(s, dots):
if s.check() == sat:
m = s.model()
im = Image.new('L', (size+2,size+2), 255)
for y in range(size):
row=[]
for x in range(size):
pixel = m.evaluate(dots[y][x]).as_long()
if pixel == 0:
im.putpixel((x+1,y+1), 255)
else:
im.putpixel((x+1,y+1), 0)
row.append(str(pixel))
print(''.join(row))
im=im.resize((16*(size+2),16*(size+2)))
im.show()
im.save('result.png')
else:
print('Fail')
# Create rulesets
rule_dots_cover=[]
rule_partials=[]
rule_dot_vals=[]
rule_partials_vals=[]
rule_spacer_vals=[]
# Create pixels
dots = []
for y in range(size):
dots.append([])
for x in range(size):
dots[-1].append(Int('dot_%d_%d' % (y,x)))
rule_dot_vals.append(Or(dots[-1][-1] == 0, dots[-1][-1] == 1))
# Force blacks
for y,x in black:
rule_dot_vals.append(dots[y][x] == 1)
# Parse vertical rules
spacers2 = []
partials2 = []
for x in range(len(rules2)):
col = rules2[x]
    # Cumulative size
partials2.append([Int('part2_%d_y0' % (x))])
spacers2.append([])
rule_partials_vals.append(partials2[-1][-1] == 0)
for y in range(len(col)+1):
# Spacer sizes
spacers2[-1].append(Int('space2_%d_%d' % (y,x)))
        # Edges can be zero size
if y > 0 and y < len(col):
rule_spacer_vals.append(spacers2[-1][-1] >= 1)
else:
rule_spacer_vals.append(spacers2[-1][-1] >= 0)
# Partial size of last space
partials2[-1].append(Int('part2_space_%d_%d' % (y,x)))
rule_partials_vals.append(partials2[-1][-1] >= 0)
rule_partials.append(partials2[-1][-2] + spacers2[-1][-1] == partials2[-1][-1])
# Add white constraint
for y2 in range(size):
rule_dots_cover.append(If(And(partials2[-1][-2] <= y2, y2 < partials2[-1][-1]), dots[y2][x]==0, dots[y2][x]>=0))
# Block sizes
if y < len(col):
# Partial size of last block
partials2[-1].append(Int('part2_block_%d_%d' % (y,x)))
rule_partials_vals.append(partials2[-1][-1] >= 0)
rule_partials.append(partials2[-1][-2] + col[y] == partials2[-1][-1])
# Add black constraint
for y2 in range(size):
rule_dots_cover.append(If(And(partials2[-1][-2] <= y2, y2 < partials2[-1][-1]), dots[y2][x]==1, dots[y2][x]>=0))
# Add up to col height
rule_partials.append(partials2[-1][-1] == size)
# Parse horizontal rules
spacers = []
partials = []
for y in range(len(rules)):
row = rules[y]
    # Cumulative size
partials.append([Int('part_%d_x0' % (y))])
spacers.append([])
rule_partials_vals.append(partials[-1][-1] == 0)
for x in range(len(row)+1):
# Spacer sizes
spacers[-1].append(Int('space_%d_%d' % (y,x)))
# Edges can be zero size
if x > 0 and x < len(row):
rule_spacer_vals.append(spacers[-1][-1] >= 1)
else:
rule_spacer_vals.append(spacers[-1][-1] >= 0)
# Partial size of last space
partials[-1].append(Int('part_space_%d_%d' % (y,x)))
rule_partials_vals.append(partials[-1][-1] >= 0)
rule_partials.append(partials[-1][-2] + spacers[-1][-1] == partials[-1][-1])
# Add white constraint
for x2 in range(size):
rule_dots_cover.append(If(And(partials[-1][-2] <= x2, x2 < partials[-1][-1]), dots[y][x2]==0, dots[y][x2]>=0))
# Block sizes
if x < len(row):
# Partial size of last block
partials[-1].append(Int('part_block_%d_%d' % (y,x)))
rule_partials_vals.append(partials[-1][-1] >= 0)
rule_partials.append(partials[-1][-2] + row[x] == partials[-1][-1])
# Add black constraint
for x2 in range(size):
rule_dots_cover.append(If(And(partials[-1][-2] <= x2, x2 < partials[-1][-1]), dots[y][x2]==1, dots[y][x2]>=0))
# Add up to row width
rule_partials.append(partials[-1][-1] == size)
# Add rulesets to solver
s = Solver()
s.add(rule_spacer_vals)
s.add(rule_partials_vals)
s.add(rule_dot_vals)
s.add(rule_partials)
s.add(rule_dots_cover)
# Show solution
print('Solving...')
solve(s, dots)
|
|
import pytest
from pytest import approx
import os
import pandas as pd
import numpy as np
import calliope
import calliope.exceptions as exceptions
from calliope.core.attrdict import AttrDict
from calliope.preprocess import time
from calliope.test.common.util import build_test_model as build_model
from calliope.test.common.util import defaults, check_error_or_warning
class TestModelRun:
def test_model_from_dict(self):
"""
Test creating a model from dict/AttrDict instead of from YAML
"""
this_path = os.path.dirname(__file__)
model_location = os.path.join(this_path, "common", "test_model", "model.yaml")
model_dict = AttrDict.from_yaml(model_location)
node_dict = AttrDict(
{
"nodes": {
"a": {"techs": {"test_supply_elec": {}, "test_demand_elec": {}}},
"b": {"techs": {"test_supply_elec": {}, "test_demand_elec": {}}},
}
}
)
model_dict.union(node_dict)
model_dict.model["timeseries_data_path"] = os.path.join(
this_path, "common", "test_model", model_dict.model["timeseries_data_path"]
)
# test as AttrDict
calliope.Model(model_dict)
# test as dict
calliope.Model(model_dict.as_dict())
@pytest.mark.filterwarnings(
"ignore:(?s).*Not building the link a,b:calliope.exceptions.ModelWarning"
)
def test_valid_scenarios(self):
"""
        Test that a valid scenario definition built from overrides raises no error and results in the scenario being applied.
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1: ['one', 'two']
overrides:
one:
techs.test_supply_gas.constraints.energy_cap_max: 20
two:
techs.test_supply_elec.constraints.energy_cap_max: 20
nodes:
a:
techs:
test_supply_gas:
test_supply_elec:
test_demand_elec:
"""
)
model = build_model(override_dict=override, scenario="scenario_1")
assert (
model._model_run.nodes["a"].techs.test_supply_gas.constraints.energy_cap_max
== 20
)
assert (
model._model_run.nodes[
"a"
].techs.test_supply_elec.constraints.energy_cap_max
== 20
)
@pytest.mark.filterwarnings(
"ignore:(?s).*Not building the link 0,1:calliope.exceptions.ModelWarning"
)
def test_valid_scenario_of_scenarios(self):
"""
        Test that a valid scenario definition which groups scenarios and overrides raises
        no error and results in the scenario being applied.
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1: ['one', 'two']
scenario_2: ['scenario_1', 'new_location']
overrides:
one:
techs.test_supply_gas.constraints.energy_cap_max: 20
two:
techs.test_supply_elec.constraints.energy_cap_max: 20
new_location:
nodes.b.techs:
test_supply_elec:
nodes:
a:
techs:
test_supply_gas:
test_supply_elec:
test_demand_elec:
"""
)
model = build_model(override_dict=override, scenario="scenario_2")
assert (
model._model_run.nodes[
"a"
].techs.test_supply_gas.constraints.energy_cap_max
== 20
)
assert (
model._model_run.nodes[
"b"
].techs.test_supply_elec.constraints.energy_cap_max
== 20
)
def test_invalid_scenarios_dict(self):
"""
Test that invalid scenario definition raises appropriate error
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1:
techs.foo.bar: 1
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override, scenario="scenario_1")
assert check_error_or_warning(
error,
"Scenario definition must be a list of override or other scenario names.",
)
def test_invalid_scenarios_str(self):
"""
Test that invalid scenario definition raises appropriate error
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1: 'foo'
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override, scenario="scenario_1")
assert check_error_or_warning(
error,
"Scenario definition must be a list of override or other scenario names.",
)
def test_scenario_name_overlaps_overrides(self):
"""
        Test that a scenario name which looks like a comma-separated list of overrides is not parsed as a list of overrides.
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
'simple_supply,one_day': ['simple_supply', 'one_day']
"""
)
with pytest.warns(exceptions.ModelWarning) as warn_info:
build_model(
override_dict=override,
scenario="simple_supply,one_day",
)
assert check_error_or_warning(
warn_info,
"Scenario name `simple_supply,one_day` includes commas that won't be parsed as a list of overrides",
)
def test_undefined_carriers(self):
"""
        Test that the user has defined either carrier or carrier_in/_out for each tech
"""
override = AttrDict.from_yaml_string(
"""
techs:
test_undefined_carrier:
essentials:
parent: supply
name: test
constraints:
resource: .inf
energy_cap_max: .inf
nodes.1.techs.test_undefined_carrier:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_conversion_plus_primary_carriers(self):
"""
        Test that the user has defined input/output primary carriers for conversion_plus techs
"""
override1 = {
"techs.test_conversion_plus.essentials.carrier_in": ["gas", "coal"]
}
override2 = {"techs.test_conversion_plus.essentials.primary_carrier_in": "coal"}
override3 = {
"techs.test_conversion_plus.essentials.primary_carrier_out": "coal"
}
model = build_model({}, scenario="simple_conversion_plus,two_hours")
assert (
model._model_run.techs.test_conversion_plus.essentials.get_key(
"primary_carrier_in", None
)
== "gas"
)
# should fail: multiple carriers in, but no primary_carrier_in assigned
with pytest.raises(exceptions.ModelError) as error:
build_model(override1, scenario="simple_conversion_plus,two_hours")
assert check_error_or_warning(error, "Primary_carrier_in must be assigned")
# should fail: primary_carrier_in not one of the carriers_in
with pytest.raises(exceptions.ModelError) as error:
build_model(override2, scenario="simple_conversion_plus,two_hours")
assert check_error_or_warning(error, "Primary_carrier_in `coal` not one")
# should fail: primary_carrier_out not one of the carriers_out
with pytest.raises(exceptions.ModelError) as error:
build_model(override3, scenario="simple_conversion_plus,two_hours")
assert check_error_or_warning(error, "Primary_carrier_out `coal` not one")
def test_incorrect_subset_time(self):
"""
If subset_time is a list, it must have two entries (start_time, end_time)
If subset_time is not a list, it should successfully subset on the given
string/integer
"""
override = lambda param: AttrDict.from_yaml_string(
"model.subset_time: {}".format(param)
)
# should fail: one string in list
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override(["2005-01"]), scenario="simple_supply")
# should fail: three strings in list
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override(["2005-01-01", "2005-01-02", "2005-01-03"]),
scenario="simple_supply",
)
        # should pass: two strings in list, treated as a slice
model = build_model(
override_dict=override(["2005-01-01", "2005-01-07"]),
scenario="simple_supply",
)
assert all(
model.inputs.timesteps.to_index()
== pd.date_range("2005-01", "2005-01-07 23:00:00", freq="H")
)
# should fail: must be a list, not a string
with pytest.raises(exceptions.ModelError):
model = build_model(
override_dict=override("2005-01"), scenario="simple_supply"
)
# should fail: time subset out of range of input data
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=override(["2005-03", "2005-04"]), scenario="simple_supply"
)
assert check_error_or_warning(
error,
"subset time range ['2005-03', '2005-04'] is outside the input data time range [2005-01-01, 2005-02-01]",
)
# should fail: time subset out of range of input data
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override(["2005-02-01", "2005-02-05"]),
scenario="simple_supply",
)
def test_incorrect_date_format(self):
"""
        Test that the date parser catches a date format in a file that
        differs from the user-input/default format (including when just one
        line of a file is incorrect)
"""
# should pass: changing datetime format from default
override1 = {
"model.timeseries_dateformat": "%d/%m/%Y %H:%M:%S",
"techs.test_demand_heat.constraints.resource": "file=demand_heat_diff_dateformat.csv",
"techs.test_demand_elec.constraints.resource": "file=demand_heat_diff_dateformat.csv",
}
model = build_model(override_dict=override1, scenario="simple_conversion")
assert all(
model.inputs.timesteps.to_index()
== pd.date_range("2005-01", "2005-02-01 23:00:00", freq="H")
)
# should fail: wrong dateformat input for one file
override2 = {
"techs.test_demand_heat.constraints.resource": "file=demand_heat_diff_dateformat.csv"
}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override2, scenario="simple_conversion")
# should fail: wrong dateformat input for all files
override3 = {"model.timeseries_dateformat": "%d/%m/%Y %H:%M:%S"}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override3, scenario="simple_supply")
# should fail: one value wrong in file
override4 = {
"techs.test_demand_heat.constraints.resource": "file=demand_heat_wrong_dateformat.csv"
}
# check in output error that it points to: 07/01/2005 10:00:00
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override4, scenario="simple_conversion")
def test_inconsistent_time_indeces(self):
"""
        Test that, including after any time subsetting, the indices of all time
varying input data are consistent with each other
"""
# should fail: wrong length of demand_heat csv vs demand_elec
override1 = {
"techs.test_demand_heat.constraints.resource": "file=demand_heat_wrong_length.csv"
}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_conversion")
# should pass: wrong length of demand_heat csv, but time subsetting removes the difference
build_model(override_dict=override1, scenario="simple_conversion,one_day")
def test_single_timestep(self):
"""
        Test that a warning is raised when only one timestep is used, and that
        the timestep resolution is then inferred to be 1 hour
"""
override1 = {
"model.subset_time": ["2005-01-01 00:00:00", "2005-01-01 00:00:00"]
}
with pytest.warns(exceptions.ModelWarning) as warn_info:
model = build_model(override_dict=override1, scenario="simple_supply")
assert check_error_or_warning(
warn_info,
"Only one timestep defined. Inferring timestep resolution to be 1 hour",
)
assert model.inputs.timestep_resolution == [1]
def test_empty_key_on_explode(self):
"""
        On exploding nodes (from ``'1--3'`` or ``'1,2,3'`` to
        ``['1', '2', '3']``), check that the resulting list is as expected
"""
list1 = calliope.preprocess.nodes.explode_nodes("1--3")
list2 = calliope.preprocess.nodes.explode_nodes("1,2,3")
assert list1 == list2 == ["1", "2", "3"]
def test_key_clash_on_set_loc_key(self):
"""
Raise error on attempted overwrite of information regarding a recently
exploded location
"""
override = {
"nodes.a.techs.test_supply_elec.constraints.resource": 10,
"nodes.a,b.techs.test_supply_elec.constraints.resource": 15,
}
with pytest.raises(KeyError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_calculate_depreciation(self):
"""
Technologies which define investment costs *must* define lifetime and
interest rate, so that a depreciation rate can be calculated.
        If lifetime == inf and interest rate > 0, the depreciation rate will be inf, so
we want to avoid that too.
"""
override1 = {"techs.test_supply_elec.costs.monetary.energy_cap": 10}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override1, scenario="simple_supply,one_day")
assert check_error_or_warning(
error, "Must specify constraints.lifetime and costs.monetary.interest_rate"
)
override2 = {
"techs.test_supply_elec.constraints.lifetime": 10,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override2, scenario="simple_supply,one_day")
assert check_error_or_warning(
error, "Must specify constraints.lifetime and costs.monetary.interest_rate"
)
override3 = {
"techs.test_supply_elec.costs.monetary.interest_rate": 0.1,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override3, scenario="simple_supply,one_day")
assert check_error_or_warning(
error, "Must specify constraints.lifetime and costs.monetary.interest_rate"
)
override4 = {
"techs.test_supply_elec.constraints.lifetime": 10,
"techs.test_supply_elec.costs.monetary.interest_rate": 0,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override4, scenario="simple_supply,one_day")
assert check_error_or_warning(excinfo, "`monetary` interest rate of zero")
override5 = {
"techs.test_supply_elec.constraints.lifetime": np.inf,
"techs.test_supply_elec.costs.monetary.interest_rate": 0,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override5, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"No investment monetary cost will be incurred for `test_supply_elec`",
)
override6 = {
"techs.test_supply_elec.constraints.lifetime": np.inf,
"techs.test_supply_elec.costs.monetary.interest_rate": 0.1,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override6, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"No investment monetary cost will be incurred for `test_supply_elec`",
)
override7 = {
"techs.test_supply_elec.constraints.lifetime": 10,
"techs.test_supply_elec.costs.monetary.interest_rate": 0.1,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
build_model(override_dict=override7, scenario="simple_supply,one_day")
def test_delete_interest_rate(self):
"""
If only 'interest_rate' is given in the cost class for a technology, we
should be able to handle deleting it without leaving an empty cost key.
"""
override1 = {"techs.test_supply_elec.costs.monetary.interest_rate": 0.1}
m = build_model(override_dict=override1, scenario="simple_supply,one_day")
assert "loc_techs_cost" not in m._model_data.dims
def test_empty_cost_class(self):
"""
If cost is defined, but its value is not a dictionary, ensure it is
deleted
"""
override1 = {"techs.test_supply_elec.costs.carbon": None}
with pytest.warns(exceptions.ModelWarning) as warn_info:
m = build_model(
override_dict=override1,
scenario="simple_supply,one_day,investment_costs",
)
assert check_error_or_warning(
warn_info,
"Deleting empty cost class `carbon` for technology `test_supply_elec` at `a`.",
)
assert (
"carbon" not in m._model_run.nodes["b"].techs.test_supply_elec.costs.keys()
)
assert "carbon" not in m._model_data.coords["costs"].values
def test_strip_link(self):
override = {
"links.a, c.techs": {"test_transmission_elec": None},
"nodes.c.techs": {"test_supply_elec": None},
}
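        # the stray space in "a, c" should be stripped, so the link is still
        # registered between nodes a and c (checked below)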
m = build_model(override_dict=override, scenario="simple_supply,one_day")
assert "c" in m._model_run.nodes["a"].links.keys()
def test_dataframes_passed(self):
"""
If model config specifies dataframes to be loaded in (via df=...),
these time series must be passed as arguments in calliope.Model(...).
"""
override = {"techs.test_demand_elec.constraints.resource": "df=demand_elec"}
with pytest.raises(exceptions.ModelError) as error:
build_model(
model_file="model_minimal.yaml",
override_dict=override,
timeseries_dataframes=None,
)
assert check_error_or_warning(
error, "no timeseries passed " "as arguments in calliope.Model(...)."
)
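        # For reference, the supported usage looks roughly like this (a
        # sketch; "model.yaml" is a placeholder path and the key mirrors the
        # df=demand_elec override above):
        #   ts = {"demand_elec": pd.DataFrame(...)}
        #   calliope.Model("model.yaml", timeseries_dataframes=ts)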
def test_dataframe_keys(self):
"""
Any timeseries specified via df=... must correspond to a key in
timeseries_dataframes. An error should be thrown.
"""
override = {"techs.test_demand_elec.constraints.resource": "df=key_1"}
ts_df = {"key_2": pd.DataFrame(np.arange(10))}
with pytest.raises(exceptions.ModelError) as error:
build_model(
model_file="model_minimal.yaml",
override_dict=override,
timeseries_dataframes=ts_df,
)
assert check_error_or_warning(
error, "Model attempted to load dataframe with key"
)
def test_invalid_dataframes_passed(self):
"""
`timeseries_dataframes` should be dict of pandas DataFrames.
"""
override = {"techs.test_demand_elec.constraints.resource": "df=demand_elec"}
ts_df_nodict = pd.DataFrame(np.arange(10)) # Not a dict
ts_df_numpy_arrays = {"demand_elec": np.arange(10)} # No pd DataFrames
for timeseries_dataframes in [ts_df_nodict, ts_df_numpy_arrays]:
with pytest.raises(exceptions.ModelError) as error:
build_model(
model_file="model_minimal.yaml",
override_dict=override,
timeseries_dataframes=timeseries_dataframes,
)
assert check_error_or_warning(
error, "`timeseries_dataframes` must be dict of pandas DataFrames."
)
class TestChecks:
def test_unrecognised_config_keys(self):
"""
        Check that the only allowed top-level keys are 'model', 'run', 'nodes',
        'techs' and 'tech_groups' (+ 'config_path', which is an internal addition)
"""
override = {"nonsensical_key": "random_string"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised top-level configuration item: nonsensical_key"
)
def test_missing_config_key(self):
"""
Check that missing 'nodes' raises an error
"""
with pytest.raises(exceptions.ModelError) as excinfo:
build_model() # Not selecting any scenario means no nodes are defined
assert check_error_or_warning(
excinfo, "Model is missing required top-level configuration item: nodes"
)
def test_unrecognised_model_run_keys(self):
"""
Check that the only keys allowed in 'model' and 'run' are those in the
model defaults
"""
override1 = {"model.nonsensical_key": "random_string"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override1, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in model configuration: nonsensical_key"
)
override2 = {"run.nonsensical_key": "random_string"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override2, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in run configuration: nonsensical_key"
)
# A key that should be in run but is given in model
override3 = {"model.solver": "glpk"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override3, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in model configuration: solver"
)
# A key that should be in model but is given in run
override4 = {"run.subset_time": None}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override4, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in run configuration: subset_time"
)
@pytest.mark.xfail(reason="SPORES mode will fail until the cost max group constraint can be reproduced")
def test_warn_null_number_of_spores(self):
"""
Check that spores number is greater than 0 if spores run mode is selected
"""
override = {"run.spores_options.spores_number": 0}
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(scenario="spores,simple_supply", override_dict=override)
assert check_error_or_warning(
warn, "spores run mode is selected, but a number of 0 spores is requested"
)
@pytest.mark.xfail(reason="SPORES mode will fail until the cost max group constraint can be reproduced")
def test_non_string_score_cost_class(self):
"""
Check that the score_cost_class for spores scoring is a string
"""
override = {"run.spores_options.score_cost_class": 0}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(scenario="spores,simple_supply", override_dict=override)
assert check_error_or_warning(
excinfo, "`run.spores_options.score_cost_class` must be a string"
)
@pytest.mark.parametrize(
"invalid_key", [("monetary"), ("emissions"), ("name"), ("anything_else_really")]
)
def test_unrecognised_tech_keys(self, invalid_key):
"""
Check that no invalid keys are defined for technologies.
"""
override1 = {"techs.test_supply_gas.{}".format(invalid_key): "random_string"}
with pytest.warns(exceptions.ModelWarning):
build_model(override_dict=override1, scenario="simple_supply")
def test_model_version_mismatch(self):
"""
Model config says model.calliope_version = 0.1, which is not what we
are running, so we want a warning.
"""
override = {"model.calliope_version": 0.1}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, "Model configuration specifies calliope_version"
)
def test_unknown_carrier_tier(self):
"""
User can only use 'carrier_' + ['in', 'out', 'in_2', 'out_2', 'in_3',
'out_3', 'ratios']
"""
override1 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.essentials.carrier_1: power
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_supply,one_day")
override2 = AttrDict.from_yaml_string(
"""
techs.test_conversion_plus.essentials.carrier_out_4: power
"""
)
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override2, scenario="simple_conversion_plus,one_day"
)
def test_name_overlap(self):
"""
No tech may have the same identifier as a tech group
"""
override = AttrDict.from_yaml_string(
"""
techs:
supply:
essentials:
name: Supply tech
carrier: gas
parent: supply
constraints:
energy_cap_max: 10
resource: .inf
nodes:
1.techs.supply:
0.techs.supply:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="one_day")
@pytest.mark.parametrize(
"loc_tech",
(
({"nodes": ["1", "foo"]}),
({"techs": ["test_supply_elec", "bar"]}),
({"nodes": ["1", "foo"], "techs": ["test_supply_elec", "bar"]}),
),
)
@pytest.mark.xfail(reason="Planning to remove group constraints")
def test_inexistent_group_constraint_loc_tech(self, loc_tech):
override = {"group_constraints.mygroup": {"energy_cap_max": 100, **loc_tech}}
with pytest.warns(exceptions.ModelWarning) as excinfo:
m = build_model(override_dict=override, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Possible misspelling in group constraints:"
)
loc_techs = m._model_data.group_constraint_loc_techs_mygroup.values
assert "foo:test_supply_elec" not in loc_techs
assert "1:bar" not in loc_techs
assert "foo:bar" not in loc_techs
@pytest.mark.xfail(reason="Planning to remove group constraints")
def test_inexistent_group_constraint_empty_loc_tech(self):
override = {
"group_constraints.mygroup": {"energy_cap_max": 100, "locs": ["foo"]}
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
m = build_model(override_dict=override, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Constraint group `mygroup` will be completely ignored"
)
assert m._model_run.group_constraints.mygroup.get("exists", True) is False
@pytest.mark.filterwarnings(
"ignore:(?s).*Not building the link a,b:calliope.exceptions.ModelWarning"
)
def test_abstract_base_tech_group_override(self):
"""
Abstract base technology groups can be overridden
"""
override = AttrDict.from_yaml_string(
"""
tech_groups:
supply:
constraints:
lifetime: 25
nodes:
b.techs.test_supply_elec:
b.techs.test_demand_elec:
"""
)
build_model(override_dict=override, scenario="one_day")
def test_unspecified_parent(self):
"""
All technologies and technology groups must specify a parent
"""
override = AttrDict.from_yaml_string(
"""
techs.test_supply_no_parent:
essentials:
name: Supply tech
carrier: gas
constraints:
energy_cap_max: 10
resource: .inf
nodes.b.techs.test_supply_no_parent:
"""
)
with pytest.raises(KeyError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_tech_as_parent(self):
"""
        Neither a technology nor a technology group may have another
        technology as its parent
"""
override1 = AttrDict.from_yaml_string(
"""
techs.test_supply_tech_parent:
essentials:
name: Supply tech
carrier: gas
parent: test_supply_elec
constraints:
energy_cap_max: 10
resource: .inf
nodes.b.techs.test_supply_tech_parent:
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override1, scenario="simple_supply,one_day")
check_error_or_warning(
error, "tech `test_supply_tech_parent` has another tech as a parent"
)
override2 = AttrDict.from_yaml_string(
"""
tech_groups.test_supply_group:
essentials:
carrier: gas
parent: test_supply_elec
constraints:
energy_cap_max: 10
resource: .inf
techs.test_supply_tech_parent.essentials:
name: Supply tech
parent: test_supply_group
nodes.b.techs.test_supply_tech_parent:
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override2, scenario="simple_supply,one_day")
check_error_or_warning(
error, "tech_group `test_supply_group` has a tech as a parent"
)
def test_resource_as_carrier(self):
"""
No carrier in technology or technology group can be called `resource`
"""
override1 = AttrDict.from_yaml_string(
"""
techs:
test_supply_elec:
essentials:
name: Supply tech
carrier: resource
parent: supply
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_supply,one_day")
override2 = AttrDict.from_yaml_string(
"""
tech_groups:
test_supply_group:
essentials:
name: Supply tech
carrier: resource
parent: supply
techs.test_supply_elec.essentials.parent: test_supply_group
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override2, scenario="simple_supply,one_day")
@pytest.mark.filterwarnings(
"ignore:(?s).*defines force_resource but not a finite resource:calliope.exceptions.ModelWarning"
)
def test_missing_required_constraints(self):
"""
A technology within an abstract base technology must define a subset of
hardcoded constraints in order to function
"""
# should fail: missing one of ['energy_cap_max', 'energy_cap_equals', 'energy_cap_per_unit']
override_supply1 = AttrDict.from_yaml_string(
"""
techs:
demand_missing_constraint:
essentials:
parent: demand
carrier: electricity
name: demand missing constraint
switches:
resource_unit: power
nodes.b.techs.demand_missing_constraint:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override_supply1, scenario="simple_supply,one_day"
)
# should pass: giving one of ['energy_cap_max', 'energy_cap_equals', 'energy_cap_per_unit']
override_supply2 = AttrDict.from_yaml_string(
"""
techs:
supply_missing_constraint:
essentials:
parent: supply
carrier: electricity
name: supply missing constraint
constraints.energy_cap_max: 10
nodes.b.techs.supply_missing_constraint:
"""
)
build_model(override_dict=override_supply2, scenario="simple_supply,one_day")
def test_defining_non_allowed_constraints(self):
"""
A technology within an abstract base technology can only define a subset
        of hardcoded constraints; anything else will not be implemented and is
        therefore not allowed for that technology. This includes misspellings
"""
# should fail: storage_cap_max not allowed for supply tech
override_supply1 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.constraints.storage_cap_max: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override_supply1, scenario="simple_supply,one_day"
)
def test_defining_non_allowed_costs(self):
"""
A technology within an abstract base technology can only define a subset
        of hardcoded costs; anything else will not be implemented and is
        therefore not allowed for that technology. This includes misspellings
"""
# should fail: storage_cap_max not allowed for supply tech
override = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.costs.monetary.storage_cap: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="simple_supply,one_day")
# should fail: om_prod not allowed for demand tech
override = AttrDict.from_yaml_string(
"""
techs.test_demand_elec.costs.monetary.om_prod: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_defining_cost_class_with_name_of_cost(self):
"""
A cost class with the same name as one of the possible cost types was
defined, suggesting a user mistake with indentation.
"""
override = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.costs.storage_cap: 10
"""
)
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, "`test_supply_elec` at `b` defines storage_cap as a cost class."
)
def test_exporting_unspecified_carrier(self):
"""
User can only define an export carrier if it is defined in
['carrier_out', 'carrier_out_2', 'carrier_out_3']
"""
override_supply = lambda param: AttrDict.from_yaml_string(
"techs.test_supply_elec.constraints.export_carrier: {}".format(param)
)
        override_conversion_plus = lambda param: AttrDict.from_yaml_string(
"techs.test_conversion_plus.constraints.export_carrier: {}".format(param)
)
# should fail: exporting `heat` not allowed for electricity supply tech
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override_supply("heat"), scenario="simple_supply,one_day"
)
# should fail: exporting `random` not allowed for conversion_plus tech
with pytest.raises(exceptions.ModelError):
build_model(
                override_dict=override_conversion_plus("random"),
scenario="simple_conversion_plus,one_day",
)
# should pass: exporting electricity for supply tech
build_model(
override_dict=override_supply("electricity"),
scenario="simple_supply,one_day",
)
# should pass: exporting heat for conversion tech
build_model(
            override_dict=override_conversion_plus("heat"),
scenario="simple_conversion_plus,one_day",
)
def test_tech_directly_in_nodes(self):
"""
A tech defined directly within a location rather than within techs
inside that location is probably an oversight.
"""
override = {"nodes.b.test_supply_elec.costs.storage_cap": 10}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, "Node `b` contains unrecognised keys ['test_supply_elec']"
)
def test_tech_defined_twice_in_links(self):
"""
A technology can only be defined once for a link, even if that link is
defined twice (i.e. `A,B` and `B,A`).
"""
override = {
"links.a,b.techs.test_transmission_elec": None,
"links.b,a.techs.test_transmission_elec": None,
}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"Technology test_transmission_elec defined twice on a link defined "
"in both directions (e.g. `A,B` and `B,A`)",
)
override = {
"links.a,b.techs": {
"test_transmission_elec": None,
"test_transmission_heat": None,
},
"links.b,a.techs": {
"test_transmission_elec": None,
"test_transmission_heat": None,
},
}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, ["test_transmission_elec", "test_transmission_heat"]
)
# We do allow a link to be defined twice, so long as the same tech isn't in both
override = {
"techs.test_transmission_heat_2": {
"essentials.name": "Transmission heat tech",
"essentials.carrier": "heat",
"essentials.parent": "transmission",
},
"links.a,b.techs": {"test_transmission_elec": None},
"links.b,a.techs": {"test_transmission_heat_2": None},
}
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_allowed_time_varying_constraints(self):
"""
`file=` is only allowed on a hardcoded list of constraints, unless
`_time_varying` is appended to the constraint (i.e. user input)
"""
allowed_constraints_no_file = list(
set(defaults.tech_groups.storage.allowed_constraints).difference(
defaults.model.file_allowed
)
)
allowed_constraints_file = list(
set(defaults.tech_groups.storage.allowed_constraints).intersection(
defaults.model.file_allowed
)
)
override = lambda param: AttrDict.from_yaml_string(
"techs.test_storage.constraints.{}: file=binary_one_day.csv".format(param)
)
# should fail: Cannot have `file=` on the following constraints
for param in allowed_constraints_no_file:
with pytest.raises(exceptions.ModelError) as errors:
build_model(
override_dict=override(param), scenario="simple_storage,one_day"
)
assert check_error_or_warning(
errors,
"Cannot load data from file for configuration"
" `techs.test_storage.constraints.{}`".format(param),
)
# should pass: can have `file=` on the following constraints
for param in allowed_constraints_file:
build_model(
override_dict=override(param), scenario="simple_storage,one_day"
)
def test_incorrect_node_coordinates(self):
"""
Either all or no nodes must have `coordinates` defined and, if all
defined, they must be in the same coordinate system (lat/lon or x/y)
"""
def _override(param0, param1):
override = {}
if param0 is not None:
override.update({"nodes.a.coordinates": param0})
if param1 is not None:
override.update({"nodes.b.coordinates": param1})
return override
cartesian0 = {"x": 0, "y": 1}
cartesian1 = {"x": 1, "y": 1}
geographic0 = {"lat": 0, "lon": 1}
geographic1 = {"lat": 1, "lon": 1}
fictional0 = {"a": 0, "b": 1}
fictional1 = {"a": 1, "b": 1}
# should fail: cannot have nodes in one place and not in another
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override(cartesian0, None),
scenario="simple_supply,one_day",
)
check_error_or_warning(
error, "Either all or no nodes must have `coordinates` defined"
)
# should fail: cannot have cartesian coordinates in one place and geographic in another
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override(cartesian0, geographic1),
scenario="simple_supply,one_day",
)
check_error_or_warning(error, "All nodes must use the same coordinate format")
# should fail: cannot use a non-cartesian or non-geographic coordinate system
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override(fictional0, fictional1),
scenario="simple_supply,one_day",
)
check_error_or_warning(error, "Unidentified coordinate system")
# should fail: coordinates must be given as key:value pairs
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override([0, 1], [1, 1]),
scenario="simple_supply,one_day",
)
check_error_or_warning(error, "Coordinates must be given in the format")
# should pass: cartesian coordinates in both places
build_model(
override_dict=_override(cartesian0, cartesian1),
scenario="simple_supply,one_day",
)
# should pass: geographic coordinates in both places
build_model(
override_dict=_override(geographic0, geographic1),
scenario="simple_supply,one_day",
)
def test_one_way(self):
"""
With one_way transmission, we remove one direction of a link from
loc_tech_carriers_prod and the other from loc_tech_carriers_con.
"""
override = {
"links.X1,N1.techs.heat_pipes.switches.one_way": True,
"links.N1,X2.techs.heat_pipes.switches.one_way": True,
"links.N1,X3.techs.heat_pipes.switches.one_way": True,
"model.subset_time": ["2005-01-01", "2005-01-01"],
}
m = calliope.examples.urban_scale(override_dict=override)
m.run(build_only=True)
removed_prod_links = [
("X1", "heat_pipes:N1"),
("N1", "heat_pipes:X2"),
("N1", "heat_pipes:X3"),
]
removed_con_links = [
("N1", "heat_pipes:X1"),
("X2", "heat_pipes:N1"),
("X3", "heat_pipes:N1"),
]
for link in removed_prod_links:
assert link not in set(i[1:3] for i in m._backend_model.carrier_prod._index)
for link in removed_con_links:
assert link not in set(i[1:3] for i in m._backend_model.carrier_con._index)
def test_carrier_ratio_for_inexistent_carrier(self):
"""
A tech should not define a carrier ratio for a carrier it does
not actually use.
"""
override = AttrDict.from_yaml_string(
"""
nodes.1.techs.test_conversion_plus.constraints.carrier_ratios:
carrier_in:
some_carrier: 1.0
carrier_out_2:
another_carrier: 2.0
"""
)
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(
override_dict=override, scenario="simple_conversion_plus,one_day"
)
assert check_error_or_warning(
excinfo,
"Tech `test_conversion_plus` gives a carrier ratio for `another_carrier`, but does not actually",
)
def test_carrier_ratio_for_specified_carrier(self):
"""
        The warning about defining a carrier ratio for a carrier a tech does
        not actually use should not be triggered if the carrier is defined.
"""
override = AttrDict.from_yaml_string(
"""
nodes.b.techs.test_conversion_plus.constraints.carrier_ratios:
carrier_in:
heat: 1.0
"""
)
with pytest.warns(None) as excinfo:
build_model(
override_dict=override, scenario="simple_conversion_plus,one_day"
)
assert "Tech `test_conversion_plus` gives a carrier ratio" not in [
str(i) for i in excinfo.list
]
def test_carrier_ratio_from_file(self):
"""
It is possible to load a timeseries carrier_ratio from file
"""
override = AttrDict.from_yaml_string(
"""
nodes.b.techs.test_conversion_plus.constraints.carrier_ratios:
carrier_out.heat: file=carrier_ratio.csv
"""
)
with pytest.warns(None) as excinfo:
build_model(
override_dict=override, scenario="simple_conversion_plus,one_day"
)
assert "Cannot load data from file for configuration" not in [
str(i) for i in excinfo.list
]
@pytest.mark.filterwarnings("ignore:(?s).*Integer:calliope.exceptions.ModelWarning")
def test_milp_constraints(self):
"""
If `units` is defined, but not `energy_cap_per_unit`, throw an error
"""
# should fail: no energy_cap_per_unit
override1 = AttrDict.from_yaml_string(
"techs.test_supply_elec.constraints.units_max: 4"
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_supply,one_day")
# should pass: energy_cap_per_unit given
override2 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.constraints:
units_max: 4
energy_cap_per_unit: 5
"""
)
build_model(override_dict=override2, scenario="simple_supply,one_day")
def test_force_resource_ignored(self):
"""
        A technology that defines force_resource but is not in
        loc_techs_finite_resource (i.e. its resource is infinite) raises an
        error
"""
override = {
"techs.test_supply_elec.constraints.resource": np.inf,
"techs.test_supply_elec.switches.force_resource": True,
}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"Cannot have `force_resource` = True",
)
def test_override_coordinates(self):
"""
Check that warning is raised if we are completely overhauling the
coordinate system with an override
"""
override = {
"nodes": {
"X1.coordinates": {"lat": 51.4596158, "lon": -0.1613446},
"X2.coordinates": {"lat": 51.4652373, "lon": -0.1141548},
"X3.coordinates": {"lat": 51.4287016, "lon": -0.1310635},
"N1.coordinates": {"lat": 51.4450766, "lon": -0.1247183},
},
"links": {
"X1,X2.techs.power_lines.distance": 10,
"X1,X3.techs.power_lines.distance": 5,
"X1,N1.techs.heat_pipes.distance": 3,
"N1,X2.techs.heat_pipes.distance": 3,
"N1,X3.techs.heat_pipes.distance": 4,
},
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
calliope.examples.urban_scale(override_dict=override)
assert check_error_or_warning(excinfo, "Updated from coordinate system")
def test_clustering_and_cyclic_storage(self):
"""
Don't allow time clustering with cyclic storage if not also using
storage_inter_cluster
"""
override = {
"model.subset_time": ["2005-01-01", "2005-01-04"],
"model.time": {
"function": "apply_clustering",
"function_options": {
"clustering_func": "file=cluster_days.csv:0",
"how": "mean",
"storage_inter_cluster": False,
},
},
"run.cyclic_storage": True,
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override, scenario="simple_supply")
assert check_error_or_warning(error, "cannot have cyclic storage")
def test_incorrect_resource_unit(self):
"""
Only `energy`, `energy_per_cap`, or `energy_per_area` is allowed under
        `resource_unit`.
"""
def _override(resource_unit):
return {"techs.test_supply_elec.switches.resource_unit": resource_unit}
with pytest.raises(exceptions.ModelError) as error:
build_model(_override("power"), scenario="simple_supply")
build_model(_override("energy"), scenario="simple_supply")
build_model(_override("energy_per_cap"), scenario="simple_supply")
build_model(_override("energy_per_area"), scenario="simple_supply")
assert check_error_or_warning(
error, "`power` is an unknown resource unit for `test_supply_elec`"
)
@pytest.mark.parametrize(
"constraints,costs",
(
({"units_max": 2, "energy_cap_per_unit": 5}, None),
({"units_equals": 2, "energy_cap_per_unit": 5}, None),
({"units_min": 2, "energy_cap_per_unit": 5}, None),
(None, {"purchase": 2}),
),
)
@pytest.mark.xfail(
reason="Expected fail because now the setting of integer/binary variables is more explicit, so users should be aware without the need of a warning"
)
def test_milp_supply_warning(self, constraints, costs):
override_constraints = {}
override_costs = {}
if constraints is not None:
override_constraints.update(
{"techs.test_supply_elec.constraints": constraints}
)
if costs is not None:
override_costs.update({"techs.test_supply_elec.costs.monetary": costs})
override = {**override_constraints, **override_costs}
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
override_dict=override,
scenario="simple_supply,one_day,investment_costs",
)
assert check_error_or_warning(
warn,
"Integer and / or binary decision variables are included in this model",
)
@pytest.mark.parametrize(
"constraints,costs",
(
(
{"units_max": 2, "storage_cap_per_unit": 5, "energy_cap_per_unit": 5},
None,
),
(
{
"units_equals": 2,
"storage_cap_per_unit": 5,
"energy_cap_per_unit": 5,
},
None,
),
(
{"units_min": 2, "storage_cap_per_unit": 5, "energy_cap_per_unit": 5},
None,
),
(None, {"purchase": 2}),
),
)
@pytest.mark.xfail(
reason="Expected fail because now the setting of integer/binary variables is more explicit, so users should be aware without the need of a warning"
)
def test_milp_storage_warning(self, constraints, costs):
override_constraints = {}
override_costs = {}
if constraints is not None:
override_constraints.update({"techs.test_storage.constraints": constraints})
if costs is not None:
override_costs.update({"techs.test_storage.costs.monetary": costs})
override = {**override_constraints, **override_costs}
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
override_dict=override,
scenario="simple_storage,one_day,investment_costs",
)
assert check_error_or_warning(
warn,
"Integer and / or binary decision variables are included in this model",
)
def test_fail_on_string(self):
with pytest.raises(calliope.exceptions.ModelError) as exception:
build_model(
model_file="weighted_obj_func.yaml",
scenario="illegal_string_cost_class",
)
assert check_error_or_warning(
exception, "`run.objective_options.cost_class` must be a dictionary."
)
def test_warn_on_using_default(self):
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
model_file="weighted_obj_func.yaml",
scenario="emissions_objective_without_removing_monetary_default",
)
assert check_error_or_warning(
warn, "Monetary cost class with a weight of 1 is still included"
)
@pytest.mark.parametrize(
"override",
[
({"run.objective_options.cost_class": {"monetary": None}}),
(
{
"run.objective_options.cost_class": {
"monetary": None,
"emissions": None,
}
}
),
],
)
def test_warn_on_no_weight(self, override):
with pytest.warns(exceptions.ModelWarning) as warn:
model = build_model(
model_file="weighted_obj_func.yaml", override_dict=override
)
assert check_error_or_warning(
warn, "cost class monetary has weight = None, setting weight to 1"
)
assert all(
model.run_config["objective_options"]["cost_class"][i] == 1
for i in override["run.objective_options.cost_class"].keys()
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_storage_initial_fractional_value(self):
"""
Check that the storage_initial value is a fraction
"""
with pytest.raises(exceptions.ModelError) as error:
build_model(
{"techs.test_storage.constraints.storage_initial": 5},
"simple_storage,two_hours,investment_costs",
)
assert check_error_or_warning(
error, "storage_initial values larger than 1 are not allowed."
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_storage_initial_smaller_than_discharge_depth(self):
"""
        Check that the storage_initial value is at least equal to the storage_discharge_depth
"""
with pytest.raises(exceptions.ModelError) as error:
build_model(
{"techs.test_storage.constraints.storage_initial": 0},
"simple_storage,two_hours,investment_costs,storage_discharge_depth",
)
assert check_error_or_warning(
error, "storage_initial is smaller than storage_discharge_depth."
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_storage_inter_cluster_vs_storage_discharge_depth(self):
"""
Check that the storage_inter_cluster is not used together with storage_discharge_depth
"""
with pytest.raises(exceptions.ModelError) as error:
override = {"model.subset_time": ["2005-01-01", "2005-01-04"]}
build_model(override, "clustering,simple_storage,storage_discharge_depth")
assert check_error_or_warning(
error,
"storage_discharge_depth is currently not allowed when time clustering is active.",
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_warn_on_undefined_cost_classes(self):
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
model_file="weighted_obj_func.yaml",
scenario="undefined_class_objective",
)
assert check_error_or_warning(
warn,
"Cost classes `{'random_class'}` are defined in the objective options but not ",
)
class TestUtil:
def test_vincenty(self):
# London to Paris: about 344 km
coords = [(51.507222, -0.1275), (48.8567, 2.3508)]
distance = calliope.preprocess.util.vincenty(coords[0], coords[1])
assert distance == pytest.approx(343834) # in meters
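        # For orientation, a rough haversine cross-check (a sketch, not part
        # of the library API) gives roughly 343.5 km; the vincenty formula
        # additionally accounts for the Earth's ellipsoidal shape, hence the
        # slightly larger ~343.8 km expected above:
        #   import math
        #   def haversine(p0, p1, r=6371000):  # mean Earth radius in metres
        #       lat0, lon0, lat1, lon1 = map(math.radians, (*p0, *p1))
        #       a = (math.sin((lat1 - lat0) / 2) ** 2
        #            + math.cos(lat0) * math.cos(lat1)
        #            * math.sin((lon1 - lon0) / 2) ** 2)
        #       return 2 * r * math.asin(math.sqrt(a))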
class TestTime:
@pytest.fixture
def model_national(self, load_timeseries_from_dataframes):
"""
Return national scale example model. If load_timeseries_from_dataframes
is True, timeseries are read into dataframes and model is called using them.
If not, the timeseries are read in from CSV.
"""
if load_timeseries_from_dataframes:
# Create dictionary with dataframes
timeseries_data_path = os.path.join(
calliope.examples._PATHS["national_scale"], "timeseries_data/"
)
timeseries_dataframes = {}
timeseries_dataframes["csp_resource"] = pd.read_csv(
os.path.join(timeseries_data_path, "csp_resource.csv"), index_col=0
)
timeseries_dataframes["demand_1"] = pd.read_csv(
os.path.join(timeseries_data_path, "demand-1.csv"), index_col=0
)
timeseries_dataframes["demand_2"] = pd.read_csv(
os.path.join(timeseries_data_path, "demand-2.csv"), index_col=0
)
# Create override dict telling calliope to load timeseries from df
override_dict = {
"techs.csp.constraints.resource": "df=csp_resource",
"nodes.region1.techs.demand_power.constraints.resource": "df=demand_1:demand",
"nodes.region2.techs.demand_power.constraints.resource": "df=demand_2:demand",
}
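            # Note (based on the overrides above): "df=<key>:<column>" picks a
            # named column from the dataframe stored under <key> in
            # `timeseries_dataframes`, mirroring the "file=<name>.csv:<column>"
            # syntax used when reading from CSV.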
return calliope.examples.national_scale(
timeseries_dataframes=timeseries_dataframes, override_dict=override_dict
)
else:
return calliope.examples.national_scale()
@pytest.fixture
def model_urban(self):
return calliope.examples.urban_scale(
override_dict={"model.subset_time": ["2005-01-01", "2005-01-10"]}
)
def test_add_max_demand_timesteps(self, model_urban):
data = model_urban._model_data_pre_clustering.copy()
data = time.add_max_demand_timesteps(data)
assert data["max_demand_timesteps"].loc[
dict(carriers="heat")
].values == np.datetime64("2005-01-05T07:00:00")
assert data["max_demand_timesteps"].loc[
dict(carriers="electricity")
].values == np.datetime64("2005-01-10T09:00:00")
@pytest.mark.parametrize("load_timeseries_from_dataframes", [False, True])
def test_timeseries_from_csv(self, model_national):
"""
Timeseries data should be successfully loaded into national_scale example
model. This test checks whether this happens with timeseries loaded both
from CSV (`load_timeseries_from_dataframes`=False, called via file=...) and
from dataframes (`load_timeseries_from_dataframes`=True, called via df=...).
"""
model = model_national
assert model.inputs.resource.loc[("region1", "demand_power")].values[
0
] == approx(-25284.48)
assert model.inputs.resource.loc[("region2", "demand_power")].values[
0
] == approx(-2254.098)
assert model.inputs.resource.loc[("region1-1", "csp")].values[8] == approx(
0.263805
)
assert model.inputs.resource.loc[("region1-2", "csp")].values[8] == approx(
0.096755
)
assert model.inputs.resource.loc[("region1-3", "csp")].values[8] == approx(0.0)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
from unittest import mock
from oslo_config import cfg
from oslo_utils import timeutils as tu
from senlin.common import consts
from senlin.common import context
from senlin.common import exception as exc
from senlin.common import utils
from senlin.engine import health_manager as hm
from senlin.engine import node as node_mod
from senlin.engine.notifications import nova_endpoint
from senlin import objects
from senlin.objects import cluster as obj_cluster
from senlin.objects import node as obj_node
from senlin.objects import profile as obj_profile
from senlin.rpc import client as rpc_client
from senlin.tests.unit.common import base
class TestChaseUp(base.SenlinTestCase):
def test_less_than_one_interval(self):
start = tu.utcnow(True)
# we assume that the delay before next line is < 5 seconds
res = hm.chase_up(start, 5)
self.assertLessEqual(res, 5)
def test_more_than_one_interval(self):
start = tu.utcnow(True)
time.sleep(2)
        # after sleeping past the 1-second interval, the returned delay is
        # still capped at one interval
res = hm.chase_up(start, 1)
self.assertLessEqual(res, 1)
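# What both tests above rely on (an assumption about hm.chase_up, not its
# implementation): given a start time and an interval length in seconds, it
# returns how long to wait so that the next iteration lands back on the
# interval boundary, which is why the result can never exceed the interval.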
@mock.patch('oslo_messaging.NotificationFilter')
class TestNovaNotificationEndpoint(base.SenlinTestCase):
@mock.patch('senlin.rpc.client.get_engine_client')
def test_init(self, mock_rpc, mock_filter):
x_filter = mock_filter.return_value
event_map = {
'compute.instance.pause.end': 'PAUSE',
'compute.instance.power_off.end': 'POWER_OFF',
'compute.instance.rebuild.error': 'REBUILD',
'compute.instance.shutdown.end': 'SHUTDOWN',
'compute.instance.soft_delete.end': 'SOFT_DELETE',
}
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
mock_filter.assert_called_once_with(
publisher_id='^compute.*',
            event_type=r'^compute\.instance\..*',
context={'project_id': '^PROJECT$'})
mock_rpc.assert_called_once_with()
self.assertEqual(x_filter, endpoint.filter_rule)
self.assertEqual(mock_rpc.return_value, endpoint.rpc)
for e in event_map:
self.assertIn(e, endpoint.VM_FAILURE_EVENTS)
self.assertEqual(event_map[e], endpoint.VM_FAILURE_EVENTS[e])
self.assertEqual('PROJECT', endpoint.project_id)
self.assertEqual('CLUSTER_ID', endpoint.cluster_id)
@mock.patch.object(context.RequestContext, 'from_dict')
@mock.patch('senlin.rpc.client.get_engine_client')
def test_info(self, mock_rpc, mock_context, mock_filter):
x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
ctx = mock.Mock()
payload = {
'metadata': {
'cluster_id': 'CLUSTER_ID',
'cluster_node_id': 'FAKE_NODE',
'cluster_node_index': '123',
},
'instance_id': 'PHYSICAL_ID',
'user_id': 'USER',
'state': 'shutoff',
}
metadata = {'timestamp': 'TIMESTAMP'}
call_ctx = mock.Mock()
mock_context.return_value = call_ctx
res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.shutdown.end',
payload, metadata)
self.assertIsNone(res)
x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY)
req = x_rpc.call.call_args[0][2]
self.assertIsInstance(req, objects.NodeRecoverRequest)
self.assertEqual('FAKE_NODE', req.identity)
expected_params = {
'event': 'SHUTDOWN',
'state': 'shutoff',
'instance_id': 'PHYSICAL_ID',
'timestamp': 'TIMESTAMP',
'publisher': 'PUBLISHER',
'operation': 'REBUILD'
}
self.assertEqual(expected_params, req.params)
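        # The recover request parameters checked above are assembled from the
        # event map (event), the notification payload (state, instance_id),
        # the notification metadata (timestamp), the publisher argument, and
        # the recover_action passed to the endpoint (operation).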
@mock.patch('senlin.rpc.client.get_engine_client')
def test_info_no_metadata(self, mock_rpc, mock_filter):
x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
ctx = mock.Mock()
payload = {'metadata': {}}
metadata = {'timestamp': 'TIMESTAMP'}
res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end',
payload, metadata)
self.assertIsNone(res)
self.assertEqual(0, x_rpc.node_recover.call_count)
@mock.patch('senlin.rpc.client.get_engine_client')
def test_info_no_cluster_in_metadata(self, mock_rpc, mock_filter):
x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
ctx = mock.Mock()
payload = {'metadata': {'foo': 'bar'}}
metadata = {'timestamp': 'TIMESTAMP'}
res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end',
payload, metadata)
self.assertIsNone(res)
self.assertEqual(0, x_rpc.node_recover.call_count)
@mock.patch('senlin.rpc.client.get_engine_client')
def test_info_cluster_id_not_match(self, mock_rpc, mock_filter):
x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
ctx = mock.Mock()
payload = {'metadata': {'cluster_id': 'FOOBAR'}}
metadata = {'timestamp': 'TIMESTAMP'}
res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end',
payload, metadata)
self.assertIsNone(res)
self.assertEqual(0, x_rpc.node_recover.call_count)
@mock.patch('senlin.rpc.client.get_engine_client')
def test_info_event_type_not_interested(self, mock_rpc, mock_filter):
x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
ctx = mock.Mock()
payload = {'metadata': {'cluster_id': 'CLUSTER_ID'}}
metadata = {'timestamp': 'TIMESTAMP'}
res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.start',
payload, metadata)
self.assertIsNone(res)
self.assertEqual(0, x_rpc.node_recover.call_count)
@mock.patch('senlin.rpc.client.get_engine_client')
def test_info_no_node_id(self, mock_rpc, mock_filter):
x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
ctx = mock.Mock()
payload = {'metadata': {'cluster_id': 'CLUSTER_ID'}}
metadata = {'timestamp': 'TIMESTAMP'}
res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end',
payload, metadata)
self.assertIsNone(res)
self.assertEqual(0, x_rpc.node_recover.call_count)
@mock.patch.object(context.RequestContext, 'from_dict')
@mock.patch('senlin.rpc.client.get_engine_client')
def test_info_default_values(self, mock_rpc, mock_context, mock_filter):
x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'}
endpoint = nova_endpoint.NovaNotificationEndpoint(
'PROJECT', 'CLUSTER_ID', recover_action
)
ctx = mock.Mock()
payload = {
'metadata': {
'cluster_id': 'CLUSTER_ID',
'cluster_node_id': 'NODE_ID'
},
'user_id': 'USER',
}
metadata = {'timestamp': 'TIMESTAMP'}
call_ctx = mock.Mock()
mock_context.return_value = call_ctx
res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.shutdown.end',
payload, metadata)
self.assertIsNone(res)
x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY)
req = x_rpc.call.call_args[0][2]
self.assertIsInstance(req, objects.NodeRecoverRequest)
self.assertEqual('NODE_ID', req.identity)
expected_params = {
'event': 'SHUTDOWN',
'state': 'Unknown',
'instance_id': 'Unknown',
'timestamp': 'TIMESTAMP',
'publisher': 'PUBLISHER',
'operation': 'REBUILD',
}
self.assertEqual(expected_params, req.params)
@mock.patch(
'senlin.engine.notifications.heat_endpoint.HeatNotificationEndpoint')
@mock.patch(
'senlin.engine.notifications.nova_endpoint.NovaNotificationEndpoint')
@mock.patch('oslo_messaging.get_notification_transport')
@mock.patch('oslo_messaging.get_notification_listener')
class TestListenerProc(base.SenlinTestCase):
def test_listener_proc_nova(self, mock_listener, mock_transport,
mock_novaendpoint, mock_heatendpoint):
cfg.CONF.set_override('nova_control_exchange', 'FAKE_EXCHANGE',
group='health_manager')
x_listener = mock.Mock()
mock_listener.return_value = x_listener
x_transport = mock.Mock()
mock_transport.return_value = x_transport
x_endpoint = mock.Mock()
mock_novaendpoint.return_value = x_endpoint
recover_action = {'operation': 'REBUILD'}
res = hm.ListenerProc('FAKE_EXCHANGE', 'PROJECT_ID', 'CLUSTER_ID',
recover_action)
self.assertIsNone(res)
mock_transport.assert_called_once_with(cfg.CONF)
mock_novaendpoint.assert_called_once_with('PROJECT_ID', 'CLUSTER_ID',
recover_action)
mock_listener.assert_called_once_with(
x_transport, [mock_novaendpoint().target], [x_endpoint],
executor='threading', pool="senlin-listeners")
x_listener.start.assert_called_once_with()
def test_listener_proc_heat(self, mock_listener, mock_transport,
mock_novaendpoint, mock_heatendpoint):
x_listener = mock.Mock()
mock_listener.return_value = x_listener
x_transport = mock.Mock()
mock_transport.return_value = x_transport
x_endpoint = mock.Mock()
mock_heatendpoint.return_value = x_endpoint
recover_action = {'operation': 'REBUILD'}
res = hm.ListenerProc('heat', 'PROJECT_ID', 'CLUSTER_ID',
recover_action)
self.assertIsNone(res)
mock_transport.assert_called_once_with(cfg.CONF)
mock_heatendpoint.assert_called_once_with('PROJECT_ID', 'CLUSTER_ID',
recover_action)
mock_listener.assert_called_once_with(
x_transport, [mock_heatendpoint().target], [x_endpoint],
executor='threading', pool="senlin-listeners")
x_listener.start.assert_called_once_with()
class TestHealthCheckType(base.SenlinTestCase):
def setUp(self):
super(TestHealthCheckType, self).setUp()
self.hc = hm.NodePollStatusHealthCheck(
cluster_id='CLUSTER_ID', interval=1, node_update_timeout=1,
params=''
)
def test_factory(self):
cid = 'CLUSTER_ID'
interval = 1
params = {
'detection_modes': [
{
'type': 'NODE_STATUS_POLLING',
'poll_url': '',
'poll_url_ssl_verify': True,
'poll_url_conn_error_as_unhealthy': True,
'poll_url_healthy_response': '',
'poll_url_retry_limit': '',
'poll_url_retry_interval': ''
},
{
'type': 'HYPERVISOR_STATUS_POLLING',
'poll_url': '',
'poll_url_ssl_verify': True,
'poll_url_conn_error_as_unhealthy': True,
'poll_url_healthy_response': '',
'poll_url_retry_limit': '',
'poll_url_retry_interval': ''
},
{
'type': 'NODE_STATUS_POLL_URL',
'poll_url': '',
'poll_url_ssl_verify': True,
'poll_url_conn_error_as_unhealthy': True,
'poll_url_healthy_response': '',
'poll_url_retry_limit': '',
'poll_url_retry_interval': ''
}
],
'node_update_timeout': 300,
}
for d in params['detection_modes']:
hc = hm.HealthCheckType.factory(d['type'], cid, interval, params)
self.assertEqual(cid, hc.cluster_id)
self.assertEqual(interval, hc.interval)
self.assertEqual(d, hc.params)
self.assertEqual(
params['node_update_timeout'], hc.node_update_timeout)
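        # As exercised above, the factory acts as a type-keyed dispatcher:
        # each detection mode maps to a concrete HealthCheckType subclass and
        # receives only its own entry from `detection_modes` (hence the
        # equality check on hc.params) plus the shared node_update_timeout.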
def test_factory_invalid_type(self):
cid = 'CLUSTER_ID'
interval = 1
params = {
'detection_modes': [
{
'type': 'blah',
'poll_url': '',
'poll_url_ssl_verify': True,
'poll_url_conn_error_as_unhealthy': True,
'poll_url_healthy_response': '',
'poll_url_retry_limit': '',
'poll_url_retry_interval': ''
},
],
'node_update_timeout': 300,
}
with self.assertRaisesRegex(Exception, 'Invalid detection type: blah'):
hm.HealthCheckType.factory('blah', cid, interval, params)
def test_factory_same_type_twice(self):
cid = 'CLUSTER_ID'
interval = 1
params = {
'detection_modes': [
{
'type': 'NODE_STATUS_POLLING',
'poll_url': '',
'poll_url_ssl_verify': True,
'poll_url_conn_error_as_unhealthy': True,
'poll_url_healthy_response': '',
'poll_url_retry_limit': '',
'poll_url_retry_interval': ''
},
{
'type': 'NODE_STATUS_POLLING',
'poll_url': '',
'poll_url_ssl_verify': True,
'poll_url_conn_error_as_unhealthy': True,
'poll_url_healthy_response': '',
'poll_url_retry_limit': '',
'poll_url_retry_interval': ''
}
],
'node_update_timeout': 300,
}
with self.assertRaisesRegex(
Exception,
'.*Encountered 2 instances of type NODE_STATUS_POLLING'):
hm.HealthCheckType.factory(
'NODE_STATUS_POLLING', cid, interval, params)
class TestNodePollStatusHealthCheck(base.SenlinTestCase):
def setUp(self):
super(TestNodePollStatusHealthCheck, self).setUp()
self.hc = hm.NodePollStatusHealthCheck(
cluster_id='CLUSTER_ID',
interval=1, node_update_timeout=1, params=''
)
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_healthy(self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.return_value = True
mock_node_obj.return_value = x_entity
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_tu.assert_not_called()
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_healthy_internal_error(
self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.side_effect = exc.InternalError(
message='error')
mock_node_obj.return_value = x_entity
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_tu.assert_not_called()
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_unhealthy(self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.return_value = False
mock_node_obj.return_value = x_entity
mock_tu.return_value = True
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertFalse(res)
mock_tu.assert_called_once_with(node.updated_at, 1)
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_unhealthy_within_timeout(
self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.return_value = False
mock_node_obj.return_value = x_entity
mock_tu.return_value = False
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_tu.assert_called_once_with(node.updated_at, 1)
class TestHypervisorPollStatusHealthCheck(base.SenlinTestCase):
def setUp(self):
super(TestHypervisorPollStatusHealthCheck, self).setUp()
self.hc = hm.HypervisorPollStatusHealthCheck(
cluster_id='CLUSTER_ID',
interval=1, node_update_timeout=1, params=''
)
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_healthy(self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.return_value = True
mock_node_obj.return_value = x_entity
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_tu.assert_not_called()
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_healthy_internal_error(
self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.side_effect = exc.InternalError(
message='error')
mock_node_obj.return_value = x_entity
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_tu.assert_not_called()
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_unhealthy(self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.return_value = False
mock_node_obj.return_value = x_entity
mock_tu.return_value = True
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertFalse(res)
mock_tu.assert_called_once_with(node.updated_at, 1)
@mock.patch.object(node_mod.Node, '_from_object')
@mock.patch.object(tu, 'is_older_than')
def test_run_health_check_unhealthy_within_timeout(
self, mock_tu, mock_node_obj):
x_entity = mock.Mock()
x_entity.do_healthcheck.return_value = False
mock_node_obj.return_value = x_entity
mock_tu.return_value = False
ctx = mock.Mock()
node = mock.Mock(id='FAKE_NODE1', status="ERROR",
updated_at='2018-08-13 18:00:00',
init_at='2018-08-13 17:00:00')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_tu.assert_called_once_with(node.updated_at, 1)
class TestNodePollUrlHealthCheck(base.SenlinTestCase):
def setUp(self):
super(TestNodePollUrlHealthCheck, self).setUp()
default_params = {
'poll_url': 'FAKE_POLL_URL',
'poll_url_ssl_verify': True,
'poll_url_conn_error_as_unhealthy': True,
'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN',
'poll_url_retry_limit': 2,
'poll_url_retry_interval': 1,
'node_update_timeout': 5
}
self.hc = hm.NodePollUrlHealthCheck(
cluster_id='CLUSTER_ID', interval=1, node_update_timeout=1,
params=default_params
)
def test_expand_url_template(self):
url_template = 'https://abc123/foo/bar'
node = mock.Mock()
# do it
res = self.hc._expand_url_template(url_template, node)
self.assertEqual(res, url_template)
def test_expand_url_template_nodename(self):
node = mock.Mock()
node.name = 'name'
url_template = 'https://abc123/{nodename}/bar'
expanded_url = 'https://abc123/{}/bar'.format(node.name)
# do it
res = self.hc._expand_url_template(url_template, node)
self.assertEqual(res, expanded_url)
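    # Note (added commentary, not in the original tests): the two cases above
    # exercise NodePollUrlHealthCheck._expand_url_template: a template with no
    # placeholders is returned unchanged, while '{nodename}' is replaced with
    # the node's name, presumably via str.format-style substitution in the
    # health-manager implementation.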
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_healthy(
self, mock_url_fetch, mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_ACTIVE
mock_time.return_value = True
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.return_value = ("Healthy because this return value "
"contains FAKE_HEALTHY_PATTERN")
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1,
verify=True)
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_healthy_min_timeout(
self, mock_url_fetch, mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_ACTIVE
mock_time.return_value = True
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.return_value = ("Healthy because this return value "
"contains FAKE_HEALTHY_PATTERN")
self.hc.params['poll_url_retry_interval'] = 0
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1,
verify=True)
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_healthy_timeout(
self, mock_url_fetch, mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_ACTIVE
mock_time.return_value = True
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.return_value = ("Healthy because this return value "
"contains FAKE_HEALTHY_PATTERN")
self.hc.params['poll_url_retry_interval'] = 100
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=10,
verify=True)
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_unhealthy_inactive(
self, mock_url_fetch, mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_RECOVERING
mock_time.return_value = True
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.return_value = ""
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_not_called()
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_unhealthy_update_timeout(
self, mock_url_fetch, mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.id = 'FAKE_NODE_ID'
node.updated_at = 'FAKE_UPDATE_TIME'
node.status = consts.NS_ACTIVE
mock_time.return_value = False
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.return_value = ""
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_has_calls(
[mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True)])
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_unhealthy_init_timeout(
self, mock_url_fetch, mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.id = 'FAKE_NODE_ID'
node.updated_at = None
node.init_at = 'FAKE_INIT_TIME'
node.status = consts.NS_ACTIVE
mock_time.return_value = False
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.return_value = ""
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_has_calls(
[mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True)])
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_unhealthy(self, mock_url_fetch, mock_expand_url,
mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_ACTIVE
node.id = 'FAKE_ID'
mock_time.return_value = True
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.return_value = ""
# do it
res = self.hc.run_health_check(ctx, node)
self.assertFalse(res)
mock_url_fetch.assert_has_calls(
[
mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True),
mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True)
]
)
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_conn_error(self,
mock_url_fetch,
mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_ACTIVE
node.id = 'FAKE_ID'
mock_time.return_value = True
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.side_effect = utils.URLFetchError("Error")
# do it
res = self.hc.run_health_check(ctx, node)
self.assertFalse(res)
mock_url_fetch.assert_has_calls(
[
mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True),
mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True)
]
)
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_conn_other_error(self,
mock_url_fetch,
mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_ACTIVE
node.id = 'FAKE_ID'
mock_time.return_value = True
mock_expand_url.side_effect = Exception('blah')
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_not_called()
@mock.patch.object(tu, "is_older_than")
@mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template")
@mock.patch.object(utils, 'url_fetch')
def test_run_health_check_conn_error_noop(
self, mock_url_fetch, mock_expand_url, mock_time):
ctx = mock.Mock()
node = mock.Mock()
node.status = consts.NS_ACTIVE
node.id = 'FAKE_ID'
mock_time.return_value = True
mock_expand_url.return_value = 'FAKE_EXPANDED_URL'
mock_url_fetch.side_effect = utils.URLFetchError("Error")
self.hc.params['poll_url_conn_error_as_unhealthy'] = False
# do it
res = self.hc.run_health_check(ctx, node)
self.assertTrue(res)
mock_url_fetch.assert_has_calls(
[
mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True),
]
)
class TestHealthCheck(base.SenlinTestCase):
def setUp(self):
super(TestHealthCheck, self).setUp()
ctx = mock.Mock()
self.fake_rpc = mock.Mock()
with mock.patch.object(rpc_client, 'get_engine_client',
return_value=self.fake_rpc):
self.hc = hm.HealthCheck(
ctx=ctx,
engine_id='ENGINE_ID',
cluster_id='CID',
check_type=consts.NODE_STATUS_POLLING,
interval=60,
node_update_timeout=60,
params={
'node_update_timeout': 60,
'detection_modes': [
{'type': consts.NODE_STATUS_POLLING}
],
'recovery_conditional': consts.ANY_FAILED
},
enabled=True)
def test_get_health_check_types_polling(self):
self.hc.get_health_check_types()
self.assertEqual(consts.POLLING, self.hc.type)
def test_get_health_check_types_events(self):
self.hc.check_type = consts.LIFECYCLE_EVENTS
self.hc.get_health_check_types()
self.assertEqual(consts.EVENTS, self.hc.type)
def test_get_recover_actions(self):
self.hc.params = {
'node_delete_timeout': 60,
'node_force_recreate': True,
'recover_action': [{'name': 'FAKE_RECOVER_ACTION'}]
}
self.hc.get_recover_actions()
self.assertEqual(self.hc.params['node_delete_timeout'],
self.hc.recover_action['delete_timeout'])
self.assertEqual(self.hc.params['node_force_recreate'],
self.hc.recover_action['force_recreate'])
self.assertEqual(self.hc.params['recover_action'][0]['name'],
self.hc.recover_action['operation'])
@mock.patch.object(obj_node.Node, 'get_all_by_cluster')
@mock.patch.object(hm.HealthCheck, "_recover_node")
@mock.patch.object(hm.HealthCheck, "_wait_for_action")
@mock.patch.object(obj_cluster.Cluster, 'get')
@mock.patch.object(context, 'get_service_context')
def test_execute_health_check_any_mode_healthy(
self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes):
x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID',
id='CID')
mock_get.return_value = x_cluster
ctx = mock.Mock()
mock_ctx.return_value = ctx
mock_wait.return_value = (True, "")
x_node1 = mock.Mock(id='FAKE_NODE1', status="ERROR")
x_node2 = mock.Mock(id='FAKE_NODE2', status="ERROR")
mock_nodes.return_value = [x_node1, x_node2]
hc_true = {'run_health_check.return_value': True}
hc_test_values = [
[
mock.Mock(**hc_true),
mock.Mock(**hc_true),
mock.Mock(**hc_true),
],
]
for hc_mocks in hc_test_values:
self.hc.health_check_types = hc_mocks
mock_get.reset_mock()
mock_ctx.reset_mock()
mock_recover.reset_mock()
mock_wait.reset_mock()
# do it
self.hc.execute_health_check()
mock_get.assert_called_once_with(self.hc.ctx, 'CID',
project_safe=False)
mock_ctx.assert_called_once_with(user_id=x_cluster.user,
project_id=x_cluster.project)
for mock_hc in hc_mocks:
mock_hc.run_health_check.assert_has_calls(
[
mock.call(ctx, x_node1),
mock.call(ctx, x_node2)
]
)
mock_recover.assert_not_called()
mock_wait.assert_not_called()
@mock.patch.object(obj_node.Node, 'get_all_by_cluster')
@mock.patch.object(hm.HealthCheck, "_recover_node")
@mock.patch.object(hm.HealthCheck, "_wait_for_action")
@mock.patch.object(obj_cluster.Cluster, 'get')
@mock.patch.object(context, 'get_service_context')
def test_execute_health_check_all_mode_unhealthy(
self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes):
self.hc.cluster_id = 'CLUSTER_ID'
self.hc.interval = 1
self.hc.recovery_cond = consts.ALL_FAILED
self.hc.node_update_timeout = 1
self.hc.recovery_action = {'operation': 'REBUILD'}
x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID',
id='CLUSTER_ID')
mock_get.return_value = x_cluster
ctx = mock.Mock()
mock_ctx.return_value = ctx
mock_wait.return_value = (True, "")
x_node = mock.Mock(id='FAKE_NODE', status="ERROR")
mock_nodes.return_value = [x_node]
mock_recover.return_value = {'action': 'FAKE_ACTION_ID'}
hc_false = {'run_health_check.return_value': False}
hc_test_values = [
[
mock.Mock(**hc_false),
]
]
for hc_mocks in hc_test_values:
self.hc.health_check_types = hc_mocks
mock_get.reset_mock()
mock_ctx.reset_mock()
mock_recover.reset_mock()
mock_wait.reset_mock()
# do it
self.hc.execute_health_check()
mock_get.assert_called_once_with(self.hc.ctx, 'CLUSTER_ID',
project_safe=False)
mock_ctx.assert_called_once_with(user_id=x_cluster.user,
project_id=x_cluster.project)
for mock_hc in hc_mocks:
mock_hc.run_health_check.assert_has_calls(
[
mock.call(ctx, x_node)
]
)
mock_recover.assert_called_once_with(ctx, 'FAKE_NODE')
mock_wait.assert_called_once_with(
ctx, 'FAKE_ACTION_ID', self.hc.node_update_timeout)
@mock.patch.object(obj_cluster.Cluster, 'get')
@mock.patch.object(context, 'get_service_context')
def test_execute_health_check_cluster_not_found(self, mock_ctx, mock_get):
mock_get.return_value = None
self.hc.execute_health_check()
mock_ctx.assert_not_called()
@mock.patch.object(hm.HealthCheck, "_recover_node")
def test_check_node_health_any_failed(self, mock_recover):
x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID',
id='CLUSTER_ID')
x_node = mock.Mock(id='FAKE_NODE', status="ERROR")
ctx = mock.Mock()
self.hc.params['recovery_conditional'] = consts.ANY_FAILED
mock_hc_1 = mock.Mock()
mock_hc_1.run_health_check.return_value = True
mock_hc_2 = mock.Mock()
mock_hc_2.run_health_check.return_value = False
self.hc.health_check_types = [mock_hc_1, mock_hc_2]
self.hc._check_node_health(ctx, x_node, x_cluster)
mock_hc_1.run_health_check.assert_called_once_with(ctx, x_node)
mock_hc_2.run_health_check.assert_called_once_with(ctx, x_node)
mock_recover.assert_called_once_with(ctx, x_node.id)
@mock.patch.object(hm.HealthCheck, "_recover_node")
def test_check_node_health_all_failed(self, mock_recover):
x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID',
id='CLUSTER_ID')
x_node = mock.Mock(id='FAKE_NODE', status="ERROR")
ctx = mock.Mock()
self.hc.params['recovery_conditional'] = consts.ALL_FAILED
mock_hc_1 = mock.Mock()
mock_hc_1.run_health_check.return_value = False
mock_hc_2 = mock.Mock()
mock_hc_2.run_health_check.return_value = False
self.hc.health_check_types = [mock_hc_1, mock_hc_2]
self.hc._check_node_health(ctx, x_node, x_cluster)
mock_hc_1.run_health_check.assert_called_once_with(ctx, x_node)
mock_hc_2.run_health_check.assert_called_once_with(ctx, x_node)
mock_recover.assert_called_once_with(ctx, x_node.id)
@mock.patch.object(hm.HealthCheck, "_recover_node")
def test_check_node_health_all_failed_negative(self, mock_recover):
x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID',
id='CLUSTER_ID')
x_node = mock.Mock(id='FAKE_NODE', status="ERROR")
ctx = mock.Mock()
self.hc.params['recovery_conditional'] = consts.ALL_FAILED
mock_hc_1 = mock.Mock()
mock_hc_1.run_health_check.return_value = False
mock_hc_2 = mock.Mock()
mock_hc_2.run_health_check.return_value = True
self.hc.health_check_types = [mock_hc_1, mock_hc_2]
self.hc._check_node_health(ctx, x_node, x_cluster)
mock_hc_1.run_health_check.assert_called_once_with(ctx, x_node)
mock_hc_2.run_health_check.assert_called_once_with(ctx, x_node)
mock_recover.assert_not_called()
@mock.patch('senlin.objects.ActionGetRequest')
def test_wait_for_action(self, mock_action_req):
x_req = mock.Mock()
mock_action_req.return_value = x_req
x_action = {'status': consts.ACTION_SUCCEEDED}
self.fake_rpc.call.return_value = x_action
ctx = mock.Mock()
action_id = 'FAKE_ACTION_ID'
timeout = 5
# do it
res, err = self.hc._wait_for_action(ctx, action_id, timeout)
self.assertTrue(res)
self.assertEqual(err, '')
self.fake_rpc.call.assert_called_with(ctx, 'action_get', x_req)
@mock.patch('senlin.objects.ActionGetRequest')
def test_wait_for_action_success_before_timeout(self, mock_action_req):
x_req = mock.Mock()
mock_action_req.return_value = x_req
x_action1 = {'status': consts.ACTION_RUNNING}
x_action2 = {'status': consts.ACTION_SUCCEEDED}
self.fake_rpc.call.side_effect = [x_action1, x_action2]
ctx = mock.Mock()
action_id = 'FAKE_ACTION_ID'
timeout = 5
# do it
res, err = self.hc._wait_for_action(ctx, action_id, timeout)
self.assertTrue(res)
self.assertEqual(err, '')
self.fake_rpc.call.assert_has_calls(
[
mock.call(ctx, 'action_get', x_req),
mock.call(ctx, 'action_get', x_req)
]
)
@mock.patch('senlin.objects.ActionGetRequest')
def test_wait_for_action_timeout(self, mock_action_req):
x_req = mock.Mock()
mock_action_req.return_value = x_req
x_action = {'status': consts.ACTION_RUNNING}
self.fake_rpc.call.return_value = x_action
ctx = mock.Mock()
action_id = 'FAKE_ACTION_ID'
timeout = 5
# do it
res, err = self.hc._wait_for_action(ctx, action_id, timeout)
self.assertFalse(res)
self.assertTrue(re.search('timeout', err, re.IGNORECASE))
self.fake_rpc.call.assert_has_calls(
[
mock.call(ctx, 'action_get', x_req)
]
)
@mock.patch('senlin.objects.ActionGetRequest')
def test_wait_for_action_failed(self, mock_action_req):
x_req = mock.Mock()
mock_action_req.return_value = x_req
x_action = {'status': consts.ACTION_FAILED}
self.fake_rpc.call.return_value = x_action
ctx = mock.Mock()
action_id = 'FAKE_ACTION_ID'
timeout = 5
# do it
res, err = self.hc._wait_for_action(ctx, action_id, timeout)
self.assertFalse(res)
self.assertEqual(err, 'Cluster check action failed or cancelled')
self.fake_rpc.call.assert_called_with(ctx, 'action_get', x_req)
@mock.patch('senlin.objects.ActionGetRequest')
def test_wait_for_action_cancelled(self, mock_action_req):
x_req = mock.Mock()
mock_action_req.return_value = x_req
x_action = {'status': consts.ACTION_CANCELLED}
self.fake_rpc.call.return_value = x_action
ctx = mock.Mock()
action_id = 'FAKE_ACTION_ID'
timeout = 5
# do it
res, err = self.hc._wait_for_action(ctx, action_id, timeout)
self.assertFalse(res)
self.assertEqual(err, 'Cluster check action failed or cancelled')
self.fake_rpc.call.assert_called_with(ctx, 'action_get', x_req)
@mock.patch('senlin.objects.NodeRecoverRequest', autospec=True)
def test_recover_node(self, mock_req):
ctx = mock.Mock()
node_id = 'FAKE_NODE'
self.hc.recover_action = {'operation': 'REBUILD'}
        x_req = mock.Mock()
mock_req.return_value = x_req
x_action = {'action': 'RECOVER_ID1'}
self.fake_rpc.call.return_value = x_action
# do it
res = self.hc._recover_node(ctx, node_id)
self.assertEqual(x_action, res)
mock_req.assert_called_once_with(
identity=node_id, params=self.hc.recover_action)
self.fake_rpc.call.assert_called_once_with(ctx, 'node_recover', x_req)
@mock.patch('senlin.objects.NodeRecoverRequest', autospec=True)
def test_recover_node_failed(self, mock_req):
ctx = mock.Mock()
node_id = 'FAKE_NODE'
self.hc.recover_action = {'operation': 'REBUILD'}
        x_req = mock.Mock()
mock_req.return_value = x_req
self.fake_rpc.call.side_effect = Exception('boom')
# do it
res = self.hc._recover_node(ctx, node_id)
self.assertIsNone(res)
mock_req.assert_called_once_with(
identity=node_id, params=self.hc.recover_action)
self.fake_rpc.call.assert_called_once_with(ctx, 'node_recover', x_req)
@mock.patch('senlin.objects.HealthRegistry', autospec=True)
def test_db_create(self, mock_hrdb):
self.hc.db_create()
mock_hrdb.create.assert_called_once_with(
self.hc.ctx, self.hc.cluster_id, self.hc.check_type,
self.hc.interval, self.hc.params, self.hc.engine_id,
self.hc.enabled)
@mock.patch('senlin.objects.HealthRegistry', autospec=True)
def test_db_delete(self, mock_hrdb):
self.hc.db_delete()
mock_hrdb.delete.assert_called_once_with(self.hc.ctx,
self.hc.cluster_id)
@mock.patch('senlin.objects.HealthRegistry', autospec=True)
def test_enable(self, mock_hrdb):
self.hc.enable()
mock_hrdb.update.assert_called_once_with(
self.hc.ctx, self.hc.cluster_id, {'enabled': True})
@mock.patch('senlin.objects.HealthRegistry', autospec=True)
def test_disable(self, mock_hrdb):
self.hc.disable()
mock_hrdb.update.assert_called_once_with(
self.hc.ctx, self.hc.cluster_id, {'enabled': False})
class TestRuntimeHealthRegistry(base.SenlinTestCase):
def setUp(self):
super(TestRuntimeHealthRegistry, self).setUp()
mock_ctx = mock.Mock()
self.mock_tg = mock.Mock()
self.rhr = hm.RuntimeHealthRegistry(mock_ctx, 'ENGINE_ID',
self.mock_tg)
def create_mock_entry(self, ctx=None, engine_id='ENGINE_ID',
cluster_id='CID',
check_type=None,
interval=60, node_update_timeout=60, params=None,
enabled=True, timer=None, listener=None,
type=consts.POLLING):
mock_entry = mock.Mock(
ctx=ctx,
engine_id=engine_id,
cluster_id=cluster_id,
check_type=check_type,
interval=interval,
node_update_timeout=node_update_timeout,
params=params,
enabled=enabled,
timer=timer,
listener=listener,
execute_health_check=mock.Mock(),
type=type)
return mock_entry
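    # Clarifying note (not part of the original tests): the mock entry built
    # here stands in for an hm.HealthCheck registry record; the tests below
    # place it into self.rhr.registries and exercise the timer/listener
    # bookkeeping of RuntimeHealthRegistry against it.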
@mock.patch.object(hm, 'HealthCheck')
def test_register_cluster(self, mock_hc):
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING])
mock_entry.db_create = mock.Mock()
mock_hc.return_value = mock_entry
self.rhr.register_cluster('CID', 60, 60, {})
self.assertEqual(mock_entry, self.rhr.registries['CID'])
self.mock_tg.add_dynamic_timer.assert_called_once_with(
mock_entry.execute_health_check, None, None)
self.mock_tg.add_thread.assert_not_called()
mock_entry.db_create.assert_called_once_with()
@mock.patch.object(hm, 'HealthCheck')
def test_register_cluster_failed(self, mock_hc):
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING])
mock_entry.db_create = mock.Mock()
mock_entry.db_delete = mock.Mock()
mock_hc.return_value = mock_entry
self.rhr.add_health_check = mock.Mock()
self.rhr.add_health_check.side_effect = Exception
self.rhr.register_cluster('CID', 60, 60, {})
self.assertEqual(mock_entry, self.rhr.registries['CID'])
self.mock_tg.add_dynamic_timer.assert_not_called()
self.mock_tg.add_thread.assert_not_called()
mock_entry.db_create.assert_called_once_with()
mock_entry.db_delete.assert_called_once_with()
def test_unregister_cluster_with_timer(self):
timer = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING],
timer=timer)
self.rhr.registries['CID'] = mock_entry
mock_entry.db_delete = mock.Mock()
self.rhr.unregister_cluster('CID')
mock_entry.db_delete.assert_called_once_with()
timer.stop.assert_called_once_with()
self.mock_tg.timer_done.assert_called_once_with(timer)
self.assertIsNone(mock_entry.timer)
def test_unregister_cluster_with_listener(self):
listener = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING],
listener=listener)
self.rhr.registries['CID'] = mock_entry
mock_entry.db_delete = mock.Mock()
self.rhr.unregister_cluster('CID')
mock_entry.db_delete.assert_called_once_with()
listener.stop.assert_called_once_with()
self.mock_tg.thread_done.assert_called_once_with(listener)
self.assertIsNone(mock_entry.listener)
def test_unregister_cluster_failed(self):
listener = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING],
listener=listener)
self.rhr.registries['CID'] = mock_entry
mock_entry.db_delete.side_effect = Exception
self.rhr.unregister_cluster('CID')
listener.stop.assert_called_once_with()
self.mock_tg.thread_done.assert_called_once_with(listener)
self.assertIsNone(mock_entry.listener)
def test_enable_cluster(self):
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING],
enabled=False)
def mock_enable():
mock_entry.enabled = True
return True
mock_entry.enable = mock_enable
self.rhr.registries['CID'] = mock_entry
self.rhr.enable_cluster('CID')
self.assertTrue(mock_entry.enabled)
self.mock_tg.add_dynamic_timer.assert_called_once_with(
mock_entry.execute_health_check, None, None)
self.mock_tg.add_thread.assert_not_called()
def test_enable_cluster_failed(self):
timer = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING],
enabled=False, timer=timer)
mock_entry.enable = mock.Mock()
mock_entry.enable.side_effect = Exception
self.rhr.registries['CID'] = mock_entry
self.rhr.enable_cluster('CID')
self.mock_tg.add_dynamic_timer.assert_not_called()
self.mock_tg.add_thread.assert_not_called()
timer.stop.assert_called_once_with()
self.mock_tg.timer_done.assert_called_once_with(timer)
def test_disable_cluster(self):
timer = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING],
enabled=True, timer=timer)
def mock_disable():
mock_entry.enabled = False
mock_entry.disable = mock_disable
self.rhr.registries['CID'] = mock_entry
self.rhr.disable_cluster('CID')
self.assertEqual(False, mock_entry.enabled)
self.mock_tg.add_dynamic_timer.assert_not_called()
self.mock_tg.add_thread.assert_not_called()
timer.stop.assert_called_once_with()
self.mock_tg.timer_done.assert_called_once_with(timer)
def test_disable_cluster_failed(self):
timer = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING],
enabled=True, timer=timer)
        mock_entry.disable.side_effect = Exception
self.rhr.registries['CID'] = mock_entry
self.rhr.disable_cluster('CID')
self.mock_tg.add_dynamic_timer.assert_not_called()
self.mock_tg.add_thread.assert_not_called()
timer.stop.assert_called_once_with()
self.mock_tg.timer_done.assert_called_once_with(timer)
def test_add_timer(self):
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING])
self.rhr.registries['CID'] = mock_entry
fake_timer = mock.Mock()
self.mock_tg.add_dynamic_timer = mock.Mock()
self.mock_tg.add_dynamic_timer.return_value = fake_timer
self.rhr._add_timer('CID')
self.assertEqual(fake_timer, mock_entry.timer)
self.mock_tg.add_dynamic_timer.assert_called_once_with(
mock_entry.execute_health_check, None, None)
def test_add_timer_failed(self):
fake_timer = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING], timer=fake_timer)
self.rhr.registries['CID'] = mock_entry
self.mock_tg.add_dynamic_timer = mock.Mock()
self.rhr._add_timer('CID')
self.assertEqual(fake_timer, mock_entry.timer)
self.mock_tg.add_dynamic_timer.assert_not_called()
@mock.patch.object(obj_profile.Profile, 'get')
@mock.patch.object(obj_cluster.Cluster, 'get')
def test_add_listener_nova(self, mock_cluster, mock_profile):
cfg.CONF.set_override('nova_control_exchange', 'FAKE_NOVA_EXCHANGE',
group='health_manager')
mock_entry = self.create_mock_entry(
check_type=[consts.LIFECYCLE_EVENTS])
self.rhr.registries['CID'] = mock_entry
fake_listener = mock.Mock()
x_cluster = mock.Mock(project='PROJECT_ID', profile_id='PROFILE_ID')
mock_cluster.return_value = x_cluster
x_profile = mock.Mock(type='os.nova.server-1.0')
mock_profile.return_value = x_profile
self.mock_tg.add_thread = mock.Mock()
self.mock_tg.add_thread.return_value = fake_listener
self.rhr._add_listener('CID')
mock_cluster.assert_called_once_with(self.rhr.ctx, 'CID',
project_safe=False)
mock_profile.assert_called_once_with(self.rhr.ctx, 'PROFILE_ID',
project_safe=False)
self.mock_tg.add_thread.assert_called_once_with(
hm.ListenerProc, 'FAKE_NOVA_EXCHANGE', 'PROJECT_ID', 'CID',
mock_entry.recover_action)
@mock.patch.object(obj_profile.Profile, 'get')
@mock.patch.object(obj_cluster.Cluster, 'get')
def test_add_listener_heat(self, mock_cluster, mock_profile):
cfg.CONF.set_override('heat_control_exchange', 'FAKE_HEAT_EXCHANGE',
group='health_manager')
mock_entry = self.create_mock_entry(
check_type=[consts.LIFECYCLE_EVENTS])
self.rhr.registries['CID'] = mock_entry
fake_listener = mock.Mock()
x_cluster = mock.Mock(project='PROJECT_ID', profile_id='PROFILE_ID')
mock_cluster.return_value = x_cluster
x_profile = mock.Mock(type='os.heat.stack-1.0')
mock_profile.return_value = x_profile
self.mock_tg.add_thread = mock.Mock()
self.mock_tg.add_thread.return_value = fake_listener
self.rhr._add_listener('CID')
mock_cluster.assert_called_once_with(self.rhr.ctx, 'CID',
project_safe=False)
mock_profile.assert_called_once_with(self.rhr.ctx, 'PROFILE_ID',
project_safe=False)
self.mock_tg.add_thread.assert_called_once_with(
hm.ListenerProc, 'FAKE_HEAT_EXCHANGE', 'PROJECT_ID', 'CID',
mock_entry.recover_action)
@mock.patch.object(obj_profile.Profile, 'get')
@mock.patch.object(obj_cluster.Cluster, 'get')
def test_add_listener_failed(self, mock_cluster, mock_profile):
cfg.CONF.set_override('heat_control_exchange', 'FAKE_HEAT_EXCHANGE',
group='health_manager')
fake_listener = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.LIFECYCLE_EVENTS], listener=fake_listener)
self.rhr.registries['CID'] = mock_entry
x_cluster = mock.Mock(project='PROJECT_ID', profile_id='PROFILE_ID')
mock_cluster.return_value = x_cluster
x_profile = mock.Mock(type='os.heat.stack-1.0')
mock_profile.return_value = x_profile
self.mock_tg.add_thread = mock.Mock()
self.rhr._add_listener('CID')
mock_cluster.assert_not_called()
mock_profile.assert_not_called()
self.mock_tg.add_thread.assert_not_called()
def test_add_health_check_polling(self):
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING])
self.rhr.registries['CID'] = mock_entry
self.rhr._add_timer = mock.Mock()
self.rhr._add_listener = mock.Mock()
self.rhr.add_health_check(mock_entry)
self.rhr._add_timer.assert_called_once_with('CID')
self.rhr._add_listener.assert_not_called()
def test_add_health_check_events(self):
mock_entry = self.create_mock_entry(
check_type=[consts.LIFECYCLE_EVENTS], type=consts.EVENTS)
self.rhr.registries['CID'] = mock_entry
self.rhr._add_timer = mock.Mock()
self.rhr._add_listener = mock.Mock()
self.rhr.add_health_check(mock_entry)
self.rhr._add_timer.assert_not_called()
self.rhr._add_listener.assert_called_once_with('CID')
def test_add_health_check_disabled(self):
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING], enabled=False)
self.rhr.registries['CID'] = mock_entry
self.rhr._add_timer = mock.Mock()
self.rhr._add_listener = mock.Mock()
self.rhr.add_health_check(mock_entry)
self.rhr._add_timer.assert_not_called()
self.rhr._add_listener.assert_not_called()
def test_add_health_check_timer_exists(self):
fake_timer = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING], timer=fake_timer)
self.rhr.registries['CID'] = mock_entry
self.rhr._add_timer = mock.Mock()
self.rhr._add_listener = mock.Mock()
self.rhr.add_health_check(mock_entry)
self.rhr._add_timer.assert_not_called()
self.rhr._add_listener.assert_not_called()
def test_remove_health_check_timer(self):
fake_timer = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING], timer=fake_timer)
self.rhr.registries['CID'] = mock_entry
self.rhr.remove_health_check(mock_entry)
fake_timer.stop.assert_called_once_with()
self.mock_tg.timer_done.assert_called_once_with(fake_timer)
self.mock_tg.thread_done.assert_not_called()
self.assertIsNone(mock_entry.timer)
def test_remove_health_check_listener(self):
fake_listener = mock.Mock()
mock_entry = self.create_mock_entry(
check_type=[consts.NODE_STATUS_POLLING], listener=fake_listener)
self.rhr.registries['CID'] = mock_entry
self.rhr.remove_health_check(mock_entry)
fake_listener.stop.assert_called_once_with()
self.mock_tg.timer_done.assert_not_called()
self.mock_tg.thread_done.assert_called_once_with(fake_listener)
self.assertIsNone(mock_entry.listener)
|
|
"""Identity related views."""
from __future__ import unicode_literals
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _, ungettext
from django.views import generic
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test
)
from reversion import revisions as reversion
from modoboa.core.models import User
from modoboa.lib.exceptions import (
PermDeniedException, BadRequest
)
from modoboa.lib.listing import (
get_sort_order, get_listing_page
)
from modoboa.lib.web_utils import render_to_json_response
from ..forms import AccountForm, AccountWizard
from ..lib import get_identities
from ..models import Mailbox, Domain
from .. import signals
@login_required
@user_passes_test(
lambda u: u.has_perm("core.add_user") or
u.has_perm("admin.add_alias")
)
def _identities(request):
filters = dict((fname, request.GET.get(fname, None))
for fname in ['searchquery', 'idtfilter', 'grpfilter'])
request.session['identities_filters'] = filters
idents_list = get_identities(request.user, **filters)
sort_order, sort_dir = get_sort_order(request.GET, "identity",
["identity", "name_or_rcpt", "tags"])
if sort_order in ["identity", "name_or_rcpt"]:
objects = sorted(idents_list, key=lambda o: getattr(o, sort_order),
reverse=sort_dir == '-')
else:
objects = sorted(idents_list, key=lambda o: o.tags[0],
reverse=sort_dir == '-')
context = {
"handle_mailboxes": request.localconfig.parameters.get_value(
"handle_mailboxes", raise_exception=False)
}
page = get_listing_page(objects, request.GET.get("page", 1))
if page is None:
context["length"] = 0
else:
context["headers"] = render_to_string(
"admin/identity_headers.html", {}, request)
context["rows"] = render_to_string(
"admin/identities_table.html", {
"identities": page.object_list
}, request
)
context["pages"] = [page.number]
return render_to_json_response(context)
@login_required
@permission_required("admin.add_mailbox")
def list_quotas(request):
from modoboa.lib.db_utils import db_type
sort_order, sort_dir = get_sort_order(request.GET, "address")
mboxes = Mailbox.objects.get_for_admin(
request.user, request.GET.get("searchquery", None)
)
mboxes = mboxes.exclude(quota=0)
if sort_order in ["address", "quota"]:
mboxes = mboxes.order_by("%s%s" % (sort_dir, sort_order))
elif sort_order == "quota_value__bytes":
where = "admin_mailbox.address||'@'||admin_domain.name"
mboxes = mboxes.extra(
select={"quota_value__bytes": "admin_quota.bytes"},
where=["admin_quota.username=%s" % where],
tables=["admin_quota", "admin_domain"],
order_by=["%s%s" % (sort_dir, sort_order)]
)
elif sort_order == "quota_usage":
where = "admin_mailbox.address||'@'||admin_domain.name"
        dbtype = db_type()
        if dbtype == "postgres":
select = (
"(admin_quota.bytes::float / (CAST(admin_mailbox.quota "
"AS BIGINT) * 1048576)) * 100"
)
else:
select = (
"admin_quota.bytes / (admin_mailbox.quota "
"* 1048576) * 100"
)
if db_type == "mysql":
where = "CONCAT(admin_mailbox.address,'@',admin_domain.name)"
mboxes = mboxes.extra(
select={'quota_usage': select},
where=["admin_quota.username=%s" % where],
tables=["admin_quota", "admin_domain"],
order_by=["%s%s" % (sort_dir, sort_order)]
)
else:
raise BadRequest(_("Invalid request"))
page = get_listing_page(mboxes, request.GET.get("page", 1))
context = {
"headers": render_to_string(
"admin/quota_headers.html", {}, request
)
}
if page is None:
context["length"] = 0
else:
context["rows"] = render_to_string(
"admin/quotas.html", {"mboxes": page}, request
)
context["pages"] = [page.number]
return render_to_json_response(context)
@login_required
@user_passes_test(
lambda u: u.has_perm("admin.add_user") or
u.has_perm("admin.add_alias") or
u.has_perm("admin.add_mailbox")
)
def get_next_page(request):
"""Return the next page of the identity list."""
if request.GET.get("objtype", "identity") == "identity":
return _identities(request)
return list_quotas(request)
@login_required
@user_passes_test(
lambda u: u.has_perm("admin.add_user") or
u.has_perm("admin.add_alias")
)
@ensure_csrf_cookie
def identities(request, tplname="admin/identities.html"):
return render(request, tplname, {
"selection": "identities",
"deflocation": "list/"
})
@login_required
@permission_required("core.add_user")
def accounts_list(request):
accs = User.objects.filter(is_superuser=False) \
.exclude(groups__name='SimpleUsers')
res = [a.username for a in accs.all()]
return render_to_json_response(res)
@login_required
@permission_required("core.add_user")
@reversion.create_revision()
def newaccount(request):
"""Create a new account."""
return AccountWizard(request).process()
@login_required
@permission_required("core.change_user")
@reversion.create_revision()
def editaccount(request, pk):
account = User.objects.get(pk=pk)
if not request.user.can_access(account):
raise PermDeniedException
mb = account.mailbox if hasattr(account, "mailbox") else None
instances = dict(general=account, mail=mb, perms=account)
results = signals.get_account_form_instances.send(
sender="editaccount", user=request.user, account=account)
for result in results:
instances.update(result[1])
return AccountForm(request, instances=instances).process()
@login_required
@permission_required("core.delete_user")
def delaccount(request, pk):
User.objects.get(pk=pk).delete()
return render_to_json_response(
ungettext("Account deleted", "Accounts deleted", 1)
)
@login_required
@permission_required("admin.add_domain")
def remove_permission(request):
domid = request.GET.get("domid", None)
daid = request.GET.get("daid", None)
if domid is None or daid is None:
raise BadRequest(_("Invalid request"))
try:
account = User.objects.get(pk=daid)
domain = Domain.objects.get(pk=domid)
except (User.DoesNotExist, Domain.DoesNotExist):
raise BadRequest(_("Invalid request"))
if not request.user.can_access(account) or \
not request.user.can_access(domain):
raise PermDeniedException
domain.remove_admin(account)
return render_to_json_response({})
class AccountDetailView(
auth_mixins.PermissionRequiredMixin, generic.DetailView):
"""DetailView for Account."""
model = User
permission_required = "core.add_user"
template_name = "admin/account_detail.html"
def has_permission(self):
"""Check object-level access."""
result = super(AccountDetailView, self).has_permission()
if not result:
return result
return self.request.user.can_access(self.get_object())
def get_context_data(self, **kwargs):
"""Add information to context."""
context = super(AccountDetailView, self).get_context_data(**kwargs)
del context["user"]
result = signals.extra_account_dashboard_widgets.send(
self.__class__, user=self.request.user, account=self.object)
context["templates"] = {"left": [], "right": []}
for receiver, widgets in result:
for widget in widgets:
context["templates"][widget["column"]].append(
widget["template"])
context.update(widget["context"])
if self.object.role in ["Resellers", "DomainAdmins"]:
context["domains"] = Domain.objects.get_for_admin(self.object)
context["selection"] = "identities"
return context
|
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test mongo using the synchronizer, i.e. as it would be used by an
user
"""
import time
import os
import sys
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path[0:0] = [""]
from pymongo import MongoClient
from tests import mongo_host, STRESS_COUNT
from tests.setup_cluster import (start_replica_set,
kill_replica_set,
start_mongo_proc,
restart_mongo_proc,
kill_mongo_proc)
from mongo_connector.doc_managers.mongo_doc_manager import DocManager
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from pymongo.errors import OperationFailure, AutoReconnect
from tests.util import assert_soon
class TestSynchronizer(unittest.TestCase):
""" Tests the mongo instance
"""
@classmethod
def setUpClass(cls):
try:
os.unlink("config.txt")
except OSError:
pass
open("config.txt", "w").close()
cls.standalone_port = start_mongo_proc(options=['--nojournal',
'--noprealloc'])
cls.mongo_doc = DocManager('%s:%d' % (mongo_host, cls.standalone_port))
cls.mongo_doc._remove()
_, cls.secondary_p, cls.primary_p = start_replica_set('test-mongo')
cls.conn = MongoClient(mongo_host, cls.primary_p,
replicaSet='test-mongo')
@classmethod
def tearDownClass(cls):
""" Kills cluster instance
"""
kill_mongo_proc(cls.standalone_port)
kill_replica_set('test-mongo')
def tearDown(self):
self.connector.join()
def setUp(self):
self.connector = Connector(
address='%s:%s' % (mongo_host, self.primary_p),
oplog_checkpoint="config.txt",
target_url='%s:%d' % (mongo_host, self.standalone_port),
ns_set=['test.test'],
u_key='_id',
auth_key=None,
doc_manager='mongo_connector/doc_managers/mongo_doc_manager.py'
)
self.connector.start()
assert_soon(lambda: len(self.connector.shard_set) > 0)
self.conn['test']['test'].remove()
assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) == 0)
def test_shard_length(self):
"""Tests the shard_length to see if the shard set was recognized
properly
"""
self.assertEqual(len(self.connector.shard_set), 1)
def test_insert(self):
"""Tests insert
"""
self.conn['test']['test'].insert({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) == 1)
result_set_1 = self.mongo_doc._search()
self.assertEqual(sum(1 for _ in result_set_1), 1)
result_set_2 = self.conn['test']['test'].find_one()
for item in result_set_1:
self.assertEqual(item['_id'], result_set_2['_id'])
self.assertEqual(item['name'], result_set_2['name'])
def test_remove(self):
"""Tests remove
"""
self.conn['test']['test'].insert({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) == 1)
self.conn['test']['test'].remove({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) != 1)
self.assertEqual(sum(1 for _ in self.mongo_doc._search()), 0)
def test_rollback(self):
"""Tests rollback. We force a rollback by adding a doc, killing the
primary, adding another doc, killing the new primary, and then
restarting both.
"""
primary_conn = MongoClient(mongo_host, self.primary_p)
self.conn['test']['test'].insert({'name': 'paul'})
condition = lambda: self.conn['test']['test'].find_one(
{'name': 'paul'}) is not None
assert_soon(condition)
assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) == 1)
kill_mongo_proc(self.primary_p, destroy=False)
new_primary_conn = MongoClient(mongo_host, self.secondary_p)
admin = new_primary_conn['admin']
condition = lambda: admin.command("isMaster")['ismaster']
assert_soon(lambda: retry_until_ok(condition))
retry_until_ok(self.conn.test.test.insert,
{'name': 'pauline'})
assert_soon(lambda: sum(1 for _ in self.mongo_doc._search()) == 2)
result_set_1 = list(self.mongo_doc._search())
result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
self.assertEqual(len(result_set_1), 2)
#make sure pauline is there
for item in result_set_1:
if item['name'] == 'pauline':
self.assertEqual(item['_id'], result_set_2['_id'])
kill_mongo_proc(self.secondary_p, destroy=False)
restart_mongo_proc(self.primary_p)
assert_soon(
lambda: primary_conn['admin'].command("isMaster")['ismaster'])
restart_mongo_proc(self.secondary_p)
time.sleep(2)
result_set_1 = list(self.mongo_doc._search())
self.assertEqual(len(result_set_1), 1)
for item in result_set_1:
self.assertEqual(item['name'], 'paul')
find_cursor = retry_until_ok(self.conn['test']['test'].find)
self.assertEqual(retry_until_ok(find_cursor.count), 1)
def test_stress(self):
"""Test stress by inserting and removing the number of documents
specified in global
variable
"""
for i in range(0, STRESS_COUNT):
self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
time.sleep(5)
search = self.mongo_doc._search
condition = lambda: sum(1 for _ in search()) == STRESS_COUNT
assert_soon(condition)
for i in range(0, STRESS_COUNT):
result_set_1 = self.mongo_doc._search()
for item in result_set_1:
                if item['name'] == 'Paul ' + str(i):
self.assertEqual(item['_id'], item['_id'])
def test_stressed_rollback(self):
"""Test stressed rollback with number of documents equal to specified
in global variable. Strategy for rollback is the same as before.
"""
for i in range(0, STRESS_COUNT):
self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
search = self.mongo_doc._search
condition = lambda: sum(1 for _ in search()) == STRESS_COUNT
assert_soon(condition)
primary_conn = MongoClient(mongo_host, self.primary_p)
kill_mongo_proc(self.primary_p, destroy=False)
new_primary_conn = MongoClient(mongo_host, self.secondary_p)
admin = new_primary_conn['admin']
assert_soon(lambda: admin.command("isMaster")['ismaster'])
time.sleep(5)
count = -1
while count + 1 < STRESS_COUNT:
try:
count += 1
self.conn['test']['test'].insert(
{'name': 'Pauline ' + str(count)})
except (OperationFailure, AutoReconnect):
time.sleep(1)
assert_soon(lambda: sum(1 for _ in self.mongo_doc._search())
== self.conn['test']['test'].find().count())
result_set_1 = self.mongo_doc._search()
for item in result_set_1:
if 'Pauline' in item['name']:
result_set_2 = self.conn['test']['test'].find_one(
{'name': item['name']})
self.assertEqual(item['_id'], result_set_2['_id'])
kill_mongo_proc(self.secondary_p, destroy=False)
restart_mongo_proc(self.primary_p)
db_admin = primary_conn['admin']
assert_soon(lambda: db_admin.command("isMaster")['ismaster'])
restart_mongo_proc(self.secondary_p)
search = self.mongo_doc._search
condition = lambda: sum(1 for _ in search()) == STRESS_COUNT
assert_soon(condition)
result_set_1 = list(self.mongo_doc._search())
self.assertEqual(len(result_set_1), STRESS_COUNT)
for item in result_set_1:
self.assertTrue('Paul' in item['name'])
find_cursor = retry_until_ok(self.conn['test']['test'].find)
self.assertEqual(retry_until_ok(find_cursor.count), STRESS_COUNT)
if __name__ == '__main__':
unittest.main()
|
|
"""Class to hold all cover accessories."""
import logging
from pyhap.const import (
CATEGORY_GARAGE_DOOR_OPENER,
CATEGORY_WINDOW,
CATEGORY_WINDOW_COVERING,
)
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
DOMAIN,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
STATE_CLOSED,
STATE_CLOSING,
STATE_ON,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change_event
from .accessories import TYPES, HomeAccessory
from .const import (
ATTR_OBSTRUCTION_DETECTED,
CHAR_CURRENT_DOOR_STATE,
CHAR_CURRENT_POSITION,
CHAR_CURRENT_TILT_ANGLE,
CHAR_HOLD_POSITION,
CHAR_OBSTRUCTION_DETECTED,
CHAR_POSITION_STATE,
CHAR_TARGET_DOOR_STATE,
CHAR_TARGET_POSITION,
CHAR_TARGET_TILT_ANGLE,
CONF_LINKED_OBSTRUCTION_SENSOR,
HK_DOOR_CLOSED,
HK_DOOR_CLOSING,
HK_DOOR_OPEN,
HK_DOOR_OPENING,
HK_POSITION_GOING_TO_MAX,
HK_POSITION_GOING_TO_MIN,
HK_POSITION_STOPPED,
SERV_GARAGE_DOOR_OPENER,
SERV_WINDOW,
SERV_WINDOW_COVERING,
)
DOOR_CURRENT_HASS_TO_HK = {
STATE_OPEN: HK_DOOR_OPEN,
STATE_CLOSED: HK_DOOR_CLOSED,
STATE_OPENING: HK_DOOR_OPENING,
STATE_CLOSING: HK_DOOR_CLOSING,
}
# HomeKit only has two states for
# Target Door State:
# 0: Open
# 1: Closed
# Opening is mapped to 0 since the target is Open
# Closing is mapped to 1 since the target is Closed
DOOR_TARGET_HASS_TO_HK = {
STATE_OPEN: HK_DOOR_OPEN,
STATE_CLOSED: HK_DOOR_CLOSED,
STATE_OPENING: HK_DOOR_OPEN,
STATE_CLOSING: HK_DOOR_CLOSED,
}
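# Illustrative usage of the mappings above (added commentary, not part of the
# original module): async_update_state below looks the Home Assistant state up
# in both dicts, e.g. STATE_OPENING resolves to HK_DOOR_OPEN as the target
# state and HK_DOOR_OPENING as the current state; states missing from a
# mapping resolve to None and are skipped.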
_LOGGER = logging.getLogger(__name__)
@TYPES.register("GarageDoorOpener")
class GarageDoorOpener(HomeAccessory):
"""Generate a Garage Door Opener accessory for a cover entity.
The cover entity must be in the 'garage' device class
and support no more than open, close, and stop.
"""
def __init__(self, *args):
"""Initialize a GarageDoorOpener accessory object."""
super().__init__(*args, category=CATEGORY_GARAGE_DOOR_OPENER)
state = self.hass.states.get(self.entity_id)
serv_garage_door = self.add_preload_service(SERV_GARAGE_DOOR_OPENER)
self.char_current_state = serv_garage_door.configure_char(
CHAR_CURRENT_DOOR_STATE, value=0
)
self.char_target_state = serv_garage_door.configure_char(
CHAR_TARGET_DOOR_STATE, value=0, setter_callback=self.set_state
)
self.char_obstruction_detected = serv_garage_door.configure_char(
CHAR_OBSTRUCTION_DETECTED, value=False
)
self.linked_obstruction_sensor = self.config.get(CONF_LINKED_OBSTRUCTION_SENSOR)
if self.linked_obstruction_sensor:
self._async_update_obstruction_state(
self.hass.states.get(self.linked_obstruction_sensor)
)
self.async_update_state(state)
async def run(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
if self.linked_obstruction_sensor:
async_track_state_change_event(
self.hass,
[self.linked_obstruction_sensor],
self._async_update_obstruction_event,
)
await super().run()
@callback
def _async_update_obstruction_event(self, event):
"""Handle state change event listener callback."""
self._async_update_obstruction_state(event.data.get("new_state"))
@callback
def _async_update_obstruction_state(self, new_state):
"""Handle linked obstruction sensor state change to update HomeKit value."""
if not new_state:
return
detected = new_state.state == STATE_ON
if self.char_obstruction_detected.value == detected:
return
self.char_obstruction_detected.set_value(detected)
_LOGGER.debug(
"%s: Set linked obstruction %s sensor to %d",
self.entity_id,
self.linked_obstruction_sensor,
detected,
)
def set_state(self, value):
"""Change garage state if call came from HomeKit."""
_LOGGER.debug("%s: Set state to %d", self.entity_id, value)
params = {ATTR_ENTITY_ID: self.entity_id}
if value == HK_DOOR_OPEN:
if self.char_current_state.value != value:
self.char_current_state.set_value(HK_DOOR_OPENING)
self.async_call_service(DOMAIN, SERVICE_OPEN_COVER, params)
elif value == HK_DOOR_CLOSED:
if self.char_current_state.value != value:
self.char_current_state.set_value(HK_DOOR_CLOSING)
self.async_call_service(DOMAIN, SERVICE_CLOSE_COVER, params)
@callback
def async_update_state(self, new_state):
"""Update cover state after state changed."""
hass_state = new_state.state
target_door_state = DOOR_TARGET_HASS_TO_HK.get(hass_state)
current_door_state = DOOR_CURRENT_HASS_TO_HK.get(hass_state)
if ATTR_OBSTRUCTION_DETECTED in new_state.attributes:
obstruction_detected = (
new_state.attributes[ATTR_OBSTRUCTION_DETECTED] is True
)
if self.char_obstruction_detected.value != obstruction_detected:
self.char_obstruction_detected.set_value(obstruction_detected)
if (
target_door_state is not None
and self.char_target_state.value != target_door_state
):
self.char_target_state.set_value(target_door_state)
if (
current_door_state is not None
and self.char_current_state.value != current_door_state
):
self.char_current_state.set_value(current_door_state)
class OpeningDeviceBase(HomeAccessory):
"""Generate a base Window accessory for a cover entity.
This class is used for WindowCoveringBasic and
WindowCovering
"""
def __init__(self, *args, category, service):
"""Initialize a OpeningDeviceBase accessory object."""
super().__init__(*args, category=category)
state = self.hass.states.get(self.entity_id)
self.features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
self._supports_stop = self.features & SUPPORT_STOP
self.chars = []
if self._supports_stop:
self.chars.append(CHAR_HOLD_POSITION)
self._supports_tilt = self.features & SUPPORT_SET_TILT_POSITION
if self._supports_tilt:
self.chars.extend([CHAR_TARGET_TILT_ANGLE, CHAR_CURRENT_TILT_ANGLE])
self.serv_cover = self.add_preload_service(service, self.chars)
if self._supports_stop:
self.char_hold_position = self.serv_cover.configure_char(
CHAR_HOLD_POSITION, setter_callback=self.set_stop
)
if self._supports_tilt:
self.char_target_tilt = self.serv_cover.configure_char(
CHAR_TARGET_TILT_ANGLE, setter_callback=self.set_tilt
)
self.char_current_tilt = self.serv_cover.configure_char(
CHAR_CURRENT_TILT_ANGLE, value=0
)
def set_stop(self, value):
"""Stop the cover motion from HomeKit."""
if value != 1:
return
self.async_call_service(
DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: self.entity_id}
)
def set_tilt(self, value):
"""Set tilt to value if call came from HomeKit."""
_LOGGER.info("%s: Set tilt to %d", self.entity_id, value)
# HomeKit sends values between -90 and 90.
# We'll have to normalize to [0,100]
value = round((value + 90) / 180.0 * 100.0)
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_TILT_POSITION: value}
self.async_call_service(DOMAIN, SERVICE_SET_COVER_TILT_POSITION, params, value)
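    # Worked example for the conversions used here (illustrative only): a
    # HomeKit tilt of -90 becomes round((-90 + 90) / 180.0 * 100.0) == 0 in
    # Home Assistant terms, 0 becomes 50 and +90 becomes 100, while
    # async_update_state below applies the inverse,
    # (position / 100.0 * 180.0) - 90.0, when reporting the tilt back.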
@callback
def async_update_state(self, new_state):
"""Update cover position and tilt after state changed."""
# update tilt
current_tilt = new_state.attributes.get(ATTR_CURRENT_TILT_POSITION)
if isinstance(current_tilt, (float, int)):
# HomeKit sends values between -90 and 90.
# We'll have to normalize to [0,100]
current_tilt = (current_tilt / 100.0 * 180.0) - 90.0
current_tilt = int(current_tilt)
if self.char_current_tilt.value != current_tilt:
self.char_current_tilt.set_value(current_tilt)
if self.char_target_tilt.value != current_tilt:
self.char_target_tilt.set_value(current_tilt)
class OpeningDevice(OpeningDeviceBase, HomeAccessory):
"""Generate a Window/WindowOpening accessory for a cover entity.
The cover entity must support: set_cover_position.
"""
def __init__(self, *args, category, service):
"""Initialize a WindowCovering accessory object."""
super().__init__(*args, category=category, service=service)
state = self.hass.states.get(self.entity_id)
self.char_current_position = self.serv_cover.configure_char(
CHAR_CURRENT_POSITION, value=0
)
self.char_target_position = self.serv_cover.configure_char(
CHAR_TARGET_POSITION, value=0, setter_callback=self.move_cover
)
self.char_position_state = self.serv_cover.configure_char(
CHAR_POSITION_STATE, value=HK_POSITION_STOPPED
)
self.async_update_state(state)
def move_cover(self, value):
"""Move cover to value if call came from HomeKit."""
_LOGGER.debug("%s: Set position to %d", self.entity_id, value)
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_POSITION: value}
self.async_call_service(DOMAIN, SERVICE_SET_COVER_POSITION, params, value)
@callback
def async_update_state(self, new_state):
"""Update cover position and tilt after state changed."""
current_position = new_state.attributes.get(ATTR_CURRENT_POSITION)
if isinstance(current_position, (float, int)):
current_position = int(current_position)
if self.char_current_position.value != current_position:
self.char_current_position.set_value(current_position)
if self.char_target_position.value != current_position:
self.char_target_position.set_value(current_position)
position_state = _hass_state_to_position_start(new_state.state)
if self.char_position_state.value != position_state:
self.char_position_state.set_value(position_state)
super().async_update_state(new_state)
@TYPES.register("Window")
class Window(OpeningDevice):
"""Generate a Window accessory for a cover entity with DEVICE_CLASS_WINDOW.
The entity must support: set_cover_position.
"""
def __init__(self, *args):
"""Initialize a Window accessory object."""
super().__init__(*args, category=CATEGORY_WINDOW, service=SERV_WINDOW)
@TYPES.register("WindowCovering")
class WindowCovering(OpeningDevice):
"""Generate a WindowCovering accessory for a cover entity.
The entity must support: set_cover_position.
"""
def __init__(self, *args):
"""Initialize a WindowCovering accessory object."""
super().__init__(
*args, category=CATEGORY_WINDOW_COVERING, service=SERV_WINDOW_COVERING
)
@TYPES.register("WindowCoveringBasic")
class WindowCoveringBasic(OpeningDeviceBase, HomeAccessory):
"""Generate a Window accessory for a cover entity.
The cover entity must support: open_cover, close_cover,
stop_cover (optional).
"""
def __init__(self, *args):
"""Initialize a WindowCoveringBasic accessory object."""
super().__init__(
*args, category=CATEGORY_WINDOW_COVERING, service=SERV_WINDOW_COVERING
)
state = self.hass.states.get(self.entity_id)
self.char_current_position = self.serv_cover.configure_char(
CHAR_CURRENT_POSITION, value=0
)
self.char_target_position = self.serv_cover.configure_char(
CHAR_TARGET_POSITION, value=0, setter_callback=self.move_cover
)
self.char_position_state = self.serv_cover.configure_char(
CHAR_POSITION_STATE, value=HK_POSITION_STOPPED
)
self.async_update_state(state)
def move_cover(self, value):
"""Move cover to value if call came from HomeKit."""
_LOGGER.debug("%s: Set position to %d", self.entity_id, value)
if (
self._supports_stop
and value > 70
or not self._supports_stop
and value >= 50
):
service, position = (SERVICE_OPEN_COVER, 100)
elif value < 30 or not self._supports_stop:
service, position = (SERVICE_CLOSE_COVER, 0)
else:
service, position = (SERVICE_STOP_COVER, 50)
params = {ATTR_ENTITY_ID: self.entity_id}
self.async_call_service(DOMAIN, service, params)
# Snap the current/target position to the expected final position.
self.char_current_position.set_value(position)
self.char_target_position.set_value(position)
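    # Summary of the thresholds above (added commentary): with stop support, a
    # requested position above 70 opens the cover, below 30 closes it, and
    # anything in between issues a stop and snaps to 50; without stop support,
    # 50 and above opens and everything lower closes.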
@callback
def async_update_state(self, new_state):
"""Update cover position after state changed."""
position_mapping = {STATE_OPEN: 100, STATE_CLOSED: 0}
hk_position = position_mapping.get(new_state.state)
if hk_position is not None:
if self.char_current_position.value != hk_position:
self.char_current_position.set_value(hk_position)
if self.char_target_position.value != hk_position:
self.char_target_position.set_value(hk_position)
position_state = _hass_state_to_position_start(new_state.state)
if self.char_position_state.value != position_state:
self.char_position_state.set_value(position_state)
super().async_update_state(new_state)
def _hass_state_to_position_start(state):
"""Convert hass state to homekit position state."""
if state == STATE_OPENING:
return HK_POSITION_GOING_TO_MAX
if state == STATE_CLOSING:
return HK_POSITION_GOING_TO_MIN
return HK_POSITION_STOPPED
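# A minimal, self-contained sketch (not part of this integration) mirroring the
# threshold logic in WindowCoveringBasic.move_cover above: with stop support,
# HomeKit targets above 70 open the cover and below 30 close it, anything in
# between stops it; without stop support, 50 is the only cut-off.
def _sketch_basic_cover_service(value, supports_stop):
    """Return the cover service a HomeKit target position would map to."""
    if (supports_stop and value > 70) or (not supports_stop and value >= 50):
        return SERVICE_OPEN_COVER   # position snaps to 100
    if value < 30 or not supports_stop:
        return SERVICE_CLOSE_COVER  # position snaps to 0
    return SERVICE_STOP_COVER       # position snaps to 50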
|
|
# We can test part of the module without zlib.
try:
import zlib
except ImportError:
zlib = None
import zipfile, os, unittest, sys, shutil, struct
from StringIO import StringIO
from tempfile import TemporaryFile
from random import randint, random
import test.test_support as support
from test.test_support import TESTFN, run_unittest, findfile
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
FIXEDTEST_SIZE = 1000
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/_ziptest2', 'qawsedrftg'),
('/ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
class TestsWithSourceFile(unittest.TestCase):
def setUp(self):
self.line_gen = ["Zipfile test line %d. random float: %f" % (i, random())
for i in xrange(FIXEDTEST_SIZE)]
self.data = '\n'.join(self.line_gen) + '\n'
# Make a source file with some lines
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
def makeTestArchive(self, f, compression):
# Create the ZIP archive
zipfp = zipfile.ZipFile(f, "w", compression)
zipfp.write(TESTFN, "another"+os.extsep+"name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
zipfp.close()
def zipTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another"+os.extsep+"name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
zipfp.printdir()
finally:
sys.stdout = stdout
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEquals(len(lines), 4) # Number of files + header
self.assert_('File Name' in lines[0])
self.assert_('Modified' in lines[0])
self.assert_('Size' in lines[0])
fn, date, time, size = lines[1].split()
self.assertEquals(fn, 'another.name')
# XXX: timestamp is not tested
self.assertEquals(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
# Check infolist
infos = zipfp.infolist()
names = [ i.filename for i in infos ]
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
for i in infos:
self.assertEquals(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another"+os.extsep+"name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEquals(info.filename, nm)
self.assertEquals(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
zipfp.close()
def testStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_STORED)
def zipOpenTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
zipdata1 = []
zipopen1 = zipfp.open(TESTFN)
while 1:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
zipopen2 = zipfp.open("another"+os.extsep+"name")
while 1:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
self.assertEqual(''.join(zipdata1), self.data)
self.assertEqual(''.join(zipdata2), self.data)
zipfp.close()
def testOpenStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipOpenTest(f, zipfile.ZIP_STORED)
def testOpenViaZipInfo(self):
# Create the ZIP archive
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
zipfp.writestr("name", "foo")
zipfp.writestr("name", "bar")
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r")
infos = zipfp.infolist()
data = ""
for info in infos:
data += zipfp.open(info).read()
self.assert_(data == "foobar" or data == "barfoo")
data = ""
for info in infos:
data += zipfp.read(info)
self.assert_(data == "foobar" or data == "barfoo")
zipfp.close()
def zipRandomOpenTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
zipdata1 = []
zipopen1 = zipfp.open(TESTFN)
while 1:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
self.assertEqual(''.join(zipdata1), self.data)
zipfp.close()
def testRandomOpenStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipRandomOpenTest(f, zipfile.ZIP_STORED)
def zipReadlineTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
zipopen = zipfp.open(TESTFN)
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line + '\n')
zipfp.close()
def zipReadlinesTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
ziplines = zipfp.open(TESTFN).readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line + '\n')
zipfp.close()
def zipIterlinesTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
for line, zipline in zip(self.line_gen, zipfp.open(TESTFN)):
self.assertEqual(zipline, line + '\n')
zipfp.close()
def testReadlineStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlineTest(f, zipfile.ZIP_STORED)
def testReadlinesStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlinesTest(f, zipfile.ZIP_STORED)
def testIterlinesStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipIterlinesTest(f, zipfile.ZIP_STORED)
if zlib:
def testDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_DEFLATED)
def testOpenDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipOpenTest(f, zipfile.ZIP_DEFLATED)
def testRandomOpenDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipRandomOpenTest(f, zipfile.ZIP_DEFLATED)
def testReadlineDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlineTest(f, zipfile.ZIP_DEFLATED)
def testReadlinesDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipReadlinesTest(f, zipfile.ZIP_DEFLATED)
def testIterlinesDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipIterlinesTest(f, zipfile.ZIP_DEFLATED)
def testLowCompression(self):
# Checks for cases where compressed data is larger than original
# Create the ZIP archive
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED)
zipfp.writestr("strfile", '12')
zipfp.close()
# Get an open object for strfile
zipfp = zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_DEFLATED)
openobj = zipfp.open("strfile")
self.assertEqual(openobj.read(1), '1')
self.assertEqual(openobj.read(1), '2')
def testAbsoluteArcnames(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
zipfp.write(TESTFN, "/absolute")
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED)
self.assertEqual(zipfp.namelist(), ["absolute"])
zipfp.close()
def testAppendToZipFile(self):
# Test appending to an existing zipfile
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
zipfp.write(TESTFN, TESTFN)
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED)
zipfp.writestr("strfile", self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
zipfp.close()
def testAppendToNonZipFile(self):
# Test appending to an existing file that is not a zipfile
# NOTE: this test fails if len(d) < 22 because of the first
# line "fpin.seek(-22, 2)" in _EndRecData
d = 'I am not a ZipFile!'*10
f = file(TESTFN2, 'wb')
f.write(d)
f.close()
zipfp = zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED)
zipfp.write(TESTFN, TESTFN)
zipfp.close()
f = file(TESTFN2, 'rb')
f.seek(len(d))
zipfp = zipfile.ZipFile(f, "r")
self.assertEqual(zipfp.namelist(), [TESTFN])
zipfp.close()
f.close()
def test_WriteDefaultName(self):
# Check that calling ZipFile.write without arcname specified produces the expected result
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.write(TESTFN)
self.assertEqual(zipfp.read(TESTFN), file(TESTFN).read())
zipfp.close()
def test_PerFileCompression(self):
# Check that files within a Zip archive can have different compression options
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)
self.assertEqual(dinfo.compress_type, zipfile.ZIP_DEFLATED)
zipfp.close()
def test_WriteToReadonly(self):
# Check that trying to call write() on a readonly ZipFile object
# raises a RuntimeError
zipf = zipfile.ZipFile(TESTFN2, mode="w")
zipf.writestr("somefile.txt", "bogus")
zipf.close()
zipf = zipfile.ZipFile(TESTFN2, mode="r")
self.assertRaises(RuntimeError, zipf.write, TESTFN)
zipf.close()
def testExtract(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r")
for fpath, fdata in SMALL_TEST_DATA:
writtenfile = zipfp.extract(fpath)
# make sure it was written to the right place
if os.path.isabs(fpath):
correctfile = os.path.join(os.getcwd(), fpath[1:])
else:
correctfile = os.path.join(os.getcwd(), fpath)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
# make sure correct data is in correct file
self.assertEqual(fdata, file(writtenfile, "rb").read())
os.remove(writtenfile)
zipfp.close()
# remove the test file subdirectories
shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def testExtractAll(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED)
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r")
zipfp.extractall()
for fpath, fdata in SMALL_TEST_DATA:
if os.path.isabs(fpath):
outfile = os.path.join(os.getcwd(), fpath[1:])
else:
outfile = os.path.join(os.getcwd(), fpath)
self.assertEqual(fdata, file(outfile, "rb").read())
os.remove(outfile)
zipfp.close()
# remove the test file subdirectories
shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def zip_test_writestr_permissions(self, f, compression):
# Make sure that writestr creates files with mode 0600,
# when it is passed a name rather than a ZipInfo instance.
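        # (ZipInfo.external_attr stores the Unix mode bits in its upper 16
        # bits, hence the 0600 << 16 comparison below.)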
self.makeTestArchive(f, compression)
zipfp = zipfile.ZipFile(f, "r")
zinfo = zipfp.getinfo('strfile')
self.assertEqual(zinfo.external_attr, 0600 << 16)
def test_writestr_permissions(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
def tearDown(self):
os.remove(TESTFN)
os.remove(TESTFN2)
class TestZip64InSmallFiles(unittest.TestCase):
# These tests test the ZIP64 functionality without using large files,
# see test_zipfile64 for proper tests.
def setUp(self):
self._limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = 5
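        # With the limit this low, ordinary small writes already exceed it,
        # exercising the ZIP64 code paths (or raising LargeZipFile when
        # allowZip64 is left at its default of False).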
line_gen = ("Test of zipfile line %d." % i for i in range(0, FIXEDTEST_SIZE))
self.data = '\n'.join(line_gen)
# Make a source file with some lines
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
def largeFileExceptionTest(self, f, compression):
zipfp = zipfile.ZipFile(f, "w", compression)
self.assertRaises(zipfile.LargeZipFile,
zipfp.write, TESTFN, "another"+os.extsep+"name")
zipfp.close()
def largeFileExceptionTest2(self, f, compression):
zipfp = zipfile.ZipFile(f, "w", compression)
self.assertRaises(zipfile.LargeZipFile,
zipfp.writestr, "another"+os.extsep+"name", self.data)
zipfp.close()
def testLargeFileException(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.largeFileExceptionTest(f, zipfile.ZIP_STORED)
self.largeFileExceptionTest2(f, zipfile.ZIP_STORED)
def zipTest(self, f, compression):
# Create the ZIP archive
zipfp = zipfile.ZipFile(f, "w", compression, allowZip64=True)
zipfp.write(TESTFN, "another"+os.extsep+"name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
zipfp.close()
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another"+os.extsep+"name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
zipfp.printdir()
finally:
sys.stdout = stdout
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEquals(len(lines), 4) # Number of files + header
self.assert_('File Name' in lines[0])
self.assert_('Modified' in lines[0])
self.assert_('Size' in lines[0])
fn, date, time, size = lines[1].split()
self.assertEquals(fn, 'another.name')
# XXX: timestamp is not tested
self.assertEquals(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
# Check infolist
infos = zipfp.infolist()
names = [ i.filename for i in infos ]
self.assertEquals(len(names), 3)
self.assert_(TESTFN in names)
self.assert_("another"+os.extsep+"name" in names)
self.assert_("strfile" in names)
for i in infos:
self.assertEquals(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another"+os.extsep+"name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEquals(info.filename, nm)
self.assertEquals(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
zipfp.close()
def testStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_STORED)
if zlib:
def testDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_DEFLATED)
def testAbsoluteArcnames(self):
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED, allowZip64=True)
zipfp.write(TESTFN, "/absolute")
zipfp.close()
zipfp = zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED)
self.assertEqual(zipfp.namelist(), ["absolute"])
zipfp.close()
def tearDown(self):
zipfile.ZIP64_LIMIT = self._limit
os.remove(TESTFN)
os.remove(TESTFN2)
class PyZipFileTests(unittest.TestCase):
def testWritePyfile(self):
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
fn = __file__
if fn.endswith('.pyc') or fn.endswith('.pyo'):
fn = fn[:-1]
zipfp.writepy(fn)
bn = os.path.basename(fn)
self.assert_(bn not in zipfp.namelist())
self.assert_(bn + 'o' in zipfp.namelist() or bn + 'c' in zipfp.namelist())
zipfp.close()
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
fn = __file__
if fn.endswith('.pyc') or fn.endswith('.pyo'):
fn = fn[:-1]
zipfp.writepy(fn, "testpackage")
bn = "%s/%s"%("testpackage", os.path.basename(fn))
self.assert_(bn not in zipfp.namelist())
self.assert_(bn + 'o' in zipfp.namelist() or bn + 'c' in zipfp.namelist())
zipfp.close()
def testWritePythonPackage(self):
import email
packagedir = os.path.dirname(email.__file__)
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
zipfp.writepy(packagedir)
        # Check for a couple of modules at different levels of the hierarchy
names = zipfp.namelist()
self.assert_('email/__init__.pyo' in names or 'email/__init__.pyc' in names)
self.assert_('email/mime/text.pyo' in names or 'email/mime/text.pyc' in names)
def testWritePythonDirectory(self):
os.mkdir(TESTFN2)
try:
fp = open(os.path.join(TESTFN2, "mod1.py"), "w")
fp.write("print 42\n")
fp.close()
fp = open(os.path.join(TESTFN2, "mod2.py"), "w")
fp.write("print 42 * 42\n")
fp.close()
fp = open(os.path.join(TESTFN2, "mod2.txt"), "w")
fp.write("bla bla bla\n")
fp.close()
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
zipfp.writepy(TESTFN2)
names = zipfp.namelist()
self.assert_('mod1.pyc' in names or 'mod1.pyo' in names)
self.assert_('mod2.pyc' in names or 'mod2.pyo' in names)
self.assert_('mod2.txt' not in names)
finally:
shutil.rmtree(TESTFN2)
def testWriteNonPyfile(self):
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
file(TESTFN, 'w').write('most definitely not a python file')
self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
os.remove(TESTFN)
class OtherTests(unittest.TestCase):
def testUnicodeFilenames(self):
zf = zipfile.ZipFile(TESTFN, "w")
zf.writestr(u"foo.txt", "Test for unicode filename")
zf.writestr(u"\xf6.txt", "Test for unicode filename")
self.assertTrue(isinstance(zf.infolist()[0].filename, unicode))
zf.close()
zf = zipfile.ZipFile(TESTFN, "r")
self.assertEqual(zf.filelist[0].filename, "foo.txt")
self.assertEqual(zf.filelist[1].filename, u"\xf6.txt")
zf.close()
def testCreateNonExistentFileForAppend(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
filename = 'testfile.txt'
content = 'hello, world. this is some content.'
try:
zf = zipfile.ZipFile(TESTFN, 'a')
zf.writestr(filename, content)
zf.close()
except IOError, (errno, errmsg):
self.fail('Could not append data to a non-existent zip file.')
self.assert_(os.path.exists(TESTFN))
zf = zipfile.ZipFile(TESTFN, 'r')
self.assertEqual(zf.read(filename), content)
zf.close()
def testCloseErroneousFile(self):
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file. If it doesn't, the traceback
# holds a reference to the ZipFile object and, indirectly, the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open. This is SF bug #412214.
#
fp = open(TESTFN, "w")
fp.write("this is not a legal zip file\n")
fp.close()
try:
zf = zipfile.ZipFile(TESTFN)
except zipfile.BadZipfile:
pass
def testIsZipErroneousFile(self):
# This test checks that the is_zipfile function correctly identifies
# a file that is not a zip file
fp = open(TESTFN, "w")
fp.write("this is not a legal zip file\n")
fp.close()
chk = zipfile.is_zipfile(TESTFN)
self.assert_(chk is False)
def testIsZipValidFile(self):
# This test checks that the is_zipfile function correctly identifies
# a file that is a zip file
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
chk = zipfile.is_zipfile(TESTFN)
self.assert_(chk is True)
def testNonExistentFileRaisesIOError(self):
# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
self.assertRaises(IOError, zipfile.ZipFile, TESTFN)
def testClosedZipRaisesRuntimeError(self):
# Verify that testzip() doesn't swallow inappropriate exceptions.
data = StringIO()
zipf = zipfile.ZipFile(data, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
# This is correct; calling .read on a closed ZipFile should throw
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
self.assertRaises(RuntimeError, zipf.read, "foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt")
self.assertRaises(RuntimeError, zipf.testzip)
self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
file(TESTFN, 'w').write('zipfile test data')
self.assertRaises(RuntimeError, zipf.write, TESTFN)
def test_BadConstructorMode(self):
# Check that bad modes passed to ZipFile constructor are caught
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "q")
def test_BadOpenMode(self):
# Check that bad modes passed to ZipFile.open are caught
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipf = zipfile.ZipFile(TESTFN, mode="r")
# read the data to make sure the file is there
zipf.read("foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt", "q")
zipf.close()
def test_Read0(self):
# Check that calling read(0) on a ZipExtFile object returns an empty
# string and doesn't advance file pointer
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# read the data to make sure the file is there
f = zipf.open("foo.txt")
for i in xrange(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(), "O, for a Muse of Fire!")
zipf.close()
def test_OpenNonexistentItem(self):
# Check that attempting to call open() for an item that doesn't
# exist in the archive raises a RuntimeError
zipf = zipfile.ZipFile(TESTFN, mode="w")
self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
def test_BadCompressionMode(self):
# Check that bad compression methods passed to ZipFile.open are caught
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
def test_NullByteInFilename(self):
# Check that a filename containing a null byte is properly terminated
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.writestr("foo.txt\x00qqq", "O, for a Muse of Fire!")
self.assertEqual(zipf.namelist(), ['foo.txt'])
def test_StructSizes(self):
# check that ZIP internal structure sizes are calculated correctly
self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
def testComments(self):
# This test checks that comments on the archive are handled properly
# check default comment is empty
zipf = zipfile.ZipFile(TESTFN, mode="w")
self.assertEqual(zipf.comment, '')
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, '')
zipfr.close()
# check a simple short comment
comment = 'Bravely taking to his feet, he beat a very brave retreat.'
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.comment = comment
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, comment)
zipfr.close()
# check a comment of max length
comment2 = ''.join(['%d' % (i**3 % 10) for i in xrange((1 << 16)-1)])
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.comment = comment2
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, comment2)
zipfr.close()
# check a comment that is too long is truncated
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.comment = comment2 + 'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipfr = zipfile.ZipFile(TESTFN, mode="r")
self.assertEqual(zipfr.comment, comment2)
zipfr.close()
def tearDown(self):
support.unlink(TESTFN)
support.unlink(TESTFN2)
class DecryptionTests(unittest.TestCase):
# This test checks that ZIP decryption works. Since the library does not
# support encryption at the moment, we use a pre-generated encrypted
# ZIP file
data = (
'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
'\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
'\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
'\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
'\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
'\x00\x00L\x00\x00\x00\x00\x00' )
data2 = (
'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
'\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
'\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
'\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
'\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
'\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )
plain = 'zipfile.py encryption test'
plain2 = '\x00'*512
def setUp(self):
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
self.zip = zipfile.ZipFile(TESTFN, "r")
fp = open(TESTFN2, "wb")
fp.write(self.data2)
fp.close()
self.zip2 = zipfile.ZipFile(TESTFN2, "r")
def tearDown(self):
self.zip.close()
os.unlink(TESTFN)
self.zip2.close()
os.unlink(TESTFN2)
def testNoPassword(self):
        # Reading the encrypted file without a password
        # must raise a RuntimeError
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
def testBadPassword(self):
self.zip.setpassword("perl")
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.zip2.setpassword("perl")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
def testGoodPassword(self):
self.zip.setpassword("python")
self.assertEquals(self.zip.read("test.txt"), self.plain)
self.zip2.setpassword("12345")
self.assertEquals(self.zip2.read("zero"), self.plain2)
class TestsWithRandomBinaryFiles(unittest.TestCase):
def setUp(self):
datacount = randint(16, 64)*1024 + randint(1, 1024)
self.data = ''.join((struct.pack('<f', random()*randint(-1000, 1000)) for i in xrange(datacount)))
# Make a source file with some lines
fp = open(TESTFN, "wb")
fp.write(self.data)
fp.close()
def tearDown(self):
support.unlink(TESTFN)
support.unlink(TESTFN2)
def makeTestArchive(self, f, compression):
# Create the ZIP archive
zipfp = zipfile.ZipFile(f, "w", compression)
zipfp.write(TESTFN, "another"+os.extsep+"name")
zipfp.write(TESTFN, TESTFN)
zipfp.close()
def zipTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
testdata = zipfp.read(TESTFN)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
self.assertEqual(zipfp.read("another"+os.extsep+"name"), self.data)
zipfp.close()
def testStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipTest(f, zipfile.ZIP_STORED)
def zipOpenTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
zipdata1 = []
zipopen1 = zipfp.open(TESTFN)
while 1:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
zipopen2 = zipfp.open("another"+os.extsep+"name")
while 1:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
testdata1 = ''.join(zipdata1)
self.assertEqual(len(testdata1), len(self.data))
self.assertEqual(testdata1, self.data)
testdata2 = ''.join(zipdata2)
self.assertEqual(len(testdata1), len(self.data))
self.assertEqual(testdata1, self.data)
zipfp.close()
def testOpenStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipOpenTest(f, zipfile.ZIP_STORED)
def zipRandomOpenTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r", compression)
zipdata1 = []
zipopen1 = zipfp.open(TESTFN)
while 1:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
testdata = ''.join(zipdata1)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
zipfp.close()
def testRandomOpenStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zipRandomOpenTest(f, zipfile.ZIP_STORED)
class TestsWithMultipleOpens(unittest.TestCase):
def setUp(self):
# Create the ZIP archive
zipfp = zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED)
zipfp.writestr('ones', '1'*FIXEDTEST_SIZE)
zipfp.writestr('twos', '2'*FIXEDTEST_SIZE)
zipfp.close()
def testSameFile(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
zipf = zipfile.ZipFile(TESTFN2, mode="r")
zopen1 = zipf.open('ones')
zopen2 = zipf.open('ones')
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, data2)
zipf.close()
def testDifferentFile(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
zipf = zipfile.ZipFile(TESTFN2, mode="r")
zopen1 = zipf.open('ones')
zopen2 = zipf.open('twos')
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
zipf.close()
def testInterleaved(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
zipf = zipfile.ZipFile(TESTFN2, mode="r")
zopen1 = zipf.open('ones')
data1 = zopen1.read(500)
zopen2 = zipf.open('twos')
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
zipf.close()
def tearDown(self):
os.remove(TESTFN2)
class TestWithDirectory(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN2)
def testExtractDir(self):
zipf = zipfile.ZipFile(findfile("zipdir.zip"))
zipf.extractall(TESTFN2)
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))
def testStoreDir(self):
os.mkdir(os.path.join(TESTFN2, "x"))
zipf = zipfile.ZipFile(TESTFN, "w")
zipf.write(os.path.join(TESTFN2, "x"), "x")
self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
def tearDown(self):
shutil.rmtree(TESTFN2)
if os.path.exists(TESTFN):
os.remove(TESTFN)
class UniversalNewlineTests(unittest.TestCase):
def setUp(self):
self.line_gen = ["Test of zipfile line %d." % i for i in xrange(FIXEDTEST_SIZE)]
self.seps = ('\r', '\r\n', '\n')
self.arcdata, self.arcfiles = {}, {}
for n, s in enumerate(self.seps):
self.arcdata[s] = s.join(self.line_gen) + s
self.arcfiles[s] = '%s-%d' % (TESTFN, n)
open(self.arcfiles[s], "wb").write(self.arcdata[s])
def makeTestArchive(self, f, compression):
# Create the ZIP archive
zipfp = zipfile.ZipFile(f, "w", compression)
for fn in self.arcfiles.values():
zipfp.write(fn, fn)
zipfp.close()
def readTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
for sep, fn in self.arcfiles.items():
zipdata = zipfp.open(fn, "rU").read()
self.assertEqual(self.arcdata[sep], zipdata)
zipfp.close()
def readlineTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
for sep, fn in self.arcfiles.items():
zipopen = zipfp.open(fn, "rU")
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line + '\n')
zipfp.close()
def readlinesTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
for sep, fn in self.arcfiles.items():
ziplines = zipfp.open(fn, "rU").readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line + '\n')
zipfp.close()
def iterlinesTest(self, f, compression):
self.makeTestArchive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
for sep, fn in self.arcfiles.items():
for line, zipline in zip(self.line_gen, zipfp.open(fn, "rU")):
self.assertEqual(zipline, line + '\n')
zipfp.close()
def testReadStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readTest(f, zipfile.ZIP_STORED)
def testReadlineStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readlineTest(f, zipfile.ZIP_STORED)
def testReadlinesStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readlinesTest(f, zipfile.ZIP_STORED)
def testIterlinesStored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.iterlinesTest(f, zipfile.ZIP_STORED)
if zlib:
def testReadDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readTest(f, zipfile.ZIP_DEFLATED)
def testReadlineDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readlineTest(f, zipfile.ZIP_DEFLATED)
def testReadlinesDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readlinesTest(f, zipfile.ZIP_DEFLATED)
def testIterlinesDeflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.iterlinesTest(f, zipfile.ZIP_DEFLATED)
def tearDown(self):
for sep, fn in self.arcfiles.items():
os.remove(fn)
support.unlink(TESTFN)
support.unlink(TESTFN2)
def test_main():
run_unittest(TestsWithSourceFile, TestZip64InSmallFiles, OtherTests,
PyZipFileTests, DecryptionTests, TestsWithMultipleOpens,
TestWithDirectory,
UniversalNewlineTests, TestsWithRandomBinaryFiles)
if __name__ == "__main__":
test_main()
|
|
"""Substitute for unittest
If a class inherits Tester, calling its method run() on an instance runs alls
the methods starting with "test_". Before running the method, executes method
setUp() if present.
If the test failed, print the exception, and the line in the script where the
exception happened.
"""
import re
import sys
import time
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, msg):
raise Exception(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
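# Usage sketch (hypothetical): Tester.assertRaises below funnels through
# handle(), so both call styles are supported:
#
#   t = Tester()
#   t.assertRaises(ZeroDivisionError, lambda: 1 / 0)   # direct-call form
#   with t.assertRaises(ZeroDivisionError):            # context-manager form
#       1 / 0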
class Tester:
def assertEqual(self, result, expected, msg=None):
if result != expected:
if msg is not None:
raise AssertionError(msg)
raise AssertionError('assertEqual, expected %s, got %s'
%(expected, result))
def assertNotEqual(self, result, expected):
if result == expected:
raise AssertionError('assertNotEqual, expected %s, got %s'
%(expected, result))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertIs(self, a, b):
        if a is not b:
raise AssertionError('%s is %s should be true' %(a,b))
def assertIsInstance(self, obj, klass):
if not isinstance(obj, klass):
raise AssertionError('%s is not an instance of %s' %(obj, klass))
def assertIsNot(self, a, b):
if a is b:
raise AssertionError('%s is %s should be false' %(a,b))
def assertIn(self, item, container):
        if item not in container:
raise AssertionError('%s should be in %s' %(item, container))
def assertNotIn(self, item, container):
if item in container:
raise AssertionError('%s should not be in %s' %(item, container))
def assertTrue(self, item, msg=None):
if item is not True:
raise AssertionError(msg or '%s is not True' %item)
def assertFalse(self, item, msg=None):
if item is not False:
raise AssertionError(msg or '%s is not False' %item)
def fail(self, *args):
raise Exception(str(args))
def run(self, *methods):
if not methods:
methods = [m for m in dir(self) if m.startswith('test_')
and callable(getattr(self, m))]
report = TestReport(type(self).__name__)
for method in methods:
if method.startswith('test'):
f = getattr(self, method)
lineno = f.__code__.co_firstlineno
if hasattr(self, 'setUp'):
self.setUp()
t0 = time.time()
try:
f()
report.add(method[5:], lineno,
round((time.time()-t0)*1000), 'ok')
except SkipTest as exc:
print('skip test', exc)
report.add(method[5:], lineno,
round((time.time()-t0)*1000), 'skipped')
except Exception as exc:
errmsg = str(exc)
errline = '<nc>'
tb = sys.exc_info()[2]
try:
fname = tb.tb_frame.f_code.co_filename
except:
fname = '<nc>'
while True:
if fname == type(self).__module__:
errline = tb.tb_lineno
break
tb = tb.tb_next
if tb is None:
break
fname = tb.tb_frame.f_code.co_filename
report.add(method[5:], lineno,
round((time.time()-t0)*1000), 'fail',
'line {}\n{}'.format(errline, errmsg))
return report
class MethodReport:
"""Stores the results on a method : line number, execution time, status
(one of "ok", "skipped", "error") and optional additional information"""
def __init__(self, lineno, time, status, args):
self.lineno = lineno
self.time = time
self.status = status
self.args = args
class TestReport:
"""Used to store the results of tests on a class"""
def __init__(self, class_name):
self.class_name = class_name
self.records = {}
def add(self, method, lineno, time, status, *args):
self.records[method] = MethodReport(lineno, time, status, args)
def format_html(self, name="test_report"):
"""Returns the report as an HTML table"""
html = ('<table id="%s" class="report">\n' %name +
'<tr class="header"><th>Test</th><th>Line</th><th>Time (ms)</th>'+
'<th>Status</th><th>Comments</th></tr>\n')
methods = list(self.records.keys())
methods.sort()
for method in methods:
value = self.records[method]
html += ('<tr class="method"><td>{0}</td>'+
'<td class="number">{1.lineno}</td>'+
'<td class="number">{1.time}</td>'+
'<td class="report_cell">{1.status}</td>').format(method, value)
if value.args:
html += '<td><pre>{}</pre></td>'.format(value.args[0])
else:
html += '<td> </td>'
html += '</tr>\n'
return html + '</table>'
def __str__(self):
res = 'Class %s\n' %self.class_name
methods = list(self.records.keys())
methods.sort()
for method in methods:
report = self.records[method]
            res += '{0:15} {1.status} {1.lineno}\n'.format(method, report)
            if report.args:
                res += '    {}\n'.format(report.args[0])
return res
TestCase = Tester # unittest interface
tester = Tester()
assertRaises = tester.assertRaises
class SkipTest(Exception):
pass
def skip(msg):
def decorator(f):
def g(*args, **kw):
print('raise skip test')
raise SkipTest(msg)
return g
return decorator
def skipUnless(condition, msg):
if condition:
def decorator(f):
return f
else:
def decorator(f):
def g(*args, **kw):
print('raise skip test')
raise SkipTest(msg)
return g
return decorator
class Support:
def cpython_only(self, func):
def f(*args, **kw):
raise SkipTest('CPython test only')
return f
def requires_IEEE_754(self, func):
return func
support = Support()
if __name__=='__main__':
t = 1, 2
assertRaises(TypeError, t, '__setitem__', 0, 1)
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import tempfile
import shutil
from pymatgen.util.testing import PymatgenTest
from monty.functools import lazy_property
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.abinit import *
from pymatgen.io.abinit.flows import *
from pymatgen.io.abinit.works import *
from pymatgen.io.abinit.tasks import *
from pymatgen.io.abinit.pseudos import Pseudo
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
def ref_file(filename):
return os.path.join(_test_dir, filename)
class FakeAbinitInput(object):
"""Emulate an Abinit input."""
@lazy_property
def pseudos(self):
return [Pseudo.as_pseudo(ref_file("14si.pspnc"))]
@lazy_property
def structure(self):
coords = []
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
return Structure(lattice, ["Si", "Si"], coords)
def get(self, key, default=None):
"""The real AbinitInput is a dict-like object."""
if default is not None: return default
return key
class FlowUnitTest(PymatgenTest):
"""Provides helper function for testing Abinit flows."""
MANAGER = """\
policy:
autoparal: 1
qadapters:
- &batch
priority: 1
queue:
qtype: slurm
qname: Oban
qparams:
mail_user: nobody@nowhere
limits:
timelimit: 0:20:00
min_cores: 4
max_cores: 12
#condition: {"$eq": {omp_threads: 2}}
hardware:
num_nodes: 10
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
job:
modules:
- intel/compilerpro/13.0.1.117
- fftw3/intel/3.3
shell_env:
PATH: /home/user/tmp_intel13/src/98_main/:/home/user//NAPS/intel13/bin:$PATH
LD_LIBRARY_PATH: /home/user/NAPS/intel13/lib:$LD_LIBRARY_PATH
mpi_runner: mpirun
# Connection to the MongoDb database (optional)
db_connector:
database: abinit
collection: test
#host: 0.0.0.0
#port: 8080
#user: gmatteo
#password: helloworld
batch_adapter: *batch
"""
def setUp(self):
"""Initialization phase."""
super(FlowUnitTest, self).setUp()
# Temporary directory for the flow.
self.workdir = tempfile.mkdtemp()
# Create the TaskManager.
self.manager = TaskManager.from_string(self.MANAGER)
# Fake input file
self.fake_input = FakeAbinitInput()
def tearDown(self):
"""Delete workdir"""
shutil.rmtree(self.workdir)
class FlowTest(FlowUnitTest):
def test_base(self):
"""Testing Flow..."""
aequal, atrue, afalse = self.assertEqual, self.assertTrue, self.assertFalse
flow = Flow(workdir=self.workdir, manager=self.manager)
# Build a work with a task
work = flow.register_task(self.fake_input)
assert work.is_work
task0_w0 = work[0]
atrue(task0_w0.is_task)
print(task0_w0.status.colored)
atrue(len(flow) == 1)
aequal(flow.num_tasks, 1)
atrue(flow.has_db)
#print(task0_w0.input_structure)
print(task0_w0.make_input)
# Task history
assert len(task0_w0.history) == 0
task0_w0.history.info("Hello %s", "world")
assert len(task0_w0.history) == 1
print(task0_w0.history)
record = task0_w0.history.pop()
print(record, repr(record))
assert record.get_message(asctime=False) == "Hello world"
assert len(task0_w0.history) == 0
assert flow.select_tasks(nids=task0_w0.node_id)[0] == task0_w0
assert flow.select_tasks(wslice=slice(0,1,1)) == [task0_w0]
# Build a workflow containing two tasks depending on task0_w0
work = Work()
atrue(work.is_work)
work.register(self.fake_input)
work.register(self.fake_input)
aequal(len(work), 2)
flow.register_work(work, deps={task0_w0: "WFK"})
atrue(flow.is_flow)
aequal(len(flow), 2)
# Add another work without dependencies.
task0_w2 = flow.register_task(self.fake_input)[0]
atrue(len(flow) == 3)
afalse(flow.is_work)
# Allocate internal tables
flow.allocate()
        # Check dependencies.
atrue(flow[1].depends_on(task0_w0))
atrue(flow[1][0].depends_on(task0_w0))
atrue(flow[1][0] in task0_w0.get_children())
atrue(task0_w0 in flow[1][0].get_parents())
afalse(flow[2][0].depends_on(task0_w0))
afalse(flow[2][0] in task0_w0.get_children())
afalse(task0_w0 in flow[2][0].get_parents())
aequal(flow[1].pos, 1)
aequal(flow[1][0].pos, (1, 0))
aequal(flow[2][0].pos, (2, 0))
afalse(flow.all_ok)
aequal(flow.num_tasks, 4)
aequal(flow.ncores_used, 0)
# API for iterations
aequal(len(list(flow.iflat_tasks(status="Initialized"))), sum(len(work) for work in flow))
aequal(list(flow.iflat_tasks(nids=task0_w0.node_id)), [task0_w0])
aequal([task0_w0], flow.tasks_from_nids(task0_w0.node_id))
aequal([(0, 0)], flow.wti_from_nids(task0_w0.node_id))
aequal([task0_w2], flow.tasks_from_nids([task0_w2.node_id]))
aequal([(2, 0)], flow.wti_from_nids([task0_w2.node_id]))
# Check for deadlocks
flow.check_dependencies()
# Save the flow in pickle format.
flow.build_and_pickle_dump()
# Find the pickle file in workdir and recreate the flow.
same_flow = Flow.pickle_load(self.workdir)
aequal(same_flow, flow)
# to/from string
# FIXME This does not work with py3k
#s = flow.pickle_dumps(protocol=0)
#same_flow = Flow.pickle_loads(s)
#aequal(same_flow, flow)
self.assertMSONable(flow)
flow.show_info()
flow.show_summary()
flow.show_inputs()
flow.show_inputs(varnames="znucl")
# Test show_status
flow.show_status()
flow.show_event_handlers()
def test_workdir(self):
"""Testing if one can use workdir=None in flow.__init__ and then flow.allocate(workdir)."""
flow = Flow(workdir=None, manager=self.manager)
flow.register_task(self.fake_input)
#flow.register_work(work)
work = Work()
work.register_scf_task(self.fake_input)
flow.register_work(work)
        # If flow.workdir is None, we should use flow.allocate(workdir)
with self.assertRaises(RuntimeError): flow.allocate()
tmpdir = tempfile.mkdtemp()
flow.allocate(workdir=tmpdir)
print(flow)
assert len(flow) == 2
flow.build()
for i, work in enumerate(flow):
assert work.workdir == os.path.join(tmpdir, "w%d" % i)
for t, task in enumerate(work):
assert task.workdir == os.path.join(work.workdir, "t%d" % t)
class TestFlowInSpectatorMode(FlowUnitTest):
def test_spectator(self):
flow = Flow(workdir=self.workdir, manager=self.manager)
work0 = Work()
work0.register_scf_task(self.fake_input)
work0.register_scf_task(self.fake_input)
work1 = Work()
work1.register_scf_task(self.fake_input)
flow.register_work(work0)
flow.register_work(work1)
flow.disconnect_signals()
flow.disconnect_signals()
flow.connect_signals()
flow.connect_signals()
for mode in [False, True]:
flow.set_spectator_mode(mode=mode)
assert flow.in_spectator_mode == mode
for node in flow.iflat_nodes():
assert node.in_spectator_mode == mode
assert len(list(flow.iflat_nodes())) == 1 + len(flow.works) + sum(len(work) for work in flow)
assert flow.node_from_nid(flow.node_id) == flow
flow.set_spectator_mode(mode=False)
flow.build_and_pickle_dump()
        # pickle load always returns a flow in spectator mode.
flow = Flow.pickle_load(flow.workdir)
assert flow.in_spectator_mode
#with self.assertRaises(flow.SpectatorError): flow.pickle_dump()
#with self.assertRaises(flow.SpectatorError): flow.make_scheduler().start()
work = flow[0]
assert work.send_signal(work.S_OK) is None
#with self.assertRaises(work.SpectatorError): work.on_ok()
#with self.assertRaises(work.SpectatorError): work.on_all_ok()
task = work[0]
assert task.send_signal(task.S_OK) is None
#with self.assertRaises(task.SpectatorError): task._on_done()
#with self.assertRaises(task.SpectatorError): task.on_ok()
#with self.assertRaises(task.SpectatorError): task._on_ok()
class TestBatchLauncher(FlowUnitTest):
def test_batchlauncher(self):
"""Testing BatchLauncher methods."""
# Create the TaskManager.
manager = TaskManager.from_string(self.MANAGER)
print("batch_adapter", manager.batch_adapter)
assert manager.batch_adapter is not None
def build_flow_with_name(name):
"""Build a flow with workdir None and the given name."""
flow = Flow(workdir=None, manager=self.manager)
flow.set_name(name)
flow.register_task(self.fake_input)
work = Work()
work.register_scf_task(self.fake_input)
flow.register_work(work)
return flow
from pymatgen.io.abinit.launcher import BatchLauncher
tmpdir = tempfile.mkdtemp()
batch = BatchLauncher(workdir=tmpdir, manager=manager)
print(batch)
flow0 = build_flow_with_name("flow0")
flow1 = build_flow_with_name("flow1")
flow2_same_name = build_flow_with_name("flow1")
batch.add_flow(flow0)
# Cannot add the same flow twice.
with self.assertRaises(batch.Error):
batch.add_flow(flow0)
batch.add_flow(flow1)
# Cannot add two flows with the same name.
with self.assertRaises(batch.Error):
batch.add_flow(flow2_same_name)
batch.submit(dry_run=True)
for i, flow in enumerate([flow0, flow1]):
assert flow.workdir == os.path.join(batch.workdir, "flow%d" % i)
batch.pickle_dump()
batch_from_pickle = BatchLauncher.pickle_load(batch.workdir)
assert all(f1 == f2 for f1, f2 in zip(batch.flows, batch_from_pickle.flows))
if __name__ == '__main__':
import unittest
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves content for "script" handlers using the Java runtime."""
import os
import os.path
import sys
import tempfile
import threading
import google
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import http_runtime
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import java_application
from google.appengine.tools.devappserver2 import util
# TODO: figure out what's needed to react to file changes
class JavaRuntimeInstanceFactory(instance.InstanceFactory):
"""A factory that creates new Java runtime Instances."""
START_URL_MAP = appinfo.URLMap(
url='/_ah/start',
script='_java_app',
login='admin')
WARMUP_URL_MAP = appinfo.URLMap(
url='/_ah/warmup',
script='_java_app',
login='admin')
FILE_CHANGE_INSTANCE_RESTART_POLICY = instance.ALWAYS
def __init__(self, request_data, runtime_config_getter, module_configuration):
"""Initializer for JavaRuntimeInstanceFactory.
Args:
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
runtime_config_getter: A function that can be called without arguments
and returns the runtime_config_pb2.RuntimeConfig containing the
configuration for the runtime.
module_configuration: An application_configuration.ModuleConfiguration
instance representing the configuration of the module that owns the
runtime.
"""
super(JavaRuntimeInstanceFactory, self).__init__(request_data, 1)
self._runtime_config_getter = runtime_config_getter
self._module_configuration = module_configuration
self._application_lock = threading.Lock()
self._java_application = java_application.JavaApplication(
self._module_configuration)
self._for_jetty9 = (module_configuration.runtime == 'vm' or
util.is_env_flex(module_configuration.env))
self._java_command = self._make_java_command()
def _make_java_command(self):
# We should be in .../google/appengine/tools/devappserver2/java_runtime.py
# and we want to find .../google/appengine/tools and thence
# .../google/appengine/tools/java/lib
java_home = os.environ.get('JAVA_HOME')
if java_home and os.path.exists(java_home):
java_bin = os.path.join(java_home, 'bin/java')
else:
java_bin = 'java'
java_dir = os.environ.get('APP_ENGINE_JAVA_PATH', None)
tools_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not java_dir or not os.path.exists(java_dir):
java_dir = os.path.join(tools_dir, 'java')
java_lib_dir = os.path.join(java_dir, 'lib')
assert os.path.isdir(java_lib_dir), java_lib_dir
class_path = os.path.join(java_lib_dir, 'appengine-tools-api.jar')
assert os.path.isfile(class_path), class_path
jdk_overrides_jar = os.path.join(java_lib_dir, 'override',
'appengine-dev-jdk-overrides.jar')
assert os.path.isfile(jdk_overrides_jar), jdk_overrides_jar
if self._for_jetty9:
jetty_home = os.environ.get('APP_ENGINE_JETTY_HOME', None)
jetty_base = os.environ.get('APP_ENGINE_JETTY_BASE', None)
if not jetty_home:
jetty_home = os.path.join(java_lib_dir, 'java-managed-vm',
'appengine-java-vmruntime')
if not jetty_base:
jetty_base = os.path.join(java_lib_dir, 'jetty-base-sdk')
args = [
java_bin,
('-Dgcloud.java.application=%s' %
self._module_configuration.application_root),
'-Djetty.home=%s' % jetty_home,
'-Djetty.base=%s' % jetty_base,
]
args.extend(self._runtime_config_getter().java_config.jvm_args)
args.append('-jar')
args.append('%s/start.jar' % jetty_home)
else:
args = [
java_bin,
'-cp', class_path,
'-Dappengine.sdk.root=' + java_dir,
'-Dappengine.runtime=' + self._module_configuration.runtime,
'-Xbootclasspath/p:' + jdk_overrides_jar,
]
if self._module_configuration.runtime.startswith('java8'):
args.append('-Duse_jetty9_runtime=true')
if sys.platform == 'darwin':
args.append('-XstartOnFirstThread')
args.extend(self._runtime_config_getter().java_config.jvm_args)
args.append(
'com.google.appengine.tools.development.devappserver2.'
'StandaloneInstance')
return args
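  # Rough shape of the assembled command line (a sketch; actual paths depend on
  # the environment and on the configured jvm_args):
  #   Jetty 9 / flex:   java -Dgcloud.java.application=<app root>
  #                          -Djetty.home=<jetty home> -Djetty.base=<jetty base>
  #                          <jvm_args> -jar <jetty home>/start.jar
  #   classic runtime:  java -cp appengine-tools-api.jar
  #                          -Dappengine.sdk.root=<sdk java dir> <flags>
  #                          ...devappserver2.StandaloneInstance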
def get_restart_directories(self):
"""Returns a list of directories where changes trigger a restart.
Returns:
A list of directories where changes trigger a restart.
"""
# TODO: implement
return []
def files_changed(self):
"""Called when a file relevant to the factory *might* have changed."""
# TODO: implement
def configuration_changed(self, config_changes):
"""Called when the configuration of the module has changed.
Args:
      config_changes: A set containing the changes that occurred. See the
*_CHANGED constants in the application_configuration module.
"""
# TODO: implement
def new_instance(self, instance_id, expect_ready_request=False):
"""Create and return a new Instance.
Args:
instance_id: A string or integer representing the unique (per module) id
of the instance.
expect_ready_request: If True then the instance will be sent a special
request (i.e. /_ah/warmup or /_ah/start) before it can handle external
requests.
Returns:
The newly created instance.Instance.
"""
def instance_config_getter():
runtime_config = self._runtime_config_getter()
runtime_config.instance_id = str(instance_id)
return runtime_config
def extra_args_getter(port):
return 'jetty.port=%s' % port
env = self._java_application.get_environment()
runtime_config = instance_config_getter()
for env_entry in runtime_config.environ:
env[env_entry.key] = env_entry.value
if self._for_jetty9:
start_process_flavor = http_runtime.START_PROCESS_REVERSE_NO_FILE
env['APP_ENGINE_LOG_CONFIG_PATTERN'] = (
os.path.join(tempfile.mkdtemp(suffix='gae'), 'log.%g'))
else:
start_process_flavor = http_runtime.START_PROCESS_FILE
with self._application_lock:
proxy = http_runtime.HttpRuntimeProxy(
self._java_command,
instance_config_getter,
self._module_configuration,
env=env,
start_process_flavor=start_process_flavor,
extra_args_getter=extra_args_getter)
return instance.Instance(self.request_data,
instance_id,
proxy,
self.max_concurrent_requests,
self.max_background_threads,
expect_ready_request)
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 15:34:12 2015
@author: Numan Laanait -- [email protected]
"""
from __future__ import division, print_function, absolute_import
from skimage.feature import match_descriptors, register_translation
from skimage.measure import ransac
from skimage.transform import warp, SimilarityTransform
import warnings
import h5py
import numpy as np
import skimage.feature
import multiprocessing as mp
class ImageTransformation(object):
#TODO: io operations and merging the 2 classes -Oleg
# Oleg: reading, shaping data from h5.
# Figure out storage of features and descriptors as well as reading.
# Merge all methods from FeatureExtraction and GeometricTransform.
# Don't merge ancillary functions and transforms.
pass
# TODO: Docstrings following numpy standard.
#### Functions
def pickle_keypoints(keypoints):
"""
    Convert a sequence of cv2 KeyPoint objects into an (N, 2) numpy array
    of (row, col) coordinates, giving a simple picklable representation.
"""
kpArray = np.array([])
for point in keypoints:
kpArray = np.append(kpArray, [point.pt[1], point.pt[0]])
kpArray = np.reshape(kpArray, (int(kpArray.size / 2), 2))
return kpArray
# Class to do feature extraction. This is a wrapper on scikit-image and openCV feature extraction detectors.
# TODO: Add support for opencV or implement sift.
# TODO: Add io operations for extracted features.
# TODO: Memory checking, since some of the features are quite large.
class FeatureExtractorParallel(object):
"""
    This object holds a data set and provides detector-based feature
    extraction methods for it.
    Begin by choosing a feature detector and the computer vision library
    that provides it.
Parameters
----------
detector_name : (string)
name of detector.
lib : (string)
computer vision library to use (opencv or skimage)
The following can be used for:
lib = opencv: SIFT, ORB, SURF
lib = skimage: ORB, BRIEF, CENSURE
"""
def __init__(self, detector_name, lib):
self.data = []
self.lib = lib
try:
if self.lib == 'opencv':
pass
# detector = cv2.__getattribute__(detector_name)
elif self.lib == 'skimage':
                self.detector = skimage.feature.__getattribute__(detector_name)()  # instantiate the detector class with default parameters
except AttributeError:
print('Error: The Library does not contain the specified detector')
def clearData(self):
del self.data
self.data = []
def loadData(self, dataset):
"""
This is a Method that loads h5 Dataset to be corrected.
Parameters
----------
dataset : h5py.Dataset
Dataset to be corrected
"""
if not isinstance(dataset, h5py.Dataset):
warnings.warn('Error: Data must be an h5 Dataset object')
else:
self.data = dataset
dim = int(np.sqrt(self.data.shape[-1]))
self.data = self.data.reshape(-1, dim, dim)
def getData(self):
"""
This is a Method that returns the loaded h5 Dataset.
"""
return self.data
def getFeatures(self, **kwargs):
"""
This is a Method that returns features (keypoints and descriptors)
that are obtained by using the FeatureExtractor.Detector object.
Parameters
----------
processors : int, optional
Number of processors to use, default = 1.
mask : boolean, optional, default False.
            Whether to restrict detection to a square window of size
            window_size centered at origin (both passed as keyword arguments).
Returns
-------
keypts :
keypoints
descs :
descriptors
"""
detector = self.detector
dset = self.data
lib = self.lib
processes = kwargs.get('processors', 1)
mask = kwargs.get('mask', False)
origin = kwargs.get('origin', [0, 0])
winSize = kwargs.get('window_size', 0)
if mask:
def mask_func(x, winSize):
                x[origin[0] - winSize // 2: origin[0] + winSize // 2,
                  origin[1] - winSize // 2: origin[1] + winSize // 2] = 2
x = x - 1
return x
mask_ind = np.mask_indices(dset.shape[-1], mask_func, winSize)
self.data = np.array([imp[mask_ind].reshape(winSize, winSize) for imp in dset])
# detect and compute keypoints
def detect(image):
if lib == 'opencv':
image = (image - image.mean()) / image.std()
image = image.astype('uint8')
k_obj, d_obj = detector.detectAndCompute(image, None)
keypts, descs = pickle_keypoints(k_obj), pickle_keypoints(d_obj)
elif lib == 'skimage':
imp = (image - image.mean()) / np.std(image)
imp[imp < 0] = 0
                imp = imp.astype('float32')
detector.detect_and_extract(imp)
keypts, descs = detector.keypoints, detector.descriptors
return keypts, descs
# start pool of workers
print('launching %i kernels...' % (processes))
pool = mp.Pool(processes)
tasks = [(imp) for imp in self.data]
chunk = int(self.data.shape[0] / processes)
jobs = pool.imap(detect, tasks, chunksize=chunk)
# get keypoints and descriptors
results = []
print('Extracting features...')
try:
for j in jobs:
results.append(j)
except ValueError:
            warnings.warn('ValueError raised during feature detection; the detector input parameters are probably invalid for these images.')
keypts = [itm[0].astype('int') for itm in results]
desc = [itm[1] for itm in results]
# close the pool
print('Closing down the kernels... \n')
pool.close()
return keypts, desc
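# Hedged usage sketch (not part of the original module): typical driving code
# for FeatureExtractorParallel with the skimage ORB detector. `h5_dataset` is
# assumed to be an h5py.Dataset of flattened square images, e.g. of shape
# (n_images, dim * dim), as expected by loadData.
def _example_parallel_feature_extraction(h5_dataset):
    extractor = FeatureExtractorParallel('ORB', lib='skimage')
    extractor.loadData(h5_dataset)                 # reshaped to (n, dim, dim)
    keypts, descs = extractor.getFeatures(processors=2)
    return keypts, descs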
class FeatureExtractorSerial(object):
"""
    This object holds a data set and provides detector-based feature
    extraction methods for it.
    Begin by choosing a feature detector and the computer vision library
    that provides it.
Parameters
----------
detector_name : (string)
name of detector.
lib : (string)
computer vision library to use (opencv or skimage)
The following can be used for:
lib = opencv: SIFT, ORB, SURF
lib = skimage: ORB, BRIEF, CENSURE
"""
def __init__(self, detector_name, lib):
self.data = []
self.lib = lib
try:
if self.lib == 'opencv':
pass
# detector = cv2.__getattribute__(detector_name)
elif self.lib == 'skimage':
                self.detector = skimage.feature.__getattribute__(detector_name)()  # instantiate the detector class with default parameters
except AttributeError:
print('Error: The Library does not contain the specified detector')
def clearData(self):
del self.data
self.data = []
def loadData(self, dataset):
"""
This is a Method that loads h5 Dataset to be corrected.
Parameters
----------
dataset : h5py.Dataset
"""
if not isinstance(dataset, h5py.Dataset):
warnings.warn('Error: Data must be an h5 Dataset object')
else:
self.data = dataset
dim = int(np.sqrt(self.data.shape[-1]))
self.data = self.data.reshape(-1, dim, dim)
def getData(self):
"""
This is a Method that returns the loaded h5 Dataset.
"""
return self.data
def getFeatures(self, **kwargs):
"""
This is a Method that returns features (keypoints and descriptors)
that are obtained by using the FeatureExtractor.Detector object.
Parameters
----------
mask : boolean, optional
            Whether to restrict detection to a square window of size
            window_size centered at origin (both passed as keyword
            arguments), default False.
Returns
-------
        keypts :
            keypoints
        descs :
            descriptors
"""
detector = self.detector
dset = self.data
lib = self.lib
mask = kwargs.get('mask', False)
origin = kwargs.get('origin', [0, 0])
winSize = kwargs.get('window_size', 0)
if mask:
def mask_func(x, winSize):
                x[origin[0] - winSize // 2: origin[0] + winSize // 2,
                  origin[1] - winSize // 2: origin[1] + winSize // 2] = 2
x = x - 1
return x
mask_ind = np.mask_indices(dset.shape[-1], mask_func, winSize)
self.data = np.array([imp[mask_ind].reshape(winSize, winSize) for imp in dset])
# detect and compute keypoints
def detect(image):
if lib == 'opencv':
image = (image - image.mean()) / image.std()
image = image.astype('uint8')
k_obj, d_obj = detector.detectAndCompute(image, None)
keypts, descs = pickle_keypoints(k_obj), pickle_keypoints(d_obj)
elif lib == 'skimage':
imp = (image - image.mean()) / np.std(image)
imp[imp < 0] = 0
                imp = imp.astype('float32')
detector.detect_and_extract(imp)
keypts, descs = detector.keypoints, detector.descriptors
return keypts, descs
# start pool of workers
results = [detect(imp) for imp in self.data]
# get keypoints and descriptors
keypts = [itm[0].astype('int') for itm in results]
desc = [itm[1] for itm in results]
return keypts, desc
#TODO: Docstrings following numpy standard.
# Functions
def euclidMatch(Matches, keypts1, keypts2, misalign):
"""
Function that thresholds the matches, found from a comparison of
their descriptors, by the maximum expected misalignment.
"""
filteredMatches = np.array([])
    deltaX = (keypts1[Matches[:, 0], :][:, 0] - keypts2[Matches[:, 1], :][:, 0]) ** 2
    deltaY = (keypts1[Matches[:, 0], :][:, 1] - keypts2[Matches[:, 1], :][:, 1]) ** 2
dist = np.apply_along_axis(np.sqrt, 0, deltaX + deltaY)
filteredMatches = np.where(dist[:] < misalign, True, False)
return filteredMatches
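# Hedged sketch (not part of the original module): euclidMatch expects
# `Matches` as an (M, 2) index array (as returned by
# skimage.feature.match_descriptors) and keypoint arrays of shape (N, 2) in
# (row, col) order; it returns a boolean mask over the M matches.
def _example_euclid_match():
    keypts1 = np.array([[0, 0], [10, 10]])
    keypts2 = np.array([[1, 1], [50, 50]])
    matches = np.array([[0, 0], [1, 1]])
    mask = euclidMatch(matches, keypts1, keypts2, 5)
    # mask == [True, False]: only the first pair lies within 5 pixels
    return matches[mask]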
# function is taken as is from scikit-image.
def _center_and_normalize_points(points):
"""
Center and normalize image points.
The points are transformed in a two-step procedure that is expressed
as a transformation matrix. The matrix of the resulting points is usually
better conditioned than the matrix of the original points.
Center the image points, such that the new coordinate system has its
origin at the centroid of the image points.
Normalize the image points, such that the mean distance from the points
to the origin of the coordinate system is sqrt(2).
Parameters
----------
points : (N, 2) array
The coordinates of the image points.
Returns
-------
matrix : (3, 3) array
The transformation matrix to obtain the new points.
new_points : (N, 2) array
The transformed image points.
"""
centroid = np.mean(points, axis=0)
rms = np.sqrt(np.sum((points - centroid) ** 2) / points.shape[0])
norm_factor = np.sqrt(2) / rms
matrix = np.array([[norm_factor, 0, -norm_factor * centroid[0]],
[0, norm_factor, -norm_factor * centroid[1]],
[0, 0, 1]])
pointsh = np.row_stack([points.T, np.ones((points.shape[0]),)])
new_pointsh = np.dot(matrix, pointsh).T
new_points = new_pointsh[:, :2]
new_points[:, 0] /= new_pointsh[:, 2]
new_points[:, 1] /= new_pointsh[:, 2]
return matrix, new_points
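# Hedged numerical check (not part of the original module): after the
# transformation returned by _center_and_normalize_points, the points are
# centered on the origin and their root-mean-square distance to it is sqrt(2).
def _example_center_and_normalize():
    pts = np.array([[0., 0.], [2., 0.], [0., 2.], [2., 2.]])
    matrix, new_pts = _center_and_normalize_points(pts)
    assert np.allclose(new_pts.mean(axis=0), 0.0)
    rms = np.sqrt(np.sum(new_pts ** 2) / new_pts.shape[0])
    assert np.isclose(rms, np.sqrt(2))
    return matrix, new_pts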
class TranslationTransform(object):
"""
2D translation using homogeneous representation:
The transformation matrix is:
        [[1 0 tX]
         [0 1 tY]
         [0 0 1]]
    tX: translation along x-axis.
    tY: translation along y-axis.
Parameters
----------
translation : tuple
(tX, tY)
Attributes
----------
params : (3, 3) array
Homogeneous transformation matrix.
"""
def __init__(self, matrix = None, translation = None):
params = translation
if params and matrix is not None:
raise ValueError("You cannot specify the transformation matrix and"
" the implicit parameters at the same time.")
elif matrix is not None:
if matrix.shape != (3, 3):
raise ValueError("Invalid shape of transformation matrix.")
self.params = matrix
elif params:
if translation is None:
translation = (0., 0.)
self.params = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
], dtype = 'float32')
self.params[0:2, 2] = translation
else:
# default to an identity transform
self.params = np.eye(3)
def estimate(self, src, dst):
        # estimate the translation matrix from the first pair of
        # corresponding src and dst coordinates
        try:
            xs = src[0, 0]
            ys = src[0, 1]
            xd = dst[0, 0]
            yd = dst[0, 1]
S = np.array([[1., 0., xd-xs],
[0., 1., yd-ys],
[0., 0., 1.]
],dtype = 'float32')
self.params = S
return True
except IndexError:
return False
@property
def _inv_matrix(self):
        inv_matrix = self.params.copy()
        inv_matrix[0:2, 2] = -inv_matrix[0:2, 2]
return inv_matrix
def _apply_mat(self, coords, matrix):
coords = np.array(coords, copy=False, ndmin=2)
x, y = np.transpose(coords)
src = np.vstack((x, y, np.ones_like(x)))
dst = np.dot(src.transpose(), matrix.transpose())
# rescale to homogeneous coordinates
dst[:, 0] /= dst[:, 2]
dst[:, 1] /= dst[:, 2]
return dst[:, :2]
def __call__(self, coords):
return self._apply_mat(coords, self.params)
def inverse(self, coords):
""" Apply inverse transformation.
Parameters
----------
coords : (N, 2) array
Source coordinates.
Returns
-------
coords : (N, 2) array
Transformed coordinates.
"""
return self._apply_mat(coords, self._inv_matrix)
def residuals(self, src, dst):
"""
Determine residuals of transformed destination coordinates.
For each transformed source coordinate the euclidean distance to the
respective destination coordinate is determined.
Parameters
----------
src : (N, 2) array
Source coordinates.
dst : (N, 2) array
Destination coordinates.
Returns
-------
residuals : (N, ) array
Residual for coordinate.
"""
return np.sqrt(np.sum((self(src) - dst)**2, axis=1))
@property
def translation(self):
return self.params[0:2, 2]
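# Hedged usage sketch (not part of the original module): build a pure
# translation, apply it to coordinates and undo it with inverse().
def _example_translation_transform():
    T = TranslationTransform(translation=(3., -2.))
    coords = np.array([[0., 0.], [1., 1.]])
    shifted = T(coords)             # every (x, y) pair becomes (x + 3, y - 2)
    recovered = T.inverse(shifted)  # undoes the shift
    assert np.allclose(recovered, coords)
    return shifted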
class RigidTransform(object):
"""
    2D rigid transformation (rotation + translation) using homogeneous representation:
The transformation matrix is:
[[cos(theta) -sin(theta) tX]
[sin(theta) cos(theta) tY]
[0 0 1]]
    tX: translation along x-axis.
    tY: translation along y-axis.
theta: rotation angle in radians.
Parameters
----------
translation : tuple
(tX, tY)
rotation : float
in radians.
Attributes
----------
params : (3, 3) array
Homogeneous transformation matrix.
"""
def __init__(self, matrix = None, rotation = None, translation = None):
params = any(param is not None
for param in (rotation, translation))
if params and matrix is not None:
raise ValueError("You cannot specify the transformation matrix and"
" the implicit parameters at the same time.")
elif matrix is not None:
if matrix.shape != (3, 3):
raise ValueError("Invalid shape of transformation matrix.")
self.params = matrix
elif params:
if translation is None:
translation = (0, 0)
if rotation is None:
rotation = 0
self.params = np.array([
[np.cos(rotation), - np.sin(rotation), 0],
[np.sin(rotation), np.cos(rotation), 0],
[ 0, 0, 1]
])
self.params[0:2, 2] = translation
else:
# default to an identity transform
self.params = np.eye(3)
def estimate(self, src, dst):
"""
Set the transformation matrix with the explicit parameters.
You can determine the over-, well- and under-determined parameters
with the total least-squares method.
Number of source and destination coordinates must match.
The transformation is defined as::
X = a0 * x - b0 * y + a1
Y = b0 * x + a0 * y + b1
These equations can be transformed to the following form::
0 = a0 * x - b0 * y + a1 - X
0 = b0 * x + a0 * y + b1 - Y
which exist for each set of corresponding points, so we have a set of
N * 2 equations. The coefficients appear linearly so we can write
A x = 0, where::
A = [[x 1 -y 0 -X]
[y 0 x 1 -Y]
...
...
]
x.T = [a0 a1 b0 b1 c3]
In case of total least-squares the solution of this homogeneous system
of equations is the right singular vector of A which corresponds to the
smallest singular value normed by the coefficient c3.
Parameters
----------
src : (N, 2) array
Source coordinates.
dst : (N, 2) array
Destination coordinates.
Returns
-------
success : bool
True, if model estimation succeeds.
"""
try:
src_matrix, src = _center_and_normalize_points(src)
dst_matrix, dst = _center_and_normalize_points(dst)
except ZeroDivisionError:
self.params = np.nan * np.empty((3, 3))
return False
xs = src[:, 0]
ys = src[:, 1]
xd = dst[:, 0]
yd = dst[:, 1]
rows = src.shape[0]
# params: a0, a1, b0, b1
A = np.zeros((rows * 2, 5))
A[:rows, 0] = xs
A[:rows, 2] = - ys
A[:rows, 1] = 1
A[rows:, 2] = xs
A[rows:, 0] = ys
A[rows:, 3] = 1
A[:rows, 4] = xd
A[rows:, 4] = yd
_, _, V = np.linalg.svd(A)
# solution is right singular vector that corresponds to smallest
# singular value
a0, a1, b0, b1 = - V[-1, :-1] / V[-1, -1]
S = np.array([[a0, -b0, a1],
[b0, a0, b1],
[ 0, 0, 1]])
# De-center and de-normalize
S = np.dot(np.linalg.inv(dst_matrix), np.dot(S, src_matrix))
self.params = S
return True
def _apply_mat(self, coords, matrix):
coords = np.array(coords, copy=False, ndmin=2)
x, y = np.transpose(coords)
src = np.vstack((x, y, np.ones_like(x)))
dst = np.dot(src.transpose(), matrix.transpose())
# rescale to homogeneous coordinates
dst[:, 0] /= dst[:, 2]
dst[:, 1] /= dst[:, 2]
return dst[:, :2]
def __call__(self, coords):
return self._apply_mat(coords, self.params)
def inverse(self, coords):
"""
Apply inverse transformation.
Parameters
----------
coords : (N, 2) array
Source coordinates.
Returns
-------
coords : (N, 2) array
Transformed coordinates.
"""
return self._apply_mat(coords, self._inv_matrix)
def residuals(self, src, dst):
"""
Determine residuals of transformed destination coordinates.
For each transformed source coordinate the euclidean distance to the
respective destination coordinate is determined.
Parameters
----------
src : (N, 2) array
Source coordinates.
dst : (N, 2) array
Destination coordinates.
Returns
-------
residuals : (N, ) array
Residual for coordinate.
"""
return np.sqrt(np.sum((self(src) - dst)**2, axis=1))
@property
def _inv_matrix(self):
return np.linalg.inv(self.params)
@property
def rotation(self):
        return np.arctan2(self.params[1, 0], self.params[1, 1])
@property
def translation(self):
return self.params[0:2, 2]
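# Hedged sketch (not part of the original module): recover a known rotation
# and translation from exact point correspondences with RigidTransform. The
# points and parameter values below are arbitrary illustrative choices.
def _example_rigid_transform():
    src = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.], [2., 1.]])
    true_T = RigidTransform(rotation=0.1, translation=(2., -1.))
    dst = true_T(src)
    est = RigidTransform()
    assert est.estimate(src, dst)
    assert np.allclose(est.params, true_T.params, atol=1e-6)
    return est.rotation, est.translation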
# Class to do geometric transformations. This is a wrapper on scikit-image functionality.
# TODO: io operations for features and optical geometric transformations.
class geoTransformerParallel(object):
"""
This object contains methods to perform geometric transformations on
a sequence of images. Some of the capabilities are:
+ Homography by feature extraction.
+ Intensity-based image registration.
+ Projection Correction.
"""
def __init__(self):
self.data = []
self.features = []
def clearData(self):
"""
This is a Method to clear the data from the object.
"""
del self.data
self.data = []
def loadData(self, dataset):
"""
This is a Method that loads h5 Dataset to be corrected.
Parameters
----------
        dataset : h5py.Dataset
"""
if not isinstance(dataset, h5py.Dataset):
warnings.warn( 'Error: Data must be an h5 Dataset object' )
else:
self.data = dataset
dim = int(np.sqrt(self.data.shape[-1]))
self.data = self.data.reshape(-1,dim,dim)
def loadFeatures(self, features):
"""
This is a Method that loads features to be used for homography etc ...
Parameters
----------
features : tuple
[keypoints, descriptors]
These can come from FeatureExtractor.getFeatures() or elsewhere.
The format is :
keypoints = [np.ndarray([y_position, x_position])]
descriptors = [np.ndarray()]
"""
self.features = features
def matchFeatures(self, **kwargs):
"""
This is a Method that computes similarity between keypoints based on their
descriptors. Currently only skimage.feature.match_descriptors is implemented.
In the future will need to add opencv2.matchers.
Parameters
----------
processors: int, optional
Number of processors to use, default = 1.
maximum_distance: int, optional
            Maximum misalignment (in pixels) allowed between matched keypoints, default = infinity.
Used to filter the matches before optimizing the transformation.
Returns
-------
Matches
"""
desc = self.features[-1]
keypts = self.features[0]
processes = kwargs.get('processors', 1)
maxDis = kwargs.get('maximum_distance', np.infty)
def match(desc):
desc1, desc2 = desc[0], desc[1]
matches = match_descriptors(desc1, desc2, cross_check=True)
return matches
# start pool of workers
pool = mp.Pool(processes)
print('launching %i kernels...'%(processes))
tasks = [ (desc1, desc2) for desc1, desc2 in zip(desc[:],desc[1:]) ]
chunk = int(len(desc)/processes)
jobs = pool.imap(match, tasks, chunksize = chunk)
# get matches
print('Extracting Matches From the Descriptors...')
matches =[]
for j in jobs:
matches.append(j)
# close the pool
print('Closing down the kernels...\n')
pool.close()
# impose maximum_distance misalignment constraints on matches
filt_matches = []
for match, key1, key2 in zip(matches, keypts[:],keypts[1:]):
filteredMask = euclidMatch(match, key1, key2, maxDis)
filt_matches.append(match[filteredMask])
return matches, filt_matches
def findTransformation(self, transform, matches, processes, **kwargs):
"""
This is a Method that finds the optimal transformation between two images
given matching features using a random sample consensus.
Parameters
----------
transform: skimage.transform object
matches : list
matches found through match_features method.
processes : int
Number of processors to use.
**kwargs are passed to skimage.transform.ransac
Returns
-------
Transformations
"""
keypts = self.features[0]
def optimization(Pts):
robustTrans, inliers = ransac((Pts[0], Pts[1]), transform, **kwargs)
output = [robustTrans, inliers]
return output
# start pool of workers
print('launching %i kernels...'%(processes))
pool = mp.Pool(processes)
tasks = [ (key1[match[:, 0]], key2[match[:, 1]])
for match, key1, key2 in zip(matches,keypts[:],keypts[1:]) ]
chunk = int(len(keypts)/processes)
jobs = pool.imap(optimization, tasks, chunksize = chunk)
# get Transforms and inlier matches
transforms, trueMatches =[], []
print('Extracting Inlier Matches with RANSAC...')
try:
for j in jobs:
transforms.append(j[0])
trueMatches.append(j[1])
except np.linalg.LinAlgError:
pass
# close the pool
pool.close()
print('Closing down the kernels...\n')
return transforms, trueMatches
#TODO: Need parallel version for transforming stack of images.
def applyTransformation(self, transforms, **kwargs):
"""
This is the method that takes the list of transformation found by findTransformation
and applies them to the data set.
Parameters
----------
        transforms : list of skimage.transform.GeometricTransform objects
            The objects must be initialized with the desired parameters.
transformation : string, optional.
The type of geometric transformation to use (i.e. translation, rigid, etc..)
Currently, only translation is implemented.
default, translation.
origin : int, optional
The position in the data to take as origin, i.e. don't transform.
default, center image in the stack.
processors : int, optional
Number of processors to use, default = 1.
            Currently, only one processor is used.
Returns
-------
Transformed images, transformations
"""
dic = ['processors','origin','transformation']
for key in kwargs.keys():
if key not in dic:
print('%s is not a parameter of this function' %(str(key)))
processes = kwargs.get('processors', 1)
origin = kwargs.get('origin', int(self.data.shape[0]/2))
transformation = kwargs.get('transformation','translation')
dset = self.data
# For now restricting this to just translation... Straightforward to generalize to other transform objects.
if transformation == 'translation':
YTrans = np.array([trans.translation[0] for trans in transforms])
XTrans = np.array([trans.translation[1] for trans in transforms])
chainL = []
for y, x in zip(range(0,YTrans.size+1), range(0,XTrans.size+1)):
if y < origin:
ychain = -np.sum(YTrans[y:origin])
xchain = -np.sum(XTrans[x:origin])
elif y > origin:
ychain = np.sum(YTrans[origin:y])
xchain = np.sum(XTrans[origin:x])
else:
ychain = 0
xchain = 0
chainL.append([xchain, ychain])
chainTransforms = []
for params in chainL:
T = TranslationTransform(translation = params)
chainTransforms.append(T)
        # Just need a single function that does both.
if transformation == 'rotation':
rotTrans = np.array([trans.rotation for trans in transforms])
YTrans = np.array([trans.translation[0] for trans in transforms])
XTrans = np.array([trans.translation[1] for trans in transforms])
chainL = []
for x in range(0,rotTrans.size+1):
if x < origin:
rotchain = -np.sum(rotTrans[x:origin])
ychain = -np.sum(YTrans[x:origin])
xchain = -np.sum(XTrans[x:origin])
elif x > origin:
rotchain = np.sum(rotTrans[origin:x])
ychain = np.sum(YTrans[origin:x])
xchain = np.sum(XTrans[origin:x])
else:
rotchain = 0
ychain = 0
xchain = 0
chainL.append([rotchain, xchain, ychain])
chainTransforms = []
for params in chainL:
T = SimilarityTransform(scale = 1.0, rotation = np.deg2rad(params[0]), translation = (params[1],params[2]))
# T = SimilarityTransform(rotation = params, translation = (0,0))
chainTransforms.append(T)
# Use the chain transformations to transform the dataset
output_shape = dset[0].shape
# output_shape = (2048, 2048)
def warping(datum):
imp, transform = datum[0], datum[1]
transimp = warp(imp, inverse_map= transform, output_shape = output_shape,
cval = 0, preserve_range = True)
return transimp
# #start pool of workers
# #somehow wrap function crashes when run in parallel! run sequentially for now.
# pool = mp.Pool(processes)
# print('launching %i kernels...'%(processes))
# tasks = [ (imp, transform) for imp, transform in zip(dset, chainTransforms) ]
# chunk = int(dset.shape[0]/processes)
# jobs = pool.imap(warping, tasks, chunksize = 1)
# #close the pool
# pool.close()
# print('Closing down the kernels... \n')
#
# get transformed images and pack into 3d np.ndarray
print('Transforming Images...')
transImages = np.copy(dset[:])
for imp, transform, itm in zip( transImages, chainTransforms, range(0,transImages.shape[0])):
transimp = warping([imp, transform])
transImages[itm] = transimp
print('Image #%i'%(itm))
return transImages, chainTransforms
def correlationTransformation(self, **kwargs):
"""
Uses Cross-correlation to find a translation between 2 images.
Parameters
----------
Processors: int, optional
Number of processors to use, default = 1.
Returns
-------
Transformations.
"""
processes = kwargs.get('processors', 1)
pool = mp.Pool(processes)
print('launching %i kernels...'%(processes))
def register(images):
imp1, imp2 = images[0], images[1]
shifts, _, _ = register_translation(imp1,imp2)
return shifts
dim = int(np.sqrt(self.data.shape[-1]))
tasks = [ (imp1, imp2)
for imp1, imp2 in zip(self.data[:], self.data[1:]) ]
chunk = int((self.data.shape[0] - 1)/processes)
jobs = pool.imap(register, tasks, chunksize = chunk)
# get Transforms and inlier matches
results = []
print('Extracting Translations')
try:
for j in jobs:
results.append(j)
        except Exception:
            warnings.warn('Skipped an entry: register_translation failed for at least one image pair.')
# close the pool
pool.close()
return results
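# Hedged end-to-end sketch (not part of the original module): chain feature
# extraction, descriptor matching, RANSAC model fitting and warping. The
# h5_dataset argument is assumed to be an h5py.Dataset of flattened square
# images; the RANSAC keyword values below are illustrative, not tuned.
def _example_registration_pipeline(h5_dataset):
    extractor = FeatureExtractorParallel('ORB', lib='skimage')
    extractor.loadData(h5_dataset)
    keypts, descs = extractor.getFeatures(processors=2)
    transformer = geoTransformerParallel()
    transformer.loadData(h5_dataset)
    transformer.loadFeatures([keypts, descs])
    matches, filt_matches = transformer.matchFeatures(processors=2,
                                                      maximum_distance=50)
    transforms, inliers = transformer.findTransformation(
        TranslationTransform, filt_matches, 2,
        min_samples=3, residual_threshold=2, max_trials=500)
    corrected, chain = transformer.applyTransformation(
        transforms, transformation='translation')
    return corrected, chain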
class geoTransformerSerial(object):
"""
This object contains methods to perform geometric transformations on
a sequence of images. Some of the capabilities are:
+ Homography by feature extraction.
+ Intensity-based image registration.
+ Projection Correction.
"""
def __init__(self):
self.data = []
self.features = []
def clearData(self):
"""
This is a Method to clear the data from the object.
"""
del self.data
self.data = []
def loadData(self, dataset):
"""
This is a Method that loads h5 Dataset to be corrected.
Parameters
----------
        dataset : h5py.Dataset
"""
if not isinstance(dataset, h5py.Dataset):
warnings.warn( 'Error: Data must be an h5 Dataset object' )
else:
self.data = dataset
dim = int(np.sqrt(self.data.shape[-1]))
self.data = self.data.reshape(-1,dim,dim)
def loadFeatures(self, features):
"""
This is a Method that loads features to be used for homography etc ...
Parameters
----------
features : tuple
[keypoints, descriptors]
These can come from FeatureExtractor.getFeatures() or elsewhere.
The format is :
keypoints = [np.ndarray([y_position, x_position])]
descriptors = [np.ndarray()]
"""
self.features = features
def matchFeatures(self, **kwargs):
"""
This is a Method that computes similarity between keypoints based on their
descriptors. Currently only skimage.feature.match_descriptors is implemented.
In the future will need to add opencv2.matchers.
Parameters
----------
maximum_distance: int, optional
            Maximum misalignment (in pixels) allowed between matched keypoints, default = infinity.
Used to filter the matches before optimizing the transformation.
Returns
-------
Matches.
"""
desc = self.features[-1]
keypts = self.features[0]
maxDis = kwargs.get('maximum_distance', np.infty)
processes = kwargs.get('processes', 2)
def match(desc):
desc1, desc2 = desc[0], desc[1]
matches = match_descriptors(desc1, desc2, cross_check=True)
return matches
# start pool of workers
pool = mp.Pool(processes)
print('launching %i kernels...'%(processes))
tasks = [ (desc1, desc2) for desc1, desc2 in zip(desc[:],desc[1:]) ]
chunk = int(len(desc)/processes)
jobs = pool.imap(match, tasks, chunksize = chunk)
# get matches
print('Extracting Matches From the Descriptors...')
matches =[]
for j in jobs:
matches.append(j)
# close the pool
print('Closing down the kernels...\n')
pool.close()
# impose maximum_distance misalignment constraints on matches
filt_matches = []
for match, key1, key2 in zip(matches, keypts[:],keypts[1:]):
filteredMask = euclidMatch(match, key1, key2, maxDis)
filt_matches.append(match[filteredMask])
return matches, filt_matches
#TODO: Need Better Error Handling.
def findTransformation(self, transform, matches, processes, **kwargs):
"""
This is a Method that finds the optimal transformation between two images
given matching features using a random sample consensus.
Parameters
----------
transform : skimage.transform object
matches : list
matches found through match_features method.
        processes : int
            Number of processors to use (unused in the serial implementation).
**kwargs are passed to skimage.transform.ransac
Returns
-------
Transformations.
"""
keypts = self.features[0]
def optimization(Pts):
robustTrans, inliers = ransac((Pts[0], Pts[1]), transform, **kwargs)
output = [robustTrans, inliers]
return output
        results = [optimization((key1[match[:, 0]], key2[match[:, 1]]))
                   for match, key1, key2 in zip(matches, keypts[:], keypts[1:])]
# get Transforms and inlier matches
transforms, trueMatches =[], []
print('Extracting Inlier Matches with RANSAC...')
try:
for res in results:
transforms.append(res[0])
trueMatches.append(res[1])
except np.linalg.LinAlgError:
print('Error: Inverse of the transformation failed!!!')
return transforms, trueMatches
def applyTransformation(self, transforms, **kwargs):
"""
This is the method that takes the list of transformation found by findTransformation
and applies them to the data set.
        Parameters
        ----------
        transforms : list of skimage.transform.GeometricTransform objects
            The objects must be initialized with the desired parameters.
transformation: string, optional.
The type of geometric transformation to use (i.e. translation, rigid, etc..)
Currently, only translation is implemented.
default, translation.
origin: int, optional
The position in the data to take as origin, i.e. don't transform.
default, center image in the stack.
Returns
-------
Transformed images, transformations
"""
dic = ['processors','origin','transformation']
for key in kwargs.keys():
if key not in dic:
print('%s is not a parameter of this function' %(str(key)))
processes = kwargs.get('processors', 1)
origin = kwargs.get('origin', int(self.data.shape[0]/2))
transformation = kwargs.get('transformation','translation')
dset = self.data
# For now restricting this to just translation... Straightforward to generalize to other transform objects.
if transformation == 'translation':
YTrans = np.array([trans.translation[0] for trans in transforms])
XTrans = np.array([trans.translation[1] for trans in transforms])
chainL = []
for y, x in zip(range(0,YTrans.size+1), range(0,XTrans.size+1)):
if y < origin:
ychain = -np.sum(YTrans[y:origin])
xchain = -np.sum(XTrans[x:origin])
elif y > origin:
ychain = np.sum(YTrans[origin:y])
xchain = np.sum(XTrans[origin:x])
else:
ychain = 0
xchain = 0
chainL.append([xchain, ychain])
chainTransforms = []
for params in chainL:
T = TranslationTransform(translation = params)
chainTransforms.append(T)
        # Just need a single function that does both.
if transformation == 'rotation':
rotTrans = np.array([trans.rotation for trans in transforms])
YTrans = np.array([trans.translation[0] for trans in transforms])
XTrans = np.array([trans.translation[1] for trans in transforms])
chainL = []
for x in range(0,rotTrans.size+1):
if x < origin:
rotchain = -np.sum(rotTrans[x:origin])
ychain = -np.sum(YTrans[x:origin])
xchain = -np.sum(XTrans[x:origin])
elif x > origin:
rotchain = np.sum(rotTrans[origin:x])
ychain = np.sum(YTrans[origin:x])
xchain = np.sum(XTrans[origin:x])
else:
rotchain = 0
ychain = 0
xchain = 0
chainL.append([rotchain, xchain, ychain])
chainTransforms = []
for params in chainL:
T = SimilarityTransform(scale = 1.0, rotation = np.deg2rad(params[0]), translation = (params[1],params[2]))
# T = SimilarityTransform(rotation = params, translation = (0,0))
chainTransforms.append(T)
# Use the chain transformations to transform the dataset
output_shape = dset[0].shape
def warping(datum):
imp, transform = datum[0], datum[1]
transimp = warp(imp, inverse_map= transform, output_shape = output_shape,
cval = 0, preserve_range = True)
return transimp
# get transformed images and pack into 3d np.ndarray
print('Transforming Images...')
transImages = np.copy(dset[:])
for imp, transform, itm in zip( transImages, chainTransforms, range(0,transImages.shape[0])):
transimp = warping([imp, transform])
transImages[itm] = transimp
print('Image #%i'%(itm))
return transImages, chainTransforms
def correlationTransformation(self, **kwargs):
"""
Uses Cross-correlation to find a translation between 2 images.
Parameters
----------
Processors: int, optional
Number of processors to use, default = 1.
Returns
-------
Transformations.
"""
processes = kwargs.get('processors', 1)
pool = mp.Pool(processes)
print('launching %i kernels...'%(processes))
def register(images):
imp1, imp2 = images[0], images[1]
shifts, _, _ = register_translation(imp1,imp2)
return shifts
results = [register((imp1, imp2))
for imp1, imp2 in zip(self.data[:], self.data[1:])]
return results
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
"""
Possible values of the language identifier field in the name records of the
TTF 'name' table if the 'platform' identifier code is TT_PLATFORM_MICROSOFT.
TT_MS_LANGID_SANSKRIT_INDIA
TT_MS_LANGID_ENGLISH_UNITED_KINGDOM
TT_MS_LANGID_ENGLISH_BELIZE
TT_MS_LANGID_ARABIC_LEBANON
TT_MS_LANGID_MOLDAVIAN_MOLDAVIA
TT_MS_LANGID_TURKISH_TURKEY
TT_MS_LANGID_WELSH_WALES
TT_MS_LANGID_GERMAN_AUSTRIA
TT_MS_LANGID_DUTCH_BELGIUM
TT_MS_LANGID_YI_CHINA
TT_MS_LANGID_QUECHUA_ECUADOR
TT_MS_LANGID_SPANISH_EL_SALVADOR
TT_MS_LANGID_SWAHILI_KENYA
TT_MS_LANGID_QUECHUA_BOLIVIA
TT_MS_LANGID_SLOVENE_SLOVENIA
TT_MS_LANGID_ORIYA_INDIA
TT_MS_LANGID_FARSI_IRAN
TT_MS_LANGID_ENGLISH_CANADA
TT_MS_LANGID_NEPALI_NEPAL
TT_MS_LANGID_DHIVEHI_MALDIVES
TT_MS_LANGID_GERMAN_LIECHTENSTEI
TT_MS_LANGID_TAMIL_INDIA
TT_MS_LANGID_ARABIC_UAE
TT_MS_LANGID_JAPANESE_JAPAN
TT_MS_LANGID_TAMAZIGHT_MOROCCO
TT_MS_LANGID_FRENCH_FRANCE
TT_MS_LANGID_CHINESE_MACAU
TT_MS_LANGID_VIETNAMESE_VIET_NAM
TT_MS_LANGID_HEBREW_ISRAEL
TT_MS_LANGID_SAMI_NORTHERN_SWEDEN
TT_MS_LANGID_PUNJABI_ARABIC_PAKISTAN
TT_MS_LANGID_SWEDISH_SWEDEN
TT_MS_LANGID_FRENCH_REUNION
TT_MS_LANGID_ARABIC_BAHRAIN
TT_MS_LANGID_ENGLISH_INDIA
TT_MS_LANGID_NEPALI_INDIA
TT_MS_LANGID_THAI_THAILAND
TT_MS_LANGID_ENGLISH_GENERAL
TT_MS_LANGID_SAMI_LULE_NORWAY
TT_MS_LANGID_ARABIC_OMAN
TT_MS_LANGID_SPANISH_HONDURAS
TT_MS_LANGID_ENGLISH_JAMAICA
TT_MS_LANGID_ESTONIAN_ESTONIA
TT_MS_LANGID_FRISIAN_NETHERLANDS
TT_MS_LANGID_LATIN
TT_MS_LANGID_ENGLISH_INDONESIA
TT_MS_LANGID_ENGLISH_IRELAND
TT_MS_LANGID_TIBETAN_CHINA
TT_MS_LANGID_PUNJABI_INDIA
TT_MS_LANGID_FRENCH_MALI
TT_MS_LANGID_GERMAN_LUXEMBOURG
TT_MS_LANGID_SUTU_SOUTH_AFRICA
TT_MS_LANGID_FRENCH_CAMEROON
TT_MS_LANGID_FRENCH_CONGO
TT_MS_LANGID_CLASSIC_LITHUANIAN_LITHUANIA
TT_MS_LANGID_MALAYALAM_INDIA
TT_MS_LANGID_SAMI_SOUTHERN_SWEDEN
TT_MS_LANGID_CHEROKEE_UNITED_STATES
TT_MS_LANGID_SPANISH_GUATEMALA
TT_MS_LANGID_CZECH_CZECH_REPUBLIC
TT_MS_LANGID_MANIPURI_INDIA
TT_MS_LANGID_ENGLISH_AUSTRALIA
TT_MS_LANGID_SPANISH_DOMINICAN_REPUBLIC
TT_MS_LANGID_ARABIC_LIBYA
TT_MS_LANGID_FRENCH_WEST_INDIES
TT_MS_LANGID_ENGLISH_TRINIDAD
TT_MS_LANGID_ARABIC_QATAR
TT_MS_LANGID_SPANISH_COLOMBIA
TT_MS_LANGID_GUARANI_PARAGUAY
TT_MS_LANGID_EDO_NIGERIA
TT_MS_LANGID_SEPEDI_SOUTH_AFRICA
TT_MS_LANGID_ENGLISH_HONG_KONG
TT_MS_LANGID_KOREAN_EXTENDED_WANSUNG_KOREA
TT_MS_LANGID_TATAR_TATARSTAN
TT_MS_LANGID_PASHTO_AFGHANISTAN
TT_MS_LANGID_KASHMIRI_PAKISTAN
TT_MS_LANGID_GALICIAN_SPAIN
TT_MS_LANGID_TAJIK_TAJIKISTAN
TT_MS_LANGID_SAMI_INARI_FINLAND
TT_MS_LANGID_KASHMIRI_SASIA
TT_MS_LANGID_SPANISH_ARGENTINA
TT_MS_LANGID_SAMI_SOUTHERN_NORWAY
TT_MS_LANGID_CROATIAN_CROATIA
TT_MS_LANGID_GUJARATI_INDIA
TT_MS_LANGID_TIBETAN_BHUTAN
TT_MS_LANGID_TIGRIGNA_ETHIOPIA
TT_MS_LANGID_FINNISH_FINLAND
TT_MS_LANGID_ENGLISH_UNITED_STATES
TT_MS_LANGID_ITALIAN_SWITZERLAND
TT_MS_LANGID_ARABIC_EGYPT
TT_MS_LANGID_SPANISH_LATIN_AMERICA
TT_MS_LANGID_LITHUANIAN_LITHUANIA
TT_MS_LANGID_ARABIC_ALGERIA
TT_MS_LANGID_MALAY_MALAYSIA
TT_MS_LANGID_ARABIC_GENERAL
TT_MS_LANGID_CHINESE_PRC
TT_MS_LANGID_BENGALI_BANGLADESH
TT_MS_LANGID_SPANISH_PERU
TT_MS_LANGID_SPANISH_SPAIN_INTERNATIONAL_SORT
TT_MS_LANGID_DIVEHI_MALDIVES
TT_MS_LANGID_LATVIAN_LATVIA
TT_MS_LANGID_TURKMEN_TURKMENISTAN
TT_MS_LANGID_XHOSA_SOUTH_AFRICA
TT_MS_LANGID_KHMER_CAMBODIA
TT_MS_LANGID_NORWEGIAN_NORWAY_NYNORSK
TT_MS_LANGID_ARABIC_MOROCCO
TT_MS_LANGID_FRENCH_SENEGAL
TT_MS_LANGID_YORUBA_NIGERIA
TT_MS_LANGID_CATALAN_SPAIN
TT_MS_LANGID_AFRIKAANS_SOUTH_AFRICA
TT_MS_LANGID_ZULU_SOUTH_AFRICA
TT_MS_LANGID_SPANISH_URUGUAY
TT_MS_LANGID_SPANISH_ECUADOR
TT_MS_LANGID_BOSNIAN_BOSNIA_HERZEGOVINA
TT_MS_LANGID_CHINESE_GENERAL
TT_MS_LANGID_SPANISH_PARAGUAY
TT_MS_LANGID_HINDI_INDIA
TT_MS_LANGID_FRENCH_LUXEMBOURG
TT_MS_LANGID_TSWANA_SOUTH_AFRICA
TT_MS_LANGID_HUNGARIAN_HUNGARY
TT_MS_LANGID_CROATIAN_BOSNIA_HERZEGOVINA
TT_MS_LANGID_ENGLISH_SINGAPORE
TT_MS_LANGID_MALTESE_MALTA
TT_MS_LANGID_SAMI_NORTHERN_FINLAND
TT_MS_LANGID_FRENCH_CANADA
TT_MS_LANGID_SAMI_LULE_SWEDEN
TT_MS_LANGID_KANURI_NIGERIA
TT_MS_LANGID_IRISH_GAELIC_IRELAND
TT_MS_LANGID_ARABIC_SAUDI_ARABIA
TT_MS_LANGID_FRENCH_HAITI
TT_MS_LANGID_SPANISH_PUERTO_RICO
TT_MS_LANGID_BURMESE_MYANMAR
TT_MS_LANGID_POLISH_POLAND
TT_MS_LANGID_PORTUGUESE_PORTUGAL
TT_MS_LANGID_ENGLISH_CARIBBEAN
TT_MS_LANGID_KIRGHIZ_KIRGHIZ_REPUBLIC
TT_MS_LANGID_ICELANDIC_ICELAND
TT_MS_LANGID_BENGALI_INDIA
TT_MS_LANGID_HAUSA_NIGERIA
TT_MS_LANGID_BASQUE_SPAIN
TT_MS_LANGID_UIGHUR_CHINA
TT_MS_LANGID_ENGLISH_MALAYSIA
TT_MS_LANGID_FRENCH_MONACO
TT_MS_LANGID_SPANISH_BOLIVIA
TT_MS_LANGID_SORBIAN_GERMANY
TT_MS_LANGID_SINDHI_INDIA
TT_MS_LANGID_CHINESE_SINGAPORE
TT_MS_LANGID_FRENCH_COTE_D_IVOIRE
TT_MS_LANGID_SPANISH_SPAIN_TRADITIONAL_SORT
TT_MS_LANGID_SERBIAN_SERBIA_CYRILLIC
TT_MS_LANGID_SAMI_SKOLT_FINLAND
TT_MS_LANGID_SERBIAN_BOSNIA_HERZ_CYRILLIC
TT_MS_LANGID_MALAY_BRUNEI_DARUSSALAM
TT_MS_LANGID_ARABIC_JORDAN
TT_MS_LANGID_MONGOLIAN_MONGOLIA_MONGOLIAN
TT_MS_LANGID_SERBIAN_SERBIA_LATIN
TT_MS_LANGID_RUSSIAN_RUSSIA
TT_MS_LANGID_ROMANIAN_ROMANIA
TT_MS_LANGID_FRENCH_NORTH_AFRICA
TT_MS_LANGID_MONGOLIAN_MONGOLIA
TT_MS_LANGID_TSONGA_SOUTH_AFRICA
TT_MS_LANGID_SOMALI_SOMALIA
TT_MS_LANGID_SAAMI_LAPONIA
TT_MS_LANGID_SPANISH_COSTA_RICA
TT_MS_LANGID_ARABIC_SYRIA
TT_MS_LANGID_SPANISH_PANAMA
TT_MS_LANGID_PAPIAMENTU_NETHERLANDS_ANTILLES
TT_MS_LANGID_ASSAMESE_INDIA
TT_MS_LANGID_SCOTTISH_GAELIC_UNITED_KINGDOM
TT_MS_LANGID_DUTCH_NETHERLANDS
TT_MS_LANGID_SINDHI_PAKISTAN
TT_MS_LANGID_MACEDONIAN_MACEDONIA
TT_MS_LANGID_KAZAK_KAZAKSTAN
TT_MS_LANGID_AZERI_AZERBAIJAN_LATIN
TT_MS_LANGID_BELARUSIAN_BELARUS
TT_MS_LANGID_FRENCH_MOROCCO
TT_MS_LANGID_SERBIAN_BOSNIA_HERZ_LATIN
TT_MS_LANGID_ALBANIAN_ALBANIA
TT_MS_LANGID_SINHALESE_SRI_LANKA
TT_MS_LANGID_SPANISH_MEXICO
TT_MS_LANGID_ENGLISH_ZIMBABWE
TT_MS_LANGID_OROMO_ETHIOPIA
TT_MS_LANGID_INDONESIAN_INDONESIA
TT_MS_LANGID_SAMI_NORTHERN_NORWAY
TT_MS_LANGID_UZBEK_UZBEKISTAN_LATIN
TT_MS_LANGID_SLOVAK_SLOVAKIA
TT_MS_LANGID_KASHMIRI_INDIA
TT_MS_LANGID_GERMAN_SWITZERLAND
TT_MS_LANGID_URDU_INDIA
TT_MS_LANGID_FAEROESE_FAEROE_ISLANDS
TT_MS_LANGID_SYRIAC_SYRIA
TT_MS_LANGID_SPANISH_CHILE
TT_MS_LANGID_FILIPINO_PHILIPPINES
TT_MS_LANGID_ARABIC_YEMEN
TT_MS_LANGID_KONKANI_INDIA
TT_MS_LANGID_AMHARIC_ETHIOPIA
TT_MS_LANGID_ENGLISH_NEW_ZEALAND
TT_MS_LANGID_RHAETO_ROMANIC_SWITZERLAND
TT_MS_LANGID_ARABIC_TUNISIA
TT_MS_LANGID_SOTHO_SOUTHERN_SOUTH_AFRICA
TT_MS_LANGID_QUECHUA_PERU
TT_MS_LANGID_DANISH_DENMARK
TT_MS_LANGID_ENGLISH_PHILIPPINES
TT_MS_LANGID_SPANISH_NICARAGUA
TT_MS_LANGID_INUKTITUT_CANADA
TT_MS_LANGID_UKRAINIAN_UKRAINE
TT_MS_LANGID_NORWEGIAN_NORWAY_BOKMAL
TT_MS_LANGID_UZBEK_UZBEKISTAN_CYRILLIC
TT_MS_LANGID_FRENCH_BELGIUM
TT_MS_LANGID_ENGLISH_SOUTH_AFRICA
TT_MS_LANGID_HAWAIIAN_UNITED_STATES
TT_MS_LANGID_ARABIC_IRAQ
TT_MS_LANGID_KANNADA_INDIA
TT_MS_LANGID_DZONGHKA_BHUTAN
TT_MS_LANGID_CHINESE_TAIWAN
TT_MS_LANGID_SPANISH_UNITED_STATES
TT_MS_LANGID_ARMENIAN_ARMENIA
TT_MS_LANGID_LAO_LAOS
TT_MS_LANGID_TIGRIGNA_ERYTREA
TT_MS_LANGID_MARATHI_INDIA
TT_MS_LANGID_ARABIC_KUWAIT
TT_MS_LANGID_TAMAZIGHT_MOROCCO_LATIN
TT_MS_LANGID_PORTUGUESE_BRAZIL
TT_MS_LANGID_TIGRIGNA_ERYTHREA
TT_MS_LANGID_GREEK_GREECE
TT_MS_LANGID_URDU_PAKISTAN
TT_MS_LANGID_KIRGHIZ_KIRGHIZSTAN
TT_MS_LANGID_YIDDISH_GERMANY
TT_MS_LANGID_GERMAN_GERMANY
TT_MS_LANGID_TELUGU_INDIA
TT_MS_LANGID_AZERI_AZERBAIJAN_CYRILLIC
TT_MS_LANGID_KOREAN_JOHAB_KOREA
TT_MS_LANGID_ITALIAN_ITALY
TT_MS_LANGID_MAORI_NEW_ZEALAND
TT_MS_LANGID_SPANISH_VENEZUELA
TT_MS_LANGID_IGBO_NIGERIA
TT_MS_LANGID_IBIBIO_NIGERIA
TT_MS_LANGID_CHINESE_HONG_KONG
TT_MS_LANGID_FRENCH_SWITZERLAND
TT_MS_LANGID_BULGARIAN_BULGARIA
TT_MS_LANGID_FULFULDE_NIGERIA
TT_MS_LANGID_RUSSIAN_MOLDAVIA
TT_MS_LANGID_VENDA_SOUTH_AFRICA
TT_MS_LANGID_GEORGIAN_GEORGIA
TT_MS_LANGID_SWEDISH_FINLAND
"""
TT_MS_LANGIDS = {
'TT_MS_LANGID_ARABIC_GENERAL' : 0x0001,
'TT_MS_LANGID_ARABIC_SAUDI_ARABIA' : 0x0401,
'TT_MS_LANGID_ARABIC_IRAQ' : 0x0801,
'TT_MS_LANGID_ARABIC_EGYPT' : 0x0c01,
'TT_MS_LANGID_ARABIC_LIBYA' : 0x1001,
'TT_MS_LANGID_ARABIC_ALGERIA' : 0x1401,
'TT_MS_LANGID_ARABIC_MOROCCO' : 0x1801,
'TT_MS_LANGID_ARABIC_TUNISIA' : 0x1c01,
'TT_MS_LANGID_ARABIC_OMAN' : 0x2001,
'TT_MS_LANGID_ARABIC_YEMEN' : 0x2401,
'TT_MS_LANGID_ARABIC_SYRIA' : 0x2801,
'TT_MS_LANGID_ARABIC_JORDAN' : 0x2c01,
'TT_MS_LANGID_ARABIC_LEBANON' : 0x3001,
'TT_MS_LANGID_ARABIC_KUWAIT' : 0x3401,
'TT_MS_LANGID_ARABIC_UAE' : 0x3801,
'TT_MS_LANGID_ARABIC_BAHRAIN' : 0x3c01,
'TT_MS_LANGID_ARABIC_QATAR' : 0x4001,
'TT_MS_LANGID_BULGARIAN_BULGARIA' : 0x0402,
'TT_MS_LANGID_CATALAN_SPAIN' : 0x0403,
'TT_MS_LANGID_CHINESE_GENERAL' : 0x0004,
'TT_MS_LANGID_CHINESE_TAIWAN' : 0x0404,
'TT_MS_LANGID_CHINESE_PRC' : 0x0804,
'TT_MS_LANGID_CHINESE_HONG_KONG' : 0x0c04,
'TT_MS_LANGID_CHINESE_SINGAPORE' : 0x1004,
'TT_MS_LANGID_CHINESE_MACAU' : 0x1404,
'TT_MS_LANGID_CZECH_CZECH_REPUBLIC' : 0x0405,
'TT_MS_LANGID_DANISH_DENMARK' : 0x0406,
'TT_MS_LANGID_GERMAN_GERMANY' : 0x0407,
'TT_MS_LANGID_GERMAN_SWITZERLAND' : 0x0807,
'TT_MS_LANGID_GERMAN_AUSTRIA' : 0x0c07,
'TT_MS_LANGID_GERMAN_LUXEMBOURG' : 0x1007,
'TT_MS_LANGID_GERMAN_LIECHTENSTEI' : 0x1407,
'TT_MS_LANGID_GREEK_GREECE' : 0x0408,
'TT_MS_LANGID_ENGLISH_GENERAL' : 0x0009,
'TT_MS_LANGID_ENGLISH_UNITED_STATES' : 0x0409,
'TT_MS_LANGID_ENGLISH_UNITED_KINGDOM' : 0x0809,
'TT_MS_LANGID_ENGLISH_AUSTRALIA' : 0x0c09,
'TT_MS_LANGID_ENGLISH_CANADA' : 0x1009,
'TT_MS_LANGID_ENGLISH_NEW_ZEALAND' : 0x1409,
'TT_MS_LANGID_ENGLISH_IRELAND' : 0x1809,
'TT_MS_LANGID_ENGLISH_SOUTH_AFRICA' : 0x1c09,
'TT_MS_LANGID_ENGLISH_JAMAICA' : 0x2009,
'TT_MS_LANGID_ENGLISH_CARIBBEAN' : 0x2409,
'TT_MS_LANGID_ENGLISH_BELIZE' : 0x2809,
'TT_MS_LANGID_ENGLISH_TRINIDAD' : 0x2c09,
'TT_MS_LANGID_ENGLISH_ZIMBABWE' : 0x3009,
'TT_MS_LANGID_ENGLISH_PHILIPPINES' : 0x3409,
'TT_MS_LANGID_ENGLISH_INDONESIA' : 0x3809,
'TT_MS_LANGID_ENGLISH_HONG_KONG' : 0x3c09,
'TT_MS_LANGID_ENGLISH_INDIA' : 0x4009,
'TT_MS_LANGID_ENGLISH_MALAYSIA' : 0x4409,
'TT_MS_LANGID_ENGLISH_SINGAPORE' : 0x4809,
'TT_MS_LANGID_SPANISH_SPAIN_TRADITIONAL_SORT' : 0x040a,
'TT_MS_LANGID_SPANISH_MEXICO' : 0x080a,
'TT_MS_LANGID_SPANISH_SPAIN_INTERNATIONAL_SORT' : 0x0c0a,
'TT_MS_LANGID_SPANISH_GUATEMALA' : 0x100a,
'TT_MS_LANGID_SPANISH_COSTA_RICA' : 0x140a,
'TT_MS_LANGID_SPANISH_PANAMA' : 0x180a,
'TT_MS_LANGID_SPANISH_DOMINICAN_REPUBLIC' : 0x1c0a,
'TT_MS_LANGID_SPANISH_VENEZUELA' : 0x200a,
'TT_MS_LANGID_SPANISH_COLOMBIA' : 0x240a,
'TT_MS_LANGID_SPANISH_PERU' : 0x280a,
'TT_MS_LANGID_SPANISH_ARGENTINA' : 0x2c0a,
'TT_MS_LANGID_SPANISH_ECUADOR' : 0x300a,
'TT_MS_LANGID_SPANISH_CHILE' : 0x340a,
'TT_MS_LANGID_SPANISH_URUGUAY' : 0x380a,
'TT_MS_LANGID_SPANISH_PARAGUAY' : 0x3c0a,
'TT_MS_LANGID_SPANISH_BOLIVIA' : 0x400a,
'TT_MS_LANGID_SPANISH_EL_SALVADOR' : 0x440a,
'TT_MS_LANGID_SPANISH_HONDURAS' : 0x480a,
'TT_MS_LANGID_SPANISH_NICARAGUA' : 0x4c0a,
'TT_MS_LANGID_SPANISH_PUERTO_RICO' : 0x500a,
'TT_MS_LANGID_SPANISH_UNITED_STATES' : 0x540a,
'TT_MS_LANGID_SPANISH_LATIN_AMERICA' : 0xE40a,
'TT_MS_LANGID_FINNISH_FINLAND' : 0x040b,
'TT_MS_LANGID_FRENCH_FRANCE' : 0x040c,
'TT_MS_LANGID_FRENCH_BELGIUM' : 0x080c,
'TT_MS_LANGID_FRENCH_CANADA' : 0x0c0c,
'TT_MS_LANGID_FRENCH_SWITZERLAND' : 0x100c,
'TT_MS_LANGID_FRENCH_LUXEMBOURG' : 0x140c,
'TT_MS_LANGID_FRENCH_MONACO' : 0x180c,
'TT_MS_LANGID_FRENCH_WEST_INDIES' : 0x1c0c,
'TT_MS_LANGID_FRENCH_REUNION' : 0x200c,
'TT_MS_LANGID_FRENCH_CONGO' : 0x240c,
'TT_MS_LANGID_FRENCH_SENEGAL' : 0x280c,
'TT_MS_LANGID_FRENCH_CAMEROON' : 0x2c0c,
'TT_MS_LANGID_FRENCH_COTE_D_IVOIRE' : 0x300c,
'TT_MS_LANGID_FRENCH_MALI' : 0x340c,
'TT_MS_LANGID_FRENCH_MOROCCO' : 0x380c,
'TT_MS_LANGID_FRENCH_HAITI' : 0x3c0c,
'TT_MS_LANGID_FRENCH_NORTH_AFRICA' : 0xE40c,
'TT_MS_LANGID_HEBREW_ISRAEL' : 0x040d,
'TT_MS_LANGID_HUNGARIAN_HUNGARY' : 0x040e,
'TT_MS_LANGID_ICELANDIC_ICELAND' : 0x040f,
'TT_MS_LANGID_ITALIAN_ITALY' : 0x0410,
'TT_MS_LANGID_ITALIAN_SWITZERLAND' : 0x0810,
'TT_MS_LANGID_JAPANESE_JAPAN' : 0x0411,
'TT_MS_LANGID_KOREAN_EXTENDED_WANSUNG_KOREA' : 0x0412,
'TT_MS_LANGID_KOREAN_JOHAB_KOREA' : 0x0812,
'TT_MS_LANGID_DUTCH_NETHERLANDS' : 0x0413,
'TT_MS_LANGID_DUTCH_BELGIUM' : 0x0813,
'TT_MS_LANGID_NORWEGIAN_NORWAY_BOKMAL' : 0x0414,
'TT_MS_LANGID_NORWEGIAN_NORWAY_NYNORSK' : 0x0814,
'TT_MS_LANGID_POLISH_POLAND' : 0x0415,
'TT_MS_LANGID_PORTUGUESE_BRAZIL' : 0x0416,
'TT_MS_LANGID_PORTUGUESE_PORTUGAL' : 0x0816,
'TT_MS_LANGID_RHAETO_ROMANIC_SWITZERLAND' : 0x0417,
'TT_MS_LANGID_ROMANIAN_ROMANIA' : 0x0418,
'TT_MS_LANGID_MOLDAVIAN_MOLDAVIA' : 0x0818,
'TT_MS_LANGID_RUSSIAN_RUSSIA' : 0x0419,
'TT_MS_LANGID_RUSSIAN_MOLDAVIA' : 0x0819,
'TT_MS_LANGID_CROATIAN_CROATIA' : 0x041a,
'TT_MS_LANGID_SERBIAN_SERBIA_LATIN' : 0x081a,
'TT_MS_LANGID_SERBIAN_SERBIA_CYRILLIC' : 0x0c1a,
'TT_MS_LANGID_CROATIAN_BOSNIA_HERZEGOVINA' : 0x101a,
'TT_MS_LANGID_BOSNIAN_BOSNIA_HERZEGOVINA' : 0x141a,
'TT_MS_LANGID_SERBIAN_BOSNIA_HERZ_LATIN' : 0x181a,
'TT_MS_LANGID_SERBIAN_BOSNIA_HERZ_CYRILLIC' : 0x181a,
'TT_MS_LANGID_SLOVAK_SLOVAKIA' : 0x041b,
'TT_MS_LANGID_ALBANIAN_ALBANIA' : 0x041c,
'TT_MS_LANGID_SWEDISH_SWEDEN' : 0x041d,
'TT_MS_LANGID_SWEDISH_FINLAND' : 0x081d,
'TT_MS_LANGID_THAI_THAILAND' : 0x041e,
'TT_MS_LANGID_TURKISH_TURKEY' : 0x041f,
'TT_MS_LANGID_URDU_PAKISTAN' : 0x0420,
'TT_MS_LANGID_URDU_INDIA' : 0x0820,
'TT_MS_LANGID_INDONESIAN_INDONESIA' : 0x0421,
'TT_MS_LANGID_UKRAINIAN_UKRAINE' : 0x0422,
'TT_MS_LANGID_BELARUSIAN_BELARUS' : 0x0423,
'TT_MS_LANGID_SLOVENE_SLOVENIA' : 0x0424,
'TT_MS_LANGID_ESTONIAN_ESTONIA' : 0x0425,
'TT_MS_LANGID_LATVIAN_LATVIA' : 0x0426,
'TT_MS_LANGID_LITHUANIAN_LITHUANIA' : 0x0427,
'TT_MS_LANGID_CLASSIC_LITHUANIAN_LITHUANIA' : 0x0827,
'TT_MS_LANGID_TAJIK_TAJIKISTAN' : 0x0428,
'TT_MS_LANGID_FARSI_IRAN' : 0x0429,
'TT_MS_LANGID_VIETNAMESE_VIET_NAM' : 0x042a,
'TT_MS_LANGID_ARMENIAN_ARMENIA' : 0x042b,
'TT_MS_LANGID_AZERI_AZERBAIJAN_LATIN' : 0x042c,
'TT_MS_LANGID_AZERI_AZERBAIJAN_CYRILLIC' : 0x082c,
'TT_MS_LANGID_BASQUE_SPAIN' : 0x042d,
'TT_MS_LANGID_SORBIAN_GERMANY' : 0x042e,
'TT_MS_LANGID_MACEDONIAN_MACEDONIA' : 0x042f,
'TT_MS_LANGID_SUTU_SOUTH_AFRICA' : 0x0430,
'TT_MS_LANGID_TSONGA_SOUTH_AFRICA' : 0x0431,
'TT_MS_LANGID_TSWANA_SOUTH_AFRICA' : 0x0432,
'TT_MS_LANGID_VENDA_SOUTH_AFRICA' : 0x0433,
'TT_MS_LANGID_XHOSA_SOUTH_AFRICA' : 0x0434,
'TT_MS_LANGID_ZULU_SOUTH_AFRICA' : 0x0435,
'TT_MS_LANGID_AFRIKAANS_SOUTH_AFRICA' : 0x0436,
'TT_MS_LANGID_GEORGIAN_GEORGIA' : 0x0437,
'TT_MS_LANGID_FAEROESE_FAEROE_ISLANDS' : 0x0438,
'TT_MS_LANGID_HINDI_INDIA' : 0x0439,
'TT_MS_LANGID_MALTESE_MALTA' : 0x043a,
'TT_MS_LANGID_SAMI_NORTHERN_NORWAY' : 0x043b,
'TT_MS_LANGID_SAMI_NORTHERN_SWEDEN' : 0x083b,
'TT_MS_LANGID_SAMI_NORTHERN_FINLAND' : 0x0C3b,
'TT_MS_LANGID_SAMI_LULE_NORWAY' : 0x103b,
'TT_MS_LANGID_SAMI_LULE_SWEDEN' : 0x143b,
'TT_MS_LANGID_SAMI_SOUTHERN_NORWAY' : 0x183b,
'TT_MS_LANGID_SAMI_SOUTHERN_SWEDEN' : 0x1C3b,
'TT_MS_LANGID_SAMI_SKOLT_FINLAND' : 0x203b,
'TT_MS_LANGID_SAMI_INARI_FINLAND' : 0x243b,
'TT_MS_LANGID_SAAMI_LAPONIA' : 0x043b,
'TT_MS_LANGID_SCOTTISH_GAELIC_UNITED_KINGDOM' : 0x083c,
'TT_MS_LANGID_IRISH_GAELIC_IRELAND' : 0x043c,
'TT_MS_LANGID_YIDDISH_GERMANY' : 0x043d,
'TT_MS_LANGID_MALAY_MALAYSIA' : 0x043e,
'TT_MS_LANGID_MALAY_BRUNEI_DARUSSALAM' : 0x083e,
'TT_MS_LANGID_KAZAK_KAZAKSTAN' : 0x043f,
'TT_MS_LANGID_KIRGHIZ_KIRGHIZSTAN' : 0x0440,
'TT_MS_LANGID_KIRGHIZ_KIRGHIZ_REPUBLIC' : 0x0440,
'TT_MS_LANGID_SWAHILI_KENYA' : 0x0441,
'TT_MS_LANGID_TURKMEN_TURKMENISTAN' : 0x0442,
'TT_MS_LANGID_UZBEK_UZBEKISTAN_LATIN' : 0x0443,
'TT_MS_LANGID_UZBEK_UZBEKISTAN_CYRILLIC' : 0x0843,
'TT_MS_LANGID_TATAR_TATARSTAN' : 0x0444,
'TT_MS_LANGID_BENGALI_INDIA' : 0x0445,
'TT_MS_LANGID_BENGALI_BANGLADESH' : 0x0845,
'TT_MS_LANGID_PUNJABI_INDIA' : 0x0446,
'TT_MS_LANGID_PUNJABI_ARABIC_PAKISTAN' : 0x0846,
'TT_MS_LANGID_GUJARATI_INDIA' : 0x0447,
'TT_MS_LANGID_ORIYA_INDIA' : 0x0448,
'TT_MS_LANGID_TAMIL_INDIA' : 0x0449,
'TT_MS_LANGID_TELUGU_INDIA' : 0x044a,
'TT_MS_LANGID_KANNADA_INDIA' : 0x044b,
'TT_MS_LANGID_MALAYALAM_INDIA' : 0x044c,
'TT_MS_LANGID_ASSAMESE_INDIA' : 0x044d,
'TT_MS_LANGID_MARATHI_INDIA' : 0x044e,
'TT_MS_LANGID_SANSKRIT_INDIA' : 0x044f,
'TT_MS_LANGID_MONGOLIAN_MONGOLIA' : 0x0450,
'TT_MS_LANGID_MONGOLIAN_MONGOLIA_MONGOLIAN' : 0x0850,
'TT_MS_LANGID_TIBETAN_CHINA' : 0x0451,
'TT_MS_LANGID_DZONGHKA_BHUTAN' : 0x0851,
'TT_MS_LANGID_TIBETAN_BHUTAN' : 0x0851,
'TT_MS_LANGID_WELSH_WALES' : 0x0452,
'TT_MS_LANGID_KHMER_CAMBODIA' : 0x0453,
'TT_MS_LANGID_LAO_LAOS' : 0x0454,
'TT_MS_LANGID_BURMESE_MYANMAR' : 0x0455,
'TT_MS_LANGID_GALICIAN_SPAIN' : 0x0456,
'TT_MS_LANGID_KONKANI_INDIA' : 0x0457,
'TT_MS_LANGID_MANIPURI_INDIA' : 0x0458,
'TT_MS_LANGID_SINDHI_INDIA' : 0x0459,
'TT_MS_LANGID_SINDHI_PAKISTAN' : 0x0859,
'TT_MS_LANGID_SYRIAC_SYRIA' : 0x045a,
'TT_MS_LANGID_SINHALESE_SRI_LANKA' : 0x045b,
'TT_MS_LANGID_CHEROKEE_UNITED_STATES' : 0x045c,
'TT_MS_LANGID_INUKTITUT_CANADA' : 0x045d,
'TT_MS_LANGID_AMHARIC_ETHIOPIA' : 0x045e,
'TT_MS_LANGID_TAMAZIGHT_MOROCCO' : 0x045f,
'TT_MS_LANGID_TAMAZIGHT_MOROCCO_LATIN' : 0x085f,
'TT_MS_LANGID_KASHMIRI_PAKISTAN' : 0x0460,
'TT_MS_LANGID_KASHMIRI_SASIA' : 0x0860,
'TT_MS_LANGID_KASHMIRI_INDIA' : 0x0860,
'TT_MS_LANGID_NEPALI_NEPAL' : 0x0461,
'TT_MS_LANGID_NEPALI_INDIA' : 0x0861,
'TT_MS_LANGID_FRISIAN_NETHERLANDS' : 0x0462,
'TT_MS_LANGID_PASHTO_AFGHANISTAN' : 0x0463,
'TT_MS_LANGID_FILIPINO_PHILIPPINES' : 0x0464,
'TT_MS_LANGID_DHIVEHI_MALDIVES' : 0x0465,
'TT_MS_LANGID_DIVEHI_MALDIVES' : 0x0465,
'TT_MS_LANGID_EDO_NIGERIA' : 0x0466,
'TT_MS_LANGID_FULFULDE_NIGERIA' : 0x0467,
'TT_MS_LANGID_HAUSA_NIGERIA' : 0x0468,
'TT_MS_LANGID_IBIBIO_NIGERIA' : 0x0469,
'TT_MS_LANGID_YORUBA_NIGERIA' : 0x046a,
'TT_MS_LANGID_QUECHUA_BOLIVIA' : 0x046b,
'TT_MS_LANGID_QUECHUA_ECUADOR' : 0x086b,
'TT_MS_LANGID_QUECHUA_PERU' : 0x0c6b,
'TT_MS_LANGID_SEPEDI_SOUTH_AFRICA' : 0x046c,
'TT_MS_LANGID_SOTHO_SOUTHERN_SOUTH_AFRICA' : 0x046c,
'TT_MS_LANGID_IGBO_NIGERIA' : 0x0470,
'TT_MS_LANGID_KANURI_NIGERIA' : 0x0471,
'TT_MS_LANGID_OROMO_ETHIOPIA' : 0x0472,
'TT_MS_LANGID_TIGRIGNA_ETHIOPIA' : 0x0473,
'TT_MS_LANGID_TIGRIGNA_ERYTHREA' : 0x0873,
'TT_MS_LANGID_TIGRIGNA_ERYTREA' : 0x0873,
'TT_MS_LANGID_GUARANI_PARAGUAY' : 0x0474,
'TT_MS_LANGID_HAWAIIAN_UNITED_STATES' : 0x0475,
'TT_MS_LANGID_LATIN' : 0x0476,
'TT_MS_LANGID_SOMALI_SOMALIA' : 0x0477,
'TT_MS_LANGID_YI_CHINA' : 0x0478,
'TT_MS_LANGID_PAPIAMENTU_NETHERLANDS_ANTILLES' : 0x0479,
'TT_MS_LANGID_UIGHUR_CHINA' : 0x0480,
'TT_MS_LANGID_MAORI_NEW_ZEALAND' : 0x0481 }
globals().update(TT_MS_LANGIDS)
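# Hedged usage sketch (not part of the original module): the dictionary maps
# FreeType constant names to 16-bit Microsoft language IDs, and the
# globals().update() call above also exposes every name at module level.
def _example_langid_lookup():
    assert TT_MS_LANGIDS['TT_MS_LANGID_ENGLISH_UNITED_STATES'] == 0x0409
    assert TT_MS_LANGID_FRENCH_FRANCE == 0x040c
    # Reverse lookup: all constant names that share a given language ID.
    names = [name for name, value in TT_MS_LANGIDS.items() if value == 0x0409]
    return names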
|
|
import json
import collections
import operator
from datetime import timedelta
from django.core import mail
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages import get_messages
from django.contrib.messages.storage.fallback import FallbackStorage
from django.contrib.sites.shortcuts import get_current_site
from rest_framework.test import APIRequestFactory, force_authenticate
from geokey import version
from geokey.core.tests.helpers import render_helpers
from geokey.users.tests.model_factories import UserFactory
from geokey.projects.models import Project
from geokey.projects.tests.model_factories import ProjectFactory
from geokey.categories.models import Category
from geokey.categories.tests.model_factories import (
CategoryFactory,
TextFieldFactory,
LookupFieldFactory,
LookupValueFactory
)
from geokey.contributions.models import Location, Observation
from geokey_airquality import views
from geokey_airquality.models import (
AirQualityProject,
AirQualityCategory,
AirQualityField,
AirQualityLocation,
AirQualityMeasurement
)
from geokey_airquality.tests.model_factories import (
AirQualityProjectFactory,
AirQualityCategoryFactory,
AirQualityFieldFactory,
AirQualityLocationFactory,
AirQualityMeasurementFactory
)
permission_denied = 'Managing Air Quality is for superusers only.'
# ###########################
# ADMIN PAGES
# ###########################
class AQIndexViewTest(TestCase):
def setUp(self):
self.superuser = UserFactory.create(**{'is_superuser': True})
self.user = UserFactory.create(**{'is_superuser': False})
self.anonym = AnonymousUser()
self.template = 'aq_index.html'
self.view = views.AQIndexView.as_view()
self.request = HttpRequest()
self.request.method = 'GET'
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
self.project = AirQualityProjectFactory.create(
project=ProjectFactory.create()
)
def test_get_with_anonymous(self):
self.request.user = self.anonym
response = self.view(self.request)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/account/login/', response['location'])
def test_get_with_user(self):
self.request.user = self.user
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Permission denied.',
'error_description': permission_denied
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_with_superuser(self):
self.request.user = self.superuser
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'projects': [self.project],
'total_locations': 0,
'total_measurements': 0
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
class AQExportViewTest(TestCase):
def setUp(self):
self.superuser = UserFactory.create(**{'is_superuser': True})
self.user = UserFactory.create(**{'is_superuser': False})
self.anonym = AnonymousUser()
self.view = views.AQExportView.as_view()
self.request = HttpRequest()
self.request.method = 'GET'
def test_get_with_anonymous(self):
self.request.user = self.anonym
response = self.view(self.request, file='measurements')
self.assertEqual(response.status_code, 403)
def test_get_with_user(self):
self.request.user = self.user
response = self.view(self.request, file='measurements')
self.assertEqual(response.status_code, 403)
def test_get_with_superuser(self):
self.request.user = self.superuser
response = self.view(self.request, file='measurements')
self.assertEqual(response.status_code, 200)
class AQAddViewTest(TestCase):
def setUp(self):
self.superuser = UserFactory.create(**{'is_superuser': True})
self.user = UserFactory.create(**{'is_superuser': False})
self.anonym = AnonymousUser()
self.template = 'aq_add.html'
self.view = views.AQAddView.as_view()
self.request = HttpRequest()
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
self.project = ProjectFactory.create()
self.category_types = collections.OrderedDict(
sorted(dict(AirQualityCategory.TYPES).items())
)
self.field_types = collections.OrderedDict(
sorted(
dict(AirQualityField.TYPES).items(),
key=operator.itemgetter(1)
)
)
def test_get_with_anonymous(self):
self.request.user = self.anonym
self.request.method = 'GET'
response = self.view(self.request)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/account/login/', response['location'])
def test_get_with_user(self):
self.request.user = self.user
self.request.method = 'GET'
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Permission denied.',
'error_description': permission_denied
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_with_superuser(self):
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'projects': [self.project],
'category_types': self.category_types,
'field_types': self.field_types
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_when_project_marked_as_inactive(self):
self.project.status = 'inactive'
self.project.save()
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'projects': [],
'category_types': self.category_types,
'field_types': self.field_types
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_when_project_marked_as_deleted(self):
self.project.status = 'deleted'
self.project.save()
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'projects': [],
'category_types': self.category_types,
'field_types': self.field_types
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_post_with_anonymous(self):
self.request.user = self.anonym
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(self.request)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/account/login/', response['location'])
self.assertEqual(
Project.objects.get(pk=self.project.id).islocked,
False
)
def test_post_with_user(self):
self.request.user = self.user
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Permission denied.',
'error_description': permission_denied
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
self.assertEqual(
Project.objects.get(pk=self.project.id).islocked,
False
)
def test_post_when_project_marked_as_inactive(self):
self.project.status = 'inactive'
self.project.save()
self.request.user = self.superuser
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'projects': [],
'category_types': self.category_types,
'field_types': self.field_types,
'messages': get_messages(self.request)
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
self.assertEqual(
Project.objects.get(pk=self.project.id).islocked,
False
)
def test_post_when_project_marked_as_deleted(self):
self.project.status = 'deleted'
self.project.save()
self.request.user = self.superuser
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(self.request).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'projects': [],
'category_types': self.category_types,
'field_types': self.field_types,
'messages': get_messages(self.request)
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
class AQProjectViewTest(TestCase):
def setUp(self):
self.superuser = UserFactory.create(**{'is_superuser': True})
self.user = UserFactory.create(**{'is_superuser': False})
self.anonym = AnonymousUser()
self.template = 'aq_project.html'
self.view = views.AQProjectView.as_view()
self.request = HttpRequest()
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
self.project = ProjectFactory.create()
self.aq_project = AirQualityProjectFactory.create(project=self.project)
self.category_types = collections.OrderedDict(
sorted(dict(AirQualityCategory.TYPES).items())
)
self.field_types = collections.OrderedDict(
sorted(
dict(AirQualityField.TYPES).items(),
key=operator.itemgetter(1)
)
)
def test_get_with_anonymous(self):
self.request.user = self.anonym
self.request.method = 'GET'
response = self.view(
self.request,
project_id=self.aq_project.id
)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/account/login/', response['location'])
def test_get_with_user(self):
self.request.user = self.user
self.request.method = 'GET'
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Permission denied.',
'error_description': permission_denied
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_with_superuser(self):
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'projects': [self.project],
'project': self.aq_project,
'category_types': self.category_types,
'field_types': self.field_types
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_when_project_marked_as_inactive(self):
self.project.status = 'inactive'
self.project.save()
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_when_project_marked_as_deleted(self):
self.project.status = 'deleted'
self.project.save()
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_when_no_project(self):
Project.objects.get(pk=self.project.id).delete()
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_when_no_aq_project(self):
AirQualityProject.objects.get(pk=self.aq_project.id).delete()
self.request.user = self.superuser
self.request.method = 'GET'
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_post_with_anonymous(self):
self.request.user = self.anonym
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(
self.request,
project_id=self.aq_project.id
)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/account/login/', response['location'])
def test_post_with_user(self):
self.request.user = self.user
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Permission denied.',
'error_description': permission_denied
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_post_when_project_marked_as_inactive(self):
self.project.status = 'inactive'
self.project.save()
self.request.user = self.superuser
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_post_when_project_marked_as_deleted(self):
self.project.status = 'deleted'
self.project.save()
self.request.user = self.superuser
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_post_when_no_project(self):
Project.objects.get(pk=self.project.id).delete()
self.request.user = self.superuser
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_post_when_no_aq_project(self):
AirQualityProject.objects.get(pk=self.aq_project.id).delete()
self.request.user = self.superuser
self.request.method = 'POST'
self.request.POST = {
'project': self.project.id
}
response = self.view(
self.request,
project_id=self.aq_project.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Not found.',
'error_description': 'Project not found.'
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
class AQRemoveViewTest(TestCase):
def setUp(self):
self.superuser = UserFactory.create(**{'is_superuser': True})
self.user = UserFactory.create(**{'is_superuser': False})
self.anonym = AnonymousUser()
self.template = 'base.html'
self.view = views.AQRemoveView.as_view()
self.request = HttpRequest()
self.request.method = 'GET'
setattr(self.request, 'session', 'session')
messages = FallbackStorage(self.request)
setattr(self.request, '_messages', messages)
self.project = ProjectFactory.create()
self.category = CategoryFactory.create()
self.field = TextFieldFactory.create()
self.aq_project_1 = AirQualityProjectFactory.create(
project=self.project
)
self.aq_category_1 = AirQualityCategoryFactory.create(
category=self.category,
project=self.aq_project_1
)
self.aq_field_1 = AirQualityFieldFactory.create(
field=self.field,
category=self.aq_category_1
)
self.aq_project_2 = AirQualityProjectFactory.create(
project=self.project
)
self.aq_category_2 = AirQualityCategoryFactory.create(
category=self.category,
project=self.aq_project_2
)
        self.aq_field_2 = AirQualityFieldFactory.create(
field=self.field,
category=self.aq_category_2
)
def test_get_with_anonymous(self):
self.request.user = self.anonym
response = self.view(
self.request,
project_id=self.aq_project_1.id
)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/account/login/', response['location'])
def test_get_with_user(self):
self.request.user = self.user
response = self.view(
self.request,
project_id=self.aq_project_1.id
).render()
rendered = render_to_string(
self.template,
{
'PLATFORM_NAME': get_current_site(self.request).name,
'GEOKEY_VERSION': version.get_version(),
'user': self.request.user,
'error': 'Permission denied.',
'error_description': permission_denied
}
)
self.assertEqual(response.status_code, 200)
response = render_helpers.remove_csrf(response.content.decode('utf-8'))
self.assertEqual(response, rendered)
def test_get_with_superuser(self):
self.request.user = self.superuser
response = self.view(
self.request,
project_id=self.aq_project_1.id
)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/airquality/', response['location'])
self.assertEqual(AirQualityProject.objects.count(), 1)
self.assertEqual(AirQualityCategory.objects.count(), 1)
self.assertEqual(AirQualityField.objects.count(), 1)
def test_get_when_no_project(self):
AirQualityProject.objects.get(pk=self.aq_project_1.id).delete()
self.request.user = self.superuser
response = self.view(
self.request,
project_id=self.aq_project_1.id
)
self.assertEqual(response.status_code, 302)
self.assertIn('/admin/airquality/', response['location'])
# ###########################
# ADMIN AJAX
# ###########################
class AQProjectsSingleAjaxViewTest(TestCase):
def setUp(self):
self.superuser = UserFactory.create(**{'is_superuser': True})
self.creator = UserFactory.create(**{'is_superuser': False})
self.user = UserFactory.create(**{'is_superuser': False})
self.anonym = AnonymousUser()
self.project = ProjectFactory.create(add_contributors=[self.creator])
self.url = '/ajax/airquality/projects/%s/' % self.project.id
self.factory = APIRequestFactory()
self.request_get = self.factory.get(self.url)
self.view = views.AQProjectsSingleAjaxView.as_view()
def test_get_with_anonymous(self):
force_authenticate(self.request_get, user=self.anonym)
response = self.view(
self.request_get,
project_id=self.project.id
).render()
self.assertEqual(response.status_code, 403)
def test_get_with_user(self):
force_authenticate(self.request_get, user=self.user)
response = self.view(
self.request_get,
project_id=self.project.id
).render()
self.assertEqual(response.status_code, 403)
def test_get_with_creator(self):
force_authenticate(self.request_get, user=self.creator)
response = self.view(
self.request_get,
project_id=self.project.id
).render()
self.assertEqual(response.status_code, 403)
def test_get_with_superuser(self):
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id
).render()
project = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(project['id'], self.project.id)
def test_get_when_project_marked_as_inactive(self):
self.project.status = 'inactive'
self.project.save()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id
).render()
self.assertEqual(response.status_code, 404)
def test_get_when_project_marked_as_deleted(self):
self.project.status = 'deleted'
self.project.save()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id
).render()
self.assertEqual(response.status_code, 404)
def test_get_when_no_project(self):
Project.objects.get(pk=self.project.id).delete()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id
).render()
self.assertEqual(response.status_code, 404)
class AQCategoriesSingleAjaxViewTest(TestCase):
def setUp(self):
self.superuser = UserFactory.create(**{'is_superuser': True})
self.creator = UserFactory.create(**{'is_superuser': False})
self.user = UserFactory.create(**{'is_superuser': False})
self.anonym = AnonymousUser()
self.project = ProjectFactory.create(add_contributors=[self.creator])
self.category = CategoryFactory.create(
creator=self.creator,
project=self.project
)
self.url = '/ajax/airquality/projects/%s/categories/%s/' % (
self.project.id,
self.category.id
)
self.factory = APIRequestFactory()
self.request_get = self.factory.get(self.url)
self.view = views.AQCategoriesSingleAjaxView.as_view()
def test_get_with_anonymous(self):
force_authenticate(self.request_get, user=self.anonym)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 403)
def test_get_with_user(self):
force_authenticate(self.request_get, user=self.user)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 403)
def test_get_with_creator(self):
force_authenticate(self.request_get, user=self.creator)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 403)
def test_get_with_superuser(self):
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
category = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(category['id'], self.category.id)
def test_get_when_project_marked_as_inactive(self):
self.project.status = 'inactive'
self.project.save()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 404)
def test_get_when_project_marked_as_deleted(self):
self.project.status = 'deleted'
self.project.save()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 404)
def test_get_when_no_project(self):
Project.objects.get(pk=self.project.id).delete()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 404)
def test_get_when_category_marked_as_inactive(self):
self.category.status = 'inactive'
self.category.save()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 404)
def test_get_when_no_category(self):
Category.objects.get(pk=self.category.id).delete()
force_authenticate(self.request_get, user=self.superuser)
response = self.view(
self.request_get,
project_id=self.project.id,
category_id=self.category.id
).render()
self.assertEqual(response.status_code, 404)
# ###########################
# PUBLIC API
# ###########################
class AQSheetAPIViewTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.user = UserFactory.create()
self.anonym = AnonymousUser()
self.url = '/api/airquality/sheet/'
self.factory = APIRequestFactory()
self.request_get = self.factory.get(self.url)
self.view = views.AQSheetAPIView.as_view()
self.location_1 = AirQualityLocationFactory.create(
creator=self.creator
)
self.location_2 = AirQualityLocationFactory.create(
creator=UserFactory.create()
)
AirQualityMeasurementFactory.create(
location=self.location_1,
creator=self.location_1.creator,
finished=timezone.now()
)
AirQualityMeasurementFactory.create(
location=self.location_1,
creator=self.location_1.creator
)
def test_get_with_anonymous(self):
force_authenticate(self.request_get, user=self.anonym)
response = self.view(self.request_get).render()
self.assertEqual(response.status_code, 403)
def test_get_with_user(self):
force_authenticate(self.request_get, user=self.user)
response = self.view(self.request_get).render()
self.assertEqual(response.status_code, 204)
        self.assertEqual(len(mail.outbox), 1)
def test_get_with_creator(self):
force_authenticate(self.request_get, user=self.creator)
response = self.view(self.request_get).render()
self.assertEqual(response.status_code, 204)
        self.assertEqual(len(mail.outbox), 1)
class AQProjectsAPIViewTest(TestCase):
def setUp(self):
self.contributor = UserFactory.create()
self.user = UserFactory.create()
self.anonym = AnonymousUser()
self.url = '/api/airquality/projects/'
self.factory = APIRequestFactory()
self.request_get = self.factory.get(self.url)
self.view = views.AQProjectsAPIView.as_view()
self.project_1 = ProjectFactory.create(
add_contributors=[self.contributor]
)
self.project_2 = ProjectFactory.create(
add_contributors=[self.contributor]
)
self.project_3 = ProjectFactory.create(
add_contributors=[self.contributor]
)
self.aq_project_1 = AirQualityProjectFactory.create(
project=self.project_1
)
self.aq_project_2 = AirQualityProjectFactory.create(
status='inactive',
project=self.project_2
)
def test_get_with_anonymous(self):
force_authenticate(self.request_get, user=self.anonym)
response = self.view(self.request_get).render()
self.assertEqual(response.status_code, 403)
def test_get_with_user(self):
force_authenticate(self.request_get, user=self.user)
response = self.view(self.request_get).render()
projects = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(projects), 0)
def test_get_with_contributor(self):
force_authenticate(self.request_get, user=self.contributor)
response = self.view(self.request_get).render()
projects = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(projects), 1)
self.assertEqual(projects[0]['id'], self.project_1.id)
def test_get_when_original_project_deleted(self):
self.aq_project_3 = AirQualityProjectFactory.create(
project=self.project_3
)
self.project_3.delete()
force_authenticate(self.request_get, user=self.contributor)
response = self.view(self.request_get).render()
projects = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(projects), 1)
self.assertEqual(AirQualityProject.objects.count(), 2)
class AQLocationsAPIViewTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.user = UserFactory.create()
self.anonym = AnonymousUser()
self.url = '/api/airquality/locations/'
self.data = {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [-0.134, 51.524]
},
'name': 'Test Location',
'properties': {
'distance': 2
}
}
self.factory = APIRequestFactory()
self.request_get = self.factory.get(self.url)
self.request_post = self.factory.post(
self.url,
json.dumps(self.data),
content_type='application/json'
)
self.view = views.AQLocationsAPIView.as_view()
self.location_1 = AirQualityLocationFactory.create(
creator=self.creator
)
self.location_2 = AirQualityLocationFactory.create(
creator=UserFactory.create()
)
def test_get_with_anonymous(self):
force_authenticate(self.request_get, user=self.anonym)
response = self.view(self.request_get).render()
self.assertEqual(response.status_code, 403)
def test_get_with_user(self):
force_authenticate(self.request_get, user=self.user)
response = self.view(self.request_get).render()
locations = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(locations), 0)
def test_get_with_creator(self):
force_authenticate(self.request_get, user=self.creator)
response = self.view(self.request_get).render()
locations = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(locations), 1)
self.assertEqual(len(locations[0]['measurements']), 0)
def test_get_together_with_measurements(self):
AirQualityMeasurementFactory.create(
location=self.location_1,
creator=self.location_1.creator
)
AirQualityMeasurementFactory.create(
location=self.location_2,
creator=self.location_2.creator
)
force_authenticate(self.request_get, user=self.creator)
response = self.view(self.request_get).render()
locations = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(locations), 1)
self.assertEqual(len(locations[0]['measurements']), 1)
def test_post_with_anonymous(self):
force_authenticate(self.request_post, user=self.anonym)
response = self.view(self.request_post).render()
self.assertEqual(response.status_code, 403)
self.assertEqual(AirQualityLocation.objects.count(), 2)
def test_post_with_user(self):
force_authenticate(self.request_post, user=self.user)
response = self.view(self.request_post).render()
self.assertEqual(response.status_code, 201)
self.assertEqual(AirQualityLocation.objects.count(), 3)
class AQLocationsSingleAPIViewTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.user = UserFactory.create()
self.anonym = AnonymousUser()
self.location = AirQualityLocationFactory.create(creator=self.creator)
self.url = '/api/airquality/locations/%s/' % self.location.id
self.data = {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [-0.124, 55.171]
},
'name': 'Updated Test Location',
'properties': {
'height': 11.2,
'distance': None,
'characteristics': 'Beautiful location'
}
}
self.factory = APIRequestFactory()
self.request_patch = self.factory.patch(
self.url,
json.dumps(self.data),
content_type='application/json'
)
self.request_delete = self.factory.delete(
self.url,
content_type='application/json'
)
self.view = views.AQLocationsSingleAPIView.as_view()
def test_patch_with_anonymous(self):
force_authenticate(self.request_patch, user=self.anonym)
response = self.view(
self.request_patch,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 403)
def test_patch_with_user(self):
force_authenticate(self.request_patch, user=self.user)
response = self.view(
self.request_patch,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 403)
def test_patch_with_creator(self):
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location.id
).render()
reference = AirQualityLocation.objects.get(pk=self.location.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(reference.name, self.data.get('name'))
self.assertEqual(reference.properties, self.data.get('properties'))
def test_patch_when_no_location(self):
AirQualityLocation.objects.get(pk=self.location.id).delete()
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 404)
def test_delete_with_anonymous(self):
force_authenticate(self.request_delete, user=self.anonym)
response = self.view(
self.request_delete,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 403)
self.assertEqual(AirQualityLocation.objects.count(), 1)
def test_delete_with_user(self):
force_authenticate(self.request_delete, user=self.user)
response = self.view(
self.request_delete,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 403)
self.assertEqual(AirQualityLocation.objects.count(), 1)
def test_delete_with_creator(self):
force_authenticate(self.request_delete, user=self.creator)
response = self.view(
self.request_delete,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 204)
self.assertEqual(
AirQualityLocation.objects.filter(pk=self.location.id).exists(),
False
)
def test_delete_when_there_are_measurements(self):
self.measurement_1 = AirQualityMeasurementFactory.create(
location=self.location,
creator=self.location.creator
)
self.measurement_2 = AirQualityMeasurementFactory.create(
location=self.location,
creator=self.location.creator
)
force_authenticate(self.request_delete, user=self.creator)
response = self.view(
self.request_delete,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 204)
self.assertEqual(
AirQualityLocation.objects.filter(pk=self.location.id).exists(),
False
)
self.assertEqual(AirQualityMeasurement.objects.count(), 0)
def test_delete_when_no_location(self):
AirQualityLocation.objects.get(pk=self.location.id).delete()
force_authenticate(self.request_delete, user=self.creator)
response = self.view(
self.request_delete,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 404)
class AQMeasurementsAPIViewTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.user = UserFactory.create()
self.anonym = AnonymousUser()
self.location = AirQualityLocationFactory.create(
creator=self.creator,
properties={
'height': 2,
'distance': 10
}
)
self.url = '/api/airquality/locations/%s/measurements/' % (
self.location.id
)
self.data = {
'barcode': 123456,
'started': (timezone.now() - timedelta(days=28)).isoformat(),
'called': timezone.now().isoformat()
}
self.factory = APIRequestFactory()
self.request_post = self.factory.post(
self.url,
json.dumps(self.data),
content_type='application/json'
)
self.view = views.AQMeasurementsAPIView.as_view()
self.project = ProjectFactory.create(add_contributors=[self.creator])
self.aq_project = AirQualityProjectFactory.create(project=self.project)
self.category = CategoryFactory.create(project=self.project)
self.aq_category = AirQualityCategoryFactory.create(
type='40-60',
category=self.category,
project=self.aq_project
)
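        # One GeoKey text/lookup field per air quality attribute; together they
        # provide the mapping used when a finished measurement is submitted as
        # a regular observation (see test_post_when_submitting below).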
self.field_1 = TextFieldFactory.create(category=self.category)
self.field_2 = TextFieldFactory.create(category=self.category)
self.field_3 = TextFieldFactory.create(category=self.category)
self.field_4 = TextFieldFactory.create(category=self.category)
self.field_5 = TextFieldFactory.create(category=self.category)
self.field_6 = TextFieldFactory.create(category=self.category)
self.field_7 = TextFieldFactory.create(category=self.category)
self.field_8 = TextFieldFactory.create(category=self.category)
self.field_9 = TextFieldFactory.create(category=self.category)
self.field_10 = TextFieldFactory.create(category=self.category)
self.field_11 = LookupFieldFactory.create(category=self.category)
LookupValueFactory(**{
'field': self.field_11,
'name': 'Yes'
})
LookupValueFactory(**{
'field': self.field_11,
'name': 'No'
})
self.aq_field_1 = AirQualityFieldFactory.create(
type='01. Results',
field=self.field_1,
category=self.aq_category
)
self.aq_field_2 = AirQualityFieldFactory.create(
type='02. Date out',
field=self.field_2,
category=self.aq_category
)
self.aq_field_3 = AirQualityFieldFactory.create(
type='03. Time out',
field=self.field_3,
category=self.aq_category
)
self.aq_field_4 = AirQualityFieldFactory.create(
type='04. Date collected',
field=self.field_4,
category=self.aq_category
)
self.aq_field_5 = AirQualityFieldFactory.create(
type='05. Time collected',
field=self.field_5,
category=self.aq_category
)
self.aq_field_6 = AirQualityFieldFactory.create(
type='06. Exposure time (min)',
field=self.field_6,
category=self.aq_category
)
self.aq_field_7 = AirQualityFieldFactory.create(
type='07. Distance from the road',
field=self.field_7,
category=self.aq_category
)
self.aq_field_8 = AirQualityFieldFactory.create(
type='08. Height from ground',
field=self.field_8,
category=self.aq_category
)
self.aq_field_9 = AirQualityFieldFactory.create(
type='09. Site characteristics',
field=self.field_9,
category=self.aq_category
)
self.aq_field_10 = AirQualityFieldFactory.create(
type='10. Additional details',
field=self.field_10,
category=self.aq_category
)
self.aq_field_11 = AirQualityFieldFactory.create(
type='11. Diffusion tube made by students',
field=self.field_11,
category=self.aq_category
)
def test_post_with_anonymous(self):
force_authenticate(self.request_post, user=self.anonym)
response = self.view(
self.request_post,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 403)
self.assertEqual(AirQualityMeasurement.objects.count(), 0)
def test_post_with_user(self):
force_authenticate(self.request_post, user=self.user)
response = self.view(
self.request_post,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 403)
self.assertEqual(AirQualityMeasurement.objects.count(), 0)
def test_post_with_creator(self):
force_authenticate(self.request_post, user=self.creator)
response = self.view(
self.request_post,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 201)
self.assertEqual(AirQualityMeasurement.objects.count(), 1)
def test_post_when_submitting_and_no_project(self):
self.data['finished'] = timezone.now().isoformat()
self.data['project'] = 158
self.data['properties'] = {'results': 45.15}
self.request_post = self.factory.post(
self.url,
json.dumps(self.data),
content_type='application/json'
)
force_authenticate(self.request_post, user=self.creator)
response = self.view(
self.request_post,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 201)
self.assertEqual(AirQualityMeasurement.objects.count(), 1)
self.assertEqual(Location.objects.count(), 0)
self.assertEqual(Observation.objects.count(), 0)
def test_post_when_submitting(self):
self.data['finished'] = timezone.now().isoformat()
self.data['project'] = self.project.id
self.data['properties'] = {'results': 48.05}
self.request_post = self.factory.post(
self.url,
json.dumps(self.data),
content_type='application/json'
)
force_authenticate(self.request_post, user=self.creator)
response = self.view(
self.request_post,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 204)
self.assertEqual(AirQualityMeasurement.objects.count(), 0)
self.assertEqual(Location.objects.count(), 1)
self.assertEqual(Observation.objects.count(), 1)
def test_post_when_no_location(self):
AirQualityLocation.objects.get(pk=self.location.id).delete()
response = self.view(
self.request_post,
location_id=self.location.id
).render()
self.assertEqual(response.status_code, 404)
self.assertEqual(AirQualityMeasurement.objects.count(), 0)
class AQMeasurementsSingleAPIViewTest(TestCase):
def setUp(self):
self.creator = UserFactory.create()
self.user = UserFactory.create()
self.anonym = AnonymousUser()
self.location_1 = AirQualityLocationFactory.create(
creator=self.creator,
properties={
'additional_details': 'Heavy traffic.'
}
)
self.location_2 = AirQualityLocationFactory.create(creator=self.user)
self.measurement_1 = AirQualityMeasurementFactory.create(
location=self.location_1,
creator=self.location_1.creator
)
self.measurement_2 = AirQualityMeasurementFactory.create(
location=self.location_2,
creator=self.location_2.creator
)
self.url = '/api/airquality/locations/%s/measurements/%s/' % (
self.location_1.id,
self.measurement_1.id
)
self.data = {
'barcode': self.measurement_1.barcode,
'started': (timezone.now() - timedelta(days=28)).isoformat(),
'finished': timezone.now().isoformat(),
'called': timezone.now().isoformat()
}
self.factory = APIRequestFactory()
self.request_patch = self.factory.patch(
self.url,
json.dumps(self.data),
content_type='application/json'
)
self.request_delete = self.factory.delete(
self.url,
content_type='application/json'
)
self.view = views.AQMeasurementsSingleAPIView.as_view()
self.project = ProjectFactory.create(add_contributors=[self.creator])
self.aq_project = AirQualityProjectFactory.create(project=self.project)
self.category = CategoryFactory.create(project=self.project)
self.aq_category = AirQualityCategoryFactory.create(
type='60-80',
category=self.category,
project=self.aq_project
)
self.field_1 = TextFieldFactory.create(category=self.category)
self.field_2 = TextFieldFactory.create(category=self.category)
self.field_3 = TextFieldFactory.create(category=self.category)
self.field_4 = TextFieldFactory.create(category=self.category)
self.field_5 = TextFieldFactory.create(category=self.category)
self.field_6 = TextFieldFactory.create(category=self.category)
self.field_7 = TextFieldFactory.create(category=self.category)
self.field_8 = TextFieldFactory.create(category=self.category)
self.field_9 = TextFieldFactory.create(category=self.category)
self.field_10 = TextFieldFactory.create(category=self.category)
self.field_11 = LookupFieldFactory.create(category=self.category)
LookupValueFactory(**{
'field': self.field_11,
'name': 'Yes'
})
LookupValueFactory(**{
'field': self.field_11,
'name': 'No'
})
self.aq_field_1 = AirQualityFieldFactory.create(
type='01. Results',
field=self.field_1,
category=self.aq_category
)
self.aq_field_2 = AirQualityFieldFactory.create(
type='02. Date out',
field=self.field_2,
category=self.aq_category
)
self.aq_field_3 = AirQualityFieldFactory.create(
type='03. Time out',
field=self.field_3,
category=self.aq_category
)
self.aq_field_4 = AirQualityFieldFactory.create(
type='04. Date collected',
field=self.field_4,
category=self.aq_category
)
self.aq_field_5 = AirQualityFieldFactory.create(
type='05. Time collected',
field=self.field_5,
category=self.aq_category
)
self.aq_field_6 = AirQualityFieldFactory.create(
type='06. Exposure time (min)',
field=self.field_6,
category=self.aq_category
)
self.aq_field_7 = AirQualityFieldFactory.create(
type='07. Distance from the road',
field=self.field_7,
category=self.aq_category
)
self.aq_field_8 = AirQualityFieldFactory.create(
type='08. Height from ground',
field=self.field_8,
category=self.aq_category
)
self.aq_field_9 = AirQualityFieldFactory.create(
type='09. Site characteristics',
field=self.field_9,
category=self.aq_category
)
self.aq_field_10 = AirQualityFieldFactory.create(
type='10. Additional details',
field=self.field_10,
category=self.aq_category
)
self.aq_field_11 = AirQualityFieldFactory.create(
type='11. Diffusion tube made by students',
field=self.field_11,
category=self.aq_category
)
def test_patch_with_anonymous(self):
force_authenticate(self.request_patch, user=self.anonym)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 403)
def test_patch_with_user(self):
force_authenticate(self.request_patch, user=self.user)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 403)
def test_patch_with_creator(self):
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 200)
def test_patch_when_changing_barcode(self):
self.data['barcode'] = 451274
self.request_patch = self.factory.patch(
self.url,
json.dumps(self.data),
content_type='application/json'
)
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
reference = AirQualityMeasurement.objects.get(
pk=self.measurement_1.id
)
self.assertEqual(response.status_code, 200)
self.assertEqual(reference.barcode, str(self.data['barcode']))
def test_patch_when_submitting_and_no_project(self):
self.data['project'] = 183
self.data['properties'] = {'results': 70.51}
self.request_patch = self.factory.patch(
self.url,
json.dumps(self.data),
content_type='application/json'
)
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 200)
self.assertEqual(
AirQualityMeasurement.objects.filter(
pk=self.measurement_1.id).exists(),
True
)
self.assertEqual(Location.objects.count(), 0)
self.assertEqual(Observation.objects.count(), 0)
def test_patch_when_submitting(self):
self.data['project'] = self.project.id
self.data['properties'] = {'results': 72.78}
self.request_patch = self.factory.patch(
self.url,
json.dumps(self.data),
content_type='application/json'
)
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 204)
self.assertEqual(
AirQualityMeasurement.objects.filter(
pk=self.measurement_1.id).exists(),
False
)
self.assertEqual(Location.objects.count(), 1)
self.assertEqual(Observation.objects.count(), 1)
def test_patch_when_no_location(self):
AirQualityLocation.objects.get(pk=self.location_1.id).delete()
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 404)
def test_patch_when_no_measurement(self):
AirQualityMeasurement.objects.get(pk=self.measurement_1.id).delete()
force_authenticate(self.request_patch, user=self.creator)
response = self.view(
self.request_patch,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 404)
def test_delete_with_anonymous(self):
force_authenticate(self.request_delete, user=self.anonym)
response = self.view(
self.request_delete,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 403)
self.assertEqual(AirQualityMeasurement.objects.count(), 2)
def test_delete_with_user(self):
force_authenticate(self.request_delete, user=self.user)
response = self.view(
self.request_delete,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 403)
self.assertEqual(AirQualityMeasurement.objects.count(), 2)
def test_delete_with_creator(self):
force_authenticate(self.request_delete, user=self.creator)
response = self.view(
self.request_delete,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 204)
self.assertEqual(
AirQualityMeasurement.objects.filter(
pk=self.measurement_1.id).exists(),
False
)
def test_delete_when_no_location(self):
AirQualityLocation.objects.get(pk=self.location_1.id).delete()
force_authenticate(self.request_delete, user=self.creator)
response = self.view(
self.request_delete,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 404)
def test_delete_when_no_measurement(self):
AirQualityMeasurement.objects.get(pk=self.measurement_1.id).delete()
force_authenticate(self.request_delete, user=self.creator)
response = self.view(
self.request_delete,
location_id=self.location_1.id,
measurement_id=self.measurement_1.id
).render()
self.assertEqual(response.status_code, 404)
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from ipywidgets import *
from SimPEG import Mesh, Maps, EM, Utils
# from pymatsolver import PardisoSolver
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from scipy.constants import mu_0
import requests
from io import StringIO
from .Base import widgetify
from .DipoleWidgetFD import DisPosNegvalues
from .BiotSavart import BiotSavartFun
class TDEMHorizontalLoopCylWidget(object):
"""TDEMCylWidgete"""
survey = None
srcList = None
mesh = None
f = None
activeCC = None
srcLoc = None
mesh2D = None
mu = None
counter = 0
def __init__(self):
self.genMesh()
self.getCoreDomain()
# url = "http://em.geosci.xyz/_images/disc_dipole.png"
# response = requests.get(url)
# self.im = Image.open(StringIO(response.content))
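        # Observation times for the soundings: 41 log-spaced samples between
        # 1e-5 s and 1e-2 s.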
self.time = np.logspace(-5, -2, 41)
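    # mirrorArray reflects the half-plane (r >= 0) solution about the symmetry
    # axis for plotting: x and y components change sign, z components do not.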
def mirrorArray(self, x, direction="x"):
X = x.reshape((self.nx_core, self.ny_core), order="F")
if direction == "x" or direction == "y":
X2 = np.vstack((-np.flipud(X), X))
else:
X2 = np.vstack((np.flipud(X), X))
return X2
def genMesh(self, h=0., cs=3., ncx=15, ncz=30, npad=20):
"""
Generate cylindrically symmetric mesh
"""
# TODO: Make it adaptive due to z location
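        # Core region: ncx (ncz) cells of width cs; npad padding cells expand
        # by a factor of 1.3 to push the boundaries away from the loop source.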
hx = [(cs, ncx), (cs, npad, 1.3)]
hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)]
self.mesh = Mesh.CylMesh([hx, 1, hz], '00C')
def getCoreDomain(self, mirror=False, xmax=200, zmin=-200, zmax=200.):
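        # Flag cell centres inside the plotting window (r <= xmax and
        # zmin <= z <= zmax); fields are extracted on these cells for plotting.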
self.activeCC = (
(self.mesh.gridCC[:, 0] <= xmax) &
(
np.logical_and(
self.mesh.gridCC[:, 2] >= zmin,
self.mesh.gridCC[:, 2] <= zmax
)
)
)
self.gridCCactive = self.mesh.gridCC[self.activeCC, :][:, [0, 2]]
xind = (self.mesh.vectorCCx <= xmax)
yind = np.logical_and(
self.mesh.vectorCCz >= zmin, self.mesh.vectorCCz <= zmax
)
self.nx_core = xind.sum()
self.ny_core = yind.sum()
# if self.mesh2D is None:
hx = np.r_[self.mesh.hx[xind][::-1], self.mesh.hx[xind]]
hz = self.mesh.hz[yind]
self.mesh2D = Mesh.TensorMesh([hx, hz], x0="CC")
def getCoreModel(self, Type):
if Type == 'Layer':
active = self.mesh2D.vectorCCy < self.z0
ind1 = (
(self.mesh2D.vectorCCy < self.z0) & (self.mesh2D.vectorCCy >= self.z1)
)
ind2 = (
(self.mesh2D.vectorCCy < self.z1) & (self.mesh2D.vectorCCy >= self.z2)
)
mapping2D = (
Maps.SurjectVertical1D(self.mesh2D) *
Maps.InjectActiveCells(self.mesh2D, active, self.sig0, nC=self.mesh2D.nCy)
)
model2D = np.ones(self.mesh2D.nCy) * self.sig3
model2D[ind1] = self.sig1
model2D[ind2] = self.sig2
model2D = model2D[active]
elif Type == 'Sphere':
            active = self.mesh2D.gridCC[:, 1] < self.z0
ind1 = (
(self.mesh2D.gridCC[:, 1] < self.z1) & (self.mesh2D.gridCC[:, 1] >= self.z1-self.h)
)
ind2 = np.sqrt((self.mesh2D.gridCC[:, 0])**2 + (self.mesh2D.gridCC[:, 1]-self.z2)**2) <= self.R
mapping2D = (
Maps.InjectActiveCells(self.mesh2D, active, self.sig0, nC=self.mesh2D.nC)
)
model2D = np.ones(self.mesh2D.nC) * self.sigb
model2D[ind1] = self.sig1
model2D[ind2] = self.sig2
model2D = model2D[active]
return model2D, mapping2D
def getBiotSavrt(self, rxLoc):
"""
Compute Biot-Savart operator: Gz and Gx
"""
self.Gz = BiotSavartFun(self.mesh, rxLoc, component='z')
self.Gx = BiotSavartFun(self.mesh, rxLoc, component='x')
def setThreeLayerParam(
self, h1=12, h2=12, sig0=1e-8, sig1=1e-2, sig2=1e-2, sig3=1e-2, chi=0.
):
self.h1 = h1 # 1st layer thickness
self.h2 = h2 # 2nd layer thickness
self.z0 = 0.
self.z1 = self.z0-h1
self.z2 = self.z0-h1-h2
self.sig0 = sig0 # 0th layer \sigma (assumed to be air)
self.sig1 = sig1 # 1st layer \sigma
self.sig2 = sig2 # 2nd layer \sigma
self.sig3 = sig3 # 3rd layer \sigma
active = self.mesh.vectorCCz < self.z0
ind1 = (
(self.mesh.vectorCCz < self.z0) & (self.mesh.vectorCCz >= self.z1)
)
ind2 = (
(self.mesh.vectorCCz < self.z1) & (self.mesh.vectorCCz >= self.z2)
)
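        # The model holds only the active (sub-surface) vertical cells:
        # InjectActiveCells fills the air cells with sig0 and SurjectVertical1D
        # then spreads the 1D column over the whole cylindrical mesh.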
self.mapping = (
Maps.SurjectVertical1D(self.mesh) *
Maps.InjectActiveCells(self.mesh, active, sig0, nC=self.mesh.nCz)
)
model = np.ones(self.mesh.nCz) * sig3
model[ind1] = sig1
model[ind2] = sig2
self.m = model[active]
self.mu = np.ones(self.mesh.nC)*mu_0
self.mu[self.mesh.gridCC[:, 2] < 0.] = (1.+chi)*mu_0
return self.m
def setLayerSphereParam(
self, d1=6, h=6, d2=16, R=4, sig0=1e-8, sigb=1e-2, sig1=1e-1, sig2=1., chi=0.
):
self.z0 = 0. # Surface elevation
self.z1 = self.z0-d1 # Depth to layer
self.h = h # Thickness of layer
self.z2 = self.z0-d2 # Depth to center of sphere
self.R = R # Radius of sphere
self.sig0 = sig0 # Air conductivity
self.sigb = sigb # Background conductivity
self.sig1 = sig1 # Layer conductivity
self.sig2 = sig2 # Sphere conductivity
active = self.mesh.gridCC[:, 2] < self.z0
ind1 = (
(self.mesh.gridCC[:, 2] < self.z1) & (self.mesh.gridCC[:, 2] >= self.z1-self.h)
)
ind2 = np.sqrt((self.mesh.gridCC[:, 0])**2 + (self.mesh.gridCC[:, 2]-self.z2)**2) <= self.R
self.mapping = (
Maps.InjectActiveCells(self.mesh, active, sig0, nC=self.mesh.nC)
)
model = np.ones(self.mesh.nC) * sigb
model[ind1] = sig1
model[ind2] = sig2
self.m = model[active]
self.mu = np.ones(self.mesh.nC)*mu_0
self.mu[self.mesh.gridCC[:, 2] < 0.] = (1.+chi)*mu_0
return self.m
def simulate(self, srcLoc, rxLoc, time, radius=1.):
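        # Forward model a step-off horizontal circular loop over the current
        # model and return the predicted vertical flux density b_z at rxLoc.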
bz = EM.TDEM.Rx.Point_b(rxLoc, time, orientation='z')
dbzdt = EM.TDEM.Rx.Point_dbdt(rxLoc, time, orientation='z')
src = EM.TDEM.Src.CircularLoop([bz],
waveform=EM.TDEM.Src.StepOffWaveform(),
loc=srcLoc, radius=radius)
self.srcList = [src]
prb = EM.TDEM.Problem3D_b(self.mesh, sigmaMap=self.mapping)
prb.timeSteps = [
(1e-06, 10), (5e-06, 10), (1e-05, 10),
(5e-5, 10), (1e-4, 10), (5e-4, 10),
(1e-3, 10)
]
survey = EM.TDEM.Survey(self.srcList)
prb.pair(survey)
self.f = prb.fields(self.m)
self.prb = prb
dpred = survey.dpred(self.m, f=self.f)
return dpred
@property
def Pfx(self):
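        # Interpolation matrix from x-face values to the active plotting cell
        # centres, built lazily and cached on first access.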
if getattr(self, '_Pfx', None) is None:
self._Pfx = self.mesh.getInterpolationMat(
self.mesh.gridCC[self.activeCC, :], locType="Fx"
)
return self._Pfx
@property
def Pfz(self):
if getattr(self, '_Pfz', None) is None:
self._Pfz = self.mesh.getInterpolationMat(
self.mesh.gridCC[self.activeCC, :], locType="Fz"
)
return self._Pfz
def getFields(self, itime):
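        # Interpolate the fields at time step `itime` onto the plotting grid
        # and mirror them across the symmetry axis for display.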
src = self.srcList[0]
Ey = self.mesh.aveE2CC*self.f[src, "e", itime]
Jy = Utils.sdiag(self.prb.sigma) * Ey
self.Ey = Utils.mkvc(
self.mirrorArray(Ey[self.activeCC], direction="y")
)
self.Jy = Utils.mkvc(
self.mirrorArray(Jy[self.activeCC], direction="y")
)
self.Bx = Utils.mkvc(
self.mirrorArray(self.Pfx*self.f[src, "b", itime], direction="x")
)
self.Bz = Utils.mkvc(
self.mirrorArray(self.Pfz*self.f[src, "b", itime], direction="z")
)
self.dBxdt = Utils.mkvc(self.mirrorArray(
-self.Pfx*self.mesh.edgeCurl*self.f[src, "e", itime], direction="x")
)
self.dBzdt = Utils.mkvc(self.mirrorArray(
-self.Pfz*self.mesh.edgeCurl*self.f[src, "e", itime], direction="z")
)
def getData(self):
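        # Extract the full time series of Ey, Bx, Bz, dBx/dt and dBz/dt at the
        # receiver location for the sounding plots.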
src = self.srcList[0]
Pfx = self.mesh.getInterpolationMat(self.rxLoc, locType="Fx")
Pfz = self.mesh.getInterpolationMat(self.rxLoc, locType="Fz")
Pey = self.mesh.getInterpolationMat(self.rxLoc, locType="Ey")
self.Ey = (Pey*self.f[src, "e", :]).flatten()
self.Bx = (Pfx*self.f[src, "b", :]).flatten()
self.Bz = (Pfz*self.f[src, "b", :]).flatten()
self.dBxdt = (-Pfx*self.mesh.edgeCurl*self.f[src, "e", :]).flatten()
self.dBzdt = (-Pfz*self.mesh.edgeCurl*self.f[src, "e", :]).flatten()
def plotField(
self, Field='B', view="vec", scale="linear", itime=0, Geometry=True, Scenario=None,
Fixed=False, vmin=None, vmax=None
):
# Printout for null cases
if (Field == "B") & (view == "y"):
print("Think about the problem geometry. There is NO By in this case.")
elif (Field == "dBdt") & (view == "y"):
print("Think about the problem geometry. There is NO dBy/dt in this case.")
elif (Field == "E") & (view == "x") | (Field == "E") & (view == "z"):
print("Think about the problem geometry. There is NO Ex or Ez in this case. Only Ey.")
elif (Field == "J") & (view == "x") | (Field == "J") & (view == "z"):
print("Think about the problem geometry. There is NO Jx or Jz in this case. Only Jy.")
elif (Field == "E") & (view == "vec"):
print("Think about the problem geometry. E only has components along y. Vector plot not possible")
elif (Field == "J") & (view == "vec"):
print("Think about the problem geometry. J only has components along y. Vector plot not possible")
elif Field == "Model":
fig = plt.figure(figsize=(7, 6))
ax = plt.subplot(111)
if Scenario == 'Sphere':
model2D, mapping2D = self.getCoreModel('Sphere')
elif Scenario == 'Layer':
model2D, mapping2D = self.getCoreModel('Layer')
            if Fixed:
                clim = (np.log10(vmin), np.log10(vmax))
            else:
                clim = None
out = self.mesh2D.plotImage(np.log10(mapping2D * model2D), ax=ax, clim=clim)
cb = plt.colorbar(
out[0], ax=ax, format="$10^{%.1f}$"
)
cb.set_label("$\sigma$ (S/m)")
ax.set_xlabel("Distance (m)")
ax.set_ylabel("Depth (m)")
ax.set_title("Conductivity Model")
plt.show()
else:
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(111)
vec = False
if view == "vec":
tname = "Vector "
title = tname+Field+"-field"
elif view == "amp":
tname = "|"
title = tname+Field+"|-field"
else:
title = Field + view+"-field"
if Field == "B":
label = "Magnetic field (T)"
if view == "vec":
vec = True
val = np.c_[self.Bx, self.Bz]
elif view == "x":
val = self.Bx
elif view == "z":
val = self.Bz
else:
return
elif Field == "dBdt":
label = "Time derivative of magnetic field (T/s)"
if view == "vec":
vec = True
val = np.c_[self.dBxdt, self.dBzdt]
elif view == "x":
val = self.dBxdt
elif view == "z":
val = self.dBzdt
else:
return
elif Field == "E":
label = "Electric field (V/m)"
if view == "y":
val = self.Ey
else:
return
elif Field == "J":
label = "Current density (A/m$^2$)"
if view == "y":
val = self.Jy
else:
return
if Fixed:
if scale == "log":
vmin, vmax = (np.log10(vmin), np.log10(vmax))
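                # An off-screen scatter provides a mappable with the fixed
                # color limits, so the colorbar below matches vmin/vmax.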
                out = ax.scatter(np.zeros(3) - 1000, np.zeros(3), c=np.linspace(vmin, vmax, 3))
Utils.plot2Ddata(
self.mesh2D.gridCC, val, vec=vec, ax=ax,
contourOpts={"cmap": "viridis", "vmin":vmin, "vmax":vmax},
ncontour=200, scale=scale
)
else:
out = Utils.plot2Ddata(
self.mesh2D.gridCC, val, vec=vec, ax=ax,
contourOpts={"cmap": "viridis"},
ncontour=200, scale=scale
)[0]
if scale == "linear":
cb = plt.colorbar(
out, ax=ax,
format="%.2e"
)
elif scale == "log":
cb = plt.colorbar(
out, ax=ax,
format="$10^{%.1f}$"
)
else:
raise Exception("We consdier only linear and log scale!")
cb.set_label(label)
xmax = self.mesh2D.gridCC[:, 0].max()
if Geometry:
                if Scenario == 'Layer':
ax.plot(
np.r_[-xmax, xmax], np.ones(2)*self.srcLoc[2], 'w-', lw=1
)
ax.plot(np.r_[-xmax, xmax], np.ones(2)*self.z0, 'w--', lw=1)
ax.plot(np.r_[-xmax, xmax], np.ones(2)*self.z1, 'w--', lw=1)
ax.plot(np.r_[-xmax, xmax], np.ones(2)*self.z2, 'w--', lw=1)
ax.plot(0, self.srcLoc[2], 'ko', ms=4)
ax.plot(self.rxLoc[0, 0], self.srcLoc[2], 'ro', ms=4)
                elif Scenario == 'Sphere':
ax.plot(
np.r_[-xmax, xmax], np.ones(2)*self.srcLoc[2], 'k-', lw=1
)
ax.plot(np.r_[-xmax, xmax], np.ones(2)*self.z0, 'w--', lw=1)
ax.plot(np.r_[-xmax, xmax], np.ones(2)*self.z1, 'w--', lw=1)
ax.plot(np.r_[-xmax, xmax], np.ones(2)*(self.z1-self.h), 'w--', lw=1)
Phi = np.linspace(0, 2*np.pi, 41)
ax.plot(self.R*np.cos(Phi), self.z2+self.R*np.sin(Phi), 'w--', lw=1)
ax.plot(0, self.srcLoc[2], 'ko', ms=4)
ax.plot(self.rxLoc[0, 0], self.srcLoc[2], 'ro', ms=4)
ax.set_xlabel("Distance (m)")
ax.set_ylabel("Depth (m)")
title = title + "\nt = " + '{:.2e}'.format(self.prb.times[itime]*1e3) + " ms"
ax.set_title(title)
ax.set_xlim(-190, 190)
ax.set_ylim(-190, 190)
plt.show()
######################################################
# LAYER WIDGET
######################################################
def InteractivePlane_Layer(
self, scale="log", fieldvalue="E", compvalue="y"
):
def foo(
Update, Field, AmpDir, Component,
Sigma0, Sigma1, Sigma2, Sigma3,
Sus, h1, h2, Scale,
rxOffset, z, radius, itime, Geometry=True, Fixed=False, vmin=None, vmax=None
):
if AmpDir == "Direction (B or dBdt)":
Component = "vec"
m = self.setThreeLayerParam(
h1=h1, h2=h2, sig0=Sigma0, sig1=Sigma1, sig2=Sigma2,
sig3=Sigma3, chi=Sus
)
self.srcLoc = np.array([0., 0., z])
self.rxLoc = np.array([[rxOffset, 0., z]])
self.radius = radius
if Update:
dpred = self.simulate(
self.srcLoc, self.rxLoc, self.time, self.radius
)
self.getFields(itime)
return self.plotField(
Field=Field, view=Component, scale=Scale,
Geometry=Geometry, itime=itime, Scenario='Layer', Fixed=Fixed, vmin=vmin, vmax=vmax
)
out = widgetify(
foo,
Update=widgets.widget_bool.Checkbox(value=True, description="Update"),
Field=widgets.ToggleButtons(
options=["E", "B", "dBdt", "J", "Model"], value=fieldvalue
),
AmpDir=widgets.ToggleButtons(
options=['None', 'Direction (B or dBdt)'], value="None"
),
Component=widgets.ToggleButtons(
options=['x', 'y', 'z'], value=compvalue,
description='Comp.'
),
Sigma0=widgets.FloatText(
value=1e-8, continuous_update=False,
                description=r'$\sigma_0$ (S/m)'
),
Sigma1=widgets.FloatText(
value=0.01, continuous_update=False,
                description=r'$\sigma_1$ (S/m)'
),
Sigma2=widgets.FloatText(
value=0.01, continuous_update=False,
                description=r'$\sigma_2$ (S/m)'
),
Sigma3=widgets.FloatText(
value=0.01, continuous_update=False,
                description=r'$\sigma_3$ (S/m)'
),
Sus=widgets.FloatText(
value=0., continuous_update=False,
                description=r'$\chi$'
),
h1=widgets.FloatSlider(
min=2., max=50., step=2., value=20.,
continuous_update=False, description='$h_1$ (m)'
),
h2=widgets.FloatSlider(
min=2., max=50., step=2., value=20.,
continuous_update=False, description='$h_2$ (m)'
),
Scale=widgets.ToggleButtons(
options=['log', 'linear'], value="linear"
),
rxOffset=widgets.FloatSlider(
min=0., max=50., step=2., value=10., continuous_update=False,
                description=r'$\Delta x$ (m)'
),
z=widgets.FloatSlider(
min=0., max=50., step=2., value=0., continuous_update=False,
                description=r'$\Delta z$ (m)'
),
itime=widgets.IntSlider(
min=1, max=70, step=1, value=1,
continuous_update=False, description='Time index'
),
radius=widgets.FloatSlider(
min=2., max=50., step=2., value=2., continuous_update=False,
description='Tx radius (m)'
),
Fixed=widgets.widget_bool.Checkbox(value=False, description="Fixed"),
            vmin=widgets.FloatText(value=None, description='vmin'),
            vmax=widgets.FloatText(value=None, description='vmax')
)
return out
def InteractiveData_Layer(self, fieldvalue="B", compvalue="z"):
def foo(Field, Component, Scale):
if (Field == "B") & (Component == "y") | (Field == "dBdt") & (Component == "y"):
print("Think about the problem geometry. There is NO By in this case.")
elif (Field == "E") & (Component == "x") | (Field == "E") & (Component == "z"):
print("Think about the problem geometry. There is NO Ex or Ez in this case. Only Ey.")
else:
fig = plt.figure()
ax = plt.subplot(111)
bType = "b"
self.getData()
if Field == "B":
label = "Magnetic field (T)"
if Component == "x":
title = "Bx"
val = self.Bx
elif Component == "z":
title = "Bz"
val = self.Bz
else:
# ax.imshow(self.im)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
print("Think about the problem geometry. There is NO By in this case.")
elif Field == "dBdt":
label = "Time dervative of magnetic field (T/s)"
if Component == "x":
title = "dBx/dt"
val = self.dBxdt
elif Component == "z":
title = "dBz/dt"
val = self.dBzdt
else:
# ax.imshow(self.im)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
print("Think about the problem geometry. There is NO dBy/dt in this case.")
elif Field == "E":
label = "Electric field (V/m)"
title = "Ey"
if Component == "y":
val = self.Ey
else:
# ax.imshow(self.im)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
print("Think about the problem geometry. There is NO Ex or Ez in this case.")
elif Field == "J":
print("The conductivity at the location is 0. Therefore there is no electrical current here.")
if Scale == "log":
val_p, val_n = DisPosNegvalues(val)
ax.plot(self.prb.times[10:]*1e3, val_p[10:], 'k-')
ax.plot(self.prb.times[10:]*1e3, val_n[10:], 'k--')
ax.legend(("(+)", "(-)"), loc=1, fontsize=10)
else:
ax.plot(self.prb.times[10:]*1e3, val[10:], 'k.-')
ax.set_xscale("log")
ax.set_yscale(Scale)
ax.set_xlabel("Time (ms)")
ax.set_ylabel(label)
ax.set_title(title)
ax.grid(True)
plt.show()
out = widgetify(
foo,
Field=widgets.ToggleButtons(
options=["E", "B", "dBdt"], value=fieldvalue
),
Component=widgets.ToggleButtons(
options=['x', 'y', 'z'], value=compvalue,
description='Comp.'
),
Scale=widgets.ToggleButtons(
options=['log', 'linear'], value="log"
)
)
return out
######################################################
# SPHERE WIDGET
######################################################
def InteractivePlane_Sphere(
self, scale="log", fieldvalue="E", compvalue="y"
):
def foo(
Update, Field, AmpDir, Component,
Sigma0, Sigmab, Sigma1, Sigma2,
Sus, d1, h, d2, R, Scale,
rxOffset, z, radius, itime, Geometry=True, Fixed=False, vmin=None, vmax=None
):
if AmpDir == "Direction (B or dBdt)":
Component = "vec"
m = self.setLayerSphereParam(
d1=d1, h=h, d2=d2, R=R, sig0=Sigma0, sigb=Sigmab, sig1=Sigma1, sig2=Sigma2, chi=Sus
)
self.srcLoc = np.array([0., 0., z])
self.rxLoc = np.array([[rxOffset, 0., z]])
self.radius = radius
if Update:
dpred = self.simulate(
self.srcLoc, self.rxLoc, self.time, self.radius
)
self.getFields(itime)
return self.plotField(
Field=Field, view=Component, scale=Scale,
Geometry=Geometry, itime=itime, Scenario='Sphere', Fixed=Fixed, vmin=vmin, vmax=vmax
)
out = widgetify(
foo,
Update=widgets.widget_bool.Checkbox(value=True, description="Update"),
Field=widgets.ToggleButtons(
options=["E", "B", "dBdt", "J", "Model"], value=fieldvalue
),
AmpDir=widgets.ToggleButtons(
options=['None', 'Direction (B or dBdt)'], value="None"
),
Component=widgets.ToggleButtons(
options=['x', 'y', 'z'], value=compvalue,
description='Comp.'
),
Sigma0=widgets.FloatText(
value=1e-8, continuous_update=False,
description=r'$\sigma_0$ (S/m)'
),
Sigmab=widgets.FloatText(
value=0.01, continuous_update=False,
description=r'$\sigma_b$ (S/m)'
),
Sigma1=widgets.FloatText(
value=0.01, continuous_update=False,
description=r'$\sigma_1$ (S/m)'
),
Sigma2=widgets.FloatText(
value=1., continuous_update=False,
description=r'$\sigma_2$ (S/m)'
),
Sus=widgets.FloatText(
value=0., continuous_update=False,
description=r'$\chi$'
),
d1=widgets.FloatSlider(
min=0., max=50., step=2., value=0., continuous_update=False,
description='$d_1$ (m)'
),
h=widgets.FloatSlider(
min=2., max=40., step=2., value=20., continuous_update=False,
description='$h$ (m)'
),
d2=widgets.FloatSlider(
min=20., max=80., step=2., value=60., continuous_update=False,
description='$d_2$ (m)'
),
R=widgets.FloatSlider(
min=2., max=40., step=2., value=30., continuous_update=False,
description='$R$ (m)'
),
Scale=widgets.ToggleButtons(
options=['log', 'linear'], value="linear"
),
rxOffset=widgets.FloatSlider(
min=0., max=50., step=2., value=10., continuous_update=False,
description=r'$\Delta x$ (m)'
),
z=widgets.FloatSlider(
min=0., max=50., step=2., value=0., continuous_update=False,
description=r'$\Delta z$ (m)'
),
radius=widgets.FloatSlider(
min=2., max=50., step=2., value=2., continuous_update=False,
description='Tx radius (m)'
),
itime=widgets.IntSlider(
min=1, max=70, step=1, value=1,
continuous_update=False, description='Time index'
),
Fixed=widgets.widget_bool.Checkbox(value=False, description="Fixed"),
vmin=FloatText(value=None, description='vmin'),
vmax=FloatText(value=None, description='vmax')
)
return out
def InteractiveData_Sphere(self, fieldvalue="B", compvalue="z"):
def foo(Field, Component, Scale):
if (Field == "B") & (Component == "y") | (Field == "dBdt") & (Component == "y"):
print("Think about the problem geometry. There is NO By in this case.")
elif (Field == "E") & (Component == "x") | (Field == "E") & (Component == "z"):
print("Think about the problem geometry. There is NO Ex or Ez in this case. Only Ey.")
else:
fig = plt.figure()
ax = plt.subplot(111)
bType = "b"
self.getData()
if Field == "B":
label = "Magnetic field (T)"
if Component == "x":
title = "Bx"
val = self.Bx
elif Component == "z":
title = "Bz"
val = self.Bz
else:
# ax.imshow(self.im)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
print("Think about the problem geometry. There is NO By in this case.")
elif Field == "dBdt":
label = "Time dervative of magnetic field (T/s)"
if Component == "x":
title = "dBx/dt"
val = self.dBxdt
elif Component == "z":
title = "dBz/dt"
val = self.dBzdt
else:
# ax.imshow(self.im)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
print("Think about the problem geometry. There is NO dBy/dt in this case.")
elif Field == "E":
label = "Electric field (V/m)"
title = "Ey"
if Component == "y":
val = self.Ey
else:
# ax.imshow(self.im)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
print("Think about the problem geometry. There is NO Ex or Ez in this case.")
elif Field == "J":
print("The conductivity at the location is 0. Therefore there is no electrical current here.")
if Scale == "log":
val_p, val_n = DisPosNegvalues(val)
ax.plot(self.prb.times[10:]*1e3, val_p[10:], 'k-')
ax.plot(self.prb.times[10:]*1e3, val_n[10:], 'k--')
ax.legend(("(+)", "(-)"), loc=1, fontsize=10)
else:
ax.plot(self.prb.times[10:]*1e3, val[10:], 'k.-')
ax.set_xscale("log")
ax.set_yscale(Scale)
ax.set_xlabel("Time (ms)")
ax.set_ylabel(label)
ax.set_title(title)
ax.grid(True)
plt.show()
out = widgetify(
foo,
Field=widgets.ToggleButtons(
options=["E", "B", "dBdt"], value=fieldvalue
),
Component=widgets.ToggleButtons(
options=['x', 'y', 'z'], value=compvalue,
description='Comp.'
),
Scale=widgets.ToggleButtons(
options=['log', 'linear'], value="log"
)
)
return out
|
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library file which executes the PPO training."""
import functools
from typing import Any, Callable, Tuple, List
from absl import logging
import flax
from flax import linen as nn
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import train_state
import jax
import jax.random
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
import agent
import models
import test_episodes
@jax.jit
@functools.partial(jax.vmap, in_axes=(1, 1, 1, None, None), out_axes=1)
def gae_advantages(
rewards: np.ndarray,
terminal_masks: np.ndarray,
values: np.ndarray,
discount: float,
gae_param: float):
"""Use Generalized Advantage Estimation (GAE) to compute advantages.
As defined by eqs. (11-12) in PPO paper arXiv: 1707.06347. Implementation uses
key observation that A_{t} = delta_t + gamma*lambda*A_{t+1}.
Args:
rewards: array shaped (actor_steps, num_agents), rewards from the game
terminal_masks: array shaped (actor_steps, num_agents), zeros for terminal
and ones for non-terminal states
values: array shaped (actor_steps, num_agents), values estimated by critic
discount: RL discount usually denoted with gamma
gae_param: GAE parameter usually denoted with lambda
Returns:
advantages: calculated advantages shaped (actor_steps, num_agents)
"""
assert rewards.shape[0] + 1 == values.shape[0], ('One more value needed; Eq. '
'(12) in PPO paper requires '
'V(s_{t+1}) for delta_t')
advantages = []
gae = 0.
for t in reversed(range(len(rewards))):
# Masks used to set next state value to 0 for terminal states.
value_diff = discount * values[t + 1] * terminal_masks[t] - values[t]
delta = rewards[t] + value_diff
# Masks[t] used to ensure that values before and after a terminal state
# are independent of each other.
gae = delta + discount * gae_param * terminal_masks[t] * gae
advantages.append(gae)
advantages = advantages[::-1]
return jnp.array(advantages)
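# Illustrative reference (a sketch, not part of the original module): the same
# GAE recursion as `gae_advantages` above, written for a single agent in plain
# NumPy. It assumes `rewards` and `terminal_masks` have length T and `values`
# has length T + 1, mirroring the assert in the vmapped version.
def _gae_reference_single_agent(rewards, terminal_masks, values, discount,
                                gae_param):
    num_steps = len(rewards)
    advantages = np.zeros(num_steps, dtype=np.float32)
    gae = 0.
    for t in reversed(range(num_steps)):
        # delta_t = r_t + gamma * mask_t * V(s_{t+1}) - V(s_t)
        delta = rewards[t] + discount * values[t + 1] * terminal_masks[t] - values[t]
        # A_t = delta_t + gamma * lambda * mask_t * A_{t+1}
        gae = delta + discount * gae_param * terminal_masks[t] * gae
        advantages[t] = gae
    return advantages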
def loss_fn(
params: flax.core.FrozenDict,
apply_fn: Callable[..., Any],
minibatch: Tuple,
clip_param: float,
vf_coeff: float,
entropy_coeff: float):
"""Evaluate the loss function.
Compute loss as a sum of three components: the negative of the PPO clipped
surrogate objective, the value function loss and the negative of the entropy
bonus.
Args:
params: the parameters of the actor-critic model
apply_fn: the actor-critic model's apply function
minibatch: Tuple of five elements forming one experience batch:
states: shape (batch_size, 84, 84, 4)
actions: shape (batch_size,)
old_log_probs: shape (batch_size,)
returns: shape (batch_size,)
advantages: shape (batch_size,)
clip_param: the PPO clipping parameter used to clamp ratios in loss function
vf_coeff: weighs value function loss in total loss
entropy_coeff: weighs entropy bonus in the total loss
Returns:
loss: the PPO loss, scalar quantity
"""
states, actions, old_log_probs, returns, advantages = minibatch
log_probs, values = agent.policy_action(apply_fn, params, states)
values = values[:, 0] # Convert shapes: (batch, 1) to (batch, ).
probs = jnp.exp(log_probs)
value_loss = jnp.mean(jnp.square(returns - values), axis=0)
entropy = jnp.sum(-probs*log_probs, axis=1).mean()
log_probs_act_taken = jax.vmap(lambda lp, a: lp[a])(log_probs, actions)
ratios = jnp.exp(log_probs_act_taken - old_log_probs)
# Advantage normalization (following the OpenAI baselines).
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
pg_loss = ratios * advantages
clipped_loss = advantages * jax.lax.clamp(1. - clip_param, ratios,
1. + clip_param)
ppo_loss = -jnp.mean(jnp.minimum(pg_loss, clipped_loss), axis=0)
return ppo_loss + vf_coeff*value_loss - entropy_coeff*entropy
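# Illustrative sketch (not part of the original module): the clipped surrogate
# term from `loss_fn` in isolation, on toy numbers. With clip_param=0.2 the
# ratio 1.5 is clamped to 1.2 before multiplying the advantage, while ratios
# inside [0.8, 1.2] pass through unchanged; PPO then takes the elementwise
# minimum of the unclipped and clipped terms and negates the mean.
def _clipped_surrogate_example(clip_param=0.2):
    ratios = jnp.array([0.5, 1.0, 1.5])
    advantages = jnp.array([1.0, 1.0, 1.0])
    pg_loss = ratios * advantages
    clipped_loss = advantages * jax.lax.clamp(1. - clip_param, ratios,
                                              1. + clip_param)
    return -jnp.mean(jnp.minimum(pg_loss, clipped_loss))  # -0.9 for these inputs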
@functools.partial(jax.jit, static_argnums=(2,))
def train_step(
state: train_state.TrainState,
trajectories: Tuple,
batch_size: int,
*,
clip_param: float,
vf_coeff: float,
entropy_coeff: float):
"""Compilable train step.
Runs an entire epoch of training (i.e. the loop over minibatches within
an epoch is included here for performance reasons).
Args:
state: the train state
trajectories: Tuple of the following five elements forming the experience:
states: shape (steps_per_agent*num_agents, 84, 84, 4)
actions: shape (steps_per_agent*num_agents, )
old_log_probs: shape (steps_per_agent*num_agents, )
returns: shape (steps_per_agent*num_agents, )
advantages: (steps_per_agent*num_agents, )
batch_size: the minibatch size, static argument
clip_param: the PPO clipping parameter used to clamp ratios in loss function
vf_coeff: weighs value function loss in total loss
entropy_coeff: weighs entropy bonus in the total loss
Returns:
state: the updated train state after the epoch of minibatch updates
loss: loss summed over the minibatch steps
"""
iterations = trajectories[0].shape[0] // batch_size
trajectories = jax.tree_map(
lambda x: x.reshape((iterations, batch_size) + x.shape[1:]), trajectories)
loss = 0.
for batch in zip(*trajectories):
grad_fn = jax.value_and_grad(loss_fn)
l, grads = grad_fn(state.params, state.apply_fn, batch, clip_param, vf_coeff,
entropy_coeff)
loss += l
state = state.apply_gradients(grads=grads)
return state, loss
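# Illustrative sketch (not part of the original module): how the reshape and
# zip inside `train_step` split flat trajectories into minibatches, shown on
# toy data (6 samples, batch_size=2 -> 3 minibatches of 2 samples each).
def _minibatch_split_example(batch_size=2):
    trajectories = (np.arange(6), np.arange(6) * 10.)
    iterations = trajectories[0].shape[0] // batch_size
    trajectories = jax.tree_map(
        lambda x: x.reshape((iterations, batch_size) + x.shape[1:]),
        trajectories)
    return [batch for batch in zip(*trajectories)]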
def get_experience(
state: train_state.TrainState,
simulators: List[agent.RemoteSimulator],
steps_per_actor: int):
"""Collect experience from agents.
Runs `steps_per_actor` time steps of the game for each of the `simulators`.
"""
all_experience = []
# Range up to steps_per_actor + 1 to get one more value needed for GAE.
for _ in range(steps_per_actor + 1):
sim_states = []
for sim in simulators:
sim_state = sim.conn.recv()
sim_states.append(sim_state)
sim_states = np.concatenate(sim_states, axis=0)
log_probs, values = agent.policy_action(state.apply_fn, state.params, sim_states)
log_probs, values = jax.device_get((log_probs, values))
probs = np.exp(np.array(log_probs))
for i, sim in enumerate(simulators):
probabilities = probs[i]
action = np.random.choice(probs.shape[1], p=probabilities)
sim.conn.send(action)
experiences = []
for i, sim in enumerate(simulators):
sim_state, action, reward, done = sim.conn.recv()
value = values[i, 0]
log_prob = log_probs[i][action]
sample = agent.ExpTuple(sim_state, action, reward, value, log_prob, done)
experiences.append(sample)
all_experience.append(experiences)
return all_experience
def process_experience(
experience: List[List[agent.ExpTuple]],
actor_steps: int,
num_agents: int,
gamma: float,
lambda_: float):
"""Process experience for training, including advantage estimation.
Args:
experience: collected from agents in the form of nested lists/namedtuple
actor_steps: number of steps each agent has completed
num_agents: number of agents that collected experience
gamma: discount parameter
lambda_: GAE parameter
Returns:
trajectories: trajectories readily accessible for `train_step()` function
"""
obs_shape = (84, 84, 4)
exp_dims = (actor_steps, num_agents)
values_dims = (actor_steps + 1, num_agents)
states = np.zeros(exp_dims + obs_shape, dtype=np.float32)
actions = np.zeros(exp_dims, dtype=np.int32)
rewards = np.zeros(exp_dims, dtype=np.float32)
values = np.zeros(values_dims, dtype=np.float32)
log_probs = np.zeros(exp_dims, dtype=np.float32)
dones = np.zeros(exp_dims, dtype=np.float32)
for t in range(len(experience) - 1): # experience[-1] only for next_values
for agent_id, exp_agent in enumerate(experience[t]):
states[t, agent_id, ...] = exp_agent.state
actions[t, agent_id] = exp_agent.action
rewards[t, agent_id] = exp_agent.reward
values[t, agent_id] = exp_agent.value
log_probs[t, agent_id] = exp_agent.log_prob
# Dones need to be 0 for terminal states.
dones[t, agent_id] = float(not exp_agent.done)
for a in range(num_agents):
values[-1, a] = experience[-1][a].value
advantages = gae_advantages(rewards, dones, values, gamma, lambda_)
returns = advantages + values[:-1, :]
# After preprocessing, concatenate data from all agents.
trajectories = (states, actions, log_probs, returns, advantages)
trajectory_len = num_agents * actor_steps
trajectories = tuple(map(
lambda x: np.reshape(x, (trajectory_len,) + x.shape[2:]), trajectories))
return trajectories
@functools.partial(jax.jit, static_argnums=1)
def get_initial_params(key: np.ndarray, model: nn.Module):
input_dims = (1, 84, 84, 4) # (minibatch, height, width, stacked frames)
init_shape = jnp.ones(input_dims, jnp.float32)
initial_params = model.init(key, init_shape)['params']
return initial_params
def create_train_state(params, model: nn.Module,
config: ml_collections.ConfigDict, train_steps: int) -> train_state.TrainState:
if config.decaying_lr_and_clip_param:
lr = optax.linear_schedule(
init_value=config.learning_rate, end_value=0.,
transition_steps=train_steps)
else:
lr = config.learning_rate
tx = optax.adam(lr)
state = train_state.TrainState.create(
apply_fn=model.apply,
params=params,
tx=tx)
return state
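# Illustrative helper (a sketch, not part of the original module): preview the
# learning rate produced by the linear schedule used in `create_train_state`.
# `optax.linear_schedule` returns a callable mapping the optimizer step count
# to the current learning rate.
def _preview_linear_lr(init_value, train_steps, num_points=5):
    schedule = optax.linear_schedule(
        init_value=init_value, end_value=0., transition_steps=train_steps)
    steps = np.linspace(0, train_steps, num_points).astype(int)
    return [(int(s), float(schedule(s))) for s in steps]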
def train(
model: models.ActorCritic,
config: ml_collections.ConfigDict,
model_dir: str):
"""Main training loop.
Args:
model: the actor-critic model
config: object holding hyperparameters and the training information
model_dir: path to the directory where checkpoints and logging info are stored
Returns:
state: the final train state after training
"""
game = config.game + 'NoFrameskip-v4'
simulators = [agent.RemoteSimulator(game)
for _ in range(config.num_agents)]
summary_writer = tensorboard.SummaryWriter(model_dir)
summary_writer.hparams(dict(config))
loop_steps = config.total_frames // (config.num_agents * config.actor_steps)
log_frequency = 40
checkpoint_frequency = 500
# train_step does multiple steps per call for better performance
# compute number of steps per call here to convert between the number of
# train steps and the inner number of optimizer steps
iterations_per_step = (config.num_agents * config.actor_steps
// config.batch_size)
initial_params = get_initial_params(jax.random.PRNGKey(0), model)
state = create_train_state(initial_params, model, config,
loop_steps * config.num_epochs * iterations_per_step)
del initial_params
state = checkpoints.restore_checkpoint(model_dir, state)
# number of train iterations done by each train_step
start_step = int(state.step) // config.num_epochs // iterations_per_step
logging.info('Start training from step: %s', start_step)
for step in range(start_step, loop_steps):
# Bookkeeping and testing.
if step % log_frequency == 0:
score = test_episodes.policy_test(1, state.apply_fn, state.params, game)
frames = step * config.num_agents * config.actor_steps
summary_writer.scalar('game_score', score, frames)
logging.info('Step %s:\nframes seen %s\nscore %s\n\n', step, frames, score)
# Core training code.
alpha = 1. - step / loop_steps if config.decaying_lr_and_clip_param else 1.
all_experiences = get_experience(
state, simulators, config.actor_steps)
trajectories = process_experience(
all_experiences, config.actor_steps, config.num_agents, config.gamma,
config.lambda_)
clip_param = config.clip_param * alpha
for _ in range(config.num_epochs):
permutation = np.random.permutation(
config.num_agents * config.actor_steps)
trajectories = tuple(x[permutation] for x in trajectories)
state, _ = train_step(
state, trajectories, config.batch_size,
clip_param=clip_param,
vf_coeff=config.vf_coeff,
entropy_coeff=config.entropy_coeff)
if (step + 1) % checkpoint_frequency == 0:
checkpoints.save_checkpoint(model_dir, state, step + 1)
return state
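# Illustrative entry point (a sketch, not part of the original module): the
# config fields below are exactly the ones `train` and its helpers read; the
# numeric values are placeholders, and the `num_outputs` argument for
# models.ActorCritic is an assumption about that class's constructor, which
# lives outside this file.
def _example_main(model_dir='/tmp/ppo_example'):
    config = ml_collections.ConfigDict()
    config.game = 'Pong'
    config.num_agents = 8
    config.actor_steps = 128
    config.total_frames = 4000000
    config.batch_size = 256
    config.num_epochs = 3
    config.learning_rate = 2.5e-4
    config.decaying_lr_and_clip_param = True
    config.gamma = 0.99
    config.lambda_ = 0.95
    config.clip_param = 0.1
    config.vf_coeff = 0.5
    config.entropy_coeff = 0.01
    model = models.ActorCritic(num_outputs=6)  # assumed constructor signature
    return train(model, config, model_dir)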
|
|
"""
Built-in pipeline
"""
import time
import logging
import logging.config
import shutil
import os
import matplotlib
matplotlib.use('Agg')
# suppress PCA unpickle userwarning
# Cat: TODO: this is dangerous, may wish to fix the problem in cluster.py
# import warnings
# warnings.filterwarnings("ignore", category=UserWarning)
try:
# py3
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import numpy as np
import yaml
import yass
from yass import set_config
from yass import read_config
from yass import (preprocess, detect, cluster, postprocess,
deconvolve, residual, merge, rf, visual)
from yass.template import update_templates
from yass.util import (load_yaml, save_metadata, load_logging_config_file,
human_readable_time)
def run(config, logger_level='INFO', clean=False, output_dir='tmp/',
complete=False, calculate_rf=False, visualize=False, set_zero_seed=False):
"""Run YASS built-in pipeline
Parameters
----------
config: str or mapping (such as dictionary)
Path to YASS configuration file or mapping object
logger_level: str
Logger level
clean: bool, optional
Delete CONFIG.data.root_folder/output_dir/ before running
output_dir: str, optional
Output directory (if relative, it makes it relative to
CONFIG.data.root_folder) to store the output data, defaults to tmp/.
If absolute, it leaves it as it is.
complete: bool, optional
Generates extra files (needed to generate phy files)
Notes
-----
Running the preprocessor will generate the following files in
CONFIG.data.root_folder/output_directory/:
* ``config.yaml`` - Copy of the configuration file
* ``metadata.yaml`` - Experiment metadata
* ``filtered.bin`` - Filtered recordings (from preprocess)
* ``filtered.yaml`` - Filtered recordings metadata (from preprocess)
* ``standardized.bin`` - Standardized recordings (from preprocess)
* ``standardized.yaml`` - Standardized recordings metadata (from preprocess)
* ``whitening.npy`` - Whitening filter (from preprocess)
Returns
-------
numpy.ndarray
Spike train
"""
# load yass configuration parameters
set_config(config, output_dir)
CONFIG = read_config()
TMP_FOLDER = CONFIG.path_to_output_directory
# remove tmp folder if needed
if os.path.exists(TMP_FOLDER) and clean:
shutil.rmtree(TMP_FOLDER)
# create TMP_FOLDER if needed
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
# load logging config file
logging_config = load_logging_config_file()
logging_config['handlers']['file']['filename'] = os.path.join(
TMP_FOLDER,'yass.log')
logging_config['root']['level'] = logger_level
# configure logging
logging.config.dictConfig(logging_config)
# instantiate logger
logger = logging.getLogger(__name__)
# print yass version
logger.info('YASS version: %s', yass.__version__)
''' **********************************************
******** SET ENVIRONMENT VARIABLES ***********
**********************************************
'''
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["GIO_EXTRA_MODULES"] = "/usr/lib/x86_64-linux-gnu/gio/modules/"
''' **********************************************
************** PREPROCESS ********************
**********************************************
'''
# preprocess
start = time.time()
(standardized_path,
standardized_params) = preprocess.run(
os.path.join(TMP_FOLDER, 'preprocess'))
#### Block 1: Detection, Clustering, Postprocess
(fname_templates,
fname_spike_train) = initial_block(
os.path.join(TMP_FOLDER, 'block_1'),
standardized_path,
standardized_params,
run_chunk_sec = [0, CONFIG.rec_len])
print (" inpput to block2: ", fname_templates)
#### Block 2: Deconv, Merge, Residuals, Clustering, Postprocess
n_iterations = 1
for it in range(n_iterations):
(fname_templates,
fname_spike_train) = iterative_block(
os.path.join(TMP_FOLDER, 'block_{}'.format(it+2)),
standardized_path,
standardized_params,
fname_templates,
run_chunk_sec = [0, CONFIG.rec_len])
### Block 3: Deconvolve, Residual, Merge
(fname_templates,
fname_spike_train,
fname_templates_up,
fname_spike_train_up,
fname_residual,
residual_dtype)= final_deconv(
os.path.join(TMP_FOLDER, 'final_deconv'),
standardized_path,
standardized_params,
fname_templates)
## save the final templates and spike train
fname_templates_final = os.path.join(
TMP_FOLDER, 'templates.npy')
fname_spike_train_final = os.path.join(
TMP_FOLDER, 'spike_train.npy')
# transpose axes
templates = np.load(fname_templates).transpose(1,2,0)
# align spike time to the beginning
spike_train = np.load(fname_spike_train)
spike_train[:,0] -= CONFIG.spike_size//2
np.save(fname_templates_final, templates)
np.save(fname_spike_train_final, spike_train)
total_time = time.time() - start
''' **********************************************
************** RF / VISUALIZE ****************
**********************************************
'''
if calculate_rf:
rf.run()
if visualize:
visual.run()
logger.info('Finished YASS execution. Total time: {}'.format(
human_readable_time(total_time)))
logger.info('Final Templates Location: '+fname_templates_final)
logger.info('Final Spike Train Location: '+fname_spike_train_final)
def initial_block(TMP_FOLDER,
standardized_path,
standardized_params,
run_chunk_sec):
logger = logging.getLogger(__name__)
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
''' **********************************************
************** DETECT EVENTS *****************
**********************************************
'''
# detect
logger.info('INITIAL DETECTION')
spike_index_path = detect.run(
standardized_path,
standardized_params,
os.path.join(TMP_FOLDER, 'detect'),
run_chunk_sec=run_chunk_sec)
logger.info('INITIAL CLUSTERING')
# cluster
raw_data = True
full_run = True
fname_templates, fname_spike_train = cluster.run(
spike_index_path,
standardized_path,
standardized_params['dtype'],
os.path.join(TMP_FOLDER, 'cluster'),
raw_data,
full_run)
methods = ['duplicate', 'high_mad', 'collision']
fname_templates, fname_spike_train = postprocess.run(
methods,
fname_templates,
fname_spike_train,
os.path.join(TMP_FOLDER,
'cluster_post_process'),
standardized_path,
standardized_params['dtype'])
return fname_templates, fname_spike_train
def iterative_block(TMP_FOLDER,
standardized_path,
standardized_params,
fname_templates,
run_chunk_sec):
logger = logging.getLogger(__name__)
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
# run deconvolution
logger.info('DECONV')
(fname_templates,
fname_spike_train,
fname_templates_up,
fname_spike_train_up,
fname_shifts) = deconvolve.run(
fname_templates,
os.path.join(TMP_FOLDER,
'deconv'),
standardized_path,
standardized_params['dtype'],
run_chunk_sec=run_chunk_sec)
# compute residual
logger.info('RESIDUAL COMPUTATION')
fname_residual, residual_dtype = residual.run(
fname_shifts,
fname_templates_up,
fname_spike_train_up,
os.path.join(TMP_FOLDER,
'residual'),
standardized_path,
standardized_params['dtype'],
dtype_out='float32',
run_chunk_sec=run_chunk_sec)
logger.info('BLOCK1 MERGE')
fname_templates_up, fname_spike_train_up = merge.run(
os.path.join(TMP_FOLDER,
'post_deconv_merge'),
False,
fname_spike_train,
fname_templates,
fname_spike_train_up,
fname_templates_up,
standardized_path,
standardized_params['dtype'],
fname_residual,
residual_dtype)
fname_templates = fname_templates_up
fname_spike_train = fname_spike_train_up
# cluster
logger.info('RECLUSTERING')
raw_data = False
full_run = True
fname_templates, fname_spike_train = cluster.run(
fname_spike_train,
standardized_path,
standardized_params['dtype'],
os.path.join(TMP_FOLDER, 'cluster'),
raw_data,
full_run,
fname_residual=fname_residual,
residual_dtype=residual_dtype,
fname_templates_up=fname_templates_up,
fname_spike_train_up=fname_spike_train_up)
methods = ['duplicate', 'high_mad', 'collision']
fname_templates, fname_spike_train = postprocess.run(
methods,
fname_templates,
fname_spike_train,
os.path.join(TMP_FOLDER,
'cluster_post_process'),
standardized_path,
standardized_params['dtype'])
return fname_templates, fname_spike_train
def final_deconv(TMP_FOLDER,
standardized_path,
standardized_params,
fname_templates):
logger = logging.getLogger(__name__)
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
''' **********************************************
************** DECONVOLUTION *****************
**********************************************
'''
# run deconvolution
logger.info('FINAL DECONV')
(fname_templates,
fname_spike_train,
fname_templates_up,
fname_spike_train_up,
fname_shifts) = deconvolve.run(
fname_templates,
os.path.join(TMP_FOLDER,
'deconv'),
standardized_path,
standardized_params['dtype'])
# compute residual
logger.info('RESIDUAL COMPUTATION')
fname_residual, residual_dtype = residual.run(
fname_shifts,
fname_templates_up,
fname_spike_train_up,
os.path.join(TMP_FOLDER,
'residual'),
standardized_path,
standardized_params['dtype'],
dtype_out='float32')
#logger.info('FINAL MERGE')
fname_templates, fname_spike_train = merge.run(
os.path.join(TMP_FOLDER,
'post_deconv_merge'),
False,
fname_spike_train,
fname_templates,
fname_spike_train_up,
fname_templates_up,
standardized_path,
standardized_params['dtype'],
fname_residual,
residual_dtype)
return (fname_templates, fname_spike_train, fname_templates_up,
fname_spike_train_up, fname_residual, residual_dtype)
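# Illustrative usage (a sketch, not part of the original module): `run` is the
# public entry point of this pipeline. The configuration path below is a
# placeholder for a real YASS YAML config file.
if __name__ == '__main__':
    run('config.yaml', logger_level='INFO', clean=True, output_dir='tmp/')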
|
|
import logging
import json
import dropbox
import time
import copy
import threading
import re
from StringIO import StringIO
from datetime import datetime, timedelta
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.views.generic import View, TemplateView
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseNotFound, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, HttpResponsePermanentRedirect
from django.views.generic.base import ContextMixin
from django.conf import settings
from django.views.decorators.cache import never_cache
from django.core import serializers
from dropbox.rest import ErrorResponse
from django.core.cache import cache
from django.template import Template
from fastapp.utils import UnAuthorized, Connection, NoBasesFound, message, info, warn, channel_name_for_user, send_client
from fastapp.queue import generate_vhost_configuration
from fastapp.models import AuthProfile, Base, Apy, Setting, Executor, Process, Thread, Transaction
from fastapp.models import RUNNING, FINISHED
from fastapp import responses
from fastapp.executors.remote import call_rpc_client
from fastapp.importer import _handle_settings, _read_config
from fastapp.plugins import PluginRegistry
User = get_user_model()
logger = logging.getLogger(__name__)
use_plans = False
try:
from plans.quota import get_user_quota
use_plans = True
except ImportError:
use_plans = False
class CockpitView(TemplateView):
def get_context_data(self, **kwargs):
context = super(CockpitView, self).get_context_data(**kwargs)
qs = Executor.objects.all().order_by('base__name')
if not self.request.user.is_superuser:
qs = qs.filter(base__user=self.request.user)
context['executors'] = qs.order_by('base__name')
context['process_list'] = Process.objects.all().order_by('-running')
context['threads'] = Thread.objects.all().order_by('parent__name', 'name')
context['plugins'] = PluginRegistry().get()
return context
def dispatch(self, *args, **kwargs):
if not self.request.user.is_superuser:
return HttpResponseNotFound()
return super(CockpitView, self).dispatch(*args, **kwargs)
class ResponseUnavailableViewMixing():
def verify(self, request, base_model):
if not base_model.state:
response = HttpResponse()
if "html" in request.META['HTTP_ACCEPT']:
response.content_type = "text/html"
response.content = "Content cannot be delivered"
response.status_code = 503
return response
else:
return None
class DjendMixin(object):
def connection(self, request):
logger.debug("Creating connection for %s" % request.user)
return Connection(request.user.authprofile.access_token)
class DjendExecView(View, ResponseUnavailableViewMixing, DjendMixin):
STATE_OK = "OK"
STATE_NOK = "NOK"
STATE_NOT_FOUND = "NOT_FOUND"
STATE_TIMEOUT = "TIMEOUT"
def _prepare_request(self, request, exec_model):
apy_data = serializers.serialize("json", [exec_model],
fields=('base_id', 'name'))
struct = json.loads(apy_data)
apy_data = json.dumps(struct[0])
request_data = {}
request_data.update({'model': apy_data,
'base_name': exec_model.base.name})
get_dict = copy.deepcopy(request.GET)
post_dict = copy.deepcopy(request.POST)
for key in ["json", "shared_key"]:
if request.method == "GET":
if key in get_dict:
del get_dict[key]
if request.method == "POST":
if key in post_dict:
del post_dict[key]
request_data.update({'request': {
'method': request.method,
'content_type': request.META.get('CONTENT_TYPE'),
'GET': get_dict.dict(),
'POST': post_dict.dict(),
'user': {'username': request.user.username},
'UUID': exec_model.base.uuid,
'REMOTE_ADDR': request.META.get('REMOTE_ADDR')
}
})
logger.debug("REQUEST-data: %s" % request_data)
return request_data
def _execute(self, request, request_data, base_model, rid):
try:
# _do on remote
start = int(round(time.time() * 1000))
request_data.update({'rid': rid})
response_data = call_rpc_client(json.dumps(request_data),
generate_vhost_configuration(
base_model.user.username,
base_model.name),
base_model.name,
base_model.executor.password
)
end = int(round(time.time() * 1000))
ms=str(end-start)
logger.debug("DATA: %s" % str(response_data))
logger.debug("RESPONSE-time: %sms" % str(ms))
logger.debug("RESPONSE-data: %s" % response_data[:120])
data = json.loads(response_data)
data.update({
"time_ms": ms,
})
except Exception, e:
logger.exception(e)
raise Exception("Could not execute request")
return data
def _execute_async(self, request, request_data, base_model, rid):
try:
# _do on remote
request_data.update({'rid': rid})
call_rpc_client(json.dumps(request_data),
generate_vhost_configuration(
base_model.user.username,
base_model.name),
base_model.name,
base_model.executor.password,
async=True
)
except Exception, e:
logger.exception(e)
raise e
return True
def _handle_response(self, request, data, exec_model):
response_class = data.get("response_class", None)
default_status_code = 200
logger.debug(data)
if not data.has_key('returned'):
response_status_code = default_status_code
else:
if response_class:
try:
response_status_code = json.loads(data['returned']).get('status_code', default_status_code)
except:
response_status_code = data['returned'].get('status_code', default_status_code)
else:
response_status_code = default_status_code
# respond with json
if request.GET.has_key(u'json') or request.GET.has_key('callback'):
status = data.get("status", False)
# if is json
if status == "OK":
exec_model.mark_executed()
else:
exec_model.mark_failed()
if status in [self.STATE_NOK]:
response_status_code = 500
elif status in [self.STATE_NOT_FOUND]:
response_status_code = 404
elif status in [self.STATE_TIMEOUT]:
response_status_code = 502
# send counter to client
cdata = {
'counter':
{
'executed': str(Apy.objects.get(id=exec_model.id).counter.executed),
'failed': str(Apy.objects.get(id=exec_model.id).counter.failed)
},
'apy_id': exec_model.id
}
#user = channel_name_for_user(request)
#send_client(user, "counter", cdata)
if request.GET.has_key('callback'):
data = '%s(%s);' % (request.GET['callback'], json.dumps(data))
return HttpResponse(data, "application/javascript")
return HttpResponse(json.dumps(data), content_type="application/json", status=response_status_code)
# real response
elif response_class:
if response_class in (responses.XMLResponse.__name__,
responses.HTMLResponse.__name__,
responses.JSONResponse.__name__):
returned = load_json(data['returned'])
content_type = returned['content_type']
content = returned['content']
elif response_class == u''+responses.RedirectResponse.__name__:
location = load_json(data['returned'])['content']
return HttpResponseRedirect(location)
else:
logger.warning("Wrong response")
return HttpResponseServerError("You're apy did not return any allowed response-class or is not called with 'json' or 'callback' as querystring.")
return HttpResponse(content, content_type, status=response_status_code)
else:
msg = "Not received json or callback query string nor response_class from response."
logger.error("Not received json or callback query string nor response_class from response.")
return HttpResponseServerError(msg)
#@profile
@never_cache
def get(self, request, *args, **kwargs):
# get base
base_model = get_object_or_404(Base, name=kwargs['base'])
response = self.verify(request, base_model)
if response:
return response
# get exec from database
try:
exec_model = base_model.apys.get(id=kwargs['id'])
except Apy.DoesNotExist:
#warning(channel_name_for_user(request), "404 on %s" % request.META['PATH_INFO'])
return HttpResponseNotFound("'%s' not found" % request.META['PATH_INFO'])
rid = request.GET.get('rid', None)
if rid:
# look for transaction
transaction = Transaction.objects.get(pk=rid)
if transaction.tout:
data = transaction.tout
else:
data = {'status': transaction.get_status_display()}
redirect_to = request.get_full_path()
data.update({'url': redirect_to})
else:
request_data = self._prepare_request(request, exec_model)
transaction = Transaction(apy=exec_model)
transaction.tin = json.dumps(request_data)
transaction.status = RUNNING
transaction.save()
if request.GET.has_key('async') or request.POST.has_key('async'):
transaction.async = True
transaction.save()
# execute async
data = self._execute_async(request, request_data, base_model, transaction.rid)
redirect_to = request.get_full_path()+"&rid=%s" % transaction.rid
return HttpResponsePermanentRedirect(redirect_to)
else:
# execute
data = self._execute(request, request_data, base_model, transaction.rid)
try:
returned = json.loads(data['returned'])
data['returned'] = returned
except:
pass
transaction.tout = json.dumps(data)
transaction.status = FINISHED
transaction.save()
# add exec's id to the response dict
data.update({
"id": kwargs['id'],
"rid": transaction.rid
})
# response
response = self._handle_response(request, data, exec_model)
return response
@method_decorator(csrf_exempt)
def post(self, request, *args, **kwargs):
return DjendExecView.get(self, request, *args, **kwargs)
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(DjendExecView, self).dispatch(*args, **kwargs)
class DjendSharedView(View, ContextMixin):
def get(self, request, *args, **kwargs):
context = RequestContext(request)
base_name = kwargs.get('base')
shared_key = request.GET.get('shared_key')
if not shared_key:
shared_key = request.session.get('shared_key')
base_model = get_object_or_404(Base, name=base_name, uuid=shared_key)
# store it in session list
if not request.session.__contains__('shared_bases'):
request.session['shared_bases'] = {}
request.session['shared_bases'][base_name] = shared_key
request.session.modified = True
# context
context['VERSION'] = version
context['shared_bases'] = request.session['shared_bases']
context['FASTAPP_EXECS'] = base_model.apys.all().order_by('name')
context['LAST_EXEC'] = request.GET.get('done')
context['active_base'] = base_model
context['username'] = request.user.username
context['FASTAPP_NAME'] = base_model.name
#context['DROPBOX_REDIRECT_URL'] = settings.DROPBOX_REDIRECT_URL
#context['PUSHER_KEY'] = settings.PUSHER_KEY
context['CHANNEL'] = channel_name_for_user(request)
context['FASTAPP_STATIC_URL'] = "/%s/%s/static/" % ("fastapp", base_model.name)
rs = base_model.template(context)
return HttpResponse(rs)
class DjendBaseCreateView(View):
def post(self, request, *args, **kwargs):
# TODO: should be in planet project and not fastapp
if use_plans:
if get_user_quota(request.user).get('MAX_BASES_PER_USER') <= request.user.bases.count():
return HttpResponseForbidden("Too many bases for your plan.")
base, created = Base.objects.get_or_create(name=request.POST.get('new_base_name'), user=User.objects.get(username=request.user.username))
if not created:
return HttpResponseBadRequest("A base with this name does already exist.")
base.save_and_sync()
from fastapp.api_serializers import BaseSerializer
base_data = BaseSerializer(base)
response_data = base_data.data
return HttpResponse(json.dumps(response_data), content_type="application/json")
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DjendBaseCreateView, self).dispatch(*args, **kwargs)
class DjendBaseDeleteView(View):
def post(self, request, *args, **kwargs):
base = Base.objects.get(name=kwargs['base'], user=User.objects.get(username=request.user.username))
base.delete()
response_data = {"redirect": "/fastapp/"}
return HttpResponse(json.dumps(response_data), content_type="application/json")
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DjendBaseDeleteView, self).dispatch(*args, **kwargs)
class DjendBaseSettingsView(View):
def get(self, request, *args, **kwargs):
base = Base.objects.get(name=kwargs['base'])
base_settings = base.setting.all().extra(\
select={'lower_key':'lower(key)'}).order_by('lower_key').values('key', 'value', 'id')
return HttpResponse(json.dumps(list(base_settings)), content_type="application/json")
def delete(self, request, *args, **kwargs):
base = Base.objects.get(name=kwargs['base'])
base_setting = base.setting.get(id=kwargs['id'])
base_setting.delete()
return HttpResponse(content_type="application/json")
def post(self, request, *args, **kwargs):
base_settings = json.loads(request.POST.get('settings'))
try:
for setting in base_settings:
base = Base.objects.get(name=kwargs['base'])
if setting.has_key('id'):
setting_obj = Setting.objects.get(base=base, id=setting['id'])
setting_obj.key = setting['key']
else:
setting_obj = Setting(key=setting['key'], base=base)
setting_obj.value = setting['value']
setting_obj.save()
except Exception:
logger.exception("Could not save settings")
return HttpResponse({}, content_type="application/json")
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DjendBaseSettingsView, self).dispatch(*args, **kwargs)
class DjendExecDeleteView(View):
def post(self, request, *args, **kwargs):
base = get_object_or_404(Base, name=kwargs['base'], user=User.objects.get(username=request.user.username))
# syncing to storage provider
# exec
e = base.apys.get(name=kwargs['id'])
try:
e.delete()
#info(request.user.username, "Exec '%s' deleted" % e.exec_name)
except Exception, e:
pass
#error(request.user.username, "Error deleting(%s)" % e)
return HttpResponse('{"redirect": %s}' % request.META['HTTP_REFERER'])
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DjendExecDeleteView, self).dispatch(*args, **kwargs)
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DjendExecRenameView, self).dispatch(*args, **kwargs)
class DjendBaseRenameView(View):
def post(self, request, *args, **kwargs):
base = get_object_or_404(Base, name=kwargs['base'], user=User.objects.get(username=request.user.username))
base.name = request.POST.get('new_name')
base.save()
response_data = {"redirect": request.META['HTTP_REFERER'].replace(kwargs['base'], base.name)}
return HttpResponse(json.dumps(response_data), content_type="application/json")
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DjendBaseRenameView, self).dispatch(*args, **kwargs)
class DjendBaseSaveView(View):
def post(self, request, *args, **kwargs):
base = get_object_or_404(Base, name=kwargs['base'], user=User.objects.get(username=request.user.username))
content = request.POST.get('content', None)
public = request.POST.get('public', None)
static_public = request.POST.get('static_public', None)
# exec
if request.POST.has_key('exec_name'):
exec_name = request.POST.get('exec_name')
# save in database
e = base.apys.get(name=exec_name)
if len(content) > 8200:
pass
#error(channel_name_for_user(request), "Exec '%s' is to big." % exec_name)
else:
e.module = content
e.description = request.POST.get('exec_description')
e.save()
info(channel_name_for_user(request), "Exec '%s' saved" % exec_name)
# base
else:
logger.info("Save base")
if content: base.content = content
if public: base.public = public
if static_public: base.static_public = static_public
base.save()
# save in database
#info(channel_name_for_user(request), "Base index '%s' saved" % base.name)
return HttpResponse()
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DjendBaseSaveView, self).dispatch(*args, **kwargs)
class DjendBaseView(TemplateView, ContextMixin):
def _refresh_single_base(self, base):
base = Base.objects.get(name=base)
base.refresh()
base.save()
def get(self, request, *args, **kwargs):
rs = None
context = RequestContext(request)
# redirect to shared view
if not request.user.is_authenticated():
if request.GET.has_key('shared_key') or request.session.__contains__("shared_key"):
return DjendSharedView.as_view()(request, *args, **kwargs)
try:
# refresh bases from dropbox
refresh = "refresh" in request.GET
base = kwargs.get('base')
if refresh and base:
self._refresh_single_base(base)
base_model = None
if base:
base_model = get_object_or_404(Base, name=base, user=request.user.id)
#base_model.save()
#if refresh:
# base_model.refresh_execs()
# execs
try:
context['FASTAPP_EXECS'] = base_model.apys.all().order_by('name')
except ErrorResponse, e:
messages.warning(request, "No app.json found", extra_tags="alert-warning")
logging.debug(e)
# context
try:
context['bases'] = Base.objects.filter(user=request.user.id).order_by('name')
context['FASTAPP_NAME'] = base
#context['DROPBOX_REDIRECT_URL'] = settings.DROPBOX_REDIRECT_URL
#context['PUSHER_KEY'] = settings.PUSHER_KEY
context['CHANNEL'] = channel_name_for_user(request)
context['FASTAPP_STATIC_URL'] = "/%s/%s/static/" % ("fastapp", base)
context['active_base'] = base_model
context['username'] = request.user.username
context['LAST_EXEC'] = request.GET.get('done')
context['transaction_list'] = Transaction.objects.filter(apy__base__name=base).filter(created__gte=datetime.now()-timedelta(minutes=30)).order_by('created')
#rs = base_model.template(context)
except ErrorResponse, e:
if e.__dict__['status'] == 404:
logging.debug(base)
logging.debug("Template not found")
messages.error(request, "Template %s not found" % template_name, extra_tags="alert-danger")
# error handling
except (UnAuthorized, AuthProfile.DoesNotExist), e:
return HttpResponseRedirect("/fastapp/dropbox_auth_start")
except NoBasesFound, e:
message(request, logging.WARNING, "No bases found")
rs = render_to_string("fastapp/base.html", context_instance=context)
#rs = render_to_string("fastapp/base.html", context_instance=context)
return HttpResponse(rs)
class DjendView(TemplateView):
def get_context_data(self, **kwargs):
context = super(DjendView, self).get_context_data(**kwargs)
context['bases'] = Base.objects.filter(user=self.request.user).order_by('name')
context['public_bases'] = Base.objects.filter(public=True).order_by('name')
#try:
# token = self.request.user.auth_token
#except Token.DoesNotExist:
# token = Token.objects.create(user=self.request.user)
#context['TOKEN'] = token
return context
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(DjendView, self).dispatch(*args, **kwargs)
def get_dropbox_auth_flow(web_app_session):
redirect_uri = "%s/fastapp/dropbox_auth_finish" % settings.DROPBOX_REDIRECT_URL
dropbox_consumer_key = settings.DROPBOX_CONSUMER_KEY
dropbox_consumer_secret = settings.DROPBOX_CONSUMER_SECRET
return dropbox.client.DropboxOAuth2Flow(dropbox_consumer_key, dropbox_consumer_secret, redirect_uri, web_app_session, "dropbox-auth-csrf-token")
# URL handler for /dropbox-auth-start
def dropbox_auth_start(request):
authorize_url = get_dropbox_auth_flow(request.session).start()
return HttpResponseRedirect(authorize_url)
# URL handler for /dropbox-auth-finish
def dropbox_auth_finish(request):
try:
access_token, user_id, url_state = get_dropbox_auth_flow(request.session).finish(request.GET)
auth, created = AuthProfile.objects.get_or_create(user=request.user)
# store access_token
auth.access_token = access_token
auth.dropbox_userid = user_id
auth.user = request.user
auth.save()
return HttpResponseRedirect("/fastapp/")
except dropbox.client.DropboxOAuth2Flow.BadRequestException, e:
return HttpResponseBadRequest(e)
except dropbox.client.DropboxOAuth2Flow.BadStateException, e:
# Start the auth flow again.
return HttpResponseRedirect("http://www.mydomain.com/dropbox_auth_start")
except dropbox.client.DropboxOAuth2Flow.CsrfException, e:
return HttpResponseForbidden()
except dropbox.client.DropboxOAuth2Flow.NotApprovedException, e:
raise e
except dropbox.client.DropboxOAuth2Flow.ProviderException, e:
raise e
# URL handler for /dropbox-auth-start
def dropbox_auth_disconnect(request):
request.user.authprofile.access_token = ""
request.user.authprofile.save()
return HttpResponseRedirect("/profile/")
def process_file(path, metadata, client, user):
def get_app_config(client, path):
appconfig_file = StringIO()
appconfig_file = StringIO()
appconfig_file.seek(0)
appconfig = _read_config(appconfig_file)
appconfig_file.close()
return appconfig
try:
# Handle only files ending with ".py" or "config"
#if not path.endswith("py") or not metadata or "/." in path or not path.endswith("config") or not path is "index.html":
# logger.info("Ignore path: %s" % path)
# continue
# setup recognition
regex = re.compile("/(.*)/.*")
r = regex.search(path)
if not r:
logger.warn("regex '/(.*)/(.*).py' no results in '%s'" % path)
return
names = r.groups()
base_name = names[0]
appconfig_path = "%s/app.config" % base_name
logger.info("notification for: base_name: %s, user: %s" % (base_name, user))
if "/." in path:
logger.debug("Ignore file starting with a dot")
return
# Handle app.config
elif "app.config" in path:
appconfig = get_app_config(client, path)
logger.info("Read app.config for base %s" % base_name)
# base
base_obj, created = Base.objects.get_or_create(name=base_name, user=user)
base_obj.save()
logger.info("base %s created?: %s" % (base_name, created))
# settings
_handle_settings(appconfig['settings'], base_obj)
# Handle index.html
elif path is "index.html":
base_obj = Base.objects.get(name=base_name, user=user)
index_html = client.get_file_content(path)
base_obj.content = index_html
base_obj.save()
# Handle apys
elif path.endswith("py"):
logger.info("Try to handle apy on path %s" % path)
regex = re.compile("/(.*)/([a-zA-Z-_0-9.]*).py")
r = regex.search(path)
apy_name = r.groups()[1]
try:
base_obj = Base.objects.get(name=base_name, user=user)
apy, created = Apy.objects.get_or_create(name=apy_name, base=base_obj)
if created:
apy.save()
logger.info("new apy %s created" % apy_name)
else:
logger.info("apy %s already exists" % apy_name)
except Apy.DoesNotExist, e:
logger.warn(e.message)
return
try:
description = get_app_config(client, appconfig_path)['modules'][apy_name].get('description', None)
if description:
apy.description = get_app_config(client, appconfig_path)['modules'][apy_name]['description']
apy.save()
except Exception, e:
logger.warn("Description could not be read for %s" % apy_name)
logger.warn(repr(e))
new_rev = metadata['rev']
logger.debug("local rev: %s, remote rev: %s" % (apy.rev, new_rev))
if apy.rev == new_rev:
logger.info("No changes in %s" % path)
else:
logger.info("Load changes for %s" % path)
content, rev = client.get_file_content_and_rev("%s" % path)
apy.module = content
logger.info("Update content for %s with %s" % (path, str(len(content))))
apy.rev = rev
apy.save()
logger.info("Apy %s updated" % apy.name)
else:
logger.warn("Path %s ignored" % path)
if "static" in path:
try:
cache_path = path.lstrip("/")
base_obj = Base.objects.get(name=base_name, user=user)
cache_key = "%s-%s-%s" % (base_obj.user.username, base_obj.name, cache_path)
logger.info("Delete cache entry: %s" % cache_key)
cache.delete(cache_key)
except Exception, e:
logger.error("Problem cleaning cache for static file %s" % cache_path)
logger.warn("Looking for %s for user %s" % (base_name, user.username))
except Exception, e:
logger.error("Exception handling path %s" % path)
logger.exception(e)
def process_user(uid):
auth_profile = AuthProfile.objects.filter(dropbox_userid=uid)[0]
token = auth_profile.access_token
user = auth_profile.user
logger.info("START process user '%s'" % user)
logger.info("Process change notfication for user: %s" % user.username)
cursor = cache.get("cursor-%s" % uid)
client = Connection(token)
has_more = True
from fastapp.threadpool import ThreadPool
from random import uniform
while has_more:
result = client.delta(cursor)
pool = ThreadPool(1)
for path, metadata in result['entries']:
logger.info("Add task for %s to pool" % path)
pool.add_task(process_file, path, metadata, client, user)
logger.info("Waiting for completion ... %s" % path)
pool.wait_completion()
logger.info("Tasks completed.")
# Update cursor
cursor = result['cursor']
cursor = cache.set("cursor-%s" % uid, cursor)
# Repeat only if there's more to do
has_more = result['has_more']
logger.info("END process user '%s'" % user)
class DropboxNotifyView(View):
def get(self, request):
challenge = request.GET['challenge']
return HttpResponse(challenge)
def post(self, request):
# get delta for user
for uid in json.loads(request.body)['delta']['users']:
thread = threading.Thread(target=process_user, args=(uid,))
thread.daemon = True
thread.start()
return HttpResponse()
@csrf_exempt
def dispatch(self, *args, **kwargs):
return super(DropboxNotifyView, self).dispatch(*args, **kwargs)
@csrf_exempt
def login_or_sharedkey(function):
def wrapper(request, *args, **kwargs):
# logger.debug("authenticate %s" % request.user)
user=request.user
# if logged in
if user.is_authenticated():
return function(request, *args, **kwargs)
base_name = kwargs.get('base')
base_obj = get_object_or_404(Base, name=base_name)
# static_public
if "static" in request.path_info and base_obj.static_public:
return function(request, *args, **kwargs)
# if base is public
if base_obj.public:
return function(request, *args, **kwargs)
# if shared key in query string
elif request.GET.has_key('shared_key'):
shared_key = request.GET.get('shared_key', None)
if base_obj.uuid==shared_key or base_obj.public:
request.session['shared_key'] = shared_key
return function(request, *args, **kwargs)
else:
return HttpResponseNotFound()
# if shared key in session and corresponds to base
#has_shared_key = request.session.__contains__('shared_key')
#if has_shared_key:
# shared_key = request.session['shared_key']
# logger.info("authenticate on base '%s' with shared_key '%s'" % (base, shared_key))
# get_object_or_404(Base, name=base_name, uuid=shared_key)
# return function(request, *args, **kwargs)
# don't redirect when accessing an exec without a secret key
elif kwargs.has_key('id'):
if Apy.objects.get(base=base_obj, id=kwargs['id']).everyone:
return function(request, *args, **kwargs)
else:
return HttpResponseNotFound('Ups, wrong URL?')
# TODO: should redirect to a nice login page
return HttpResponseRedirect("/")
return wrapper
def load_json(s):
if isinstance(s, basestring):
r = json.loads(s)
elif isinstance(s, dict):
r = s
else:
raise TypeError("load_json expects a JSON string or a dict, got %r" % type(s))
return r
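# Illustrative usage (a sketch, not part of the original module): the
# `login_or_sharedkey` decorator above guards function-based views that get
# the base name from the URL pattern as the `base` keyword argument. The view
# name and response body below are hypothetical.
@login_or_sharedkey
def example_base_ping(request, base):
    return HttpResponse(json.dumps({"base": base, "status": "ok"}),
                        content_type="application/json")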
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CHANGED BY Kapiche Ltd.
# Copyright 2015 Kapiche Ltd. All rights reserved.
# Based on work by the good folk responsible for gcloud-python. Thanks folks!
# Author: Ryan Stuart<[email protected]>
#
"""Create / interact with gcloud datastore queries."""
from __future__ import absolute_import, division, print_function
import base64
import six
from . import utils
from ._generated import query_pb2 as query_pb
from .connection import get_connection
from ..exceptions import InvalidQueryError
from ..key import Key
from .transaction import Transaction
class Query(object):
"""
A Google Datastore Query.
This class serves as an abstraction for creating a query over data stored in Datastore.
"""
OPERATORS = {
'<=': query_pb.PropertyFilter.LESS_THAN_OR_EQUAL,
'lte': query_pb.PropertyFilter.LESS_THAN_OR_EQUAL,
'>=': query_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,
'gte': query_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,
'<': query_pb.PropertyFilter.LESS_THAN,
'lt': query_pb.PropertyFilter.LESS_THAN,
'>': query_pb.PropertyFilter.GREATER_THAN,
'gt': query_pb.PropertyFilter.GREATER_THAN,
'=': query_pb.PropertyFilter.EQUAL,
'eq': query_pb.PropertyFilter.EQUAL,
}
"""Mapping of operator strs and their protobuf equivalents."""
def __init__(self, entity, ancestor=None, filters=(), projection=(), order=(), group_by=(),
limit=None, offset=0):
"""
Initialise a new Query.
:type entity: type
:param entity: The entity class to use for the query. Used to derive the ``kind`` passed to datastore.
:type ancestor: :class:`~gcloudoem.entity.Entity` or None
:param ancestor: the ancestor to which this query's results are restricted.
:type filters: sequence of (property_name, operator, value) tuples
:param filters: property filters applied by this query.
:type projection: sequence of str
:param projection: fields to be returned as part of query results. An empty sequence means all fields.
:type order: sequence of str
:param order: field names used to order query results. Prepend '-' to a field name to sort it in descending
order.
:type group_by: sequence of str
:param group_by: field names used to group query results.
:type limit: int
:param limit: number of entity results to limit this query to. None means don't limit. Defaults to None.
:type offset: int
:param offset: the offset into the results the first entity should be. Defaults to 0.
"""
from gcloudoem import Entity
if not isinstance(entity, type) or not issubclass(entity, Entity):
raise ValueError('You must pass a valid entity class to query (one that subclasses Entity)')
self._entity = entity
self._ancestor = ancestor
self._filters = list()
self._projection = list(projection)
self._order = list(order)
self._group_by = list(group_by)
self._limit = limit
self._offset = offset
self._has_inequality_filter = None
for f in filters:
self.add_filter(*f)
@property
def limit(self):
return self._limit
@limit.setter
def limit(self, limit):
self._limit = limit
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, offset):
self._offset = offset
def set_limits(self, offset, limit):
"""Shortcut to set the offset and the limit. Useful for slices."""
self._offset = offset
self._limit = limit
def is_limited(self):
"""Has an offset or limit been applied to this query?"""
return bool(self._offset or self._limit)
@property
def entity(self):
"""
The entity class used by this Query.
:rtype: type
"""
return self._entity
@property
def ancestor(self):
"""
The ancestor key for the query.
:rtype: :class:`~gcloudoem.entity.Entity` or None
"""
return self._ancestor
@property
def filters(self):
"""
Filters set on the query.
:rtype: sequence of (property_name, operator, value) tuples.
"""
return self._filters[:]
def add_filter(self, property_name, operator, value):
"""
Add a filter to the query based on a property name, operator and a value.
Expressions take the form of::
.add_filter('<property>', '<operator>', <value>)
where property is the name of a property stored on the entity for this query, operator is one of ``OPERATORS``
(ie, ``=``, ``<``, ``<=``, ``>``, ``>=``) and value is the value to filter on::
>>> from gcloudoem import entity, properties
>>> from gcloudoem.datastore.query import Query
>>> class Person(entity.Entity):
... name = properties.TextProperty()
... age = properties.IntegerProperty()
...
>>> query = Query(Person)
>>> query.add_filter('name', '=', 'James')
>>> query.add_filter('age', '>', 50)
:type property_name: str
:param property_name: A property name. Used to fetch the corresponding property off the Entity for this query.
:type operator: str
:param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``. See :attr:`Query.OPERATORS`.
:type value: int, str, bool, float, None, datetime
:param value: The value to filter on.
:raises: :class:`gcloudoem.exceptions.InvalidQueryError` if:
* `operator` is not one of the specified values, or
* query.entity doesn't have a property by the name ``property_name``, or
* the filter targets the ``key`` property but passes an invalid operator (``=`` is required) or value (a Key, str or int is required).
"""
if self.OPERATORS.get(operator) is None:
error_message = 'Invalid expression: "%s"' % (operator,)
choices_message = 'Please use one of: =, <, <=, >, >=.'
raise InvalidQueryError(error_message, choices_message)
if property_name == 'key':
if not isinstance(value, (Key,) + six.string_types + six.integer_types):
raise InvalidQueryError('Invalid key value "%s"' % type(value))
if not isinstance(value, Key):
value = Key(self._entity._meta.kind, value=value)
if self.OPERATORS[operator] != query_pb.PropertyFilter.EQUAL:
raise InvalidQueryError('Invalid operator for key: "%s"' % operator)
elif not hasattr(self._entity, property_name):
raise InvalidQueryError("Entity %s used in this Query doesn't have a property %s" %
(self._entity._meta.kind, property_name))
if self.OPERATORS[operator] in (
    query_pb.PropertyFilter.LESS_THAN, query_pb.PropertyFilter.LESS_THAN_OR_EQUAL,
    query_pb.PropertyFilter.GREATER_THAN, query_pb.PropertyFilter.GREATER_THAN_OR_EQUAL,
):
if not self._has_inequality_filter:
self._has_inequality_filter = property_name
elif property_name != self._has_inequality_filter:
raise InvalidQueryError(
"Datastore only supports inequality operators on a single property within a query."
)
self._filters.append((property_name, operator, value))
@property
def projection(self):
"""
Fields names returned by the query.
:rtype: sequence of str
:returns: Names of fields in query results.
"""
return self._projection[:]
@projection.setter
def projection(self, value):
"""
:raises: :class:`gcloudoem.exceptions.InvalidQueryError` if the property name(s) in value don't exist on the
entity for this query.
"""
if isinstance(value, str):
value = [value]
for projection in value:
if not projection == '__key__' and not hasattr(self._entity, projection):
raise InvalidQueryError("Entity %s used in this Query doesn't have a property %s" %
(self.entity._meta.kind, projection))
self._projection[:] = value
def keys_only(self):
"""Set the projection to include only keys."""
self._projection[:] = ['__key__']
@property
def order(self):
"""
Names of fields used to sort query results.
:rtype: sequence of str
"""
return self._order[:]
@order.setter
def order(self, value):
"""
Set the fields used to sort query results.
Sort fields will be applied in the order specified.
:type value: str or sequence of str
:param value: Each value is a str giving the name of the property on which to sort, optionally preceded by a
hyphen (-) to specify descending order. Omitting the hyphen implies ascending order.
:raises: :class:`gcloudoem.exceptions.InvalidQueryError` if the property name(s) in value don't exist on the
entity for this query.
"""
if isinstance(value, str):
value = [value]
for prop_name in value:
prop = prop_name[1:] if prop_name[0] == '-' else prop_name
if not hasattr(self._entity, prop):
raise InvalidQueryError("Entity %s used in this Query doesn't have a property %s" %
(self._entity._meta.kind, prop_name))
self._order[:] = value
@property
def group_by(self):
"""
Names of fields used to group query results.
:rtype: sequence of str
"""
return self._group_by[:]
@group_by.setter
def group_by(self, value):
"""
Set fields used to group query results.
:type value: str or sequence of strs
:param value: Each value is a str giving the name of a property to use to group results together.
:raises: :class:`gcloudoem.exceptions.InvalidQueryError` if the property name(s) in value don't exist on the
entity for this query.
"""
if isinstance(value, str):
value = [value]
for prop_name in value:
if not hasattr(self._entity, prop_name):
raise InvalidQueryError("Entity %s used in this Query doesn't have a property %s" %
(self._entity._meta.kind, prop_name))
self._group_by[:] = value
def __call__(self):
"""
Execute the Query; return a :class:`Cursor` for the matching entities.
For example::
>>> from gcloudoem.datastore.query import Query
>>> query = Query(Person)
>>> query.add_filter('name', '=', 'Sally')
>>> list(query())
[<Entity object>, <Entity object>, ...]
>>> query.limit = 1
>>> list(query())
[<Entity object>]
For an explanation of the options, see
https://cloud.google.com/datastore/docs/concepts/queries#Datastore_Query_cursors.
:rtype: :class:`Cursor`
:raises: :class:`~gcloudoem.exceptions.ConnectionError` if there is no active connection.
"""
connection = get_connection()
return Cursor(self, connection, self.limit, self.offset)
def clone(self):
return self.__class__(
self._entity, self._ancestor, self.filters, self.projection, self.order, self.group_by,
self._limit, self._offset
)
def to_protobuf(self):
"""
Convert this Query instance to the corresponding protobuf representation.
:rtype: :class:`~gcloudoem.datastore.datastore_v1_pb2.Query`
:returns: A protobuf query that can be sent to the protobuf API. N.b. that it does not contain "in-flight"
fields for ongoing query executions (cursors, offset, limit).
"""
pb = query_pb.Query()
for projection_name in self._projection:
pb.projection.add().property.name = projection_name
if self._entity:
pb.kind.add().name = self._entity._meta.kind
composite_filter = pb.filter.composite_filter
composite_filter.op = query_pb.CompositeFilter.AND
if self.ancestor:
ancestor_pb = self.ancestor._properties['key'].to_protobuf(self.ancestor.key)
# Filter on __key__ HAS_ANCESTOR == ancestor.
ancestor_filter = composite_filter.filters.add().property_filter
ancestor_filter.property.name = '__key__'
ancestor_filter.op = query_pb.PropertyFilter.HAS_ANCESTOR
ancestor_filter.value.key_value.CopyFrom(ancestor_pb)
for property_name, operator, value in self.filters:
pb_op_enum = self.OPERATORS.get(operator)
# Add the specific filter
prop = self._entity._properties[property_name]
property_filter = composite_filter.filters.add().property_filter
property_filter.property.name = prop.db_name
property_filter.op = pb_op_enum
# Set the value to filter on based on the type.
if property_name == 'key':
key_pb = prop.to_protobuf(value)
property_filter.value.key_value.CopyFrom(key_pb)
else:
attr, pb_value = prop.to_protobuf(value)
utils.set_protobuf_value(property_filter.value, attr, pb_value)
if not composite_filter.filters:
pb.ClearField('filter')
for prop in self.order:
property_order = pb.order.add()
if prop.startswith('-'):
property_order.property.name = prop[1:]
property_order.direction = property_order.DESCENDING
else:
property_order.property.name = prop
property_order.direction = property_order.ASCENDING
for group_by_name in self.group_by:
pb.group_by.add().name = group_by_name
return pb
class Cursor(object):
"""
Represent the state of a given execution of a Query.
Instances are iterable; iterating yields the matching entities, fetching further pages as needed.
"""
_NOT_FINISHED = query_pb.QueryResultBatch.NOT_FINISHED
_FINISHED = (
query_pb.QueryResultBatch.NO_MORE_RESULTS,
query_pb.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT,
)
def __init__(self, query, connection, limit=None, offset=0, start_cursor=None, end_cursor=None):
self._query = query
self._connection = connection
self._limit = limit
self._offset = offset
self._start_cursor = start_cursor
self._end_cursor = end_cursor
self._page = self._more_results = None
def next_page(self):
"""
Fetch a single "page" of query results.
Low-level API for fine-grained control; the more convenient API is to iterate over this Cursor.
:rtype: tuple, (entities, more_results, cursor)
"""
pb = self._query.to_protobuf()
start_cursor = self._start_cursor
if start_cursor is not None:
pb.start_cursor = base64.b64decode(start_cursor)
end_cursor = self._end_cursor
if end_cursor is not None:
pb.end_cursor = base64.b64decode(end_cursor)
if self._limit is not None:
pb.limit = self._limit
pb.offset = self._offset
transaction = Transaction.current()
query_results = self._connection.run_query(
query_pb=pb,
namespace=self._connection.namespace,
transaction_id=transaction and transaction.id,
)
# NOTE: `query_results` contains an extra value that we don't use, namely `skipped_results`.
#
# NOTE: The value of `more_results` is not currently useful because the back-end always returns an enum value of
# MORE_RESULTS_AFTER_LIMIT even if there are no more results. See
# https://github.com/GoogleCloudPlatform/gcloud-python/issues/280 for discussion.
entity_pbs, cursor_as_bytes, more_results_enum = query_results[:3]
self._start_cursor = base64.b64encode(cursor_as_bytes)
self._end_cursor = None
if more_results_enum == self._NOT_FINISHED:
self._more_results = True
elif more_results_enum in self._FINISHED:
self._more_results = False
else:
raise RuntimeError('Unexpected value returned for `more_results`.')
self._page = [self._query.entity.from_protobuf(pb) for pb in entity_pbs]
return self._page, self._more_results, self._start_cursor
def __iter__(self):
"""
Generator yielding all results matching our query.
:rtype: sequence of :class:`~gcloudoem.entity.Entity`
"""
self.next_page()
while True:
for entity in self._page:
yield entity
if not self._more_results:
break
self.next_page()
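# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). ``Person`` and its
# properties are assumptions borrowed from the ``add_filter`` docstring above;
# an active datastore connection (see ``get_connection``) must be established
# before the query is executed. Calling the query returns a ``Cursor``, which
# can be iterated to yield entities.
def _example_query_usage():
    from gcloudoem import entity, properties  # import path assumed, mirrors the docstring example

    class Person(entity.Entity):
        name = properties.TextProperty()
        age = properties.IntegerProperty()

    query = Query(Person, order=('-age',), limit=10)
    query.add_filter('age', '>=', 21)  # inequality filters are limited to a single property
    return [person for person in query()]  # __call__ returns an iterable Cursor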
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.secretmanager_v1.types import resources
from google.cloud.secretmanager_v1.types import service
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import SecretManagerServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import SecretManagerServiceGrpcTransport
class SecretManagerServiceGrpcAsyncIOTransport(SecretManagerServiceTransport):
"""gRPC AsyncIO backend transport for SecretManagerService.
Secret Manager Service
Manages secrets and operations using those secrets. Implements a
REST model with the following objects:
- [Secret][google.cloud.secretmanager.v1.Secret]
- [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "secretmanager.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "secretmanager.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_secrets(
self,
) -> Callable[[service.ListSecretsRequest], Awaitable[service.ListSecretsResponse]]:
r"""Return a callable for the list secrets method over gRPC.
Lists [Secrets][google.cloud.secretmanager.v1.Secret].
Returns:
Callable[[~.ListSecretsRequest],
Awaitable[~.ListSecretsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_secrets" not in self._stubs:
self._stubs["list_secrets"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/ListSecrets",
request_serializer=service.ListSecretsRequest.serialize,
response_deserializer=service.ListSecretsResponse.deserialize,
)
return self._stubs["list_secrets"]
@property
def create_secret(
self,
) -> Callable[[service.CreateSecretRequest], Awaitable[resources.Secret]]:
r"""Return a callable for the create secret method over gRPC.
Creates a new [Secret][google.cloud.secretmanager.v1.Secret]
containing no
[SecretVersions][google.cloud.secretmanager.v1.SecretVersion].
Returns:
Callable[[~.CreateSecretRequest],
Awaitable[~.Secret]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_secret" not in self._stubs:
self._stubs["create_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/CreateSecret",
request_serializer=service.CreateSecretRequest.serialize,
response_deserializer=resources.Secret.deserialize,
)
return self._stubs["create_secret"]
@property
def add_secret_version(
self,
) -> Callable[
[service.AddSecretVersionRequest], Awaitable[resources.SecretVersion]
]:
r"""Return a callable for the add secret version method over gRPC.
Creates a new
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion]
containing secret data and attaches it to an existing
[Secret][google.cloud.secretmanager.v1.Secret].
Returns:
Callable[[~.AddSecretVersionRequest],
Awaitable[~.SecretVersion]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "add_secret_version" not in self._stubs:
self._stubs["add_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/AddSecretVersion",
request_serializer=service.AddSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["add_secret_version"]
@property
def get_secret(
self,
) -> Callable[[service.GetSecretRequest], Awaitable[resources.Secret]]:
r"""Return a callable for the get secret method over gRPC.
Gets metadata for a given
[Secret][google.cloud.secretmanager.v1.Secret].
Returns:
Callable[[~.GetSecretRequest],
Awaitable[~.Secret]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_secret" not in self._stubs:
self._stubs["get_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/GetSecret",
request_serializer=service.GetSecretRequest.serialize,
response_deserializer=resources.Secret.deserialize,
)
return self._stubs["get_secret"]
@property
def update_secret(
self,
) -> Callable[[service.UpdateSecretRequest], Awaitable[resources.Secret]]:
r"""Return a callable for the update secret method over gRPC.
Updates metadata of an existing
[Secret][google.cloud.secretmanager.v1.Secret].
Returns:
Callable[[~.UpdateSecretRequest],
Awaitable[~.Secret]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_secret" not in self._stubs:
self._stubs["update_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/UpdateSecret",
request_serializer=service.UpdateSecretRequest.serialize,
response_deserializer=resources.Secret.deserialize,
)
return self._stubs["update_secret"]
@property
def delete_secret(
self,
) -> Callable[[service.DeleteSecretRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete secret method over gRPC.
Deletes a [Secret][google.cloud.secretmanager.v1.Secret].
Returns:
Callable[[~.DeleteSecretRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_secret" not in self._stubs:
self._stubs["delete_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/DeleteSecret",
request_serializer=service.DeleteSecretRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_secret"]
@property
def list_secret_versions(
self,
) -> Callable[
[service.ListSecretVersionsRequest],
Awaitable[service.ListSecretVersionsResponse],
]:
r"""Return a callable for the list secret versions method over gRPC.
Lists
[SecretVersions][google.cloud.secretmanager.v1.SecretVersion].
This call does not return secret data.
Returns:
Callable[[~.ListSecretVersionsRequest],
Awaitable[~.ListSecretVersionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_secret_versions" not in self._stubs:
self._stubs["list_secret_versions"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/ListSecretVersions",
request_serializer=service.ListSecretVersionsRequest.serialize,
response_deserializer=service.ListSecretVersionsResponse.deserialize,
)
return self._stubs["list_secret_versions"]
@property
def get_secret_version(
self,
) -> Callable[
[service.GetSecretVersionRequest], Awaitable[resources.SecretVersion]
]:
r"""Return a callable for the get secret version method over gRPC.
Gets metadata for a
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion].
``projects/*/secrets/*/versions/latest`` is an alias to the most
recently created
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion].
Returns:
Callable[[~.GetSecretVersionRequest],
Awaitable[~.SecretVersion]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_secret_version" not in self._stubs:
self._stubs["get_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/GetSecretVersion",
request_serializer=service.GetSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["get_secret_version"]
@property
def access_secret_version(
self,
) -> Callable[
[service.AccessSecretVersionRequest],
Awaitable[service.AccessSecretVersionResponse],
]:
r"""Return a callable for the access secret version method over gRPC.
Accesses a
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion].
This call returns the secret data.
``projects/*/secrets/*/versions/latest`` is an alias to the most
recently created
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion].
Returns:
Callable[[~.AccessSecretVersionRequest],
Awaitable[~.AccessSecretVersionResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "access_secret_version" not in self._stubs:
self._stubs["access_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/AccessSecretVersion",
request_serializer=service.AccessSecretVersionRequest.serialize,
response_deserializer=service.AccessSecretVersionResponse.deserialize,
)
return self._stubs["access_secret_version"]
@property
def disable_secret_version(
self,
) -> Callable[
[service.DisableSecretVersionRequest], Awaitable[resources.SecretVersion]
]:
r"""Return a callable for the disable secret version method over gRPC.
Disables a
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion].
Sets the
[state][google.cloud.secretmanager.v1.SecretVersion.state] of
the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]
to
[DISABLED][google.cloud.secretmanager.v1.SecretVersion.State.DISABLED].
Returns:
Callable[[~.DisableSecretVersionRequest],
Awaitable[~.SecretVersion]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "disable_secret_version" not in self._stubs:
self._stubs["disable_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/DisableSecretVersion",
request_serializer=service.DisableSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["disable_secret_version"]
@property
def enable_secret_version(
self,
) -> Callable[
[service.EnableSecretVersionRequest], Awaitable[resources.SecretVersion]
]:
r"""Return a callable for the enable secret version method over gRPC.
Enables a
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion].
Sets the
[state][google.cloud.secretmanager.v1.SecretVersion.state] of
the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]
to
[ENABLED][google.cloud.secretmanager.v1.SecretVersion.State.ENABLED].
Returns:
Callable[[~.EnableSecretVersionRequest],
Awaitable[~.SecretVersion]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "enable_secret_version" not in self._stubs:
self._stubs["enable_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/EnableSecretVersion",
request_serializer=service.EnableSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["enable_secret_version"]
@property
def destroy_secret_version(
self,
) -> Callable[
[service.DestroySecretVersionRequest], Awaitable[resources.SecretVersion]
]:
r"""Return a callable for the destroy secret version method over gRPC.
Destroys a
[SecretVersion][google.cloud.secretmanager.v1.SecretVersion].
Sets the
[state][google.cloud.secretmanager.v1.SecretVersion.state] of
the [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]
to
[DESTROYED][google.cloud.secretmanager.v1.SecretVersion.State.DESTROYED]
and irrevocably destroys the secret data.
Returns:
Callable[[~.DestroySecretVersionRequest],
Awaitable[~.SecretVersion]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "destroy_secret_version" not in self._stubs:
self._stubs["destroy_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/DestroySecretVersion",
request_serializer=service.DestroySecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["destroy_secret_version"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on the specified secret. Replaces
any existing policy.
Permissions on
[SecretVersions][google.cloud.secretmanager.v1.SecretVersion]
are enforced according to the policy set on the associated
[Secret][google.cloud.secretmanager.v1.Secret].
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a secret.
Returns empty policy if the secret exists and does not
have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that a caller has for the specified secret.
If the secret does not exist, this call returns an empty set of
permissions, not a NOT_FOUND error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for
authorization checking. This operation may "fail open" without
warning.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.cloud.secretmanager.v1.SecretManagerService/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
def close(self):
return self.grpc_channel.close()
__all__ = ("SecretManagerServiceGrpcAsyncIOTransport",)
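# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated module). It assumes grpcio is
# installed and that Application Default Credentials are available; the request
# type ``service.ListSecretsRequest`` and its ``parent`` field come from
# ``google.cloud.secretmanager_v1.types``. Each RPC property on the transport
# returns a cached unary-unary callable bound to the underlying gRPC channel.
async def _example_list_secrets(project_id: str):
    transport = SecretManagerServiceGrpcAsyncIOTransport()
    try:
        rpc = transport.list_secrets  # cached callable, created lazily on first access
        response = await rpc(service.ListSecretsRequest(parent="projects/%s" % project_id))
        return [secret.name for secret in response.secrets]
    finally:
        await transport.close()  # closing an aio channel is awaitable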
|
|
from __future__ import absolute_import
from __future__ import with_statement
from celery.datastructures import (
ExceptionInfo,
LRUCache,
LimitedSet,
AttributeDict,
DictAttribute,
ConfigurationView,
DependencyGraph,
)
from celery.utils.compat import THREAD_TIMEOUT_MAX
from celery.tests.utils import Case, WhateverIO
class Object(object):
pass
class test_DictAttribute(Case):
def test_get_set(self):
x = DictAttribute(Object())
x['foo'] = 'The quick brown fox'
self.assertEqual(x['foo'], 'The quick brown fox')
self.assertEqual(x['foo'], x.obj.foo)
self.assertEqual(x.get('foo'), 'The quick brown fox')
self.assertIsNone(x.get('bar'))
with self.assertRaises(KeyError):
x['bar']
def test_setdefault(self):
x = DictAttribute(Object())
self.assertEqual(x.setdefault('foo', 'NEW'), 'NEW')
self.assertEqual(x.setdefault('foo', 'XYZ'), 'NEW')
def test_contains(self):
x = DictAttribute(Object())
x['foo'] = 1
self.assertIn('foo', x)
self.assertNotIn('bar', x)
def test_items(self):
obj = Object()
obj.attr1 = 1
x = DictAttribute(obj)
x['attr2'] = 2
self.assertEqual(x['attr1'], 1)
self.assertEqual(x['attr2'], 2)
class test_ConfigurationView(Case):
def setUp(self):
self.view = ConfigurationView({'changed_key': 1,
'both': 2},
[{'default_key': 1,
'both': 1}])
def test_setdefault(self):
self.assertEqual(self.view.setdefault('both', 36), 2)
self.assertEqual(self.view.setdefault('new', 36), 36)
def test_get(self):
self.assertEqual(self.view.get('both'), 2)
sp = object()
self.assertIs(self.view.get('nonexisting', sp), sp)
def test_update(self):
changes = dict(self.view.changes)
self.view.update(a=1, b=2, c=3)
self.assertDictEqual(self.view.changes,
dict(changes, a=1, b=2, c=3))
def test_contains(self):
self.assertIn('changed_key', self.view)
self.assertIn('default_key', self.view)
self.assertNotIn('new', self.view)
def test_repr(self):
self.assertIn('changed_key', repr(self.view))
self.assertIn('default_key', repr(self.view))
def test_iter(self):
expected = {'changed_key': 1,
'default_key': 1,
'both': 2}
self.assertDictEqual(dict(self.view.items()), expected)
self.assertItemsEqual(list(iter(self.view)),
expected.keys())
self.assertItemsEqual(self.view.keys(), expected.keys())
self.assertItemsEqual(self.view.values(), expected.values())
class test_ExceptionInfo(Case):
def test_exception_info(self):
try:
raise LookupError('The quick brown fox jumps...')
except Exception:
einfo = ExceptionInfo()
self.assertEqual(str(einfo), einfo.traceback)
self.assertIsInstance(einfo.exception, LookupError)
self.assertTupleEqual(
einfo.exception.args, ('The quick brown fox jumps...', ),
)
self.assertTrue(einfo.traceback)
r = repr(einfo)
self.assertTrue(r)
class test_LimitedSet(Case):
def test_add(self):
s = LimitedSet(maxlen=2)
s.add('foo')
s.add('bar')
for n in 'foo', 'bar':
self.assertIn(n, s)
s.add('baz')
for n in 'bar', 'baz':
self.assertIn(n, s)
self.assertNotIn('foo', s)
def test_iter(self):
s = LimitedSet(maxlen=2)
items = 'foo', 'bar'
for item in items:
s.add(item)
l = list(iter(s))
for item in items:
self.assertIn(item, l)
def test_repr(self):
s = LimitedSet(maxlen=2)
items = 'foo', 'bar'
for item in items:
s.add(item)
self.assertIn('LimitedSet(', repr(s))
def test_clear(self):
s = LimitedSet(maxlen=2)
s.add('foo')
s.add('bar')
self.assertEqual(len(s), 2)
s.clear()
self.assertFalse(s)
def test_update(self):
s1 = LimitedSet(maxlen=2)
s1.add('foo')
s1.add('bar')
s2 = LimitedSet(maxlen=2)
s2.update(s1)
self.assertItemsEqual(list(s2), ['foo', 'bar'])
s2.update(['bla'])
self.assertItemsEqual(list(s2), ['bla', 'bar'])
s2.update(['do', 're'])
self.assertItemsEqual(list(s2), ['do', 're'])
def test_as_dict(self):
s = LimitedSet(maxlen=2)
s.add('foo')
self.assertIsInstance(s.as_dict(), dict)
class test_LRUCache(Case):
def test_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(xrange(limit * 2))
for i in slots:
x[i] = i
self.assertListEqual(x.keys(), list(slots[limit:]))
def test_least_recently_used(self):
x = LRUCache(3)
x[1], x[2], x[3] = 1, 2, 3
self.assertEqual(x.keys(), [1, 2, 3])
x[4], x[5] = 4, 5
self.assertEqual(x.keys(), [3, 4, 5])
# access 3, which makes it the last used key.
x[3]
x[6] = 6
self.assertEqual(x.keys(), [5, 3, 6])
x[7] = 7
self.assertEqual(x.keys(), [3, 6, 7])
def assertSafeIter(self, method, interval=0.01, size=10000):
from threading import Thread, Event
from time import sleep
x = LRUCache(size)
x.update(zip(xrange(size), xrange(size)))
class Burglar(Thread):
def __init__(self, cache):
self.cache = cache
self._is_shutdown = Event()
self._is_stopped = Event()
Thread.__init__(self)
def run(self):
while not self._is_shutdown.isSet():
try:
self.cache.data.popitem(last=False)
except KeyError:
break
self._is_stopped.set()
def stop(self):
self._is_shutdown.set()
self._is_stopped.wait()
self.join(THREAD_TIMEOUT_MAX)
burglar = Burglar(x)
burglar.start()
try:
for _ in getattr(x, method)():
sleep(0.0001)
finally:
burglar.stop()
def test_safe_to_remove_while_iteritems(self):
self.assertSafeIter('iteritems')
def test_safe_to_remove_while_keys(self):
self.assertSafeIter('keys')
def test_safe_to_remove_while_itervalues(self):
self.assertSafeIter('itervalues')
def test_items(self):
c = LRUCache()
c.update(a=1, b=2, c=3)
self.assertTrue(c.items())
class test_AttributeDict(Case):
def test_getattr__setattr(self):
x = AttributeDict({'foo': 'bar'})
self.assertEqual(x['foo'], 'bar')
with self.assertRaises(AttributeError):
x.bar
x.bar = 'foo'
self.assertEqual(x['bar'], 'foo')
class test_DependencyGraph(Case):
def graph1(self):
return DependencyGraph([
('A', []),
('B', []),
('C', ['A']),
('D', ['C', 'B']),
])
def test_repr(self):
self.assertTrue(repr(self.graph1()))
def test_topsort(self):
order = self.graph1().topsort()
# C must start before D
self.assertLess(order.index('C'), order.index('D'))
# and B must start before D
self.assertLess(order.index('B'), order.index('D'))
# and A must start before C
self.assertLess(order.index('A'), order.index('C'))
def test_edges(self):
self.assertListEqual(list(self.graph1().edges()),
['C', 'D'])
def test_items(self):
self.assertDictEqual(
dict(self.graph1().items()),
{'A': [], 'B': [], 'C': ['A'], 'D': ['C', 'B']},
)
def test_to_dot(self):
s = WhateverIO()
self.graph1().to_dot(s)
self.assertTrue(s.getvalue())
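# Illustrative sketch (not part of the original test module): ConfigurationView
# resolves lookups against the ``changes`` mapping first and only then falls
# back to the list of default mappings, as exercised by test_ConfigurationView
# above. The helper below is hypothetical and is not collected as a test case.
def _example_configuration_view_precedence():
    view = ConfigurationView({'both': 2}, [{'both': 1, 'default_key': 1}])
    assert view.get('both') == 2          # value from ``changes`` shadows the default
    assert view.get('default_key') == 1   # falls back to the defaults list
    return view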
|
|
# Copyright (C) 2010-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2009 Lorenzo Gil Sanchez <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import logout as django_logout
from django.http import Http404, HttpResponse
from django.http import HttpResponseRedirect, HttpResponseBadRequest, HttpResponseForbidden
from django.views.decorators.http import require_POST
from django.shortcuts import render_to_response
from django.template import RequestContext
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
# Django 1.0 compatibility
def csrf_exempt(view_func):
return view_func
from saml2 import BINDING_HTTP_REDIRECT
from saml2.client import Saml2Client
from saml2.metadata import entity_descriptor
from saml2 import BINDING_HTTP_POST
from djangosaml2.cache import IdentityCache, OutstandingQueriesCache
from djangosaml2.cache import StateCache
from djangosaml2.conf import get_config
from djangosaml2.signals import post_authenticated
from djangosaml2.utils import get_custom_setting
logger = logging.getLogger('djangosaml2')
def _set_subject_id(session, subject_id):
session['_saml2_subject_id'] = subject_id
def _get_subject_id(session):
try:
return session['_saml2_subject_id']
except KeyError:
return None
def login(request,
config_loader_path=None,
wayf_template='djangosaml2/wayf.html',
authorization_error_template='djangosaml2/auth_error.html'):
"""SAML Authorization Request initiator
This view initiates the SAML2 Authorization handshake
using the pysaml2 library to create the AuthnRequest.
It uses the SAML 2.0 Http Redirect protocol binding.
"""
logger.debug('Login process started')
came_from = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
if not came_from:
logger.warning('The next parameter exists but is empty')
came_from = settings.LOGIN_REDIRECT_URL
# if the user is already authenticated that maybe because of two reasons:
# A) He has this URL in two browser windows and in the other one he
# has already initiated the authenticated session.
# B) He comes from a view that (incorrectly) send him here because
# he does not have enough permissions. That view should have shown
# an authorization error in the first place.
# We can only do one thing here and that is configurable with the
# SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN setting. If that setting
# is True (default value) we will redirect him to the came_from view.
# Otherwise, we will show a (configurable) authorization error.
if not request.user.is_anonymous():
try:
redirect_authenticated_user = settings.SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN
except AttributeError:
redirect_authenticated_user = True
if redirect_authenticated_user:
return HttpResponseRedirect(came_from)
else:
logger.debug('User is already logged in')
return render_to_response(authorization_error_template, {
'came_from': came_from,
}, context_instance=RequestContext(request))
selected_idp = request.GET.get('idp', None)
conf = get_config(config_loader_path, request)
# is an embedded wayf needed?
# idps = conf.idps()
# if selected_idp is None and len(idps) > 1:
# logger.debug('A discovery process is needed')
# return render_to_response(wayf_template, {
# 'available_idps': idps.items(),
# 'came_from': came_from,
# }, context_instance=RequestContext(request))
client = Saml2Client(conf)
try:
(session_id, result) = client.prepare_for_authenticate(
entityid=selected_idp, relay_state=came_from,
binding=BINDING_HTTP_REDIRECT,
)
except TypeError as e:
logger.error('Unable to know which IdP to use')
return HttpResponse(unicode(e))
assert result['headers'][0][0] == 'Location'
location = result['headers'][0][1]
logger.debug('Saving the session_id in the OutstandingQueries cache')
oq_cache = OutstandingQueriesCache(request.session)
oq_cache.set(session_id, came_from)
logger.debug('Redirecting the user to the IdP')
return HttpResponseRedirect(location)
@require_POST
@csrf_exempt
def assertion_consumer_service(request,
config_loader_path=None,
attribute_mapping=None,
create_unknown_user=None):
"""SAML Authorization Response endpoint
The IdP will send its response to this view, which
will process it with the help of pysaml2 and log the user
in using the custom authentication backend
djangosaml2.backends.Saml2Backend, which must be
enabled in settings.py.
"""
attribute_mapping = attribute_mapping or get_custom_setting(
'SAML_ATTRIBUTE_MAPPING', {'uid': ('username', )})
create_unknown_user = create_unknown_user or get_custom_setting(
'SAML_CREATE_UNKNOWN_USER', True)
logger.debug('Assertion Consumer Service started')
conf = get_config(config_loader_path, request)
if 'SAMLResponse' not in request.POST:
return HttpResponseBadRequest(
'Couldn\'t find "SAMLResponse" in POST data.')
client = Saml2Client(conf, identity_cache=IdentityCache(request.session))
oq_cache = OutstandingQueriesCache(request.session)
outstanding_queries = oq_cache.outstanding_queries()
# process the authentication response
response = client.parse_authn_request_response(request.POST['SAMLResponse'], BINDING_HTTP_POST, outstanding_queries)
if response is None:
logger.error('SAML response is None')
return HttpResponseBadRequest(
"SAML response has errors. Please check the logs")
session_id = response.session_id()
oq_cache.delete(session_id)
# authenticate the remote user
session_info = response.session_info()
if callable(attribute_mapping):
attribute_mapping = attribute_mapping()
if callable(create_unknown_user):
create_unknown_user = create_unknown_user()
logger.debug('Trying to authenticate the user')
user = auth.authenticate(session_info=session_info,
attribute_mapping=attribute_mapping,
create_unknown_user=create_unknown_user)
if user is None:
logger.error('The user is None')
return HttpResponseForbidden("Permission denied")
auth.login(request, user)
_set_subject_id(request.session, session_info['name_id'])
logger.debug('Sending the post_authenticated signal')
post_authenticated.send_robust(sender=user, session_info=session_info)
# redirect the user to the view where he came from
relay_state = request.POST.get('RelayState', '/')
if not relay_state:
logger.warning('The RelayState parameter exists but is empty')
relay_state = settings.LOGIN_REDIRECT_URL
logger.debug('Redirecting to the RelayState: ' + relay_state)
return HttpResponseRedirect(relay_state)
@login_required
def echo_attributes(request,
config_loader_path=None,
template='djangosaml2/echo_attributes.html'):
"""Example view that echo the SAML attributes of an user"""
state = StateCache(request.session)
conf = get_config(config_loader_path, request)
client = Saml2Client(conf, state_cache=state,
identity_cache=IdentityCache(request.session))
subject_id = _get_subject_id(request.session)
identity = client.users.get_identity(subject_id,
check_not_on_or_after=False)
return render_to_response(template, {'attributes': identity[0]},
context_instance=RequestContext(request))
@login_required
def logout(request, config_loader_path=None):
"""SAML Logout Request initiator
This view initiates the SAML2 Logout request
using the pysaml2 library to create the LogoutRequest.
"""
logger.debug('Logout process started')
state = StateCache(request.session)
conf = get_config(config_loader_path, request)
client = Saml2Client(conf, state_cache=state,
identity_cache=IdentityCache(request.session))
subject_id = _get_subject_id(request.session)
if subject_id is None:
logger.warning(
'The session does not contain the subject id for user %s'
% request.user)
session_id, code, head, body = client.global_logout(subject_id)
headers = dict(head)
state.sync()
logger.debug('Redirecting to the IdP to continue the logout process')
return HttpResponseRedirect(headers['Location'])
def logout_service(request, config_loader_path=None, next_page=None,
logout_error_template='djangosaml2/logout_error.html'):
"""SAML Logout Response endpoint
The IdP will send the logout response to this view,
which will process it with pysaml2 help and log the user
out.
Note that the IdP can request a logout even when
we didn't initiate the process, e.g. as part of a single
logout request started by another SP.
"""
logger.debug('Logout service started')
conf = get_config(config_loader_path, request)
state = StateCache(request.session)
client = Saml2Client(conf, state_cache=state,
identity_cache=IdentityCache(request.session))
if 'SAMLResponse' in request.GET: # we started the logout
logger.debug('Receiving a logout response from the IdP')
response = client.logout_response(request.GET['SAMLResponse'],
binding=BINDING_HTTP_REDIRECT)
state.sync()
if response and response[1] == '200 Ok':
if next_page is None and hasattr(settings, 'LOGOUT_REDIRECT_URL'):
next_page = settings.LOGOUT_REDIRECT_URL
logger.debug('Performing django_logout with a next_page of %s'
% next_page)
return django_logout(request, next_page=next_page)
else:
logger.error('Unknown error during the logout')
return HttpResponse('Error during logout')
elif 'SAMLRequest' in request.GET: # logout started by the IdP
logger.debug('Receiving a logout request from the IdP')
subject_id = _get_subject_id(request.session)
if subject_id is None:
logger.warning(
'The session does not contain the subject id for user %s. Performing local logout'
% request.user)
auth.logout(request)
return render_to_response(logout_error_template, {},
context_instance=RequestContext(request))
else:
response, success = client.logout_request(request.GET, subject_id)
state.sync()
if success:
auth.logout(request)
assert response[0][0] == 'Location'
url = response[0][1]
return HttpResponseRedirect(url)
elif response is not None:
assert response[0][0] == 'Location'
url = response[0][1]
return HttpResponseRedirect(url)
else:
logger.error('Unknown error during the logout')
return HttpResponse('Error during logout')
else:
logger.error('No SAMLResponse or SAMLRequest parameter found')
raise Http404('No SAMLResponse or SAMLRequest parameter found')
def metadata(request, config_loader_path=None, valid_for=None):
"""Returns an XML with the SAML 2.0 metadata for this
SP as configured in the settings.py file.
"""
conf = get_config(config_loader_path, request)
conf.valid_for = valid_for or get_custom_setting('SAML_VALID_FOR', 24)
metadata = entity_descriptor(conf)
return HttpResponse(content=str(metadata),
content_type="text/xml; charset=utf8")
def register_namespace_prefixes():
from saml2 import md, saml, samlp
import xmlenc
import xmldsig
prefixes = (('saml', saml.NAMESPACE),
('samlp', samlp.NAMESPACE),
('md', md.NAMESPACE),
('ds', xmldsig.NAMESPACE),
('xenc', xmlenc.NAMESPACE))
if hasattr(ElementTree, 'register_namespace'):
for prefix, namespace in prefixes:
ElementTree.register_namespace(prefix, namespace)
else:
for prefix, namespace in prefixes:
ElementTree._namespace_map[namespace] = prefix
register_namespace_prefixes()
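# Minimal configuration sketch (illustrative, not part of this module): the
# Django settings consulted by the views above. The names are taken from the
# lookups in this file (``settings.*`` and ``get_custom_setting``); the values
# shown are assumptions, not a complete djangosaml2 configuration.
_EXAMPLE_SETTINGS = {
    'LOGIN_REDIRECT_URL': '/',                          # fallback for the ?next= parameter in login()
    'SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN': True,   # redirect already-authenticated users in login()
    'SAML_ATTRIBUTE_MAPPING': {'uid': ('username',)},   # default used by assertion_consumer_service()
    'SAML_CREATE_UNKNOWN_USER': True,                   # create users on their first SAML login
    'SAML_VALID_FOR': 24,                               # hours of metadata validity, used by metadata()
    'LOGOUT_REDIRECT_URL': '/',                         # used by logout_service() after a successful logout
}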
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A client for AWS batch services
.. seealso::
- http://boto3.readthedocs.io/en/latest/guide/configuration.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
- https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
from random import uniform
from time import sleep
from typing import Dict, List, Optional, Union
import botocore.client
import botocore.exceptions
import botocore.waiter
from typing_extensions import Protocol, runtime_checkable
from airflow import AirflowException, LoggingMixin
from airflow.providers.amazon.aws.hooks.aws_hook import AwsHook
# Add exceptions to pylint for the boto3 protocol only; ideally the boto3 library could provide
# protocols for all their dynamically generated classes (try to migrate this to a PR on botocore).
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: disable=invalid-name, unused-argument
@runtime_checkable
class AwsBatchProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('batch') -> botocore.client.Batch``.
This is used for type hints on :py:meth:`.AwsBatchClient.client`; it covers
only the subset of client methods required.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
"""
def describe_jobs(self, jobs: List[str]) -> Dict:
"""
Get job descriptions from AWS batch
:param jobs: a list of JobId to describe
:type jobs: List[str]
:return: an API response to describe jobs
:rtype: Dict
"""
...
def get_waiter(self, waiterName: str) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter
:param waiterName: The name of the waiter. The name must match the
key name (including the casing) in the waiter model file
(typically this is CamelCase).
:type waiterName: str
:return: a waiter object for the named AWS batch service
:rtype: botocore.waiter.Waiter
.. note::
AWS batch might not have any waiters (until botocore PR-1307 is released).
.. code-block:: python
import boto3
boto3.client('batch').waiter_names == []
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
- https://github.com/boto/botocore/pull/1307
"""
...
def submit_job(
self,
jobName: str,
jobQueue: str,
jobDefinition: str,
arrayProperties: Dict,
parameters: Dict,
containerOverrides: Dict,
) -> Dict:
"""
Submit a batch job
:param jobName: the name for the AWS batch job
:type jobName: str
:param jobQueue: the queue name on AWS Batch
:type jobQueue: str
:param jobDefinition: the job definition name on AWS Batch
:type jobDefinition: str
:param arrayProperties: the same parameter that boto3 will receive
:type arrayProperties: Dict
:param parameters: the same parameter that boto3 will receive
:type parameters: Dict
:param containerOverrides: the same parameter that boto3 will receive
:type containerOverrides: Dict
:return: an API response
:rtype: Dict
"""
...
def terminate_job(self, jobId: str, reason: str) -> Dict:
"""
Terminate a batch job
:param jobId: a job ID to terminate
:type jobId: str
:param reason: a reason to terminate job ID
:type reason: str
:return: an API response
:rtype: Dict
"""
...
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: enable=invalid-name, unused-argument
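# A minimal sketch (an assumption, not part of the upstream module) of how the
# runtime-checkable protocol above can be used to sanity-check a boto3 client;
# the region name is an illustrative placeholder:
#
#   import boto3
#
#   batch = boto3.client("batch", region_name="us-east-1")
#   # True at runtime: the client exposes the methods declared by the protocol
#   assert isinstance(batch, AwsBatchProtocol)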
class AwsBatchClient(LoggingMixin):
"""
A client for AWS batch services.
:param max_retries: exponential back-off retries, 4200 = 48 hours;
polling is only used when waiters is None
:type max_retries: Optional[int]
:param status_retries: number of HTTP retries to get job status, 10;
polling is only used when waiters is None
:type status_retries: Optional[int]
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
(http://boto3.readthedocs.io/en/latest/guide/configuration.html).
:type aws_conn_id: Optional[str]
:param region_name: region name to use in AWS client.
Override the region_name in connection (if provided)
:type region_name: Optional[str]
.. note::
Several methods use a default random delay to check or poll for job status, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``
Using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
To modify the global defaults for the range of jitter allowed when a
random delay is used to check batch job status, modify these defaults, e.g.:
.. code-block::
AwsBatchClient.DEFAULT_DELAY_MIN = 0
AwsBatchClient.DEFAULT_DELAY_MAX = 5
When explicit delay values are used, a 1 second random jitter is applied to the
delay (e.g. a delay of 0 sec will be a ``random.uniform(0, 1)`` delay). It is
generally recommended that random jitter is added to API requests. A
convenience method is provided for this, e.g. to get a random delay of
10 sec +/- 5 sec: ``delay = AwsBatchClient.add_jitter(10, width=5, minima=0)``
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
MAX_RETRIES = 4200
STATUS_RETRIES = 10
# delays are in seconds
DEFAULT_DELAY_MIN = 1
DEFAULT_DELAY_MAX = 10
def __init__(
self,
max_retries: Optional[int] = None,
status_retries: Optional[int] = None,
aws_conn_id: Optional[str] = None,
region_name: Optional[str] = None,
):
super().__init__()
self.max_retries = max_retries or self.MAX_RETRIES
self.status_retries = status_retries or self.STATUS_RETRIES
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self._hook = None # type: Union[AwsHook, None]
self._client = None # type: Union[AwsBatchProtocol, botocore.client.BaseClient, None]
@property
def hook(self) -> AwsHook:
"""
An AWS API connection manager (wraps boto3)
:return: the connected hook to AWS
:rtype: AwsHook
"""
if self._hook is None:
self._hook = AwsHook(aws_conn_id=self.aws_conn_id)
return self._hook
@property
def client(self) -> Union[AwsBatchProtocol, botocore.client.BaseClient]:
"""
An AWS API client for batch services, like ``boto3.client('batch')``
:return: a boto3 'batch' client for the ``.region_name``
:rtype: Union[AwsBatchProtocol, botocore.client.BaseClient]
"""
if self._client is None:
self._client = self.hook.get_client_type("batch", region_name=self.region_name)
return self._client
def terminate_job(self, job_id: str, reason: str) -> Dict:
"""
Terminate a batch job
:param job_id: a job ID to terminate
:type job_id: str
:param reason: a reason to terminate job ID
:type reason: str
:return: an API response
:rtype: Dict
"""
response = self.client.terminate_job(jobId=job_id, reason=reason)
self.log.info(response)
return response
def check_job_success(self, job_id: str) -> bool:
"""
Check the final status of the batch job; return True if the job
'SUCCEEDED', else raise an AirflowException
:param job_id: a batch job ID
:type job_id: str
:rtype: bool
:raises: AirflowException
"""
job = self.get_job_description(job_id)
job_status = job.get("status")
if job_status == "SUCCEEDED":
self.log.info("AWS batch job (%s) succeeded: %s", job_id, job)
return True
if job_status == "FAILED":
raise AirflowException("AWS Batch job ({}) failed: {}".format(job_id, job))
if job_status in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING"]:
raise AirflowException("AWS Batch job ({}) is not complete: {}".format(job_id, job))
raise AirflowException("AWS Batch job ({}) has unknown status: {}".format(job_id, job))
def wait_for_job(self, job_id: str, delay: Union[int, float, None] = None):
"""
Wait for batch job to complete
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
self.poll_for_job_running(job_id, delay)
self.poll_for_job_complete(job_id, delay)
self.log.info("AWS Batch job (%s) has completed", job_id)
def poll_for_job_running(self, job_id: str, delay: Union[int, float, None] = None):
"""
Poll for job running. The statuses that indicate a job is running or
already complete are: 'RUNNING'|'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'|'SUCCEEDED'|'FAILED'
The completed statuses are included for jobs that move from STARTING
through RUNNING to a completed state too quickly for polling to
observe the intermediate RUNNING status (often a failure).
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
running_status = ["RUNNING", "SUCCEEDED", "FAILED"]
self.poll_job_status(job_id, running_status)
def poll_for_job_complete(self, job_id: str, delay: Union[int, float, None] = None):
"""
Poll for job completion. The statuses that indicate job completion
are: 'SUCCEEDED'|'FAILED'.
So the status options that this will wait for are the transitions from:
'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'>'SUCCEEDED'|'FAILED'
:param job_id: a batch job ID
:type job_id: str
:param delay: a delay before polling for job status
:type delay: Optional[Union[int, float]]
:raises: AirflowException
"""
self.delay(delay)
complete_status = ["SUCCEEDED", "FAILED"]
self.poll_job_status(job_id, complete_status)
def poll_job_status(self, job_id: str, match_status: List[str]) -> bool:
"""
Poll for job status using an exponential back-off strategy (with max_retries).
:param job_id: a batch job ID
:type job_id: str
:param match_status: a list of job statuses to match; the batch job statuses are:
'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
:type match_status: List[str]
:rtype: bool
:raises: AirflowException
"""
retries = 0
while True:
job = self.get_job_description(job_id)
job_status = job.get("status")
self.log.info(
"AWS Batch job (%s) check status (%s) in %s", job_id, job_status, match_status,
)
if job_status in match_status:
return True
if retries >= self.max_retries:
raise AirflowException(
"AWS Batch job ({}) status checks exceed max_retries".format(job_id)
)
retries += 1
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) status check (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.max_retries,
pause,
)
self.delay(pause)
def get_job_description(self, job_id: str) -> Dict:
"""
Get job description (using status_retries).
:param job_id: a batch job ID
:type job_id: str
:return: an API response for describe jobs
:rtype: Dict
:raises: AirflowException
"""
retries = 0
while True:
try:
response = self.client.describe_jobs(jobs=[job_id])
return self.parse_job_description(job_id, response)
except botocore.exceptions.ClientError as err:
error = err.response.get("Error", {})
if error.get("Code") == "TooManyRequestsException":
pass # allow it to retry, if possible
else:
raise AirflowException(
"AWS Batch job ({}) description error: {}".format(job_id, err)
)
retries += 1
if retries >= self.status_retries:
raise AirflowException(
"AWS Batch job ({}) description error: exceeded "
"status_retries ({})".format(job_id, self.status_retries)
)
pause = self.exponential_delay(retries)
self.log.info(
"AWS Batch job (%s) description retry (%d of %d) in the next %.2f seconds",
job_id,
retries,
self.status_retries,
pause,
)
self.delay(pause)
@staticmethod
def parse_job_description(job_id: str, response: Dict) -> Dict:
"""
Parse job description to extract description for job_id
:param job_id: a batch job ID
:type job_id: str
:param response: an API response for describe jobs
:type response: Dict
:return: an API response to describe job_id
:rtype: Dict
:raises: AirflowException
"""
jobs = response.get("jobs", [])
matching_jobs = [job for job in jobs if job.get("jobId") == job_id]
if len(matching_jobs) != 1:
raise AirflowException(
"AWS Batch job ({}) description error: response: {}".format(job_id, response)
)
return matching_jobs[0]
@staticmethod
def add_jitter(
delay: Union[int, float], width: Union[int, float] = 1, minima: Union[int, float] = 0
) -> float:
"""
Use delay +/- width for random jitter
Adding jitter to status polling can help to avoid
AWS batch API limits for monitoring batch jobs with
a high concurrency in Airflow tasks.
:param delay: number of seconds to pause;
delay is assumed to be a positive number
:type delay: Union[int, float]
:param width: delay +/- width for random jitter;
width is assumed to be a positive number
:type width: Union[int, float]
:param minima: minimum delay allowed;
minima is assumed to be a non-negative number
:type minima: Union[int, float]
:return: a uniform(delay - width, delay + width) jitter, clamped below
by ``minima`` so the result is a non-negative number
:rtype: float
"""
delay = abs(delay)
width = abs(width)
minima = abs(minima)
lower = max(minima, delay - width)
upper = delay + width
return uniform(lower, upper)
@staticmethod
def delay(delay: Union[int, float, None] = None):
"""
Pause execution for ``delay`` seconds.
:param delay: a delay to pause execution using ``time.sleep(delay)``;
a small 1 second jitter is applied to the delay.
:type delay: Optional[Union[int, float]]
.. note::
This method uses a default random delay, i.e.
``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``;
using a random interval helps to avoid AWS API throttle limits
when many concurrent tasks request job-descriptions.
"""
if delay is None:
delay = uniform(AwsBatchClient.DEFAULT_DELAY_MIN, AwsBatchClient.DEFAULT_DELAY_MAX)
else:
delay = AwsBatchClient.add_jitter(delay)
sleep(delay)
@staticmethod
def exponential_delay(tries: int) -> float:
"""
An exponential back-off delay, with random jitter. There is a maximum
interval of 10 minutes (with random jitter between 3 and 10 minutes).
This is used in the :py:meth:`.poll_job_status` method.
:param tries: Number of tries
:type tries: int
:rtype: float
Examples of behavior:
.. code-block:: python
def exp(tries):
max_interval = 600.0 # 10 minutes in seconds
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
print(delay / 3, delay)
for tries in range(10):
exp(tries)
# 0.33 1.0
# 0.45 1.35
# 0.81 2.44
# 1.41 4.23
# 2.25 6.76
# 3.33 10.00
# 4.65 13.95
# 6.21 18.64
# 8.01 24.04
# 10.05 30.15
.. seealso::
- https://docs.aws.amazon.com/general/latest/gr/api-retries.html
- https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
"""
max_interval = 600.0 # results in 3 to 10 minute delay
delay = 1 + pow(tries * 0.6, 2)
delay = min(max_interval, delay)
return uniform(delay / 3, delay)
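# A minimal usage sketch (an assumption, not part of the upstream module); the job
# name, queue and definition below are illustrative placeholders:
#
#   client = AwsBatchClient(aws_conn_id="aws_default", region_name="us-east-1")
#   response = client.client.submit_job(
#       jobName="example-job",
#       jobQueue="example-queue",
#       jobDefinition="example-definition:1",
#   )
#   job_id = response["jobId"]
#   client.wait_for_job(job_id)        # polls with exponential back-off and jitter
#   client.check_job_success(job_id)   # raises AirflowException unless SUCCEEDED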
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Forker configuration broker.
A simple servlet handling GET and DELETE commands to provide a raw JSON
configuration for the requested isolate, if available.
The stored configurations should be the ones given by a monitor requesting to
start an isolate.
A configuration should be deleted on a request by the isolate itself once it
has read it correctly.
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python standard library
import json
import logging
import threading
# Pelix framework
from pelix.ipopo.decorators import ComponentFactory, Invalidate, Property, \
Provides
import pelix.http
# COHORTE constants
import cohorte
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
MIME_TYPE_JSON = 'application/json'
""" JSON data MIME type """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-forker-broker-factory")
@Provides(cohorte.SERVICE_CONFIGURATION_BROKER, controller='_svc_flag')
@Provides(pelix.http.HTTP_SERVLET)
@Property("_servlet_path", pelix.http.HTTP_SERVLET_PATH, "/cohorte/broker")
class ConfigBroker(object):
"""
The configuration broker servlet
"""
def __init__(self):
"""
Sets up members
"""
# The broker flag
self._svc_flag = False
# The path to this servlet
self._servlet_path = None
# Servlet access
self._host = None
self._port = None
# Configurations : Isolate UID -> JSON string
self._configurations = {}
# Configurations lock
self.__config_lock = threading.Lock()
def bound_to(self, path, parameters):
"""
Servlet bound to an HTTP service
:param path: The path to access the servlet
:param parameters: The server & servlet parameters
"""
if path == self._servlet_path:
# Update our access information
self._host = parameters['http.address']
self._port = int(parameters['http.port'])
# Register our service
self._svc_flag = True
else:
_logger.warning("Bound to a HTTP service with a different path."
"Ignore.")
def unbound_from(self, path, parameters):
"""
Servlet unbound from an HTTP service
:param path: The path to access the servlet
:param parameters: The server & servlet parameters
"""
if path == self._servlet_path:
# Unregister our service
self._svc_flag = False
# Clear our access information
self._host = None
self._port = None
def do_GET(self, request, response):
"""
Handles GET requests
:param request: The HTTP request bean
:param response: The HTTP response handler
"""
# Get the isolate UID (last part of the request path)
uid = request.get_path().split('/')[-1]
with self.__config_lock:
# Get the associated configuration
json_config = self._configurations.get(uid)
if json_config:
# Send the found configuration
response.send_content(200, json_config, MIME_TYPE_JSON)
else:
# Unknown isolate
error = {'uid': uid,
'result': False,
'message': "Unknown isolate UID"}
response.send_content(404, json.dumps(error), MIME_TYPE_JSON)
def do_DELETE(self, request, response):
"""
Handles DELETE requests
:param request: The HTTP request bean
:param response: The HTTP response handler
"""
# Get the isolate UID (last part of the request path)
uid = request.get_path().split('/')[-1]
result = {'uid': uid}
if self.delete_configuration(uid):
# Success
code = 200
result['result'] = True
result['message'] = "Configuration deleted"
else:
# Error
code = 404
result['result'] = False
result['message'] = "Unknown isolate UID"
response.send_content(code, json.dumps(result), MIME_TYPE_JSON)
def delete_configuration(self, uid):
"""
Deletes the configuration of the given isolate
:param uid: An isolate UID
:return: True if the isolate was known, else False
"""
with self.__config_lock:
if uid in self._configurations:
# Found !
del self._configurations[uid]
return True
return False
def store_configuration(self, uid, dict_config):
"""
Stores the configuration of the given isolate
:param uid: An isolate UID
:param dict_config: The configuration dictionary of the given isolate
:return: The URL to access this configuration
:raise ValueError: Invalid parameter
"""
if not uid or not dict_config:
# Invalid parameters
raise ValueError("Can't store an invalid configuration")
with self.__config_lock:
# Store the configuration as a JSON string
self._configurations[uid] = json.dumps(dict_config)
# Send a "localhost" address to avoid an "address not available" error
# under Windows
if ':' in self._host:
# IPv6 host
host = '[::1]'
else:
host = '127.0.0.1'
return 'http://{host}:{port}{path}/{uid}'\
.format(uid=uid, host=host, port=self._port,
path=self._servlet_path)
@Invalidate
def invalidate(self, context):
"""
Component invalidated
:param context: The bundle context
"""
# Reset the service flag
self._svc_flag = False
with self.__config_lock:
self._configurations.clear()
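# A minimal sketch (an assumption, not part of the upstream module) of the HTTP
# contract implemented above, as seen from an isolate; the host, port and UID in
# the URL are illustrative and would normally come from store_configuration():
#
#   import json
#   import urllib.request
#
#   url = "http://127.0.0.1:8080/cohorte/broker/some-isolate-uid"
#   with urllib.request.urlopen(url) as resp:           # GET -> raw JSON configuration
#       configuration = json.loads(resp.read().decode("utf-8"))
#   req = urllib.request.Request(url, method="DELETE")  # DELETE once the config is read
#   urllib.request.urlopen(req)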
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class interface(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/interfaces/interface. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An interface associated with the network instance
"""
__slots__ = ("_path_helper", "_extmethods", "__id", "__config", "__state")
_yang_name = "interface"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["network-instances", "network-instance", "interfaces", "interface"]
def _get_id(self):
"""
Getter method for id, mapped from YANG variable /network_instances/network_instance/interfaces/interface/id (leafref)
YANG Description: A reference to an identifier for this interface which
acts as a key for this list
"""
return self.__id
def _set_id(self, v, load=False):
"""
Setter method for id, mapped from YANG variable /network_instances/network_instance/interfaces/interface/id (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_id() directly.
YANG Description: A reference to an identifier for this interface which
acts as a key for this list
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """id must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__id = t
if hasattr(self, "_set"):
self._set()
def _unset_id(self):
self.__id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/interfaces/interface/config (container)
YANG Description: Configuration parameters relating to the associated
interface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/interfaces/interface/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the associated
interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/interfaces/interface/state (container)
YANG Description: Operational state parameters relating to the
associated interface
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/interfaces/interface/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to the
associated interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
id = __builtin__.property(_get_id, _set_id)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict(
[("id", id), ("config", config), ("state", state)]
)
from . import config
from . import state
class interface(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/interfaces/interface. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An interface associated with the network instance
"""
__slots__ = ("_path_helper", "_extmethods", "__id", "__config", "__state")
_yang_name = "interface"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["network-instances", "network-instance", "interfaces", "interface"]
def _get_id(self):
"""
Getter method for id, mapped from YANG variable /network_instances/network_instance/interfaces/interface/id (leafref)
YANG Description: A reference to an identifier for this interface which
acts as a key for this list
"""
return self.__id
def _set_id(self, v, load=False):
"""
Setter method for id, mapped from YANG variable /network_instances/network_instance/interfaces/interface/id (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_id() directly.
YANG Description: A reference to an identifier for this interface which
acts as a key for this list
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """id must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__id = t
if hasattr(self, "_set"):
self._set()
def _unset_id(self):
self.__id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/interfaces/interface/config (container)
YANG Description: Configuration parameters relating to the associated
interface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/interfaces/interface/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the associated
interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/interfaces/interface/state (container)
YANG Description: Operational state parameters relating to the
associated interface
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/interfaces/interface/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to the
associated interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
id = __builtin__.property(_get_id, _set_id)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict(
[("id", id), ("config", config), ("state", state)]
)
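# A minimal usage sketch (an assumption, not part of the generated bindings) of the
# container class above; on a standalone instance the key leaf can be set directly,
# and the value "Ethernet1" is purely illustrative:
#
#   iface = interface()
#   iface.id = "Ethernet1"   # leafref key; allowed here because there is no parent list
#   print(iface._path())     # ['network-instances', 'network-instance',
#                            #  'interfaces', 'interface']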
|
|
"""Base class for all the objects in Diofant."""
from collections import defaultdict
from collections.abc import Mapping
from itertools import zip_longest
from ..utilities import ordered
from .cache import cacheit
from .compatibility import iterable
from .decorators import _sympifyit
from .evaluate import evaluate
from .sympify import SympifyError, sympify
class Basic:
"""
Base class for all objects in Diofant.
Always use the ``args`` property when accessing parameters of an instance.
"""
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom: bool = False
is_Symbol = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix: bool = False
is_MatMul = False
is_Vector = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._hash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
"""Return swallow copy of self."""
return self.func(*self.args)
def __reduce_ex__(self, proto):
"""Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {'_hash': None}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recursion
# occurs, as hash is needed for setting cache dictionary keys
h = self._hash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._hash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__.
"""
return self._args
@classmethod
def class_key(cls):
"""Nice order of classes."""
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""Return a sort key.
Examples
========
>>> sorted([Rational(1, 2), I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> [x, 1/x, 1/x**2, x**2, sqrt(x), root(x, 4), x**Rational(3, 2)]
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
from .numbers import Integer
args = len(self.args), tuple(arg.sort_key(order)
for arg in self._sorted_args)
return self.class_key(), args, Integer(1).sort_key(), Integer(1)
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
Notes
=====
See [1]_. If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
* http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
if type(self) != type(other):
return False
return self._hashable_content() == other._hashable_content()
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue sympy/sympy#5487.
def __repr__(self):
from ..printing import srepr
return srepr(self, order=None)
def __str__(self):
from ..printing import sstr
return sstr(self, order=None)
def _repr_pretty_(self, p, cycle):
from ..printing import pretty
p.text(pretty(self))
def _repr_latex_(self):
from ..printing import latex
return latex(self, mode='equation*')
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> e = 1 + x + 2*sin(y + I*pi)
>>> e.atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> e.atoms(Symbol)
{x, y}
>>> e.atoms(Number)
{1, 2}
>>> e.atoms(Number, NumberSymbol)
{1, 2, pi}
>>> e.atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> e.atoms(x)
{x, y}
Be careful to check your assumptions when using the implicit option
since ``Integer(1).is_Integer = True`` but ``type(Integer(1))`` is
``One``, a special type of diofant atom, while ``type(Integer(2))``
is type ``Integer`` and will find all integers in an expression:
>>> e.atoms(Integer(1))
{1}
>>> e.atoms(Integer(2))
{1, 2}
Finally, arguments to atoms() can select more than atomic atoms: any
diofant type can be listed as an argument, and those types of "atoms"
as found in scanning the arguments of the expression recursively will
be returned:
>>> from diofant.core.function import AppliedUndef
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
>>> f = Function('f')
>>> e = 1 + f(x) + 2*sin(y + I*pi)
>>> e.atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
"""
if types:
types = tuple(t if isinstance(t, type) else type(t) for t in types)
else:
types = Atom,
return set().union(*[self.find(t) for t in types])
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true, e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method.
"""
return set().union(*[a.free_symbols for a in self.args])
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
operators. For instance, in Diofant the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
if callable(self):  # callable() already implies the presence of __call__
return self(*args)
elif self.args:
newargs = [sub.rcall(*args) for sub in self.args]
return type(self)(*newargs)
else:
return self
@property
def func(self):
"""The top-level function in an expression.
The following should hold for all objects::
x == x.func(*x.args)
Examples
========
>>> a = 2*x
>>> a.func
<class 'diofant.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> cot(x).args
(x,)
>>> (x*y).args
(x, y)
"""
return self._args
@property
def is_evaluated(self):
"""Test if an expession is evaluated."""
with evaluate(True):
expr = self.func(*self.args)
return expr == self
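# Illustrative doctest-style check (an assumption, using the usual diofant symbol x):
#
#   >>> Add(x, x, evaluate=False).is_evaluated
#   False
#   >>> (2*x).is_evaluated
#   True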
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> (1 + x*y).subs({x: pi})
pi*y + 1
>>> (1 + x*y).subs({x: pi, y: 2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs({x**2: y})
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from diofant.abc import e
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs({sqrt(sin(2*x)): a, sin(2*x): b,
... cos(2*x): c, x: d, exp(x): e})
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(21, subs={x: 3.0}, strict=False)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21, strict=False)
0.333333333333333
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
diofant.core.evalf.EvalfMixin.evalf: calculates the given formula to
a desired level of precision
"""
from ..utilities import default_sort_key
from .numbers import Integer
from .symbol import Dummy
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, Mapping):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
raise ValueError('Expected a mapping or iterable '
'of (old, new) tuples.')
sequence = list(sequence)
else:
raise ValueError('subs accepts one argument')
sequence = [_ for _ in sympify(sequence) if not _aresame(*_)]
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = defaultdict(list)
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d[ops].append((o, n))
newseq = []
for k in sorted(d, reverse=True):
newseq.extend(sorted((v[0] for v in d[k]),
key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted(((k, v) for (k, v) in sequence.items()),
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
m = Dummy()
for old, new in sequence:
d = Dummy(commutative=new.is_commutative)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
reps[d] = new
reps[m] = Integer(1) # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs({x + y: 1})
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs({x + y: 1})
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""Try to replace old with new in any of self's arguments."""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
return self.func(*args)
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new) # pylint: disable=assignment-from-none
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also
========
_subs
"""
return
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
E**y + x + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y})
Traceback (most recent call last):
...
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
if self in rule:
return rule[self]
elif rule and not self.is_Atom:
args = tuple(a.xreplace(rule) for a in self.args)
if not _aresame(args, self.args):
return self.func(*args)
return self
@cacheit
def has(self, *patterns):
r"""Test if any subexpression matches any of the patterns.
Parameters
==========
\*patterns : tuple of Expr
List of expressions to search for match.
Returns
=======
bool
False if there is no match or patterns list is
empty, else True.
Examples
========
>>> e = x**2 + sin(x*y)
>>> e.has(z)
False
>>> e.has(x, y, z)
True
>>> x.has()
False
"""
from .function import Function, UndefinedFunction
if len(patterns) != 1:
return any(self.has(pattern) for pattern in patterns)
else:
pattern = sympify(patterns[0])
if isinstance(pattern, UndefinedFunction):
return any(pattern in (f, f.func)
for f in self.atoms(Function, UndefinedFunction))
elif isinstance(pattern, type):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
else:
match = pattern._has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()."""
return lambda x: self == x
def replace(self, query, value, exact=False):
"""Replace matching subexpressions of ``self`` with ``value``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree in a simultaneous
fashion so changes made are targeted only once. In addition, if an
expression containing more than one Wild symbol is being used to match
subexpressions and the ``exact`` flag is True, then the match will
only succeed if non-zero values are received for each Wild that appears
in the match pattern.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a = Wild('a')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
When the default value of False is used with patterns that have
more than one Wild symbol, non-intuitive results may be obtained:
>>> b = Wild('b')
>>> (2*x).replace(a*x + b, b - a)
2/x
For this reason, the ``exact`` option can be used to make the
replacement only when the match gives non-zero values for all
Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a, exact=True)
y - 2
>>> (2*x).replace(a*x + b, b - a, exact=True)
2*x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from ..simplify.simplify import bottom_up
try:
query = sympify(query)
except SympifyError:
pass
try:
value = sympify(value)
except SympifyError:
pass
if isinstance(query, type):
def _query(expr):
return isinstance(expr, query)
if isinstance(value, type) or callable(value):
def _value(expr, result):
return value(*expr.args)
else:
raise TypeError(
'given a type, replace() expects another '
'type or a callable')
elif isinstance(query, Basic):
def _query(expr):
return expr.match(query)
# XXX remove the exact flag and make multi-symbol
# patterns use exact=True semantics; to do this the query must
# be tested to find out how many Wild symbols are present.
# See https://groups.google.com/forum/
# ?fromgroups=#!topic/sympy/zPzo5FtRiqI
# for a method of inspecting a function to know how many
# parameters it has.
if isinstance(value, Basic):
if exact:
def _value(expr, result):
return (value.subs(result)
if all(val for val in result.values()) else expr)
else:
def _value(expr, result):
return value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
def _value(expr, result):
return (value(**{str(key)[:-1]: val for key, val in result.items()})
if all(val for val in result.values()) else expr)
else:
def _value(expr, result):
return value(**{str(key)[:-1]: val for key, val in result.items()})
else:
raise TypeError(
'given an expression, replace() expects '
'another expression or a callable')
elif callable(query):
_query = query
if callable(value):
def _value(expr, result):
return value(expr)
else:
raise TypeError(
'given a callable, replace() expects '
'another callable')
else:
raise TypeError(
'first argument to replace() must be a '
'type, an expression or a callable')
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
expr = new
return expr
return bottom_up(self, rec_replace, atoms=True)
def find(self, query):
"""Find all subexpressions matching a query."""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
def _query(expr):
return isinstance(expr, query)
elif isinstance(query, Basic):
def _query(expr):
return expr.match(query) is not None
else:
_query = query
groups = defaultdict(int)
for result in filter(_query, preorder_traversal(self)):
groups[result] += 1
return dict(groups)
def count(self, query):
"""Count the number of matching subexpressions."""
return sum(self.find(query).values())
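    # Illustrative doctest-style sketch (not from the original source): find()
    # tallies every matching node met during the preorder traversal, and
    # count() simply sums those tallies (x, y are assumed doctest symbols).
    # >>> (x + x*y + sin(x)).find(x)
    # {x: 3}
    # >>> (x + x*y + sin(x)).count(x)
    # 3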
def _matches(self, expr, repl_dict={}):
"""Helper method for match() that looks for a match between Wild
symbols in self and expressions in expr.
Examples
========
>>> x = Wild('x')
>>> Basic(a + x, x)._matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x)._matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.func):
return
if self == expr:
return repl_dict
if self.is_Atom:
return
if len(self.args) != len(expr.args):
return
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d)._matches(other_arg, d)
if d is None:
return
return d
def match(self, pattern):
"""Pattern matching.
Wild symbols match all.
Parameters
==========
pattern : Expr
An expression that may contain Wild symbols.
Returns
=======
dict or None
            If the pattern matches self, return a dictionary of
            replacement rules, such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> p = Wild('p')
>>> q = Wild('q')
>>> e = (x + y)**(x + y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> (p**q).xreplace(_)
(x + y)**(x + y)
See Also
========
xreplace
diofant.core.symbol.Wild
"""
from ..simplify import signsimp
pattern = sympify(pattern)
s = signsimp(self)
p = signsimp(pattern)
# if we still have the same relationship between the types of
# input, then use the sign simplified forms
if (pattern.func == self.func) and (s.func == p.func):
rv = p._matches(s)
else:
rv = pattern._matches(self)
return rv
def count_ops(self, visual=None):
"""Wrapper for count_ops that returns the operation count."""
from .function import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default.
For example, limits, integrals, sums and products. All objects of this
kind will be evaluated recursively, unless some species were excluded
via 'hints' or unless the 'deep' hint was set to 'False'.
Examples
========
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args]
else:
args = self.args
if pattern is None or isinstance(self, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args)
if rewritten is not None:
return rewritten
return self.func(*args)
def rewrite(self, *args, **hints):
"""Rewrite functions in terms of other functions.
Rewrites expression containing applications of functions
of one kind in terms of functions of different kind. For
example you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as gamma function.
        As a pattern this function accepts a list of functions
        to rewrite (instances of DefinedFunction class). As a rule
        you can use a string or a destination function instance (in
        this case rewrite() will use the str() function).
        There is also the possibility to pass hints on how to rewrite
        the given expressions. For now there is only one such hint
        defined, called 'deep'. When 'deep' is set to False, functions
        are prevented from rewriting their contents.
Examples
========
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(E**(I*x) - E**(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(E**(I*x) - E**(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin], exp)
-I*(E**(I*x) - E**(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], str):
rule = '_eval_rewrite_as_' + args[-1]
else:
rule = '_eval_rewrite_as_' + args[-1].__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [p for p in pattern if self.has(p)]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
class Atom(Basic):
"""A parent class for atomic things.
An atom is an expression with no subexpressions, for example Symbol,
Number, Rational or Integer, but not Add, Mul, Pow.
"""
is_Atom = True
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default.
See Also
========
Basic.doit
"""
return self
@classmethod
def class_key(cls):
"""Nice order of classes."""
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""Return a sort key."""
from . import Integer
return self.class_key(), (1, (str(self),)), Integer(1).sort_key(), Integer(1)
def _eval_simplify(self, ratio, measure):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
To Diofant, 2.0 == 2:
>>> 2.0 == Integer(2)
True
Since a simple 'same or not' result is sometimes useful, this routine was
written to provide that query:
>>> _aresame(Float(2.0), Integer(2))
False
"""
from .function import AppliedUndef
from .function import UndefinedFunction as UndefFunc
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if (isinstance(i, (UndefFunc, AppliedUndef)) and
isinstance(j, (UndefFunc, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
return True
class preorder_traversal:
"""Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node then descends through the
tree breadth-first to yield all of a node's children's pre-order
traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : diofant expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : diofant expression
All of the subtrees in the tree.
Examples
========
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
args = node.args
if keys:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x + y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
|
|
import random
import zlib
import base64
from django.db import models
from django.db.models import get_model
from django.conf import settings
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from please_reply import settings as backup_settings
from please_reply.exceptions import NotInvited, InvalidHash
USER_MODEL = getattr(
settings,
'PLEASE_REPLY_USER_MODEL',
backup_settings.PLEASE_REPLY_USER_MODEL)
class ReplyListManager(models.Manager):
"""
Defines helper functions to get all attendees for an event.
"""
def is_guest_attending(self, event, guest):
"""
Returns true if the guest is marked as attending this event.
"""
matching_guest = self.get_replylist_for(event
).replies.filter(attending=True, guest=guest)
return any(matching_guest.all())
def get_confirmed_guests_for(self, event):
"""
return all attending=True replies for the given event.
"""
return self.get_replylist_for(event
).replies.filter(attending=True)
def get_invited_guests_for(self, event):
"""
Return all Reply models for the given event.
"""
return self.get_replylist_for(event
).replies.all()
def get_replylist_for(self, event):
"""
Return the replylist for the given event.
"""
event_type = ContentType.objects.get_for_model(event)
return self.get_query_set().get(
object_id=event.pk,
content_type=event_type)
def create_replylist(self, event, guests=None):
"""
Create a new reply list with optional guest-list.
        Running it a second time with some of the same guests will not change
        the replies for the existing guests, but will add new blank replies
        for the new guests.
"""
if guests is None:
guests = []
content_type = ContentType.objects.get_for_model(event)
replylist, created = self.model.objects.get_or_create(
object_id=event.pk,
content_type=content_type
)
replylist.save()
for guest in guests:
emptyreply, created = Reply.objects.get_or_create(
replylist=replylist,
guest=guest
)
if created:
emptyreply.attending=False
emptyreply.responded=False
emptyreply.save()
return replylist
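# A minimal usage sketch (hedged): ``Party`` is a hypothetical event model and
# ``alice`` a hypothetical user instance; neither is part of this app. The
# manager methods used below are the ones defined in this module.
# >>> party = Party.objects.create(name='housewarming')
# >>> rl = ReplyList.objects.create_replylist(party, guests=[alice])
# >>> r = Reply.objects.reply_to_event_for(party, alice, attending=True)
# >>> ReplyList.objects.is_guest_attending(party, alice)
# True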
class ReplyList(models.Model):
"""
A group of replies for an event.
"""
# event instance.
object_id = models.CharField(max_length=999)
content_type = models.ForeignKey(ContentType)
content_object = generic.GenericForeignKey('content_type', 'object_id')
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
objects = ReplyListManager()
class Meta:
verbose_name = _("reply list")
verbose_name_plural = _("reply lists")
ordering = ("content_type", "-object_id")
def __unicode__(self):
return u"replies for %s" % (
self.content_object
)
def make_simple_filter_manager(**filter_kwargs):
"""
    Factory function that returns a Manager subclass whose queryset is
    filtered by the keywords given in filter_kwargs.
"""
class FilteredReplyManager(models.Manager):
def get_query_set(self):
return super(FilteredReplyManager, self).get_query_set(
).filter(**filter_kwargs)
return FilteredReplyManager
class ReplyManager(models.Manager):
"""
Some convenience methods for marking a guest as attending an event.
"""
def reply_to_event_for(self, event, guest, attending):
"""
Set guest's reply to attending (true or false) for the replylist
matching the event.
"""
replylist = ReplyList.objects.get_replylist_for(event)
try:
guest = self.model.objects.get(
replylist=replylist,
guest=guest)
except self.model.DoesNotExist:
raise NotInvited("%s wasn't invited to %s" %
(guest, event))
guest.attending = attending
guest.responded = True
guest.save()
return guest
class Reply(models.Model):
"""
A single guest's reply to an event.
"""
replylist = models.ForeignKey(
'ReplyList',
related_name="replies",
verbose_name=_("reply to list")
)
guest = models.ForeignKey(
get_model(*USER_MODEL.split(".")),
verbose_name=_("guest")
)
attending = models.BooleanField(
_("attending"),
default=False
)
responded = models.BooleanField(
_("responded"),
default=False
)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
# custom managers.
objects = ReplyManager()
confirmed_guests = make_simple_filter_manager(attending=True)()
not_responded = make_simple_filter_manager(responded=False)()
def generate_userhash(self, salt):
"""
create XOR encrypted and base64 encoded text version of the guest__pk.
"""
return encode_userhash(self.guest.pk, self.replylist.pk, salt)
class Meta:
verbose_name = _("reply")
verbose_name_plural = _("replies")
ordering = ("replylist", "-responded", "-attending", "guest")
def __unicode__(self):
return u"%s is%s attending %s" % (
self.guest,
'' if self.attending else ' not',
self.replylist.content_object
)
#----------------------------------------------------------------------
# Utilities
def tinycode(key, text, reverse=False):
"""
an XOR type encryption routine taken from
http://code.activestate.com/recipes/266586-simple-xor-keyword-encryption/
    This isn't a secure technique; I am only using it to slightly obscure an
    otherwise obvious use of user.id in the URI. I know I could use a UUID for
    this purpose, but I didn't want to add uuid as a dependency for this
    project.
"""
rand = random.Random(key).randrange
if not reverse:
text = zlib.compress(text)
text = ''.join([chr(ord(elem)^rand(256)) for elem in text])
if reverse:
text = zlib.decompress(text)
return text
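# Round-trip sketch (hedged; Python 2 semantics, which this module targets):
# applying tinycode twice with the same key restores the original text.
# >>> tinycode('spam', tinycode('spam', 'secret 42'), reverse=True)
# 'secret 42'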
def decode_userhash(userhash, reply_list_id, salt):
"""
    Decode and return the user pk value.
    Assume the unhashed text has the form ``userpk`` and that it was
    generated with the key ``salt%%reply_list_id``.
    The scheme breaks if the salt itself contains '%%', so any '%%'
    sequences are stripped from both salt and reply_list_id before the
    key is built.
"""
reply_list_id = str(reply_list_id).replace("%%", "")
salt = str(salt).replace("%%", "")
key = "%%".join([salt, reply_list_id])
try:
userhash = base64.urlsafe_b64decode(str(userhash))
except TypeError:
raise InvalidHash('hash %s was not valid' % userhash)
try:
return tinycode(key, userhash, reverse=True)
except zlib.error:
raise InvalidHash('attempted to decrypt %s with invalid key %s' %
(userhash, key))
def encode_userhash(userpk, reply_list_id, salt):
"""
    Return base64-encoded, XOR-encrypted text for userpk, using the key
    salt%%reply_list_id
    where any '%%' has been removed from salt and reply_list_id.
"""
reply_list_id = unicode(reply_list_id).replace("%%", "")
salt = unicode(salt).replace("%%", "")
key = "%%".join([salt, reply_list_id])
userhash = tinycode(key, str(userpk))
return base64.urlsafe_b64encode(userhash)
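# Round-trip sketch (hedged; Python 2 semantics): decode_userhash inverts
# encode_userhash when called with the same reply_list_id and salt.
# >>> token = encode_userhash(42, 7, 'salty')
# >>> decode_userhash(token, 7, 'salty')
# '42'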
|
|
import cv2
import numpy as np
import pyscreenshot as ImageGrab
from tkinter import *
from Xlib import display
import mss
import os
import pyxhook
import serial
import threading
import serial.tools.list_ports
import ctypes
import glob
from PIL import ImageTk, Image
import sys
with open(os.devnull, 'w') as f: ###shutting up pygame silliness
oldstdout = sys.stdout# disable stdout
sys.stdout = f
import pygame
sys.stdout = oldstdout# enable stdout
version = '0.9.4l'
print('CHMACHINE Ver. %s \n' %version)
class motorclass():
def __init__(self):
self.state=2
self.tempo=0
self.savestate=2
self.result=0
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create an array of zeros for the black background
self.index=0
self.listindex=0
self.timetic=0
self.patternspeed=0
self.speed='0'
self.targetspeed='0'
self.pinresttime=0
self.serialfloodlimit=5 #time(ms) between commands to limit serial flooding
def detect(self):
global stream_window_open
global stream_window
while detectflag==True:
if arduino_connected==False:
pygame.time.wait(1)
while ((self.state==5) or (self.state==1)): #################### DETECT/DETECT SETUP
if self.state==5 and self.getspeed()!=0 and arduino_connected==True:
self.PWMpin('0')
try: #take care of sliding too fast which result in a shape out of boundaries
self.monitor = {"top": top, "left": left, "width": int(screenshotsizex), "height": int(screenshotsizey)}
arr = np.array(mss.mss().grab(self.monitor))
self.result = cv2.matchTemplate(arr, arrbase, cv2.TM_CCOEFF_NORMED) #check if images match
if self.state==5:
if self.result>0:
print ('%.0f %% Match' %(self.result*100))
else:
print ('0 % Match')
if checkinv==False: #invert flag False
if (self.result>=threshold): # if match value is over the threshold
if self.state==1 and arduino_connected==True:
self.PWMpin(speed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
cv2.circle(self.colorshow,(0,0), 70, (0,255,0), -1) #draw a green circle
self.tempo=pygame.time.get_ticks()
elif (pygame.time.get_ticks()-self.tempo) >= (timeonvar): #turn the pin to floor speed some time after the last match is occurred
if self.state==1 and arduino_connected==True:
self.PWMpin(floorspeed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
else: #invert flag True
if (self.result<=threshold):#match if value is under the threshold
if self.state==1 and arduino_connected==True:
self.PWMpin(speed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
cv2.circle(self.colorshow,(0,0), 70, (0,255,0), -1) #draw a green circle
self.tempo=pygame.time.get_ticks()
elif (pygame.time.get_ticks()-self.tempo) >= (timeonvar): #turn the pin to floor speed some time after the last match is occurred
if self.state==1 and arduino_connected==True:
self.PWMpin(floorspeed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
###centering and overlapping images over background:
x_offset=int((streamwindowssizex - screenshotsizex)/2)
y_offset=int((streamwindowssizey - screenshotsizey)/2)
self.colorshow[y_offset:y_offset + arr.shape[0], x_offset:x_offset + arr.shape[1]] = arr
###
except:
pass
if stream_window_open==False: # open "stream" window if isn't already opened
stream_window= Toplevel()
stream_window.resizable(False,False)
stream_window.title('Stream')
stream_window.geometry(str(streamwindowssizex) + 'x' + str(streamwindowssizey))
stream_window.protocol("WM_DELETE_WINDOW", on_closing_stream_window)
stream_canvas = Canvas(stream_window, width=streamwindowssizex, height=streamwindowssizey, background='black')
stream_canvas.pack()
stream_window_open=True
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
stream_window.tk.call('wm', 'iconphoto', stream_window._w, imgicon)
except:
pass
ontop()
else:
###showing image:
self.im = cv2.cvtColor(self.colorshow, cv2.COLOR_BGRA2RGB)
self.imag = Image.fromarray(self.im)
self.imagi = ImageTk.PhotoImage(image=self.imag)
stream_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=self.imagi)
stream_canvas.image=self.imagi #to keep a reference else it shows blank
if self.state==1 and arduino_connected==True:
self.PWMpin(str(self.getspeed())) #Keeps the PWM pin alive(see Arduino code)
###
pygame.time.wait(1)
while self.state==2 and arduino_connected==True:#################### STOP/PAUSE
self.PWMpin('0')
pygame.time.wait(1)
while self.state==3 and arduino_connected==True: ##################### ALWAYSON/PATTERN
if patternvar=='none':
self.PWMpin(speed)
pygame.time.wait(1)
else:
self.listindex=namelist.index(patternvar)
if pygame.time.get_ticks()-self.timetic>=timeonvar/100:
if self.index < len(patternlist[self.listindex])-1:
self.index+=1
else:
self.index=0
self.patternspeed=int(round(patternlist[self.listindex][self.index]/100*int(speed)))
if self.patternspeed>=int(floorspeed):
self.PWMpin(str(self.patternspeed))
if self.patternspeed<int(floorspeed) and self.listindex>1:
self.PWMpin(floorspeed)
self.timetic=pygame.time.get_ticks()
pygame.time.wait(1)
while self.state==4 and arduino_connected==True:######################### PULSE
self.tempo=pygame.time.get_ticks()
while (pygame.time.get_ticks()-self.tempo) <= (timeonvar):
if self.state!=4:
break
self.PWMpin(speed)
pygame.time.wait(1)
self.tempo=pygame.time.get_ticks()
while (pygame.time.get_ticks()-self.tempo) <= (timeoffvar):
if self.state!=4:
break
self.PWMpin(floorspeed)
pygame.time.wait(1)
def getspeed(self):
return int(self.speed)
def PWMpin(self, PWM_speed): #set the Arduino pin PWM
global arduino_connected
try:
if (pygame.time.get_ticks()-self.pinresttime) > self.serialfloodlimit: #limit serial flooding
self.speed=PWM_speed
arduino.write(('V' + self.speed + 'S').encode('utf-8'))
self.pinresttime=pygame.time.get_ticks()
except serial.SerialTimeoutException:
print('WRITE TIMEOUT ERROR.')
arduino.close()
self.stop()
arduino_connected=False
except:
print('SERIAL CONNECTION ERROR')
arduino.close()
self.stop()
arduino_connected=False
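    # Wire-format note (inferred from this file, not a documented protocol):
    # a speed value of 128 goes out as the ASCII frame 'V128S', and the 'T'
    # probe sent by the connection routines expects the firmware to reply
    # with a string containing 'connOK'.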
def stop(self):
self.state=2
self.savestate=2
def pause(self):
if self.state!=2:
self.savestate=self.state
self.state=2
elif self.state==2:
self.state=self.savestate
def startdetect(self):
self.state=1
self.savestate=self.state
def alwayson_pattern(self):
self.state=3
self.savestate=self.state
def pulse(self):
self.state=4
self.savestate=self.state
def setup(self):
self.state=5
self.savestate=self.state
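# State map (inferred from motorclass.detect() above): 1 = DETECT,
# 2 = STOP/PAUSE, 3 = ALWAYS ON/PATTERN, 4 = PULSE, 5 = DETECT SETUP.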
def keysetup(file):
global pausebutton
global slowdownbutton
global speedupbutton
global screenshotbutton
global refreshbutton
global savebutton
global loadbutton
linelist=[]
###default keys:
pausebutton='P_Insert'
slowdownbutton='P_End'
speedupbutton='P_Down'
screenshotbutton='F9'
refreshbutton='F10'
savebutton='F11'
loadbutton='F12'
###
try:
setup = open(file, 'r')
for x in range(100):
linelist=setup.readline().replace(' ', '').strip().split('=') #read line, remove spaces and split the string a the "=" sign
if linelist[0]== '***':
break
if linelist[0] == 'Pause':
pausebutton=linelist[1]
if linelist[0] == 'Slowdown':
slowdownbutton=linelist[1]
if linelist[0] == 'Speedup':
speedupbutton=linelist[1]
if linelist[0] == 'Screenshot':
screenshotbutton=linelist[1]
if linelist[0] == 'Refresh':
refreshbutton=linelist[1]
if linelist[0] == 'Loadstate':
loadbutton=linelist[1]
if linelist[0] == 'Savestate':
savebutton=linelist[1]
setup.close()
except:
print('Cannot open', file, ', loading default keys...\n')
print('- HOTKEYS:\n')
print('Pause -------------- ',pausebutton)
print('Slow down ---------- ',slowdownbutton)
print('Speed up ----------- ',speedupbutton)
print('Screenshot --------- ',screenshotbutton)
print('Screenshot update -- ',refreshbutton)
print('Save state --------- ',savebutton)
print('Load state --------- ',loadbutton)
print('')
print('')
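# Expected setup.txt layout (a sketch inferred from the parser above; key names
# are pyxhook key identifiers and a line of '***' ends the section):
#   Pause = P_Insert
#   Speedup = P_Down
#   ***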
def patternsetup(file):
global namelist
global patternlist
linelist=[]
namelist=[]
patternlist=[]
namelist.append('PATTERN')
namelist.append('none')
patternlist.append([0])
patternlist.append([0])
try:
patterntxt = open(file, 'r')
for x in range(1000):
linelist=patterntxt.readline()
if linelist.strip()== '***': #strip() removes spaces and end of line characters
break
try:
if linelist.count('=')==1 and linelist.count(':')>0:
linelist=linelist.replace(' ', '').replace(',', '.').strip().split('=') #read line, remove spaces, convert "," to "." and split the string at the "=" sign
if linelist[0] != '' and linelist[1]!= '':
namelist.append(linelist[0][0:18])
stringlist=linelist[1].split(':')
intlist = [int(round(float(i))) for i in stringlist]#converts list of strings into rounded integers
patternlist.append(intlist)
except:
print(file, 'FORMAT ERROR\n')
patterntxt.close()
except:
print('Cannot open', file, '\n')
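# Expected pattern.txt layout (a sketch inferred from the parser above): each
# line is ``name = v1:v2:...`` where the values are percentages of MOTOR SPEED,
# and a '***' line terminates the list, e.g.:
#   wave = 0:25:50:75:100:75:50:25
#   ***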
def comportsetup():
global ports
ports = list(serial.tools.list_ports.comports()) # detects available ports
print ('- AVAILABLE PORTS:\n')
for p in ports:
print (p)
print('')
def autoserialstart(baud):
checkAO.configure(state=DISABLED)
checkPUL.configure(state=DISABLED)
checkDET.configure(state=DISABLED)
checkSET.configure(state=DISABLED)
buttonserial.configure(state=DISABLED)
comentry.insert(END, "PLEASE WAIT...") # insert text into the widget
comentry.configure(state=DISABLED)
global arduino
global arduino_connected
line=('')
portnumber=('')
comentry.delete(0, END) # delete text from the widget from position 0 to the END
root.focus() #remove focus from the entry widget
resetGUI()
motor.stop()
print("Looking for the CH Machine, PLEASE WAIT...\n")
for p in ports:
arduino_connected=False
try:#try to close already existing serial connection
arduino.close()
while arduino.is_open:
pygame.time.wait(1)
except:
pass
try:
print (p[0] + '...')
arduino = serial.Serial(p[0], baud, timeout = 1, write_timeout = 1) # always a good idea to specify a timeout in case we send bad data
pygame.time.wait(3000)# wait for Arduino to initialize
arduino.write(('T').encode('utf-8'))
pygame.time.wait(150)# wait for a response from Arduino
line = arduino.read(arduino.inWaiting()).decode(encoding='UTF-8',errors='replace')
if line.find('connOK')!=-1:
print("CHM CONNECTED!")
print (p[0] + ' - Initialization Complete.')
arduino_connected=True
break
else:
print ('Wrong serial connection.')
except:
print ('Serial port exception')
if line.find('connOK')==-1:
print ('\nCHMachine not found, check out the connection.\n')
checkAO.configure(state=NORMAL)
checkPUL.configure(state=NORMAL)
checkDET.configure(state=NORMAL)
checkSET.configure(state=NORMAL)
buttonserial.configure(state=NORMAL)
comentry.configure(state=NORMAL)
comentry.delete(0, END)
return True
def serialstart(COMstring, baud):
global arduino_connected
global arduino
line=('')
comentry.delete(0, 'end') # delete text from the widget
root.focus() #remove focus from the entry widget
if COMstring == ('') or COMstring == ('COM Port'): #if no port is specified start autoserialstart() to find it automatically
        tserial=threading.Thread(target=autoserialstart, args=(serialbaud,)) #args should be a tuple, not a set
tserial.setDaemon(True)
tserial.start()
#manual port:
else:
print (COMstring + ' - Initializing...')
resetGUI()
arduino_connected=False
motor.stop()
try:
if arduino.is_open:
arduino.close()
pygame.time.wait(500)
except:
pass
try:
arduino = serial.Serial(COMstring, baud, timeout = 1, write_timeout = 1) # 2=Com3 on windows always a good idea to specify a timeout in case we send bad data
pygame.time.wait(4000)# wait for the Arduino to initialize
#test the connection(see Arduino code):
arduino.write(('T').encode('utf-8'))
pygame.time.wait(300)
line = arduino.read(arduino.inWaiting()).decode(encoding='UTF-8',errors='replace')
if line.find('connOK')!=-1:
print("CHM CONNECTED!")
print (COMstring + ' - Initialization Complete.')
arduino_connected=True
else:
print ('Wrong serial connection.')
arduino.close()
except serial.SerialTimeoutException:
print (COMstring + ' TIMEOUT EXCEPTION. Try another port.')
arduino.close()
arduino_connected=False
except:
print('No port found.')
def onKeyDown(event):
global speed
global pos
global arrbase
global savelist
global loadlist
global top
global left
global screenshotsizex
global screenshotsizey
global match_window_open
global match_window
global match_canvas
# never put any condition before event.key
if event.Key == ('Return'):
if comentry==root.focus_get() and comentry.get()!=(''):
serialstart(comtext.get(), serialbaud)
if event.Key == (slowdownbutton):
speedint=int(speed)
if (checkAOVar.get()==True or checkPULVar.get()==True or checkDETVar.get()==True):
if speedint>10:
speedint -= 10
motorspeed.set(speedint)
speed=str(speedint)
else:
motorspeed.set(0)
speed=('0')
if event.Key == (speedupbutton):
speedint=int(speed)
if (checkAOVar.get()==True or checkPULVar.get()==True or checkDETVar.get()==True):
if speedint <= 245:
speedint += 10
motorspeed.set(speedint)
speed=str(speedint)
else:
motorspeed.set(255)
speed=('255')
if event.Key == (pausebutton):
motor.pause()
if (event.Key == screenshotbutton or event.Key == refreshbutton):
if (event.Key == screenshotbutton):
mousedata=display.Display().screen().root.query_pointer()._data
pos=[mousedata['root_x'], mousedata['root_y']]
if (pos != [-1,-1]):
print('Mouse position',pos)
###find black border width:
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
###
top=int((pos[1]-screenshotsizey/2))
left=int((pos[0]-screenshotsizex/2))
###adjusting screenshot position so it stays into screen boundaries:
if left<0:
left=0
if top<0:
top=0
if left + screenshotsizex > screenwidth:
left=int(screenwidth-screenshotsizex)
if top + screenshotsizey > screenheight:
top=int(screenheight-screenshotsizey)
###
monitor = {"top": top, "left": left, "width": int(screenshotsizex), "height": int(screenshotsizey)}
arrbase = np.array(mss.mss().grab(monitor))
base=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
x_offset=int((streamwindowssizex-screenshotsizex)/2)
y_offset=int((streamwindowssizey-screenshotsizey)/2)
base[y_offset:y_offset+arrbase.shape[0], x_offset:x_offset+arrbase.shape[1]] = arrbase #center the image array
if match_window_open==False:# open "match" window if isn't already opened
match_window= Toplevel()
match_window.resizable(False,False)
match_window.title('Match')
match_window.geometry(str(streamwindowssizex) + 'x' + str(streamwindowssizey))
match_window.protocol("WM_DELETE_WINDOW", on_closing_match_window)
match_canvas = Canvas(match_window, width=streamwindowssizex, height=streamwindowssizey, background='black')
match_canvas.pack()
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
match_window.tk.call('wm', 'iconphoto', match_window._w, imgicon)
except:
pass
ontop()
match_window_open=True
###show image:
im = cv2.cvtColor(base, cv2.COLOR_BGRA2RGB)
imag = Image.fromarray(im)
imagi = ImageTk.PhotoImage(image=imag)
match_canvas.image=imagi #to keep a reference in scope else it shows blank
match_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=match_canvas.image)
###
if event.Key == (savebutton):
filesname=glob.glob(os.path.abspath(os.path.dirname(__file__)) + "/*.npz") #find name of all .npz files in the main folder
savelist=[]
for x in filesname:
try: #in case of a miswritten file name
x = x[-9:-4]
x=x.strip('save')
num=int(x)
savelist.append(num)
except:
pass
if savelist!=[]:
savename=(os.path.abspath(os.path.dirname(__file__)) + '/save' + str(max(savelist) + 1) + '.npz') #find the max value to add to the string
else:
savename=(os.path.abspath(os.path.dirname(__file__)) + '/save0.npz')
np.savez(savename, arrbase, pos, int(screenshotsizex), int(screenshotsizey), speed, floorspeed, timeonvar, timeoffvar, threshold, checkinv)
print(savename, 'SAVED')
loadlist=[]
if event.Key == (loadbutton):
filesname=glob.glob(os.path.abspath(os.path.dirname(__file__)) + "/*.npz") #find name of all npz files in the main folder
if loadlist==[]:
for x in filesname:
try: #in case of a miswritten file name
x = x[-9:-4]
x=x.strip('save')
num=int(x)
loadlist.append(num)
except:
pass
loadlist.sort() #sort numbers in the list
if loadlist!=[]:
loadname=(os.path.abspath(os.path.dirname(__file__)) + '/save' + str(loadlist.pop()) + '.npz') # pop() removes last element and return it
loaded_arrays = np.load(loadname)
load_state(loaded_arrays['arr_0'], loaded_arrays['arr_1'], loaded_arrays['arr_2'], loaded_arrays['arr_3'], loaded_arrays['arr_4'],
loaded_arrays['arr_5'], loaded_arrays['arr_6'], loaded_arrays['arr_7'], loaded_arrays['arr_8'], loaded_arrays['arr_9'])
print(loadname, 'LOADED')
else:
print('nothing to load')
return True
def load_state(image_arrayl, posl, xsizel, ysizel, speedl, floorspeedl, timeonvarl, timeoffvarl, thresholdl, checkinvl):
global screenshotsizex
global screenshotsizey
global speed
global timeonvar
global timeoffvar
global floorspeed
global threshold
global arrbase
global arr
global pos
global top
global left
global checkinv
global match_window_open
global match_window
global match_canvas
###load variables and update interface:
motorspeed.set(speedl)
speed=str(speedl)
timeON.set(timeonvarl)
timeonvar=timeON.get()
timeOFF.set(timeoffvarl)
timeoffvar=timeOFF.get()
floorspeedVAR.set(floorspeedl)
floorspeed=str(floorspeedVAR.get())
thresh.set(thresholdl * 100)
threshold=thresholdl
if checkinvl == True:
checkinvert.select()
checkinv=True
else:
checkinvert.deselect()
checkinv=False
###
###load and display image:
if posl[0] != -1:
pos = [posl[0], posl[1]]
top=int((posl[1]-screenshotsizey/2))
left=int((posl[0]-screenshotsizex/2))
arrbase=image_arrayl
arr=image_arrayl
sizex.set(xsizel/2)
sizey.set(ysizel/2)
screenshotsizex=(xsizel/2)*(streamwindowssizex - 20)/100
screenshotsizey=(ysizel/2)*(streamwindowssizey - 20)/100
x_offset=int((streamwindowssizex - screenshotsizex)/2)
y_offset=int((streamwindowssizey - screenshotsizey)/2)
base=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
base[y_offset:y_offset+arrbase.shape[0], x_offset:x_offset+arrbase.shape[1]] = arrbase #center the image array
if match_window_open==False:# open "match" window if isn't already opened
match_window= Toplevel()
match_window.resizable(False,False)
match_window.title('Match')
match_window.geometry(str(streamwindowssizex) + 'x' + str(streamwindowssizey))
match_window.protocol("WM_DELETE_WINDOW", on_closing_match_window)
match_canvas = Canvas(match_window, width=streamwindowssizex, height=streamwindowssizey, background='black')
match_canvas.pack()
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
match_window.tk.call('wm', 'iconphoto', match_window._w, imgicon)
except:
pass
ontop()
match_window_open=True
im = cv2.cvtColor(base, cv2.COLOR_BGRA2RGB)
imag = Image.fromarray(im)
imagi = ImageTk.PhotoImage(image=imag)
match_canvas.image=imagi #to keep a reference else it shows blank
match_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=match_canvas.image)
###
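# Save-file layout (inferred from np.savez() in onKeyDown and the load_state()
# signature above): arr_0 = template image, arr_1 = mouse position,
# arr_2/arr_3 = capture width/height, arr_4 = speed, arr_5 = floor speed,
# arr_6/arr_7 = time on/off, arr_8 = threshold, arr_9 = invert flag.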
# TKINTER FUNCTIONS:
def alwaysONtick():
try:
arduino.name
if (arduino.is_open):
if checkAOVar.get()==False:
resetGUI()
motor.stop()
if checkAOVar.get()==True:
if patternvar=='none':
resetGUI()
slidera.config(foreground='black')
checkAO.select()
motor.alwayson_pattern()
else:
resetGUI()
slidera.config(foreground='black')
sliderb.config(foreground='black', label='PATTERN FREQ:')
sliderd.config(foreground='black')
checkAO.select()
motor.alwayson_pattern()
else:
print('No serial connection')
checkAO.deselect()
except:
print('No serial connection')
checkAO.deselect()
def detecttick():
if (pos==[-1,-1]):
print('Position? (Press', screenshotbutton, 'to take a screenshot)')
checkDET.deselect()
else:
try:
arduino.name
if (arduino.is_open):
if checkDETVar.get()==False:
resetGUI()
motor.stop()
if checkDETVar.get()==True:
resetGUI()
slidera.config(foreground='black')
sliderb.config(foreground='black')
sliderd.config(foreground='black')
slidersizex.config(foreground='black')
slidersizey.config(foreground='black')
sliderthresh.config(foreground='black')
checkinvert.config(foreground='black')
checkDET.select()
motor.startdetect()
else:
print('No serial connection')
checkDET.deselect()
except:
print('No serial connection')
checkDET.deselect()
def detectsetup():
if (pos==[-1,-1]):
print('Position? (Press', screenshotbutton, 'to take a screenshot)')
checkSET.deselect()
else:
if checkSETVar.get()==False:
resetGUI()
motor.stop()
if checkSETVar.get()==True:
resetGUI()
sliderb.config(foreground='black')
slidersizex.config(foreground='black')
slidersizey.config(foreground='black')
sliderthresh.config(foreground='black')
checkinvert.config(foreground='black')
checkSET.select()
motor.setup()
def pulsetick():
try:
arduino.name
if (arduino.is_open):
if checkPULVar.get()==False:
resetGUI()
motor.stop()
if checkPULVar.get()==True:
resetGUI()
slidera.config(foreground='black')
sliderb.config(foreground='black')
sliderc.config(foreground='black')
sliderd.config(foreground='black')
checkPUL.select()
motor.pulse()
else:
print('No serial connection')
checkPUL.deselect()
except:
print('No serial connection')
checkPUL.deselect()
def on_closing_match_window():
global match_window_open
global match_window
match_window_open=False
match_window.destroy()
def on_closing_stream_window():
global stream_window_open
global stream_window
stream_window_open=False
stream_window.destroy()
def on_closing():
global detectflag
motor.stop()
detectflag=False
print ('Bye Bye')
pygame.time.wait(1)
hm.cancel()
cv2.destroyAllWindows()
root.quit()
root.destroy()
print ('Be vigilant')
sys.exit()
def slidersize(value):
global arrbase
global screenshotsizex
global screenshotsizey
global top
global left
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
if pos != [-1,-1]:
top=int((pos[1]-screenshotsizey/2))
left=int((pos[0]-screenshotsizex/2))
### adjusting screenshot position so it stays into screen boundaries:
if left<0:
left=0
if top<0:
top=0
if left + screenshotsizex > screenwidth:
left=int(screenwidth-screenshotsizex)
if top + screenshotsizey > screenheight:
top=int(screenheight-screenshotsizey)
###
### show image:
monitor = {"top": top, "left": left, "width": int(screenshotsizex), "height": int(screenshotsizey)}
arrbase = np.array(mss.mss().grab(monitor))
base=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
x_offset=int((streamwindowssizex-screenshotsizex)/2)
y_offset=int((streamwindowssizey-screenshotsizey)/2)
base[y_offset:y_offset+arrbase.shape[0], x_offset:x_offset+arrbase.shape[1]] = arrbase #center the image array
im = cv2.cvtColor(base, cv2.COLOR_BGRA2RGB)
imag = Image.fromarray(im)
imagi = ImageTk.PhotoImage(image=imag)
try: # if the window is not opened an exception occur
match_canvas.image=imagi #to keep a reference else it shows blank
match_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=match_canvas.image)
except:
pass
###
def speedslider(value):
global speed
speed=value
def floorspeedslider(value):
global floorspeed
floorspeed=value
def timeONslider(value):
global timeonvar
timeonvar=int(value)
def timeOFFslider(value):
global timeoffvar
timeoffvar=int(value)
def thresholdslider(value):
global threshold
threshold=int(value)/100
def about():
top = Toplevel()
top.wm_attributes("-topmost", 1)
top.resizable(False,False)
top.focus()
top.geometry("220x150")
top.title('About')
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
top.tk.call('wm', 'iconphoto', top._w, imgicon)
except:
pass
msg = Message(top, width=300, text='COCK HERO MACHINE Ver.' + version)
msga = Message(top, width=300, text='[email protected]')
    msgb = Message(top, width=300, text='For more information visit:')
msgc = Message(top, width=300, text='cockheromachine.blogspot.com\n')
msg.pack()
msga.pack()
msgb.pack()
msgc.pack()
button = Button(top, height=1, width=10, text="OK", command=top.destroy)
button.pack()
def ontop():
if checkontopvar.get()==True:
root.wm_attributes("-topmost", 1) # on top
try:#if the window is not opened an exception occur
match_window.wm_attributes("-topmost", 1)# on top
except:
pass
try:
stream_window.wm_attributes("-topmost", 1)# on top
except:
pass
if checkontopvar.get()==False:
root.wm_attributes("-topmost", 0) # NOT on top
try:
match_window.wm_attributes("-topmost", 0)# NOT on top
except:
pass
try:
stream_window.wm_attributes("-topmost", 0)# NOT on top
except:
pass
def on_entry_click(event):
if comentry.get() == 'COM Port':
comentry.delete(0, "end") # delete all the text in the entry widget
comentry.insert(0, '') #Insert blank
comentry.config(foreground = 'black')
def resetGUI():
checkAO.deselect()
checkPUL.deselect()
checkDET.deselect()
checkSET.deselect()
slidera.config(foreground='gray')
sliderb.config(foreground='gray', label='TIME ON(ms):')
sliderc.config(foreground='gray')
sliderd.config(foreground='gray')
slidersizex.config(foreground='gray')
slidersizey.config(foreground='gray')
sliderthresh.config(foreground='gray')
checkinvert.config(foreground='gray')
def inverttick():
global checkinv
checkinv=not checkinv
def patternmenu(value):
global patternvar
patternvar=value
alwaysONtick()
# TKINTER INTERFACE:
root= Tk()
streamwindowssizex=220
streamwindowssizey=220
comtext = StringVar()
comentry=Entry(root, textvariable=comtext)
comentry.grid(row = 0, column = 0)
comentry.insert(0, "COM Port")
comentry.bind('<FocusIn>', on_entry_click)
comentry.config(fg = 'gray', width=13)
buttonserial=Button(root,height=1, width=8,text='CONNECT', command=lambda:serialstart(comtext.get(), serialbaud))
buttonserial.grid(row = 0, column = 1, sticky=W)
checkontopvar = BooleanVar()
checkontop=Checkbutton(root,text = 'On top', variable=checkontopvar, command=lambda:ontop())
checkontop.grid(row = 0, column = 3)
checkontop.select()
buttonabout=Button(root,height=1, width=8,text='About...', command=lambda:about())
buttonabout.grid(row = 0, column = 4)
patternsetup(os.path.abspath(os.path.dirname(__file__)) + '/pattern.txt')#load patterns
patternvar='none'
pattern_variable = StringVar()
pattern_variable.set("PATTERNS")
optionmenu_widget = OptionMenu(root, pattern_variable, *namelist[1:], command=patternmenu)
optionmenu_widget.grid(row = 2, column=0)
optionmenu_widget.config(width=7)
checkAOVar = IntVar()
checkAO=Checkbutton(root,text = 'ALWAYS ON', command=lambda:alwaysONtick(), variable = checkAOVar)
checkAO.grid(row = 2, column = 1, pady=10)
checkPULVar = IntVar()
checkPUL=Checkbutton(root,text = 'PULSE', command=lambda:pulsetick(), variable = checkPULVar)
checkPUL.grid(row = 2, column = 2, pady=10)
checkDETVar = IntVar()
checkDET=Checkbutton(root,text = 'DETECT', command=lambda:detecttick(), variable = checkDETVar)
checkDET.grid(row = 2, column = 3, pady=10)
checkSETVar = IntVar()
checkSET=Checkbutton(root,text = 'DETECT SETUP', command=lambda:detectsetup(), variable = checkSETVar)
checkSET.grid(row = 2, column = 4, pady=10)
buttonpause=Button(root, height=2, width=60, text='-PAUSE/START-', command=lambda:motor.pause())
buttonpause.grid(row = 4, columnspan = 5, pady=10)
motorspeed=IntVar(value=10)
slidera = Scale(root, from_=0, to=255, orient=HORIZONTAL,length=400.00, variable=motorspeed, label='MOTOR SPEED:', command=speedslider)
slidera.grid(columnspan = 6,pady=5)
speed=(str(motorspeed.get()))
timeON=IntVar(value=200)
sliderb = Scale(root, from_=10, to=1000, orient=HORIZONTAL,length=400.00, variable=timeON, label='TIME ON(ms):', command=timeONslider)
sliderb.grid(columnspan = 7,pady=5)
timeonvar=timeON.get()
timeOFF=IntVar(value=100)
sliderc = Scale(root, from_=10, to=1000, orient=HORIZONTAL,length=400.00, variable=timeOFF, label='TIME OFF(ms):', command=timeOFFslider)
sliderc.grid(columnspan = 8,pady=5)
timeoffvar=timeOFF.get()
floorspeedVAR=IntVar(value=0)
sliderd = Scale(root, from_=0, to=255, orient=HORIZONTAL,length=400.00, variable=floorspeedVAR, label='FLOOR SPEED:', command=floorspeedslider)
sliderd.grid(columnspan = 9,pady=5)
floorspeed=str(floorspeedVAR.get())
sizex=IntVar(value=25)
slidersizex = Scale(root, from_=1, to=100, orient=HORIZONTAL,length=400.00, variable=sizex, label='Xsize:', command=slidersize)
slidersizex.grid(columnspan = 10,pady=5)
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
sizey=IntVar(value=25)
slidersizey = Scale(root, from_=1, to=100, orient=HORIZONTAL,length=400.00, variable=sizey, label='Ysize:', command=slidersize)
slidersizey.grid(columnspan = 11,pady=5)
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
thresh=IntVar(value=70)
sliderthresh = Scale(root, from_=1, to=100, orient=HORIZONTAL,length=400.00, variable=thresh, label='THRESHOLD:', command=thresholdslider)
sliderthresh.grid(columnspan = 12,pady=5)
threshold=int(thresh.get())/100
checkinv=False
checkinvert=Checkbutton(root,text = 'Invert', command=inverttick, variable=checkinv)
checkinvert.grid(columnspan = 13)
#THREADS:
detectflag=True
arduino_connected=False
motor=motorclass()
tmotordetect=threading.Thread(target=motor.detect, args=())
tmotordetect.setDaemon(True)
tmotordetect.start()
#INITIALIZING:
pos=[-1,-1]
top=0
left=0
savelist=[]
loadlist=[]
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
arrbase=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
serialbaud=9600
arduino=None
keysetup(os.path.abspath(os.path.dirname(__file__)) + '/setup.txt') #assign keys from setup.txt
comportsetup() #list all available com ports
pygame.init()
hm = pyxhook.HookManager() # hooking keyboard
hm.KeyDown = onKeyDown
hm.HookKeyboard()
hm.start()
pygame.event.pump()
root.withdraw()
root.wm_attributes("-topmost", 1)
root.protocol("WM_DELETE_WINDOW", on_closing)
root.title('CHM ' + version)
root.resizable(False,False)
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
root.tk.call('wm', 'iconphoto', root._w, imgicon)
except:
pass
screenwidth, screenheight = root.winfo_screenwidth(), root.winfo_screenheight()# get the screen resolution
match_window_open=False
stream_window_open=False
root.deiconify()
resetGUI()
ontop()
root.mainloop()
|
|
from django.db.utils import DatabaseError
try:
import thread
except ImportError:
import dummy_thread as thread
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.importlib import import_module
from django.utils.timezone import is_aware
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Transaction related attributes
self.transaction_state = []
self.savepoint_state = 0
self._dirty = None
self._thread_ident = thread.get_ident()
self.allow_thread_sharing = allow_thread_sharing
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError(
"This code isn't under transaction management")
# We will pass the next status (after leaving the previous state
# behind) to subclass hook.
self._leave_transaction_management(self.is_managed())
if self._dirty:
self.rollback()
raise TransactionManagementError(
"Transaction managed block ended with pending COMMIT/ROLLBACK")
self._dirty = False
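    # Typical manual-transaction flow (a hedged sketch; ``connection`` stands
    # for a configured backend subclass instance, not a name defined here):
    # >>> connection.enter_transaction_management()
    # >>> connection.managed(True)
    # >>> ...          # run queries; modifying ones should mark the connection dirty
    # >>> connection.commit()
    # >>> connection.leave_transaction_management()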
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if (not self.allow_thread_sharing
and self._thread_ident != thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
        Sets a dirty flag for the current thread and code streak. This can be
        used in a managed block of code to decide whether there are open
        changes waiting for commit.
"""
if self._dirty is not None:
self._dirty = True
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def set_clean(self):
"""
        Resets a dirty flag for the current thread and code streak. This can
        be used in a managed block of code to decide whether a commit or
        rollback should happen.
"""
if self._dirty is not None:
self._dirty = False
else:
raise TransactionManagementError("This code isn't under transaction management")
self.clean_savepoints()
def clean_savepoints(self):
self.savepoint_state = 0
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
# Note that this setting isn't documented, and is only used here, and
# in enter_transaction_management()
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
        committed.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self._commit()
self.set_clean()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._rollback()
else:
self.set_dirty()
def commit(self):
"""
Does the commit itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._commit()
self.set_clean()
def rollback(self):
"""
This function does the rollback itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._rollback()
self.set_clean()
def savepoint(self):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
thread_ident = thread.get_ident()
self.savepoint_state += 1
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_commit(sid)
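    # Savepoint usage sketch (hedged; only effective when the backend sets
    # features.uses_savepoints, otherwise these calls are no-ops):
    # >>> sid = connection.savepoint()
    # >>> try:
    # ...     ...                      # statements that might fail
    # ... except DatabaseError:
    # ...     connection.savepoint_rollback(sid)
    # ... else:
    # ...     connection.savepoint_commit(sid)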
@contextmanager
def constraint_checks_disabled(self):
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key constraint
checking.
"""
pass
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered.
"""
pass
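    # Usage sketch (hedged): temporarily suspend foreign key checking while
    # loading rows that reference each other, then validate afterwards. Both
    # calls are no-ops unless a backend overrides the hooks above.
    # >>> with connection.constraint_checks_disabled():
    # ...     ...                      # e.g. load fixture rows out of order
    # >>> connection.check_constraints()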
def close(self):
self.validate_thread_sharing()
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_autocommit = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred?
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"Confirm support for transactions"
try:
# Make sure to run inside a managed transaction block,
# otherwise autocommit will cause the confirmation to
# fail.
self.connection.enter_transaction_management()
self.connection.managed(True)
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection._commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection._rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection._commit()
self.connection._dirty = False
finally:
self.connection.leave_transaction_management()
return count == 0
@cached_property
def supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
return True
except NotImplementedError:
return False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def cache_key_culling_sql(self):
"""
Returns a SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
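# Standalone sketch (illustrative only): the substitution performed above,
# with plain str() standing in for smart_unicode/force_unicode so the snippet
# runs without Django's encoding helpers. The query and parameters are
# assumptions for the example.
_sql = "SELECT * FROM auth_user WHERE id = %s AND is_active = %s"
_params = (42, True)
_rendered = _sql % tuple(str(p) for p in _params)
assert _rendered == "SELECT * FROM auth_user WHERE id = 42 AND is_active = True"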
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we want "LIMIT infinity".
Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
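# Standalone sketch (illustrative only): the escaping applied by
# prep_for_like_query() above, shown on a plain str value so the snippet runs
# without Django's encoding helpers. The sample value is an assumption.
_value = "50%_off\\sale"
_escaped = _value.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
assert _escaped == "50\\%\\_off\\\\sale"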
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid; otherwise it returns the validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup.
`value` is an int containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
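# Standalone sketch (illustrative only): the bounds produced above for a
# `__year=2012` lookup, suitable for a BETWEEN comparison.
_year = 2012
_bounds = ['%s-01-01 00:00:00' % _year, '%s-12-31 23:59:59.999999' % _year]
assert _bounds == ['2012-01-01 00:00:00', '2012-12-31 23:59:59.999999']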
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year lookup.
`value` is an int containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because, on their DB, date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out aggregates that are
known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
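# Standalone sketch (illustrative only): how combine_expression() above joins
# subexpressions; backends with non-standard operators override the connector
# before this join happens. The column names are assumptions for the example.
_sub_expressions = ['"price"', '"tax"']
_combined = (' %s ' % '+').join(_sub_expressions)
assert _combined == '"price" + "tax"'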
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
do NOT use the database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
if cursor is None:
cursor = self.connection.cursor()
return sorted(self.get_table_list(cursor))
def get_table_list(self, cursor):
"""
Returns an unsorted list of names of all tables that exist in the
database.
"""
raise NotImplementedError
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = map(self.table_name_converter, tables)
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in the given table.
"""
raise NotImplementedError
def get_primary_key_column(self, cursor, table_name):
"""
Backends can override this to return the column name of the primary key for the given table.
"""
raise NotImplementedError
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError
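# Standalone sketch (illustrative only): the shape of the dictionary a
# concrete backend's get_indexes() is expected to return. The field names are
# assumptions for the example.
_example_indexes = {
    'id': {'primary_key': True, 'unique': True},
    'email': {'primary_key': False, 'unique': True},
    'last_login': {'primary_key': False, 'unique': False},
}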
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
|
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKWOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPT2Config,
T5Config,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
TFT5ForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPT2LMHeadModel,
RobertaForMaskedLM,
T5ForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
import h5py
self.assertTrue(h5py.version.hdf5_version.startswith("1.10"))
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModel.from_pretrained(model_name, from_pt=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertModel)
model = AutoModel.from_pretrained(model_name, from_tf=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertModel)
@slow
def test_model_for_pretraining_from_pretrained(self):
import h5py
self.assertTrue(h5py.version.hdf5_version.startswith("1.10"))
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForPreTraining)
model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForPreTraining)
@slow
def test_model_for_causal_lm(self):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
model, loading_info = TFAutoModelForCausalLM.from_pretrained(
model_name, output_loading_info=True, from_pt=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFGPT2LMHeadModel)
model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
model, loading_info = AutoModelForCausalLM.from_pretrained(
model_name, output_loading_info=True, from_tf=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, GPT2LMHeadModel)
@slow
def test_lmhead_model_from_pretrained(self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForMaskedLM)
model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_masked_lm(self):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
model_name, output_loading_info=True, from_pt=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForMaskedLM)
model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
model, loading_info = AutoModelForMaskedLM.from_pretrained(
model_name, output_loading_info=True, from_tf=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
model_name, output_loading_info=True, from_pt=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFT5ForConditionalGeneration)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
model_name, output_loading_info=True, from_tf=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForSequenceClassification)
model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, TFBertForQuestionAnswering)
model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForQuestionAnswering)
def test_from_pretrained_identifier(self):
model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
self.assertIsInstance(model, TFBertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
self.assertIsInstance(model, BertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_identifier_from_model_type(self):
model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER, from_pt=True)
self.assertIsInstance(model, TFRobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER, from_tf=True)
self.assertIsInstance(model, RobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
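# Standalone sketch (illustrative only, mirroring the tests above): the
# cross-framework loading pattern exercised here loads PyTorch weights into a
# TensorFlow model (from_pt=True) and TensorFlow weights into a PyTorch model
# (from_tf=True). Guarded by the availability checks so it is a no-op when
# either framework is missing; the checkpoint is the same small test
# identifier used above.
if is_tf_available() and is_torch_available():
    tf_model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
    pt_model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)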
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator related util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import test
from tensorflow.python.util import function_utils
def silly_example_function():
pass
class SillyCallableClass(object):
def __call__(self):
pass
class FnArgsTest(test.TestCase):
def test_simple_function(self):
def fn(a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(fn))
def test_callable(self):
class Foo(object):
def __call__(self, a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(Foo()))
def test_bound_method(self):
class Foo(object):
def bar(self, a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(Foo().bar))
def test_bound_method_no_self(self):
class Foo(object):
def bar(*args): # pylint:disable=no-method-argument
return args[1] + args[2]
self.assertEqual((), function_utils.fn_args(Foo().bar))
def test_partial_function(self):
expected_test_arg = 123
def fn(a, test_arg):
if test_arg != expected_test_arg:
raise ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg=123)
self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))
def test_partial_function_with_positional_args(self):
expected_test_arg = 123
def fn(test_arg, a):
if test_arg != expected_test_arg:
raise ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, 123)
self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))
self.assertEqual(3, wrapped_fn(3))
self.assertEqual(3, wrapped_fn(a=3))
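# Standalone sketch (illustrative only): what fn_args() reports for the
# partial above can also be observed with inspect.signature, which resolves
# functools.partial objects and drops the already-bound arguments. The helper
# function below is an assumption for the example.
import functools
import inspect

def _fn(test_arg, a):
    return a

_wrapped = functools.partial(_fn, 123)  # binds test_arg positionally
assert tuple(inspect.signature(_wrapped).parameters) == ('a',)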
def test_double_partial(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(a, test_arg1, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
raise ValueError('partial does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
def test_double_partial_with_positional_args_in_outer_layer(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(test_arg1, a, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
raise ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, 123)
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
self.assertEqual(3, double_wrapped_fn(3)) # pylint: disable=no-value-for-parameter
self.assertEqual(3, double_wrapped_fn(a=3)) # pylint: disable=no-value-for-parameter
def test_double_partial_with_positional_args_in_both_layers(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(test_arg1, test_arg2, a):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
raise ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, 123) # binds to test_arg1
double_wrapped_fn = functools.partial(wrapped_fn, 456) # binds to test_arg2
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
self.assertEqual(3, double_wrapped_fn(3)) # pylint: disable=no-value-for-parameter
self.assertEqual(3, double_wrapped_fn(a=3)) # pylint: disable=no-value-for-parameter
class HasKwargsTest(test.TestCase):
def test_simple_function(self):
fn_has_kwargs = lambda **x: x
self.assertTrue(function_utils.has_kwargs(fn_has_kwargs))
fn_has_no_kwargs = lambda x: x
self.assertFalse(function_utils.has_kwargs(fn_has_no_kwargs))
def test_callable(self):
class FooHasKwargs(object):
def __call__(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs()))
class FooHasNoKwargs(object):
def __call__(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs()))
def test_bound_method(self):
class FooHasKwargs(object):
def fn(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs().fn))
class FooHasNoKwargs(object):
def fn(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs().fn))
def test_partial_function(self):
expected_test_arg = 123
def fn_has_kwargs(test_arg, **x):
if test_arg != expected_test_arg:
raise ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg=123)
self.assertTrue(function_utils.has_kwargs(wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg):
if test_arg != expected_test_arg:
raise ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg=123)
self.assertFalse(function_utils.has_kwargs(wrapped_fn))
some_arg = 1
self.assertEqual(wrapped_fn(some_arg), some_arg)
def test_double_partial(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn_has_kwargs(test_arg1, test_arg2, **x):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
raise ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertTrue(function_utils.has_kwargs(double_wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(double_wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg1, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
raise ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertFalse(function_utils.has_kwargs(double_wrapped_fn))
some_arg = 1
self.assertEqual(double_wrapped_fn(some_arg), some_arg) # pylint: disable=no-value-for-parameter
def test_raises_type_error(self):
with self.assertRaisesRegex(TypeError,
'should be a callable'):
function_utils.has_kwargs('not a function')
class GetFuncNameTest(test.TestCase):
def testWithSimpleFunction(self):
self.assertEqual(
'silly_example_function',
function_utils.get_func_name(silly_example_function))
def testWithClassMethod(self):
self.assertEqual(
'GetFuncNameTest.testWithClassMethod',
function_utils.get_func_name(self.testWithClassMethod))
def testWithCallableClass(self):
callable_instance = SillyCallableClass()
self.assertRegex(
function_utils.get_func_name(callable_instance),
'<.*SillyCallableClass.*>')
def testWithFunctoolsPartial(self):
partial = functools.partial(silly_example_function)
self.assertRegex(
function_utils.get_func_name(partial), '<.*functools.partial.*>')
def testWithLambda(self):
anon_fn = lambda x: x
self.assertEqual('<lambda>', function_utils.get_func_name(anon_fn))
def testRaisesWithNonCallableObject(self):
with self.assertRaises(ValueError):
function_utils.get_func_name(None)
class GetFuncCodeTest(test.TestCase):
def testWithSimpleFunction(self):
code = function_utils.get_func_code(silly_example_function)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithClassMethod(self):
code = function_utils.get_func_code(self.testWithClassMethod)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithCallableClass(self):
callable_instance = SillyCallableClass()
code = function_utils.get_func_code(callable_instance)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithLambda(self):
anon_fn = lambda x: x
code = function_utils.get_func_code(anon_fn)
self.assertIsNotNone(code)
self.assertRegex(code.co_filename, 'function_utils_test.py')
def testWithFunctoolsPartial(self):
partial = functools.partial(silly_example_function)
code = function_utils.get_func_code(partial)
self.assertIsNone(code)
def testRaisesWithNonCallableObject(self):
with self.assertRaises(ValueError):
function_utils.get_func_code(None)
if __name__ == '__main__':
test.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
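# Standalone sketch (illustrative only): the kind of substitution the template
# URL above goes through. str.format is used here as a stand-in for the
# package-private _format_url_section helper, and all argument values are
# assumptions for the example.
_template = (
    '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'
    '/providers/Microsoft.ContainerRegistry/registries/{registryName}'
    '/replications/{replicationName}'
)
_example_url = _template.format(
    subscriptionId='00000000-0000-0000-0000-000000000000',
    resourceGroupName='my-rg',
    registryName='myregistry',
    replicationName='myreplica',
)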
def build_create_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
replication_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
replication_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_request(
subscription_id: str,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ReplicationsOperations(object):
"""ReplicationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2021_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> "_models.Replication":
"""Gets the properties of the specified replication.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Replication, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Replication
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: "_models.Replication",
**kwargs: Any
) -> "_models.Replication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(replication, 'Replication')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Replication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace
def begin_create(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: "_models.Replication",
**kwargs: Any
) -> LROPoller["_models.Replication"]:
"""Creates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:param replication: The parameters for creating a replication.
:type replication: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.Replication
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Replication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
replication=replication,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
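# Standalone sketch (an assumption rather than documented usage): how a caller
# might drive the long-running create operation above through the generated
# management client. The client class, credential, and every argument value
# here are assumptions for the example.
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerregistry import ContainerRegistryManagementClient

_client = ContainerRegistryManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",
)
_poller = _client.replications.begin_create(
    resource_group_name="my-rg",
    registry_name="myregistry",
    replication_name="eastus2",
    replication={"location": "eastus2"},  # a Replication model (or its dict form)
)
_replication = _poller.result()  # blocks until the LRO completes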
def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a replication from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: "_models.ReplicationUpdateParameters",
**kwargs: Any
) -> "_models.Replication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(replication_update_parameters, 'ReplicationUpdateParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Replication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: "_models.ReplicationUpdateParameters",
**kwargs: Any
) -> LROPoller["_models.Replication"]:
"""Updates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:param replication_update_parameters: The parameters for updating a replication.
:type replication_update_parameters:
~azure.mgmt.containerregistry.v2021_06_01_preview.models.ReplicationUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2021_06_01_preview.models.Replication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
replication_update_parameters=replication_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> Iterable["_models.ReplicationListResult"]:
"""Lists all the replications for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReplicationListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2021_06_01_preview.models.ReplicationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ReplicationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications'} # type: ignore
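# --- Illustrative usage sketch (not part of the generated client) ---
# A minimal example of how the long-running replication operations above might
# be driven from calling code. The client class and credential type are
# assumptions based on the azure-mgmt-containerregistry and azure-identity
# packages; adjust them to whatever client actually wraps these operations.
#
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.containerregistry import ContainerRegistryManagementClient
#
# client = ContainerRegistryManagementClient(DefaultAzureCredential(), "<subscription-id>")
# poller = client.replications.begin_create(
#     resource_group_name="my-rg",          # hypothetical resource group
#     registry_name="myregistry",           # hypothetical registry
#     replication_name="eastus",            # hypothetical replication name
#     replication={"location": "eastus"},
# )
# replication = poller.result()             # blocks until the LRO completes
# for r in client.replications.list("my-rg", "myregistry"):
#     print(r.name)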
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import logging
import os
import urllib
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from mcfw.utils import chunks
from rogerthat.bizz import channel
from rogerthat.bizz.profile import create_user_profile
from rogerthat.consts import DEBUG
from rogerthat.dal.mobile import get_user_active_mobiles_count
from rogerthat.dal.profile import get_profile_info, get_service_or_user_profile
from rogerthat.dal.service import get_service_identities_by_service_identity_users
from rogerthat.pages.legal import get_current_document_version, DOC_TERMS_SERVICE, DOC_TERMS
from rogerthat.restapi.roles import login_as
from rogerthat.rpc import users
from rogerthat.settings import get_server_settings
from rogerthat.utils.service import create_service_identity_user
class CrossDomainDotXml(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/xml'
self.response.out.write("""<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<allow-access-from domain="*" />
</cross-domain-policy>
""")
class RobotsTxt(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write("""User-agent: *
Disallow:
""")
class MainPage(webapp.RequestHandler):
def head(self):
pass
def get_request_host(self):
host = os.environ.get('HTTP_X_FORWARDED_HOST')
if not host:
host = os.environ.get('HTTP_HOST')
return host
def get_custom_signin_path(self, host):
settings = get_server_settings()
paths = settings.customSigninPaths
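        # customSigninPaths appears to be stored as a flat list alternating
        # host and path; chunks(paths, 2) pairs them into (host, path) tuples.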
mapping = dict((h, p) for h, p in chunks(paths, 2))
return mapping.get(host)
def get(self):
user = users.get_current_user()
if not user:
signin_path = self.get_custom_signin_path(self.get_request_host())
if signin_path and self.request.path != signin_path:
return self.redirect(str(signin_path))
owning_user_services = None
should_show_service_picker = False
session_user = None
session_service_identity_user = None
loading_enabled = False
if user:
session_ = users.get_current_session()
# Support reloading
user_account = self.request.get("user")
if user_account and user_account != user.email():
if not login_as(user_account):
self.response.write("<html><body>You don't have access to account %s. This problem is logged and may cause legal action against you.</body></html>" % user_account)
return
user = users.get_current_user()
session_ = users.get_current_session()
mobile_count = get_user_active_mobiles_count(user)
my_profile_info = get_profile_info(user, skip_warning=True)
if not my_profile_info:
my_profile_info = create_user_profile(user, user.email()[:40]) # todo communities set community_id
myavatarid = my_profile_info.avatarId
if my_profile_info.isServiceIdentity:
myname = my_profile_info.name or my_profile_info.qualifiedIdentifier or user.email()
else:
myname = my_profile_info.name or user.email()
if my_profile_info.owningServiceEmails and my_profile_info.isCreatedForService:
should_show_service_picker = True
my_owning_service_identity_users = [create_service_identity_user(users.User(owning_service_email)) for owning_service_email in my_profile_info.owningServiceEmails]
my_owning_service_identities = get_service_identities_by_service_identity_users(my_owning_service_identity_users)
result = list()
for si in my_owning_service_identities:
result.append(dict(is_default=si.is_default,
service_user=si.service_user.email(),
service_identity_user=si.service_identity_user.email(),
name=si.name,
description=si.description,
avatar_url=si.avatarUrl))
owning_user_services = result
myname = myname.replace("\\", "\\\\").replace("'", "\\'")
is_service = my_profile_info.isServiceIdentity
loading_enabled = not is_service
user_services = session_.service_users
session_user = session_.user
session_service_identity_user = session_.service_identity_user
else:
mobile_count = 0
myavatarid = None
myname = None
is_service = False
user_services = None
owning_user_services = None
template_params = {
'continue': "/",
'debug': DEBUG,
'user': user,
'myavatarid': myavatarid,
'myname': myname,
'mobile_count': mobile_count,
'is_service': is_service,
'session': users.create_logout_url("/") if user else users.create_login_url("/"),
"loading_enabled": loading_enabled,
'user_services': user_services,
'owning_user_services': owning_user_services,
'session_user': session_user,
'session_service_identity_user': session_service_identity_user,
'service_profile': None,
'email': self.request.get("email", None)}
channel.append_firebase_params(template_params)
if user:
profile = get_service_or_user_profile(user)
if is_service:
if profile.tos_version != get_current_document_version(DOC_TERMS_SERVICE) and not profile.solution:
logging.info('Redirecting to service terms and conditions page')
self.redirect('/terms-and-conditions')
return
elif profile.tos_version != get_current_document_version(DOC_TERMS) and not profile.isCreatedForService:
logging.info('Redirecting to user terms and conditions page')
self.redirect('/terms-and-conditions')
return
else:
profile = None
if is_service:
service_profile = profile
template_params['service_profile'] = service_profile
if not self.request.get('sp') and service_profile.solution:
params = self.request.GET
redirect_url = '/%s/' % service_profile.solution
if params:
params = dict((k, v.decode('utf8')) for k, v in params.iteritems())
redirect_url = "%s?%s" % (redirect_url, urllib.urlencode(params))
logging.info("Redirecting to url: %s" % redirect_url)
self.redirect(redirect_url)
return
if user:
if should_show_service_picker:
page = "pick_account.html"
else:
page = "main.html"
elif self.request.get("sp", None):
template_params["bg_image_uri"] = _get_front_page_image_by_ip(os.environ.get('HTTP_X_FORWARDED_FOR', None))
page = 'main_unauthenticated.html'
else:
self.redirect('/customers/signin')
return
path = os.path.join(os.path.dirname(__file__), page)
self.response.out.write(template.render(path, template_params))
FRONT_PAGE_IMAGES = [([41, 189, 192, 0], [41, 189, 223, 255], "/static/images/bg-image-cd.jpg"),
([41, 243, 0, 0], [41, 243, 255, 255], "/static/images/bg-image-cd.jpg"),
([197, 189, 0, 0], [197, 189, 127, 255], "/static/images/bg-image-cd.jpg")]
def _get_front_page_image_by_ip(ip_addresses):
if ip_addresses:
exceptions = list()
splitted = ip_addresses.split(',')
for ip_address in splitted:
try:
ip_parts = [int(part) for part in ip_address.strip().split(".")]
for from_ip, to_ip, url in FRONT_PAGE_IMAGES:
if from_ip <= ip_parts <= to_ip:
return url
except Exception, e:
logging.debug("Could not determine background image for IP '%s'.", ip_address, exc_info=True)
exceptions.append(e)
if splitted and len(splitted) == len(exceptions):
logging.warn("Could not determine background image for IP '%s'. Showing the default background image.",
ip_addresses)
return "/static/images/bg-image.jpg"
|
|
import windows.security
from windows.security import SecurityDescriptor
from .pfwtest import *
import ctypes
# CC -> Create-Child -> 1
# GR -> Generic read -> 0x80000000L
# AN -> Anonymous -> S-1-5-7
TEST_SDDL = [
"O:ANG:AND:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)(D;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)",
"O:ANG:AND:(A;;GR;;;S-1-0-0)",
"O:ANG:AND:(OA;;CC;;00000042-0043-0044-0045-000000000000;S-1-0-0)",
"O:ANG:AND:(OA;;CCGR;00004242-0043-0044-0045-000000000000;00000042-0043-0044-0045-000000000000;S-1-0-0)",
]
@pytest.mark.parametrize("sddl", TEST_SDDL)
def test_security_descriptor_from_string(sddl):
sd = SecurityDescriptor.from_string(sddl)
TEST_BIN_SDDL = [
# TapiSrv security descriptor
b'\x01\x00\x14\x80\x8c\x00\x00\x00\x98\x00\x00\x00\x14\x00\x00\x000\x00\x00\x00\x02\x00\x1c\x00\x01\x00\x00\x00\x02\x80\x14\x00\xff\x01\x0f\x00\x01\x01\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x02\x00\\\x00\x04\x00\x00\x00\x00\x00\x14\x00\xfd\x01\x02\x00\x01\x01\x00\x00\x00\x00\x00\x05\x12\x00\x00\x00\x00\x00\x18\x00\xff\x01\x0f\x00\x01\x02\x00\x00\x00\x00\x00\x05 \x00\x00\x00 \x02\x00\x00\x00\x00\x14\x00\x9d\x01\x02\x00\x01\x01\x00\x00\x00\x00\x00\x05\x04\x00\x00\x00\x00\x00\x14\x00\x9d\x01\x02\x00\x01\x01\x00\x00\x00\x00\x00\x05\x06\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x05\x12\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x05\x12\x00\x00\x00',
]
@pytest.mark.parametrize("binsd", TEST_BIN_SDDL)
def test_security_descriptor_from_binary(binsd):
sd = SecurityDescriptor.from_binary(binsd)
def test_empty_security_descriptor():
esd = SecurityDescriptor.from_string("")
    assert esd.owner is None  # Should be None, not a NULL PSID
    assert esd.group is None  # Should be None, not a NULL PSID
assert esd.dacl is None
assert esd.sacl is None
def test_security_descriptor__str__():
sddl = "D:(A;;FA;;;WD)"
sd = SecurityDescriptor.from_string(sddl)
assert str(sd) == sddl
def test_pacl_object():
SDDL = "O:ANG:S-1-2-3D:(A;;;;;S-1-42-42)(A;;;;;S-1-42-43)(A;;;;;S-1-42-44)"
dacl = SecurityDescriptor.from_string(SDDL).dacl
assert dacl is not None
assert len(dacl) == 3 # __len__
assert len(list(dacl)) == 3 # __iter__
assert len(dacl.aces) == 3
assert ctypes.addressof(dacl[0]) == ctypes.addressof(dacl[0]) # __getitem__
    assert len([ctypes.addressof(dacl[i]) for i in range(3)]) == 3
with pytest.raises(IndexError):
x = dacl[3]
def test_sec_descrip_owner_group():
SDDL = "O:ANG:S-1-2-3"
sd = SecurityDescriptor.from_string(SDDL)
assert sd.owner.to_string() == "S-1-5-7"
assert sd.group.to_string() == "S-1-2-3"
assert sd.dacl is None
assert sd.sacl is None
def test_mask_sid_ace():
SDDL = "D:(A;CIOI;CCGR;;;S-1-42-42)"
# OBJECT_INHERIT_ACE(0x1L) | CONTAINER_INHERIT_ACE(0x2L)
# Create-Child | GENERIC_READ(0x80000000L)
sd = SecurityDescriptor.from_string(SDDL)
dacl = sd.dacl
assert dacl is not None
ace = dacl[0]
# Test the ACE
assert ace.Header.AceType == gdef.ACCESS_ALLOWED_ACE_TYPE
# flags + flags split
assert ace.Header.AceFlags == gdef.OBJECT_INHERIT_ACE | gdef.CONTAINER_INHERIT_ACE
assert set(ace.Header.flags) == {gdef.OBJECT_INHERIT_ACE, gdef.CONTAINER_INHERIT_ACE}
# mask + mask split
assert ace.Mask == 1 | gdef.GENERIC_READ
assert set(ace.mask) == {1, gdef.GENERIC_READ}
# SID
assert ace.sid.to_string() == "S-1-42-42"
SGUID = gdef.GUID.from_string
COMPLEXE_SDDL_GUID = [
("D:(OA;;;00000042-0043-0044-0045-000000000001;;S-1-0-0)",
SGUID("00000042-0043-0044-0045-000000000001"),
None),
("D:(OA;;;;00000042-0043-0044-0045-000000000000;S-1-0-0)",
None,
SGUID("00000042-0043-0044-0045-000000000000")),
("D:(OA;;;00000042-0043-0044-0045-000000000002;00000042-0043-0044-0045-000000000003;S-1-0-0)",
SGUID("00000042-0043-0044-0045-000000000002"),
SGUID("00000042-0043-0044-0045-000000000003")),
("D:(OA;;;;;S-1-0-0)",
None,
None),
]
@pytest.mark.parametrize("sddl, obj_guid, inherited_object_guid", COMPLEXE_SDDL_GUID)
def test_complex_ace_guid_sid(sddl, obj_guid, inherited_object_guid):
sd = SecurityDescriptor.from_string(sddl)
assert sd.dacl is not None
ace = sd.dacl[0]
assert ace.sid.to_string() == "S-1-0-0"
if obj_guid is None and inherited_object_guid is None:
# No GUID -> transformed in ACCESS_ALLOWED_ACE_TYPE
assert ace.Header.AceType == gdef.ACCESS_ALLOWED_ACE_TYPE
return
assert ace.object_type == obj_guid
assert ace.inherited_object_type == inherited_object_guid
ALL_DACL_ACE_TYPES = [
("D:(A;;;;;S-1-2-3)", gdef.ACCESS_ALLOWED_ACE_TYPE),
("D:(D;;;;;S-1-2-3)", gdef.ACCESS_DENIED_ACE_TYPE),
("D:(OA;;;;00000042-0043-0044-0045-000000000000;S-1-0-0)",
gdef.ACCESS_ALLOWED_OBJECT_ACE_TYPE),
("D:(OD;;;;00000042-0043-0044-0045-000000000001;S-1-0-0)",
gdef.ACCESS_DENIED_OBJECT_ACE_TYPE),
("D:AI(XA;;GR;;;WD;(YOLO))", gdef.ACCESS_ALLOWED_CALLBACK_ACE_TYPE),
("D:AI(XD;;GR;;;WD;(YOLO))", gdef.ACCESS_DENIED_CALLBACK_ACE_TYPE),
("D:AI(ZA;;GR;;00000042-0043-0044-0045-000000000001;WD;(YOLO))", gdef.ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE),
    # No SDDL string defined for gdef.ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE
]
@pytest.mark.parametrize("sddl, ace_type", ALL_DACL_ACE_TYPES)
def test_ace_dacl_subclass(sddl, ace_type):
sd = SecurityDescriptor.from_string(sddl)
dacl = sd.dacl
assert len(dacl) == 1
ace = dacl[0] # Will raise if AceHeader is not handled
assert ace.Header.AceType == ace_type
# SACL STUFF
ALL_SACL_ACE_TYPES = [
("S:(AU;;;;;AN)", gdef.SYSTEM_AUDIT_ACE_TYPE),
("S:(ML;;;;;S-1-16-4000)", gdef.SYSTEM_MANDATORY_LABEL_ACE_TYPE),
    # S-1-19-512-4096 as retrieved from an ACE on a directory in C:\Program Files\WindowsApps\
("S:(TL;;;;;S-1-19-512-4096)", gdef.SYSTEM_PROCESS_TRUST_LABEL_ACE_TYPE),
("S:(SP;;;;;S-1-17-1)", gdef.SYSTEM_SCOPED_POLICY_ID_ACE_TYPE),
("S:(OU;;;;00000042-0043-0044-0045-000000000000;AN)", gdef.SYSTEM_AUDIT_OBJECT_ACE_TYPE),
("S:(XU;;;;;S-1-2-3;(YOLO))", gdef.SYSTEM_AUDIT_CALLBACK_ACE_TYPE),
    ## Reserved for future use (RFS): not handled by ADVAPI.dll
# ("S:(AL;;;;;S-1-2-3)", gdef.SYSTEM_ALARM_OBJECT_ACE_TYPE),
#("S:(OL;;;;00000042-0043-0044-0045-000000000000;AN)", gdef.SYSTEM_ALARM_OBJECT_ACE_TYPE),
## NO SDDL FOR:
# SYSTEM_ALARM_CALLBACK_ACE_TYPE
# SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE
# SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
]
@pytest.mark.parametrize("sddl, ace_type", ALL_SACL_ACE_TYPES)
def test_ace_sacl_subclass(sddl, ace_type):
sd = SecurityDescriptor.from_string(sddl)
sacl = sd.sacl
assert len(sacl) == 1
ace = sacl[0] # Will raise if AceHeader is not handled
assert ace.Header.AceType == ace_type
RESOURCE_ATTRIBUTES_SDDLS = [
("""S:(RA;;;;;WD; ("TestName",TI,0,-2, -1, 0, 1, 2))""",
(-2, -1, 0, 1, 2 )),
("""S:(RA;;;;;WD; ("TestName",TU,0,3,4,42))""",
(3, 4, 42)),
("""S:(RA;;;;;WD; ("TestName",TS,0,"Windows","SQL", ""))""",
("Windows", "SQL", "")),
("""S:(RA;;;;;WD; ("TestName",TD,0, AN, S-1-2-3-4-5-6-7-8-9))""",
(gdef.PSID.from_string("S-1-5-7"),
gdef.PSID.from_string("S-1-2-3-4-5-6-7-8-9"))),
("""S:(RA;;;;;WD; ("TestName",TX,0, 42000042, 0123456789abcdef))""",
(b"B\x00\x00B", b"\x01\x23\x45\x67\x89\xab\xcd\xef")),
("""S:(RA;;;;;WD; ("TestName",TB,0, 0, 1, 0, 0, 1))""",
(False, True, False, False, True)),
]
@pytest.mark.parametrize("sddl, expected_values", RESOURCE_ATTRIBUTES_SDDLS)
def test_ace_resource_attribute(sddl, expected_values):
sd = SecurityDescriptor.from_string(sddl)
ra = sd.sacl[0]
assert ra.Header.AceType == gdef.SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE
attr = ra.attribute
assert attr.name == "TestName"
assert attr.values == expected_values
CONDITIONAL_SDDLS = [
("D:AI(XA;;GR;;;WD;(ATTR1))", b"ATTR1"),
("D:AI(XD;;GR;;;WD;(ATTR2))", b"ATTR2"),
("S:AI(XU;;GR;;;WD;(ATTR3))", b"ATTR3")
]
@pytest.mark.parametrize("sddl, expected_value", CONDITIONAL_SDDLS)
def test_conditional_ace_applicationdata(sddl, expected_value):
sd = SecurityDescriptor.from_string(sddl)
acl = sd.dacl
if acl is None:
acl = sd.sacl
ace = acl[0]
appdata = ace.application_data
# https://msdn.microsoft.com/en-us/library/hh877860.aspx
assert appdata.startswith(b"artx")
assert expected_value in appdata.replace(b"\x00", b"")
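# --- Illustrative usage sketch (mirrors what the tests above exercise) ---
# Parsing an SDDL string and walking its DACL with the same PythonForWindows
# API the tests use. The SDDL value itself is arbitrary example data.
#
# from windows.security import SecurityDescriptor
#
# sd = SecurityDescriptor.from_string("O:ANG:S-1-2-3D:(A;;GR;;;S-1-42-42)")
# print(sd.owner.to_string())       # S-1-5-7 (AN -> Anonymous)
# print(sd.group.to_string())       # S-1-2-3
# for ace in sd.dacl:               # the DACL is iterable, indexable, has len()
#     print(ace.Header.AceType, hex(ace.Mask), ace.sid.to_string())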
|
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.preprocessing import LabelEncoder
from .print_message import PrintMessageMixin
class Visualizer(PrintMessageMixin):
"""
Provides a number of descriptive functions for creating useful visualizations.
Initialize the class by passing in a data set and then call the functions
individually to create the plots. Each method is designed to adapt the character of
the visualization based on the inputs provided.
Parameters
----------
data : array-like
The data set to use to create visualizations.
fig_size : int, optional, default 16
Size of the plots.
verbose : boolean, optional, default True
If true, messages will be written to the console.
logger : object, optional, default None
An instantiated log writer with an open file handle. If provided, messages
will be written to the log file.
"""
def __init__(self, data, fig_size=16, verbose=True, logger=None):
PrintMessageMixin.__init__(self, verbose, logger)
self.data = data
self.fig_size = fig_size
def feature_distributions(self, viz_type='hist', bins=None, max_features=None, grid_size=4):
"""
Generates feature distribution plots (histogram or kde) for each feature.
Parameters
----------
viz_type : {'hist', 'kde', 'both'}, optional, default 'hist'
Type of plot used for visualization.
bins : int, optional, default None
Number of bins to use in histogram plots.
max_features : int, optional, default None
The maximum number of columns in the data to plot.
grid_size : int, optional, default 4
Number of vertical/horizontal plots to display in a single window.
"""
self.print_message('Generating feature distribution plots...')
if viz_type == 'hist':
hist = True
kde = False
elif viz_type == 'kde':
hist = False
kde = True
elif viz_type == 'both':
hist = True
kde = True
else:
raise Exception('Visualization type not supported.')
data = self.data.fillna(0)
if max_features:
data = data.iloc[:, :max_features]
n_features = len(data.columns)
plot_size = grid_size ** 2
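        # Ceiling division: e.g. 18 features on a 4x4 grid (16 plots per
        # figure) gives 18 // 16 + 1 = 2 figures.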
n_plots = n_features // plot_size if n_features % plot_size == 0 else n_features // plot_size + 1
for i in range(n_plots):
fig, ax = plt.subplots(grid_size, grid_size, figsize=(self.fig_size, self.fig_size / 2))
for j in range(plot_size):
index = (i * plot_size) + j
if index < n_features:
if type(data.iloc[0, index]) is str:
sb.countplot(x=data.columns[index], data=data,
ax=ax[j // grid_size, j % grid_size])
else:
sb.distplot(a=data.iloc[:, index], bins=bins, hist=hist, kde=kde,
label=data.columns[index], ax=ax[j // grid_size, j % grid_size],
kde_kws={"shade": True})
fig.tight_layout()
self.print_message('Plot generation complete.')
def feature_correlations(self, color_palette='coolwarm', max_features=None, annotate=False):
"""
Generates a correlation matrix heat map.
Parameters
----------
color_palette : string, optional, default 'coolwarm'
Seaborn color palette.
max_features : int, optional, default None
The maximum number of columns in the data to plot.
annotate : boolean, optional, default False
Annotate the heat map with labels.
"""
self.print_message('Generating feature correlations plot...')
if max_features:
corr = self.data.iloc[:, :max_features].corr()
else:
corr = self.data.corr()
if annotate:
corr = np.round(corr, 2)
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
fig, ax = plt.subplots(figsize=(self.fig_size, self.fig_size * 3 / 4))
colormap = sb.blend_palette(sb.color_palette(color_palette), as_cmap=True)
sb.heatmap(corr, mask=mask, cmap=colormap, annot=annotate)
fig.tight_layout()
self.print_message('Plot generation complete.')
def variable_relationship(self, quantitative_vars, category_vars=None,
joint_viz_type='scatter', pair_viz_type='scatter',
factor_viz_type='strip', pair_diag_type='kde'):
"""
Generates plots showing the relationship between several variables. The
combination of plots generated depends on the number of quantitative and discrete
(categorical or ordinal) variables to be analyzed. Plots are rendered using the
seaborn statistical visualization package.
Parameters
----------
quantitative_vars : array-like
List of variable names to analyze quantitatively.
category_vars : array-like, optional, default None
List of variable names to analyze discretely.
joint_viz_type : {'scatter', 'reg', 'resid', 'kde', 'hex'},
optional, default 'scatter'
Method to use to display two quantitative variables together.
pair_viz_type : {'scatter', 'reg'}, optional, default 'scatter'
Method to use to display more than two quantitative variables together.
factor_viz_type : {'point', 'bar', 'count', 'box', 'violin', 'strip'},
optional, default 'strip'
Method to use to display one quantitative variable along with categorical
variables.
pair_diag_type : {'hist', 'kde'}, optional, default 'kde'
Display type for the diagonal plots in a pair plot.
"""
self.print_message('Generating variable relationship plots...')
if quantitative_vars is None or len(quantitative_vars) == 0:
raise Exception('Must provide at least one quantitative variable.')
if joint_viz_type not in ['scatter', 'reg', 'resid', 'kde', 'hex']:
raise Exception('Invalid value for joint_viz_type.')
if pair_viz_type not in ['scatter', 'reg']:
raise Exception('Invalid value for pair_viz_type.')
if factor_viz_type not in ['point', 'bar', 'count', 'box', 'violin', 'strip']:
raise Exception('Invalid value for factor_viz_type.')
if pair_diag_type not in ['hist', 'kde']:
raise Exception('Invalid value for pair_diag_type.')
sub_data = self.data[quantitative_vars]
fig, ax = plt.subplots(1, 1, figsize=(self.fig_size, self.fig_size * 3 / 4))
sb.violinplot(data=sub_data, ax=ax)
fig.tight_layout()
if category_vars is not None:
fig, ax = plt.subplots(len(quantitative_vars), len(category_vars),
figsize=(self.fig_size, self.fig_size * 3 / 4))
if len(quantitative_vars) == 1:
if len(category_vars) == 1:
sb.violinplot(x=quantitative_vars[0], y=category_vars[0], data=self.data, ax=ax)
else:
for i, cat in enumerate(category_vars):
sb.violinplot(x=quantitative_vars[0], y=cat, data=self.data, ax=ax[i])
else:
for i, var in enumerate(quantitative_vars):
if len(category_vars) == 1:
sb.violinplot(x=var, y=category_vars[0], data=self.data, ax=ax[i])
else:
for j, cat in enumerate(category_vars):
sb.violinplot(x=var, y=cat, data=self.data, ax=ax[i, j])
fig.tight_layout()
if category_vars is None:
if len(quantitative_vars) == 2:
sb.jointplot(x=quantitative_vars[0], y=quantitative_vars[1], data=self.data,
kind=joint_viz_type, size=self.fig_size)
else:
sb.pairplot(data=self.data, vars=quantitative_vars, kind=pair_viz_type,
diag_kind=pair_diag_type, size=self.fig_size / len(quantitative_vars))
else:
if len(quantitative_vars) == 1:
if len(category_vars) == 1:
sb.factorplot(x=category_vars[0], y=quantitative_vars[0],
data=self.data, kind=factor_viz_type, size=self.fig_size)
else:
sb.factorplot(x=category_vars[0], y=quantitative_vars[0], hue=category_vars[1],
data=self.data, kind=factor_viz_type, size=self.fig_size)
elif len(quantitative_vars) == 2:
if len(category_vars) == 1:
sb.lmplot(x=quantitative_vars[0], y=quantitative_vars[1],
data=self.data, row=category_vars[0], size=self.fig_size)
else:
sb.lmplot(x=quantitative_vars[0], y=quantitative_vars[1], data=self.data,
col=category_vars[0], row=category_vars[1], size=self.fig_size)
else:
sb.pairplot(data=self.data, hue=category_vars[0], vars=quantitative_vars,
kind=pair_viz_type, diag_kind=pair_diag_type,
size=self.fig_size / len(quantitative_vars))
self.print_message('Plot generation complete.')
def sequential_relationships(self, time='index', smooth_method=None, window=1, grid_size=4):
"""
Generates line plots to visualize sequential data.
Parameters
----------
time : string, optional, default 'index'
Datetime input column to use for visualization.
smooth_method : {'mean', 'var', 'skew', 'kurt', None}, optional, default None
Apply a function to the time series to smooth out variations.
window : int, optional, default 1
Size of the moving window used to calculate the smoothing function.
grid_size : int, optional, default 4
Number of vertical/horizontal plots to display in a single window.
"""
self.print_message('Generating sequential relationship plots...')
if smooth_method not in ['mean', 'var', 'skew', 'kurt', None]:
raise Exception('Invalid value for smooth_method.')
data = self.data.fillna(0)
        if time != 'index':
data = data.reset_index()
data = data.set_index(time)
data.index.name = None
n_features = len(data.columns)
plot_size = grid_size ** 2
n_plots = n_features // plot_size if n_features % plot_size == 0 else n_features // plot_size + 1
for i in range(n_plots):
fig, ax = plt.subplots(grid_size, grid_size, sharex=True,
figsize=(self.fig_size, self.fig_size / 2))
for j in range(plot_size):
index = (i * plot_size) + j
if index < n_features:
if type(data.iloc[0, index]) is not str:
if smooth_method == 'mean':
data.iloc[:, index] = pd.rolling_mean(data.iloc[:, index], window)
elif smooth_method == 'var':
data.iloc[:, index] = pd.rolling_var(data.iloc[:, index], window)
elif smooth_method == 'skew':
data.iloc[:, index] = pd.rolling_skew(data.iloc[:, index], window)
elif smooth_method == 'kurt':
data.iloc[:, index] = pd.rolling_kurt(data.iloc[:, index], window)
data.iloc[:, index].plot(ax=ax[j // grid_size, j % grid_size], kind='line',
legend=False, title=data.columns[index])
fig.tight_layout()
self.print_message('Plot generation complete.')
def transform(self, transform, X_columns, y_column=None, task=None,
n_components=2, scatter_size=50):
"""
Generates plots to visualize the data transformed by a linear or manifold
algorithm.
Parameters
----------
transform : array-like
Transform object. Can be a pipeline with multiple transforms.
X_columns : list
List of columns to use to fit the transform.
y_column : string, optional, default None
Target column. Used to color input values for label-based visualizations.
task : {'classification', 'regression', None}, optional, default None
Specifies if the data set is being used for classification or regression.
If one of these is specified, the plots will color input values using the
provided labels.
n_components : int, optional, default 2
Number of components of the transformed data set to visualize.
scatter_size : int, optional, default 50
Size of the points on the scatter plot.
"""
self.print_message('Generating transform plot...')
if task not in ['classification', 'regression', None]:
raise Exception('Invalid value for task.')
if hasattr(transform, 'n_components'):
transform.n_components = n_components
X = self.data[X_columns].values
X = transform.fit_transform(X)
y = None
encoder = None
if y_column:
y = self.data[y_column].values
if task == 'classification':
encoder = LabelEncoder()
y = encoder.fit_transform(y)
if y_column and task == 'classification':
class_count = len(np.unique(y))
colors = sb.color_palette('hls', class_count)
for i in range(n_components - 1):
fig, ax = plt.subplots(figsize=(self.fig_size, self.fig_size * 3 / 4))
for j in range(class_count):
ax.scatter(X[y == j, i], X[y == j, i + 1], s=scatter_size, c=colors[j],
label=encoder.classes_[j])
ax.set_title('Components ' + str(i + 1) + ' and ' + str(i + 2))
ax.legend()
fig.tight_layout()
elif y_column and task == 'regression':
for i in range(n_components - 1):
fig, ax = plt.subplots(figsize=(self.fig_size, self.fig_size * 3 / 4))
sc = ax.scatter(X[:, i], X[:, i + 1], s=scatter_size, c=y, cmap='Blues')
ax.set_title('Components ' + str(i + 1) + ' and ' + str(i + 2))
ax.legend()
fig.colorbar(sc)
fig.tight_layout()
else:
for i in range(n_components - 1):
fig, ax = plt.subplots(figsize=(self.fig_size, self.fig_size * 3 / 4))
ax.scatter(X[:, i], X[:, i + 1], s=scatter_size, label='None')
ax.set_title('Components ' + str(i + 1) + ' and ' + str(i + 2))
ax.legend()
fig.tight_layout()
self.print_message('Plot generation complete.')
def feature_importance(self, X_columns, y_column, average=False,
task='classification', **kwargs):
"""
Visualize the predictive importance of each feature in a data set using a trained
gradient boosting model.
Parameters
----------
        X_columns : list
            List of feature columns used to fit the model.
        y_column : string
            Target column used to fit the model.
average : boolean, optional, default False
Smooth the results by fitting the model multiple times to reduce random
variance.
task : {'classification', 'regression'}, optional, default 'classification'
Specifies if the target is continuous or categorical.
**kwargs : dict, optional
Arguments to pass to the scikit-learn gradient boosting model to improve the
quality of the fit. If none are provided then the defaults will be used.
"""
self.print_message('Generating feature importance plot...')
if task not in ['classification', 'regression']:
raise Exception('Invalid value for task.')
X = self.data[X_columns]
y = self.data[y_column]
if task == 'classification':
model = GradientBoostingClassifier(**kwargs)
else:
model = GradientBoostingRegressor(**kwargs)
if average:
feature_importance = np.ones((1, X.shape[1]))
for i in range(10):
model.fit(X, y)
temp = model.feature_importances_.reshape(1, -1)
feature_importance = np.append(feature_importance, temp, axis=0)
feature_importance = feature_importance[1:, :].mean(axis=0).reshape(1, -1)
else:
model.fit(X, y)
feature_importance = model.feature_importances_.reshape(1, -1)
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance.ravel())
pos = np.arange(sorted_idx.shape[0])
fig, ax = plt.subplots(figsize=(self.fig_size, self.fig_size * 3 / 4))
ax.set_title('Variable Importance')
ax.barh(pos, feature_importance.ravel()[sorted_idx], align='center')
ax.set_yticks(pos)
ax.set_yticklabels([X_columns[i] for i in sorted_idx])
ax.set_xlabel('Relative Importance')
fig.tight_layout()
self.print_message('Plot generation complete.')
def partial_dependence(self, X_columns, y_column, var_column, average=False,
task='classification', grid_resolution=100, **kwargs):
"""
Visualize the marginal effect of a single variable on a dependent variable,
holding all other variables constant. Generated via a trained gradient
boosting model.
Parameters
----------
        X_columns : list
            List of feature columns used to fit the model.
        y_column : string
            Target column used to fit the model.
var_column : string
The name of the variable to compare to the response value.
average : boolean, optional, default False
Smooth the results by fitting the model multiple times to reduce random
variance.
task : {'classification', 'regression'}, optional, default 'classification'
Specifies if the target is continuous or categorical.
grid_resolution : int, optional, default 100
Defines the granularity of the segments in the plot.
**kwargs : dict, optional
Arguments to pass to the scikit-learn gradient boosting model to improve the
quality of the fit. If none are provided then the defaults will be used.
"""
self.print_message('Generating partial dependence plot...')
if task not in ['classification', 'regression']:
raise Exception('Invalid value for task.')
X = self.data[X_columns]
y = self.data[y_column]
index = X_columns.index(var_column)
distinct = len(np.unique(self.data[var_column]))
if distinct < grid_resolution:
grid_resolution = distinct
if task == 'classification':
model = GradientBoostingClassifier(**kwargs)
else:
model = GradientBoostingRegressor(**kwargs)
if average:
response = np.ones((1, grid_resolution))
axes = np.ones((1, grid_resolution))
for i in range(10):
model.fit(X, y)
a, b = partial_dependence(model, [index], X=X, grid_resolution=grid_resolution)
response = np.append(response, a, axis=0)
axes = np.append(axes, b[0].reshape((1, grid_resolution)), axis=0)
response = response[1:, :].mean(axis=0).reshape((grid_resolution, 1))
axes = axes[1:, :].mean(axis=0).reshape((grid_resolution, 1))
else:
model.fit(X, y)
response, axes = partial_dependence(model, [index], X=X, grid_resolution=grid_resolution)
response = response.reshape((grid_resolution, 1))
axes = axes[0].reshape((grid_resolution, 1))
df = pd.DataFrame(np.append(axes, response, axis=1), columns=[var_column, y_column])
df.plot(x=var_column, y=y_column, kind='line', figsize=(self.fig_size, self.fig_size * 3 / 4))
self.print_message('Plot generation complete.')
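# --- Illustrative usage sketch (assumptions noted inline) ---
# A minimal example of driving the Visualizer class above with a pandas
# DataFrame. The file path and column names are purely hypothetical.
#
# import pandas as pd
#
# df = pd.read_csv('iris.csv')                       # hypothetical data set
# viz = Visualizer(df, fig_size=12, verbose=True)
# viz.feature_distributions(viz_type='hist', bins=20)
# viz.feature_correlations(annotate=True)
# viz.feature_importance(X_columns=['sepal_length', 'sepal_width'],
#                        y_column='species', task='classification')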
|
|
r"""
Study and StudyPerson objects (:mod:`qiita_db.study`)
=====================================================
.. currentmodule:: qiita_db.study
This module provides the implementation of the Study and StudyPerson classes.
The study class allows access to all basic information including name and
pmids associated with the study, as well as returning ids for the data,
sample template, owner, and shared users. It is the central hub for creating,
deleting, and accessing a study in the database.
Contacts are taken care of by the StudyPerson class. This holds the contact's
name, email, address, and phone of the various persons in a study, e.g. The PI
or lab contact.
Classes
-------
.. autosummary::
:toctree: generated/
Study
StudyPerson
Examples
--------
Studies contain contact people (PIs, Lab members, and EBI contacts). These
people have names, emails, addresses, and phone numbers. The email and name are
the minimum required information.
>>> from qiita_db.study import StudyPerson # doctest: +SKIP
>>> person = StudyPerson.create('Some Dude', '[email protected]',
... address='111 fake street',
... phone='111-121-1313') # doctest: +SKIP
>>> person.name # doctest: +SKIP
Some Dude
>>> person.email # doctest: +SKIP
somedude@foobar
>>> person.address # doctest: +SKIP
111 fake street
>>> person.phone # doctest: +SKIP
111-121-1313
A study requires a minimum of information to be created. Note that the people
must be passed as StudyPerson objects and the owner as a User object.
>>> from qiita_db.study import Study # doctest: +SKIP
>>> from qiita_db.user import User # doctest: +SKIP
>>> info = {
... "timeseries_type_id": 1,
... "metadata_complete": True,
... "mixs_compliant": True,
... "number_samples_collected": 25,
... "number_samples_promised": 28,
... "portal_type_id": 3,
... "study_alias": "TST",
... "study_description": "Some description of the study goes here",
... "study_abstract": "Some abstract goes here",
... "emp_person_id": StudyPerson(2),
... "principal_investigator_id": StudyPerson(3),
... "lab_person_id": StudyPerson(1)} # doctest: +SKIP
>>> owner = User('[email protected]') # doctest: +SKIP
>>> Study(owner, "New Study Title", 1, info) # doctest: +SKIP
You can also add a study to an investigation by passing the investigation
object while creating the study.
>>> from qiita_db.study import Study # doctest: +SKIP
>>> from qiita_db.user import User # doctest: +SKIP
>>> from qiita_db.study import Investigation # doctest: +SKIP
>>> info = {
... "timeseries_type_id": 1,
... "metadata_complete": True,
... "mixs_compliant": True,
... "number_samples_collected": 25,
... "number_samples_promised": 28,
... "portal_type_id": 3,
... "study_alias": "TST",
... "study_description": "Some description of the study goes here",
... "study_abstract": "Some abstract goes here",
... "emp_person_id": StudyPerson(2),
... "principal_investigator_id": StudyPerson(3),
... "lab_person_id": StudyPerson(1)} # doctest: +SKIP
>>> owner = User('[email protected]') # doctest: +SKIP
>>> investigation = Investigation(1) # doctest: +SKIP
>>> Study(owner, "New Study Title", 1, info, investigation) # doctest: +SKIP
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from future.utils import viewitems
from copy import deepcopy
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from .base import QiitaStatusObject, QiitaObject
from .exceptions import (QiitaDBStatusError, QiitaDBColumnError, QiitaDBError)
from .util import (check_required_columns, check_table_cols, convert_to_id,
get_environmental_packages)
from .sql_connection import SQLConnectionHandler
class Study(QiitaStatusObject):
r"""Study object to access to the Qiita Study information
Attributes
----------
data_types
efo
info
investigation
name
pmids
shared_with
sample_template
status
title
owner
Methods
-------
raw_data
preprocessed_data
processed_data
add_pmid
exists
has_access
share
unshare
Notes
-----
All setters raise QiitaDBStatusError if trying to change a public study.
You should not be doing that.
"""
_table = "study"
# The following columns are considered not part of the study info
_non_info = {"email", "study_status_id", "study_title"}
def _lock_non_sandbox(self, conn_handler):
"""Raises QiitaDBStatusError if study is non-sandboxed"""
if self.status != 'sandbox':
raise QiitaDBStatusError("Illegal operation on non-sandbox study!")
def _status_setter_checks(self, conn_handler):
r"""Perform a check to make sure not setting status away from public
"""
if self.check_status(("public", )):
raise QiitaDBStatusError("Illegal operation on public study!")
@classmethod
def get_by_status(cls, status):
"""Returns study id for all Studies with given status
Parameters
----------
status : str
Status setting to search for
Returns
-------
list of Study objects
All studies in the database that match the given status
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT study_id FROM qiita.{0} s JOIN qiita.{0}_status ss ON "
"s.study_status_id = ss.study_status_id WHERE "
"ss.status = %s".format(cls._table))
return [x[0] for x in conn_handler.execute_fetchall(sql, (status, ))]
@classmethod
def exists(cls, study_title):
"""Check if a study exists based on study_title, which is unique
Parameters
----------
study_title : str
The title of the study to search for in the database
Returns
-------
bool
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT exists(select study_id from qiita.{} WHERE "
"study_title = %s)").format(cls._table)
return conn_handler.execute_fetchone(sql, [study_title])[0]
@classmethod
def create(cls, owner, title, efo, info, investigation=None):
"""Creates a new study on the database
Parameters
----------
owner : User object
the study's owner
title : str
Title of the study
efo : list
Experimental Factor Ontology id(s) for the study
info : dict
            the information attached to the study. All "*_id" keys must be
            passed as the objects associated with them.
investigation : Investigation object, optional
If passed, the investigation to associate with. Defaults to None.
Raises
------
QiitaDBColumnError
Non-db columns in info dictionary
All required keys not passed
IncompetentQiitaDeveloperError
email, study_id, study_status_id, or study_title passed as a key
empty efo list passed
Notes
-----
All keys in info, except the efo, must be equal to columns in
qiita.study table in the database.
"""
# make sure not passing non-info columns in the info dict
if cls._non_info.intersection(info):
raise QiitaDBColumnError("non info keys passed: %s" %
cls._non_info.intersection(info))
# make sure efo info passed
if not efo:
raise IncompetentQiitaDeveloperError("Need EFO information!")
# add default values to info
insertdict = deepcopy(info)
insertdict['email'] = owner.id
insertdict['study_title'] = title
if "reprocess" not in insertdict:
insertdict['reprocess'] = False
# default to sandboxed status
insertdict['study_status_id'] = 4
# No nuns allowed
insertdict = {k: v for k, v in viewitems(insertdict) if v is not None}
conn_handler = SQLConnectionHandler()
# make sure dictionary only has keys for available columns in db
check_table_cols(conn_handler, insertdict, cls._table)
        # make sure required columns are in the dictionary
check_required_columns(conn_handler, insertdict, cls._table)
# Insert study into database
sql = ("INSERT INTO qiita.{0} ({1}) VALUES ({2}) RETURNING "
"study_id".format(cls._table, ','.join(insertdict),
','.join(['%s'] * len(insertdict))))
# make sure data in same order as sql column names, and ids are used
data = []
for col in insertdict:
if isinstance(insertdict[col], QiitaObject):
data.append(insertdict[col].id)
else:
data.append(insertdict[col])
study_id = conn_handler.execute_fetchone(sql, data)[0]
# insert efo information into database
sql = ("INSERT INTO qiita.{0}_experimental_factor (study_id, "
"efo_id) VALUES (%s, %s)".format(cls._table))
conn_handler.executemany(sql, [(study_id, e) for e in efo])
# add study to investigation if necessary
if investigation:
sql = ("INSERT INTO qiita.investigation_study (investigation_id, "
"study_id) VALUES (%s, %s)")
conn_handler.execute(sql, (investigation.id, study_id))
return cls(study_id)
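    # Illustrative call of create() (hypothetical values, mirroring the module
    # docstring): Study.create(owner, "New Study Title", [1], info), where
    # `owner` is a User object, [1] is the list of EFO ids and `info` maps
    # qiita.study columns to values.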
# --- Attributes ---
@property
def title(self):
"""Returns the title of the study
Returns
-------
str
Title of study
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT study_title FROM qiita.{0} WHERE "
"study_id = %s".format(self._table))
return conn_handler.execute_fetchone(sql, (self._id, ))[0]
@title.setter
def title(self, title):
"""Sets the title of the study
Parameters
----------
title : str
The new study title
"""
conn_handler = SQLConnectionHandler()
sql = ("UPDATE qiita.{0} SET study_title = %s WHERE "
"study_id = %s".format(self._table))
return conn_handler.execute(sql, (title, self._id))
@property
def info(self):
"""Dict with all information attached to the study
Returns
-------
dict
info of study keyed to column names
"""
conn_handler = SQLConnectionHandler()
sql = "SELECT * FROM qiita.{0} WHERE study_id = %s".format(self._table)
info = dict(conn_handler.execute_fetchone(sql, (self._id, )))
# remove non-info items from info
for item in self._non_info:
info.pop(item)
# This is an optional column, but should not be considered part of the
# info
info.pop('study_id')
return info
@info.setter
def info(self, info):
"""Updates the information attached to the study
Parameters
----------
info : dict
information to change/update for the study, keyed to column name
Raises
------
IncompetentQiitaDeveloperError
Empty dict passed
QiitaDBColumnError
Unknown column names passed
"""
if not info:
raise IncompetentQiitaDeveloperError("Need entries in info dict!")
if 'study_id' in info:
raise QiitaDBColumnError("Cannot set study_id!")
if self._non_info.intersection(info):
raise QiitaDBColumnError("non info keys passed: %s" %
self._non_info.intersection(info))
conn_handler = SQLConnectionHandler()
if 'timeseries_type_id' in info:
# We only lock if the timeseries type changes
self._lock_non_sandbox(conn_handler)
# make sure dictionary only has keys for available columns in db
check_table_cols(conn_handler, info, self._table)
sql_vals = []
data = []
# build query with data values in correct order for SQL statement
for key, val in viewitems(info):
sql_vals.append("{0} = %s".format(key))
if isinstance(val, QiitaObject):
data.append(val.id)
else:
data.append(val)
data.append(self._id)
sql = ("UPDATE qiita.{0} SET {1} WHERE "
"study_id = %s".format(self._table, ','.join(sql_vals)))
conn_handler.execute(sql, data)
@property
def efo(self):
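        """Returns the Experimental Factor Ontology id(s) for the study
        Returns
        -------
        list
            EFO id(s) associated with the study
        """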
conn_handler = SQLConnectionHandler()
sql = ("SELECT efo_id FROM qiita.{0}_experimental_factor WHERE "
"study_id = %s".format(self._table))
return [x[0] for x in conn_handler.execute_fetchall(sql, (self._id, ))]
@efo.setter
def efo(self, efo_vals):
"""Sets the efo for the study
Parameters
----------
efo_vals : list
Id(s) for the new efo values
Raises
------
IncompetentQiitaDeveloperError
Empty efo list passed
"""
if not efo_vals:
raise IncompetentQiitaDeveloperError("Need EFO information!")
conn_handler = SQLConnectionHandler()
self._lock_non_sandbox(conn_handler)
# wipe out any EFOs currently attached to study
sql = ("DELETE FROM qiita.{0}_experimental_factor WHERE "
"study_id = %s".format(self._table))
conn_handler.execute(sql, (self._id, ))
# insert new EFO information into database
sql = ("INSERT INTO qiita.{0}_experimental_factor (study_id, "
"efo_id) VALUES (%s, %s)".format(self._table))
conn_handler.executemany(sql, [(self._id, efo) for efo in efo_vals])
@property
def shared_with(self):
"""list of users the study is shared with
Returns
-------
list of User ids
Users the study is shared with
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT email FROM qiita.{0}_users WHERE "
"study_id = %s".format(self._table))
return [x[0] for x in conn_handler.execute_fetchall(sql, (self._id,))]
@property
def pmids(self):
""" Returns list of paper PMIDs from this study
Returns
-------
list of str
list of all the PMIDs
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT pmid FROM qiita.{0}_pmid WHERE "
"study_id = %s".format(self._table))
return [x[0] for x in conn_handler.execute_fetchall(sql, (self._id, ))]
@pmids.setter
def pmids(self, values):
"""Sets the pmids for the study
Parameters
----------
values : list of str
The list of pmids to associate with the study
Raises
------
TypeError
If values is not a list
"""
# Check that a list is actually passed
if not isinstance(values, list):
raise TypeError('pmids should be a list')
# Get the connection to the database
conn_handler = SQLConnectionHandler()
# Create a queue for the operations that we need to do
queue = "%d_pmid_setter" % self._id
conn_handler.create_queue(queue)
# Delete the previous pmids associated with the study
sql = "DELETE FROM qiita.study_pmid WHERE study_id=%s"
sql_args = (self._id,)
conn_handler.add_to_queue(queue, sql, sql_args)
# Set the new ones
sql = "INSERT INTO qiita.study_pmid (study_id, pmid) VALUES (%s, %s)"
sql_args = [(self._id, val) for val in values]
conn_handler.add_to_queue(queue, sql, sql_args, many=True)
# Execute the queue
conn_handler.execute_queue(queue)
@property
def investigation(self):
""" Returns Investigation this study is part of
Returns
-------
Investigation id
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT investigation_id FROM qiita.investigation_study WHERE "
"study_id = %s")
inv = conn_handler.execute_fetchone(sql, (self._id, ))
return inv[0] if inv is not None else inv
@property
def sample_template(self):
""" Returns sample_template information id
Returns
-------
SampleTemplate id
"""
return self._id
@property
def data_types(self):
"""Returns list of the data types for this study
Returns
-------
list of str
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT DISTINCT DT.data_type FROM qiita.study_raw_data SRD "
"JOIN qiita.prep_template PT ON SRD.raw_data_id = "
"PT.raw_data_id JOIN qiita.data_type DT ON PT.data_type_id = "
"DT.data_type_id WHERE SRD.study_id = %s")
return [x[0] for x in conn_handler.execute_fetchall(sql, (self._id,))]
@property
def owner(self):
"""Gets the owner of the study
Returns
-------
str
The email (id) of the user that owns this study
"""
conn_handler = SQLConnectionHandler()
sql = """select email from qiita.{} where study_id = %s""".format(
self._table)
return conn_handler.execute_fetchone(sql, [self._id])[0]
@property
def environmental_packages(self):
"""Gets the environmental packages associated with the study
Returns
-------
list of str
The environmental package names associated with the study
"""
conn_handler = SQLConnectionHandler()
env_pkgs = conn_handler.execute_fetchall(
"SELECT environmental_package_name FROM "
"qiita.study_environmental_package WHERE study_id = %s",
(self._id,))
return [pkg[0] for pkg in env_pkgs]
@environmental_packages.setter
def environmental_packages(self, values):
"""Sets the environmental packages for the study
Parameters
----------
values : list of str
The list of environmental package names to associate with the study
Raises
------
TypeError
If values is not a list
ValueError
If any environmental packages listed on values is not recognized
"""
# Get the connection to the database
conn_handler = SQLConnectionHandler()
# The environmental packages can be changed only if the study is
# sandboxed
self._lock_non_sandbox(conn_handler)
# Check that a list is actually passed
if not isinstance(values, list):
raise TypeError('Environmental packages should be a list')
# Get all the environmental packages
env_pkgs = [pkg[0] for pkg in get_environmental_packages(
conn_handler=conn_handler)]
# Check that all the passed values are valid environmental packages
missing = set(values).difference(env_pkgs)
if missing:
            raise ValueError('Environmental package(s) not recognized: %s'
% ', '.join(missing))
# Create a queue for the operations that we need to do
queue = "%d_env_pkgs_setter" % self._id
conn_handler.create_queue(queue)
# Delete the previous environmental packages associated with the study
sql = "DELETE FROM qiita.study_environmental_package WHERE study_id=%s"
sql_args = (self._id,)
conn_handler.add_to_queue(queue, sql, sql_args)
# Set the new ones
sql = ("INSERT INTO qiita.study_environmental_package "
"(study_id, environmental_package_name) VALUES (%s, %s)")
sql_args = [(self._id, val) for val in values]
conn_handler.add_to_queue(queue, sql, sql_args, many=True)
# Execute the queue
conn_handler.execute_queue(queue)
# --- methods ---
def raw_data(self, data_type=None):
""" Returns list of data ids for raw data info
Parameters
----------
data_type : str, optional
If given, retrieve only raw_data for given datatype. Default None.
Returns
-------
list of RawData ids
"""
spec_data = ""
if data_type:
spec_data = " AND data_type_id = %d" % convert_to_id(data_type,
"data_type")
conn_handler = SQLConnectionHandler()
sql = ("SELECT raw_data_id FROM qiita.study_raw_data WHERE "
"study_id = %s{0}".format(spec_data))
return [x[0] for x in conn_handler.execute_fetchall(sql, (self._id,))]
def add_raw_data(self, raw_data):
""" Adds raw_data to the current study
Parameters
----------
raw_data : list of RawData
The RawData objects to be added to the study
Raises
------
QiitaDBError
If the raw_data is already linked to the current study
"""
conn_handler = SQLConnectionHandler()
self._lock_non_sandbox(conn_handler)
queue = "%d_add_raw_data" % self.id
sql = ("SELECT EXISTS(SELECT * FROM qiita.study_raw_data WHERE "
"study_id=%s AND raw_data_id=%s)")
conn_handler.create_queue(queue)
sql_args = [(self.id, rd.id) for rd in raw_data]
conn_handler.add_to_queue(queue, sql, sql_args, many=True)
linked = conn_handler.execute_queue(queue)
if any(linked):
raise QiitaDBError("Some of the passed raw datas have been already"
" linked to the study %s" % self.id)
conn_handler.executemany(
"INSERT INTO qiita.study_raw_data (study_id, raw_data_id) "
"VALUES (%s, %s)", sql_args)
def preprocessed_data(self, data_type=None):
""" Returns list of data ids for preprocessed data info
Parameters
----------
data_type : str, optional
            If given, retrieve only preprocessed_data for the given datatype.
            Default None.
Returns
-------
list of PreprocessedData ids
"""
spec_data = ""
if data_type:
spec_data = " AND data_type_id = %d" % convert_to_id(data_type,
"data_type")
conn_handler = SQLConnectionHandler()
sql = ("SELECT preprocessed_data_id FROM qiita.study_preprocessed_data"
" WHERE study_id = %s{0}".format(spec_data))
return [x[0] for x in conn_handler.execute_fetchall(sql, (self._id,))]
def processed_data(self, data_type=None):
""" Returns list of data ids for processed data info
Parameters
----------
data_type : str, optional
            If given, retrieve only processed_data for the given datatype.
            Default None.
Returns
-------
list of ProcessedData ids
"""
spec_data = ""
if data_type:
spec_data = " AND p.data_type_id = %d" % convert_to_id(data_type,
"data_type")
conn_handler = SQLConnectionHandler()
sql = ("SELECT p.processed_data_id FROM qiita.processed_data p JOIN "
"qiita.study_processed_data sp ON p.processed_data_id = "
"sp.processed_data_id WHERE "
"sp.study_id = %s{0}".format(spec_data))
return [x[0] for x in conn_handler.execute_fetchall(sql, (self._id,))]
def add_pmid(self, pmid):
"""Adds PMID to study
Parameters
----------
pmid : str
pmid to associate with study
"""
conn_handler = SQLConnectionHandler()
sql = ("INSERT INTO qiita.{0}_pmid (study_id, pmid) "
"VALUES (%s, %s)".format(self._table))
conn_handler.execute(sql, (self._id, pmid))
def has_access(self, user, no_public=False):
"""Returns whether the given user has access to the study
Parameters
----------
user : User object
User we are checking access for
        no_public : bool
            If True, ignore public studies and only check studies owned by or
            shared with the user. Defaults to False
Returns
-------
bool
Whether user has access to study or not
"""
# if admin or superuser, just return true
if user.level in {'superuser', 'admin'}:
return True
if no_public:
return self._id in user.user_studies + user.shared_studies
else:
return self._id in user.user_studies + user.shared_studies \
+ self.get_by_status('public')
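    # Hedged illustration: for a non-admin, non-superuser user, has_access()
    # returns True when the study id appears among the user's own studies,
    # the studies shared with them, or (unless no_public=True) the ids
    # returned by Study.get_by_status('public').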
def share(self, user):
"""Share the study with another user
Parameters
----------
user: User object
The user to share the study with
"""
conn_handler = SQLConnectionHandler()
# Make sure the study is not already shared with the given user
if user.id in self.shared_with:
return
# Do not allow the study to be shared with the owner
if user.id == self.owner:
return
sql = ("INSERT INTO qiita.study_users (study_id, email) VALUES "
"(%s, %s)")
conn_handler.execute(sql, (self._id, user.id))
def unshare(self, user):
"""Unshare the study with another user
Parameters
----------
user: User object
The user to unshare the study with
"""
conn_handler = SQLConnectionHandler()
sql = ("DELETE FROM qiita.study_users WHERE study_id = %s AND "
"email = %s")
conn_handler.execute(sql, (self._id, user.id))
class StudyPerson(QiitaObject):
r"""Object handling information pertaining to people involved in a study
Attributes
----------
name : str
name of the person
email : str
email of the person
affiliation : str
institution with which the person is affiliated
address : str or None
address of the person
phone : str or None
phone number of the person
"""
_table = "study_person"
@classmethod
def iter(cls):
"""Iterate over all study people in the database
Returns
-------
generator
Yields a `StudyPerson` object for each person in the database,
in order of ascending study_person_id
"""
conn = SQLConnectionHandler()
sql = "select study_person_id from qiita.{} order by study_person_id"
results = conn.execute_fetchall(sql.format(cls._table))
for result in results:
ID = result[0]
yield StudyPerson(ID)
@classmethod
def exists(cls, name, affiliation):
"""Checks if a person exists
Parameters
----------
name: str
Name of the person
affiliation : str
institution with which the person is affiliated
Returns
-------
bool
            True if the person exists, else False
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT exists(SELECT * FROM qiita.{0} WHERE "
"name = %s AND affiliation = %s)".format(cls._table))
return conn_handler.execute_fetchone(sql, (name, affiliation))[0]
@classmethod
def create(cls, name, email, affiliation, address=None, phone=None):
"""Create a StudyPerson object, checking if person already exists.
Parameters
----------
name : str
name of person
email : str
email of person
affiliation : str
institution with which the person is affiliated
address : str, optional
address of person
phone : str, optional
phone number of person
Returns
-------
New StudyPerson object
"""
if cls.exists(name, affiliation):
sql = ("SELECT study_person_id from qiita.{0} WHERE name = %s and"
" affiliation = %s".format(cls._table))
conn_handler = SQLConnectionHandler()
spid = conn_handler.execute_fetchone(sql, (name, affiliation))
# Doesn't exist so insert new person
else:
sql = ("INSERT INTO qiita.{0} (name, email, affiliation, address, "
"phone) VALUES"
" (%s, %s, %s, %s, %s) RETURNING "
"study_person_id".format(cls._table))
conn_handler = SQLConnectionHandler()
spid = conn_handler.execute_fetchone(sql, (name, email,
affiliation, address,
phone))
return cls(spid[0])
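    # Hedged usage sketch (illustrative values, not part of the original
    # module): create() behaves as get-or-create on (name, affiliation), so
    # calling
    #     StudyPerson.create('Jane Doe', '[email protected]', 'Some University')
    # twice returns a StudyPerson wrapping the same study_person_id instead of
    # inserting a duplicate row.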
# Properties
@property
def name(self):
"""Returns the name of the person
Returns
-------
str
Name of person
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT name FROM qiita.{0} WHERE "
"study_person_id = %s".format(self._table))
return conn_handler.execute_fetchone(sql, (self._id, ))[0]
@property
def email(self):
"""Returns the email of the person
Returns
-------
str
Email of person
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT email FROM qiita.{0} WHERE "
"study_person_id = %s".format(self._table))
return conn_handler.execute_fetchone(sql, (self._id, ))[0]
@property
def affiliation(self):
"""Returns the affiliation of the person
Returns
-------
str
Affiliation of person
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT affiliation FROM qiita.{0} WHERE "
"study_person_id = %s".format(self._table))
return conn_handler.execute_fetchone(sql, [self._id])[0]
@property
def address(self):
"""Returns the address of the person
Returns
-------
str or None
address or None if no address in database
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT address FROM qiita.{0} WHERE study_person_id ="
" %s".format(self._table))
return conn_handler.execute_fetchone(sql, (self._id, ))[0]
@address.setter
def address(self, value):
"""Set/update the address of the person
Parameters
----------
value : str
New address for person
"""
conn_handler = SQLConnectionHandler()
sql = ("UPDATE qiita.{0} SET address = %s WHERE "
"study_person_id = %s".format(self._table))
conn_handler.execute(sql, (value, self._id))
@property
def phone(self):
"""Returns the phone number of the person
Returns
-------
str or None
            phone number or None if no phone number in database
"""
conn_handler = SQLConnectionHandler()
sql = ("SELECT phone FROM qiita.{0} WHERE "
"study_person_id = %s".format(self._table))
return conn_handler.execute_fetchone(sql, (self._id, ))[0]
@phone.setter
def phone(self, value):
"""Set/update the phone number of the person
Parameters
----------
value : str
New phone number for person
"""
conn_handler = SQLConnectionHandler()
sql = ("UPDATE qiita.{0} SET phone = %s WHERE "
"study_person_id = %s".format(self._table))
conn_handler.execute(sql, (value, self._id))
|
|
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# TeamCity fails to run this test because it can't import the C++ module.
# I think it's because the C++ part gets built in another directory.
import unittest
import random
import numpy
from annoy import AnnoyIndex
try:
xrange
except NameError:
# Python 3 compat
xrange = range
class TestCase(unittest.TestCase):
def assertAlmostEquals(self, x, y):
# Annoy uses float precision, so we override the default precision
super(TestCase, self).assertAlmostEquals(x, y, 4)
class AngularIndexTest(TestCase):
def test_get_nns_by_vector(self):
f = 3
i = AnnoyIndex(f)
i.add_item(0, [0, 0, 1])
i.add_item(1, [0, 1, 0])
i.add_item(2, [1, 0, 0])
i.build(10)
self.assertEqual(i.get_nns_by_vector([3, 2, 1], 3), [2, 1, 0])
self.assertEqual(i.get_nns_by_vector([1, 2, 3], 3), [0, 1, 2])
self.assertEqual(i.get_nns_by_vector([2, 0, 1], 3), [2, 0, 1])
def test_get_nns_by_item(self):
f = 3
i = AnnoyIndex(f)
i.add_item(0, [2, 1, 0])
i.add_item(1, [1, 2, 0])
i.add_item(2, [0, 0, 1])
i.build(10)
self.assertEqual(i.get_nns_by_item(0, 3), [0, 1, 2])
self.assertEqual(i.get_nns_by_item(1, 3), [1, 0, 2])
self.assertTrue(i.get_nns_by_item(2, 3) in [[2, 0, 1], [2, 1, 0]]) # could be either
def test_dist(self):
f = 2
i = AnnoyIndex(f)
i.add_item(0, [0, 1])
i.add_item(1, [1, 1])
self.assertAlmostEqual(i.get_distance(0, 1), 2 * (1.0 - 2 ** -0.5))
def test_dist_2(self):
f = 2
i = AnnoyIndex(f)
i.add_item(0, [1000, 0])
i.add_item(1, [10, 0])
self.assertAlmostEqual(i.get_distance(0, 1), 0)
def test_dist_3(self):
f = 2
i = AnnoyIndex(f)
i.add_item(0, [97, 0])
i.add_item(1, [42, 42])
dist = (1 - 2 ** -0.5) ** 2 + (2 ** -0.5) ** 2
self.assertAlmostEqual(i.get_distance(0, 1), dist)
def test_dist_degen(self):
f = 2
i = AnnoyIndex(f)
i.add_item(0, [1, 0])
i.add_item(1, [0, 0])
self.assertAlmostEqual(i.get_distance(0, 1), 2.0)
def test_large_index(self):
# Generate pairs of random points where the pair is super close
f = 10
i = AnnoyIndex(f)
for j in xrange(0, 10000, 2):
p = [random.gauss(0, 1) for z in xrange(f)]
f1 = random.random() + 1
f2 = random.random() + 1
x = [f1 * pi + random.gauss(0, 1e-2) for pi in p]
y = [f2 * pi + random.gauss(0, 1e-2) for pi in p]
i.add_item(j, x)
i.add_item(j+1, y)
i.build(10)
for j in xrange(0, 10000, 2):
self.assertEqual(i.get_nns_by_item(j, 2), [j, j+1])
self.assertEqual(i.get_nns_by_item(j+1, 2), [j+1, j])
def precision(self, n, n_trees=10, n_points=10000, n_rounds=10):
found = 0
for r in xrange(n_rounds):
# create random points at distance x from (1000, 0, 0, ...)
f = 10
i = AnnoyIndex(f, 'euclidean')
for j in xrange(n_points):
p = [random.gauss(0, 1) for z in xrange(f - 1)]
norm = sum([pi ** 2 for pi in p]) ** 0.5
x = [1000] + [pi / norm * j for pi in p]
i.add_item(j, x)
i.build(n_trees)
nns = i.get_nns_by_vector([1000] + [0] * (f-1), n)
self.assertEqual(nns, sorted(nns)) # should be in order
            # count how many of the true n nearest neighbours (items 0..n-1) were returned
found += len([x for x in nns if x < n])
return 1.0 * found / (n * n_rounds)
def test_precision_1(self):
self.assertTrue(self.precision(1) >= 0.98)
def test_precision_10(self):
self.assertTrue(self.precision(10) >= 0.98)
def test_precision_100(self):
self.assertTrue(self.precision(100) >= 0.98)
def test_precision_1000(self):
self.assertTrue(self.precision(1000) >= 0.98)
def test_load_save_get_item_vector(self):
f = 3
i = AnnoyIndex(f)
i.add_item(0, [1.1, 2.2, 3.3])
i.add_item(1, [4.4, 5.5, 6.6])
i.add_item(2, [7.7, 8.8, 9.9])
numpy.testing.assert_array_almost_equal(i.get_item_vector(0), [1.1, 2.2, 3.3])
self.assertTrue(i.build(10))
self.assertTrue(i.save('blah.ann'))
numpy.testing.assert_array_almost_equal(i.get_item_vector(1), [4.4, 5.5, 6.6])
j = AnnoyIndex(f)
self.assertTrue(j.load('blah.ann'))
numpy.testing.assert_array_almost_equal(j.get_item_vector(2), [7.7, 8.8, 9.9])
def test_get_nns_search_k(self):
f = 3
i = AnnoyIndex(f)
i.add_item(0, [0, 0, 1])
i.add_item(1, [0, 1, 0])
i.add_item(2, [1, 0, 0])
i.build(10)
self.assertEqual(i.get_nns_by_item(0, 3, 10), [0, 1, 2])
self.assertEqual(i.get_nns_by_vector([3, 2, 1], 3, 10), [2, 1, 0])
class EuclideanIndexTest(TestCase):
def test_get_nns_by_vector(self):
f = 2
i = AnnoyIndex(f, 'euclidean')
i.add_item(0, [2, 2])
i.add_item(1, [3, 2])
i.add_item(2, [3, 3])
i.build(10)
self.assertEqual(i.get_nns_by_vector([4, 4], 3), [2, 1, 0])
self.assertEqual(i.get_nns_by_vector([1, 1], 3), [0, 1, 2])
self.assertEqual(i.get_nns_by_vector([4, 2], 3), [1, 2, 0])
def test_get_nns_by_item(self):
f = 2
i = AnnoyIndex(f, 'euclidean')
i.add_item(0, [2, 2])
i.add_item(1, [3, 2])
i.add_item(2, [3, 3])
i.build(10)
self.assertEqual(i.get_nns_by_item(0, 3), [0, 1, 2])
self.assertEqual(i.get_nns_by_item(2, 3), [2, 1, 0])
def test_dist(self):
f = 2
i = AnnoyIndex(f, 'euclidean')
i.add_item(0, [0, 1])
i.add_item(1, [1, 1])
self.assertAlmostEqual(i.get_distance(0, 1), 1.0)
def test_large_index(self):
# Generate pairs of random points where the pair is super close
f = 10
q = [random.gauss(0, 10) for z in xrange(f)]
i = AnnoyIndex(f, 'euclidean')
for j in xrange(0, 10000, 2):
p = [random.gauss(0, 1) for z in xrange(f)]
x = [1 + pi + random.gauss(0, 1e-2) for pi in p] # todo: should be q[i]
y = [1 + pi + random.gauss(0, 1e-2) for pi in p]
i.add_item(j, x)
i.add_item(j+1, y)
i.build(10)
for j in xrange(0, 10000, 2):
self.assertEqual(i.get_nns_by_item(j, 2), [j, j+1])
self.assertEqual(i.get_nns_by_item(j+1, 2), [j+1, j])
def precision(self, n, n_trees=10, n_points=10000, n_rounds=10):
found = 0
for r in xrange(n_rounds):
# create random points at distance x
f = 10
i = AnnoyIndex(f, 'euclidean')
for j in xrange(n_points):
p = [random.gauss(0, 1) for z in xrange(f)]
norm = sum([pi ** 2 for pi in p]) ** 0.5
x = [pi / norm * j for pi in p]
i.add_item(j, x)
i.build(n_trees)
nns = i.get_nns_by_vector([0] * f, n)
self.assertEqual(nns, sorted(nns)) # should be in order
            # count how many of the true n nearest neighbours (items 0..n-1) were returned
found += len([x for x in nns if x < n])
return 1.0 * found / (n * n_rounds)
def test_precision_1(self):
self.assertTrue(self.precision(1) >= 0.98)
def test_precision_10(self):
self.assertTrue(self.precision(10) >= 0.98)
def test_precision_100(self):
self.assertTrue(self.precision(100) >= 0.98)
def test_precision_1000(self):
self.assertTrue(self.precision(1000) >= 0.98)
def test_get_nns_with_distances(self):
f = 3
i = AnnoyIndex(f, 'euclidean')
i.add_item(0, [0, 0, 2])
i.add_item(1, [0, 1, 1])
i.add_item(2, [1, 0, 0])
i.build(10)
l, d = i.get_nns_by_item(0, 3, -1, True)
self.assertEquals(l, [0, 1, 2])
self.assertAlmostEquals(d[0]**2, 0.0)
self.assertAlmostEquals(d[1]**2, 2.0)
self.assertAlmostEquals(d[2]**2, 5.0)
l, d = i.get_nns_by_vector([2, 2, 2], 3, -1, True)
self.assertEquals(l, [1, 0, 2])
self.assertAlmostEquals(d[0]**2, 6.0)
self.assertAlmostEquals(d[1]**2, 8.0)
self.assertAlmostEquals(d[2]**2, 9.0)
class IndexTest(TestCase):
def test_not_found_tree(self):
i = AnnoyIndex(10)
self.assertRaises(IOError, i.load, 'nonexists.tree')
def test_binary_compatibility(self):
i = AnnoyIndex(10)
i.load('test/test.tree')
# This might change in the future if we change the search algorithm, but in that case let's update the test
self.assertEquals(i.get_nns_by_item(0, 10), [0, 85, 42, 11, 54, 38, 53, 66, 19, 31])
class TypesTest(TestCase):
def test_numpy(self, n_points=1000, n_trees=10):
f = 10
i = AnnoyIndex(f, 'euclidean')
for j in xrange(n_points):
a = numpy.random.normal(size=f)
a = a.astype(random.choice([numpy.float64, numpy.float32, numpy.uint8, numpy.int16]))
i.add_item(j, a)
i.build(n_trees)
def test_tuple(self, n_points=1000, n_trees=10):
f = 10
i = AnnoyIndex(f, 'euclidean')
for j in xrange(n_points):
i.add_item(j, (random.gauss(0, 1) for x in xrange(f)))
i.build(n_trees)
def test_wrong_length(self, n_points=1000, n_trees=10):
f = 10
i = AnnoyIndex(f, 'euclidean')
i.add_item(0, [random.gauss(0, 1) for x in xrange(f)])
self.assertRaises(IndexError, i.add_item, 1, [random.gauss(0, 1) for x in xrange(f+1000)])
self.assertRaises(IndexError, i.add_item, 2, [])
i.build(n_trees)
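# Hedged usage sketch (illustrative, not part of the original test module):
# the end-to-end pattern the tests above exercise - add items, build the
# forest, persist it, reload it, and query. The index file name 'example.ann'
# and the dimensionality are arbitrary choices for this sketch.
if __name__ == '__main__':
    f = 5
    index = AnnoyIndex(f)  # angular distance by default
    for item_id in xrange(100):
        index.add_item(item_id, [random.gauss(0, 1) for _ in xrange(f)])
    index.build(10)  # build 10 trees
    index.save('example.ann')
    reloaded = AnnoyIndex(f)
    reloaded.load('example.ann')
    print(reloaded.get_nns_by_item(0, 5))  # 5 nearest neighbours of item 0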
|
|
from threading import Thread
from ..decorators import vendor_required
from app import os, create_app
import app
import boto
from config import Config
from flask import (
render_template,
abort,
redirect,
flash,
url_for,
send_from_directory,
jsonify,
request
)
import json
from flask.ext.login import login_required, current_user
from forms import (ChangeListingInformation, NewItemForm, NewCSVForm, EditProfileForm)
from . import vendor
from ..models import Listing, Order, Status, User
from ..models.listing import Updated
from ..models.user import Vendor
import csv
import re
import copy
from .. import db
from ..email import send_email
from flask.ext.rq import get_queue
from werkzeug.utils import secure_filename
from uuid import uuid4
from pint import UnitRegistry, UndefinedUnitError
@vendor.route('/')
@login_required
@vendor_required
def index():
tut_completed = User.query.filter_by(id=current_user.id).first().tutorial_completed
return render_template('vendor/index.html', tut_completed=tut_completed)
@vendor.route('/tutorial_completed', methods=['POST'])
@login_required
@vendor_required
def tutorial_completed():
    user = User.query.filter_by(id=current_user.id).first()
    user.tutorial_completed = not user.tutorial_completed
    db.session.commit()
return '', 204
@vendor.route('/new-item', methods=['GET', 'POST'])
@login_required
@vendor_required
def new_listing():
    """Create a new item."""
    tut_completed = User.query.filter_by(id=current_user.id).first().tutorial_completed
form = NewItemForm()
if form.validate_on_submit():
listing = Listing(
name=form.listing_name.data,
description=form.listing_description.data,
available=True,
unit= form.listing_unit.data,
quantity= form.listing_quantity.data,
price=form.listing_price.data,
vendor_id=current_user.id,
product_id=form.listing_productID.data
)
db.session.add(listing)
db.session.commit()
flash('Item {} successfully created'.format(listing.name),
'form-success')
return redirect(url_for('.new_listing', tut_completed=tut_completed))
return render_template('vendor/new_listing.html', form=form, tut_completed=tut_completed)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.000001):
b = float(b)
print "ABS", abs(a-b)
print "MAX", max(rel_tol * max(abs(a), abs(b)), abs_tol)
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
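# Hedged worked example (illustrative numbers, not part of the original
# module): isclose(12.5, "12.499999999") is True because the absolute
# difference (~1e-09) is below the larger of the relative tolerance
# (1e-09 * 12.5) and the absolute floor of 1e-06, while isclose(12.5, "12.51")
# is False because the difference of 0.01 exceeds both.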
@vendor.route('/csv-row-upload', methods=['POST'])
@login_required
@vendor_required
def row_upload():
data = json.loads(request.form['json'])
print data
if data['action'] == 'replace':
listings_delete = db.session.query(Listing).filter_by(vendor_id = current_user.id)
if listings_delete.first():
listings_delete.first().delete_listing()
return jsonify({"status": "Prep", "message": "Prepared current items for replacement. {} left".format(listings_delete.count())})
if data['action'] == 'add':
row = data['row']
name = row['name']
description = row['description']
unit = row['unit']
quantity = row['quantity']
price = row['price']
print name, description, unit, quantity, price
print re.findall("\d+\.\d+", price)
formatted_raw_price = re.findall("\d+\.*\d*", price)
product_id = row['productId']
if not (is_number(formatted_raw_price)):
message_string = ("Skipping {} (Product Id: {}), due to fact that price was unable to be interpreted as a number. Found {}".
format(name, product_id, price))
return jsonify({"status": "Failure", "message": message_string})
formatted_price = re.findall("\d+\.*\d*", price)[0]
queried_listing = Listing.query.filter_by(product_id=product_id, vendor_id = current_user.id).first()
if queried_listing:
changed = False
if queried_listing.name != name:
print "name"
changed = True
queried_listing.name = name
if queried_listing.description != description:
print "desc"
changed = True
queried_listing.description = description
if queried_listing.unit != unit:
print "unit"
changed = True
queried_listing.unit = unit
if queried_listing.quantity != quantity:
print "quantity"
changed = True
queried_listing.quantity = quantity
            if not isclose(queried_listing.price, formatted_price):
changed = True
queried_listing.price = formatted_price
if changed is True:
queried_listing.available = True
db.session.commit()
return jsonify({"status": "Success", "message": "Successfully merged {} (Product Id: {}) with price ${}".format(name, product_id, formatted_price)})
else:
return jsonify({"status": "Prep", "message": "No change {} (Product Id: {})".format(name, product_id)})
else:
Listing.add_listing(Listing(product_id, current_user.id, unit, name, True, formatted_price, description, Updated.NEW_ITEM, quantity))
return jsonify({"status": "Success", "message": "Successfully added {} (Product Id: {}) with price ${}".format(name, product_id, formatted_price)})
@vendor.route('/csv-upload', methods=['GET', 'POST'])
@login_required
@vendor_required
def csv_upload():
    """Upload a CSV of listings and merge or replace the vendor's items."""
    tut_completed = User.query.filter_by(id=current_user.id).first().tutorial_completed
form = NewCSVForm()
listings = []
count = db.session.query(Listing).filter_by(vendor_id = current_user.id).count()
current_row = 0
if form.validate_on_submit():
if test_csv(form):
csv_field = form.file_upload
buff = csv_field.data.stream
buff.seek(0)
csv_data = csv.DictReader(buff, delimiter=',')
#for each row in csv, create a listing
current_vendor = Vendor.get_vendor_by_user_id(user_id=current_user.id)
if form.replace_or_merge.data == 'replace':
listings_delete = db.session.query(Listing).filter_by(vendor_id=current_user.id).all()
for listing in listings_delete:
listing.available = False
for row in csv_data:
#cheap way to skip weird 'categorical' lines
if (row[current_vendor.product_id_col]).strip().isdigit() and form.replace_or_merge.data == 'merge':
safe_price = row[current_vendor.price_col]
description = row[current_vendor.listing_description_col]
name = row[current_vendor.name_col]
unit = row[current_vendor.unit_col]
quantity = row[current_vendor.quantity_col]
proposed_listing = Listing.add_csv_row_as_listing(csv_row=row, price=safe_price)
queried_listing = Listing.get_listing_by_product_id(product_id=row[current_vendor.product_id_col])
if queried_listing:
# case: listing exists and price has not changed
queried_listing.available = True
if (
queried_listing.price == float(safe_price)
and queried_listing.description == description
and queried_listing.name == name
and queried_listing.unit == unit
and queried_listing.quantity == quantity):
proposed_listing.updated = Updated.NO_CHANGE
listings.append(proposed_listing)
# case: listing exists and price has changed
else:
queried_listing.price = float(safe_price)
proposed_listing.price = float(safe_price)
proposed_listing.description = description
proposed_listing.name = name
proposed_listing.unit = unit
proposed_listing.quantity = quantity
proposed_listing.updated = Updated.PRICE_CHANGE
listings.append(proposed_listing)
db.session.commit()
#case: listing does not yet exist
else:
proposed_listing.updated = Updated.NEW_ITEM
listings.append(proposed_listing)
Listing.add_listing(new_listing=proposed_listing)
elif (row[current_vendor.product_id_col]).strip().isdigit() and form.replace_or_merge.data == 'replace':
safe_price = row[current_vendor.price_col]
proposed_listing = Listing.add_csv_row_as_listing(csv_row=row, price=safe_price)
proposed_listing.updated = Updated.NEW_ITEM
listings.append(proposed_listing)
Listing.add_listing(new_listing=proposed_listing)
return render_template('vendor/new_csv.html', tut_completed=tut_completed, form=form, listings=listings, count=count)
#get rid of those pesky dollar signs that mess up parsing
def stripPriceHelper(price):
r = re.compile("\$(\d+.\d+)")
return r.search(price.replace(',','')).group(1)
def is_number(s):
if len(s) == 0:
return False
try:
complex(s[0]) # for int, long, float and complex
except ValueError:
return False
return True
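# Hedged note on how row_upload combines these helpers: a raw price such as
# "$12.50/lb" is reduced with re.findall("\d+\.*\d*", ...) to ['12.50'];
# is_number() accepts either that list or a plain string and only inspects the
# first element (or first character), so it merely confirms the match starts
# numerically; the extracted value is then compared to the stored price with
# isclose() so that float noise is not reported as a price change.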
def is_numeric_col(current_vendor, row, col, row_count):
if not is_number(row[col]) and row[col]:
flash("Error parsing {}'s CSV file. Bad entry in {} column, at row {}. Must be number (no letters/characters). Found <b>{}</b>"
.format(current_vendor.full_name(),col, row_count, row[col]),
'form-error')
return False
return True
def is_proper_unit(vendor_name, unit, row, row_count):
return True
@vendor_required
def test_csv(form):
current_vendor = Vendor.get_vendor_by_user_id(user_id=current_user.id)
if current_vendor is None:
abort(404)
columns = [current_vendor.product_id_col,current_vendor.listing_description_col, current_vendor.unit_col,
current_vendor.price_col, current_vendor.name_col, current_vendor.quantity_col]
csv_file = form.file_upload
print csv_file.data.filename
if '.csv' not in csv_file.data.filename:
flash("Must be a .csv file", 'form-error')
return False
buff = csv_file.data.stream
csv_data = csv.DictReader(buff, delimiter=',')
c = current_vendor.product_id_col
row_count = 0
for row in csv_data:
if len(row.keys()) > 1:
row_count += 1
for c in columns:
if c not in row:
flash("Error parsing {}'s CSV file. Couldn't find {} column at row {}"
.format(current_vendor.full_name(),c, row_count),
'form-error')
return False
if row[current_vendor.product_id_col]=="" and row[current_vendor.listing_description_col]=="":
flash("Successfully parsed {}'s CSV file!"
.format(current_vendor.full_name()), 'form-success')
return True
if not(
is_numeric_col(current_vendor=current_vendor, row=row,
col=current_vendor.price_col, row_count=row_count) and
is_numeric_col(current_vendor=current_vendor, row=row,
col=current_vendor.quantity_col,row_count=row_count) and
is_numeric_col(current_vendor=current_vendor, row=row,
col=current_vendor.product_id_col,row_count=row_count)):
return False
if not is_proper_unit(current_vendor.full_name(), current_vendor.unit_col,row, row_count):
return False
return True
@vendor.route('/itemslist/')
@vendor.route('/itemslist/<int:page>')
@login_required
@vendor_required
def current_listings(page=1):
"""View all current listings."""
tut_completed = User.query.filter_by(id=current_user.id).first().tutorial_completed
main_search_term = request.args.get('main-search', "", type=str)
sort_by = request.args.get('sort-by', "", type=str)
avail = request.args.get('avail', "", type=str)
search = request.args.get('search', "", type=str)
listings_raw = Listing.search(
sort_by=sort_by,
main_search_term=main_search_term,
avail=avail
)
listings_raw = listings_raw.filter(Listing.vendor_id == current_user.id)
if search != "False":
page = 1
listings_paginated = listings_raw.paginate(page, 21, False)
result_count = listings_raw.count()
if result_count > 0:
header = "Search Results: {} in total".format(result_count)
else:
header = "No Search Results"
return render_template(
'vendor/current_listings.html',
listings=listings_paginated,
main_search_term=main_search_term,
sort_by=sort_by,
count=result_count,
header=header,
tut_completed=tut_completed
)
@vendor.route('/items/<int:listing_id>')
@vendor.route('/items/<int:listing_id>/info')
@login_required
@vendor_required
def listing_info(listing_id):
"""View a listing's info."""
listing = Listing.query.filter_by(id=listing_id).first()
if listing is None:
abort(404)
elif listing.vendor_id != current_user.id:
abort(403)
return render_template('vendor/manage_listing.html', listing=listing)
@vendor.route('/items/<int:listing_id>/edit-item', methods=['GET', 'POST'])
@login_required
@vendor_required
def change_listing_info(listing_id):
"""Change a listings's info."""
listing = Listing.query.filter_by(
id=listing_id,
vendor_id=current_user.id
).first()
if listing is None:
abort(404)
form = ChangeListingInformation()
form.listing_id = listing_id
if form.validate_on_submit():
listing.name = form.listing_name.data
listing.description = form.listing_description.data
listing.unit = form.listing_unit.data
listing.quantity = form.listing_quantity.data
if form.listing_available.data:
listing.available = True
else:
listing.disable_listing()
listing.price = form.listing_price.data
listing.vendor_id = current_user.id
flash('Information for item {} successfully changed.'
.format(listing.name), 'form-success')
form.listing_name.default = listing.name
form.listing_description.default = listing.description
form.listing_price.default = listing.price
form.listing_unit.default = listing.unit
form.listing_quantity.default = listing.quantity
form.listing_available.default = listing.available
form.process()
return render_template(
'vendor/manage_listing.html',
listing=listing,
form=form
)
@vendor.route('/item/<int:listing_id>/delete')
@login_required
@vendor_required
def delete_listing_request(listing_id):
"""Request deletion of an item"""
listing = Listing.query.filter_by(
id=listing_id,
vendor_id=current_user.id
).first()
if listing is None:
abort(404)
return render_template('vendor/manage_listing.html', listing=listing)
@vendor.route('/item/<int:listing_id>/_delete')
@login_required
@vendor_required
def delete_listing(listing_id):
"""Delete an item."""
listing = Listing.query.filter_by(
id=listing_id,
vendor_id=current_user.id
).first()
listing.delete_listing()
flash('Successfully deleted item %s.' % listing.name, 'success')
return redirect(url_for('vendor.current_listings'))
@vendor.route('/orders')
@login_required
@vendor_required
def view_orders():
orders = (Order.query.filter_by(vendor_id=current_user.id)
.order_by(Order.id.desc()))
status_filter = request.args.get('status')
if status_filter == 'approved':
orders = orders.filter_by(status=Status.APPROVED)
elif status_filter == 'declined':
orders = orders.filter_by(status=Status.DECLINED)
elif status_filter == 'pending':
orders = orders.filter_by(status=Status.PENDING)
else:
status_filter = None
return render_template(
'vendor/orders.html',
orders=orders.all(),
status_filter=status_filter
)
@vendor.route('/approve/<int:order_id>', methods=['POST'])
@login_required
@vendor_required
def approve_order(order_id):
order = Order.query.filter_by(id = order_id).first()
if not order or order.vendor_id != current_user.id:
abort(404)
if order.status != Status.PENDING:
abort(400)
order.status = Status.APPROVED
order.comment = request.json['comment']
db.session.commit()
merchant_id = order.merchant_id
merchant = User.query.filter_by(id=merchant_id).first()
vendor_name = order.company_name
purchases = order.purchases
comment = order.comment
send_email(merchant.email,
'Vendor order request approved',
'vendor/email/approved_order',
vendor_name=vendor_name,
order=order,
purchases=purchases,
comment=comment)
return jsonify({'order_id': order_id, 'status': 'approved', 'comment': comment})
@vendor.route('/decline/<int:order_id>', methods=['POST'])
@login_required
@vendor_required
def decline_order(order_id):
order = Order.query.filter_by(id=order_id).first()
if not order or order.vendor_id != current_user.id:
abort(404)
if order.status != Status.PENDING:
abort(400)
order.status = Status.DECLINED
order.comment = request.json['comment']
db.session.commit()
merchant_id = order.merchant_id
merchant = User.query.filter_by(id=merchant_id).first()
vendor_name = order.company_name
vendor_email = current_user.email
purchases = order.purchases
comment = order.comment
send_email(merchant.email,
'Vendor order request declined',
'vendor/email/declined_order',
vendor_name=vendor_name,
vendor_email=vendor_email,
order=order,
purchases=purchases,
comment=comment)
return jsonify({'order_id': order_id, 'status': 'declined', 'comment': comment})
@vendor.route('/profile', methods=['GET'])
@login_required
@vendor_required
def view_profile():
return render_template('vendor/profile.html', vendor=current_user)
@vendor.route('/picture/<filename>', methods=['GET'])
@login_required
def get_picture(filename):
c = Config()
return send_from_directory(c.UPLOAD_FOLDER, filename)
@vendor.route('/suggestions/<search>', methods=['GET'])
@login_required
def get_suggestions(search):
listings_raw = Listing.search(
available=True,
strict_name_search=search,
sort_by='alphaAZ'
).filter_by(vendor_id=current_user.id).limit(10)
    final_arr = [a.name for a in listings_raw]
    return jsonify({'json_list': final_arr})
@vendor.route('/profile/edit', methods=['GET', 'POST'])
@login_required
@vendor_required
def edit_profile():
form = EditProfileForm()
c = Config()
if form.validate_on_submit():
current_user.bio = form.bio.data
current_user.address = form.address.data
current_user.phone_number = form.phone_number.data
current_user.website = form.website.data
current_user.public_email = form.email.data
current_user.f1 = form.featured1.data
if form.image.data:
filename = form.image.data.filename
get_queue().enqueue(process_image,
type='image',
filename=filename,
data =form.image.data.read(),
user_id=current_user.id)
if form.pdf.data:
filename = form.pdf.data.filename
get_queue().enqueue(process_image,
type='pdf',
filename=filename,
data =form.pdf.data.read(),
user_id=current_user.id)
db.session.commit()
return redirect(url_for('vendor.view_profile'))
form.bio.data = current_user.bio
form.address.data = current_user.address
form.phone_number.data = current_user.phone_number
form.website.data = current_user.website
form.email.data = current_user.public_email
form.featured1.data = current_user.f1
return render_template('vendor/edit_profile.html', form=form)
def process_image(filename, type, data, user_id):
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
with app.app_context():
source_filename = secure_filename(filename)
source_extension = os.path.splitext(source_filename)[1]
destination_filename = uuid4().hex + source_extension
conn = boto.connect_s3(os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"])
b = conn.get_bucket(os.environ["S3_BUCKET"])
sml = b.new_key("/".join([destination_filename]))
sml.set_contents_from_string(data)
sml.set_acl('public-read')
user = User.query.filter_by(id=user_id).first()
if type == 'image':
user.image = 'https://s3-us-west-2.amazonaws.com/{}/{}'.format(os.environ["S3_BUCKET"], destination_filename)
if type == 'pdf':
user.pdf = 'https://s3-us-west-2.amazonaws.com/{}/{}'.format(os.environ["S3_BUCKET"], destination_filename)
db.session.commit()
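# Hedged note (illustrative, mirrors the enqueue call in edit_profile above):
# process_image runs on an RQ worker. The worker builds its own app context,
# uploads the raw bytes to the bucket named by the S3_BUCKET environment
# variable under a uuid4-based key, marks the key public-read, and stores the
# resulting URL on the user row. The enqueue side looks like:
#
#     get_queue().enqueue(process_image,
#                         type='image',
#                         filename=form.image.data.filename,
#                         data=form.image.data.read(),
#                         user_id=current_user.id)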
|
|
# encoding: utf-8
import os
import fcntl
from select import select
from collections import namedtuple
from evdev import _input, _uinput, ecodes, util
from evdev.events import InputEvent
#--------------------------------------------------------------------------
class EvdevError(Exception):
pass
#--------------------------------------------------------------------------
_AbsInfo = namedtuple('AbsInfo', ['value', 'min', 'max', 'fuzz', 'flat', 'resolution'])
_KbdInfo = namedtuple('KbdInfo', ['repeat', 'delay'])
_DeviceInfo = namedtuple('DeviceInfo', ['bustype', 'vendor', 'product', 'version'])
class AbsInfo(_AbsInfo):
'''
    A ``namedtuple`` for storing absolute axis information -
corresponds to the ``input_absinfo`` struct:
**value**
Latest reported value for the axis.
**min**
Specifies minimum value for the axis.
**max**
Specifies maximum value for the axis.
**fuzz**
Specifies fuzz value that is used to filter noise from the
event stream.
**flat**
Values that are within this value will be discarded by joydev
interface and reported as 0 instead.
**resolution**
Specifies resolution for the values reported for the axis.
Resolution for main axes (``ABS_X, ABS_Y, ABS_Z``) is reported
in units per millimeter (units/mm), resolution for rotational
axes (``ABS_RX, ABS_RY, ABS_RZ``) is reported in units per
radian.
    .. note:: The input core does not clamp reported values to the
       ``[minimum, maximum]`` limits; that task is left to userspace.
'''
def __str__(self):
return 'val {}, min {}, max {}, fuzz {}, flat {}, res {}'.format(*self)
class KbdInfo(_KbdInfo):
'''
Keyboard repeat rate:
**repeat**
Keyboard repeat rate in characters per second.
**delay**
Amount of time that a key must be depressed before it will start
to repeat (in milliseconds).
'''
def __str__(self):
return 'repeat {}, delay {}'.format(*self)
class DeviceInfo(_DeviceInfo):
def __str__(self):
msg = 'bus: {:04x}, vendor {:04x}, product {:04x}, version {:04x}'
return msg.format(*self)
class InputDevice(object):
'''
A linux input device from which input events can be read.
'''
__slots__ = ('fn', 'fd', 'info', 'name', 'phys', '_rawcapabilities',
'version', 'ff_effects_count')
def __init__(self, dev):
'''
:param dev: path to input device
'''
#: Path to input device.
self.fn = dev
# Certain operations are possible only when the device is opened in
# read-write mode.
try:
fd = os.open(dev, os.O_RDWR | os.O_NONBLOCK)
except OSError:
fd = os.open(dev, os.O_RDONLY | os.O_NONBLOCK)
#: A non-blocking file descriptor to the device file.
self.fd = fd
# Returns (bustype, vendor, product, version, name, phys, capabilities).
info_res = _input.ioctl_devinfo(self.fd)
#: A :class:`DeviceInfo <evdev.device.DeviceInfo>` instance.
self.info = DeviceInfo(*info_res[:4])
#: The name of the event device.
self.name = info_res[4]
#: The physical topology of the device.
self.phys = info_res[5]
#: The evdev protocol version.
self.version = _input.ioctl_EVIOCGVERSION(self.fd)
        #: The raw dictionary of device capabilities - see :func:`capabilities()`.
self._rawcapabilities = _input.ioctl_capabilities(self.fd)
#: The number of force feedback effects the device can keep in its memory.
self.ff_effects_count = _input.ioctl_EVIOCGEFFECTS(self.fd)
def __del__(self):
if hasattr(self, 'fd') and self.fd is not None:
try:
self.close()
except OSError:
pass
def _capabilities(self, absinfo=True):
res = {}
        for etype, type_codes in self._rawcapabilities.items():
            for code in type_codes:
l = res.setdefault(etype, [])
if isinstance(code, tuple):
if absinfo:
a = code[1] # (0, 0, 0, 255, 0, 0)
i = AbsInfo(*a)
l.append((code[0], i))
else:
l.append(code[0])
else:
l.append(code)
return res
def capabilities(self, verbose=False, absinfo=True):
'''
Return the event types that this device supports as a mapping of
supported event types to lists of handled event codes. Example::
{ 1: [272, 273, 274],
2: [0, 1, 6, 8] }
If ``verbose`` is ``True``, event codes and types will be resolved
to their names. Example::
{ ('EV_KEY', 1): [('BTN_MOUSE', 272),
('BTN_RIGHT', 273),
                            ('BTN_MIDDLE', 274)],
('EV_REL', 2): [('REL_X', 0),
('REL_Y', 1),
('REL_HWHEEL', 6),
('REL_WHEEL', 8)] }
Unknown codes or types will be resolved to ``'?'``.
If ``absinfo`` is ``True``, the list of capabilities will also
include absolute axis information in the form of
:class:`AbsInfo` instances::
{ 3: [ (0, AbsInfo(min=0, max=255, fuzz=0, flat=0)),
(1, AbsInfo(min=0, max=255, fuzz=0, flat=0)) ]}
Combined with ``verbose`` the above becomes::
{ ('EV_ABS', 3): [ (('ABS_X', 0), AbsInfo(min=0, max=255, fuzz=0, flat=0)),
(('ABS_Y', 1), AbsInfo(min=0, max=255, fuzz=0, flat=0)) ]}
'''
if verbose:
return dict(util.resolve_ecodes(self._capabilities(absinfo)))
else:
return self._capabilities(absinfo)
def need_write(func):
'''
Decorator that raises EvdevError() if there is no write access to the
input device.
'''
def wrapper(*args):
fd = args[0].fd
if fcntl.fcntl(fd, fcntl.F_GETFL) & os.O_RDWR:
return func(*args)
msg = 'no write access to device "%s"' % args[0].fn
raise EvdevError(msg)
return wrapper
def leds(self, verbose=False):
'''
Return currently set LED keys. For example::
[0, 1, 8, 9]
If ``verbose`` is ``True``, event codes are resolved to their
names. Unknown codes are resolved to ``'?'``. For example::
[('LED_NUML', 0), ('LED_CAPSL', 1), ('LED_MISC', 8), ('LED_MAIL', 9)]
'''
leds = _input.get_sw_led_snd(self.fd, ecodes.EV_LED)
if verbose:
return [(ecodes.LED[l] if l in ecodes.LED else '?', l) for l in leds]
return leds
@need_write
def set_led(self, led_num, value):
'''
Set the state of the selected LED. For example::
device.set_led(ecodes.LED_NUML, 1)
..
'''
_uinput.write(self.fd, ecodes.EV_LED, led_num, value)
def __eq__(self, other):
'''Two devices are equal if their :data:`info` attributes are equal.'''
return isinstance(other, self.__class__) and self.info == other.info
def __str__(self):
msg = 'device {}, name "{}", phys "{}"'
return msg.format(self.fn, self.name, self.phys)
def __repr__(self):
msg = (self.__class__.__name__, self.fn)
return '{}({!r})'.format(*msg)
def close(self):
if self.fd > -1:
try:
os.close(self.fd)
finally:
self.fd = -1
def fileno(self):
'''
Return the file descriptor to the open event device. This
        makes it possible to pass ``InputDevice`` instances
directly to :func:`select.select()` and
:class:`asyncore.file_dispatcher`.'''
return self.fd
def read_one(self):
'''
Read and return a single input event as an instance of
:class:`InputEvent <evdev.events.InputEvent>`.
Return ``None`` if there are no pending input events.
'''
# event -> (sec, usec, type, code, val)
event = _input.device_read(self.fd)
if event:
return InputEvent(*event)
def read_loop(self):
'''Enter an endless ``select()`` loop that yields input events.'''
while True:
r, w, x = select([self.fd], [], [])
for event in self.read():
yield event
def read(self):
'''
Read multiple input events from device. Return a generator object that
yields :class:`InputEvent <evdev.events.InputEvent>` instances. Raises
`BlockingIOError` if there are no available events at the moment.
'''
# events -> [(sec, usec, type, code, val), ...]
events = _input.device_read_many(self.fd)
for i in events:
yield InputEvent(*i)
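    # Hedged usage note: read_one() returns at most one pending InputEvent (or
    # None), read() yields every event currently queued on the descriptor and
    # raises if none are available, while read_loop() blocks in select() and
    # yields events indefinitely, e.g.
    #     for event in device.read_loop():
    #         handle(event)   # handle() is a hypothetical callback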
def grab(self):
'''
Grab input device using ``EVIOCGRAB`` - other applications will
be unable to receive events until the device is released. Only
one process can hold a ``EVIOCGRAB`` on a device.
.. warning:: Grabbing an already grabbed device will raise an
``IOError``.'''
_input.ioctl_EVIOCGRAB(self.fd, 1)
def ungrab(self):
'''Release device if it has been already grabbed (uses
`EVIOCGRAB`).
.. warning:: Releasing an already released device will raise an
``IOError('Invalid argument')``.'''
_input.ioctl_EVIOCGRAB(self.fd, 0)
def upload_effect(self, effect):
'''Upload a force feedback effect to a force feedback device.'''
data = bytes(buffer(effect)[:])
ff_id = _input.upload_effect(self.fd, data)
return ff_id
def erase_effect(self, ff_id):
'''Erase a force effect from a force feedback device. This
also stops the effect.'''
_input.erase_effect(self.fd, ff_id)
@property
def repeat(self):
'''Get or set the keyboard repeat rate (in characters per
        second) and delay (in milliseconds).'''
return KbdInfo(*_input.ioctl_EVIOCGREP(self.fd))
@repeat.setter
def repeat(self, value):
return _input.ioctl_EVIOCSREP(self.fd, *value)
def active_keys(self, verbose=False):
'''
Return currently active keys. Example::
[1, 42]
If ``verbose`` is ``True``, key codes are resolved to their
verbose names. Unknown codes are resolved to ``'?'``. For
example::
[('KEY_ESC', 1), ('KEY_LEFTSHIFT', 42)]
'''
active_keys = _input.ioctl_EVIOCGKEY(self.fd)
if verbose:
return [(ecodes.KEY[k] if k in ecodes.KEY else '?', k) for k in active_keys]
return active_keys
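# Hedged usage sketch (illustrative, not part of the original module): a
# minimal event-printing loop. The device node '/dev/input/event0' is an
# assumed example path - substitute a node that exists on your system.
if __name__ == '__main__':
    dev = InputDevice('/dev/input/event0')  # assumed device path
    print(dev)
    for ev in dev.read_loop():
        print(ev)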
|
|
"""Unit tests for raw_wind_io.py."""
import unittest
import numpy
import pandas
from gewittergefahr.gg_io import raw_wind_io
TOLERANCE = 1e-6
# The following constants are used to test _check_data_source.
FAKE_PRIMARY_DATA_SOURCE = 'foo'
FAKE_SECONDARY_DATA_SOURCE = 'bar'
# The following constants are used to test
# _primary_and_secondary_sources_to_table.
PRIMARY_SOURCE_BY_PAIR = [
raw_wind_io.OK_MESONET_DATA_SOURCE, raw_wind_io.STORM_EVENTS_DATA_SOURCE,
raw_wind_io.HFMETAR_DATA_SOURCE]
PRIMARY_SOURCE_BY_PAIR += (
[raw_wind_io.MADIS_DATA_SOURCE] * len(raw_wind_io.SECONDARY_DATA_SOURCES))
SECONDARY_SOURCE_BY_PAIR = [None] * 3 + raw_wind_io.SECONDARY_DATA_SOURCES
PRIMARY_AND_SECONDARY_SOURCE_PAIRS_AS_DICT = {
raw_wind_io.PRIMARY_SOURCE_COLUMN: PRIMARY_SOURCE_BY_PAIR,
raw_wind_io.SECONDARY_SOURCE_COLUMN: SECONDARY_SOURCE_BY_PAIR}
PRIMARY_AND_SECONDARY_SOURCE_PAIRS_AS_TABLE = pandas.DataFrame.from_dict(
PRIMARY_AND_SECONDARY_SOURCE_PAIRS_AS_DICT)
# The following constants are used to test _check_elevations.
ELEVATIONS_M_ASL = numpy.array(
[-1000., 0., 1000., 5000., 10000., numpy.nan, None], dtype=numpy.float64)
ELEV_INVALID_INDICES = numpy.array([0, 4, 5, 6], dtype=int)
# The following constants are used to test check_wind_speeds.
SIGNED_WIND_SPEEDS_M_S01 = numpy.array(
[-100., -50., -10., 0., 10., 50., 100., numpy.nan, None],
dtype=numpy.float64)
ABSOLUTE_WIND_SPEEDS_M_S01 = numpy.array(
[-100., -50., -10., 0., 10., 50., 100., numpy.nan, None],
dtype=numpy.float64)
SIGNED_SPEED_INVALID_INDICES = numpy.array([0, 6, 7, 8], dtype=int)
ABSOLUTE_SPEED_INVALID_INDICES = numpy.array([0, 1, 2, 6, 7, 8], dtype=int)
# The following constants are used to test _check_wind_directions.
WIND_DIRECTIONS_DEG = numpy.array(
[-10., 0., 180., 359.99, 360., 5000., numpy.nan, None], dtype=numpy.float64)
DIRECTION_INVALID_INDICES = numpy.array([0, 4, 5, 6, 7], dtype=int)
# The following constants are used to test append_source_to_station_id.
STATION_ID_NO_SOURCE = 'CYEG'
NON_MADIS_PRIMARY_SOURCE = 'ok_mesonet'
STATION_ID_NON_MADIS = 'CYEG_ok-mesonet'
SECONDARY_DATA_SOURCE = 'sao'
STATION_ID_MADIS = 'CYEG_madis_sao'
# The following constants are used to test _remove_duplicate_observations.
THESE_LATITUDES_DEG = numpy.array(
[51.1, 51.102, 51.104, 51.106, 53.5, 53.501, 53.502, 53.503])
THESE_LONGITUDES_DEG = numpy.array(
[246.0, 246.001, 246.002, 246.1, 246.5, 246.501, 246.502, 246.6])
THESE_TIMES_UNIX_SEC = numpy.array([0, 0, 1, 0, 2, 2, 2, 2], dtype=int)
THESE_U_WINDS_M_S01 = numpy.array(
[5., 4.999, 5.001, 5.003, 8., 8.001, 8.002, 7.999])
THESE_V_WINDS_M_S01 = numpy.array(
[-4., -4.001, -4.002, -3.999, 17., 17., 18., 17.])
WIND_DICT_WITH_DUPLICATES = {raw_wind_io.LATITUDE_COLUMN: THESE_LATITUDES_DEG,
raw_wind_io.LONGITUDE_COLUMN: THESE_LONGITUDES_DEG,
raw_wind_io.TIME_COLUMN: THESE_TIMES_UNIX_SEC,
raw_wind_io.U_WIND_COLUMN: THESE_U_WINDS_M_S01,
raw_wind_io.V_WIND_COLUMN: THESE_V_WINDS_M_S01}
WIND_TABLE_WITH_DUPLICATES = pandas.DataFrame.from_dict(
WIND_DICT_WITH_DUPLICATES)
WIND_TABLE_SANS_DUPLICATES = WIND_TABLE_WITH_DUPLICATES.iloc[[0, 2, 3, 4, 6, 7]]
# The following constants are used to test _get_pathless_processed_file_name.
FILE_START_TIME_UNIX_SEC = 1506999600 # 0300 UTC 3 Oct 2017
FILE_END_TIME_UNIX_SEC = 1507003200 # 0400 UTC 3 Oct 2017
PATHLESS_FILE_NAME_MADIS = (
'wind-observations_madis_sao_2017-10-03-030000_2017-10-03-040000.csv')
PATHLESS_FILE_NAME_NON_MADIS = (
'wind-observations_ok-mesonet_2017-10-03-030000_2017-10-03-040000.csv')
# The following constants are used to test find_processed_file.
TOP_DIRECTORY_NAME = 'wind'
PROCESSED_FILE_NAME_MADIS = (
'wind/madis/sao/201710/wind-observations_madis_sao_2017-10-03-030000_'
'2017-10-03-040000.csv')
PROCESSED_FILE_NAME_NON_MADIS = (
'wind/ok_mesonet/201710/wind-observations_ok-mesonet_2017-10-03-030000_'
'2017-10-03-040000.csv')
# The following constants are used to test find_processed_hourly_files.
PERIOD_START_TIME_UNIX_SEC = 1506993753 # 012233 UTC 3 Oct 2017
PERIOD_END_TIME_UNIX_SEC = 1507002295 # 034455 UTC 3 Oct 2017
PROCESSED_HOURLY_FILE_NAMES_MADIS = [
'wind/madis/sao/201710/'
'wind-observations_madis_sao_2017-10-03-010000_2017-10-03-015959.csv',
'wind/madis/sao/201710/'
'wind-observations_madis_sao_2017-10-03-020000_2017-10-03-025959.csv',
'wind/madis/sao/201710/'
'wind-observations_madis_sao_2017-10-03-030000_2017-10-03-035959.csv']
PROCESSED_HOURLY_FILE_NAMES_NON_MADIS = [
'wind/ok_mesonet/201710/'
'wind-observations_ok-mesonet_2017-10-03-010000_2017-10-03-015959.csv',
'wind/ok_mesonet/201710/'
'wind-observations_ok-mesonet_2017-10-03-020000_2017-10-03-025959.csv',
'wind/ok_mesonet/201710/'
'wind-observations_ok-mesonet_2017-10-03-030000_2017-10-03-035959.csv'
]
# The following constants are used to test get_max_of_sustained_and_gust.
WIND_SPEEDS_TO_CONVERT_M_S01 = numpy.array(
[5., 10., 20., 30., numpy.nan, 6.6, 0., 40.])
WIND_GUST_SPEEDS_TO_CONVERT_M_S01 = numpy.array(
[numpy.nan, 12.5, 17.5, 34., 0., numpy.nan, 1.7, 38.])
WIND_DIRECTIONS_TO_CONVERT_DEG = numpy.array(
[0., 70., 90., 145., 200., 225., 280., 315.])
WIND_GUST_DIRECTIONS_TO_CONVERT_DEG = numpy.array(
[20., 45., 105., 135., 180., 230.1, 270., 335.])
MAX_WIND_SPEEDS_M_S01 = numpy.array(
[5., 12.5, 20., 34., 0., 6.6, 1.7, 40.])
MAX_WIND_DIRECTIONS_DEG = numpy.array(
[0., 45., 90., 135., 180., 225., 270., 315.])
MAX_WIND_DIRECTIONS_WITH_NAN_DEG = numpy.array(
[numpy.nan, 45., 90., 135., 180., 225., 270., 315.])
# The following constants are used to test speed_and_direction_to_uv and
# uv_to_speed_and_direction.
HALF_SQRT_OF_TWO = numpy.sqrt(2.) / 2
EXPECTED_MAX_U_WINDS_M_S01 = numpy.array(
[0., -12.5 * HALF_SQRT_OF_TWO, -20., -34. * HALF_SQRT_OF_TWO, 0.,
6.6 * HALF_SQRT_OF_TWO, 1.7, 40. * HALF_SQRT_OF_TWO])
EXPECTED_MAX_V_WINDS_M_S01 = numpy.array(
[-5., -12.5 * HALF_SQRT_OF_TWO, 0., 34. * HALF_SQRT_OF_TWO, 0.,
6.6 * HALF_SQRT_OF_TWO, 0., -40. * HALF_SQRT_OF_TWO])
class RawWindIoTests(unittest.TestCase):
"""Each method is a unit test for raw_wind_io.py."""
def test_check_data_sources_fake_primary(self):
"""Ensures correct output from check_data_sources.
In this case, primary data source is fake.
"""
with self.assertRaises(ValueError):
raw_wind_io.check_data_sources(
primary_source=FAKE_PRIMARY_DATA_SOURCE)
def test_check_data_sources_merged_not_allowed(self):
"""Ensures correct output from check_data_sources.
In this case, primary data source is "merged", which is not allowed.
"""
with self.assertRaises(ValueError):
raw_wind_io.check_data_sources(
primary_source=raw_wind_io.MERGED_DATA_SOURCE,
allow_merged=False)
def test_check_data_sources_merged_allowed(self):
"""Ensures correct output from check_data_sources.
In this case, primary data source is "merged", which is allowed.
"""
raw_wind_io.check_data_sources(
primary_source=raw_wind_io.MERGED_DATA_SOURCE, allow_merged=True)
def test_check_data_sources_fake_secondary(self):
"""Ensures correct output from check_data_sources.
In this case, secondary data source is fake.
"""
with self.assertRaises(ValueError):
raw_wind_io.check_data_sources(
primary_source=raw_wind_io.MADIS_DATA_SOURCE,
secondary_source=FAKE_SECONDARY_DATA_SOURCE)
def test_check_data_sources_madis(self):
"""Ensures correct output from check_data_sources.
In this case, primary source is MADIS and secondary source is valid.
"""
raw_wind_io.check_data_sources(
primary_source=raw_wind_io.MADIS_DATA_SOURCE,
secondary_source=SECONDARY_DATA_SOURCE)
def test_check_data_sources_non_madis(self):
"""Ensures correct output from check_data_sources.
In this case, primary source is non-MADIS.
"""
raw_wind_io.check_data_sources(primary_source=NON_MADIS_PRIMARY_SOURCE)
def test_primary_and_secondary_sources_to_table(self):
"""Ensures correctness of _primary_and_secondary_sources_to_table."""
this_actual_table = raw_wind_io._primary_and_secondary_sources_to_table()
this_actual_table.sort_values(
raw_wind_io.PRIMARY_SOURCE_COLUMN, axis=0, ascending=True,
inplace=True)
this_actual_table.fillna(value='FOO', axis=1, inplace=True)
this_actual_table.reset_index(inplace=True, drop=True)
this_expected_table = (
PRIMARY_AND_SECONDARY_SOURCE_PAIRS_AS_TABLE.sort_values(
raw_wind_io.PRIMARY_SOURCE_COLUMN, axis=0, ascending=True,
inplace=False)
)
this_expected_table.fillna(value='FOO', axis=1, inplace=True)
this_expected_table.reset_index(inplace=True, drop=True)
self.assertTrue(this_actual_table.equals(this_expected_table))
def test_check_elevations(self):
"""Ensures correct output from _check_elevations."""
these_invalid_indices = raw_wind_io._check_elevations(ELEVATIONS_M_ASL)
self.assertTrue(numpy.array_equal(these_invalid_indices,
ELEV_INVALID_INDICES))
def test_check_wind_speeds_signed(self):
"""Ensures correct output from check_wind_speeds.
In this case wind speeds are signed (either u- or v-component), so they
can be negative.
"""
these_invalid_indices = raw_wind_io.check_wind_speeds(
SIGNED_WIND_SPEEDS_M_S01, one_component=True)
self.assertTrue(numpy.array_equal(these_invalid_indices,
SIGNED_SPEED_INVALID_INDICES))
def test_check_wind_speeds_absolute(self):
"""Ensures correct output from check_wind_speeds.
In this case wind speeds are absolute (vector magnitudes), so they
cannot be negative.
"""
these_invalid_indices = raw_wind_io.check_wind_speeds(
ABSOLUTE_WIND_SPEEDS_M_S01, one_component=False)
self.assertTrue(numpy.array_equal(these_invalid_indices,
ABSOLUTE_SPEED_INVALID_INDICES))
def test_check_wind_directions(self):
"""Ensures correct output from _check_wind_directions."""
these_invalid_indices = raw_wind_io._check_wind_directions(
WIND_DIRECTIONS_DEG)
self.assertTrue(numpy.array_equal(these_invalid_indices,
DIRECTION_INVALID_INDICES))
def test_remove_duplicate_observations(self):
"""Ensures correct output from _remove_duplicate_observations."""
this_wind_table = raw_wind_io._remove_duplicate_observations(
WIND_TABLE_WITH_DUPLICATES)
        self.assertTrue(this_wind_table.equals(WIND_TABLE_SANS_DUPLICATES))
def test_get_pathless_processed_file_name_madis(self):
"""Ensures correct output from _get_pathless_processed_file_name.
In this case, primary data source is MADIS.
"""
this_pathless_file_name = raw_wind_io._get_pathless_processed_file_name(
start_time_unix_sec=FILE_START_TIME_UNIX_SEC,
end_time_unix_sec=FILE_END_TIME_UNIX_SEC,
primary_source=raw_wind_io.MADIS_DATA_SOURCE,
secondary_source=SECONDARY_DATA_SOURCE)
self.assertTrue(this_pathless_file_name == PATHLESS_FILE_NAME_MADIS)
def test_get_pathless_processed_file_name_non_madis(self):
"""Ensures correct output from _get_pathless_processed_file_name.
In this case, primary data source is non-MADIS.
"""
this_pathless_file_name = raw_wind_io._get_pathless_processed_file_name(
start_time_unix_sec=FILE_START_TIME_UNIX_SEC,
end_time_unix_sec=FILE_END_TIME_UNIX_SEC,
primary_source=NON_MADIS_PRIMARY_SOURCE)
self.assertTrue(this_pathless_file_name == PATHLESS_FILE_NAME_NON_MADIS)
def test_append_source_to_station_id_madis(self):
"""Ensures correct output from append_source_to_station_id.
In this case, primary data source is MADIS.
"""
this_station_id = raw_wind_io.append_source_to_station_id(
STATION_ID_NO_SOURCE, primary_source=raw_wind_io.MADIS_DATA_SOURCE,
secondary_source=SECONDARY_DATA_SOURCE)
self.assertTrue(this_station_id == STATION_ID_MADIS)
def test_append_source_to_station_id_non_madis(self):
"""Ensures correct output from append_source_to_station_id.
In this case, primary data source is non-MADIS.
"""
this_station_id = raw_wind_io.append_source_to_station_id(
STATION_ID_NO_SOURCE, primary_source=NON_MADIS_PRIMARY_SOURCE)
self.assertTrue(this_station_id == STATION_ID_NON_MADIS)
def test_get_max_of_sustained_and_gust(self):
"""Ensures correct output from get_max_of_sustained_and_gust."""
these_max_wind_speeds_m_s01, these_max_wind_directions_deg = (
raw_wind_io.get_max_of_sustained_and_gust(
WIND_SPEEDS_TO_CONVERT_M_S01, WIND_GUST_SPEEDS_TO_CONVERT_M_S01,
WIND_DIRECTIONS_TO_CONVERT_DEG,
WIND_GUST_DIRECTIONS_TO_CONVERT_DEG))
self.assertTrue(numpy.allclose(
these_max_wind_speeds_m_s01, MAX_WIND_SPEEDS_M_S01, atol=TOLERANCE,
equal_nan=True))
self.assertTrue(numpy.allclose(
these_max_wind_directions_deg, MAX_WIND_DIRECTIONS_DEG,
atol=TOLERANCE))
def test_speed_and_direction_to_uv_with_nan(self):
"""Ensures correct output from speed_and_direction_to_uv.
In this case, input directions include NaN.
"""
these_u_winds_m_s01, these_v_winds_m_s01 = (
raw_wind_io.speed_and_direction_to_uv(
MAX_WIND_SPEEDS_M_S01, MAX_WIND_DIRECTIONS_WITH_NAN_DEG))
self.assertTrue(numpy.allclose(
these_u_winds_m_s01, EXPECTED_MAX_U_WINDS_M_S01, atol=TOLERANCE))
self.assertTrue(numpy.allclose(
these_v_winds_m_s01, EXPECTED_MAX_V_WINDS_M_S01, atol=TOLERANCE))
def test_speed_and_direction_to_uv_without_nan(self):
"""Ensures correct output from speed_and_direction_to_uv.
In this case, input directions do not include NaN.
"""
these_u_winds_m_s01, these_v_winds_m_s01 = (
raw_wind_io.speed_and_direction_to_uv(
MAX_WIND_SPEEDS_M_S01, MAX_WIND_DIRECTIONS_DEG))
self.assertTrue(numpy.allclose(
these_u_winds_m_s01, EXPECTED_MAX_U_WINDS_M_S01, atol=TOLERANCE))
self.assertTrue(numpy.allclose(
these_v_winds_m_s01, EXPECTED_MAX_V_WINDS_M_S01, atol=TOLERANCE))
def test_uv_to_speed_and_direction(self):
"""Ensures correct output from uv_to_speed_and_direction."""
these_wind_speeds_m_s01, these_wind_directions_deg = (
raw_wind_io.uv_to_speed_and_direction(
EXPECTED_MAX_U_WINDS_M_S01, EXPECTED_MAX_V_WINDS_M_S01))
self.assertTrue(numpy.allclose(
these_wind_speeds_m_s01, MAX_WIND_SPEEDS_M_S01, atol=TOLERANCE))
self.assertTrue(numpy.allclose(
these_wind_directions_deg, MAX_WIND_DIRECTIONS_DEG, atol=TOLERANCE))
def test_find_processed_file_madis(self):
"""Ensures correct output from find_processed_file.
In this case, primary data source is MADIS.
"""
this_file_name = raw_wind_io.find_processed_file(
start_time_unix_sec=FILE_START_TIME_UNIX_SEC,
end_time_unix_sec=FILE_END_TIME_UNIX_SEC,
primary_source=raw_wind_io.MADIS_DATA_SOURCE,
secondary_source=SECONDARY_DATA_SOURCE,
top_directory_name=TOP_DIRECTORY_NAME, raise_error_if_missing=False)
self.assertTrue(this_file_name == PROCESSED_FILE_NAME_MADIS)
def test_find_processed_file_non_madis(self):
"""Ensures correct output from find_processed_file.
In this case, primary data source is non-MADIS.
"""
this_file_name = raw_wind_io.find_processed_file(
start_time_unix_sec=FILE_START_TIME_UNIX_SEC,
end_time_unix_sec=FILE_END_TIME_UNIX_SEC,
primary_source=NON_MADIS_PRIMARY_SOURCE,
top_directory_name=TOP_DIRECTORY_NAME, raise_error_if_missing=False)
self.assertTrue(this_file_name == PROCESSED_FILE_NAME_NON_MADIS)
def test_find_processed_hourly_files_madis(self):
"""Ensures correct output from find_processed_hourly_files.
In this case, primary data source is MADIS.
"""
these_file_names, _ = raw_wind_io.find_processed_hourly_files(
start_time_unix_sec=PERIOD_START_TIME_UNIX_SEC,
end_time_unix_sec=PERIOD_END_TIME_UNIX_SEC,
primary_source=raw_wind_io.MADIS_DATA_SOURCE,
secondary_source=SECONDARY_DATA_SOURCE,
top_directory_name=TOP_DIRECTORY_NAME, raise_error_if_missing=False)
self.assertTrue(these_file_names == PROCESSED_HOURLY_FILE_NAMES_MADIS)
def test_find_processed_hourly_files_non_madis(self):
"""Ensures correct output from find_processed_hourly_files.
In this case, primary data source is non-MADIS.
"""
these_file_names, _ = raw_wind_io.find_processed_hourly_files(
start_time_unix_sec=PERIOD_START_TIME_UNIX_SEC,
end_time_unix_sec=PERIOD_END_TIME_UNIX_SEC,
primary_source=NON_MADIS_PRIMARY_SOURCE,
top_directory_name=TOP_DIRECTORY_NAME, raise_error_if_missing=False)
self.assertTrue(
these_file_names == PROCESSED_HOURLY_FILE_NAMES_NON_MADIS)
if __name__ == '__main__':
unittest.main()
|
|
import array
import gc
import os
from math import sqrt
from rubikscolorresolver.base import (
LabColor,
RubiksColorSolverGenericBase,
Square,
lab_distance,
html_color,
rgb2lab,
)
from rubikscolorresolver.tsp_solver_greedy import solve_tsp
from rubikscolorresolver.permutations import (
even_cube_center_color_permutations,
len_even_cube_center_color_permutations,
odd_cube_center_color_permutations,
)
# from rubikscolorresolver.profile import timed_function, print_profile_data
import sys
if sys.version_info < (3, 4):
raise SystemError("Must be using Python 3.4 or higher")
def is_micropython():
return sys.implementation.name == "micropython"
ALL_COLORS = ("Bu", "Gr", "OR", "Rd", "Wh", "Ye")
SIDES_COUNT = 6
if is_micropython():
HTML_FILENAME = "rubiks-color-resolver.html"
else:
HTML_FILENAME = "/tmp/rubiks-color-resolver.html"
try:
os.unlink(HTML_FILENAME)
except Exception:
pass
# @timed_function
def median(list_foo):
list_foo = sorted(list_foo)
list_foo_len = len(list_foo)
if list_foo_len < 1:
return None
# Even number of entries
if list_foo_len % 2 == 0:
return (
list_foo[int((list_foo_len - 1) / 2)]
+ list_foo[int((list_foo_len + 1) / 2)]
) / 2.0
# Odd number of entries
else:
return list_foo[int((list_foo_len - 1) / 2)]
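# A quick illustration of median() above, kept as comments so nothing runs at
# import time. For an odd-length list it returns the middle element, for an
# even-length list the mean of the two middle elements, and None when empty:
#
#   median([3, 1, 2])     -> 2
#   median([4, 1, 3, 2])  -> 2.5
#   median([])            -> None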
def tsp_matrix_corners(corners):
len_corners = len(corners)
# build a full matrix of color to color distances
# init the 2d list with 0s
matrix = [x[:] for x in [[0] * len_corners] * len_corners]
color_names = set(("Wh", "Ye", "OR", "Rd", "Bu", "Gr"))
for x in range(len_corners):
x_corner = corners[x]
for y in range(x + 1, len_corners):
y_corner = corners[y]
if (
x_corner[0].position in color_names
and y_corner[0].position in color_names
):
distance = 999
elif (
x_corner[0].position not in color_names
and y_corner[0].position not in color_names
):
distance = 999
else:
distance_012 = (
lab_distance(x_corner[0].lab, y_corner[0].lab)
+ lab_distance(x_corner[1].lab, y_corner[1].lab)
+ lab_distance(x_corner[2].lab, y_corner[2].lab)
)
distance_201 = (
lab_distance(x_corner[0].lab, y_corner[2].lab)
+ lab_distance(x_corner[1].lab, y_corner[0].lab)
+ lab_distance(x_corner[2].lab, y_corner[1].lab)
)
distance_120 = (
lab_distance(x_corner[0].lab, y_corner[1].lab)
+ lab_distance(x_corner[1].lab, y_corner[2].lab)
+ lab_distance(x_corner[2].lab, y_corner[0].lab)
)
distance = min(distance_012, distance_201, distance_120)
matrix[x][y] = distance
matrix[y][x] = distance
# print("corners matrix")
# for row in matrix:
# print(row)
return matrix
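# Note on the 999 entries above: two reference corners (whose .position is a
# color name such as "Wh") or two scanned corners get a prohibitively large
# mutual distance, so the cheapest tour found by solve_tsp() tends to
# alternate reference and scanned corners, and consecutive path entries can
# then be treated as matched (reference, scanned) pairs further down.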
def corner_distance(corner1, corner2):
return (
lab_distance(corner1[0].lab, corner2[0].lab)
+ lab_distance(corner1[1].lab, corner2[1].lab)
+ lab_distance(corner1[2].lab, corner2[2].lab)
)
def traveling_salesman_corners(corners, desc):
matrix = tsp_matrix_corners(corners)
path = solve_tsp(matrix, desc=desc)
sorted_corners = [corners[x] for x in path]
for x in range(0, len(sorted_corners), 2):
corner1 = sorted_corners[x]
corner2 = sorted_corners[x + 1]
distance_012 = (
lab_distance(corner1[0].lab, corner2[0].lab)
+ lab_distance(corner1[1].lab, corner2[1].lab)
+ lab_distance(corner1[2].lab, corner2[2].lab)
)
distance_201 = (
lab_distance(corner1[0].lab, corner2[2].lab)
+ lab_distance(corner1[1].lab, corner2[0].lab)
+ lab_distance(corner1[2].lab, corner2[1].lab)
)
distance_120 = (
lab_distance(corner1[0].lab, corner2[1].lab)
+ lab_distance(corner1[1].lab, corner2[2].lab)
+ lab_distance(corner1[2].lab, corner2[0].lab)
)
distance = min(distance_012, distance_201, distance_120)
if distance == distance_012:
pass
elif distance == distance_201:
sorted_corners[x + 1] = (corner2[2], corner2[0], corner2[1])
elif distance == distance_120:
sorted_corners[x + 1] = (corner2[1], corner2[2], corner2[0])
else:
raise ValueError(distance)
while True:
max_delta = 0
max_delta_corners_to_swap = None
for x in range(0, len(sorted_corners), 2):
corner1 = sorted_corners[x]
corner2 = sorted_corners[x + 1]
distance12 = corner_distance(corner1, corner2)
for y in range(x + 2, len(sorted_corners), 2):
corner3 = sorted_corners[y]
corner4 = sorted_corners[y + 1]
distance34 = corner_distance(corner3, corner4)
# If we were to swap corner2 with corner4, what would that do to the corner1->corner2 distance plus the corner3->corner4 distance?
distance14 = corner_distance(corner1, corner4)
distance32 = corner_distance(corner3, corner2)
if distance14 + distance32 < distance12 + distance34:
delta = (distance12 + distance34) - (distance14 + distance32)
if delta > max_delta:
max_delta = delta
max_delta_corners_to_swap = (x + 1, y + 1)
if max_delta_corners_to_swap:
(x, y) = max_delta_corners_to_swap
orig_x = sorted_corners[x]
sorted_corners[x] = sorted_corners[y]
sorted_corners[y] = orig_x
else:
break
return sorted_corners
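# How traveling_salesman_corners() refines the TSP result (comment only, my
# reading of the code above): distance_012/201/120 are the three cyclic
# rotations of the second corner's stickers, so each second corner is rotated
# to whichever orientation best matches its partner. The while-loop is then a
# local-improvement pass that keeps swapping the second member of two pairs
# whenever that lowers the summed pair distances, stopping once no swap helps.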
def tsp_matrix_edge_pairs(edge_pairs):
len_edge_pairs = len(edge_pairs)
# build a full matrix of color to color distances
# init the 2d list with 0s
matrix = [x[:] for x in [[0] * len_edge_pairs] * len_edge_pairs]
color_names = set(("Wh", "Ye", "OR", "Rd", "Bu", "Gr"))
for x in range(len_edge_pairs):
x_edge_pair = edge_pairs[x]
for y in range(x + 1, len_edge_pairs):
y_edge_pair = edge_pairs[y]
if (
x_edge_pair[0].position in color_names
and y_edge_pair[0].position in color_names
):
distance = 999
elif (
x_edge_pair[0].position not in color_names
and y_edge_pair[0].position not in color_names
):
distance = 999
else:
distance_01 = lab_distance(
x_edge_pair[0].lab, y_edge_pair[0].lab
) + lab_distance(x_edge_pair[1].lab, y_edge_pair[1].lab)
distance_10 = lab_distance(
x_edge_pair[0].lab, y_edge_pair[1].lab
) + lab_distance(x_edge_pair[1].lab, y_edge_pair[0].lab)
distance = min(distance_01, distance_10)
matrix[x][y] = distance
matrix[y][x] = distance
return matrix
def edge_pair_distance(pair1, pair2, normal):
if normal:
return lab_distance(pair1[0].lab, pair2[0].lab) + lab_distance(
pair1[1].lab, pair2[1].lab
)
else:
return lab_distance(pair1[0].lab, pair2[1].lab) + lab_distance(
pair1[1].lab, pair2[0].lab
)
def traveling_salesman_edge_pairs(edge_pairs, desc):
matrix = tsp_matrix_edge_pairs(edge_pairs)
path = solve_tsp(matrix, desc=desc)
sorted_edge_pairs = [edge_pairs[x] for x in path]
for x in range(0, len(sorted_edge_pairs), 2):
pair1 = sorted_edge_pairs[x]
pair2 = sorted_edge_pairs[x + 1]
distance_01 = edge_pair_distance(pair1, pair2, normal=True)
distance_10 = edge_pair_distance(pair1, pair2, normal=False)
if distance_10 < distance_01:
sorted_edge_pairs[x + 1] = (
sorted_edge_pairs[x + 1][1],
sorted_edge_pairs[x + 1][0],
)
while True:
max_delta = 0
max_delta_edges_to_swap = None
for x in range(0, len(sorted_edge_pairs), 2):
pair1 = sorted_edge_pairs[x]
pair2 = sorted_edge_pairs[x + 1]
distance12 = edge_pair_distance(pair1, pair2, True)
for y in range(x + 2, len(sorted_edge_pairs), 2):
pair3 = sorted_edge_pairs[y]
pair4 = sorted_edge_pairs[y + 1]
distance34 = edge_pair_distance(pair3, pair4, True)
# If we were to swap pair2 with pair4, what would that do to the pair1->pair2 distance plus the pair3->pair4 distance?
distance14 = edge_pair_distance(pair1, pair4, True)
distance32 = edge_pair_distance(pair3, pair2, True)
if distance14 + distance32 < distance12 + distance34:
delta = (distance12 + distance34) - (distance14 + distance32)
if delta > max_delta:
max_delta = delta
max_delta_edges_to_swap = (x + 1, y + 1)
if max_delta_edges_to_swap:
(x, y) = max_delta_edges_to_swap
orig_x = sorted_edge_pairs[x]
sorted_edge_pairs[x] = sorted_edge_pairs[y]
sorted_edge_pairs[y] = orig_x
else:
break
return sorted_edge_pairs
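# traveling_salesman_edge_pairs() mirrors the corner version above, except an
# edge pair only has two possible orientations (normal vs. flipped), which is
# why only distance_01 and distance_10 are compared before the same
# pairwise-swap improvement loop runs.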
"""
def path_streak_cost(squares):
if len(squares) <= 1:
return 0
cost = 0
prev_square = squares[0]
for square in squares[1:]:
cost += lab_distance(prev_square.lab, square.lab)
prev_square = square
return cost
def best_path_streak(sorted_squares, streak_length, middle_squares, edge_pairs, corners):
max_cost_start = len(sorted_squares) - streak_length
min_cost = 999
min_cost_start = None
# print(middle_squares)
len_edge_pairs = len(edge_pairs)
if len_edge_pairs == 0:
pass
elif len_edge_pairs == 12:
target_edges_in_streak = 4
else:
raise ValueError(len_edge_pairs)
for x in range(0, max_cost_start):
squares_for_streak = sorted_squares[x:x + streak_length]
cost = path_streak_cost(squares_for_streak)
valid = True
if middle_squares:
middle_squares_in_streak = [square for square in squares_for_streak if square in middle_squares]
valid = bool(len(middle_squares_in_streak) == 1)
# print(middle_squares_in_streak)
'''
if valid and edge_pairs:
for edge_pair in edge_pairs:
edges_in_pair_in_streak = [square for square in squares_for_streak if square in edge_pair]
valid = bool(len(edges_in_pair_in_streak) == target_edges_in_streak)
'''
if valid and corners:
# print(f"corners {corners}")
corners_in_streak = []
for corner in corners:
corner_in_streak = [square for square in squares_for_streak if square in corner]
corners_in_streak.extend(corner_in_streak)
# print(f"corner_in_streak {len(corner_in_streak)}")
valid = bool(len(corner_in_streak) <= 1)
if not valid:
break
if valid:
valid = bool(len(corners_in_streak) == 4)
# print(f"corners_in_streak {len(corners_in_streak)}")
if valid and cost < min_cost:
min_cost = cost
min_cost_start = x
return sorted_squares[min_cost_start : min_cost_start + streak_length]
"""
def tsp_matrix(squares):
len_squares = len(squares)
r_len_squares = range(len_squares)
# build a full matrix of color to color distances
# init the 2d list with 0s
matrix = [x[:] for x in [[0] * len_squares] * len_squares]
for x in r_len_squares:
x_lab = squares[x].lab
for y in range(x + 1, len_squares):
y_lab = squares[y].lab
distance = lab_distance(x_lab, y_lab)
matrix[x][y] = distance
matrix[y][x] = distance
# convert to tuple of tuples
for (row_index, row) in enumerate(matrix):
matrix[row_index] = tuple(row)
matrix = tuple(matrix)
return matrix
# @timed_function
def traveling_salesman(squares, desc, middle_squares=[], edge_pairs=[], corners=[]):
"""
SQUARES_PER_ROW = int(len(squares) / SIDES_COUNT)
results = []
_squares = squares[:]
for x in range(SIDES_COUNT - 1):
if x == 4:
matrix = tsp_matrix(_squares)
path = solve_tsp(matrix, desc=desc)
path_squares = [_squares[x] for x in path]
results.extend(path_squares)
else:
matrix = tsp_matrix(_squares)
path = solve_tsp(matrix, desc=desc)
path_squares = [_squares[x] for x in path]
results.extend(best_path_streak(path_squares, SQUARES_PER_ROW, middle_squares, edge_pairs, corners))
_squares = [square for square in squares if square not in results]
return results
"""
matrix = tsp_matrix(squares)
path = solve_tsp(matrix, desc=desc)
return [squares[x] for x in path]
def traveling_salesman_two_colors(squares, endpoints=None, desc=None):
matrix = tsp_matrix(squares)
if endpoints:
start_index = squares.index(endpoints[0])
end_index = squares.index(endpoints[1])
endpoints = (start_index, end_index)
path = solve_tsp(matrix, endpoints=endpoints, desc=desc)
return [squares[x] for x in path]
# @timed_function
def get_important_square_indexes(size):
squares_per_side = size * size
max_square = squares_per_side * 6
first_squares = []
last_squares = []
for index in range(1, max_square + 1):
if (index - 1) % squares_per_side == 0:
first_squares.append(index)
elif index % squares_per_side == 0:
last_squares.append(index)
last_UBD_squares = (last_squares[0], last_squares[4], last_squares[5])
return (first_squares, last_squares, last_UBD_squares)
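# Worked example for get_important_square_indexes() (comment only): for a
# 3x3x3 cube, size=3 gives squares_per_side=9 and max_square=54, so
#   first_squares    = [1, 10, 19, 28, 37, 46]
#   last_squares     = [9, 18, 27, 36, 45, 54]
#   last_UBD_squares = (9, 45, 54)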
# @timed_function
def hex_to_rgb(rgb_string):
"""
Takes #112233 and returns the RGB values in decimal
"""
if rgb_string.startswith("#"):
rgb_string = rgb_string[1:]
red = int(rgb_string[0:2], 16)
green = int(rgb_string[2:4], 16)
blue = int(rgb_string[4:6], 16)
return (red, green, blue)
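# Example for hex_to_rgb() (comment only):
#   hex_to_rgb("#112233") -> (17, 34, 51)   # the leading "#" is optional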
# @timed_function
def hashtag_rgb_to_labcolor(rgb_string):
(red, green, blue) = hex_to_rgb(rgb_string)
# lab = rgb2lab((red, green, blue))
# print("LabColor({}, {}, {}, {}, {}, {}),".format(lab.L, lab.a, lab.b, lab.red, lab.green, lab.blue))
# return lab
return rgb2lab((red, green, blue))
crayola_colors = {
# Handy website for converting RGB tuples to hex
# http://www.w3schools.com/colors/colors_converter.asp
#
# These are the RGB values as seen via a webcam
# white = (235, 254, 250)
# green = (20, 105, 74)
# yellow = (210, 208, 2)
# orange = (148, 53, 9)
# blue = (22, 57, 103)
# red = (104, 4, 2)
#
# "Wh": hashtag_rgb_to_labcolor("#FFFFFF"),
# "Gr": hashtag_rgb_to_labcolor("#14694a"),
# "Ye": hashtag_rgb_to_labcolor("#FFFF00"),
# "OR": hashtag_rgb_to_labcolor("#943509"),
# "Bu": hashtag_rgb_to_labcolor("#163967"),
# "Rd": hashtag_rgb_to_labcolor("#680402"),
"Wh": LabColor(100.0, 0.00526049995830391, -0.01040818452526793, 255, 255, 255),
"Gr": LabColor(
39.14982168015123, -32.45052099773829, 10.60519920674466, 20, 105, 74
),
"Ye": LabColor(
97.13824698129729, -21.55590833483229, 94.48248544644462, 255, 255, 0
),
"OR": LabColor(35.71689493804023, 38.18518746791636, 43.98251678431012, 148, 53, 9),
"Bu": LabColor(
23.92144819784853, 5.28400492805528, -30.63998357385018, 22, 57, 103
),
"Rd": LabColor(20.18063311070288, 40.48184409611946, 29.94038922869042, 104, 4, 2),
}
# @timed_function
def get_row_color_distances(squares, row_baseline_lab):
"""
'squares' is a list of Square objects, already sorted into rows of color
'row_baseline_lab' is a list of Lab colors, one for each row of squares
Return a list with the total color distance of each row vs its baseline
"""
results = []
squares_per_row = int(len(squares) / 6)
count = 0
row_index = 0
distance = 0
baseline_lab = row_baseline_lab[row_index]
for square in squares:
baseline_lab = row_baseline_lab[row_index]
distance += lab_distance(baseline_lab, square.lab)
count += 1
if count % squares_per_row == 0:
results.append(int(distance))
row_index += 1
distance = 0
return results
# @timed_function
def get_squares_for_row(squares, target_row_index):
results = []
squares_per_row = int(len(squares) / 6)
count = 0
row_index = 0
for square in squares:
if row_index == target_row_index:
results.append(square)
count += 1
if count % squares_per_row == 0:
row_index += 1
return results
# @timed_function
def square_list_to_lab(squares):
reds = array.array("B")
greens = array.array("B")
blues = array.array("B")
for square in squares:
(red, green, blue) = (square.lab.red, square.lab.green, square.lab.blue)
reds.append(red)
greens.append(green)
blues.append(blue)
median_red = int(median(reds))
median_green = int(median(greens))
median_blue = int(median(blues))
return rgb2lab((median_red, median_green, median_blue))
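# square_list_to_lab() reduces a list of squares to one representative Lab
# color by taking the median of each RGB channel and converting that median
# (red, green, blue) triple with rgb2lab(). Using medians rather than means
# presumably keeps a few badly lit squares from skewing the reference color.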
class RubiksColorSolverGeneric(RubiksColorSolverGenericBase):
# @timed_function
def www_header(self):
"""
Write the <head> including css
"""
side_margin = 10
square_size = 40
size = self.width # 3 for 3x3x3, etc
with open(HTML_FILENAME, "a") as fh:
fh.write(
"""<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
div.clear {
clear: both;
}
div.clear_left {
clear: left;
}
div.side {
margin: %dpx;
float: left;
}
"""
% side_margin
)
for x in range(1, size - 1):
fh.write("div.col%d,\n" % x)
fh.write(
"""div.col%d {
float: left;
}
div.col%d {
margin-left: %dpx;
}
div#upper,
div#down {
margin-left: %dpx;
}
"""
% (
size - 1,
size,
(size - 1) * square_size,
(size * square_size) + (3 * side_margin),
)
)
fh.write(
"""
span.half_square {
width: %dpx;
height: %dpx;
white-space-collapsing: discard;
display: inline-block;
color: black;
font-weight: bold;
line-height: %dpx;
text-align: center;
}
span.square {
width: %dpx;
height: %dpx;
white-space-collapsing: discard;
display: inline-block;
color: black;
font-weight: bold;
line-height: %dpx;
text-align: center;
}
div.square {
width: %dpx;
height: %dpx;
color: black;
font-weight: bold;
line-height: %dpx;
text-align: center;
}
div.square span {
display: inline-block;
vertical-align: middle;
line-height: normal;
}
div#colormapping {
float: left;
}
div#bottom {
cursor: pointer;
}
div#bottom div.initial_rgb_values {
display: none;
}
</style>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
<script>
$(document).ready(function()
{
$("div#bottom").click(function(event)
{
if ($("div#bottom div.final_cube").is(":visible")) {
$("div#bottom div.initial_rgb_values").show();
$("div#bottom div.final_cube").hide();
} else {
$("div#bottom div.initial_rgb_values").hide();
$("div#bottom div.final_cube").show();
}
})
});
</script>
<title>Rubiks Cube Color Resolver</title>
</head>
<body>
"""
% (
int(square_size / 2),
square_size,
square_size,
square_size,
square_size,
square_size,
square_size,
square_size,
square_size,
)
)
def write_color_corners(self, desc, corners):
with open(HTML_FILENAME, "a") as fh:
fh.write("<div class='clear colors'>\n")
fh.write("<h2>%s</h2>\n" % desc)
for row_index in range(3):
for (index, (corner0, corner1, corner2)) in enumerate(corners):
if row_index == 0:
square = corner0
elif row_index == 1:
square = corner1
elif row_index == 2:
square = corner2
else:
raise ValueError(row_index)
(red, green, blue) = (
square.lab.red,
square.lab.green,
square.lab.blue,
)
if index and index % 2 == 0:
fh.write("<span class='half_square'></span>")
fh.write(
"<span class='square' style='background-color:#%02x%02x%02x' title='RGB (%s, %s, %s), Lab (%s, %s, %s), color %s, side %s'>%s</span>\n"
% (
red,
green,
blue,
red,
green,
blue,
int(square.lab.L),
int(square.lab.a),
int(square.lab.b),
square.color_name,
square.side_name,
square.position,
)
)
fh.write("<br>")
fh.write("</div>\n")
def write_color_edge_pairs(self, desc, square_pairs):
with open(HTML_FILENAME, "a") as fh:
fh.write("<div class='clear colors'>\n")
fh.write("<h2>%s</h2>\n" % desc)
for use_square1 in (True, False):
for (index, (square1, square2)) in enumerate(square_pairs):
if use_square1:
square = square1
else:
square = square2
(red, green, blue) = (
square.lab.red,
square.lab.green,
square.lab.blue,
)
if index and index % 2 == 0:
fh.write("<span class='half_square'></span>")
fh.write(
"<span class='square' style='background-color:#%02x%02x%02x' title='RGB (%s, %s, %s), Lab (%s, %s, %s), color %s, side %s'>%s</span>\n"
% (
red,
green,
blue,
red,
green,
blue,
int(square.lab.L),
int(square.lab.a),
int(square.lab.b),
square.color_name,
square.side_name,
square.position,
)
)
fh.write("<br>")
fh.write("</div>\n")
# @timed_function
def write_colors(self, desc, squares):
with open(HTML_FILENAME, "a") as fh:
squares_per_row = int(len(squares) / 6)
fh.write("<div class='clear colors'>\n")
fh.write("<h2>%s</h2>\n" % desc)
count = 0
for square in squares:
(red, green, blue) = (square.lab.red, square.lab.green, square.lab.blue)
fh.write(
"<span class='square' style='background-color:#%02x%02x%02x' title='RGB (%s, %s, %s), Lab (%s, %s, %s), color %s, side %s'>%d</span>\n"
% (
red,
green,
blue,
red,
green,
blue,
int(square.lab.L),
int(square.lab.a),
int(square.lab.b),
square.color_name,
square.side_name,
square.position,
)
)
count += 1
if count % squares_per_row == 0:
fh.write("<br>")
fh.write("</div>\n")
# @timed_function
def www_footer(self):
with open(HTML_FILENAME, "a") as fh:
fh.write(
"""
</body>
</html>
"""
)
# @timed_function
def enter_scan_data(self, scan_data):
for (position, (red, green, blue)) in scan_data.items():
position = int(position)
side = self.pos2side[position]
side.set_square(position, red, green, blue)
if self.write_debug_file:
self.www_header()
with open(HTML_FILENAME, "a") as fh:
fh.write("<h1>RGB Input</h1>\n")
fh.write("<pre>{}</pre>\n".format(scan_data))
self.calculate_pos2square()
# @timed_function
def html_cube(self, desc, use_html_colors, div_class):
cube = ["dummy"]
for side in (
self.sideU,
self.sideL,
self.sideF,
self.sideR,
self.sideB,
self.sideD,
):
for position in range(side.min_pos, side.max_pos + 1):
square = side.squares[position]
if use_html_colors:
red = html_color[square.color_name]["red"]
green = html_color[square.color_name]["green"]
blue = html_color[square.color_name]["blue"]
else:
red = square.lab.red
green = square.lab.green
blue = square.lab.blue
cube.append((red, green, blue, square.color_name, square.lab))
col = 1
squares_per_side = self.width * self.width
max_square = squares_per_side * 6
sides = ("upper", "left", "front", "right", "back", "down")
side_index = -1
(first_squares, last_squares, last_UBD_squares) = get_important_square_indexes(
self.width
)
html = []
html.append("<div class='cube {}'>".format(div_class))
html.append("<h1>%s</h1>\n" % desc)
for index in range(1, max_square + 1):
if index in first_squares:
side_index += 1
html.append("<div class='side' id='%s'>\n" % sides[side_index])
(red, green, blue, color_name, lab) = cube[index]
html.append(
" <div class='square col%d' title='RGB (%d, %d, %d), Lab (%s, %s, %s), "
"color %s' style='background-color: #%02x%02x%02x;'><span>%02d</span></div>\n"
% (
col,
red,
green,
blue,
int(lab.L),
int(lab.a),
int(lab.b),
color_name,
red,
green,
blue,
index,
)
)
if index in last_squares:
html.append("</div>\n")
if index in last_UBD_squares:
html.append("<div class='clear'></div>\n")
col += 1
if col == self.width + 1:
col = 1
html.append("</div>")
return "".join(html)
def write_html(self, html):
with open(HTML_FILENAME, "a") as fh:
fh.write(html)
def _write_colors(self, desc, box):
with open(HTML_FILENAME, "a") as fh:
fh.write("<div class='clear colors'>\n")
fh.write("<h2>{}</h2>\n".format(desc))
for color_name in ("Wh", "Ye", "Gr", "Bu", "OR", "Rd"):
lab = box[color_name]
fh.write(
"<span class='square' style='background-color:#%02x%02x%02x' title='RGB (%s, %s, %s), Lab (%s, %s, %s), color %s'>%s</span>\n"
% (
lab.red,
lab.green,
lab.blue,
lab.red,
lab.green,
lab.blue,
int(lab.L),
int(lab.a),
int(lab.b),
color_name,
color_name,
)
)
fh.write("<br>")
fh.write("</div>\n")
# @timed_function
def write_crayola_colors(self):
self._write_colors("crayola box", crayola_colors)
# @timed_function
def write_color_box(self):
self._write_colors("color_box", self.color_box)
# @timed_function
def set_state(self):
self.state = []
# odd cube
if self.sideU.mid_pos is not None:
# Assign a color name to each center square. Compute
# which naming scheme results in the least total color distance in
# terms of the assigned color name vs. the colors in crayola_colors.
min_distance = None
min_distance_permutation = None
# Build a list of all center squares
center_squares = []
for side in (
self.sideU,
self.sideL,
self.sideF,
self.sideR,
self.sideB,
self.sideD,
):
square = side.squares[side.mid_pos]
center_squares.append(square)
# desc = "middle center"
# log.info("center_squares: %s".format(center_squares))
for permutation in odd_cube_center_color_permutations:
distance = 0
for (index, center_square) in enumerate(center_squares):
color_name = permutation[index]
color_obj = crayola_colors[color_name]
distance += lab_distance(center_square.lab, color_obj)
if min_distance is None or distance < min_distance:
min_distance = distance
min_distance_permutation = permutation
"""
log.info("{} PERMUTATION {}, DISTANCE {:,} (NEW MIN)".format(desc, permutation, int(distance)))
else:
log.info("{} PERMUTATION {}, DISTANCE {}".format(desc, permutation, distance))
"""
self.color_to_side_name = {
min_distance_permutation[0]: "U",
min_distance_permutation[1]: "L",
min_distance_permutation[2]: "F",
min_distance_permutation[3]: "R",
min_distance_permutation[4]: "B",
min_distance_permutation[5]: "D",
}
# log.info("{} FINAL PERMUTATION {}".format(desc, min_distance_permutation))
# even cube
else:
self.color_to_side_name = {
"Wh": "U",
"OR": "L",
"Gr": "F",
"Rd": "R",
"Bu": "B",
"Ye": "D",
}
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
for x in range(side.min_pos, side.max_pos + 1):
square = side.squares[x]
square.side_name = self.color_to_side_name[square.color_name]
# @timed_function
def cube_for_json(self):
"""
Return a dictionary of the cube data so that we can json dump it
"""
data = {}
data["kociemba"] = "".join(self.cube_for_kociemba_strict())
data["sides"] = {}
data["squares"] = {}
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
for x in range(side.min_pos, side.max_pos + 1):
square = side.squares[x]
color = square.color_name
side_name = self.color_to_side_name[color]
if side_name not in data["sides"]:
data["sides"][side_name] = {}
data["sides"][side_name]["colorName"] = color
data["sides"][side_name]["colorHTML"] = {}
data["sides"][side_name]["colorHTML"]["red"] = html_color[color][
"red"
]
data["sides"][side_name]["colorHTML"]["green"] = html_color[color][
"green"
]
data["sides"][side_name]["colorHTML"]["blue"] = html_color[color][
"blue"
]
data["squares"][square.position] = {"finalSide": side_name}
return data
# @timed_function
def assign_color_names(
self, desc, squares_lists_all, color_permutations, color_box
):
"""
Assign a color name to each square in each squares_list. Compute
which naming scheme results in the least total color distance in
terms of the assigned color name vs. the colors in color_box.
"""
ref_even_cube_center_color_permutations = even_cube_center_color_permutations
# print("\n\n\n")
# print("assign_color_names '{}' via {}".format(desc, color_permutations))
def get_even_cube_center_color_permutation(permutation_index):
LINE_LENGTH = 18
start = permutation_index * LINE_LENGTH
end = start + LINE_LENGTH
return ref_even_cube_center_color_permutations[start:end].split()
ref_ALL_COLORS = ALL_COLORS
# squares_lists_all is sorted by color. Split that list into 6 even buckets (squares_lists).
squares_per_row = int(len(squares_lists_all) / 6)
squares_lists = []
square_list = []
for square in squares_lists_all:
square_list.append(square)
if len(square_list) == squares_per_row:
squares_lists.append(tuple(square_list))
square_list = []
# Compute the distance for each color in the color_box vs each squares_list
# in squares_lists. Store this in distances_of_square_list_per_color
distances_of_square_list_per_color = {}
for color_name in ref_ALL_COLORS:
color_lab = color_box[color_name]
distances_of_square_list_per_color[color_name] = []
for (index, squares_list) in enumerate(squares_lists):
distance = 0
for square in squares_list:
distance += lab_distance(square.lab, color_lab)
distances_of_square_list_per_color[color_name].append(int(distance))
distances_of_square_list_per_color[
color_name
] = distances_of_square_list_per_color[color_name]
min_distance = 99999
min_distance_permutation = None
if color_permutations == "even_cube_center_color_permutations":
# before sorting
"""
print("\n".join(map(str, squares_lists)))
for color_name in ref_ALL_COLORS:
print("pre distances_of_square_list_per_color {} : {}".format(color_name, distances_of_square_list_per_color[color_name]))
print("")
"""
# Move the squares_list row that is closest to Bu to the front, then Gr, OR, Rd, Wh, Ye.
# This will allow us to skip many more entries later.
for (insert_index, color_name) in enumerate(ref_ALL_COLORS):
min_color_name_distance = 99999
min_color_name_distance_index = None
for (index, distance) in enumerate(
distances_of_square_list_per_color[color_name]
):
if distance < min_color_name_distance:
min_color_name_distance = distance
min_color_name_distance_index = index
tmp_square_list = squares_lists[min_color_name_distance_index]
squares_lists.pop(min_color_name_distance_index)
squares_lists.insert(insert_index, tmp_square_list)
for color_name in ref_ALL_COLORS:
blue_distance = distances_of_square_list_per_color[color_name][
min_color_name_distance_index
]
distances_of_square_list_per_color[color_name].pop(
min_color_name_distance_index
)
distances_of_square_list_per_color[color_name].insert(
insert_index, blue_distance
)
# after sorting
"""
print("\n".join(map(str, squares_lists)))
for color_name in ref_ALL_COLORS:
print("post distances_of_square_list_per_color {} : {}".format(color_name, distances_of_square_list_per_color[color_name]))
print("")
"""
permutation_len = len_even_cube_center_color_permutations
permutation_index = 0
# total = 0
# skip_total = 0
r = range(6)
while permutation_index < permutation_len:
permutation = get_even_cube_center_color_permutation(permutation_index)
distance = 0
skip_by = 0
for x in r:
distance += distances_of_square_list_per_color[permutation[x]][x]
if distance > min_distance:
if x == 0:
skip_by = 120
elif x == 1:
skip_by = 24
elif x == 2:
skip_by = 6
elif x == 3:
skip_by = 2
# if skip_by:
# print("{} PERMUTATION {} - {}, x {} distance {:,} > min {}, skip_by {}".format(
# desc, permutation_index, permutation, x, distance, min_distance, skip_by))
break
if skip_by:
permutation_index += skip_by
# skip_total += skip_by
continue
if distance < min_distance:
# print("{} PERMUTATION {} - {}, DISTANCE {:,} vs min {} (NEW MIN)".format(desc, permutation_index, permutation, distance, min_distance))
# log.info("{} PERMUTATION {}, DISTANCE {:,} (NEW MIN)".format(desc, permutation, int(distance)))
min_distance = distance
min_distance_permutation = permutation
# else:
# print("{} PERMUTATION {} - {}, DISTANCE {} vs min {}".format(desc, permutation_index, permutation, distance, min_distance))
# #log.info("{} PERMUTATION {}, DISTANCE {}".format(desc, permutation, distance))
# total += 1
permutation_index += 1
# print("total {}".format(total))
# print("skip total {}".format(skip_total))
# print("")
elif color_permutations == "odd_cube_center_color_permutations":
p = odd_cube_center_color_permutations
for permutation in p:
distance = (
distances_of_square_list_per_color[permutation[0]][0]
+ distances_of_square_list_per_color[permutation[1]][1]
+ distances_of_square_list_per_color[permutation[2]][2]
+ distances_of_square_list_per_color[permutation[3]][3]
+ distances_of_square_list_per_color[permutation[4]][4]
+ distances_of_square_list_per_color[permutation[5]][5]
)
if distance < min_distance:
min_distance = distance
min_distance_permutation = permutation
# print("{} PERMUTATION {} - {}, DISTANCE {:,} (NEW MIN)".format(desc, permutation_index, permutation, int(distance)))
# log.info("{} PERMUTATION {}, DISTANCE {:,} (NEW MIN)".format(desc, permutation, int(distance)))
# else:
# print("{} PERMUTATION {}, DISTANCE {}".format(desc, permutation, distance))
# log.info("{} PERMUTATION {}, DISTANCE {}".format(desc, permutation, distance))
# Assign the color name to the Square object
for (index, squares_list) in enumerate(squares_lists):
color_name = min_distance_permutation[index]
for square in squares_list:
square.color_name = color_name
def get_squares_by_color_name(self):
white_squares = []
yellow_squares = []
orange_squares = []
red_squares = []
green_squares = []
blue_squares = []
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
for square in side.center_squares + side.corner_squares + side.edge_squares:
if square.color_name == "Wh":
white_squares.append(square)
elif square.color_name == "Ye":
yellow_squares.append(square)
elif square.color_name == "OR":
orange_squares.append(square)
elif square.color_name == "Rd":
red_squares.append(square)
elif square.color_name == "Gr":
green_squares.append(square)
elif square.color_name == "Bu":
blue_squares.append(square)
return (
white_squares,
yellow_squares,
orange_squares,
red_squares,
green_squares,
blue_squares,
)
# @timed_function
def resolve_color_box(self):
"""
Temporarily assign names to all squares, using the crayola colors as a reference point.
We use these name assignments to build our "color_box", which will be our
reference Wh, Ye, OR, Rd, Gr, Bu colors for assigning color names to edge
and center squares.
"""
if self.width == 3 or is_micropython():
# If we are solving a 3x3x3 or we are using micropython then we are most likely on an
# underpowered platform like a LEGO EV3. Save a lot of CPU cycles by only using the
# corner squares to create the color box.
corner_squares = []
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
for square in side.corner_squares:
corner_squares.append(square)
sorted_corner_squares = traveling_salesman(corner_squares, "corner")
self.assign_color_names(
"corner squares for color_box",
sorted_corner_squares,
"even_cube_center_color_permutations",
crayola_colors,
)
if self.write_debug_file:
self.write_colors("corners for color_box", sorted_corner_squares)
else:
# use all squares to create the color box...this takes much more CPU
all_squares = []
middle_squares = []
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
for square in (
side.center_squares + side.corner_squares + side.edge_squares
):
all_squares.append(square)
if side.mid_pos:
middle_squares.append(side.squares[side.mid_pos])
edge_pairs = []
if self.width == 2:
from rubikscolorresolver.cube_222 import corner_tuples
elif self.width == 3:
from rubikscolorresolver.cube_333 import corner_tuples
elif self.width == 4:
from rubikscolorresolver.cube_444 import corner_tuples
elif self.width == 5:
from rubikscolorresolver.cube_555 import corner_tuples
elif self.width == 6:
from rubikscolorresolver.cube_666 import corner_tuples
elif self.width == 7:
from rubikscolorresolver.cube_777 import corner_tuples
corners = []
for corner_tuple in corner_tuples:
corners.append(
(
self.pos2square[corner_tuple[0]],
self.pos2square[corner_tuple[1]],
self.pos2square[corner_tuple[2]],
)
)
# ======
# pass 1
# ======
sorted_all_squares = traveling_salesman(
all_squares, "all", middle_squares, edge_pairs, corners
)
self.assign_color_names(
"squares for color_box (pass 1)",
sorted_all_squares,
"even_cube_center_color_permutations",
crayola_colors,
)
if self.write_debug_file:
self.write_colors("squares for color_box (pass 1)", sorted_all_squares)
# ======
# pass 2
# ======
(
white_squares,
yellow_squares,
orange_squares,
red_squares,
green_squares,
blue_squares,
) = self.get_squares_by_color_name()
green_blue_endpoints = None
white_yellow_endpoints = None
red_orange_endpoints = None
# odd cube
if self.width % 2 == 1:
white_center = None
yellow_center = None
orange_center = None
red_center = None
green_center = None
blue_center = None
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
square = self.pos2square[side.mid_pos]
if square.color_name == "Wh":
white_center = square
elif square.color_name == "Ye":
yellow_center = square
elif square.color_name == "OR":
orange_center = square
elif square.color_name == "Rd":
red_center = square
elif square.color_name == "Gr":
green_center = square
elif square.color_name == "Bu":
blue_center = square
if white_center and yellow_center:
white_yellow_endpoints = (white_center, yellow_center)
if green_center and blue_center:
green_blue_endpoints = (green_center, blue_center)
if red_center and orange_center:
red_orange_endpoints = (red_center, orange_center)
# Nuke all color names (they were temporary)
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
for square in (
side.center_squares + side.corner_squares + side.edge_squares
):
square.color_name = None
sorted_green_blue = traveling_salesman_two_colors(
green_squares + blue_squares,
endpoints=green_blue_endpoints,
desc="green blue",
)
sorted_white_yellow = traveling_salesman_two_colors(
white_squares + yellow_squares,
endpoints=white_yellow_endpoints,
desc="white yellow",
)
sorted_red_orange = traveling_salesman_two_colors(
red_squares + orange_squares,
endpoints=red_orange_endpoints,
desc="white yellow",
)
sorted_all_squares = (
sorted_green_blue + sorted_white_yellow + sorted_red_orange
)
self.assign_color_names(
"squares for color_box (pass 2)",
sorted_all_squares,
"even_cube_center_color_permutations",
crayola_colors,
)
if self.write_debug_file:
self.write_colors("squares for color_box (pass 2)", sorted_all_squares)
(
white_squares,
yellow_squares,
orange_squares,
red_squares,
green_squares,
blue_squares,
) = self.get_squares_by_color_name()
self.color_box = {}
self.color_box["Wh"] = square_list_to_lab(white_squares)
self.color_box["Ye"] = square_list_to_lab(yellow_squares)
self.color_box["OR"] = square_list_to_lab(orange_squares)
self.color_box["Rd"] = square_list_to_lab(red_squares)
self.color_box["Gr"] = square_list_to_lab(green_squares)
self.color_box["Bu"] = square_list_to_lab(blue_squares)
self.orange_baseline = self.color_box["OR"]
self.red_baseline = self.color_box["Rd"]
# Nuke all color names (they were temporary)
for side in (
self.sideU,
self.sideR,
self.sideF,
self.sideD,
self.sideL,
self.sideB,
):
for square in side.center_squares + side.corner_squares + side.edge_squares:
square.color_name = None
if self.write_debug_file:
self.write_color_box()
# @timed_function
def resolve_corner_squares(self):
"""
Assign names to the corner squares
"""
white = Square(
None,
"Wh",
self.color_box["Wh"].red,
self.color_box["Wh"].green,
self.color_box["Wh"].blue,
)
yellow = Square(
None,
"Ye",
self.color_box["Ye"].red,
self.color_box["Ye"].green,
self.color_box["Ye"].blue,
)
orange = Square(
None,
"OR",
self.color_box["OR"].red,
self.color_box["OR"].green,
self.color_box["OR"].blue,
)
red = Square(
None,
"Rd",
self.color_box["Rd"].red,
self.color_box["Rd"].green,
self.color_box["Rd"].blue,
)
green = Square(
None,
"Gr",
self.color_box["Gr"].red,
self.color_box["Gr"].green,
self.color_box["Gr"].blue,
)
blue = Square(
None,
"Bu",
self.color_box["Bu"].red,
self.color_box["Bu"].green,
self.color_box["Bu"].blue,
)
white.color_name = "Wh"
yellow.color_name = "Ye"
orange.color_name = "OR"
red.color_name = "Rd"
green.color_name = "Gr"
blue.color_name = "Bu"
target_corners = [
(white, green, orange),
(white, red, green),
(white, orange, blue),
(white, blue, red),
(yellow, orange, green),
(yellow, green, red),
(yellow, blue, orange),
(yellow, red, blue),
]
if self.width == 2:
from rubikscolorresolver.cube_222 import corner_tuples
elif self.width == 3:
from rubikscolorresolver.cube_333 import corner_tuples
elif self.width == 4:
from rubikscolorresolver.cube_444 import corner_tuples
elif self.width == 5:
from rubikscolorresolver.cube_555 import corner_tuples
elif self.width == 6:
from rubikscolorresolver.cube_666 import corner_tuples
elif self.width == 7:
from rubikscolorresolver.cube_777 import corner_tuples
corners = []
for corner_tuple in corner_tuples:
corners.append(
[
self.pos2square[corner_tuple[0]],
self.pos2square[corner_tuple[1]],
self.pos2square[corner_tuple[2]],
]
)
sorted_corners = traveling_salesman_corners(target_corners + corners, "corners")
# assign color names
for x in range(0, len(sorted_corners), 2):
corner1 = sorted_corners[x]
corner2 = sorted_corners[x + 1]
corner2[0].color_name = corner1[0].position
corner2[1].color_name = corner1[1].position
corner2[2].color_name = corner1[2].position
if self.write_debug_file:
self.write_color_corners("corners", sorted_corners)
# @timed_function
def resolve_edge_squares(self):
"""
Use traveling salesman algorithm to sort the colors
"""
# Nothing to be done for 2x2x2
if self.width == 2:
return
elif self.width == 3:
from rubikscolorresolver.cube_333 import edge_orbit_id
elif self.width == 4:
from rubikscolorresolver.cube_444 import edge_orbit_id
elif self.width == 5:
from rubikscolorresolver.cube_555 import edge_orbit_id
elif self.width == 6:
from rubikscolorresolver.cube_666 import edge_orbit_id
elif self.width == 7:
from rubikscolorresolver.cube_777 import edge_orbit_id
white = Square(
None,
"Wh",
self.color_box["Wh"].red,
self.color_box["Wh"].green,
self.color_box["Wh"].blue,
)
yellow = Square(
None,
"Ye",
self.color_box["Ye"].red,
self.color_box["Ye"].green,
self.color_box["Ye"].blue,
)
orange = Square(
None,
"OR",
self.color_box["OR"].red,
self.color_box["OR"].green,
self.color_box["OR"].blue,
)
red = Square(
None,
"Rd",
self.color_box["Rd"].red,
self.color_box["Rd"].green,
self.color_box["Rd"].blue,
)
green = Square(
None,
"Gr",
self.color_box["Gr"].red,
self.color_box["Gr"].green,
self.color_box["Gr"].blue,
)
blue = Square(
None,
"Bu",
self.color_box["Bu"].red,
self.color_box["Bu"].green,
self.color_box["Bu"].blue,
)
white.color_name = "Wh"
yellow.color_name = "Ye"
orange.color_name = "OR"
red.color_name = "Rd"
green.color_name = "Gr"
blue.color_name = "Bu"
for target_orbit_id in range(self.orbits):
edge_pairs = []
for side in (self.sideU, self.sideD, self.sideL, self.sideR):
for square in side.edge_squares:
orbit_id = edge_orbit_id[square.position]
if orbit_id == target_orbit_id:
partner_index = side.get_wing_partner(square.position)
partner = self.pos2square[partner_index]
edge_pair = (square, partner)
if (
edge_pair not in edge_pairs
and (edge_pair[1], edge_pair[0]) not in edge_pairs
):
edge_pairs.append(edge_pair)
if len(edge_pairs) == 12:
target_edge_pairs = [
(white, orange),
(white, red),
(white, green),
(white, blue),
(green, orange),
(green, red),
(blue, orange),
(blue, red),
(yellow, orange),
(yellow, red),
(yellow, green),
(yellow, blue),
]
elif len(edge_pairs) == 24:
target_edge_pairs = [
(white, orange),
(white, orange),
(white, red),
(white, red),
(white, green),
(white, green),
(white, blue),
(white, blue),
(green, orange),
(green, orange),
(green, red),
(green, red),
(blue, orange),
(blue, orange),
(blue, red),
(blue, red),
(yellow, orange),
(yellow, orange),
(yellow, red),
(yellow, red),
(yellow, green),
(yellow, green),
(yellow, blue),
(yellow, blue),
]
else:
raise ValueError("found {} edge pairs".format(len(edge_pairs)))
sorted_edge_pairs = traveling_salesman_edge_pairs(
target_edge_pairs + edge_pairs, "edge pairs"
)
# assign color names
for x in range(0, len(sorted_edge_pairs), 2):
pair1 = sorted_edge_pairs[x]
pair2 = sorted_edge_pairs[x + 1]
pair2[0].color_name = pair1[0].position
pair2[1].color_name = pair1[1].position
if self.write_debug_file:
self.write_color_edge_pairs(
"edges - orbit %d" % target_orbit_id, sorted_edge_pairs
)
# @timed_function
def resolve_center_squares(self):
"""
Use traveling salesman algorithm to sort the squares by color
"""
if self.width == 2:
return
elif self.width == 3:
from rubikscolorresolver.cube_333 import center_groups
elif self.width == 4:
from rubikscolorresolver.cube_444 import center_groups
elif self.width == 5:
from rubikscolorresolver.cube_555 import center_groups
elif self.width == 6:
from rubikscolorresolver.cube_666 import center_groups
elif self.width == 7:
from rubikscolorresolver.cube_777 import center_groups
for (desc, centers_squares) in center_groups:
# log.debug("\n\n\n\n")
# log.info("Resolve {}".format(desc))
center_squares = []
for position in centers_squares:
square = self.pos2square[position]
center_squares.append(square)
if desc == "centers":
sorted_center_squares = center_squares[:]
permutations = "odd_cube_center_color_permutations"
else:
sorted_center_squares = traveling_salesman(center_squares, desc)
permutations = "even_cube_center_color_permutations"
self.assign_color_names(
desc, sorted_center_squares, permutations, self.color_box
)
if self.write_debug_file:
self.write_colors(desc, sorted_center_squares)
# @timed_function
def crunch_colors(self):
if self.write_debug_file:
html_init_cube = self.html_cube(
"Initial RGB values", False, "initial_rgb_values"
)
self.write_html(html_init_cube)
self.write_crayola_colors()
gc.collect()
self.resolve_color_box()
# corners
gc.collect()
self.resolve_corner_squares()
# centers
gc.collect()
self.resolve_center_squares()
# edges
gc.collect()
self.resolve_edge_squares()
gc.collect()
self.set_state()
gc.collect()
self.sanity_check_edge_squares()
gc.collect()
self.validate_all_corners_found()
gc.collect()
self.validate_odd_cube_midge_vs_corner_parity()
gc.collect()
if self.write_debug_file:
html_final_cube = self.html_cube("Final Cube", True, "final_cube")
html = "<div id='bottom'>{}{}</div>".format(html_init_cube, html_final_cube)
self.write_html(html)
self.www_footer()
def print_profile_data(self):
# print_profile_data()
pass
# @timed_function
def resolve_colors(argv):
help_string = """usage: rubiks-color-resolver.py [-h] [-j] [--filename FILENAME] [--rgb RGB]
optional arguments:
-h, --help show this help message and exit
-j, --json Print json results
--filename FILENAME Read the RGB scan data from FILENAME
--rgb RGB RGB json
"""
filename = None
rgb = None
use_json = False
argv_index = 1
while argv_index < len(argv):
if argv[argv_index] == "--help":
print(help_string)
sys.exit(0)
elif argv[argv_index] == "--filename":
filename = argv[argv_index + 1]
argv_index += 2
elif argv[argv_index] == "--rgb":
rgb = argv[argv_index + 1]
argv_index += 2
elif argv[argv_index] == "--json" or argv[argv_index] == "-j":
use_json = True
argv_index += 1
else:
print(help_string)
sys.exit(1)
if filename:
with open(filename, "r") as fh:
rgb = "".join(fh.readlines())
elif rgb:
pass
else:
print("ERROR: Neither --filename or --rgb was specified")
sys.exit(1)
argv = None
scan_data = eval(rgb)
for key, value in scan_data.items():
scan_data[key] = tuple(value)
square_count = len(list(scan_data.keys()))
square_count_per_side = int(square_count / 6)
width = int(sqrt(square_count_per_side))
cube = RubiksColorSolverGeneric(width)
cube.write_debug_file = True
cube.enter_scan_data(scan_data)
cube.crunch_colors()
cube.print_profile_data()
cube.print_cube()
if use_json:
from json import dumps as json_dumps
result = json_dumps(cube.cube_for_json(), indent=4, sort_keys=True)
else:
result = "".join(cube.cube_for_kociemba_strict())
print(result)
return result
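# Example invocation (comment only; the positions and RGB values below are
# made-up placeholders, not real scan data, and the exact entry-point script
# name is assumed from the help string above). --rgb takes a dict literal
# mapping square position -> (red, green, blue); a cube of width N needs
# 6 * N * N entries:
#
#   python rubiks-color-resolver.py --json \
#       --rgb "{'1': (235, 254, 250), '2': (20, 105, 74), ...}"
#
# The same dict literal can instead be placed in a file and passed via
# --filename.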
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Intel Corp.
#
"""
Test the node_power plugin implementation.
"""
import unittest
import time
import os
import json
from ....os_remote_access.mock.os_remote_access import OsRemoteAccessMock
from ....utilities.utilities import Utilities, SubprocessOutput
from ....plugin.manager import PluginManager
from ....utilities.remote_access_data import RemoteAccessData
from ....bmc.mock.bmc import BmcMock
from ..node_power import NodePower
class MockUtilities(Utilities):
"""Mock class fake low level system call helpers."""
def __init__(self):
super(MockUtilities, self).__init__()
self.stack = []
self.ping_stack = []
def execute_no_capture(self, command):
"""Execute a command list suppressing output and returning the return
code."""
if len(self.stack) == 0:
return 0
else:
result = self.stack[0]
self.stack = self.stack[1:]
return result
def execute_with_capture(self, command):
"""Execute a command list capturing output and returning the return
code, stdout, stderr"""
if len(self.stack) == 0:
return ''
else:
result = self.stack[0]
self.stack = self.stack[1:]
return result
def ping_check(self, address):
"""Check if a network address has a OS responding to pings."""
if len(self.ping_stack) == 0:
return True
else:
value = self.ping_stack[0]
self.ping_stack = self.ping_stack[1:]
return value
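# The mock classes in this module share a simple pattern: a test pre-loads a
# list of canned return values (e.g. self.stack or self.ping_stack above),
# each call pops the first entry, and an empty list falls back to a benign
# default such as 0, '' or True.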
class MockSwitch(object):
"""Mock class for switches (PDUs)"""
def __init__(self):
self.state = True
def get_switch_state(self, access, outlet):
"""Return the mocked state."""
return self.state
class MockOsAccess(object):
"""Mock up OS access plugin for certain tests."""
def __init__(self):
self.stack = []
self.dfx_test_stack = []
def execute(self, cmd, remote_access_data, capture=False, other=None):
"""Mocked call to execute."""
if len(self.stack) == 0:
return SubprocessOutput(0, None, None)
else:
result = self.stack[0]
self.stack = self.stack[1:]
if result:
return SubprocessOutput(0, None, None)
else:
return SubprocessOutput(255, None, None)
def test_connection(self, remote_access_data):
if len(self.dfx_test_stack) == 0:
return True
else:
rv = self.dfx_test_stack[0]
self.dfx_test_stack = self.dfx_test_stack[1:]
return rv
class MockBmcAccess(BmcMock):
"""Mock the bmc mock class."""
def __init__(self):
super(MockBmcAccess, self).__init__()
self.state_stack = []
self.set_failure = False
def get_chassis_state(self, remote_access):
"""Get the fake chassis state."""
if len(self.state_stack) == 0:
return True
else:
result = self.state_stack[0]
self.state_stack = self.state_stack[1:]
return result
class MockNodePower(NodePower):
"""Mocking parts of NodePower."""
def __init__(self, **options):
super(MockNodePower, self).__init__(**options)
self.shutdown_succeed = None
self.graceful_fail = False
self.bmc_access = MockBmcAccess()
self.bmc_credentials = RemoteAccessData('127.0.0.2', 0, 'admin', None)
self.os_credentials = RemoteAccessData('127.0.0.1', 22, 'admin', None)
def wait_for_chassis_state(self, state, timeout):
"""Test access to _wait_for_chassis_state()"""
return self._wait_for_chassis_state(state, timeout)
def graceful_os_halt(self):
"""Mock the graceful shutdown to fail."""
return self.shutdown_succeed
def _graceful_os_halt(self):
if self.graceful_fail:
self.graceful_fail = False
return False
else:
return NodePower._graceful_os_halt(self)
def bmc_access_state_stack(self, state_list):
self.bmc_access.state_stack = state_list
def os_access_set_dfx_test_stack(self, state_list):
with open('/tmp/mock_os_test_results', 'w+') as fd:
json.dump(state_list, fd)
class TestNodePower(unittest.TestCase):
"""Test the NodePower class."""
def setUp(self):
self._real_sleep = time.sleep
time.sleep = self._my_sleep
self.__utilities = MockUtilities()
self.manager = PluginManager()
self.manager.register_plugin_class(NodePower)
self.manager.register_plugin_class(BmcMock)
self.manager.register_plugin_class(OsRemoteAccessMock)
self.os_access = RemoteAccessData('127.0.0.1', 22, 'admin', None)
self.bmc_access = RemoteAccessData('127.0.0.2', 0, 'admin', None)
self.bmc_plugin = self.manager.create_instance('bmc', 'mock')
self.bmc_plugin.set_chassis_state(self.bmc_access, 'off')
self.os_plugin = MockOsAccess()
self.switch_access1 = RemoteAccessData('127.0.0.3', 22, 'admin', None)
self.switch_access2 = RemoteAccessData('127.0.0.3', 22, 'admin', None)
self.switch_plugin1 = MockSwitch()
self.switch_plugin2 = MockSwitch()
self.__options = {
'plugin_manager': self.manager,
'device_list': [{
'device_id': 'test_node',
'hostname': 'test_node',
'device_type': 'node',
'access_type': 'mock',
'bmc': 'test_bmc',
'pdu_list': [
(self.switch_access1, self.switch_plugin1, '3'),
(self.switch_access2, self.switch_plugin2, '1')
],
"ip_address": "127.0.0.1",
"port": 21,
'os_shutdown_timeout_seconds': .2,
'os_boot_timeout_seconds': .2,
'os_network_to_halt_time': .2,
'bmc_boot_timeout_seconds': .2,
'bmc_chassis_off_wait': .1
},
{
'device_id': 'test_node_1',
'hostname': 'test_node_1',
'device_type': 'node',
'access_type': 'mock',
'bmc': 'test_bmc_1',
'pdu_list': [
(self.switch_access1, self.switch_plugin1, '3'),
(self.switch_access2, self.switch_plugin2, '1')
],
"ip_address": "127.0.0.1",
"port": 21,
'os_shutdown_timeout_seconds': .2,
'os_boot_timeout_seconds': .2,
'os_network_to_halt_time': .2,
'bmc_boot_timeout_seconds': .2,
'bmc_chassis_off_wait': .1
}
],
'bmc_list': [{
'device_name': 'test_node',
'hostname': 'test_bmc',
'device_type': 'bmc',
'access_type': 'mock',
"ip_address": "127.0.0.2",
"port": 21
}],
}
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.utilities = self.__utilities
def tearDown(self):
time.sleep = self._real_sleep
file_path = os.path.join(os.path.sep, 'tmp', 'mock_os_test_results')
if os.path.exists(file_path):
os.unlink(file_path)
def _my_sleep(self, seconds):
self._real_sleep(float(seconds) / 100.0)
def test_ctor(self):
self.assertIsNotNone(self.controller)
def test_no_options(self):
with self.assertRaises(RuntimeError):
self.controller = self.manager.create_instance('power_control', 'node_power')
def test_get_current_device_power_state(self):
self.__utilities.returned_value = True
result = self.controller.get_current_device_power_state()
self.assertEqual('Off', result['test_node'])
def test_set_device_power_state(self):
result = self.controller.set_device_power_state('On:bios')
self.assertTrue(result)
self.os_plugin.dfx_test_stack = [True, False]
result = self.controller.set_device_power_state('Off')
self.assertTrue(result)
self.os_plugin.dfx_test_stack = [False, True]
result = self.controller.set_device_power_state('On:bmc_on')
self.assertTrue(result)
self.os_plugin.dfx_test_stack = [False, True]
result = self.controller.set_device_power_state('On')
self.assertTrue(result)
def test__parse_options(self):
self.__options['device_list'][0]['device_type'] = 'network_switch'
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
result = self.controller.get_current_device_power_state()
self.assertEqual('NodePower controller used on a non-node type device!',
result[self.__options['device_list'][0]['hostname']])
self.__options['device_list'][0]['device_type'] = 'node'
del self.__options['device_list'][0]['os_shutdown_timeout_seconds']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
del self.__options['device_list'][0]['os_boot_timeout_seconds']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
del self.__options['device_list'][0]['os_network_to_halt_time']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
del self.__options['device_list'][0]['bmc_boot_timeout_seconds']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
del self.__options['device_list'][0]['bmc_chassis_off_wait']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
del self.__options['device_list'][0]['pdu_list']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
del self.__options['device_list'][0]['device_id']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
del self.__options['plugin_manager']
self.controller = self.manager.create_instance('power_control', 'node_power', **self.__options)
self.controller.get_current_device_power_state()
self.controller.set_device_power_state('On')
def test_set_with_off(self):
result = self.controller.set_device_power_state('Off')
self.assertTrue(result['test_node'])
def test_wait_for_chassis_state(self):
power = MockNodePower(**self.__options)
power.bmc_access_state_stack([False, True])
result = power.wait_for_chassis_state(True, 3)
self.assertTrue(result)
def test_target_on_to_state(self):
mock_os = MockOsAccess()
self.__options['os'] = (self.os_access, mock_os)
power = NodePower(**self.__options)
power.utilities = self.__utilities
self.assertTrue(power.set_device_power_state('On:on'))
mock_os.dfx_test_stack = [True, False]
self.assertTrue(power.set_device_power_state('On:efi'))
def test_target_on_to_state_2(self):
mock_os = MockOsAccess()
self.__options['os'] = (self.os_access, mock_os)
power = MockNodePower(**self.__options)
power.utilities = self.__utilities
mock_os.dfx_test_stack = [False, True]
self.assertTrue(power.set_device_power_state('On'))
mock_os.dfx_test_stack = [True, False]
self.assertTrue(power.set_device_power_state('On:efi'))
def test_target_off(self):
mock_os = MockOsAccess()
self.__options['os'] = (self.os_access, mock_os)
power = MockNodePower(**self.__options)
power.utilities = self.__utilities
self.assertTrue(power.set_device_power_state('On:efi'))
mock_os.dfx_test_stack = [False]
result = power.get_current_device_power_state()
self.assertEqual('On', result['test_node'])
mock_os.dfx_test_stack = [False]
self.assertTrue(power.set_device_power_state('Off'))
def test_target_off_force(self):
power = MockNodePower(**self.__options)
power.utilities = self.__utilities
self.assertTrue(power.set_device_power_state('On:bmc_on'))
power.os_access_set_dfx_test_stack([True])
result = power.get_current_device_power_state()
self.assertEqual('On:bmc_on', result['test_node'])
power.os_access_set_dfx_test_stack([True])
self.assertTrue(power.set_device_power_state('Off', True))
def test_target_on_force(self):
power = MockNodePower(**self.__options)
power.utilities = self.__utilities
self.assertTrue(power.set_device_power_state('On:bmc_on'))
power.os_access_set_dfx_test_stack([True])
result = power.get_current_device_power_state()
self.assertEqual('On:bmc_on', result['test_node'])
power.os_access_set_dfx_test_stack([False, False])
self.graceful_fail = True
power.os_access_set_dfx_test_stack([True, False])
self.assertFalse(power.set_device_power_state('On:bmc_on', True)['test_node'])
def test__do_bmc_power_state(self):
mock_os = MockOsAccess()
mock_bmc = MockBmcAccess()
self.__options['os'] = (self.os_access, mock_os)
self.__options['bmc'] = (self.bmc_access, mock_bmc)
power = MockNodePower(**self.__options)
power.utilities = self.__utilities
mock_bmc.state_stack = [False, False]
mock_bmc.set_failure = True
self.assertFalse(power.set_device_power_state('On:bmc_on')['test_node'])
def test_target_on_shutdown_failed(self):
power = MockNodePower(**self.__options)
power.utilities = self.__utilities
self.assertTrue(power.set_device_power_state('On:bmc_on'))
power.os_access_set_dfx_test_stack([True])
result = power.get_current_device_power_state()
self.assertEqual('On:bmc_on', result['test_node'])
self.graceful_fail = True
power.os_access_set_dfx_test_stack([True])
self.assertFalse(power.set_device_power_state('On:bmc_on')['test_node'])
|
|
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# COMMAND ----------
# MAGIC %md
# MAGIC # U-Net model for image segmentation
# MAGIC
# MAGIC This is a modified version of TensorFlow's image segmentation tutorial, which can be found [here](https://www.tensorflow.org/tutorials/images/segmentation). It uses a modified [U-Net](https://arxiv.org/abs/1505.04597) approach, with [VGG16](https://arxiv.org/abs/1409.1556) as the encoder and traditional Conv2DTranspose layers for upsampling. After 1 epoch a validation accuracy of 84.5 % was achieved on the [Oxford Pets Data Set](https://www.robots.ox.ac.uk/~vgg/data/pets/).
# COMMAND ----------
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from IPython.display import clear_output
dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)
def normalize(input_image, input_mask):
input_image = tf.cast(input_image, tf.float32) / 255.0
input_mask -= 1
return input_image, input_mask
@tf.function
def load_image_train(datapoint):
input_image = tf.image.resize(datapoint['image'], (128, 128))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
def load_image_test(datapoint):
input_image = tf.image.resize(datapoint['image'], (128, 128))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
TRAIN_LENGTH = info.splits['train'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test = dataset['test'].map(load_image_test)
train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = test.batch(BATCH_SIZE)
def display(display_list):
plt.figure(figsize=(15, 15))
title = ['Input Image', 'True Mask', 'Predicted Mask']
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i+1)
plt.title(title[i])
plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
plt.axis('off')
plt.show()
for image, mask in train.take(1):
sample_image, sample_mask = image, mask
display([sample_image, sample_mask])
# COMMAND ----------
# MAGIC %md
# MAGIC Now that the dataset has been loaded into memory, the model can be defined.
# COMMAND ----------
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.applications import VGG16
def encoder_VGG16(input_shape):
base_model=VGG16(include_top=False, weights='imagenet', input_shape=input_shape)
layers=[layer.output for layer in base_model.layers]
base_model = tf.keras.Model(inputs=base_model.input, outputs=layers[-2])
base_model.summary()
x = []
fourth_layer = base_model.get_layer('block1_conv1').output
x.append(fourth_layer)
third_layer = base_model.get_layer('block2_conv2').output
x.append(third_layer)
secondary_layer = base_model.get_layer('block3_conv3').output
x.append(secondary_layer)
last_layer = base_model.get_layer('block4_conv3').output
x.append(last_layer)
output_layer = base_model.get_layer('block5_conv3').output
x.append(output_layer)
return base_model, x
# COMMAND ----------
# MAGIC %md
# MAGIC Here the decoder part is defined: upsampling converts the encoded features back to the input image's height and width, with one output channel per class.
# COMMAND ----------
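# Illustrative sketch (an editorial addition, not part of the original notebook), assuming
# the `from tensorflow.keras.layers import *` from the previous cell: a single U-Net
# decoder block in isolation. Conv2DTranspose with strides=2 doubles the spatial
# dimensions, and the matching encoder feature map is concatenated back in as the
# skip connection. The shapes below are arbitrary examples.
_decoder_in = Input(shape=(32, 32, 128))  # deeper feature map coming up from the bottleneck
_skip_in = Input(shape=(64, 64, 64))      # encoder feature map at the target resolution
_up = Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding='same')(_decoder_in)  # -> (64, 64, 64)
_merged = Concatenate()([_up, _skip_in])                                                  # -> (64, 64, 128)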
def unet(image_width: int,
         image_height: int,
n_channels: int,
n_depth: int,
n_classes: int):
#if n_depth<1 or n_depth>5: #+ add more cases
# raise Exception("Unsupported number of layers/upsamples")
    input_shape = [image_height, image_width, n_channels]
encoded_model, x = encoder_VGG16(input_shape)
encoded_model.trainable=False
intermediate_model = x[n_depth-1]
intermediate_model = tf.keras.layers.Dropout(0.5)(intermediate_model)
for i in reversed(range(0,n_depth-1)):
        next_filters = x[i+1].shape[3] // 2  # halve the channel count; integer division keeps the filter count an int
        intermediate_model = Conv2DTranspose(filters=next_filters, kernel_size=3, strides=2, padding='same')(intermediate_model)
intermediate_model = tf.keras.layers.Concatenate()([intermediate_model,x[i]])
intermediate_model = tf.keras.layers.BatchNormalization()(intermediate_model)
intermediate_model = tf.keras.layers.ReLU()(intermediate_model)
intermediate_model = Conv2D(filters=next_filters, kernel_size=3, activation ='relu', padding='same')(intermediate_model)
intermediate_model = Conv2D(filters=next_filters, kernel_size=3, activation ='relu', padding='same')(intermediate_model)
outputs=Conv2D(filters=n_classes,kernel_size=(1,1),strides=(1),padding='same')(intermediate_model)
    x = Reshape((image_height*image_width, n_classes))(outputs)
x = Activation(tf.nn.softmax)(x)
    outputs = Reshape((image_height, image_width, n_classes))(x)
print(outputs.shape[2])
final_model=tf.keras.models.Model(inputs=encoded_model.input ,outputs=[outputs])
return(final_model)
shape=[128, 128, 3]
this_model = unet(shape[0],shape[1],shape[2],5,3)
this_model.summary()
this_model.outputs
# COMMAND ----------
# MAGIC %md
# MAGIC The model is then compiled.
# COMMAND ----------
this_model.compile(optimizer='adam',
                   loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),  # the model already ends in a softmax activation, so its outputs are probabilities, not logits
metrics=['accuracy'])
def create_mask(pred_mask):
pred_mask = tf.argmax(pred_mask, axis=-1)
pred_mask = pred_mask[..., tf.newaxis]
return pred_mask[0]
def show_predictions(dataset=None, num=1):
if dataset:
for image, mask in dataset.take(num):
pred_mask = this_model.predict(image)
display([image[0], mask[0], create_mask(pred_mask)])
else:
display([sample_image, sample_mask,
create_mask(this_model.predict(sample_image[tf.newaxis, ...]))])
show_predictions()
# COMMAND ----------
# MAGIC %md
# MAGIC Below, the model is fitted to the training data and validated on the validation set after each epoch. A validation accuracy of 84.5 % is achieved after one epoch.
# COMMAND ----------
TRAIN_LENGTH = info.splits['train'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
EPOCHS = 20
VAL_SUBSPLITS = 5
VALIDATION_STEPS = info.splits['test'].num_examples//BATCH_SIZE//VAL_SUBSPLITS
model_history = this_model.fit(train_dataset, epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
validation_steps=VALIDATION_STEPS,
validation_data=test_dataset)
#scores = model_history.evaluate(X_test, y_test, verbose=2)
# COMMAND ----------
show_predictions(test_dataset,num=10)
# COMMAND ----------
loss = model_history.history['loss']
val_loss = model_history.history['val_loss']
epochs = range(EPOCHS)
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'bo', label='Validation loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss Value')
plt.ylim([0, 1])
plt.legend()
plt.show()
# COMMAND ----------
|
|
import os
import re
import json
import base64
import logging
import datetime
import time
import copy
import decimal
import cgi
import numpy
import pymongo
from lib import config, util, util_litecoin
D = decimal.Decimal
def get_market_price(price_data, vol_data):
assert len(price_data) == len(vol_data)
assert len(price_data) <= config.MARKET_PRICE_DERIVE_NUM_POINTS
market_price = numpy.average(price_data, weights=vol_data)
return market_price
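# Illustration (editorial note, not original code): get_market_price is a volume-weighted
# average, so larger trades pull the derived price harder. With hypothetical prices
# [0.010, 0.012, 0.011] and volumes [100, 50, 10]:
#   numpy.average([0.010, 0.012, 0.011], weights=[100, 50, 10])
#   = (0.010*100 + 0.012*50 + 0.011*10) / 160 = 1.71 / 160 = 0.0106875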
def get_market_price_summary(asset1, asset2, with_last_trades=0, start_dt=None, end_dt=None):
"""Gets a synthesized trading "market price" for a specified asset pair (if available), as well as additional info.
    If no suitable trade data is available, None is returned.
"""
mongo_db = config.mongo_db
if not end_dt:
end_dt = datetime.datetime.utcnow()
if not start_dt:
start_dt = end_dt - datetime.timedelta(days=10) #default to 10 days in the past
#look for the last max 6 trades within the past 10 day window
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
base_asset_info = mongo_db.tracked_assets.find_one({'asset': base_asset})
quote_asset_info = mongo_db.tracked_assets.find_one({'asset': quote_asset})
if not isinstance(with_last_trades, int) or with_last_trades < 0 or with_last_trades > 30:
raise Exception("Invalid with_last_trades")
if not base_asset_info or not quote_asset_info:
raise Exception("Invalid asset(s)")
last_trades = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset,
'block_time': { "$gte": start_dt, "$lte": end_dt }
},
{'_id': 0, 'block_index': 1, 'block_time': 1, 'unit_price': 1, 'base_quantity_normalized': 1, 'quote_quantity_normalized': 1}
).sort("block_time", pymongo.DESCENDING).limit(max(config.MARKET_PRICE_DERIVE_NUM_POINTS, with_last_trades))
if not last_trades.count():
return None #no suitable trade data to form a market price (return None, NOT False here)
last_trades = list(last_trades)
    last_trades.reverse() #now ordered from oldest to newest
market_price = get_market_price(
[last_trades[i]['unit_price'] for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))],
[(last_trades[i]['base_quantity_normalized'] + last_trades[i]['quote_quantity_normalized']) for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))])
result = {
'market_price': float(D(market_price)),
'base_asset': base_asset,
'quote_asset': quote_asset,
}
if with_last_trades:
#[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
result['last_trades'] = [[
t['block_time'],
t['unit_price'],
t['base_quantity_normalized'],
t['quote_quantity_normalized'],
t['block_index']
] for t in last_trades]
else:
result['last_trades'] = []
return result
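# Illustration (editorial, shape inferred from the code above): when trade data exists,
# get_market_price_summary returns something like
#   {'market_price': 0.0107,
#    'base_asset': 'XLT', 'quote_asset': 'MYASSET',  # pair ordering comes from util.assets_to_asset_pair
#    'last_trades': [[block_time, unit_price, base_qty_normalized, quote_qty_normalized, block_index], ...]}
# with 'last_trades' left as [] when with_last_trades is 0, and None returned when there
# are no trades in the window.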
def calc_inverse(quantity):
return float( (D(1) / D(quantity) ))
def calc_price_change(open, close):
return float((D(100) * (D(close) - D(open)) / D(open)))
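# Examples (editorial): calc_inverse(0.02) is 50.0 (an XLT/LTC rate of 0.02 becomes an
# LTC/XLT rate of 50), and calc_price_change(0.50, 0.77) is ~54.0, i.e. a +54% move.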
def get_price_primatives(start_dt=None, end_dt=None):
mps_xlt_ltc = get_market_price_summary(config.XLT, config.LTC, start_dt=start_dt, end_dt=end_dt)
xlt_ltc_price = mps_xlt_ltc['market_price'] if mps_xlt_ltc else None # == XLT/LTC
ltc_xlt_price = calc_inverse(mps_xlt_ltc['market_price']) if mps_xlt_ltc else None #LTC/XLT
return mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price
def get_asset_info(asset, at_dt=None):
mongo_db = config.mongo_db
asset_info = mongo_db.tracked_assets.find_one({'asset': asset})
if asset not in (config.XLT, config.LTC) and at_dt and asset_info['_at_block_time'] > at_dt:
#get the asset info at or before the given at_dt datetime
for e in reversed(asset_info['_history']): #newest to oldest
if e['_at_block_time'] <= at_dt:
asset_info = e
break
else: #asset was created AFTER at_dt
asset_info = None
if asset_info is None: return None
assert asset_info['_at_block_time'] <= at_dt
#modify some of the properties of the returned asset_info for LTC and XLT
if asset == config.LTC:
if at_dt:
start_block_index, end_block_index = util.get_block_indexes_for_dates(end_dt=at_dt)
asset_info['total_issued'] = util_litecoin.get_ltc_supply(normalize=False, at_block_index=end_block_index)
asset_info['total_issued_normalized'] = util_litecoin.normalize_quantity(asset_info['total_issued'])
else:
asset_info['total_issued'] = util_litecoin.get_ltc_supply(normalize=False)
asset_info['total_issued_normalized'] = util_litecoin.normalize_quantity(asset_info['total_issued'])
elif asset == config.XLT:
#BUG: this does not take end_dt (if specified) into account. however, the deviation won't be too big
# as XLT doesn't deflate quickly at all, and shouldn't matter that much since there weren't any/much trades
# before the end of the burn period (which is what is involved with how we use at_dt with currently)
asset_info['total_issued'] = util.call_jsonrpc_api("get_xlt_supply", abort_on_error=True)['result']
asset_info['total_issued_normalized'] = util_litecoin.normalize_quantity(asset_info['total_issued'])
if not asset_info:
raise Exception("Invalid asset: %s" % asset)
return asset_info
def get_xlt_ltc_price_info(asset, mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price, with_last_trades=0, start_dt=None, end_dt=None):
if asset not in [config.LTC, config.XLT]:
#get price data for both the asset with XLT, as well as LTC
price_summary_in_xlt = get_market_price_summary(asset, config.XLT,
with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
price_summary_in_ltc = get_market_price_summary(asset, config.LTC,
with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
#aggregated (averaged) price (expressed as XLT) for the asset on both the XLT and LTC markets
        if price_summary_in_xlt: #trade data exists
price_in_xlt = price_summary_in_xlt['market_price']
if xlt_ltc_price:
aggregated_price_in_xlt = float(((D(price_summary_in_xlt['market_price']) + D(xlt_ltc_price)) / D(2)))
else: aggregated_price_in_xlt = None
else:
price_in_xlt = None
aggregated_price_in_xlt = None
        if price_summary_in_ltc: #trade data exists
price_in_ltc = price_summary_in_ltc['market_price']
if ltc_xlt_price:
aggregated_price_in_ltc = float(((D(price_summary_in_ltc['market_price']) + D(ltc_xlt_price)) / D(2)))
else: aggregated_price_in_ltc = None
else:
aggregated_price_in_ltc = None
price_in_ltc = None
else:
#here we take the normal XLT/LTC pair, and invert it to LTC/XLT, to get XLT's data in terms of a LTC base
# (this is the only area we do this, as LTC/XLT is NOT standard pair ordering)
price_summary_in_xlt = mps_xlt_ltc #might be None
price_summary_in_ltc = copy.deepcopy(mps_xlt_ltc) if mps_xlt_ltc else None #must invert this -- might be None
if price_summary_in_ltc:
price_summary_in_ltc['market_price'] = calc_inverse(price_summary_in_ltc['market_price'])
price_summary_in_ltc['base_asset'] = config.LTC
price_summary_in_ltc['quote_asset'] = config.XLT
for i in xrange(len(price_summary_in_ltc['last_trades'])):
#[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
price_summary_in_ltc['last_trades'][i][1] = calc_inverse(price_summary_in_ltc['last_trades'][i][1])
price_summary_in_ltc['last_trades'][i][2], price_summary_in_ltc['last_trades'][i][3] = \
price_summary_in_ltc['last_trades'][i][3], price_summary_in_ltc['last_trades'][i][2] #swap
if asset == config.XLT:
price_in_xlt = 1.0
price_in_ltc = price_summary_in_ltc['market_price'] if price_summary_in_ltc else None
aggregated_price_in_xlt = 1.0
aggregated_price_in_ltc = ltc_xlt_price #might be None
else:
assert asset == config.LTC
price_in_xlt = price_summary_in_xlt['market_price'] if price_summary_in_xlt else None
price_in_ltc = 1.0
aggregated_price_in_xlt = xlt_ltc_price #might be None
aggregated_price_in_ltc = 1.0
return (price_summary_in_xlt, price_summary_in_ltc, price_in_xlt, price_in_ltc, aggregated_price_in_xlt, aggregated_price_in_ltc)
def calc_market_cap(asset_info, price_in_xlt, price_in_ltc):
market_cap_in_xlt = float( (D(asset_info['total_issued_normalized']) / D(price_in_xlt))) if price_in_xlt else None
market_cap_in_ltc = float( (D(asset_info['total_issued_normalized']) / D(price_in_ltc))) if price_in_ltc else None
return market_cap_in_xlt, market_cap_in_ltc
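# Illustration (editorial): price_in_xlt here means "units of the asset per 1 XLT" (see the
# key comments in compile_summary_market_info below), which is why market cap is a division:
# a supply of 1,000,000 units at 200 units/XLT gives a market cap of 5,000 XLT.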
def compile_summary_market_info(asset, mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price):
"""Returns information related to capitalization, volume, etc for the supplied asset(s)
NOTE: in_ltc == base asset is LTC, in_xlt == base asset is XLT
@param assets: A list of one or more assets
"""
asset_info = get_asset_info(asset)
(price_summary_in_xlt, price_summary_in_ltc, price_in_xlt, price_in_ltc, aggregated_price_in_xlt, aggregated_price_in_ltc
) = get_xlt_ltc_price_info(asset, mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price, with_last_trades=30)
market_cap_in_xlt, market_cap_in_ltc = calc_market_cap(asset_info, price_in_xlt, price_in_ltc)
return {
'price_in_{}'.format(config.XLT.lower()): price_in_xlt, #current price of asset vs XLT (e.g. how many units of asset for 1 unit XLT)
'price_in_{}'.format(config.LTC.lower()): price_in_ltc, #current price of asset vs LTC (e.g. how many units of asset for 1 unit LTC)
'price_as_{}'.format(config.XLT.lower()): calc_inverse(price_in_xlt) if price_in_xlt else None, #current price of asset AS XLT
'price_as_{}'.format(config.LTC.lower()): calc_inverse(price_in_ltc) if price_in_ltc else None, #current price of asset AS LTC
'aggregated_price_in_{}'.format(config.XLT.lower()): aggregated_price_in_xlt,
'aggregated_price_in_{}'.format(config.LTC.lower()): aggregated_price_in_ltc,
'aggregated_price_as_{}'.format(config.XLT.lower()): calc_inverse(aggregated_price_in_xlt) if aggregated_price_in_xlt else None,
'aggregated_price_as_{}'.format(config.LTC.lower()): calc_inverse(aggregated_price_in_ltc) if aggregated_price_in_ltc else None,
'total_supply': asset_info['total_issued_normalized'],
'market_cap_in_{}'.format(config.XLT.lower()): market_cap_in_xlt,
'market_cap_in_{}'.format(config.LTC.lower()): market_cap_in_ltc,
}
def compile_24h_market_info(asset):
asset_data = {}
start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
mongo_db = config.mongo_db
#perform aggregation to get 24h statistics
#TOTAL volume and count across all trades for the asset (on ALL markets, not just XLT and LTC pairings)
_24h_vols = {'vol': 0, 'count': 0}
_24h_vols_as_base = mongo_db.trades.aggregate([
{"$match": {
"base_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_vols_as_base = {} if not _24h_vols_as_base['ok'] \
or not len(_24h_vols_as_base['result']) else _24h_vols_as_base['result'][0]
_24h_vols_as_quote = mongo_db.trades.aggregate([
{"$match": {
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"quote_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"vol": {"$sum": "quote_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_vols_as_quote = {} if not _24h_vols_as_quote['ok'] \
or not len(_24h_vols_as_quote['result']) else _24h_vols_as_quote['result'][0]
_24h_vols['vol'] = _24h_vols_as_base.get('vol', 0) + _24h_vols_as_quote.get('vol', 0)
_24h_vols['count'] = _24h_vols_as_base.get('count', 0) + _24h_vols_as_quote.get('count', 0)
#XLT market volume with stats
if asset != config.XLT:
_24h_ohlc_in_xlt = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.XLT,
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_ohlc_in_xlt = {} if not _24h_ohlc_in_xlt['ok'] \
or not len(_24h_ohlc_in_xlt['result']) else _24h_ohlc_in_xlt['result'][0]
if _24h_ohlc_in_xlt: del _24h_ohlc_in_xlt['_id']
else:
_24h_ohlc_in_xlt = {}
#LTC market volume with stats
if asset != config.LTC:
_24h_ohlc_in_ltc = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.LTC,
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_ohlc_in_ltc = {} if not _24h_ohlc_in_ltc['ok'] \
or not len(_24h_ohlc_in_ltc['result']) else _24h_ohlc_in_ltc['result'][0]
if _24h_ohlc_in_ltc: del _24h_ohlc_in_ltc['_id']
else:
_24h_ohlc_in_ltc = {}
return {
'24h_summary': _24h_vols,
#^ total quantity traded of that asset in all markets in last 24h
        '24h_ohlc_in_{}'.format(config.XLT.lower()): _24h_ohlc_in_xlt,
        #^ OHLC and volume of the asset traded against XLT in the last 24h
        '24h_ohlc_in_{}'.format(config.LTC.lower()): _24h_ohlc_in_ltc,
        #^ OHLC and volume of the asset traded against LTC in the last 24h
'24h_vol_price_change_in_{}'.format(config.XLT.lower()): calc_price_change(_24h_ohlc_in_xlt['open'], _24h_ohlc_in_xlt['close'])
if _24h_ohlc_in_xlt else None,
        #^ aggregated price change from 24h ago to now, expressed as a signed percentage (e.g. 54 is +54%, -112 is -112%)
'24h_vol_price_change_in_{}'.format(config.LTC.lower()): calc_price_change(_24h_ohlc_in_ltc['open'], _24h_ohlc_in_ltc['close'])
if _24h_ohlc_in_ltc else None,
}
def compile_7d_market_info(asset):
mongo_db = config.mongo_db
start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
#get XLT and LTC market summarized trades over a 7d period (quantize to hour long slots)
_7d_history_in_xlt = None # xlt/asset market (or xlt/ltc for xlt or ltc)
_7d_history_in_ltc = None # ltc/asset market (or ltc/xlt for xlt or ltc)
if asset not in [config.LTC, config.XLT]:
for a in [config.XLT, config.LTC]:
_7d_history = mongo_db.trades.aggregate([
{"$match": {
"base_asset": a,
"quote_asset": asset,
"block_time": {"$gte": start_dt_7d }
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"price": {"$avg": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
}},
])
_7d_history = [] if not _7d_history['ok'] else _7d_history['result']
if a == config.XLT: _7d_history_in_xlt = _7d_history
else: _7d_history_in_ltc = _7d_history
else: #get the XLT/LTC market and invert for LTC/XLT (_7d_history_in_ltc)
_7d_history = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.XLT,
"quote_asset": config.LTC,
"block_time": {"$gte": start_dt_7d }
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"price": {"$avg": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
}},
])
_7d_history = [] if not _7d_history['ok'] else _7d_history['result']
_7d_history_in_xlt = _7d_history
_7d_history_in_ltc = copy.deepcopy(_7d_history_in_xlt)
for i in xrange(len(_7d_history_in_ltc)):
_7d_history_in_ltc[i]['price'] = calc_inverse(_7d_history_in_ltc[i]['price'])
_7d_history_in_ltc[i]['vol'] = calc_inverse(_7d_history_in_ltc[i]['vol'])
for l in [_7d_history_in_xlt, _7d_history_in_ltc]:
for e in l: #convert our _id field out to be an epoch ts (in ms), and delete _id
e['when'] = time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day'], e['_id']['hour']).timetuple()) * 1000
del e['_id']
return {
'7d_history_in_{}'.format(config.XLT.lower()): [[e['when'], e['price']] for e in _7d_history_in_xlt],
'7d_history_in_{}'.format(config.LTC.lower()): [[e['when'], e['price']] for e in _7d_history_in_ltc],
}
def compile_asset_pair_market_info():
"""Compiles the pair-level statistics that show on the View Prices page of litetokenswallet, for instance"""
#loop through all open orders, and compile a listing of pairs, with a count of open orders for each pair
mongo_db = config.mongo_db
end_dt = datetime.datetime.utcnow()
start_dt = end_dt - datetime.timedelta(days=1)
start_block_index, end_block_index = util.get_block_indexes_for_dates(start_dt=start_dt, end_dt=end_dt)
open_orders = util.call_jsonrpc_api("get_orders",
{ 'filters': [
{'field': 'give_remaining', 'op': '>', 'value': 0},
{'field': 'get_remaining', 'op': '>', 'value': 0},
{'field': 'fee_required_remaining', 'op': '>=', 'value': 0},
{'field': 'fee_provided_remaining', 'op': '>=', 'value': 0},
],
'status': 'open',
'show_expired': False,
}, abort_on_error=True)['result']
pair_data = {}
asset_info = {}
def get_price(base_quantity_normalized, quote_quantity_normalized):
        return float(D(quote_quantity_normalized) / D(base_quantity_normalized))
#COMPOSE order depth, lowest ask, and highest bid column data
for o in open_orders:
(base_asset, quote_asset) = util.assets_to_asset_pair(o['give_asset'], o['get_asset'])
pair = '%s/%s' % (base_asset, quote_asset)
base_asset_info = asset_info.get(base_asset, mongo_db.tracked_assets.find_one({ 'asset': base_asset }))
if base_asset not in asset_info: asset_info[base_asset] = base_asset_info
quote_asset_info = asset_info.get(quote_asset, mongo_db.tracked_assets.find_one({ 'asset': quote_asset }))
if quote_asset not in asset_info: asset_info[quote_asset] = quote_asset_info
pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None,
'completed_trades_count': 0, 'vol_base': 0, 'vol_quote': 0})
#^ highest ask = open order selling base, highest bid = open order buying base
#^ we also initialize completed_trades_count, vol_base, vol_quote because every pair inited here may
        # not have corresponding data out of the trades_data_by_pair aggregation below
pair_data[pair]['open_orders_count'] += 1
base_quantity_normalized = util_litecoin.normalize_quantity(o['give_quantity'] if base_asset == o['give_asset'] else o['get_quantity'], base_asset_info['divisible'])
quote_quantity_normalized = util_litecoin.normalize_quantity(o['give_quantity'] if quote_asset == o['give_asset'] else o['get_quantity'], quote_asset_info['divisible'])
order_price = get_price(base_quantity_normalized, quote_quantity_normalized)
if base_asset == o['give_asset']: #selling base
if pair_data[pair]['lowest_ask'] is None or order_price < pair_data[pair]['lowest_ask']:
pair_data[pair]['lowest_ask'] = order_price
elif base_asset == o['get_asset']: #buying base
if pair_data[pair]['highest_bid'] is None or order_price > pair_data[pair]['highest_bid']:
pair_data[pair]['highest_bid'] = order_price
#COMPOSE volume data (in XLT and LTC), and % change data
#loop through all trade volume over the past 24h, and match that to the open orders
trades_data_by_pair = mongo_db.trades.aggregate([
{"$match": {
"block_time": {"$gte": start_dt, "$lte": end_dt } }
},
{"$project": {
"base_asset": 1,
"quote_asset": 1,
"base_quantity_normalized": 1, #to derive base volume
"quote_quantity_normalized": 1 #to derive quote volume
}},
{"$group": {
"_id": {"base_asset": "$base_asset", "quote_asset": "$quote_asset"},
"vol_base": {"$sum": "$base_quantity_normalized"},
"vol_quote": {"$sum": "$quote_quantity_normalized"},
"count": {"$sum": 1},
}}
])
trades_data_by_pair = [] if not trades_data_by_pair['ok'] else trades_data_by_pair['result']
for e in trades_data_by_pair:
pair = '%s/%s' % (e['_id']['base_asset'], e['_id']['quote_asset'])
pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None})
#^ initialize an empty pair in the event there are no open orders for that pair, but there ARE completed trades for it
pair_data[pair]['completed_trades_count'] = e['count']
pair_data[pair]['vol_base'] = e['vol_base']
pair_data[pair]['vol_quote'] = e['vol_quote']
#compose price data, relative to LTC and XLT
mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price = get_price_primatives()
for pair, e in pair_data.iteritems():
base_asset, quote_asset = pair.split('/')
_24h_vol_in_ltc = None
_24h_vol_in_xlt = None
#derive asset price data, expressed in LTC and XLT, for the given volumes
if base_asset == config.XLT:
_24h_vol_in_xlt = e['vol_base']
_24h_vol_in_ltc = util_litecoin.round_out(e['vol_base'] * xlt_ltc_price) if xlt_ltc_price else 0
elif base_asset == config.LTC:
_24h_vol_in_xlt = util_litecoin.round_out(e['vol_base'] * ltc_xlt_price) if ltc_xlt_price else 0
_24h_vol_in_ltc = e['vol_base']
else: #base is not XLT or LTC
price_summary_in_xlt, price_summary_in_ltc, price_in_xlt, price_in_ltc, aggregated_price_in_xlt, aggregated_price_in_ltc = \
get_xlt_ltc_price_info(base_asset, mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
if price_in_xlt:
_24h_vol_in_xlt = util_litecoin.round_out(e['vol_base'] * price_in_xlt)
if price_in_ltc:
_24h_vol_in_ltc = util_litecoin.round_out(e['vol_base'] * price_in_ltc)
if _24h_vol_in_xlt is None or _24h_vol_in_ltc is None:
#the base asset didn't have price data against LTC or XLT, or both...try against the quote asset instead
price_summary_in_xlt, price_summary_in_ltc, price_in_xlt, price_in_ltc, aggregated_price_in_xlt, aggregated_price_in_ltc = \
get_xlt_ltc_price_info(quote_asset, mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
if _24h_vol_in_xlt is None and price_in_xlt:
_24h_vol_in_xlt = util_litecoin.round_out(e['vol_quote'] * price_in_xlt)
if _24h_vol_in_ltc is None and price_in_ltc:
_24h_vol_in_ltc = util_litecoin.round_out(e['vol_quote'] * price_in_ltc)
pair_data[pair]['24h_vol_in_{}'.format(config.XLT.lower())] = _24h_vol_in_xlt #might still be None
pair_data[pair]['24h_vol_in_{}'.format(config.LTC.lower())] = _24h_vol_in_ltc #might still be None
#get % change stats -- start by getting the first trade directly before the 24h period starts
prev_trade = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset,
"block_time": {'$lt': start_dt}}).sort('block_time', pymongo.DESCENDING).limit(1)
latest_trade = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset}).sort('block_time', pymongo.DESCENDING).limit(1)
if not prev_trade.count(): #no previous trade before this 24hr period
pair_data[pair]['24h_pct_change'] = None
else:
prev_trade = prev_trade[0]
latest_trade = latest_trade[0]
prev_trade_price = get_price(prev_trade['base_quantity_normalized'], prev_trade['quote_quantity_normalized'])
latest_trade_price = get_price(latest_trade['base_quantity_normalized'], latest_trade['quote_quantity_normalized'])
pair_data[pair]['24h_pct_change'] = ((latest_trade_price - prev_trade_price) / prev_trade_price) * 100
pair_data[pair]['last_updated'] = end_dt
#print "PRODUCED", pair, pair_data[pair]
mongo_db.asset_pair_market_info.update( {'base_asset': base_asset, 'quote_asset': quote_asset}, {"$set": pair_data[pair]}, upsert=True)
#remove any old pairs that were not just updated
mongo_db.asset_pair_market_info.remove({'last_updated': {'$lt': end_dt}})
logging.info("Recomposed 24h trade statistics for %i asset pairs: %s" % (len(pair_data), ', '.join(pair_data.keys())))
def compile_asset_market_info():
"""Run through all assets and compose and store market ranking information."""
mongo_db = config.mongo_db
if not config.CAUGHT_UP:
logging.warn("Not updating asset market info as CAUGHT_UP is false.")
return False
#grab the last block # we processed assets data off of
last_block_assets_compiled = mongo_db.app_config.find_one()['last_block_assets_compiled']
last_block_time_assets_compiled = util.get_block_time(last_block_assets_compiled)
#logging.debug("Comping info for assets traded since block %i" % last_block_assets_compiled)
current_block_index = config.CURRENT_BLOCK_INDEX #store now as it may change as we are compiling asset data :)
current_block_time = util.get_block_time(current_block_index)
if current_block_index == last_block_assets_compiled:
#all caught up -- call again in 10 minutes
return True
mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price = get_price_primatives()
all_traded_assets = list(set(list([config.LTC, config.XLT]) + list(mongo_db.trades.find({}, {'quote_asset': 1, '_id': 0}).distinct('quote_asset'))))
#######################
#get a list of all assets with a trade within the last 24h (not necessarily just against XLT and LTC)
# ^ this is important because compiled market info has a 24h vol parameter that designates total volume for the asset across ALL pairings
start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('base_asset'))
))
for asset in assets:
market_info_24h = compile_24h_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_24h})
#for all others (i.e. no trade in the last 24 hours), zero out the 24h trade data
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'24h_summary': {'vol': 0, 'count': 0},
'24h_ohlc_in_{}'.format(config.XLT.lower()): {},
'24h_ohlc_in_{}'.format(config.LTC.lower()): {},
'24h_vol_price_change_in_{}'.format(config.XLT.lower()): None,
'24h_vol_price_change_in_{}'.format(config.LTC.lower()): None,
}}, multi=True)
logging.info("Block: %s -- Calculated 24h stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#get a list of all assets with a trade within the last 7d up against XLT and LTC
start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}, 'base_asset': {'$in': [config.XLT, config.LTC]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}}).distinct('base_asset'))
))
for asset in assets:
market_info_7d = compile_7d_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_7d})
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'7d_history_in_{}'.format(config.XLT.lower()): [],
'7d_history_in_{}'.format(config.LTC.lower()): [],
}}, multi=True)
logging.info("Block: %s -- Calculated 7d stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#update summary market data for assets traded since last_block_assets_compiled
#get assets that were traded since the last check with either LTC or XLT, and update their market summary data
assets = list(set(
list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}, 'base_asset': {'$in': [config.XLT, config.LTC]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).distinct('base_asset'))
))
#update our storage of the latest market info in mongo
for asset in assets:
logging.info("Block: %s -- Updating asset market info for %s ..." % (current_block_index, asset))
summary_info = compile_summary_market_info(asset, mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price)
mongo_db.asset_market_info.update( {'asset': asset}, {"$set": summary_info}, upsert=True)
#######################
#next, compile market cap historicals (and get the market price data that we can use to update assets with new trades)
    #NOTE: this algorithm still needs to be fleshed out some...I'm not convinced it's laid out/optimized like it should be
#start by getting all trades from when we last compiled this data
trades = mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).sort('block_index', pymongo.ASCENDING)
trades_by_block = [] #tracks assets compiled per block, as we only want to analyze any given asset once per block
trades_by_block_mapping = {}
#organize trades by block
for t in trades:
if t['block_index'] in trades_by_block_mapping:
assert trades_by_block_mapping[t['block_index']]['block_index'] == t['block_index']
assert trades_by_block_mapping[t['block_index']]['block_time'] == t['block_time']
trades_by_block_mapping[t['block_index']]['trades'].append(t)
else:
e = {'block_index': t['block_index'], 'block_time': t['block_time'], 'trades': [t,]}
trades_by_block.append(e)
trades_by_block_mapping[t['block_index']] = e
for t_block in trades_by_block:
#reverse the tradelist per block, and ensure that we only process an asset that hasn't already been processed for this block
# (as there could be multiple trades in a single block for any specific asset). we reverse the list because
# we'd rather process a later trade for a given asset, as the market price for that will take into account
# the earlier trades on that same block for that asset, and we don't want/need multiple cap points per block
assets_in_block = {}
mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price = get_price_primatives(end_dt=t_block['block_time'])
for t in reversed(t_block['trades']):
assets = []
if t['base_asset'] not in assets_in_block:
assets.append(t['base_asset'])
assets_in_block[t['base_asset']] = True
if t['quote_asset'] not in assets_in_block:
assets.append(t['quote_asset'])
assets_in_block[t['quote_asset']] = True
if not len(assets): continue
for asset in assets:
#recalculate the market cap for the asset this trade is for
asset_info = get_asset_info(asset, at_dt=t['block_time'])
(price_summary_in_xlt, price_summary_in_ltc, price_in_xlt, price_in_ltc, aggregated_price_in_xlt, aggregated_price_in_ltc
) = get_xlt_ltc_price_info(asset, mps_xlt_ltc, xlt_ltc_price, ltc_xlt_price, with_last_trades=0, end_dt=t['block_time'])
market_cap_in_xlt, market_cap_in_ltc = calc_market_cap(asset_info, price_in_xlt, price_in_ltc)
#^ this will get price data from the block time of this trade back the standard number of days and trades
# to determine our standard market price, relative (anchored) to the time of this trade
for market_cap_as in (config.XLT, config.LTC):
market_cap = market_cap_in_xlt if market_cap_as == config.XLT else market_cap_in_ltc
#if there is a previously stored market cap for this asset, add a new history point only if the two caps differ
prev_market_cap_history = mongo_db.asset_marketcap_history.find({'market_cap_as': market_cap_as, 'asset': asset,
'block_index': {'$lt': t['block_index']}}).sort('block_index', pymongo.DESCENDING).limit(1)
prev_market_cap_history = list(prev_market_cap_history)[0] if prev_market_cap_history.count() == 1 else None
if market_cap and (not prev_market_cap_history or prev_market_cap_history['market_cap'] != market_cap):
mongo_db.asset_marketcap_history.insert({
'block_index': t['block_index'],
'block_time': t['block_time'],
'asset': asset,
'market_cap': market_cap,
'market_cap_as': market_cap_as,
})
logging.info("Block %i -- Calculated market cap history point for %s as %s (mID: %s)" % (t['block_index'], asset, market_cap_as, t['message_index']))
mongo_db.app_config.update({}, {'$set': {'last_block_assets_compiled': current_block_index}})
return True
|
|
#!/usr/bin/env python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs a full scan against a target URL using ZAP
#
# It can either be run 'standalone', in which case it depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# By default it will spider the target URL with no time limit, but you can change
# that via the -m parameter.
# It will then perform an active scan of all of the URLs found by the spider.
# This may take a significant amount of time.
# It will exit with codes of:
# 0: Success
# 1: At least 1 FAIL
# 2: At least one WARN and no FAILs
# 3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
# By default all of the active scan rules run but you can prevent rules from
# running by supplying a configuration file with the rules set to IGNORE.
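# An illustrative, hypothetical excerpt of such a configuration file (the real rule ids and
# names come from the template generated with -g). Fields are tab separated, and an optional
# custom message may follow after another tab:
#   12345<TAB>WARN<TAB>(Some Passive Rule - Passive/release)
#   23456<TAB>FAIL<TAB>(Some Active Rule - Active/beta)<TAB>This rule must never fire
#   34567<TAB>IGNORE<TAB>(Some Noisy Rule - Passive/alpha)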
import getopt
import json
import logging
import os
import os.path
import sys
import time
import urllib2
from datetime import datetime
from zapv2 import ZAPv2
from zap_common import *
config_dict = {}
config_msg = {}
out_of_scope_dict = {}
levels = ["PASS", "IGNORE", "INFO", "WARN", "FAIL"]
min_level = 0
# Scan rules that aren't really relevant, e.g. the example rules in the alpha set
blacklist = ['-1', '50003', '60000', '60001']
# Scan rules that are being addressed
in_progress_issues = {}
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# Hide "Starting new HTTP connection" messages
logging.getLogger("requests").setLevel(logging.WARNING)
def usage():
print('Usage: zap-full-scan.py -t <target> [options]')
    print(' -t target target URL including the protocol, e.g. https://www.example.com')
print('Options:')
print(' -c config_file config file to use to INFO, IGNORE or FAIL warnings')
print(' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings')
    print(' -g gen_file generate default config file (all rules set to WARN)')
    print(' -m mins the number of minutes to spider for (default 0: no time limit)')
print(' -r report_html file to write the full ZAP HTML report')
    print(' -w report_md file to write the full ZAP Wiki (Markdown) report')
print(' -x report_xml file to write the full ZAP XML report')
print(' -J report_json file to write the full ZAP JSON document')
print(' -a include the alpha passive scan rules as well')
print(' -d show debug messages')
print(' -P specify listen port')
print(' -D delay in seconds to wait for passive scanning ')
print(' -i default rules not in the config file to INFO')
print(' -j use the Ajax spider in addition to the traditional one')
print(' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs')
print(' -n context_file context file which will be loaded prior to scanning the target')
print(' -p progress_file progress file which specifies issues that are being addressed')
    print(' -s short output format - don\'t show PASSes or example URLs')
print(' -T max time in minutes to wait for ZAP to start and the passive scan to run')
print(' -z zap_options ZAP command line options e.g. -z "-config aaa=bbb -config ccc=ddd"')
print('')
print('For more details see https://github.com/zaproxy/zaproxy/wiki/ZAP-Full-Scan')
def main(argv):
global min_level
global in_progress_issues
cid = ''
context_file = ''
progress_file = ''
config_file = ''
config_url = ''
mins = 0
generate = ''
port = 0
detailed_output = True
report_html = ''
report_md = ''
report_xml = ''
report_json = ''
target = ''
zap_alpha = False
info_unspecified = False
ajax = False
base_dir = ''
zap_ip = 'localhost'
zap_options = ''
delay = 0
timeout = 0
pass_count = 0
warn_count = 0
fail_count = 0
info_count = 0
ignore_count = 0
warn_inprog_count = 0
fail_inprog_count = 0
check_zap_client_version()
try:
opts, args = getopt.getopt(argv, "t:c:u:g:m:n:r:J:w:x:l:daijp:sz:P:D:T:")
except getopt.GetoptError as exc:
logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg)
usage()
sys.exit(3)
for opt, arg in opts:
if opt == '-t':
target = arg
logging.debug('Target: ' + target)
elif opt == '-c':
config_file = arg
elif opt == '-u':
config_url = arg
elif opt == '-g':
generate = arg
elif opt == '-d':
logging.getLogger().setLevel(logging.DEBUG)
elif opt == '-m':
mins = int(arg)
elif opt == '-P':
port = int(arg)
elif opt == '-D':
delay = int(arg)
elif opt == '-n':
context_file = arg
elif opt == '-p':
progress_file = arg
elif opt == '-r':
report_html = arg
elif opt == '-J':
report_json = arg
elif opt == '-w':
report_md = arg
elif opt == '-x':
report_xml = arg
elif opt == '-a':
zap_alpha = True
elif opt == '-i':
info_unspecified = True
elif opt == '-j':
ajax = True
elif opt == '-l':
try:
min_level = levels.index(arg)
except ValueError:
logging.warning('Level must be one of ' + str(levels))
usage()
sys.exit(3)
elif opt == '-z':
zap_options = arg
elif opt == '-s':
detailed_output = False
elif opt == '-T':
timeout = int(arg)
# Check target supplied and ok
if len(target) == 0:
usage()
sys.exit(3)
if not (target.startswith('http://') or target.startswith('https://')):
logging.warning('Target must start with \'http://\' or \'https://\'')
usage()
sys.exit(3)
if running_in_docker():
base_dir = '/zap/wrk/'
if config_file or generate or report_html or report_xml or report_json or progress_file or context_file:
# Check directory has been mounted
if not os.path.exists(base_dir):
logging.warning('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
usage()
sys.exit(3)
# Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option
if port == 0:
port = get_free_port()
logging.debug('Using port: ' + str(port))
if config_file:
# load config file from filestore
with open(base_dir + config_file) as f:
load_config(f, config_dict, config_msg, out_of_scope_dict)
elif config_url:
# load config file from url
try:
load_config(urllib2.urlopen(config_url), config_dict, config_msg, out_of_scope_dict)
except:
logging.warning('Failed to read configs from ' + config_url)
sys.exit(3)
if progress_file:
# load progress file from filestore
with open(base_dir + progress_file) as f:
progress = json.load(f)
# parse into something more useful...
# in_prog_issues = map of vulnid -> {object with everything in}
for issue in progress["issues"]:
if issue["state"] == "inprogress":
in_progress_issues[issue["id"]] = issue
if running_in_docker():
try:
params = [
'-config', 'spider.maxDuration=' + str(mins),
'-addonupdate',
'-addoninstall', 'pscanrulesBeta'] # In case we're running in the stable container
if zap_alpha:
params.append('-addoninstall')
params.append('pscanrulesAlpha')
if zap_options:
for zap_opt in zap_options.split(" "):
params.append(zap_opt)
start_zap(port, params)
except OSError:
logging.warning('Failed to start ZAP :(')
sys.exit(3)
else:
# Not running in docker, so start one
mount_dir = ''
if context_file:
mount_dir = os.path.dirname(os.path.abspath(context_file))
params = [
'-config', 'spider.maxDuration=' + str(mins),
'-addonupdate',
'-addoninstall', 'pscanrulesBeta'] # In case we're running in the stable container
if (zap_alpha):
params.extend(['-addoninstall', 'pscanrulesAlpha'])
if zap_options:
for zap_opt in zap_options.split(" "):
params.append(zap_opt)
try:
cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir)
zap_ip = ipaddress_for_cid(cid)
logging.debug('Docker ZAP IP Addr: ' + zap_ip)
except OSError:
logging.warning('Failed to start ZAP in docker :(')
sys.exit(3)
try:
zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)})
wait_for_zap_start(zap, timeout * 60)
if context_file:
            # handle the context file, can't use base_dir as it might not have been set up
res = zap.context.import_context('/zap/wrk/' + os.path.basename(context_file))
if res.startswith("ZAP Error"):
logging.error('Failed to load context file ' + context_file + ' : ' + res)
zap_access_target(zap, target)
if target.count('/') > 2:
# The url can include a valid path, but always reset to spider the host
target = target[0:target.index('/', 8)+1]
time.sleep(2)
# Spider target
zap_spider(zap, target)
if (ajax):
zap_ajax_spider(zap, target, mins)
if (delay):
start_scan = datetime.now()
while ((datetime.now() - start_scan).seconds < delay):
time.sleep(5)
logging.debug('Delay active scan ' + str(delay -(datetime.now() - start_scan).seconds) + ' seconds')
if target.count('/') > 2:
# The url can include a valid path, but always reset to scan the host
target = target[0:target.index('/', 8)+1]
# Set up the scan policy
scan_policy = 'Default Policy'
if config_dict:
# They have supplied a config file, use this to define the ascan rules
zap.ascan.enable_all_scanners(scanpolicyname=scan_policy)
for scanner, state in config_dict.items():
if state == 'IGNORE':
                    # Don't bother checking the result - this will fail for pscan rules
zap.ascan.set_scanner_alert_threshold(id=scanner, alertthreshold='OFF', scanpolicyname=scan_policy)
zap_active_scan(zap, target, scan_policy)
zap_wait_for_passive_scan(zap, timeout * 60)
# Print out a count of the number of urls
num_urls = len(zap.core.urls)
if num_urls == 0:
logging.warning('No URLs found - is the target URL accessible? Local services may not be accessible from the Docker container')
else:
if detailed_output:
print('Total of ' + str(num_urls) + ' URLs')
alert_dict = zap_get_alerts(zap, target, blacklist, out_of_scope_dict)
all_ascan_rules = zap.ascan.scanners('Default Policy')
all_pscan_rules = zap.pscan.scanners
all_dict = {}
for rule in all_pscan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
all_dict[plugin_id] = rule.get('name') + ' - Passive/' + rule.get('quality')
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
all_dict[plugin_id] = rule.get('name') + ' - Active/' + rule.get('quality')
if generate:
# Create the config file
with open(base_dir + generate, 'w') as f:
f.write('# zap-full-scan rule configuration file\n')
f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n')
f.write('# Active scan rules set to IGNORE will not be run which will speed up the scan\n')
f.write('# Only the rule identifiers are used - the names are just for info\n')
f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n')
for key, rule in sorted(all_dict.iteritems()):
f.write(key + '\tWARN\t(' + rule + ')\n')
# print out the passing rules
pass_dict = {}
for rule in all_pscan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
if (not alert_dict.has_key(plugin_id)):
pass_dict[plugin_id] = rule.get('name')
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
if not alert_dict.has_key(plugin_id) and not(config_dict.has_key(plugin_id) and config_dict[plugin_id] == 'IGNORE'):
pass_dict[plugin_id] = rule.get('name')
if min_level == levels.index("PASS") and detailed_output:
for key, rule in sorted(pass_dict.iteritems()):
print('PASS: ' + rule + ' [' + key + ']')
pass_count = len(pass_dict)
if detailed_output:
# print out the ignored ascan rules(there will be no alerts for these as they were not run)
for rule in all_ascan_rules:
plugin_id = rule.get('id')
if plugin_id in blacklist:
continue
if config_dict.has_key(plugin_id) and config_dict[plugin_id] == 'IGNORE':
print('SKIP: ' + rule.get('name') + ' [' + plugin_id + ']')
# print out the ignored rules
ignore_count, not_used = print_rules(alert_dict, 'IGNORE', config_dict, config_msg, min_level, levels,
inc_ignore_rules, True, detailed_output, {})
# print out the info rules
info_count, not_used = print_rules(alert_dict, 'INFO', config_dict, config_msg, min_level, levels,
inc_info_rules, info_unspecified, detailed_output, in_progress_issues)
# print out the warning rules
warn_count, warn_inprog_count = print_rules(alert_dict, 'WARN', config_dict, config_msg, min_level, levels,
inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues)
# print out the failing rules
fail_count, fail_inprog_count = print_rules(alert_dict, 'FAIL', config_dict, config_msg, min_level, levels,
inc_fail_rules, True, detailed_output, in_progress_issues)
if report_html:
# Save the report
write_report(base_dir + report_html, zap.core.htmlreport())
if report_json:
# Save the report
write_report(base_dir + report_json, zap._request_other(zap.base_other + 'core/other/jsonreport/'))
if report_md:
# Save the report
write_report(base_dir + report_md, zap.core.mdreport())
if report_xml:
# Save the report
write_report(base_dir + report_xml, zap.core.xmlreport())
print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) +
'\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) +
'\tINFO: ' + str(info_count) + '\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count))
# Stop ZAP
zap.core.shutdown()
except IOError as e:
if hasattr(e, 'args') and len(e.args) > 1:
errno, strerror = e
print("ERROR " + str(strerror))
logging.warning('I/O error(' + str(errno) + '): ' + str(strerror))
else:
print("ERROR %s" % e)
logging.warning('I/O error: ' + str(e))
dump_log_file(cid)
except:
print("ERROR " + str(sys.exc_info()[0]))
logging.warning('Unexpected error: ' + str(sys.exc_info()[0]))
dump_log_file(cid)
if not running_in_docker():
stop_docker(cid)
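    # Exit status summarises the counts printed above: 1 if any rule failed,
    # 2 if there were warnings but no failures, 0 if rules passed cleanly,
    # and 3 if nothing was recorded at all.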
if fail_count > 0:
sys.exit(1)
elif warn_count > 0:
sys.exit(2)
elif pass_count > 0:
sys.exit(0)
else:
sys.exit(3)
if __name__ == "__main__":
main(sys.argv[1:])
|
|
import unittest
from mock import Mock, MagicMock, patch, call
from test_treeSetUp import TreeSetUp, node1FEMmembers, node1XFEMmembers, node2FEMmembers, node3FEMmembers, node3XFEMmembers, node5XFEMmembers, node10FEMmembers, node10XFEMmembers
from trees import TreeNode, createTreeFromDbKeys, nodesPerLevel, tracePath, createTreeOfKeys, maxNodesPerLevel, nodeNamesPerLevel
class TestTreeNode_constructor_parent_child_members_methods(unittest.TestCase):
def test_constructor(self):
tn = TreeNode('nodeName')
self.assertEqual('nodeName', tn.name)
self.assertIsNone(tn.parent)
self.assertEqual(0, len(tn.children))
self.assertTrue(isinstance(tn.children, (set, list, tuple)))
self.assertEqual(set([]), tn.failedMembers)
self.assertEqual(set([]), tn.successfulMembers)
def test_setParent_with_None(self):
tn = TreeNode('nodeName')
tn.setParent(None)
self.assertIsNone(tn.parent)
def test_setParent_with_TreeNode_instance(self):
tn = TreeNode('nodeName')
pn = TreeNode('parentNodeName')
tn.setParent(pn)
self.assertIs(pn, tn.parent)
def test_setParent_with_Str_set_tuple(self):
tn = TreeNode('nodeName')
self.assertRaises(TypeError, tn.setParent, 'pnode')
self.assertRaises(TypeError, tn.setParent, [1, 2])
self.assertRaises(TypeError, tn.setParent, (1, 2))
self.assertRaises(TypeError, tn.setParent, set([1, 2]))
def test_setChild_with_invalid_argument(self):
tn = TreeNode('nodeName')
sortMock = MagicMock()
with patch('trees.TreeNode.sortChildren') as sortMock:
self.assertRaises(TypeError, tn.setChild, 'child')
self.assertRaises(TypeError, tn.setChild, ('child',))
self.assertRaises(TypeError, tn.setChild, ['child'])
self.assertRaises(TypeError, tn.setChild, set(['child']))
self.assertFalse(sortMock.called)
def test_setChild_with_TreeNode_instances(self):
tn = TreeNode('nodeName')
cn1 = TreeNode('childNode1')
cn2 = TreeNode('childNode2')
sortMock = MagicMock()
with patch('trees.TreeNode.sortChildren') as sortMock:
tn.setChild(cn1)
self.assertEqual([cn1], tn.children)
sortMock.assert_called_once_with()
tn.setChild(cn2)
self.assertEqual(set([cn1, cn2]), set(tn.children))
self.assertEqual([call(), call()], sortMock.mock_calls)
def test_sortChildren_with_children_names_string_numbers(self):
tn = TreeNode('nodeName')
ch1Mock = MagicMock()
ch2Mock = MagicMock()
ch3Mock = MagicMock()
ch1Mock.getName.return_value = '10'
ch2Mock.getName.return_value = '9'
ch3Mock.getName.return_value = '9.5'
tn.children = [ch1Mock, ch2Mock, ch3Mock]
tn.sortChildren()
self.assertEqual([ch2Mock, ch3Mock, ch1Mock], tn.children)
def test_sortChildren_with_strings(self):
tn = TreeNode('nodeName')
ch1Mock = MagicMock()
ch2Mock = MagicMock()
ch3Mock = MagicMock()
ch1Mock.getName.return_value = 'a'
ch2Mock.getName.return_value = 'b'
ch3Mock.getName.return_value = 'c'
tn.children = [ch2Mock, ch3Mock, ch1Mock]
tn.sortChildren()
self.assertEqual([ch1Mock, ch2Mock, ch3Mock], tn.children)
def test_addMembers_with_str_member(self):
tn = TreeNode('nodeName')
tn.addMembers('simid1', 'successful')
self.assertEqual(set(['simid1']), tn.successfulMembers)
tn.addMembers('simid2', 'successful')
tn.addMembers('simid3', 'failed')
self.assertEqual(set(['simid3']), tn.failedMembers)
tn.addMembers('simid4', 'failed')
self.assertEqual(set(['simid3', 'simid4']), tn.failedMembers)
self.assertEqual(set(['simid1', 'simid2']), tn.successfulMembers)
def test_addMembers_with_sequence(self):
tn = TreeNode('nodeName')
tn.addMembers(['simid1', 'simid2'], 'successful')
self.assertEqual(set(['simid1', 'simid2']), tn.successfulMembers)
self.assertEqual(set([]), tn.failedMembers)
tn.addMembers(['simid3', 'simid4'], 'failed')
self.assertEqual(set(['simid3', 'simid4']), tn.failedMembers)
self.assertEqual(set(['simid1', 'simid2']), tn.successfulMembers)
def test_addMembers_with_None(self):
tn = TreeNode('nodeName')
tn.addMembers(None, 'successful')
tn.addMembers(None, 'failed')
self.assertEqual(set([]), tn.successfulMembers)
self.assertEqual(set([]), tn.failedMembers)
def test_addMembers_with_invalid_arguments(self):
tn = TreeNode('nodeName')
self.assertRaises(AssertionError, tn.addMembers, None, 'unknownType')
self.assertRaises(AssertionError, tn.addMembers, 1, 'successful')
def test_addFailedMember(self):
tn = TreeNode('nodeName')
tn.addFailedMember(None)
self.assertEqual(set([]), tn.failedMembers)
tn.addFailedMember('simid1')
self.assertEqual(set(['simid1']), tn.failedMembers)
tn.addFailedMember('simid2')
self.assertEqual(set(['simid1', 'simid2']), tn.failedMembers)
def test_addSuccessfulMember(self):
tn = TreeNode('nodeName')
tn.addSuccessfulMember(None)
self.assertEqual(set([]), tn.successfulMembers)
tn.addSuccessfulMember('simid1')
self.assertEqual(set(['simid1']), tn.successfulMembers)
tn.addSuccessfulMember('simid2')
self.assertEqual(set(['simid1', 'simid2']), tn.successfulMembers)
def test_addMember_with_successful_simid(self):
sMock1 = MagicMock()
sMock1.getEntryKey.return_value = 'simid1'
sMock1.getAnalysisSuccess.return_value = True
sMock2 = MagicMock()
sMock2.getEntryKey.return_value = 'simid2'
sMock2.getAnalysisSuccess.return_value = True
tn = TreeNode('nodeName')
tn.addMember(sMock1)
self.assertEqual(set(['simid1']), tn.successfulMembers)
tn.addMember(sMock2)
self.assertEqual(set(['simid1', 'simid2']), tn.successfulMembers)
self.assertEqual(set([]), tn.failedMembers)
def test_addMember_with_failed_simid(self):
fMock1 = MagicMock()
fMock1.getEntryKey.return_value = 'simid1'
fMock1.getAnalysisSuccess.return_value = False
fMock2 = MagicMock()
fMock2.getEntryKey.return_value = 'simid2'
fMock2.getAnalysisSuccess.return_value = False
tn = TreeNode('nodeName')
tn.addMember(fMock1)
self.assertEqual(set(['simid1']), tn.failedMembers)
self.assertEqual(set([]), tn.successfulMembers)
tn.addMember(fMock2)
self.assertEqual(set(['simid1', 'simid2']), tn.failedMembers)
self.assertEqual(set([]), tn.successfulMembers)
def test__eq__(self):
tn0 = TreeNode(0)
tn1 = TreeNode(1)
tn01 = TreeNode(0)
self.assertFalse(tn0.__eq__(tn1))
self.assertTrue(tn0.__eq__(tn01))
self.assertRaises(AssertionError, tn0.__eq__, 0)
class TestTreeNode_tree_browsing(TreeSetUp):
def test_getRootNode_with_nonroot_node_argument(self):
self.assertIs(self.root, self.node1FEMs.getRootNode())
self.assertIs(self.root, self.node1XFEMmpLR.getRootNode())
self.assertIs(self.root, self.node2FEMsQF.getRootNode())
self.assertIs(self.root, self.node3FEMeQR.getRootNode())
self.assertIs(self.root, self.node5XFEMcp.getRootNode())
self.assertIs(self.root, self.node10.getRootNode())
self.assertIs(self.root, self.node10FEMeLF.getRootNode())
def test_getRootNode_with_root_node_argument(self):
self.assertIs(self.root, self.root.getRootNode())
def test_getNodeLevelInTree(self):
self.assertEqual(0, self.root.getNodeLevelInTree())
self.assertEqual(1, self.node1.getNodeLevelInTree())
self.assertEqual(1, self.node10.getNodeLevelInTree())
self.assertEqual(1, self.node3.getNodeLevelInTree())
self.assertEqual(2, self.node1FEM.getNodeLevelInTree())
self.assertEqual(2, self.node1XFEM.getNodeLevelInTree())
self.assertEqual(2, self.node2FEM.getNodeLevelInTree())
self.assertEqual(3, self.node1FEMe.getNodeLevelInTree())
self.assertEqual(3, self.node1FEMs.getNodeLevelInTree())
self.assertEqual(3, self.node1XFEMcp.getNodeLevelInTree())
self.assertEqual(3, self.node2FEMs.getNodeLevelInTree())
self.assertEqual(4, self.node1FEMsQR.getNodeLevelInTree())
self.assertEqual(4, self.node1FEMeQF.getNodeLevelInTree())
self.assertEqual(4, self.node10FEMeLF.getNodeLevelInTree())
self.assertEqual(4, self.node2FEMsQF.getNodeLevelInTree())
def test_hasChildNode_with_existing_child_nodes(self):
self.assertTrue(self.root.hasChildNode('1.0'))
self.assertTrue(self.root.hasChildNode('10.0'))
self.assertTrue(self.root.hasChildNode('5.0'))
self.assertTrue(self.node1.hasChildNode('FEM'))
self.assertTrue(self.node2FEMs.hasChildNode('QF'))
def test_hasChildNode_with_non_existing_child_nodes(self):
self.assertFalse(self.node10XFEMcp.hasChildNode(''))
self.assertFalse(self.node5XFEM.hasChildNode('mp'))
self.assertFalse(self.node2FEMs.hasChildNode('QR'))
self.assertFalse(self.root.hasChildNode('sampleNode'))
self.assertFalse(self.node1FEM.hasChildNode('FEM'))
self.assertFalse(self.node1FEMsQR.hasChildNode('FEM'))
self.assertFalse(self.node1XFEMmpLR.hasChildNode('mp'))
self.assertFalse(self.node2FEMsQF.hasChildNode('2.0'))
self.assertFalse(self.node10XFEMcpLR.hasChildNode('LF'))
def test_getTreeBranch_with_ambiguous_path(self):
self.assertRaises(
KeyError, self.root.getTreeBranch, [
'XFEM', 'cp', 'LT'])
self.assertRaises(KeyError, self.root.getTreeBranch, ['QR'])
self.assertRaises(KeyError, self.root.getTreeBranch,
['FEM', 'elliptic', 'QF'])
self.assertRaises(KeyError, self.root.getTreeBranch,
['FEM', 'elliptic', 'LF'])
def test_getTreeBranch_with_nonexisting_path(self):
self.assertRaises(KeyError, self.root.getTreeBranch,
['1.0', 'FEM', 'scale', 'QF'])
self.assertRaises(
KeyError, self.node2.getTreeBranch, [
'11', 'FEM', 'cp'])
def test_getTreeBranch_with_existing_and_unique_path(self):
self.assertIs(self.node1FEMs,
self.root.getTreeBranch(['1.0', 'FEM', 'scale']))
self.assertIs(self.node1FEMsQR,
self.root.getTreeBranch(['1.0', 'FEM', 'scale', 'QR']))
self.assertIs(self.node1XFEMcpLT,
self.node1.getTreeBranch(['1.0', 'XFEM', 'cp', 'LT']))
self.assertIs(self.node5XFEMcpLT,
self.root.getTreeBranch(['5.0', 'XFEM', 'cp', 'LT']))
self.assertIs(self.node1XFEMmpLR,
self.node1XFEM.getTreeBranch(['mp', 'LR']))
def test_countNumberOfTreeLevels(self):
self.assertEqual(4, self.root.countNumberOfTreeLevels())
tn0 = TreeNode('nodeLevel0')
self.assertEqual(0, tn0.countNumberOfTreeLevels())
tn1a = TreeNode('nodeLevel1a')
tn1b = TreeNode('nodeLevel1b')
tn0.setChild(tn1a)
tn0.setChild(tn1b)
tn1a.setParent(tn0)
tn1b.setParent(tn0)
self.assertEqual(1, tn0.countNumberOfTreeLevels())
tn2a = TreeNode('nodeLevel2a')
tn2a.setParent(tn1a)
tn1a.setChild(tn2a)
self.assertEqual(2, tn1b.countNumberOfTreeLevels())
tn3a = TreeNode('nodeLevel3a')
tn3a.setParent(tn2a)
tn2a.setChild(tn3a)
self.assertEqual(3, tn1b.countNumberOfTreeLevels())
tn2b = TreeNode('nodeLevel2b')
tn1b.setChild(tn2b)
tn2b.setParent(tn1b)
self.assertEqual(3, tn2b.countNumberOfTreeLevels())
def test_getNodeLevel(self):
self.assertEqual(0, self.root.getNodeLevel(self.root))
self.assertEqual(1, self.node1.getNodeLevel(self.node1))
self.assertEqual(1, self.root.getNodeLevel(self.node10))
self.assertEqual(2, self.node1FEMs.getNodeLevel(self.node10XFEM))
self.assertEqual(3, self.node5.getNodeLevel(self.node10XFEMcp))
self.assertEqual(3, self.root.getNodeLevel(self.node3XFEMmp))
self.assertEqual(4, self.root.getNodeLevel(self.node1FEMsQR))
def test_getChildLeafNodes(self):
self.assertEqual(set([]), set(
self.node1FEMsQR.getChildLeafNodes(self.node10XFEMcpLR)))
self.assertEqual(set([self.node1FEMsQR, self.node1FEMsLR]),
set(self.root.getChildLeafNodes(self.node1FEMs)))
self.assertEqual(set([self.node1FEMsQR,
self.node1FEMsLR,
self.node1FEMeQF,
self.node1FEMeLF]),
set(self.root.getChildLeafNodes(self.node1FEM)))
self.assertEqual(set([self.node2FEMsQF]),
set(self.root.getChildLeafNodes(self.node2)))
self.assertEqual(set([self.node3FEMeQR, self.node3XFEMmpLF]),
set(self.root.getChildLeafNodes(self.node3)))
self.assertEqual(set([
self.node1FEMsQR, self.node1FEMsLR,
self.node1FEMeQF, self.node1FEMeLF,
self.node1XFEMcpLT, self.node1XFEMmpLR,
self.node2FEMsQF,
self.node3FEMeQR, self.node3XFEMmpLF,
self.node5XFEMcpLT,
self.node10FEMeQF, self.node10FEMeLF,
self.node10XFEMcpLR]),
set(self.root.getChildLeafNodes(self.root)))
class TestTreeNode_member_assignment(TreeSetUp):
def setUp_assignMemberAsFailed(self):
tn0 = TreeNode('nodeLevel0')
tn1a = TreeNode('nodeLevel1a')
tn1b = TreeNode('nodeLevel1b')
tn0.setChild(tn1a)
tn0.setChild(tn1b)
tn1a.setParent(tn0)
tn1b.setParent(tn0)
tn1a.successfulMembers = set(['a1', 'a2', 'a3', 'a4'])
tn1b.successfulMembers = set(['b1', 'b2', 'b3', 'b4'])
return tn0, tn1a, tn1b
def test_assignMemberAsFailed_existing_member_called_on_root_node_with_print(
self):
prMock1 = MagicMock()
with patch('trees.TreeNode.printNode') as prMock1:
res = self.root.assignMemberAsFailed(
'node2FEMsQF_sm_1', printChanges=True, rowlen=21)
prMock1.assert_called_once_with(self.node2FEMsQF, 21)
self.assertEqual(1, res)
self.assertEqual(set(['node2FEMsQF_sm_2']),
self.node2FEMsQF.successfulMembers)
self.assertEqual(set(['node2FEMsQF_sm_1']),
self.node2FEMsQF.failedMembers)
node1FEMmembers(self)
node1XFEMmembers(self)
node3FEMmembers(self)
node3XFEMmembers(self)
node5XFEMmembers(self)
node10FEMmembers(self)
node10XFEMmembers(self)
def test_assignMemberAsFailed_existing_member_called_on_root_without_print(
self):
prMock1 = MagicMock()
with patch('trees.TreeNode.printNode') as prMock1:
res = self.root.assignMemberAsFailed(
'node3FEMeQR_sm_1', printChanges=False)
self.assertFalse(prMock1.called)
self.assertEqual(1, res)
self.assertEqual(set([]), set(self.node3FEMeQR.successfulMembers))
self.assertEqual(set([
'node3FEMeQR_sm_1', 'node3FEMeQR_fm_1', 'node3FEMeQR_fm_2']),
set(self.node3FEMeQR.failedMembers))
node1FEMmembers(self)
node1XFEMmembers(self)
node2FEMmembers(self)
node3XFEMmembers(self)
node5XFEMmembers(self)
node10FEMmembers(self)
node10XFEMmembers(self)
def test_assignMemberAsFailed_existing_member_called_on_nonroot_without_print(
self):
res = self.node1FEM.assignMemberAsFailed(
'node5XFEMcpLT_sm_2', printChanges=False)
self.assertEqual(1, res)
self.assertEqual(set(['node5XFEMcpLT_sm_1']),
set(self.node5XFEMcpLT.successfulMembers))
self.assertEqual(set(['node5XFEMcpLT_sm_2',
'node5XFEMcpLT_fm_1',
'node5XFEMcpLT_fm_2',
'node5XFEMcpLT_fm_3']),
self.node5XFEMcpLT.failedMembers)
node1FEMmembers(self)
node1XFEMmembers(self)
node2FEMmembers(self)
node3FEMmembers(self)
node3XFEMmembers(self)
node10FEMmembers(self)
node10XFEMmembers(self)
def test_assignMemberAsFailed_nonexisting_member_without_print(self):
res = self.root.assignMemberAsFailed('c1', printChanges=False)
self.assertEqual(0, res)
node1FEMmembers(self)
node1XFEMmembers(self)
node2FEMmembers(self)
node3FEMmembers(self)
node3XFEMmembers(self)
node5XFEMmembers(self)
node10FEMmembers(self)
node10XFEMmembers(self)
def test_assignMemberAsFailed_with_failed_member_without_print(self):
res = self.root.assignMemberAsFailed(
'node5XFEMcpLT_fm_1', printChanges=False)
self.assertEqual(0, res)
node1FEMmembers(self)
node1XFEMmembers(self)
node2FEMmembers(self)
node3FEMmembers(self)
node3XFEMmembers(self)
node5XFEMmembers(self)
node10FEMmembers(self)
node10XFEMmembers(self)
class TestCreateTreeFromDbKeys(unittest.TestCase):
def adMock_sf_1(self, param):
parDict = {
'crackRatio': '1.0', 'analysisType': 'FEM',
'modelType': 'elliptic', 'elements': 'LinearRI'}
return parDict[param]
def adMock_sf_2(self, param):
parDict = {
'crackRatio': '1.0', 'analysisType': 'XFEM',
'modelType': 'elliptic', 'elements': 'QuadraticFI'}
return parDict[param]
def mockFunc(self, key):
if key == 'key1':
self.adMock = MagicMock()
self.adMock.getParameter.side_effect = self.adMock_sf_1
self.adMock.getEntryKey.return_value = key
self.adMock.getAnalysisSuccess.return_value = True
elif key == 'key2':
self.adMock = MagicMock()
self.adMock.getParameter.side_effect = self.adMock_sf_2
self.adMock.getAnalysisSuccess.return_value = False
self.adMock.getEntryKey.return_value = key
return self.adMock
def setUp(self):
self.adMock = MagicMock(side_effect=self.mockFunc)
self.adPatch = patch(
'dataProcessing.AnalysisData', self.adMock)
self.adPatch.start()
def tearDown(self):
self.adPatch.stop()
def test_createTreeFromDbKeys(self):
root = createTreeFromDbKeys(['key1', 'key2'])
self.assertIsNone(root.getParent())
self.assertEqual(1, len(root.getChildren()))
cnode = root.getChildren()[0]
self.assertEqual('1.0', cnode.getName())
self.assertEqual(2, len(cnode.getChildren()))
self.assertEqual(set(['XFEM', 'FEM']), set(
[n.getName() for n in cnode.getChildren()]))
nodeFEM, nodeXFEM = cnode.getChildren()
if nodeXFEM.getName() == 'FEM':
nodeFEM, nodeXFEM = nodeXFEM, nodeFEM
nodeXmt = nodeXFEM.getChildren()[0]
nodeXel = nodeXmt.getChildren()[0]
self.assertEqual(1, len(nodeXFEM.getChildren()))
self.assertEqual(1, len(nodeXmt.getChildren()))
self.assertEqual(0, len(nodeXel.getChildren()))
self.assertEqual('XFEM', nodeXFEM.getName())
self.assertEqual('elliptic', nodeXmt.getName())
self.assertEqual('QuadraticFI', nodeXel.getName())
self.assertEqual(set(['key2']), nodeXel.failedMembers)
nodeFmt = nodeFEM.getChildren()[0]
nodeFel = nodeFmt.getChildren()[0]
self.assertEqual(1, len(nodeFEM.getChildren()))
self.assertEqual(1, len(nodeFmt.getChildren()))
self.assertEqual(0, len(nodeFel.getChildren()))
self.assertEqual('FEM', nodeFEM.getName())
self.assertEqual('elliptic', nodeFmt.getName())
self.assertEqual('LinearRI', nodeFel.getName())
self.assertEqual(set(['key1']), nodeFel.successfulMembers)
class TreeSetUp(unittest.TestCase):
def setUp(self):
self.root = TreeNode('root_0')
self.node_1a_0 = TreeNode('nodeLevel_1a_0')
self.node_1b_0 = TreeNode('nodeLevel_1b_0')
self.root.setChild(self.node_1a_0)
self.root.setChild(self.node_1b_0)
self.node_1a_0.setParent(self.root)
self.node_1b_0.setParent(self.root)
self.levels = {0: set([self.root]), 1: set(
[self.node_1a_0, self.node_1b_0])}
self.node_2_1a = TreeNode('nodeLevel_2_1a')
self.node_1a_0.setChild(self.node_2_1a)
self.node_2_1a.setParent(self.node_1a_0)
self.node_2a_1b = TreeNode('nodeLevel_2a_1b')
self.node_2a_1b.setParent(self.node_1b_0)
self.node_1b_0.setChild(self.node_2a_1b)
self.node_2b_1b = TreeNode('nodeLevel_2b_1b')
self.node_2b_1b.setParent(self.node_1b_0)
self.node_1b_0.setChild(self.node_2b_1b)
self.levels[2] = set(
[self.node_2_1a, self.node_2a_1b, self.node_2b_1b])
self.node_3_2 = TreeNode('nodeLevel_3_2')
self.node_3_2.setParent(self.node_2_1a)
self.node_2_1a.setChild(self.node_3_2)
self.node_3_2a = TreeNode('nodeLevel_3_2a')
self.node_3_2a.setParent(self.node_2a_1b)
self.node_2a_1b.setChild(self.node_3_2a)
self.levels[3] = set([self.node_3_2a, self.node_3_2])
class TestMaxNodesPerLevel(TreeSetUp):
def testMaxNodesPerLevel(self):
exp = {0: 1, 1: 2, 2: 2, 3: 1}
self.assertEqual(exp, maxNodesPerLevel(self.root))
class TestNodeNamesPerLevel(TreeSetUp):
def test_nodeNamesPerLevel(self):
exp = {
0: ['root_0'], 1: sorted(['nodeLevel_1a_0', 'nodeLevel_1b_0']),
2: sorted(['nodeLevel_2_1a', 'nodeLevel_2a_1b', 'nodeLevel_2b_1b']),
3: sorted(['nodeLevel_3_2', 'nodeLevel_3_2a'])}
self.assertEqual(exp, nodeNamesPerLevel(self.root))
class TestNodesPerLevel(TreeSetUp):
def test_nodesPerLevel(self):
self.assertEqual(self.levels, nodesPerLevel(self.root))
class TestTracePath(TreeSetUp):
def test_tracePath_with_limitLevel_None(self):
exp = [self.root, self.node_1a_0, self.node_2_1a, self.node_3_2]
self.assertEqual(exp, tracePath(self.node_3_2, limitLevel=None))
exp = [self.root, self.node_1b_0, self.node_2a_1b, self.node_3_2a]
self.assertEqual(exp, tracePath(self.node_3_2a, limitLevel=None))
exp = [self.root, self.node_1b_0, self.node_2b_1b]
self.assertEqual(exp, tracePath(self.node_2b_1b, limitLevel=None))
def test_tracePath_with_limitLevel_zero(self):
exp = [self.root, self.node_1a_0, self.node_2_1a, self.node_3_2]
self.assertEqual(exp, tracePath(self.node_3_2, limitLevel=0))
def test_tracePath_with_limitLevel_between_zero_and_max_tree_depth(self):
exp = [self.node_1a_0, self.node_2_1a, self.node_3_2]
self.assertEqual(exp, tracePath(self.node_3_2, limitLevel=1))
exp = [self.node_2a_1b, self.node_3_2a]
self.assertEqual(exp, tracePath(self.node_3_2a, limitLevel=2))
exp = [self.node_3_2a]
self.assertEqual(exp, tracePath(self.node_3_2a, limitLevel=3))
exp = []
self.assertEqual(exp, tracePath(self.node_3_2, limitLevel=4))
def test_tracePath_with_limitLevel_higher_than_tree_depth(self):
self.assertRaises(IndexError, tracePath, self.node_3_2, limitLevel=5)
class TestCreateTreeOfKeys(unittest.TestCase):
def setUp(self):
self.root = TreeNode('root')
node1 = TreeNode('1.0')
self.root.setChild(node1)
node1.setParent(self.root)
node1fem = TreeNode('FEM')
node1fem.setParent(node1)
node1.setChild(node1fem)
node1femellip = TreeNode('elliptic')
node1femellip.setParent(node1fem)
node1fem.setChild(node1femellip)
node1femellipL = TreeNode('LinearRI')
node1femellipL.setParent(node1femellip)
node1femellip.setChild(node1femellipL)
node1xfem = TreeNode('XFEM')
node1xfem.setParent(node1)
node1.setChild(node1xfem)
node1xfemCP = TreeNode('cp')
node1xfemCP.setParent(node1xfem)
node1xfem.setChild(node1xfemCP)
node1xfemCPLT = TreeNode('LinearTet')
node1xfemCPLT.setParent(node1xfemCP)
node1xfemCP.setChild(node1xfemCPLT)
node2 = TreeNode('2.0')
self.root.setChild(node2)
node2.setParent(self.root)
node2fem = TreeNode('FEM')
node2fem.setParent(node2)
node2.setChild(node2fem)
node2femscale = TreeNode('scale')
node2femscale.setParent(node2fem)
node2fem.setChild(node2femscale)
node2femscaleQR = TreeNode('QuadRI')
node2femscaleQR.setParent(node2femscale)
node2femscale.setChild(node2femscaleQR)
node2xfem = TreeNode('XFEM')
node2xfem.setParent(node2)
node2.setChild(node2xfem)
node2xfemmp = TreeNode('mp')
node2xfem.setChild(node2xfemmp)
node2xfemmp.setParent(node2xfem)
node2xfemmpLF = TreeNode('LinearFI')
node2xfemmp.setChild(node2xfemmpLF)
node2xfemmpLF.setParent(node2xfemmp)
node3 = TreeNode('3.0')
self.root.setChild(node3)
node3.setParent(self.root)
node3fem = TreeNode('FEM')
node3.setChild(node3fem)
node3fem.setParent(node3)
node3femellip = TreeNode('elliptic')
node3fem.setChild(node3femellip)
node3femellip.setParent(node3fem)
node3femellipQR = TreeNode('QuadRI')
node3femellipQR.setParent(node3femellip)
node3femellip.setChild(node3femellipQR)
node3xfem = TreeNode('XFEM')
node3xfem.setParent(node3)
node3.setChild(node3xfem)
node3xfemmp = TreeNode('mp')
node3xfem.setChild(node3xfemmp)
node3xfemmp.setParent(node3xfem)
node3xfemmpQF = TreeNode('QuadFI')
node3xfemmp.setChild(node3xfemmpQF)
node3xfemmpQF.setParent(node3xfemmp)
def test_createTreeOfKeys(self):
nr = createTreeOfKeys(self.root)
self.assertEqual(set(['FEM', 'XFEM']),
set([n.getName() for n in nr.getChildren()]))
nodeF, nodeX = nr.getChildren()
if nodeX.getName() == 'FEM':
nodeX, nodeF = nodeF, nodeX
self.assertEqual('FEM', nodeF.getName())
self.assertEqual('XFEM', nodeX.getName())
self.assertEqual(sorted(['elliptic', 'scale']),
sorted([n.getName() for n in nodeF.getChildren()]))
nodeFe, nodeFs = nodeF.getChildren()
if nodeFe.getName() == 'scale':
nodeFe, nodeFs = nodeFs, nodeFe
self.assertEqual('scale', nodeFs.getName())
self.assertEqual('elliptic', nodeFe.getName())
self.assertEqual(sorted(['LinearRI', 'QuadRI']),
sorted([n.getName() for n in nodeFe.getChildren()]))
self.assertEqual(1, len(nodeFs.getChildren()))
self.assertEqual('QuadRI', nodeFs.getChildren()[0].getName())
self.assertEqual(sorted(['mp', 'cp']),
sorted([n.getName() for n in nodeX.getChildren()]))
nodeXm, nodeXc = nodeX.getChildren()
if nodeXm.getName() == 'cp':
nodeXm, nodeXc = nodeXc, nodeXm
self.assertEqual('cp', nodeXc.getName())
self.assertEqual('mp', nodeXm.getName())
self.assertEqual(1, len(nodeXc.getChildren()))
self.assertEqual('LinearTet', nodeXc.getChildren()[0].getName())
self.assertEqual(sorted(['LinearFI', 'QuadFI']),
sorted([n.getName() for n in nodeXm.getChildren()]))
|
|
"""
Some models for pulling data from Trac.
Initially generated by inspectdb then modified heavily by hand, often by
consulting http://trac.edgewall.org/wiki/TracDev/DatabaseSchema.
These are far from perfect: many (most?) Trac tables have composite primary
keys, which Django can't represent. This means a lot of built-in Django stuff
(the admin, for example) won't work at all with these models. I haven't
investigated just how deep these failures go, but I suspect all sorts
of things just won't work.
However, they're Good Enough(tm) to let me pull some basic (read-only) data out,
and that's all I really need.
Some potential TODOs:
* Add some convenience manager functions to deal with ticket_custom. Right
now you can query with a join::
Ticket.objects.filter(custom_fields__name='ui_ux',
custom_fields__value='1')
Perhaps we might be able to get something like::
Ticket.objects.with_custom(ui_ux=True)
Or even a custom .filter() that intercepts and figures it out?
* Trac stores SVN repository revisions as '0000003744' grar. This
makes querying awkward. There's probably some tricky manager magic
that we could do here.
* The whole Revision model will fall apart if we ever add a second
repository to Trac.
And a few notes on tables that're left out and why:
* All the session and permission tables: they're just not needed.
* Enum: I don't know what this is or what it's for.
* NodeChange: Ditto.
"""
import datetime
from django.db import models
_epoc = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
class time_property:
"""
Convert Trac timestamps into UTC datetimes.
See http://trac.edgewall.org/browser//branches/0.12-stable/trac/util/datefmt.py
for Trac's version of all this. Mine's something of a simplification.
Like the rest of this module this is far from perfect -- no setters, for
example! That's good enough for now.
"""
def __init__(self, fieldname):
self.fieldname = fieldname
def __get__(self, instance, owner):
if instance is None:
return self
timestamp = getattr(instance, self.fieldname)
return _epoc + datetime.timedelta(microseconds=timestamp)
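# Usage note (mirrors the models below): pair the raw column with the descriptor,
#
#     _time = models.BigIntegerField(db_column='time')
#     time = time_property('_time')
#
# so `obj.time` yields an aware UTC datetime while `obj._time` keeps the raw
# microsecond timestamp Trac stores.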
class Ticket(models.Model):
id = models.IntegerField(primary_key=True)
type = models.TextField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
_changetime = models.BigIntegerField(db_column='changetime')
changetime = time_property('_changetime')
component = models.ForeignKey(
'Component',
related_name='tickets',
db_column='component',
on_delete=models.DO_NOTHING,
)
severity = models.TextField()
owner = models.TextField()
reporter = models.TextField()
cc = models.TextField()
version = models.ForeignKey(
'Version',
related_name='tickets',
db_column='version',
on_delete=models.DO_NOTHING,
)
milestone = models.ForeignKey(
'Milestone',
related_name='tickets',
db_column='milestone',
on_delete=models.DO_NOTHING,
)
priority = models.TextField()
status = models.TextField()
resolution = models.TextField()
summary = models.TextField()
description = models.TextField()
keywords = models.TextField()
class Meta:
db_table = 'ticket'
managed = False
def __str__(self):
return "#%s: %s" % (self.id, self.summary)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Munge custom fields onto this object. This sucks since it implies
# querying will work (it won't!) and that writing will work (ditto).
# Also notice that *nasty* mapping of Trac's "booleanish" things to
# real booleans. This can fail in a bunch of ways, but not in our
# particular install.
for name, value in self.custom_fields.values_list('name', 'value'):
if value in ('0', '1'):
value = bool(int(value))
setattr(self, name, value)
class TicketCustom(models.Model):
ticket = models.ForeignKey(
Ticket,
related_name='custom_fields',
db_column='ticket',
primary_key=True,
on_delete=models.DO_NOTHING,
)
name = models.TextField()
value = models.TextField()
class Meta:
db_table = 'ticket_custom'
managed = False
def __str__(self):
return "%s: %s" % (self.name, self.value)
class TicketChange(models.Model):
ticket = models.ForeignKey(
Ticket,
related_name='changes',
db_column='ticket',
primary_key=True,
on_delete=models.DO_NOTHING,
)
author = models.TextField()
field = models.TextField()
oldvalue = models.TextField()
newvalue = models.TextField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
class Meta:
db_table = 'ticket_change'
managed = False
ordering = ['_time']
def __str__(self):
return "#%s: changed %s" % (self.ticket.id, self.field)
class Component(models.Model):
name = models.TextField(primary_key=True)
owner = models.TextField()
description = models.TextField()
class Meta:
db_table = 'component'
managed = False
def __str__(self):
return self.name
class Version(models.Model):
name = models.TextField(primary_key=True)
description = models.TextField()
_time = models.BigIntegerField(db_column='time')
time = time_property('_time')
class Meta:
db_table = 'version'
managed = False
def __str__(self):
return self.name
class Milestone(models.Model):
name = models.TextField(primary_key=True)
description = models.TextField()
    _due = models.BigIntegerField(db_column='due')
    due = time_property('_due')
    _completed = models.BigIntegerField(db_column='completed')
    completed = time_property('_completed')
class Meta:
db_table = 'milestone'
managed = False
def __str__(self):
return self.name
class SingleRepoRevisionManager(models.Manager):
"""
Forces Revision to only query against a single repo, thus making
Revision.rev behave something like a primary key.
"""
def __init__(self, repo_id):
self.repo_id = repo_id
super().__init__()
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(repos=self.repo_id)
SINGLE_REPO_ID = 1
class Revision(models.Model):
repos = models.IntegerField()
rev = models.TextField(primary_key=True)
_time = models.BigIntegerField(db_column='time')
    time = time_property('_time')
author = models.TextField()
message = models.TextField()
objects = SingleRepoRevisionManager(repo_id=SINGLE_REPO_ID)
class Meta:
db_table = 'revision'
managed = False
def __str__(self):
return '[%s] %s' % (self.rev, self.message.split('\n', 1)[0])
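# Illustrative helper, not in the original module: because Trac zero-pads SVN revisions,
# a plain integer must be padded before it can be used as a lookup key. The 10-character
# width matches the '0000003744' example in the module docstring and is an assumption.
def revision_by_number(number, width=10):
    """Fetch a Revision by its integer SVN revision number."""
    return Revision.objects.get(rev=str(number).zfill(width))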
# The Wiki table uses a composite primary key (name, version). Since
# Django doesn't support this, this model sits on top of a simple view.
# CREATE VIEW "wiki_django_view" AS
# SELECT "name" || '.' || "version" AS "django_id", *
# FROM wiki;
class Wiki(models.Model):
django_id = models.TextField(primary_key=True)
name = models.TextField()
version = models.IntegerField()
_time = models.BigIntegerField(db_column='time')
    time = time_property('_time')
author = models.TextField()
ipnr = models.TextField()
text = models.TextField()
comment = models.TextField()
readonly = models.IntegerField()
class Meta:
db_table = 'wiki_django_view'
managed = False
def __str__(self):
return '%s (v%s)' % (self.name, self.version)
# Same story as for Wiki: attachment's PK is (type, id, filename), so again
# there's a simple view this is on top of.
# CREATE VIEW "attachment_django_view" AS
# SELECT "type" || '.' || "id" || '.' || "filename" AS "django_id", *
# FROM attachment;
class Attachment(models.Model):
django_id = models.TextField(primary_key=True)
type = models.TextField()
id = models.TextField()
filename = models.TextField()
size = models.IntegerField()
_time = models.BigIntegerField(db_column='time')
    time = time_property('_time')
description = models.TextField()
author = models.TextField()
ipnr = models.TextField()
class Meta:
db_table = 'attachment_django_view'
managed = False
def __str__(self):
attached_to = ('#%s' % self.id) if self.type == 'ticket' else self.id
return '%s (on %s)' % (self.filename, attached_to)
|
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class AttachInterfacesTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(AttachInterfacesTestJSON, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron is required")
if not CONF.compute_feature_enabled.interface_attach:
raise cls.skipException("Interface attachment is not available.")
@classmethod
def setup_credentials(cls):
# This test class requires network and subnet
cls.set_network_resources(network=True, subnet=True)
super(AttachInterfacesTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(AttachInterfacesTestJSON, cls).setup_clients()
cls.client = cls.os.interfaces_client
def wait_for_interface_status(self, server, port_id, status):
"""Waits for a interface to reach a given status."""
body = (self.client.show_interface(server, port_id)
['interfaceAttachment'])
interface_status = body['port_state']
start = int(time.time())
        while interface_status != status:
time.sleep(self.build_interval)
body = (self.client.show_interface(server, port_id)
['interfaceAttachment'])
interface_status = body['port_state']
timed_out = int(time.time()) - start >= self.build_timeout
if interface_status != status and timed_out:
message = ('Interface %s failed to reach %s status '
'(current %s) within the required time (%s s).' %
(port_id, status, interface_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
return body
def _check_interface(self, iface, port_id=None, network_id=None,
fixed_ip=None, mac_addr=None):
self.assertIn('port_state', iface)
if port_id:
self.assertEqual(iface['port_id'], port_id)
if network_id:
self.assertEqual(iface['net_id'], network_id)
if fixed_ip:
self.assertEqual(iface['fixed_ips'][0]['ip_address'], fixed_ip)
if mac_addr:
self.assertEqual(iface['mac_addr'], mac_addr)
def _create_server_get_interfaces(self):
server = self.create_test_server(wait_until='ACTIVE')
ifs = (self.client.list_interfaces(server['id'])
['interfaceAttachments'])
body = self.wait_for_interface_status(
server['id'], ifs[0]['port_id'], 'ACTIVE')
ifs[0]['port_state'] = body['port_state']
return server, ifs
def _test_create_interface(self, server):
iface = (self.client.create_interface(server['id'])
['interfaceAttachment'])
iface = self.wait_for_interface_status(
server['id'], iface['port_id'], 'ACTIVE')
self._check_interface(iface)
return iface
def _test_create_interface_by_network_id(self, server, ifs):
network_id = ifs[0]['net_id']
iface = self.client.create_interface(
server['id'], net_id=network_id)['interfaceAttachment']
iface = self.wait_for_interface_status(
server['id'], iface['port_id'], 'ACTIVE')
self._check_interface(iface, network_id=network_id)
return iface
def _test_show_interface(self, server, ifs):
iface = ifs[0]
_iface = self.client.show_interface(
server['id'], iface['port_id'])['interfaceAttachment']
self._check_interface(iface, port_id=_iface['port_id'],
network_id=_iface['net_id'],
fixed_ip=_iface['fixed_ips'][0]['ip_address'],
mac_addr=_iface['mac_addr'])
def _test_delete_interface(self, server, ifs):
# NOTE(danms): delete not the first or last, but one in the middle
iface = ifs[1]
self.client.delete_interface(server['id'], iface['port_id'])
_ifs = (self.client.list_interfaces(server['id'])
['interfaceAttachments'])
start = int(time.time())
while len(ifs) == len(_ifs):
time.sleep(self.build_interval)
_ifs = (self.client.list_interfaces(server['id'])
['interfaceAttachments'])
timed_out = int(time.time()) - start >= self.build_timeout
if len(ifs) == len(_ifs) and timed_out:
message = ('Failed to delete interface within '
'the required time: %s sec.' % self.build_timeout)
raise exceptions.TimeoutException(message)
self.assertNotIn(iface['port_id'], [i['port_id'] for i in _ifs])
return _ifs
def _compare_iface_list(self, list1, list2):
# NOTE(danms): port_state will likely have changed, so just
# confirm the port_ids are the same at least
list1 = [x['port_id'] for x in list1]
list2 = [x['port_id'] for x in list2]
self.assertEqual(sorted(list1), sorted(list2))
@test.idempotent_id('73fe8f02-590d-4bf1-b184-e9ca81065051')
@test.services('network')
def test_create_list_show_delete_interfaces(self):
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertTrue(interface_count > 0)
self._check_interface(ifs[0])
try:
iface = self._test_create_interface(server)
except lib_exc.BadRequest as e:
msg = ('Multiple possible networks found, use a Network ID to be '
'more specific.')
if not CONF.compute.fixed_network_name and e.message == msg:
raise
else:
ifs.append(iface)
iface = self._test_create_interface_by_network_id(server, ifs)
ifs.append(iface)
_ifs = (self.client.list_interfaces(server['id'])
['interfaceAttachments'])
self._compare_iface_list(ifs, _ifs)
self._test_show_interface(server, ifs)
_ifs = self._test_delete_interface(server, ifs)
self.assertEqual(len(ifs) - 1, len(_ifs))
@test.attr(type='smoke')
@test.idempotent_id('c7e0e60b-ee45-43d0-abeb-8596fd42a2f9')
@test.services('network')
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertTrue(interface_count > 0)
self._check_interface(ifs[0])
network_id = ifs[0]['net_id']
self.client.add_fixed_ip(server['id'], networkId=network_id)
# Remove the fixed IP from server.
server_detail = self.os.servers_client.show_server(
server['id'])
# Get the Fixed IP from server.
fixed_ip = None
for ip_set in server_detail['addresses']:
for ip in server_detail['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
fixed_ip = ip['addr']
break
if fixed_ip is not None:
break
self.client.remove_fixed_ip(server['id'], address=fixed_ip)
|
|
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Nova."""
import os.path
import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import requests_mock
import nova.conf
from nova import context
from nova import exception
from nova import policy
from nova import test
from nova.tests.unit import fake_policy
from nova.tests.unit import policy_fixture
from nova import utils
CONF = nova.conf.CONF
class PolicyFileTestCase(test.NoDBTestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.target = {}
def test_modified_policy_reloads(self):
with utils.tempdir() as tmpdir:
tmpfilename = os.path.join(tmpdir, 'policy')
self.flags(policy_file=tmpfilename, group='oslo_policy')
# NOTE(uni): context construction invokes policy check to determine
# is_admin or not. As a side-effect, policy reset is needed here
# to flush existing policy cache.
policy.reset()
policy.init()
rule = oslo_policy.RuleDefault('example:test', "")
policy._ENFORCER.register_defaults([rule])
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write('{"example:test": ""}')
policy.authorize(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
policyfile.write('{"example:test": "!"}')
policy._ENFORCER.load_rules(True)
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
class PolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
rules = [
oslo_policy.RuleDefault("true", '@'),
oslo_policy.RuleDefault("example:allowed", '@'),
oslo_policy.RuleDefault("example:denied", "!"),
oslo_policy.RuleDefault("example:get_http",
"http://www.example.com"),
oslo_policy.RuleDefault("example:my_file",
"role:compute_admin or "
"project_id:%(project_id)s"),
oslo_policy.RuleDefault("example:early_and_fail", "! and @"),
oslo_policy.RuleDefault("example:early_or_success", "@ or !"),
oslo_policy.RuleDefault("example:lowercase_admin",
"role:admin or role:sysadmin"),
oslo_policy.RuleDefault("example:uppercase_admin",
"role:ADMIN or role:sysadmin"),
]
policy.reset()
policy.init()
# before a policy rule can be used, its default has to be registered.
policy._ENFORCER.register_defaults(rules)
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
def test_authorize_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(oslo_policy.PolicyNotRegistered, policy.authorize,
self.context, action, self.target)
def test_authorize_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
def test_authorize_bad_action_noraise(self):
action = "example:denied"
result = policy.authorize(self.context, action, self.target, False)
self.assertFalse(result)
def test_authorize_good_action(self):
action = "example:allowed"
result = policy.authorize(self.context, action, self.target)
self.assertTrue(result)
@requests_mock.mock()
def test_authorize_http_true(self, req_mock):
req_mock.post('http://www.example.com/',
text='True')
action = "example:get_http"
target = {}
result = policy.authorize(self.context, action, target)
self.assertTrue(result)
@requests_mock.mock()
def test_authorize_http_false(self, req_mock):
req_mock.post('http://www.example.com/',
text='False')
action = "example:get_http"
target = {}
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, target)
def test_templatized_authorization(self):
target_mine = {'project_id': 'fake'}
target_not_mine = {'project_id': 'another'}
action = "example:my_file"
policy.authorize(self.context, action, target_mine)
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, target_not_mine)
def test_early_AND_authorization(self):
action = "example:early_and_fail"
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
def test_early_OR_authorization(self):
action = "example:early_or_success"
policy.authorize(self.context, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.RequestContext('admin',
'fake',
roles=['AdMiN'])
policy.authorize(admin_context, lowercase_action, self.target)
policy.authorize(admin_context, uppercase_action, self.target)
@mock.patch.object(policy.LOG, 'warning')
def test_warning_when_deprecated_user_based_rule_used(self, mock_warning):
policy._warning_for_deprecated_user_based_rules(
[("os_compute_api:servers:index",
"project_id:%(project_id)s or user_id:%(user_id)s")])
mock_warning.assert_called_once_with(
u"The user_id attribute isn't supported in the rule "
"'%s'. All the user_id based policy enforcement will be removed "
"in the future.", "os_compute_api:servers:index")
@mock.patch.object(policy.LOG, 'warning')
def test_no_warning_for_user_based_resource(self, mock_warning):
policy._warning_for_deprecated_user_based_rules(
[("os_compute_api:os-keypairs:index",
"user_id:%(user_id)s")])
mock_warning.assert_not_called()
@mock.patch.object(policy.LOG, 'warning')
def test_no_warning_for_no_user_based_rule(self, mock_warning):
policy._warning_for_deprecated_user_based_rules(
[("os_compute_api:servers:index",
"project_id:%(project_id)s")])
mock_warning.assert_not_called()
class IsAdminCheckTestCase(test.NoDBTestCase):
def setUp(self):
super(IsAdminCheckTestCase, self).setUp()
policy.init()
def test_init_true(self):
check = policy.IsAdminCheck('is_admin', 'True')
self.assertEqual(check.kind, 'is_admin')
self.assertEqual(check.match, 'True')
self.assertTrue(check.expected)
def test_init_false(self):
check = policy.IsAdminCheck('is_admin', 'nottrue')
self.assertEqual(check.kind, 'is_admin')
self.assertEqual(check.match, 'False')
self.assertFalse(check.expected)
def test_call_true(self):
check = policy.IsAdminCheck('is_admin', 'True')
self.assertTrue(check('target', dict(is_admin=True),
policy._ENFORCER))
self.assertFalse(check('target', dict(is_admin=False),
policy._ENFORCER))
def test_call_false(self):
check = policy.IsAdminCheck('is_admin', 'False')
self.assertFalse(check('target', dict(is_admin=True),
policy._ENFORCER))
self.assertTrue(check('target', dict(is_admin=False),
policy._ENFORCER))
class AdminRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(AdminRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture())
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.actions = policy.get_rules().keys()
self.target = {}
def test_authorize_admin_actions_with_nonadmin_context_throws(self):
"""Check if non-admin context passed to admin actions throws
Policy not authorized exception
"""
for action in self.actions:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.non_admin_context = context.RequestContext('fake', 'fake',
roles=['member'])
self.admin_context = context.RequestContext('fake', 'fake', True,
roles=['member'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
self.admin_only_rules = (
"cells_scheduler_filter:DifferentCellFilter",
"cells_scheduler_filter:TargetCellFilter",
"network:attach_external_network",
"os_compute_api:servers:create:forced_host",
"os_compute_api:servers:detail:get_all_tenants",
"os_compute_api:servers:index:get_all_tenants",
"os_compute_api:servers:show:host_status",
"os_compute_api:servers:migrations:force_complete",
"os_compute_api:servers:migrations:delete",
"os_compute_api:os-admin-actions:reset_network",
"os_compute_api:os-admin-actions:inject_network_info",
"os_compute_api:os-admin-actions:reset_state",
"os_compute_api:os-aggregates:index",
"os_compute_api:os-aggregates:create",
"os_compute_api:os-aggregates:show",
"os_compute_api:os-aggregates:update",
"os_compute_api:os-aggregates:delete",
"os_compute_api:os-aggregates:add_host",
"os_compute_api:os-aggregates:remove_host",
"os_compute_api:os-aggregates:set_metadata",
"os_compute_api:os-agents",
"os_compute_api:os-baremetal-nodes",
"os_compute_api:os-cells",
"os_compute_api:os-cells:create",
"os_compute_api:os-cells:delete",
"os_compute_api:os-cells:update",
"os_compute_api:os-cells:sync_instances",
"os_compute_api:os-evacuate",
"os_compute_api:os-extended-server-attributes",
"os_compute_api:os-fixed-ips",
"os_compute_api:os-flavor-access:remove_tenant_access",
"os_compute_api:os-flavor-access:add_tenant_access",
"os_compute_api:os-flavor-extra-specs:create",
"os_compute_api:os-flavor-extra-specs:update",
"os_compute_api:os-flavor-extra-specs:delete",
"os_compute_api:os-flavor-manage",
"os_compute_api:os-floating-ips-bulk",
"os_compute_api:os-floating-ip-dns:domain:delete",
"os_compute_api:os-floating-ip-dns:domain:update",
"os_compute_api:os-fping:all_tenants",
"os_compute_api:os-hosts",
"os_compute_api:os-hypervisors",
"os_compute_api:os-instance-actions:events",
"os_compute_api:os-instance-usage-audit-log",
"os_compute_api:os-lock-server:unlock:unlock_override",
"os_compute_api:os-migrate-server:migrate",
"os_compute_api:os-migrate-server:migrate_live",
"os_compute_api:os-networks",
"os_compute_api:os-networks-associate",
"os_compute_api:os-quota-sets:update",
"os_compute_api:os-quota-sets:delete",
"os_compute_api:os-security-group-default-rules",
"os_compute_api:os-server-diagnostics",
"os_compute_api:os-services",
"os_compute_api:os-shelve:shelve_offload",
"os_compute_api:os-simple-tenant-usage:list",
"os_compute_api:os-availability-zone:detail",
"os_compute_api:os-used-limits",
"os_compute_api:os-migrations:index",
"os_compute_api:os-assisted-volume-snapshots:create",
"os_compute_api:os-assisted-volume-snapshots:delete",
"os_compute_api:os-console-auth-tokens",
"os_compute_api:os-quota-class-sets:update",
"os_compute_api:os-server-external-events:create",
"os_compute_api:os-volumes-attachments:update",
"os_compute_api:servers:migrations:index",
"os_compute_api:servers:migrations:show",
)
self.admin_or_owner_rules = (
"os_compute_api:servers:start",
"os_compute_api:servers:stop",
"os_compute_api:servers:trigger_crash_dump",
"os_compute_api:os-create-backup",
"os_compute_api:ips:index",
"os_compute_api:ips:show",
"os_compute_api:os-keypairs:create",
"os_compute_api:os-keypairs:delete",
"os_compute_api:os-keypairs:index",
"os_compute_api:os-keypairs:show",
"os_compute_api:os-lock-server:lock",
"os_compute_api:os-lock-server:unlock",
"os_compute_api:os-pause-server:pause",
"os_compute_api:os-pause-server:unpause",
"os_compute_api:os-quota-sets:show",
"os_compute_api:os-quota-sets:detail",
"os_compute_api:server-metadata:index",
"os_compute_api:server-metadata:show",
"os_compute_api:server-metadata:delete",
"os_compute_api:server-metadata:create",
"os_compute_api:server-metadata:update",
"os_compute_api:server-metadata:update_all",
"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-suspend-server:suspend",
"os_compute_api:os-suspend-server:resume",
"os_compute_api:os-tenant-networks",
"os_compute_api:extensions",
"os_compute_api:os-config-drive",
"os_compute_api:servers:confirm_resize",
"os_compute_api:servers:create",
"os_compute_api:servers:create:attach_network",
"os_compute_api:servers:create:attach_volume",
"os_compute_api:servers:create_image",
"os_compute_api:servers:delete",
"os_compute_api:servers:detail",
"os_compute_api:servers:index",
"os_compute_api:servers:reboot",
"os_compute_api:servers:rebuild",
"os_compute_api:servers:resize",
"os_compute_api:servers:revert_resize",
"os_compute_api:servers:show",
"os_compute_api:servers:update",
"os_compute_api:servers:create_image:allow_volume_backed",
"os_compute_api:os-admin-password",
"os_compute_api:os-attach-interfaces",
"os_compute_api:os-attach-interfaces:create",
"os_compute_api:os-attach-interfaces:delete",
"os_compute_api:os-consoles:create",
"os_compute_api:os-consoles:delete",
"os_compute_api:os-consoles:index",
"os_compute_api:os-consoles:show",
"os_compute_api:os-console-output",
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete",
"os_compute_api:os-extended-status",
"os_compute_api:os-extended-availability-zone",
"os_compute_api:os-extended-volumes",
"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-rxtx",
"os_compute_api:flavors",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ip-dns",
"os_compute_api:os-floating-ip-pools",
"os_compute_api:os-floating-ips",
"os_compute_api:os-fping",
"os_compute_api:image-size",
"os_compute_api:os-instance-actions",
"os_compute_api:os-keypairs",
"os_compute_api:limits",
"os_compute_api:os-multinic",
"os_compute_api:os-networks:view",
"os_compute_api:os-rescue",
"os_compute_api:os-security-groups",
"os_compute_api:os-server-password",
"os_compute_api:os-server-usage",
"os_compute_api:os-server-groups",
"os_compute_api:os-server-tags:delete",
"os_compute_api:os-server-tags:delete_all",
"os_compute_api:os-server-tags:index",
"os_compute_api:os-server-tags:show",
"os_compute_api:os-server-tags:update",
"os_compute_api:os-server-tags:update_all",
"os_compute_api:os-server-groups:index",
"os_compute_api:os-server-groups:show",
"os_compute_api:os-server-groups:create",
"os_compute_api:os-server-groups:delete",
"os_compute_api:os-shelve:shelve",
"os_compute_api:os-shelve:unshelve",
"os_compute_api:os-virtual-interfaces",
"os_compute_api:os-volumes",
"os_compute_api:os-volumes-attachments:index",
"os_compute_api:os-volumes-attachments:show",
"os_compute_api:os-volumes-attachments:create",
"os_compute_api:os-volumes-attachments:delete",
"os_compute_api:os-availability-zone:list",
)
self.non_admin_only_rules = (
"os_compute_api:os-hide-server-addresses",)
self.allow_all_rules = (
"os_compute_api:os-quota-sets:defaults",
)
def test_all_rules_in_sample_file(self):
special_rules = ["context_is_admin", "admin_or_owner", "default"]
for (name, rule) in self.fake_policy.items():
if name in special_rules:
continue
self.assertIn(name, policy.get_rules())
def test_admin_only_rules(self):
for rule in self.admin_only_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.non_admin_context, rule,
{'project_id': 'fake', 'user_id': 'fake'})
policy.authorize(self.admin_context, rule, self.target)
def test_non_admin_only_rules(self):
for rule in self.non_admin_only_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.admin_context, rule, self.target)
policy.authorize(self.non_admin_context, rule, self.target)
def test_admin_or_owner_rules(self):
for rule in self.admin_or_owner_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
self.non_admin_context, rule, self.target)
policy.authorize(self.non_admin_context, rule,
{'project_id': 'fake', 'user_id': 'fake'})
def test_allow_all_rules(self):
for rule in self.allow_all_rules:
policy.authorize(self.non_admin_context, rule, self.target)
def test_rule_missing(self):
rules = policy.get_rules()
        # NOTE(eliqiao): os_compute_api:os-quota-class-sets:show requires
        # admin=True or a quota_class match, so this rule won't belong to
        # admin_only, non_admin, admin_or_user, or empty_rule
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules + self.non_admin_only_rules +
self.allow_all_rules + special_rules)
self.assertEqual(set([]), result)
|
|
__author__ = 'Henri Bunting'
import sys
import numpy as np
from pypet.parameter import BaseParameter
import unittest
try:
import brian2
from brian2.units.stdunits import mV, mA, kHz, ms
from pypet.brian2.parameter import Brian2Parameter, Brian2Result, get_unit_fast
except ImportError:
brian2 = None
from pypet.parameter import PickleParameter, ArrayParameter, SparseParameter
from pypet.tests.unittests.parameter_test import ParameterTest, ResultTest
from pypet.tests.testutils.ioutils import parse_args, run_suite
from pypet.utils.explore import cartesian_product
import logging
logging.basicConfig(level=logging.DEBUG)
@unittest.skipIf(brian2 is None, 'Can only be run with brian2!')
class Brian2ParameterTest(ParameterTest):
tags = 'unittest', 'brian2', 'parameter', 'henri'
def setUp(self):
if not hasattr(self, 'data'):
self.data = {}
self.data['mV1'] = 42.0*mV
self.data['ampere1'] = 1*mA
self.data['integer'] = 16
#self.data['kHz05'] = 0.5*kHz
self.data['nested_array'] = np.array([[6.,7.,8.],[9.,10.,11.]]) * ms
self.data['b2a'] = np.array([1., 2.]) * mV
self.data['complex'] = np.array([1., 2.]) * mV*mV/mA**2.73
super(Brian2ParameterTest, self).setUp()
self.dynamic_imports = [Brian2Parameter]
def make_params(self):
self.param = {}
for key, val in self.data.items():
self.param[key] = Brian2Parameter(self.location+'.'+key, val, comment=key)
def explore(self):
self.explore_dict = cartesian_product({
#'npstr': [np.array(['Uno', 'Dos', 'Tres']),
# np.array(['Cinco', 'Seis', 'Siette']),
# np.array(['Ocho', 'Nueve', 'Diez'])],
'ampere1': [1*mA],
#'val0': [1, 2, 3],
'mV1': [42.0*mV, 3*mV, 4*mV],
'b2a': [np.array([1., 2.]) * mV]})
## Explore the parameter:
for key, vallist in self.explore_dict.items():
self.param[key]._explore(vallist)
self.assertTrue(self.param[key].v_explored and self.param[key].f_has_range())
def test_supports(self):
for key, val in self.data.items():
self.assertTrue(self.param[key].f_supports(val))
def test_false_on_values_not_of_same_type(self):
self.assertFalse(self.param[list(self.param.keys())[0]]._values_of_same_type(11, 99*mV))
@unittest.skipIf(brian2 is None, 'Can only be run with brian2!')
class Brian2ParameterDuplicatesInStoreTest(unittest.TestCase):
tags = 'unittest', 'brian2', 'parameter', 'store', 'henri'
def setUp(self):
self.data = {}
self.data['brian2_single_a'] = 1. * mV
self.data['brian2_array_b'] = np.array([3., 3., 4.]) * mV
self.data['brian2_array_c'] = np.array([5.]) * mV
self.data['brian2_array_d'] = np.array([[6.,7.,8.],[9.,10.,11.]]) * ms
#self.data['brian2_mixedtype_array_a'] = np.array([9., 10.]) * mV
self.data['brian2_mixedtype_array_b'] = np.array([13., 14.]) * mV
self.location = 'MyName.Is.myParam'
self.make_params()
self.explore()
def make_params(self):
self.param = {}
for key, val in self.data.items():
self.param[key] = Brian2Parameter(self.location+'.'+key, val, comment=key)
def explore(self):
self.explore_dict = cartesian_product({#'brian2_array_a': [np.array([1., 2.]) * mV],
# 'brian2_array_b': [2 * mV],
# Arrays need to be of the same size!
'brian2_array_c': [np.array([5., 8.]) * mV, np.array([7., 8.]) * mV],
})
## Explore the parameter:
for key, vallist in self.explore_dict.items():
self.param[key]._explore(vallist)
self.assertTrue(self.param[key].v_explored and self.param[key].f_has_range())
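# The test below stores each parameter, empties it, reloads it into a fresh
# instance from the storage dict, and then re-runs the value, exploration,
# and meta checks to make sure nothing is lost in the round trip.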
def test_storage_and_loading(self):
for key, param in self.param.items():
store_dict = param._store()
# Due to smart storing the storage dict should stay small: roughly 1 entry
# for the data, 1 for the reference, and a few for the arrays/matrices,
# i.e. fewer than 7 entries in total.
if param.f_has_range():
if isinstance(param,(ArrayParameter, PickleParameter)) and not isinstance(param, SparseParameter):
self.assertTrue(len(store_dict)<7)
# For sparse parameter it is more:
if isinstance(param, SparseParameter):
self.assertTrue(len(store_dict)<23)
constructor = param.__class__
param.f_unlock()
param.f_empty()
param = constructor('')
param._load(store_dict)
param._rename(self.location+'.'+key)
self.param[key] = param
self.test_the_insertion_made_implicitly_in_setUp()
self.test_exploration()
self.test_meta_settings()
def test_the_insertion_made_implicitly_in_setUp(self):
for key, val in self.data.items():
if not key in self.explore_dict:
self.param[key]._restore_default()
param_val = self.param[key].f_get()
self.assertTrue(np.all(repr(val) == repr(param_val)),'%s != %s' %(str(val),str(param_val)))
def test_exploration(self):
for key, vallist in self.explore_dict.items():
param = self.param[key]
for idx, val in enumerate(vallist):
assert isinstance(param, BaseParameter)
param._set_parameter_access(idx)
self.assertTrue(np.all(repr(param.f_get()) == repr(val)), '%s != %s' % (str(param.f_get()), str(val)))
param_val = self.param[key].f_get_range()[idx]
self.assertTrue(np.all(str(val) == str(param_val)),'%s != %s' %(str(val),str(param_val)))
param._restore_default()
self.assertTrue(param.v_explored and param.f_has_range(), 'Error for %s' % key)
val = self.data[key]
self.assertTrue(np.all(repr(param.f_get()) == repr(val)), '%s != %s' % (str(param.f_get()), str(val)))
def test_expanding(self):
for key, vallist in self.explore_dict.items():
param = self.param[key]
copy_list = vallist.copy()
old_len = len(vallist)
param.f_unlock()
param._expand(copy_list)
new_len = len(param.f_get_range())
self.assertEqual(new_len, 2 * old_len)
def test_loading_and_expanding(self):
# Regression test for issue #50
# https://github.com/SmokinCaterpillar/pypet/issues/50
for key, vallist in self.explore_dict.items():
param = self.param[key]
copy_list = vallist.copy()
old_len = len(vallist)
store_dict = param._store()
param.f_unlock()
param._load(store_dict)
param.f_unlock()
param._expand(copy_list)
new_len = len(param.f_get_range())
self.assertEqual(new_len, 2 * old_len)
def test_meta_settings(self):
for key, param in self.param.items():
self.assertEqual(param.v_full_name, self.location+'.'+key)
self.assertEqual(param.v_name, key)
self.assertEqual(param.v_location, self.location)
@unittest.skipIf(brian2 is None, 'Can only be run with brian2!')
class Brian2ResultTest(ResultTest):
tags = 'unittest', 'brian2', 'result', 'henri'
def make_constructor(self):
self.Constructor = Brian2Result
self.dynamic_imports = [Brian2Result]
def test_illegal_naming(self):
for res in self.results.values():
data_dict = {'val'+Brian2Result.IDENTIFIER:42}
with self.assertRaises(AttributeError):
res.f_set(**data_dict)
def setUp(self):
if not hasattr(self,'data'):
self.data = {}
self.data['mV1'] = 1*mV
self.data['ampere1'] = 1*mA
self.data['msecond17'] = 16*ms
self.data['kHz05'] = 0.5*kHz
self.data['mV_array'] = np.ones(20) * mV
self.data['integer'] = 444
self.data['complex'] = np.array([1., 2.]) * mV*mV/mA**-2.7343
super(Brian2ResultTest, self).setUp()
@unittest.skipIf(brian2 is None, 'Can only be run with brian2!')
class Brian2GetUnitFastTest(unittest.TestCase):
tags = 'unittest', 'brian2'
def test_get_unit_fast(self):
unit = get_unit_fast(42 * mV)
self.assertEqual(unit, 1000 * mV)
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import os
import traceback
import functools
import subprocess
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import six
import sys
sys.path.append("..")
import models
import utils
from reader import train, val
from utility import add_arguments, print_arguments
from batch_merge import copyback_repeat_bn_params, append_bn_repeat_init_op
from dist_utils import pserver_prepare, nccl2_prepare
from env import dist_env
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('total_images', int, 1281167, "Training image number.")
add_arg('num_epochs', int, 120, "number of epochs.")
add_arg('class_dim', int, 1000, "Class number.")
add_arg('image_shape', str, "3,224,224", "input image size")
add_arg('model_save_dir', str, "output", "model save directory")
add_arg('with_mem_opt', bool, False, "Whether to use memory optimization or not.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('checkpoint', str, None, "Whether to resume checkpoint.")
add_arg('lr', float, 0.1, "set learning rate.")
add_arg('lr_strategy', str, "piecewise_decay", "Set the learning rate decay strategy.")
add_arg('model', str, "DistResNet", "Set the network to use.")
add_arg('enable_ce', bool, False, "If set True, enable continuous evaluation job.")
add_arg('data_dir', str, "./data/ILSVRC2012", "The ImageNet dataset root dir.")
add_arg('model_category', str, "models", "Whether to use models_name or not, valid value:'models','models_name'" )
add_arg('fp16', bool, False, "Enable half precision training with fp16." )
add_arg('scale_loss', float, 1.0, "Scale loss for fp16." )
add_arg('reduce_master_grad', bool, False, "Whether to allreduce fp32 gradients." )
# for distributed
add_arg('update_method', str, "local", "Can be local, pserver, nccl2.")
add_arg('multi_batch_repeat', int, 1, "Batch merge repeats.")
add_arg('start_test_pass', int, 0, "Start test after x passes.")
add_arg('num_threads', int, 8, "Use num_threads to run the fluid program.")
add_arg('split_var', bool, True, "Split params on pserver.")
add_arg('async_mode', bool, False, "Async distributed training, only for pserver mode.")
add_arg('reduce_strategy', str, "allreduce", "Choose from reduce or allreduce.")
add_arg('skip_unbalanced_data', bool, False, "Skip the remaining batches of a pass if data are not balanced across nodes.")
add_arg('enable_sequential_execution', bool, False, "Enable sequential execution of operators in the compiled program.")
# yapf: enable
args = parser.parse_args()
return args
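# Example invocation (illustrative only; the script file name here is an assumption):
#   python dist_train.py --update_method local --batch_size 256 --model DistResNet
# The "pserver" and "nccl2" modes read the cluster topology from the PADDLE_*
# environment variables consumed by env.dist_env(); print_paddle_envs() below
# dumps the variables that were picked up.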
def get_device_num():
if os.getenv("CPU_NUM"):
return int(os.getenv("CPU_NUM"))
visible_device = os.getenv('CUDA_VISIBLE_DEVICES')
if visible_device:
device_num = len(visible_device.split(','))
else:
device_num = subprocess.check_output(['nvidia-smi', '-L']).decode().count('\n')
return device_num
def prepare_reader(is_train, pyreader, args, pass_id=0):
if is_train:
reader = train(data_dir=args.data_dir, pass_id_as_seed=pass_id)
else:
reader = val(data_dir=args.data_dir)
if is_train:
    # split the per-worker batch size evenly across the visible devices
    bs = args.batch_size // get_device_num()
else:
    bs = 16
pyreader.decorate_paddle_reader(
paddle.batch(
reader,
batch_size=bs))
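# build_program assembles either the train or the test program: a py_reader
# feeds (image, label) batches into the selected model, loss and top-1/top-5
# accuracy ops are added, and for training a Momentum optimizer with piecewise
# decay plus warmup is attached (with an optional fp16 path that keeps fp32
# master weights).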
def build_program(is_train, main_prog, startup_prog, args):
pyreader = None
class_dim = args.class_dim
image_shape = [int(m) for m in args.image_shape.split(",")]
trainer_count = args.dist_env["num_trainers"]
device_num_per_worker = get_device_num()
with fluid.program_guard(main_prog, startup_prog):
pyreader = fluid.layers.py_reader(
capacity=16,
shapes=([-1] + image_shape, (-1, 1)),
dtypes=('float32', 'int64'),
name="train_reader" if is_train else "test_reader",
use_double_buffer=True)
with fluid.unique_name.guard():
image, label = fluid.layers.read_file(pyreader)
if args.fp16:
image = fluid.layers.cast(image, "float16")
model_def = models.__dict__[args.model](layers=50, is_train=is_train)
predict = model_def.net(image, class_dim=class_dim)
cost, pred = fluid.layers.softmax_with_cross_entropy(predict, label, return_softmax=True)
if args.scale_loss > 1:
avg_cost = fluid.layers.mean(x=cost) * float(args.scale_loss)
else:
avg_cost = fluid.layers.mean(x=cost)
batch_acc1 = fluid.layers.accuracy(input=pred, label=label, k=1)
batch_acc5 = fluid.layers.accuracy(input=pred, label=label, k=5)
optimizer = None
if is_train:
start_lr = args.lr
end_lr = args.lr * trainer_count * args.multi_batch_repeat
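# Linear LR scaling: the peak (end) learning rate grows with the number of
# trainers and the batch-merge repeat count; lr_warmup below ramps from
# start_lr to end_lr over the first five passes.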
if os.getenv("FLAGS_selected_gpus"):
# in multi-process mode "trainer_count" counts every device in the whole
# cluster, so scale the end LR back down by the number of devices per worker.
end_lr /= device_num_per_worker
total_images = args.total_images / trainer_count
step = int(total_images / (args.batch_size * args.multi_batch_repeat) + 1)
warmup_steps = step * 5 # warmup 5 passes
epochs = [30, 60, 80]
bd = [step * e for e in epochs]
base_lr = end_lr
lr = [base_lr * (0.1 ** i) for i in range(len(bd) + 1)]
print("start lr: %s, end lr: %s, decay boundaries: %s" % (
start_lr,
end_lr,
bd
))
# NOTE: we put weight decay in layers config, and remove
# weight decay on bn layers, so don't add weight decay in
# optimizer config.
optimizer = fluid.optimizer.Momentum(
learning_rate=utils.learning_rate.lr_warmup(
fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
warmup_steps, start_lr, end_lr),
momentum=0.9)
if args.fp16:
params_grads = optimizer.backward(avg_cost)
master_params_grads = utils.create_master_params_grads(
params_grads, main_prog, startup_prog, args.scale_loss,
reduce_master_grad = args.reduce_master_grad)
optimizer.apply_gradients(master_params_grads)
utils.master_param_to_train_param(master_params_grads, params_grads, main_prog)
else:
optimizer.minimize(avg_cost)
# prepare reader for current program
prepare_reader(is_train, pyreader, args)
return pyreader, avg_cost, batch_acc1, batch_acc5
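# Evaluation helpers: test_single runs the test program on a plain Executor
# (one device), while test_parallel uses a parallel executor; both accumulate
# loss and top-1/top-5 accuracy over the whole validation pyreader.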
def test_single(exe, test_prog, args, pyreader, fetch_list):
acc1 = fluid.metrics.Accuracy()
acc5 = fluid.metrics.Accuracy()
test_losses = []
pyreader.start()
while True:
try:
acc_rets = exe.run(program=test_prog, fetch_list=fetch_list)
test_losses.append(acc_rets[0])
acc1.update(value=np.array(acc_rets[1]), weight=args.batch_size)
acc5.update(value=np.array(acc_rets[2]), weight=args.batch_size)
except fluid.core.EOFException:
pyreader.reset()
break
test_avg_loss = np.mean(np.array(test_losses))
return test_avg_loss, np.mean(acc1.eval()), np.mean(acc5.eval())
def test_parallel(exe, test_prog, args, pyreader, fetch_list):
acc1 = fluid.metrics.Accuracy()
acc5 = fluid.metrics.Accuracy()
test_losses = []
pyreader.start()
while True:
try:
acc_rets = exe.run(fetch_list=fetch_list)
test_losses.append(acc_rets[0])
acc1.update(value=np.array(acc_rets[1]), weight=args.batch_size)
acc5.update(value=np.array(acc_rets[2]), weight=args.batch_size)
except fluid.core.EOFException:
pyreader.reset()
break
test_avg_loss = np.mean(np.array(test_losses))
return test_avg_loss, np.mean(acc1.eval()), np.mean(acc5.eval())
def run_pserver(train_prog, startup_prog):
server_exe = fluid.Executor(fluid.CPUPlace())
server_exe.run(startup_prog)
server_exe.run(train_prog)
def train_parallel(args):
train_prog = fluid.Program()
test_prog = fluid.Program()
startup_prog = fluid.Program()
train_pyreader, train_cost, train_acc1, train_acc5 = build_program(True, train_prog, startup_prog, args)
test_pyreader, test_cost, test_acc1, test_acc5 = build_program(False, test_prog, startup_prog, args)
if args.update_method == "pserver":
train_prog, startup_prog = pserver_prepare(args, train_prog, startup_prog)
elif args.update_method == "nccl2":
nccl2_prepare(args, startup_prog)
if args.dist_env["training_role"] == "PSERVER":
run_pserver(train_prog, startup_prog)
exit(0)
if args.use_gpu:
# NOTE: for multi process mode: one process per GPU device.
gpu_id = 0
if os.getenv("FLAGS_selected_gpus"):
gpu_id = int(os.getenv("FLAGS_selected_gpus"))
place = core.CUDAPlace(gpu_id) if args.use_gpu else core.CPUPlace()
startup_exe = fluid.Executor(place)
if args.multi_batch_repeat > 1:
append_bn_repeat_init_op(train_prog, startup_prog, args.multi_batch_repeat)
startup_exe.run(startup_prog)
if args.checkpoint:
fluid.io.load_persistables(startup_exe, args.checkpoint, main_program=train_prog)
strategy = fluid.ExecutionStrategy()
strategy.num_threads = args.num_threads
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = False
build_strategy.memory_optimize = False
build_strategy.enable_sequential_execution = bool(args.enable_sequential_execution)
if args.reduce_strategy == "reduce":
    build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
else:
    build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
if args.update_method == "pserver" or args.update_method == "local":
# parameter server mode distributed training, merge
# gradients on local server, do not initialize
# ParallelExecutor with multi server all-reduce mode.
num_trainers = 1
trainer_id = 0
else:
num_trainers = args.dist_env["num_trainers"]
trainer_id = args.dist_env["trainer_id"]
# Set this to let build_strategy to add "allreduce_deps_pass" automatically
build_strategy.num_trainers = num_trainers
build_strategy.trainer_id = trainer_id
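# With multi_batch_repeat > 1 a "multi_batch_merge_pass" is inserted so that
# several small batches are accumulated before each parameter update,
# emulating a larger effective batch without extra device memory.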
if args.multi_batch_repeat > 1:
pass_builder = build_strategy._finalize_strategy_and_create_passes()
mypass = pass_builder.insert_pass(
len(pass_builder.all_passes()) - 4, "multi_batch_merge_pass")
mypass.set("num_repeats", args.multi_batch_repeat)
exe = fluid.ParallelExecutor(
True,
train_cost.name,
main_program=train_prog,
exec_strategy=strategy,
build_strategy=build_strategy,
num_trainers=num_trainers,
trainer_id=trainer_id)
# Uncomment below lines to use ParallelExecutor to run test.
# test_exe = fluid.ParallelExecutor(
# True,
# main_program=test_prog,
# share_vars_from=exe,
# scope=fluid.global_scope().new_scope()
# )
over_all_start = time.time()
fetch_list = [train_cost.name, train_acc1.name, train_acc5.name]
steps_per_pass = args.total_images // (args.batch_size * args.dist_env["num_trainers"])
for pass_id in range(args.num_epochs):
num_samples = 0
start_time = time.time()
batch_id = 1
# use pass_id+1 as per pass global shuffle for distributed training
prepare_reader(True, train_pyreader, args, pass_id + 1)
train_pyreader.start()
while True:
try:
if batch_id % 30 == 0:
fetch_ret = exe.run(fetch_list)
fetched_data = [np.mean(np.array(d)) for d in fetch_ret]
print("Pass [%d/%d], batch [%d/%d], loss %s, acc1: %s, acc5: %s, avg batch time %.4f" %
(pass_id, args.num_epochs, batch_id, steps_per_pass, fetched_data[0], fetched_data[1],
fetched_data[2], (time.time()-start_time) / batch_id))
else:
fetch_ret = exe.run([])
except fluid.core.EOFException:
break
except fluid.core.EnforceNotMet:
traceback.print_exc()
break
num_samples += args.batch_size
batch_id += 1
if args.skip_unbalanced_data and batch_id >= steps_per_pass:
break
print_train_time(start_time, time.time(), num_samples)
train_pyreader.reset()
if pass_id >= args.start_test_pass:
if args.multi_batch_repeat > 1:
copyback_repeat_bn_params(train_prog)
test_fetch_list = [test_cost.name, test_acc1.name, test_acc5.name]
test_ret = test_single(startup_exe, test_prog, args, test_pyreader,test_fetch_list)
# NOTE: switch to below line if you use ParallelExecutor to run test.
# test_ret = test_parallel(test_exe, test_prog, args, test_pyreader,test_fetch_list)
print("Pass: %d, Test Loss %s, test acc1: %s, test acc5: %s\n" %
(pass_id, test_ret[0], test_ret[1], test_ret[2]))
model_path = os.path.join(args.model_save_dir, args.model, str(pass_id))
print("saving model to ", model_path)
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(startup_exe, model_path, main_program=train_prog)
startup_exe.close()
print("total train time: ", time.time() - over_all_start)
def print_train_time(start_time, end_time, num_samples):
train_elapsed = end_time - start_time
examples_per_sec = num_samples / train_elapsed
print('\nTotal examples: %d, total time: %.5f, %.5f examples/sec\n' %
(num_samples, train_elapsed, examples_per_sec))
def print_paddle_envs():
print('----------- Configuration envs -----------')
for k in os.environ:
if "PADDLE_" in k:
print("ENV %s:%s" % (k, os.environ[k]))
print('------------------------------------------------')
def main():
args = parse_args()
print_arguments(args)
print_paddle_envs()
args.dist_env = dist_env()
train_parallel(args)
if __name__ == "__main__":
main()
|
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuwen Xiong, Xizhou Zhu
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.proposal import *
from operator_py.proposal_target import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_convnew3(Symbol):
def __init__(self):
"""
Use __init__ to define the parameters the network needs
"""
self.eps = 1e-5
self.use_global_stats = True
self.workspace = 512
self.units = (3, 4, 23, 3) # use for 101
self.filter_list = [256, 512, 1024, 2048]
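# get_resnet_v1_conv4 below builds the conv1-conv4 trunk of ResNet-101 with
# Caffe-style layer names (res2a ... res4b22): each residual unit is a
# 1x1 -> 3x3 -> 1x1 bottleneck with BatchNorm frozen to inference statistics
# (use_global_stats=True) and a ReLU after the element-wise shortcut add.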
def get_resnet_v1_conv4(self, data):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2),
no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3),
stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1),
stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1),
stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
return res4b22_relu
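# res5 (conv5) stage: three bottleneck units whose 3x3 convolutions use
# stride 1 with dilate=(2, 2) and pad=(2, 2), so this stage keeps the conv4
# spatial resolution instead of downsampling again; all BatchNorm layers run
# with use_global_stats=True, i.e. frozen statistics.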
def get_resnet_v1_conv5(self, conv_feat):
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=conv_feat, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch1 = bn5a_branch1
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=conv_feat, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=(2, 2),
kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5a_branch2c = bn5a_branch2c
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=(2, 2),
kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=(2, 2),
kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True,
fix_gamma=False, eps=self.eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res5c_relu
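# RPN head: a shared 3x3 convolution followed by two sibling 1x1 convolutions
# that output 2 * num_anchors objectness scores and 4 * num_anchors box deltas.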
def get_rpn(self, conv_feat, num_anchors):
rpn_conv = mx.sym.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.sym.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.sym.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
return rpn_cls_score, rpn_bbox_pred
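# get_symbol assembles the full detection graph: shared conv4 features feed the
# RPN; during training, proposals (from mx.contrib.sym.Proposal or the custom
# Python 'proposal' op) are matched to ground truth by the 'proposal_target'
# custom op, while at test time only proposals are generated. The dilated res5
# output then passes through conv_new_1 into the position-sensitive score and
# bbox maps that are pooled per ROI.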
def get_symbol(self, cfg, is_train=True):
# config aliases for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
num_anchors = cfg.network.NUM_ANCHORS
# input init
if is_train:
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
gt_boxes = mx.sym.Variable(name="gt_boxes")
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
else:
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
conv_feat = self.get_resnet_v1_conv4(data)
# res5
relu1 = self.get_resnet_v1_conv5(conv_feat)
rpn_cls_score, rpn_bbox_pred = self.get_rpn(conv_feat, num_anchors)
if is_train:
# prepare rpn data
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.sym.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
# bounding box regression
rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
# ROI proposal
rpn_cls_act = mx.sym.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.sym.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if cfg.TRAIN.CXX_PROPOSAL:
rois = mx.contrib.sym.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N,
threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.sym.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE,
scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N,
threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE)
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape,
op_type='proposal_target',
num_classes=num_reg_classes,
batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS,
cfg=cPickle.dumps(cfg),
fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
# ROI Proposal
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.sym.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.sym.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if cfg.TEST.CXX_PROPOSAL:
rois = mx.contrib.sym.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES),
ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
else:
rois = mx.sym.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE,
scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
# conv_new_1
conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name="conv_new_1", lr_mult=3.0)
relu_new_1 = mx.sym.Activation(data=conv_new_1, act_type='relu', name='relu1')
# rfcn_cls/rfcn_bbox
rfcn_cls = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7*7*num_classes, name="rfcn_cls")
rfcn_bbox = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7*7*4*num_reg_classes, name="rfcn_bbox")
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7, pooled_size=7,
output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7, pooled_size=7,
output_dim=8, spatial_scale=0.0625)
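# Note: output_dim=8 above hard-codes 4 coordinates x 2 classes, i.e. it assumes
# class-agnostic bbox regression (num_reg_classes == 2), and spatial_scale=0.0625
# corresponds to an overall feature stride of 16.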
cls_score = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes))
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes),
name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes),
name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def get_symbol_rpn(self, cfg, is_train=True):
# config aliases for convenience
num_anchors = cfg.network.NUM_ANCHORS
# input init
if is_train:
data = mx.sym.Variable(name="data")
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
else:
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
conv_feat = self.get_resnet_v1_conv4(data)
rpn_cls_score, rpn_bbox_pred = self.get_rpn(conv_feat, num_anchors)
if is_train:
# prepare rpn data
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.sym.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob",
grad_scale=1.0)
# bounding box regression
rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss])
else:
# ROI Proposal
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.sym.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.sym.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if cfg.TEST.CXX_PROPOSAL:
rois, score = mx.contrib.sym.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', output_score=True,
feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES),
ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
else:
rois, score = mx.sym.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', output_score=True,
op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE,
scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
group = mx.symbol.Group([rois, score])
self.sym = group
return group
def get_symbol_rfcn(self, cfg, is_train=True):
# config aliases for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
# input init
if is_train:
data = mx.symbol.Variable(name="data")
rois = mx.symbol.Variable(name='rois')
label = mx.symbol.Variable(name='label')
bbox_target = mx.symbol.Variable(name='bbox_target')
bbox_weight = mx.symbol.Variable(name='bbox_weight')
# reshape input
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
label = mx.symbol.Reshape(data=label, shape=(-1,), name='label_reshape')
bbox_target = mx.symbol.Reshape(data=bbox_target, shape=(-1, 4 * num_reg_classes), name='bbox_target_reshape')
bbox_weight = mx.symbol.Reshape(data=bbox_weight, shape=(-1, 4 * num_reg_classes), name='bbox_weight_reshape')
else:
data = mx.sym.Variable(name="data")
rois = mx.symbol.Variable(name='rois')
# reshape input
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
# shared convolutional layers
conv_feat = self.get_resnet_v1_conv4(data)
relu1 = self.get_resnet_v1_conv5(conv_feat)
# conv_new_1
conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name="conv_new_1", lr_mult=3.0)
relu_new_1 = mx.sym.Activation(data=conv_new_1, act_type='relu', name='relu1')
# rfcn_cls/rfcn_bbox
rfcn_cls = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7*7*num_classes, name="rfcn_cls")
rfcn_bbox = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7*7*4*num_reg_classes, name="rfcn_bbox")
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7, pooled_size=7,
output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7, pooled_size=7,
output_dim=8, spatial_scale=0.0625)
cls_score = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes))
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1, grad_scale=1.0)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid', grad_scale=1.0)
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
# reshape output
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([cls_prob, bbox_loss, mx.sym.BlockGrad(label)]) if cfg.TRAIN.ENABLE_OHEM else mx.sym.Group([cls_prob, bbox_loss])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes),
name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes),
name='bbox_pred_reshape')
group = mx.sym.Group([cls_prob, bbox_pred])
self.sym = group
return group
def init_weight(self, cfg, arg_params, aux_params):
arg_params['rpn_conv_3x3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_conv_3x3_weight'])
arg_params['rpn_conv_3x3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_conv_3x3_bias'])
arg_params['rpn_cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_cls_score_weight'])
arg_params['rpn_cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_cls_score_bias'])
arg_params['rpn_bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_bbox_pred_weight'])
arg_params['rpn_bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_bbox_pred_bias'])
arg_params['conv_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['conv_new_1_weight'])
arg_params['conv_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['conv_new_1_bias'])
arg_params['rfcn_cls_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rfcn_cls_weight'])
arg_params['rfcn_cls_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rfcn_cls_bias'])
arg_params['rfcn_bbox_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rfcn_bbox_weight'])
arg_params['rfcn_bbox_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rfcn_bbox_bias'])
def init_weight_rpn(self, cfg, arg_params, aux_params):
arg_params['rpn_conv_3x3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_conv_3x3_weight'])
arg_params['rpn_conv_3x3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_conv_3x3_bias'])
arg_params['rpn_cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_cls_score_weight'])
arg_params['rpn_cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_cls_score_bias'])
arg_params['rpn_bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_bbox_pred_weight'])
arg_params['rpn_bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_bbox_pred_bias'])
def init_weight_rfcn(self, cfg, arg_params, aux_params):
arg_params['conv_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['conv_new_1_weight'])
arg_params['conv_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['conv_new_1_bias'])
arg_params['rfcn_cls_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rfcn_cls_weight'])
arg_params['rfcn_cls_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rfcn_cls_bias'])
arg_params['rfcn_bbox_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rfcn_bbox_weight'])
arg_params['rfcn_bbox_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rfcn_bbox_bias'])
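# ---------------------------------------------------------------------------
# Illustrative sketch only (not used by the class above, and its layer names
# would not match the pretrained parameter names): every res4/res5 unit above
# is the same 1x1 -> 3x3 -> 1x1 bottleneck with frozen BatchNorm and an
# identity shortcut, so the repeated blocks could be generated by a helper
# like this. `bottleneck_unit` and its arguments are hypothetical.
# ---------------------------------------------------------------------------
def bottleneck_unit(data, prefix, num_mid, num_out, eps, dilate=(1, 1)):
    """One identity-shortcut bottleneck: conv1x1-BN-ReLU, conv3x3-BN-ReLU,
    conv1x1-BN, then add the shortcut and apply a final ReLU."""
    import mxnet as mx  # local import so the sketch stands alone
    pad = dilate  # a stride-1 3x3 conv keeps its spatial size when pad == dilate
    b = mx.symbol.Convolution(name=prefix + '_branch2a', data=data, num_filter=num_mid,
                              pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
    b = mx.symbol.BatchNorm(name=prefix + '_bn2a', data=b, use_global_stats=True,
                            fix_gamma=False, eps=eps)
    b = mx.symbol.Activation(name=prefix + '_relu2a', data=b, act_type='relu')
    b = mx.symbol.Convolution(name=prefix + '_branch2b', data=b, num_filter=num_mid,
                              pad=pad, kernel=(3, 3), stride=(1, 1), dilate=dilate,
                              no_bias=True)
    b = mx.symbol.BatchNorm(name=prefix + '_bn2b', data=b, use_global_stats=True,
                            fix_gamma=False, eps=eps)
    b = mx.symbol.Activation(name=prefix + '_relu2b', data=b, act_type='relu')
    b = mx.symbol.Convolution(name=prefix + '_branch2c', data=b, num_filter=num_out,
                              pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
    b = mx.symbol.BatchNorm(name=prefix + '_bn2c', data=b, use_global_stats=True,
                            fix_gamma=False, eps=eps)
    out = mx.symbol.broadcast_add(name=prefix + '_add', *[data, b])
    return mx.symbol.Activation(name=prefix + '_relu', data=out, act_type='relu')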
|
|
"""ACME Identifier Validation Challenges."""
import abc
import functools
import hashlib
import logging
import socket
from cryptography.hazmat.primitives import hashes
import OpenSSL
import requests
from acme import errors
from acme import crypto_util
from acme import fields
from acme import jose
from acme import other
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Challenge(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge."""
TYPES = {}
@classmethod
def from_json(cls, jobj):
try:
return super(Challenge, cls).from_json(jobj)
except jose.UnrecognizedTypeError as error:
logger.debug(error)
return UnrecognizedChallenge.from_json(jobj)
class ContinuityChallenge(Challenge): # pylint: disable=abstract-method
"""Client validation challenges."""
class DVChallenge(Challenge): # pylint: disable=abstract-method
"""Domain validation challenges."""
class ChallengeResponse(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge response."""
TYPES = {}
resource_type = 'challenge'
resource = fields.Resource(resource_type)
class UnrecognizedChallenge(Challenge):
"""Unrecognized challenge.
ACME specification defines a generic framework for challenges and
defines some standard challenges that are implemented in this
module. However, other implementations (including peers) might
define additional challenge types, which should be ignored if
unrecognized.
:ivar jobj: Original JSON decoded object.
"""
def __init__(self, jobj):
super(UnrecognizedChallenge, self).__init__()
object.__setattr__(self, "jobj", jobj)
def to_partial_json(self):
# pylint: disable=no-member
return self.jobj
@classmethod
def from_json(cls, jobj):
return cls(jobj)
class _TokenDVChallenge(DVChallenge):
"""DV Challenge with token.
:ivar bytes token:
"""
TOKEN_SIZE = 128 // 8  # Based on the entropy value from the spec
"""Minimum size of the :attr:`token` in bytes."""
# TODO: acme-spec doesn't specify token as base64-encoded value
token = jose.Field(
"token", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))
# XXX: rename to ~token_good_for_url
@property
def good_token(self): # XXX: @token.decoder
"""Is `token` good?
.. todo:: acme-spec wants "It MUST NOT contain any non-ASCII
characters", but it should also warrant that it doesn't
contain ".." or "/"...
"""
# TODO: check that path combined with uri does not go above
# URI_ROOT_PATH!
return b'..' not in self.token and b'/' not in self.token
class KeyAuthorizationChallengeResponse(ChallengeResponse):
"""Response to Challenges based on Key Authorization.
:param unicode key_authorization:
"""
key_authorization = jose.Field("keyAuthorization")
thumbprint_hash_function = hashes.SHA256
def verify(self, chall, account_public_key):
"""Verify the key authorization.
:param KeyAuthorization chall: Challenge that corresponds to
this response.
:param JWK account_public_key:
:return: ``True`` iff verification of the key authorization was
successful.
:rtype: bool
"""
parts = self.key_authorization.split('.') # pylint: disable=no-member
if len(parts) != 2:
logger.debug("Key authorization (%r) is not well formed",
self.key_authorization)
return False
if parts[0] != chall.encode("token"):
logger.debug("Mismatching token in key authorization: "
"%r instead of %r", parts[0], chall.encode("token"))
return False
thumbprint = jose.b64encode(account_public_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
if parts[1] != thumbprint:
logger.debug("Mismatching thumbprint in key authorization: "
"%r instead of %r", parts[1], thumbprint)
return False
return True
class KeyAuthorizationChallenge(_TokenDVChallenge):
# pylint: disable=abstract-class-little-used,too-many-ancestors
"""Challenge based on Key Authorization.
:param response_cls: Subclass of `KeyAuthorizationChallengeResponse`
that will be used to generate `response`.
"""
__metaclass__ = abc.ABCMeta
response_cls = NotImplemented
thumbprint_hash_function = (
KeyAuthorizationChallengeResponse.thumbprint_hash_function)
def key_authorization(self, account_key):
"""Generate Key Authorization.
:param JWK account_key:
:rtype: unicode
"""
return self.encode("token") + "." + jose.b64encode(
account_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
def response(self, account_key):
"""Generate response to the challenge.
:param JWK account_key:
:returns: Response (initialized `response_cls`) to the challenge.
:rtype: KeyAuthorizationChallengeResponse
"""
return self.response_cls(
key_authorization=self.key_authorization(account_key))
@abc.abstractmethod
def validation(self, account_key):
"""Generate validation for the challenge.
Subclasses must implement this method, but they are likely to
return completely different data structures, depending on what's
necessary to complete the challenge. Interpretation of that
return value must be known to the caller.
:param JWK account_key:
:returns: Challenge-specific validation.
"""
raise NotImplementedError() # pragma: no cover
def response_and_validation(self, account_key):
"""Generate response and validation.
Convenience function that returns the results of `response` and
`validation`.
:param JWK account_key:
:rtype: tuple
"""
return (self.response(account_key), self.validation(account_key))
@ChallengeResponse.register
class HTTP01Response(KeyAuthorizationChallengeResponse):
"""ACME http-01 challenge response."""
typ = "http-01"
PORT = 80
def simple_verify(self, chall, domain, account_public_key, port=None):
"""Simple verify.
:param challenges.HTTP01 chall: Corresponding challenge.
:param unicode domain: Domain name being verified.
:param JWK account_public_key: Public key for the key pair
being authorized. If ``None``, key verification is not
performed!
:param int port: Port used in the validation.
:returns: ``True`` iff validation is successful, ``False``
otherwise.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
# TODO: the ACME specification defines a URI template that doesn't
# allow using a custom port... Make sure port is not in the
# request URI, if it's standard.
if port is not None and port != self.PORT:
logger.warning(
"Using non-standard port for http-01 verification: %s", port)
domain += ":{0}".format(port)
uri = chall.uri(domain)
logger.debug("Verifying %s at %s...", chall.typ, uri)
try:
http_response = requests.get(uri)
except requests.exceptions.RequestException as error:
logger.error("Unable to reach %s: %s", uri, error)
return False
logger.debug("Received %s: %s. Headers: %s", http_response,
http_response.text, http_response.headers)
found_ct = http_response.headers.get(
"Content-Type", chall.CONTENT_TYPE)
if found_ct != chall.CONTENT_TYPE:
logger.debug("Wrong Content-Type: found %r, expected %r",
found_ct, chall.CONTENT_TYPE)
return False
if self.key_authorization != http_response.text:
logger.debug("Key authorization from response (%r) doesn't match "
"HTTP response (%r)", self.key_authorization,
http_response.text)
return False
return True
@Challenge.register # pylint: disable=too-many-ancestors
class HTTP01(KeyAuthorizationChallenge):
"""ACME http-01 challenge."""
response_cls = HTTP01Response
typ = response_cls.typ
CONTENT_TYPE = "text/plain"
"""Only valid value for Content-Type if the header is included."""
URI_ROOT_PATH = ".well-known/acme-challenge"
"""URI root path for the server provisioned resource."""
@property
def path(self):
"""Path (starting with '/') for provisioned resource.
:rtype: string
"""
return '/' + self.URI_ROOT_PATH + '/' + self.encode('token')
def uri(self, domain):
"""Create a URI to the provisioned resource.
Forms a URI to the HTTP server's provisioned resource
(containing :attr:`~HTTP01.token`).
:param unicode domain: Domain name being verified.
:rtype: string
"""
return "http://" + domain + self.path
def validation(self, account_key):
"""Generate validation.
:param JWK account_key:
:rtype: unicode
"""
return self.key_authorization(account_key)
@Challenge.register # pylint: disable=too-many-ancestors
class DVSNI(_TokenDVChallenge):
"""ACME "dvsni" challenge.
:ivar bytes token: Random data, **not** base64-encoded.
"""
typ = "dvsni"
PORT = 443
"""Port to perform DVSNI challenge."""
def gen_response(self, account_key, alg=jose.RS256, **kwargs):
"""Generate response.
:param .JWK account_key: Private account key.
:rtype: .DVSNIResponse
"""
return DVSNIResponse(validation=jose.JWS.sign(
payload=self.json_dumps(sort_keys=True).encode('utf-8'),
key=account_key, alg=alg, **kwargs))
@ChallengeResponse.register
class DVSNIResponse(ChallengeResponse):
"""ACME "dvsni" challenge response.
:param bytes s: Random data, **not** base64-encoded.
"""
typ = "dvsni"
DOMAIN_SUFFIX = b".acme.invalid"
"""Domain name suffix."""
PORT = DVSNI.PORT
"""Port to perform DVSNI challenge."""
validation = jose.Field("validation", decoder=jose.JWS.from_json)
@property
def z(self): # pylint: disable=invalid-name
"""The ``z`` parameter.
:rtype: bytes
"""
# Instance of 'Field' has no 'signature' member
# pylint: disable=no-member
return hashlib.sha256(self.validation.signature.encode(
"signature").encode("utf-8")).hexdigest().encode()
@property
def z_domain(self):
"""Domain name for certificate subjectAltName.
:rtype: bytes
"""
z = self.z # pylint: disable=invalid-name
return z[:32] + b'.' + z[32:] + self.DOMAIN_SUFFIX
@property
def chall(self):
"""Get challenge encoded in the `validation` payload.
:rtype: challenges.DVSNI
"""
# pylint: disable=no-member
return DVSNI.json_loads(self.validation.payload.decode('utf-8'))
def gen_cert(self, key=None, bits=2048):
"""Generate DVSNI certificate.
:param OpenSSL.crypto.PKey key: Optional private key used in
certificate generation. If not provided (``None``), then a
fresh key will be generated.
:param int bits: Number of bits for newly generated key.
:rtype: `tuple` of `OpenSSL.crypto.X509` and
`OpenSSL.crypto.PKey`
"""
if key is None:
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
return crypto_util.gen_ss_cert(key, [
# z_domain is too big to fit into CN, hence first dummy domain
'dummy', self.z_domain.decode()], force_san=True), key
def probe_cert(self, domain, **kwargs):
"""Probe DVSNI challenge certificate.
:param unicode domain:
"""
if "host" not in kwargs:
host = socket.gethostbyname(domain)
logger.debug('%s resolved to %s', domain, host)
kwargs["host"] = host
kwargs.setdefault("port", self.PORT)
kwargs["name"] = self.z_domain
# TODO: try different methods?
# pylint: disable=protected-access
return crypto_util.probe_sni(**kwargs)
def verify_cert(self, cert):
"""Verify DVSNI challenge certificate."""
# pylint: disable=protected-access
sans = crypto_util._pyopenssl_cert_or_req_san(cert)
logger.debug('Certificate %s. SANs: %s', cert.digest('sha1'), sans)
return self.z_domain.decode() in sans
def simple_verify(self, chall, domain, account_public_key,
cert=None, **kwargs):
"""Simple verify.
Verify ``validation`` using ``account_public_key``, optionally
probe the DVSNI certificate and check it using `verify_cert`.
:param .challenges.DVSNI chall: Corresponding challenge.
:param str domain: Domain name being validated.
:param JWK account_public_key:
:param OpenSSL.crypto.X509 cert: Optional certificate. If not
provided (``None``), the certificate will be retrieved using
`probe_cert`.
:returns: ``True`` iff client's control of the domain has been
verified, ``False`` otherwise.
:rtype: bool
"""
# pylint: disable=no-member
if not self.validation.verify(key=account_public_key):
return False
# TODO: it's not checked that the payload has exactly 2 fields!
try:
decoded_chall = self.chall
except jose.DeserializationError as error:
logger.debug(error, exc_info=True)
return False
if decoded_chall.token != chall.token:
logger.debug("Wrong token: expected %r, found %r",
chall.token, decoded_chall.token)
return False
if cert is None:
try:
cert = self.probe_cert(domain=domain, **kwargs)
except errors.Error as error:
logger.debug(error, exc_info=True)
return False
return self.verify_cert(cert)
@Challenge.register
class RecoveryContact(ContinuityChallenge):
"""ACME "recoveryContact" challenge.
:ivar unicode activation_url:
:ivar unicode success_url:
:ivar unicode contact:
"""
typ = "recoveryContact"
activation_url = jose.Field("activationURL", omitempty=True)
success_url = jose.Field("successURL", omitempty=True)
contact = jose.Field("contact", omitempty=True)
@ChallengeResponse.register
class RecoveryContactResponse(ChallengeResponse):
"""ACME "recoveryContact" challenge response.
:ivar unicode token:
"""
typ = "recoveryContact"
token = jose.Field("token", omitempty=True)
@Challenge.register
class ProofOfPossession(ContinuityChallenge):
"""ACME "proofOfPossession" challenge.
:ivar .JWAAlgorithm alg:
:ivar bytes nonce: Random data, **not** base64-encoded.
:ivar hints: Various clues for the client (:class:`Hints`).
"""
typ = "proofOfPossession"
NONCE_SIZE = 16
class Hints(jose.JSONObjectWithFields):
"""Hints for "proofOfPossession" challenge.
:ivar JWK jwk: JSON Web Key
:ivar tuple cert_fingerprints: `tuple` of `unicode`
:ivar tuple certs: Sequence of :class:`acme.jose.ComparableX509`
certificates.
:ivar tuple subject_key_identifiers: `tuple` of `unicode`
:ivar tuple issuers: `tuple` of `unicode`
:ivar tuple authorized_for: `tuple` of `unicode`
"""
jwk = jose.Field("jwk", decoder=jose.JWK.from_json)
cert_fingerprints = jose.Field(
"certFingerprints", omitempty=True, default=())
certs = jose.Field("certs", omitempty=True, default=())
subject_key_identifiers = jose.Field(
"subjectKeyIdentifiers", omitempty=True, default=())
serial_numbers = jose.Field("serialNumbers", omitempty=True, default=())
issuers = jose.Field("issuers", omitempty=True, default=())
authorized_for = jose.Field("authorizedFor", omitempty=True, default=())
@certs.encoder
def certs(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(jose.encode_cert(cert) for cert in value)
@certs.decoder
def certs(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(jose.decode_cert(cert) for cert in value)
alg = jose.Field("alg", decoder=jose.JWASignature.from_json)
nonce = jose.Field(
"nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE))
hints = jose.Field("hints", decoder=Hints.from_json)
@ChallengeResponse.register
class ProofOfPossessionResponse(ChallengeResponse):
"""ACME "proofOfPossession" challenge response.
:ivar bytes nonce: Random data, **not** base64-encoded.
:ivar acme.other.Signature signature: Signature of this message.
"""
typ = "proofOfPossession"
NONCE_SIZE = ProofOfPossession.NONCE_SIZE
nonce = jose.Field(
"nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=NONCE_SIZE))
signature = jose.Field("signature", decoder=other.Signature.from_json)
def verify(self):
"""Verify the challenge."""
# self.signature is not Field | pylint: disable=no-member
return self.signature.verify(self.nonce)
@Challenge.register # pylint: disable=too-many-ancestors
class DNS(_TokenDVChallenge):
"""ACME "dns" challenge."""
typ = "dns"
LABEL = "_acme-challenge"
"""Label clients prepend to the domain name being validated."""
def gen_validation(self, account_key, alg=jose.RS256, **kwargs):
"""Generate validation.
:param .JWK account_key: Private account key.
:param .JWA alg:
:returns: This challenge wrapped in `.JWS`
:rtype: .JWS
"""
return jose.JWS.sign(
payload=self.json_dumps(sort_keys=True).encode('utf-8'),
key=account_key, alg=alg, **kwargs)
def check_validation(self, validation, account_public_key):
"""Check validation.
:param JWS validation:
:param JWK account_public_key:
:rtype: bool
"""
if not validation.verify(key=account_public_key):
return False
try:
return self == self.json_loads(
validation.payload.decode('utf-8'))
except jose.DeserializationError as error:
logger.debug("Checking validation for DNS failed: %s", error)
return False
def gen_response(self, account_key, **kwargs):
"""Generate response.
:param .JWK account_key: Private account key.
:param .JWA alg:
:rtype: DNSResponse
"""
return DNSResponse(validation=self.gen_validation(
account_key, **kwargs))
def validation_domain_name(self, name):
"""Domain name for TXT validation record.
:param unicode name: Domain name being validated.
"""
return "{0}.{1}".format(self.LABEL, name)
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
"""ACME "dns" challenge response.
:param JWS validation:
"""
typ = "dns"
validation = jose.Field("validation", decoder=jose.JWS.from_json)
def check_validation(self, chall, account_public_key):
"""Check validation.
:param challenges.DNS chall:
:param JWK account_public_key:
:rtype: bool
"""
return chall.check_validation(self.validation, account_public_key)
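# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of this module's API): for an http-01
# challenge the client serves the key authorization at `chall.path` on
# port 80, and a validator fetches `chall.uri(domain)`. The token below is
# illustrative; real tokens are issued by the ACME server, and `account_key`
# would be an acme.jose JWK loaded elsewhere.
if __name__ == '__main__':  # pragma: no cover - illustration only
    chall = HTTP01(token=b'a' * 16)
    print(chall.path)                  # '/.well-known/acme-challenge/<b64 token>'
    print(chall.uri('example.com'))    # 'http://example.com/.well-known/...'
    # With a real account key:
    # response, validation = chall.response_and_validation(account_key)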
|
|
# Copyright 2015 Jarrod N. Bakker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ACLSwitch modules
from aclswitch.acl.acl_manager import ACLManager
from aclswitch.flow.flow_scheduler import FlowScheduler
from aclswitch.policy.policy_manager import PolicyManager
# Module imports
import logging
__author__ = "Jarrod N. Bakker"
__status__ = "Development"
class ACLSwitchAPI:
"""API for modifying and viewing the state of ACLSwitch.
"""
def __init__(self, logging_config, aclsw_version, flow_man):
"""Initialise the API class.
:param logging_config: Logging configuration dict.
:param aclsw_version: The current version of ACLSwitch.
:param flow_man: FlowManager object.
"""
self._logging = logging.getLogger(__name__)
self._logging.setLevel(logging_config["min_lvl"])
self._logging.propagate = logging_config["propagate"]
self._logging.addHandler(logging_config["handler"])
self._logging.info("Initialising API...")
self._aclsw_version = aclsw_version
self._flow_man = flow_man
self._acl_man = ACLManager(logging_config)
self._pol_man = PolicyManager(logging_config)
self._flow_sch = FlowScheduler(logging_config, self, flow_man)
def acl_create_rule(self, rule):
"""Create an ACL rule.
:param rule: dict of the rule to create.
:return: Result of the operation.
"""
if not self._pol_man.policy_exists(rule["policy"]):
return ReturnStatus.POLICY_NOT_EXISTS
if not self._acl_man.acl_rule_syntax_check(rule):
return ReturnStatus.RULE_SYNTAX_INVALID
rule_id = self._acl_man.acl_add_rule(rule)
if rule_id is None:
return ReturnStatus.RULE_EXISTS
self._pol_man.policy_add_rule(rule["policy"], rule_id)
new_rule = self.acl_get_rule(rule_id)
if new_rule.time_enforce == "N/A":
switches = self.policy_get_connected_switches(rule["policy"])
self._flow_man.flow_deploy_single_rule(new_rule, switches)
else:
self._flow_sch.sched_add_rule(rule_id, new_rule.time_enforce)
return ReturnStatus.RULE_CREATED
def acl_remove_rule(self, rule_id):
"""Remove an ACL rule.
:param rule_id: ID of the rule to remove.
:return: Result of the operation.
"""
if not self._acl_man.acl_is_rule(rule_id):
return ReturnStatus.RULE_NOT_EXISTS
rule = self._acl_man.acl_remove_rule(rule_id)
self._pol_man.policy_remove_rule(rule.policy, rule_id)
switches = self.policy_get_switches(rule.policy)
if rule.time_enforce == "N/A":
switches = self.policy_get_connected_switches(rule.policy)
self._flow_man.flow_remove_single_rule(rule, switches)
else:
self._flow_sch.sched_remove_rule(rule_id)
self._flow_man.flow_remove_single_rule(rule, switches)
return ReturnStatus.RULE_REMOVED
def acl_get_rule(self, rule_id):
"""Return a rule given a rule ID.
:param rule_id: ID of a rule.
:return: Named tuple of a rule.
"""
if not self._acl_man.acl_is_rule(rule_id):
return -1
return self._acl_man.acl_get_rule(rule_id)
def policy_create(self, policy):
"""Create a policy domain.
:param policy: Name of the policy domain.
:return: Result of the operation.
"""
if self._pol_man.policy_create(policy):
return ReturnStatus.POLICY_CREATED
else:
return ReturnStatus.POLICY_EXISTS
def policy_remove(self, policy):
"""Remove a policy domain.
:param policy: Name of the policy domain.
:return: Result of the operation.
"""
if not self._pol_man.policy_exists(policy):
return ReturnStatus.POLICY_NOT_EXISTS
if not self._pol_man.policy_empty(policy):
return ReturnStatus.POLICY_NOT_EMPTY
self._pol_man.policy_remove(policy)
return ReturnStatus.POLICY_REMOVED
def policy_assign_switch(self, switch_id, policy, from_file=False):
"""Assign a policy to a switch assuming it has been registered.
The switch does not have to exist if the assignment
declaration is specified in a file. This does mean that the
application could be DoSed by having many fake switches
specified; however, the benefit is that assignments can be
specified in a file and loaded on application start-up. Such
declarations result in switches being registered with the
policy manager before they connect to the controller. Care must
then be taken not to send out flow table entries to the 'fake'
switch. This functionality does not exist when the declaration
is passed via the REST WSGI.
:param switch_id: Switch identifier, typically the datapath ID.
:param policy: Name of the policy to assign.
:param from_file: False if the declaration came from the WSGI,
True if it was specified in a file.
"""
if not self._pol_man.switch_exists(switch_id):
return ReturnStatus.SWITCH_NOT_EXISTS
if not self._pol_man.policy_exists(policy):
return ReturnStatus.POLICY_NOT_EXISTS
if not self._pol_man.switch_assign_policy(switch_id, policy):
return ReturnStatus.POLICY_ALREADY_ASSIGNED
if not from_file and self._pol_man.switch_is_connected(
switch_id):
# Do not send out the rules if the switch has not connected.
rule_ids = self._pol_man.policy_get_rules(policy)
rules = []
for r_id in rule_ids:
rule = self.acl_get_rule(r_id)
if rule.time_enforce == "N/A":
rules.append(rule)
self._flow_man.flow_deploy_multiple_rules(switch_id, rules)
return ReturnStatus.POLICY_ASSIGNED
def policy_revoke_switch(self, switch_id, policy):
"""Revoke a policy assignment from a switch.
:param switch_id: Switch identifier, typically the datapath ID.
:param policy: Policy to revoke.
"""
if not self._pol_man.switch_exists(switch_id):
return ReturnStatus.SWITCH_NOT_EXISTS
if not self._pol_man.policy_exists(policy):
return ReturnStatus.POLICY_NOT_EXISTS
if not self._pol_man.switch_revoke_policy(switch_id, policy):
return ReturnStatus.POLICY_NOT_ASSIGNED
if self._pol_man.switch_is_connected(switch_id):
# Do not send out removal messages to switches that have
# not connected.
rule_ids = self._pol_man.policy_get_rules(policy)
rules = []
for r_id in rule_ids:
rules.append(self.acl_get_rule(r_id))
self._flow_man.flow_remove_multiple_rules(switch_id, rules)
return ReturnStatus.POLICY_REVOKED
def policy_get_switches(self, policy):
"""Return the IDs of switches assigned to a policy domain.
:param policy: Policy domain name.
:return: A list of switch IDs.
"""
return self._pol_man.policy_get_switches(policy)
def policy_get_connected_switches(self, policy):
"""Return the IDs of connected switches assigned to a policy
domain.
Note the connected distinction.
:param policy: Policy domain name.
:return: A list of switch IDs.
"""
return self._pol_man.policy_get_connected_switches(policy)
def switch_register(self, switch_id):
"""Register a switch with the policy manager.
:param switch_id: Switch identifier, typically the datapath ID.
:return: A return status.
"""
if self._pol_man.switch_register(switch_id):
return ReturnStatus.SWITCH_REGISTERED
else:
return ReturnStatus.SWITCH_EXISTS
def switch_connect(self, switch_id):
"""Inform the policy manager that a switch has connected.
:param switch_id: Switch identifier, typically the datapath ID.
:return: A return status.
"""
if self._pol_man.switch_connect(switch_id):
rules = []
for policy in self._pol_man.switch_get_policies(switch_id):
rule_ids = self._pol_man.policy_get_rules(policy)
for r_id in rule_ids:
rule = self.acl_get_rule(r_id)
if rule.time_enforce == "N/A":
rules.append(rule)
self._flow_man.flow_deploy_multiple_rules(switch_id, rules)
return ReturnStatus.SWITCH_CONNECTED
else:
return ReturnStatus.SWITCH_NOT_REGISTERED
def get_aclswitch_info(self):
"""Fetch and return a dict containing a summary of the state
of ACLSwitch.
:return: A dict containing some summary information.
"""
return {"num_rules": self._acl_man.get_num_rules(),
"num_policies": self._pol_man.get_num_policies(),
"num_switches": self._pol_man.get_num_switches(),
"version": self._aclsw_version}
def get_all_policies(self):
"""Fetch and return a dict of policies and the rules that are
associated with them.
:return: A dict of policies to a list of rule IDs.
"""
return {"policies": self._pol_man.get_all_policies()}
def get_all_rules(self):
"""Fetch and return a dict of ACL rule IDs to their respective
ACL rules.
:return: A dict containing all ACL rules.
"""
return {"acl": self._acl_man.get_all_rules()}
def get_all_switches(self):
"""Fetch and return a dict of the IDs of connected switches
and the policies assigned to them.
:return: A dict of switch IDs to a list of policies.
"""
return {"switches": self._pol_man.get_all_switches()}
def get_time_queue(self):
"""Fetch and return the time enforced ACL rule queue.
:return: The time queue as a list of lists.
"""
return {"time_queue": self._flow_sch.get_time_queue()}
class ReturnStatus:
"""Enums for function return statuses.
"""
POLICY_EXISTS = 10
POLICY_NOT_EXISTS = 11
POLICY_CREATED = 12
POLICY_REMOVED = 13
POLICY_NOT_EMPTY = 14
POLICY_ASSIGNED = 15
POLICY_NOT_ASSIGNED = 16
POLICY_ALREADY_ASSIGNED = 17
POLICY_REVOKED = 18
RULE_EXISTS = 20
RULE_NOT_EXISTS = 21
RULE_CREATED = 22
RULE_REMOVED = 23
RULE_SYNTAX_INVALID = 24
SWITCH_EXISTS = 30
SWITCH_NOT_EXISTS = 31
SWITCH_REGISTERED = 32
SWITCH_NOT_REGISTERED = 33
SWITCH_CONNECTED = 34
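# ---------------------------------------------------------------------------
# Hypothetical helper sketch (not part of ACLSwitch): it shows how a caller is
# expected to consume the ReturnStatus codes returned by acl_create_rule().
# `api` is an initialised ACLSwitchAPI and `rule` a dict accepted by the ACL
# manager's syntax check; both are assumptions of this example.
def create_rule_or_log(api, rule):
    """Create a rule and report why creation failed, if it did."""
    status = api.acl_create_rule(rule)
    if status == ReturnStatus.RULE_CREATED:
        return True
    reasons = {
        ReturnStatus.POLICY_NOT_EXISTS: "policy domain does not exist",
        ReturnStatus.RULE_SYNTAX_INVALID: "rule failed the syntax check",
        ReturnStatus.RULE_EXISTS: "an identical rule already exists",
    }
    print("Rule not created: " + reasons.get(status, "unknown status"))
    return False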
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as _summary
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
This class is deprecated. Please use
@{tf.train.MonitoredTrainingSession} instead.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to make to the single-program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster, you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
@{tf.train.Server.create_local_server} for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
the RPC interface to a specific host, and also allows the in-process
master to access remote tensorflow workers. Often, it is
appropriate to pass `server.target` (for some `tf.train.Server`
named `server`); a short sketch follows this list.
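For example (a minimal sketch; the hostname below is hypothetical), a worker
could connect to a specific remote master like this:
```python
with sv.managed_session('grpc://worker0.example.com:2222') as sess:
  while not sv.should_stop():
    sess.run(<my_train_op>)
```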
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
sv.loop(60, print_loss, (sess, ))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
either the `summary_op` and `saver` optionally passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
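For example (a minimal sketch; `my_custom_init_op` is a placeholder for
whatever extra per-instance initialization your model needs):
```python
sv = Supervisor(logdir='/tmp/mydir', local_init_op=my_custom_init_op)
with sv.managed_session(FLAGS.master) as sess:
  while not sv.should_stop():
    sess.run(<my_train_op>)
```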
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
@deprecation.deprecated(None,
"Please switch to tf.train.MonitoredTrainingSession")
def __init__(self,
graph=None,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT,
is_chief=True,
init_op=USE_DEFAULT,
init_feed_dict=None,
local_init_op=USE_DEFAULT,
logdir=None,
summary_op=USE_DEFAULT,
saver=USE_DEFAULT,
global_step=USE_DEFAULT,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30,
stop_grace_secs=120,
checkpoint_basename="model.ckpt",
session_manager=None,
summary_writer=USE_DEFAULT,
init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from
`tf.report_uninitialized_variables(tf.global_variables())`. If `None`,
the model is not checked for readiness before running local_init_op.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all global variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from summary.merge_all(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
specified. Defaults to the saver returned by `Saver()`.
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
`Supervisor`s are not supported when eager execution is enabled.
@end_compatibility
"""
if context.in_eager_mode():
raise RuntimeError("Supervisors are compatible with eager execution.")
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(
ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=self._saver.saver_def if self._saver else None)
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = _summary.FileWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op,
ready_for_local_init_op=self._ready_for_local_init_op,
graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op.
If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
# ready_for_local_init_op defaults to None for backward compatibility
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [
variables.local_variables_initializer(),
lookup_ops.tables_initializer()
]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = _summary.merge_all()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, uses the existing 'global_step' tensor from the graph,
if any.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A number of seconds, or `None` for non-chief supervisors.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
A number of seconds, or `None` for non-chief supervisors.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
- A StepCounter thread that measures step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
ValueError: If no `logdir` was passed to the constructor, as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
logging.info("Starting standard services.")
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
logging.info("Starting queue runners.")
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Queues are not compatible with eager execution. To ingest data when eager
execution is enabled, use the `tf.data` API.
@end_compatibility
"""
if context.in_eager_mode():
raise RuntimeError("Queues are not compatible with eager execution.")
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
return looper
def stop(self,
threads=None,
close_summary_writer=True,
ignore_live_threads=False):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
ignore_live_threads: If `True` ignores threads that remain running after
a grace period when joining threads via the coordinator, instead of
raising a RuntimeError.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(
threads,
stop_grace_period_secs=self._stop_grace_secs,
ignore_live_threads=ignore_live_threads)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ["Variable", "VariableV2"] and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
while True:
try:
train()
except tf.errors.AbortedError:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
logging.info("Recording summary at step %s.", global_step)
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess, step_counter=None):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
step_counter: A `Tensor` holding the step counter. By default, it uses
sv.global_step.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
step_counter = sv.global_step if step_counter is None else step_counter
self._step_counter = step_counter
self._summary_tag = "%s/sec" % self._step_counter.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._step_counter)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._step_counter)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
if elapsed_time > 0.:
steps_per_sec = added_steps / elapsed_time
else:
steps_per_sec = float("inf")
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
logging.info("Saving checkpoint to path %s", self._sv.save_path)
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import transition_route_group
from google.cloud.dialogflowcx_v3beta1.types import (
transition_route_group as gcdc_transition_route_group,
)
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class TransitionRouteGroupsTransport(abc.ABC):
"""Abstract transport class for TransitionRouteGroups."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_transition_route_groups: gapic_v1.method.wrap_method(
self.list_transition_route_groups,
default_timeout=None,
client_info=client_info,
),
self.get_transition_route_group: gapic_v1.method.wrap_method(
self.get_transition_route_group,
default_timeout=None,
client_info=client_info,
),
self.create_transition_route_group: gapic_v1.method.wrap_method(
self.create_transition_route_group,
default_timeout=None,
client_info=client_info,
),
self.update_transition_route_group: gapic_v1.method.wrap_method(
self.update_transition_route_group,
default_timeout=None,
client_info=client_info,
),
self.delete_transition_route_group: gapic_v1.method.wrap_method(
self.delete_transition_route_group,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_transition_route_groups(
self,
) -> Callable[
[transition_route_group.ListTransitionRouteGroupsRequest],
Union[
transition_route_group.ListTransitionRouteGroupsResponse,
Awaitable[transition_route_group.ListTransitionRouteGroupsResponse],
],
]:
raise NotImplementedError()
@property
def get_transition_route_group(
self,
) -> Callable[
[transition_route_group.GetTransitionRouteGroupRequest],
Union[
transition_route_group.TransitionRouteGroup,
Awaitable[transition_route_group.TransitionRouteGroup],
],
]:
raise NotImplementedError()
@property
def create_transition_route_group(
self,
) -> Callable[
[gcdc_transition_route_group.CreateTransitionRouteGroupRequest],
Union[
gcdc_transition_route_group.TransitionRouteGroup,
Awaitable[gcdc_transition_route_group.TransitionRouteGroup],
],
]:
raise NotImplementedError()
@property
def update_transition_route_group(
self,
) -> Callable[
[gcdc_transition_route_group.UpdateTransitionRouteGroupRequest],
Union[
gcdc_transition_route_group.TransitionRouteGroup,
Awaitable[gcdc_transition_route_group.TransitionRouteGroup],
],
]:
raise NotImplementedError()
@property
def delete_transition_route_group(
self,
) -> Callable[
[transition_route_group.DeleteTransitionRouteGroupRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
__all__ = ("TransitionRouteGroupsTransport",)
|
|
# Copyright 2012 NEC Corporation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
import logging
import uuid
import fixtures
from oslo_serialization import jsonutils
from requests_mock.contrib import fixture as mock_fixture
import testtools
from keystoneclient.auth.identity import v2 as ks_v2_auth
from keystoneclient.auth.identity import v3 as ks_v3_auth
from keystoneclient import exceptions as ks_exceptions
from keystoneclient import fixture as ks_fixture
from keystoneclient import session
from neutronclient import client
from neutronclient.common import exceptions
USERNAME = 'testuser'
USER_ID = 'testuser_id'
TENANT_NAME = 'testtenant'
TENANT_ID = 'testtenant_id'
PASSWORD = 'password'
ENDPOINT_URL = 'http://localurl'
PUBLIC_ENDPOINT_URL = '%s/public' % ENDPOINT_URL
ADMIN_ENDPOINT_URL = '%s/admin' % ENDPOINT_URL
INTERNAL_ENDPOINT_URL = '%s/internal' % ENDPOINT_URL
ENDPOINT_OVERRIDE = 'http://otherurl'
TOKENID = uuid.uuid4().hex
REGION = 'RegionOne'
NOAUTH = 'noauth'
KS_TOKEN_RESULT = ks_fixture.V2Token()
KS_TOKEN_RESULT.set_scope()
_s = KS_TOKEN_RESULT.add_service('network', 'Neutron Service')
_s.add_endpoint(ENDPOINT_URL, region=REGION)
ENDPOINTS_RESULT = {
'endpoints': [{
'type': 'network',
'name': 'Neutron Service',
'region': REGION,
'adminURL': ENDPOINT_URL,
'internalURL': ENDPOINT_URL,
'publicURL': ENDPOINT_URL
}]
}
BASE_URL = "http://keystone.example.com:5000/"
V2_URL = "%sv2.0" % BASE_URL
V3_URL = "%sv3" % BASE_URL
_v2 = ks_fixture.V2Discovery(V2_URL)
_v3 = ks_fixture.V3Discovery(V3_URL)
V3_VERSION_LIST = jsonutils.dumps({'versions': {'values': [_v2, _v3]}})
V2_VERSION_ENTRY = {'version': _v2}
V3_VERSION_ENTRY = {'version': _v3}
def setup_keystone_v2(mrequests):
v2_token = ks_fixture.V2Token(token_id=TOKENID)
service = v2_token.add_service('network')
service.add_endpoint(PUBLIC_ENDPOINT_URL, region=REGION)
mrequests.register_uri('POST',
'%s/tokens' % (V2_URL),
json=v2_token)
auth_session = session.Session()
auth_plugin = ks_v2_auth.Password(V2_URL, 'xx', 'xx')
return auth_session, auth_plugin
def setup_keystone_v3(mrequests):
mrequests.register_uri('GET',
V3_URL,
json=V3_VERSION_ENTRY)
v3_token = ks_fixture.V3Token()
service = v3_token.add_service('network')
service.add_standard_endpoints(public=PUBLIC_ENDPOINT_URL,
admin=ADMIN_ENDPOINT_URL,
internal=INTERNAL_ENDPOINT_URL,
region=REGION)
mrequests.register_uri('POST',
'%s/auth/tokens' % (V3_URL),
text=json.dumps(v3_token),
headers={'X-Subject-Token': TOKENID})
auth_session = session.Session()
auth_plugin = ks_v3_auth.Password(V3_URL,
username='xx',
user_id='xx',
user_domain_name='xx',
user_domain_id='xx')
return auth_session, auth_plugin
AUTH_URL = V2_URL
class CLITestAuthNoAuth(testtools.TestCase):
def setUp(self):
"""Prepare the test environment."""
super(CLITestAuthNoAuth, self).setUp()
self.requests = self.useFixture(mock_fixture.Fixture())
self.client = client.HTTPClient(username=USERNAME,
tenant_name=TENANT_NAME,
password=PASSWORD,
endpoint_url=ENDPOINT_URL,
auth_strategy=NOAUTH,
region_name=REGION)
def test_get_noauth(self):
url = ENDPOINT_URL + '/resource'
self.requests.get(ENDPOINT_URL + '/resource')
self.client.do_request('/resource', 'GET')
self.assertEqual(url, self.requests.last_request.url)
self.assertEqual(self.client.endpoint_url, ENDPOINT_URL)
class CLITestAuthKeystone(testtools.TestCase):
def setUp(self):
"""Prepare the test environment."""
super(CLITestAuthKeystone, self).setUp()
for var in ('http_proxy', 'HTTP_PROXY'):
self.useFixture(fixtures.EnvironmentVariableFixture(var))
self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
self.requests = self.useFixture(mock_fixture.Fixture())
self.client = client.construct_http_client(
username=USERNAME,
tenant_name=TENANT_NAME,
password=PASSWORD,
auth_url=AUTH_URL,
region_name=REGION)
def test_reused_token_get_auth_info(self):
"""Test that Client.get_auth_info() works even if client was
instantiated with predefined token.
"""
token_id = uuid.uuid4().hex
client_ = client.HTTPClient(username=USERNAME,
tenant_name=TENANT_NAME,
token=token_id,
password=PASSWORD,
auth_url=AUTH_URL,
region_name=REGION)
expected = {'auth_token': token_id,
'auth_tenant_id': None,
'auth_user_id': None,
'endpoint_url': self.client.endpoint_url}
self.assertEqual(client_.get_auth_info(), expected)
def test_get_token(self):
auth_session, auth_plugin = setup_keystone_v2(self.requests)
self.client = client.construct_http_client(
username=USERNAME,
tenant_name=TENANT_NAME,
password=PASSWORD,
auth_url=AUTH_URL,
region_name=REGION,
session=auth_session,
auth=auth_plugin)
m = self.requests.get(PUBLIC_ENDPOINT_URL + '/resource',
request_headers={'X-Auth-Token': TOKENID})
self.client.do_request('/resource', 'GET')
self.assertTrue(m.called)
def test_refresh_token(self):
token_id = uuid.uuid4().hex
text = uuid.uuid4().hex
self.client.auth_token = token_id
self.client.endpoint_url = ENDPOINT_URL
res_url = ENDPOINT_URL + '/resource'
v2_url = AUTH_URL + '/tokens'
# token_id gives 401, KS_TOKEN_RESULT gives 200
self.requests.get(res_url,
request_headers={'X-Auth-Token': token_id},
status_code=401)
self.requests.get(
res_url,
text=text,
status_code=200,
request_headers={'X-Auth-Token': KS_TOKEN_RESULT.token_id})
self.requests.post(v2_url, json=KS_TOKEN_RESULT)
resp = self.client.do_request('/resource', 'GET')
self.assertEqual(text, resp[1])
self.assertEqual(3, len(self.requests.request_history))
self.assertEqual(res_url, self.requests.request_history[0].url)
self.assertEqual(v2_url, self.requests.request_history[1].url)
self.assertEqual(res_url, self.requests.request_history[2].url)
def test_refresh_token_no_auth_url(self):
self.client.auth_url = None
token_id = uuid.uuid4().hex
self.client.auth_token = token_id
self.client.endpoint_url = ENDPOINT_URL
self.requests.get(ENDPOINT_URL + '/resource', status_code=401)
self.assertRaises(exceptions.NoAuthURLProvided,
self.client.do_request,
'/resource',
'GET')
def test_get_endpoint_url_with_invalid_auth_url(self):
# Handle the case when auth_url is not provided
self.client.auth_url = None
self.assertRaises(exceptions.NoAuthURLProvided,
self.client._get_endpoint_url)
def test_get_endpoint_url(self):
token_id = uuid.uuid4().hex
self.client.auth_token = token_id
self.requests.get(AUTH_URL + '/tokens/%s/endpoints' % token_id,
json=ENDPOINTS_RESULT)
self.requests.get(ENDPOINT_URL + '/resource')
self.client.do_request('/resource', 'GET')
self.assertEqual(token_id,
self.requests.last_request.headers['X-Auth-Token'])
def test_use_given_endpoint_url(self):
self.client = client.HTTPClient(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION,
endpoint_url=ENDPOINT_OVERRIDE)
self.assertEqual(self.client.endpoint_url, ENDPOINT_OVERRIDE)
token_id = uuid.uuid4().hex
self.client.auth_token = token_id
self.requests.get(ENDPOINT_OVERRIDE + '/resource')
self.client.do_request('/resource', 'GET')
self.assertEqual(self.client.endpoint_url, ENDPOINT_OVERRIDE)
self.assertEqual(token_id,
self.requests.last_request.headers['X-Auth-Token'])
def test_get_endpoint_url_other(self):
self.client = client.HTTPClient(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION, endpoint_type='otherURL')
token_id = uuid.uuid4().hex
self.client.auth_token = token_id
self.requests.get(AUTH_URL + '/tokens/%s/endpoints' % token_id,
json=ENDPOINTS_RESULT)
self.assertRaises(exceptions.EndpointTypeNotFound,
self.client.do_request,
'/resource',
'GET')
def test_get_endpoint_url_failed(self):
token_id = uuid.uuid4().hex
self.client.auth_token = token_id
self.requests.get(AUTH_URL + '/tokens/%s/endpoints' % token_id,
status_code=401)
self.requests.post(AUTH_URL + '/tokens', json=KS_TOKEN_RESULT)
m = self.requests.get(ENDPOINT_URL + '/resource')
self.client.do_request('/resource', 'GET')
self.assertEqual(KS_TOKEN_RESULT.token_id,
m.last_request.headers['X-Auth-Token'])
def test_endpoint_type(self):
auth_session, auth_plugin = setup_keystone_v3(self.requests)
# Test default behavior is to choose public.
self.client = client.construct_http_client(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION,
session=auth_session, auth=auth_plugin)
self.assertEqual(self.client.endpoint_url, PUBLIC_ENDPOINT_URL)
# Test admin url
self.client = client.construct_http_client(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION, endpoint_type='adminURL',
session=auth_session, auth=auth_plugin)
self.assertEqual(self.client.endpoint_url, ADMIN_ENDPOINT_URL)
# Test public url
self.client = client.construct_http_client(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION, endpoint_type='publicURL',
session=auth_session, auth=auth_plugin)
self.assertEqual(self.client.endpoint_url, PUBLIC_ENDPOINT_URL)
# Test internal url
self.client = client.construct_http_client(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION, endpoint_type='internalURL',
session=auth_session, auth=auth_plugin)
self.assertEqual(self.client.endpoint_url, INTERNAL_ENDPOINT_URL)
# Test url that isn't found in the service catalog
self.client = client.construct_http_client(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION, endpoint_type='privateURL',
session=auth_session, auth=auth_plugin)
self.assertRaises(
ks_exceptions.EndpointNotFound,
getattr, self.client, 'endpoint_url')
def test_strip_credentials_from_log(self):
m = self.requests.post(AUTH_URL + '/tokens', json=KS_TOKEN_RESULT)
self.requests.get(ENDPOINT_URL + '/resource')
self.client.do_request('/resource', 'GET')
self.assertIn('REDACTED', self.logger.output)
self.assertNotIn(self.client.password, self.logger.output)
self.assertNotIn('REDACTED', m.last_request.body)
self.assertIn(self.client.password, m.last_request.body)
class CLITestAuthKeystoneWithId(CLITestAuthKeystone):
def setUp(self):
"""Prepare the test environment."""
super(CLITestAuthKeystoneWithId, self).setUp()
self.client = client.HTTPClient(user_id=USER_ID,
tenant_id=TENANT_ID,
password=PASSWORD,
auth_url=AUTH_URL,
region_name=REGION)
class CLITestAuthKeystoneWithIdandName(CLITestAuthKeystone):
def setUp(self):
"""Prepare the test environment."""
super(CLITestAuthKeystoneWithIdandName, self).setUp()
self.client = client.HTTPClient(username=USERNAME,
user_id=USER_ID,
tenant_id=TENANT_ID,
tenant_name=TENANT_NAME,
password=PASSWORD,
auth_url=AUTH_URL,
region_name=REGION)
class TestKeystoneClientVersions(testtools.TestCase):
def setUp(self):
"""Prepare the test environment."""
super(TestKeystoneClientVersions, self).setUp()
self.requests = self.useFixture(mock_fixture.Fixture())
def test_v2_auth(self):
auth_session, auth_plugin = setup_keystone_v2(self.requests)
self.client = client.construct_http_client(
username=USERNAME,
tenant_name=TENANT_NAME,
password=PASSWORD,
auth_url=AUTH_URL,
region_name=REGION,
session=auth_session,
auth=auth_plugin)
m = self.requests.get(PUBLIC_ENDPOINT_URL + '/resource')
self.client.do_request('/resource', 'GET')
self.assertTrue(m.called)
def test_v3_auth(self):
auth_session, auth_plugin = setup_keystone_v3(self.requests)
self.client = client.construct_http_client(
user_id=USER_ID,
tenant_id=TENANT_ID,
password=PASSWORD,
auth_url=V3_URL,
region_name=REGION,
session=auth_session,
auth=auth_plugin)
m = self.requests.get(PUBLIC_ENDPOINT_URL + '/resource')
self.client.do_request('/resource', 'GET')
self.assertTrue(m.called)
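# Illustrative only: a minimal sketch of the requests_mock fixture pattern the
# tests above rely on. `TestRequestsMockPattern` is not part of neutronclient;
# it only shows that a URI registered on the fixture intercepts real `requests`
# calls made while the fixture is active.
class TestRequestsMockPattern(testtools.TestCase):
    def test_register_and_assert(self):
        import requests as requests_lib
        mocked = self.useFixture(mock_fixture.Fixture())
        matcher = mocked.get(ENDPOINT_URL + '/ping', json={'ok': True})
        resp = requests_lib.get(ENDPOINT_URL + '/ping')
        self.assertTrue(matcher.called)
        self.assertEqual({'ok': True}, resp.json())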
|
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from zipline.utils.tradingcalendar import end, canonicalize_datetime
start = pd.Timestamp('1994-01-01', tz='UTC')
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
# Universal Confraternization (New Year's Day)
conf_universal = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(conf_universal)
# Sao Paulo city birthday
aniversario_sao_paulo = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aniversario_sao_paulo)
# Carnival Monday
carnaval_segunda = rrule.rrule(
rrule.MONTHLY,
byeaster=-48,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_segunda)
# Carnival Tuesday
carnaval_terca = rrule.rrule(
rrule.MONTHLY,
byeaster=-47,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_terca)
# Good Friday (Passion of Christ)
sexta_paixao = rrule.rrule(
rrule.MONTHLY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(sexta_paixao)
# Corpus Christi
corpus_christi = rrule.rrule(
rrule.MONTHLY,
byeaster=60,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(corpus_christi)
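# Tiradentes' Day (April 21)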
tiradentes = rrule.rrule(
rrule.MONTHLY,
bymonth=4,
bymonthday=21,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(tiradentes)
# Labor day
dia_trabalho = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
bymonthday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(dia_trabalho)
# Constitutionalist Revolution
constitucionalista = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=9,
cache=True,
dtstart=datetime(1997, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(constitucionalista)
# Independence Day
independencia = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
bymonthday=7,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(independencia)
# Our Lady of Aparecida
aparecida = rrule.rrule(
rrule.MONTHLY,
bymonth=10,
bymonthday=12,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aparecida)
# All Souls' day
finados = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(finados)
# Proclamation of the Republic
proclamacao_republica = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=15,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(proclamacao_republica)
# Day of Black Awareness
consciencia_negra = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=20,
cache=True,
dtstart=datetime(2004, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(consciencia_negra)
# Christmas Eve
vespera_natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(vespera_natal)
# Christmas
natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(natal)
# New Year's Eve
ano_novo = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=31,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo)
# December 30th is also a holiday when New Year's Eve falls on a Saturday
ano_novo_sab = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=30,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo_sab)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
# Ash Wednesday
quarta_cinzas = rrule.rrule(
rrule.MONTHLY,
byeaster=-46,
cache=True,
dtstart=start,
until=end
)
def get_early_closes(start, end):
    # Bovespa's only "early close" rule is actually a late open:
    # trading starts at 1:00 PM on Ash Wednesday (quarta_cinzas).
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
early_close_rules = []
early_close_rules.append(quarta_cinzas)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_closes(trading_days, early_closes):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
for day in trading_days:
        # the only "early close" event on Bovespa is actually a late start:
        # on Ash Wednesday the market opens at 1pm instead of 10am
open_hour = 13 if day in quarta_cinzas else 10
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=open_hour,
minute=00),
tz='America/Sao_Paulo').tz_convert('UTC')
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=16),
tz='America/Sao_Paulo').tz_convert('UTC')
open_and_closes.loc[day, 'market_open'] = market_open
open_and_closes.loc[day, 'market_close'] = market_close
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes)
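# Usage sketch (illustrative, not part of the original module): the names
# computed above can be consulted directly, e.g.
#
#   day = trading_days[0]
#   day in trading_days                       # is this a Bovespa session?
#   open_and_closes.loc[day, 'market_open']   # UTC timestamp of the open
#   open_and_closes.loc[day, 'market_close']  # UTC timestamp of the close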
|
|
#!/usr/bin/python
import unittest
import math
import random
import rarfile
import os, sys
import subprocess
try:
    # probe for the 'rar' binary; without shell=True a missing binary raises OSError
    subprocess.Popen("rar", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
    print >> sys.stderr, "You do not have the 'rar' binary, please install!"
    sys.exit(1)
class PyarrCheck(unittest.TestCase):
def setUp(self):
self.scriptdir = os.path.realpath(os.path.dirname(sys.argv[0]))
_, self.scriptname = os.path.split(sys.argv[0])
self.scriptpath = os.path.normpath(os.path.join(self.scriptdir, self.scriptname))
pathname, scriptname = os.path.split(sys.argv[0])
self.testdir = os.path.join(self.scriptdir, 'rartest')
self.rarmntdir = os.path.join(self.testdir, 'rarmnt')
self.testfiledir = os.path.join(self.testdir, 'testfiles')
self.testarchivedir = os.path.join(self.testdir, 'testarchives')
self.pyarrpath = self.scriptdir + '/../pyarrfs.py'
self.mkdir(self.testdir)
self.mkdir(self.rarmntdir)
self.mkdir(self.testfiledir)
self.mkdir(self.testarchivedir)
# make sure mount dir is empty
os.system('fusermount -q -z -u ' + self.rarmntdir)
try:
os.system(self.pyarrpath + ' ' + self.rarmntdir)
except:
pass
filedata = [
[ 'test1',
'this is testfile1 bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla bla\n' ],
[ 'test2',
'crap crap crap crap\n' ]
]
filedata.append(['test3', self.generate_content(200000)])
self.files = []
for entry in filedata:
self.files.append(entry[0])
self.create_test_files(filedata)
self.uncompressed_rar_archive = 'testarchive1.rar'
self.create_uncompressed_rar_archive('testarchive1.rar', self.files)
def mkdir(self, path):
if not os.path.exists(path):
os.mkdir(path)
self.assertTrue(os.path.exists(path))
    def create_test_files(self, filedata):
        for entry in filedata:
            filename = entry[0]
            content = entry[1]
            f = open(os.path.join(self.testfiledir, filename), 'w')
            f.write(content)
            f.close()
    def create_uncompressed_rar_archive(self, rar_name, files):
        # the parameter is named rar_name to avoid shadowing the imported rarfile module
        os.chdir(self.testarchivedir)
        for file in files:
            filepath = os.path.join(self.testfiledir, file)
            cmd = 'rar a -inul -ep -m0 ' + os.path.join(self.testarchivedir, rar_name) + ' ' + filepath
            os.system(cmd)
def tearDown(self):
os.chdir(self.scriptdir)
os.system('fusermount -q -z -u ' + self.rarmntdir)
import shutil
# shutil.rmtree(self.testdir)
def generate_content(self, size = 0, population = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'):
result = ''
for i in xrange(0, size):
result += str(random.choice(population))
return result
    def generate_content_code(self, size = 0):
        # round size down to the nearest number divisible by 10; each chunk
        # "$%09d" is 10 characters long, so emit ns / 10 chunks in total
        ns = size - (size % 10)
        result = ''
        for i in xrange(0, ns / 10):
            result += "$%09d" % (i)
        return result
def test_read_sequential(self):
"""Read the entire file sequentially from start
"""
for file in self.files:
rar_file = os.path.normpath(os.path.join(self.rarmntdir, '.' + self.testarchivedir, self.uncompressed_rar_archive, file))
raw_file = os.path.normpath(os.path.join(self.testfiledir, file))
rawf = open(raw_file, 'r')
rarf = open(rar_file, 'r')
self.assertEqual(rarf.read(), rawf.read(), 'mismatch in sequential read')
rarf.close()
rawf.close()
def test_seek_whence0_1(self):
"""Single seek from start (whence = 0) to random location in file
"""
for file in self.files:
rar_file = os.path.normpath(os.path.join(self.rarmntdir, '.' + self.testarchivedir, self.uncompressed_rar_archive, file))
raw_file = os.path.normpath(os.path.join(self.testfiledir, file))
file_size = os.path.getsize(raw_file)
rarf = open(rar_file, 'r')
rawf = open(raw_file, 'r')
for i in xrange(0, 10000):
byte = random.randrange(0, file_size)
rawf.seek(byte)
rarf.seek(byte)
self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in seek (whence = 0) test 1')
rarf.close()
rawf.close()
def test_seek_whence0_2(self):
"""Two reads, first from start (whence = 0) to random byte in first half of file and second seek() from start (whence = 0) to second half of file
"""
for file in self.files:
rar_file = os.path.normpath(os.path.join(self.rarmntdir, '.' + self.testarchivedir, self.uncompressed_rar_archive, file))
raw_file = os.path.normpath(os.path.join(self.testfiledir, file))
file_size = os.path.getsize(raw_file)
rarf = open(rar_file, 'r')
rawf = open(raw_file, 'r')
for i in xrange(0, 10000):
byte = random.randrange(0, math.floor(file_size/2))
rawf.seek(byte)
rarf.seek(byte)
self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in seek (whence = 0) test 2')
byte = random.randrange(math.floor(file_size/2), file_size)
rawf.seek(byte)
rarf.seek(byte)
self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in seek (whence = 0) test 2')
rarf.close()
rawf.close()
def test_seek_whence0_3(self):
"""Two reads, first from start (whence = 0) to random byte in second half of file and second seek() from start (whence = 0) to first half of file
"""
for file in self.files:
rar_file = os.path.normpath(os.path.join(self.rarmntdir, '.' + self.testarchivedir, self.uncompressed_rar_archive, file))
raw_file = os.path.normpath(os.path.join(self.testfiledir, file))
file_size = os.path.getsize(raw_file)
rarf = open(rar_file, 'r')
rawf = open(raw_file, 'r')
for i in xrange(0, 10000):
byte = random.randrange(math.floor(file_size/2), file_size)
rawf.seek(byte)
rarf.seek(byte)
self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in seek (whence = 0) test 3')
byte = random.randrange(0, math.floor(file_size/2))
rawf.seek(byte)
rarf.seek(byte)
self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in seek (whence = 0) test 3')
rarf.close()
rawf.close()
def test_seek_whence2_1(self):
"""Single seek from end (whence = 2) to random location in file
"""
for file in self.files:
rar_file = os.path.normpath(os.path.join(self.rarmntdir, '.' + self.testarchivedir, self.uncompressed_rar_archive, file))
raw_file = os.path.normpath(os.path.join(self.testfiledir, file))
file_size = os.path.getsize(raw_file)
rarf = open(rar_file, 'r')
rawf = open(raw_file, 'r')
for i in xrange(0, 10000):
byte = random.randrange(0, file_size)
rawf.seek(-byte, 2)
rarf.seek(-byte, 2)
self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in seek (whence = 2) test 1')
rarf.close()
rawf.close()
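    # Illustrative extra test (not part of the original suite): exercises a
    # relative seek (whence = 1) on top of an absolute seek, mirroring the
    # whence 0/2 tests above and reusing the same mount layout.
    def test_seek_whence1_1(self):
        """Absolute seek followed by a relative seek (whence = 1) forward in the file
        """
        for file in self.files:
            rar_file = os.path.normpath(os.path.join(self.rarmntdir, '.' + self.testarchivedir, self.uncompressed_rar_archive, file))
            raw_file = os.path.normpath(os.path.join(self.testfiledir, file))
            file_size = os.path.getsize(raw_file)
            rarf = open(rar_file, 'r')
            rawf = open(raw_file, 'r')
            for i in xrange(0, 1000):
                start_byte = random.randrange(0, file_size / 2)
                offset = random.randrange(0, file_size - start_byte - 1)
                rawf.seek(start_byte)
                rarf.seek(start_byte)
                rawf.seek(offset, 1)
                rarf.seek(offset, 1)
                self.assertEqual(rarf.read(1), rawf.read(1), 'mismatch in seek (whence = 1) test 1')
            rarf.close()
            rawf.close()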
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import fake_stat
from docker import constants
CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_EXEC_ID = 'd5d177f121dc'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
FAKE_TAG_NAME = 'tag'
FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
def get_fake_raw_version():
status_code = 200
response = {
"ApiVersion": "1.18",
"GitCommit": "fake-commit",
"GoVersion": "go1.3.3",
"Version": "1.5.0"
}
return status_code, response
def get_fake_version():
status_code = 200
response = {'GoVersion': '1', 'Version': '1.1.1',
'GitCommit': 'deadbeef+CHANGES'}
return status_code, response
def get_fake_info():
status_code = 200
response = {'Containers': 1, 'Images': 1, 'Debug': False,
'MemoryLimit': False, 'SwapLimit': False,
'IPv4Forwarding': True}
return status_code, response
def get_fake_search():
status_code = 200
response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
return status_code, response
def get_fake_images():
status_code = 200
response = [{
'Id': FAKE_IMAGE_ID,
'Created': '2 days ago',
'Repository': 'busybox',
'RepoTags': ['busybox:latest', 'busybox:1.0'],
}]
return status_code, response
def get_fake_image_history():
status_code = 200
response = [
{
"Id": "b750fe79269d",
"Created": 1364102658,
"CreatedBy": "/bin/bash"
},
{
"Id": "27cf78414709",
"Created": 1364068391,
"CreatedBy": ""
}
]
return status_code, response
def post_fake_import_image():
status_code = 200
response = 'Import messages...'
return status_code, response
def get_fake_containers():
status_code = 200
response = [{
'Id': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
'Created': '2 days ago',
'Command': 'true',
'Status': 'fake status'
}]
return status_code, response
def post_fake_start_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_resize_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_create_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def get_fake_inspect_container(tty=False):
status_code = 200
response = {
'Id': FAKE_CONTAINER_ID,
'Config': {'Privileged': True, 'Tty': tty},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
"State": {
"Running": True,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-09-25T14:01:18.869545111+02:00",
"Ghost": False
},
"MacAddress": "02:42:ac:11:00:0a"
}
return status_code, response
def get_fake_inspect_image():
status_code = 200
response = {
'id': FAKE_IMAGE_ID,
'parent': "27cf784147099545",
'created': "2013-03-23T22:24:18.818426-07:00",
'container': FAKE_CONTAINER_ID,
'container_config':
{
"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"PortSpecs": "",
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"Env": "",
"Cmd": ["/bin/bash"],
"Dns": "",
"Image": "base",
"Volumes": "",
"VolumesFrom": "",
"WorkingDir": ""
},
'Size': 6823592
}
return status_code, response
def get_fake_port():
status_code = 200
response = {
'HostConfig': {
'Binds': None,
'ContainerIDFile': '',
'Links': None,
'LxcConf': None,
'PortBindings': {
'1111': None,
'1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
'2222': None
},
'Privileged': False,
'PublishAllPorts': False
},
'NetworkSettings': {
'Bridge': 'docker0',
'PortMapping': None,
'Ports': {
'1111': None,
'1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
'2222': None},
'MacAddress': '02:42:ac:11:00:0a'
}
}
return status_code, response
def get_fake_insert_image():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_wait():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_logs():
status_code = 200
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
return status_code, response
def get_fake_diff():
status_code = 200
response = [{'Path': '/test', 'Kind': 1}]
return status_code, response
def get_fake_events():
status_code = 200
response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID,
'from': FAKE_IMAGE_ID, 'time': 1423247867}]
return status_code, response
def get_fake_export():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_exec_create():
status_code = 200
response = {'Id': FAKE_EXEC_ID}
return status_code, response
def post_fake_exec_start():
status_code = 200
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n'
b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n'
b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n')
return status_code, response
def post_fake_exec_resize():
status_code = 201
return status_code, ''
def get_fake_exec_inspect():
return 200, {
'OpenStderr': True,
'OpenStdout': True,
'Container': get_fake_inspect_container()[1],
'Running': False,
'ProcessConfig': {
'arguments': ['hello world'],
'tty': False,
'entrypoint': 'echo',
'privileged': False,
'user': ''
},
'ExitCode': 0,
'ID': FAKE_EXEC_ID,
'OpenStdin': False
}
def post_fake_stop_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_kill_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_pause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_unpause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_restart_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_rename_container():
status_code = 204
return status_code, None
def delete_fake_remove_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_image_create():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def delete_fake_remove_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_get_image():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_load_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_commit():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_push():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_build_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_tag_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_stats():
status_code = 200
response = fake_stat.OBJ
return status_code, response
def get_fake_volume_list():
status_code = 200
response = {
'Volumes': [
{
'Name': 'perfectcherryblossom',
'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom'
}, {
'Name': 'subterraneananimism',
'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/subterraneananimism'
}
]
}
return status_code, response
def get_fake_volume():
status_code = 200
response = {
'Name': 'perfectcherryblossom',
'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom'
}
return status_code, response
def fake_remove_volume():
return 204, None
# Maps real api url to fake response callback
prefix = 'http+docker://localunixsocket'
fake_responses = {
'{0}/version'.format(prefix):
get_fake_raw_version,
'{1}/{0}/version'.format(CURRENT_VERSION, prefix):
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
get_fake_info,
'{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
get_fake_search,
'{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
get_fake_images,
'{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
get_fake_image_history,
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_import_image,
'{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
get_fake_containers,
'{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
post_fake_start_container,
'{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
post_fake_resize_container,
'{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_container,
'{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix):
post_fake_rename_container,
'{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
post_fake_tag_image,
'{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
get_fake_wait,
'{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
get_fake_logs,
'{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
get_fake_diff,
'{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
get_fake_export,
'{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix):
post_fake_exec_create,
'{1}/{0}/exec/d5d177f121dc/start'.format(CURRENT_VERSION, prefix):
post_fake_exec_start,
'{1}/{0}/exec/d5d177f121dc/json'.format(CURRENT_VERSION, prefix):
get_fake_exec_inspect,
'{1}/{0}/exec/d5d177f121dc/resize'.format(CURRENT_VERSION, prefix):
post_fake_exec_resize,
'{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix):
get_fake_stats,
'{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
post_fake_stop_container,
'{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
post_fake_kill_container,
'{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix):
post_fake_pause_container,
'{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
post_fake_unpause_container,
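    # NOTE: this key duplicates the '/containers/3cc2351ab11b/json' entry above;
    # in a dict literal the later value wins, so lookups resolve to get_fake_port.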
'{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
get_fake_port,
'{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
post_fake_restart_container,
'{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
delete_fake_remove_container,
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_image_create,
'{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
delete_fake_remove_image,
'{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
get_fake_get_image,
'{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
post_fake_load_image,
'{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_image,
'{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
get_fake_insert_image,
'{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
post_fake_push,
'{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
post_fake_commit,
'{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
post_fake_create_container,
'{1}/{0}/build'.format(CURRENT_VERSION, prefix):
post_fake_build_container,
'{1}/{0}/events'.format(CURRENT_VERSION, prefix):
get_fake_events,
('{1}/{0}/volumes'.format(CURRENT_VERSION, prefix), 'GET'):
get_fake_volume_list,
('{1}/{0}/volumes'.format(CURRENT_VERSION, prefix), 'POST'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'GET'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'DELETE'):
fake_remove_volume,
}
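# Illustrative helper (not part of the original fixtures): shows how the
# fake_responses mapping is meant to be consumed -- the volume endpoints are
# keyed by (url, method) tuples, everything else by the bare url string.
def fake_resolve(url, method='GET'):
    callback = fake_responses.get((url, method)) or fake_responses.get(url)
    if callback is None:
        raise KeyError('no fake response registered for {0} {1}'.format(method, url))
    return callback()
# Example:
#   status, body = fake_resolve('{1}/{0}/version'.format(CURRENT_VERSION, prefix))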
|
|
'''
Created on Feb 20, 2013
@author: Maribel Acosta
@author: Fabian Floeck
'''
from wmf import dump
from difflib import Differ
from time import time
from structures.Revision import Revision
from structures.Paragraph import Paragraph
from structures.Sentence import Sentence
from structures.Word import Word
from structures import Text
from etc.Relation import Relation
from sys import argv,exit
from copy import copy
import getopt
# Container of revisions.
revisions = {}
revision_order = []
# Hash tables.
paragraphs_ht = {}
sentences_ht = {}
spam = []
# SPAM detection variables.
CHANGE_PERCENTAGE = -0.40
PREVIOUS_LENGTH = 1000
CURR_LENGTH = 1000
FLAG = "move"
UNMATCHED_PARAGRAPH = 0.0
WORD_DENSITY = 10
WORD_LEN = 100
def analyseArticle(file_name):
# Container of relationships.
relations = {}
# Revisions to compare.
revision_curr = Revision()
revision_prev = Revision()
text_curr = None
# Access the file.
dumpIterator = dump.Iterator(file_name)
# Iterate over the pages.
for page in dumpIterator.readPages():
i = 0
# Iterate over revisions of the article.
for revision in page.readRevisions():
vandalism = False
#print "processing rev", revision.getId()
# Update the information about the previous revision.
revision_prev = revision_curr
if (revision.getSha1() == None):
revision.setSha1(Text.calculateHash(revision.getText().encode("utf-8")))
if (revision.getSha1() in spam):
vandalism = True
#TODO: SPAM detection: DELETION
if (revision.getComment()!= None and revision.getComment().find(FLAG) > 0):
pass
else:
if (revision_prev.length > PREVIOUS_LENGTH) and (len(revision.getText()) < CURR_LENGTH) and (((len(revision.getText())-revision_prev.length)/float(revision_prev.length)) <= CHANGE_PERCENTAGE):
vandalism = True
revision_curr = revision_prev
#if (vandalism):
#print "---------------------------- FLAG 1"
#print "SPAM", revision.getId()
#print revision.getText()
#print
if (not vandalism):
# Information about the current revision.
revision_curr = Revision()
revision_curr.id = i
revision_curr.wikipedia_id = int(revision.getId())
revision_curr.length = len(revision.getText())
revision_curr.timestamp = revision.getTimestamp()
                # Relation data for the current revision.
relation = Relation()
relation.revision = int(revision.getId())
relation.length = len(revision.getText())
# Some revisions don't have contributor.
if (revision.getContributor() != None):
revision_curr.contributor_id = revision.getContributor().getId()
revision_curr.contributor_name = revision.getContributor().getUsername().encode('utf-8')
relation.author = revision.getContributor().getUsername().encode('utf-8')
else:
revision_curr.contributor_id = 'Not Available ' + revision.getId()
                    revision_curr.contributor_name = 'Not Available ' + revision.getId()
relation.author = 'Not Available ' + revision.getId()
# Content within the revision.
text_curr = revision.getText().encode('utf-8')
text_curr = text_curr.lower()
revision_curr.content = text_curr
# Perform comparison.
vandalism = determineAuthorship(revision_curr, revision_prev, text_curr, relation)
if (not vandalism):
#print "NOT SPAM", revision.getId()
# Add the current revision with all the information.
revisions.update({revision_curr.wikipedia_id : revision_curr})
relations.update({revision_curr.wikipedia_id : relation})
revision_order.append((revision_curr.wikipedia_id, False))
# Update the fake revision id.
i = i+1
# Calculate the number of tokens in the revision.
total = 0
for p in revision_curr.ordered_paragraphs:
for paragraph_curr in revision_curr.paragraphs[p]:
for hash_sentence_curr in paragraph_curr.sentences.keys():
for sentence_curr in paragraph_curr.sentences[hash_sentence_curr]:
total = total + len(sentence_curr.words)
revision_curr.total_tokens = total
relation.total_tokens = total
else:
#print "---------------------------- FLAG 2"
#print "SPAM", revision.getId()
#print revision.getText()
#print
revision_order.append((revision_curr.wikipedia_id, True))
revision_curr = revision_prev
spam.append(revision.getSha1())
return (revisions, revision_order, relations)
def determineAuthorship(revision_curr, revision_prev, text_curr, relation):
# Containers for unmatched paragraphs and sentences in both revisions.
unmatched_sentences_curr = []
unmatched_sentences_prev = []
matched_sentences_prev = []
matched_words_prev = []
possible_vandalism = False
vandalism = False
# Analysis of the paragraphs in the current revision.
(unmatched_paragraphs_curr, unmatched_paragraphs_prev, matched_paragraphs_prev) = analyseParagraphsInRevision(revision_curr, revision_prev, text_curr, relation)
# Analysis of the sentences in the unmatched paragraphs of the current revision.
if (len(unmatched_paragraphs_curr)>0):
(unmatched_sentences_curr, unmatched_sentences_prev, matched_sentences_prev, _) = analyseSentencesInParagraphs(unmatched_paragraphs_curr, unmatched_paragraphs_prev, revision_curr, revision_prev, relation)
#TODO: SPAM detection
if (len(unmatched_paragraphs_curr)/float(len(revision_curr.ordered_paragraphs)) > UNMATCHED_PARAGRAPH):
possible_vandalism = True
# Analysis of words in unmatched sentences (diff of both texts).
if (len(unmatched_sentences_curr)>0):
(matched_words_prev, vandalism) = analyseWordsInSentences(unmatched_sentences_curr, unmatched_sentences_prev, revision_curr, possible_vandalism, relation)
if (len(unmatched_paragraphs_curr) == 0):
for paragraph in unmatched_paragraphs_prev:
for sentence_key in paragraph.sentences.keys():
for sentence in paragraph.sentences[sentence_key]:
if not(sentence.matched):
unmatched_sentences_prev.append(sentence)
# Add the information of 'deletion' to words
for unmatched_sentence in unmatched_sentences_prev:
#print "unmatched sentence", unmatched_sentence.value, revision_curr.wikipedia_id
for word_prev in unmatched_sentence.words:
if not(word_prev.matched):
for elem in word_prev.deleted:
if (elem != revision_curr.wikipedia_id) and (elem in revisions.keys()):
if (revisions[elem].contributor_id != revision_curr.contributor_id):
if (elem in relation.redeleted.keys()):
relation.redeleted.update({elem : relation.redeleted[elem] + 1})
else:
relation.redeleted.update({elem : 1})
else:
if (elem in relation.self_redeleted.keys()):
relation.self_redeleted.update({elem : relation.self_redeleted[elem] + 1})
else:
relation.self_redeleted.update({elem : 1})
# Revert: deleting something that somebody else reintroduced.
for elem in word_prev.freq:
#if (revision_curr.wikipedia_id == 11):
# print "Revert in 11", word_prev.value, word_prev.deleted, relation.revert
if (elem != revision_curr.wikipedia_id) and (elem in revisions.keys()):
if (revisions[elem].contributor_id != revision_curr.contributor_id):
if (elem in relation.revert.keys()):
relation.revert.update({elem: relation.revert[elem] +1})
else:
relation.revert.update({elem: 1})
else:
if (elem in relation.self_revert.keys()):
relation.self_revert.update({elem: relation.self_revert[elem] +1})
else:
relation.self_revert.update({elem: 1})
#print "relation.revert", word_prev.value, word_prev.deleted, relation.revert, revision_curr.wikipedia_id
word_prev.deleted.append(revision_curr.wikipedia_id)
if (revisions[word_prev.revision].contributor_id != revision_curr.contributor_id):
if (word_prev.revision in relation.deleted.keys()):
relation.deleted.update({word_prev.revision : relation.deleted[word_prev.revision] + 1 })
else:
relation.deleted.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_deleted.keys()):
relation.self_deleted.update({word_prev.revision : relation.self_deleted[word_prev.revision] + 1 })
else:
relation.self_deleted.update({word_prev.revision : 1 })
# Reset matched structures from old revisions.
for matched_paragraph in matched_paragraphs_prev:
matched_paragraph.matched = False
for sentence_hash in matched_paragraph.sentences.keys():
for sentence in matched_paragraph.sentences[sentence_hash]:
sentence.matched = False
for word in sentence.words:
word.matched = False
for matched_sentence in matched_sentences_prev:
matched_sentence.matched = False
for word in matched_sentence.words:
word.matched = False
for matched_word in matched_words_prev:
matched_word.matched = False
if (not vandalism):
# Add the new paragraphs to hash table of paragraphs.
for unmatched_paragraph in unmatched_paragraphs_curr:
if (unmatched_paragraph.hash_value in paragraphs_ht.keys()):
paragraphs_ht[unmatched_paragraph.hash_value].append(unmatched_paragraph)
else:
paragraphs_ht.update({unmatched_paragraph.hash_value : [unmatched_paragraph]})
# Add the new sentences to hash table of sentences.
for unmatched_sentence in unmatched_sentences_curr:
if (unmatched_sentence.hash_value in sentences_ht.keys()):
sentences_ht[unmatched_sentence.hash_value].append(unmatched_sentence)
else:
sentences_ht.update({unmatched_sentence.hash_value : [unmatched_sentence]})
return vandalism
def analyseParagraphsInRevision(revision_curr, revision_prev, text_curr, relation):
# Containers for unmatched and matched paragraphs.
unmatched_paragraphs_curr = []
unmatched_paragraphs_prev = []
matched_paragraphs_prev = []
    # Split the text of the current revision into paragraphs.
paragraphs = Text.splitIntoParagraphs(text_curr)
# Iterate over the paragraphs of the current version.
for paragraph in paragraphs:
# Build Paragraph structure and calculate hash value.
paragraph = paragraph.strip()
hash_curr = Text.calculateHash(paragraph)
matched_curr = False
# If the paragraph is in the previous revision,
# update the authorship information and mark both paragraphs as matched (also in HT).
if (hash_curr in revision_prev.ordered_paragraphs):
for paragraph_prev in revision_prev.paragraphs[hash_curr]:
if (not paragraph_prev.matched):
matched_curr = True
paragraph_prev.matched = True
matched_paragraphs_prev.append(paragraph_prev)
# TODO: added this (CHECK).
for hash_sentence_prev in paragraph_prev.sentences.keys():
for sentence_prev in paragraph_prev.sentences[hash_sentence_prev]:
sentence_prev.matched = True
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
#if (word_prev.revision in relation.reintroduced.keys()):
# relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
#else:
# relation.reintroduced.update({word_prev.revision : 1 })
# Add paragraph to current revision.
if (hash_curr in revision_curr.paragraphs.keys()):
revision_curr.paragraphs[paragraph_prev.hash_value].append(paragraph_prev)
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
else:
revision_curr.paragraphs.update({paragraph_prev.hash_value : [paragraph_prev]})
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
break
# If the paragraph is not in the previous revision, but it is in an older revision
# update the authorship information and mark both paragraphs as matched.
if ((not matched_curr) and (hash_curr in paragraphs_ht)):
for paragraph_prev in paragraphs_ht[hash_curr]:
if (not paragraph_prev.matched):
matched_curr = True
paragraph_prev.matched = True
matched_paragraphs_prev.append(paragraph_prev)
# TODO: added this (CHECK).
for hash_sentence_prev in paragraph_prev.sentences.keys():
for sentence_prev in paragraph_prev.sentences[hash_sentence_prev]:
sentence_prev.matched = True
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
if (revision_prev.wikipedia_id not in word_prev.used):
word_prev.freq.append(revision_curr.wikipedia_id)
# Revert: reintroducing something that somebody else deleted,
# (and was not used in the previous revision)
if (revision_prev.wikipedia_id not in word_prev.used):
#if (revision_curr.wikipedia_id == 11):
# print "Revert in 11", word_prev.value, word_prev.deleted, relation.revert
for elem in word_prev.deleted:
if (elem in revisions.keys()):
if (revisions[elem].contributor_id != revision_curr.contributor_id):
if (elem in relation.revert.keys()):
relation.revert.update({elem : relation.revert[elem] + 1})
else:
relation.revert.update({elem : 1})
else:
if (elem in relation.self_revert.keys()):
relation.self_revert.update({elem : relation.self_revert[elem] + 1})
else:
relation.self_revert.update({elem : 1})
if (revision_prev.wikipedia_id not in word_prev.used):
if (elem in revisions.keys()):
if (revisions[word_prev.revision].contributor_id != revision_curr.contributor_id):
if (word_prev.revision in relation.reintroduced.keys()):
relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
else:
relation.reintroduced.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_reintroduced.keys()):
relation.self_reintroduced.update({word_prev.revision : relation.self_reintroduced[word_prev.revision] + 1})
else:
relation.self_reintroduced.update({word_prev.revision : 1})
# Add paragraph to current revision.
if (hash_curr in revision_curr.paragraphs.keys()):
revision_curr.paragraphs[paragraph_prev.hash_value].append(paragraph_prev)
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
else:
revision_curr.paragraphs.update({paragraph_prev.hash_value : [paragraph_prev]})
revision_curr.ordered_paragraphs.append(paragraph_prev.hash_value)
break
# If the paragraph did not match with previous revisions,
# add to container of unmatched paragraphs for further analysis.
if (not matched_curr):
paragraph_curr = Paragraph()
paragraph_curr.hash_value = Text.calculateHash(paragraph)
paragraph_curr.value = paragraph
revision_curr.ordered_paragraphs.append(paragraph_curr.hash_value)
if (paragraph_curr.hash_value in revision_curr.paragraphs.keys()):
revision_curr.paragraphs[paragraph_curr.hash_value].append(paragraph_curr)
else:
revision_curr.paragraphs.update({paragraph_curr.hash_value : [paragraph_curr]})
unmatched_paragraphs_curr.append(paragraph_curr)
# Identify unmatched paragraphs in previous revision for further analysis.
for paragraph_prev_hash in revision_prev.ordered_paragraphs:
for paragraph_prev in revision_prev.paragraphs[paragraph_prev_hash]:
if (not paragraph_prev.matched):
unmatched_paragraphs_prev.append(paragraph_prev)
return (unmatched_paragraphs_curr, unmatched_paragraphs_prev, matched_paragraphs_prev)
def analyseSentencesInParagraphs(unmatched_paragraphs_curr, unmatched_paragraphs_prev, revision_curr, revision_prev, relation):
# Containers for unmatched and matched sentences.
unmatched_sentences_curr = []
unmatched_sentences_prev = []
matched_sentences_prev = []
total_sentences = 0
# Iterate over the unmatched paragraphs of the current revision.
for paragraph_curr in unmatched_paragraphs_curr:
# Split the current paragraph into sentences.
sentences = Text.splitIntoSentences(paragraph_curr.value)
# Iterate over the sentences of the current paragraph
for sentence in sentences:
# Create the Sentence structure.
sentence = sentence.strip()
sentence = ' '.join(Text.splitIntoWords(sentence))
hash_curr = Text.calculateHash(sentence)
matched_curr = False
total_sentences = total_sentences + 1
# Iterate over the unmatched paragraphs from the previous revision.
for paragraph_prev in unmatched_paragraphs_prev:
if (hash_curr in paragraph_prev.sentences.keys()):
for sentence_prev in paragraph_prev.sentences[hash_curr]:
if (not sentence_prev.matched):
matched_one = False
matched_all = True
for word_prev in sentence_prev.words:
if (word_prev.matched):
matched_one = True
else:
matched_all = False
if not(matched_one):
sentence_prev.matched = True
matched_curr = True
matched_sentences_prev.append(sentence_prev)
# TODO: CHECK this
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
#if (word_prev.revision in relation.reintroduced.keys()):
# relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
#else:
# relation.reintroduced.update({word_prev.revision : 1 })
# Add the sentence information to the paragraph.
if (hash_curr in paragraph_curr.sentences.keys()):
paragraph_curr.sentences[hash_curr].append(sentence_prev)
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
else:
paragraph_curr.sentences.update({sentence_prev.hash_value : [sentence_prev]})
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
break
elif (matched_all):
sentence_prev.matched = True
matched_sentences_prev.append(sentence_prev)
if (matched_curr):
break
# Iterate over the hash table of sentences from old revisions.
if ((not matched_curr) and (hash_curr in sentences_ht.keys())):
for sentence_prev in sentences_ht[hash_curr]:
if (not sentence_prev.matched):
matched_one = False
matched_all = True
for word_prev in sentence_prev.words:
if (word_prev.matched):
matched_one = True
else:
matched_all = False
if not(matched_one):
sentence_prev.matched = True
matched_curr = True
matched_sentences_prev.append(sentence_prev)
# TODO: CHECK this
for word_prev in sentence_prev.words:
word_prev.matched = True
word_prev.used.append(revision_curr.wikipedia_id)
if (revision_prev.wikipedia_id not in word_prev.used):
word_prev.freq.append(revision_curr.wikipedia_id)
# Revert: reintroducing something that somebody else deleted
if (revision_prev.wikipedia_id not in word_prev.used):
for elem in word_prev.deleted:
#if (revision_curr.wikipedia_id == 11):
# print "Revert in 11", word_prev.value, word_prev.deleted, relation.revert
if (elem in revisions.keys()):
if (revisions[elem].contributor_id != revision_curr.contributor_id):
if (elem in relation.revert.keys()):
relation.revert.update({elem : relation.revert[elem] + 1})
else:
relation.revert.update({elem : 1})
else:
if (elem in relation.self_revert.keys()):
relation.self_revert.update({elem : relation.self_revert[elem] + 1})
else:
relation.self_revert.update({elem : 1})
#print "relation.revert", word_prev.value, word_prev.deleted, relation.revert, revision_curr.wikipedia_id
if (revision_prev.wikipedia_id not in word_prev.used):
if (word_prev.revision in revisions.keys()):
if (revisions[word_prev.revision].contributor_id != revision_curr.contributor_id):
if (word_prev.revision in relation.reintroduced.keys()):
relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
else:
relation.reintroduced.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_reintroduced.keys()):
relation.self_reintroduced.update({word_prev.revision : relation.self_reintroduced[word_prev.revision] + 1})
else:
relation.self_reintroduced.update({word_prev.revision : 1})
# Add the sentence information to the paragraph.
if (hash_curr in paragraph_curr.sentences.keys()):
paragraph_curr.sentences[hash_curr].append(sentence_prev)
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
else:
paragraph_curr.sentences.update({sentence_prev.hash_value : [sentence_prev]})
paragraph_curr.ordered_sentences.append(sentence_prev.hash_value)
break
elif (matched_all):
sentence_prev.matched = True
matched_sentences_prev.append(sentence_prev)
# If the sentence did not match, then include in the container of unmatched sentences for further analysis.
if (not matched_curr):
sentence_curr = Sentence()
sentence_curr.value = sentence
sentence_curr.hash_value = hash_curr
paragraph_curr.ordered_sentences.append(sentence_curr.hash_value)
if (sentence_curr.hash_value in paragraph_curr.sentences.keys()):
paragraph_curr.sentences[sentence_curr.hash_value].append(sentence_curr)
else:
paragraph_curr.sentences.update({sentence_curr.hash_value : [sentence_curr]})
unmatched_sentences_curr.append(sentence_curr)
# Identify the unmatched sentences in the previous paragraph revision.
for paragraph_prev in unmatched_paragraphs_prev:
for sentence_prev_hash in paragraph_prev.ordered_sentences:
for sentence_prev in paragraph_prev.sentences[sentence_prev_hash]:
if (not sentence_prev.matched):
unmatched_sentences_prev.append(sentence_prev)
sentence_prev.matched = True
matched_sentences_prev.append(sentence_prev)
return (unmatched_sentences_curr, unmatched_sentences_prev, matched_sentences_prev, total_sentences)
def analyseWordsInSentences(unmatched_sentences_curr, unmatched_sentences_prev, revision_curr, possible_vandalism, relation):
matched_words_prev = []
unmatched_words_prev = []
# Split sentences into words.
text_prev = []
for sentence_prev in unmatched_sentences_prev:
for word_prev in sentence_prev.words:
if (not word_prev.matched):
text_prev.append(word_prev.value)
unmatched_words_prev.append(word_prev)
text_curr = []
for sentence_curr in unmatched_sentences_curr:
splitted = Text.splitIntoWords(sentence_curr.value)
text_curr.extend(splitted)
sentence_curr.splitted.extend(splitted)
# Edit consists of removing sentences, not adding new content.
if (len(text_curr) == 0):
return (matched_words_prev, False)
# SPAM detection.
if (possible_vandalism):
density = Text.computeAvgWordFreq(text_curr, revision_curr.wikipedia_id)
if (density > WORD_DENSITY):
return (matched_words_prev, possible_vandalism)
else:
possible_vandalism = False
if (len(text_prev) == 0):
for sentence_curr in unmatched_sentences_curr:
for word in sentence_curr.splitted:
word_curr = Word()
word_curr.author_id = revision_curr.contributor_id
word_curr.author_name = revision_curr.contributor_name
word_curr.revision = revision_curr.wikipedia_id
word_curr.value = word
sentence_curr.words.append(word_curr)
word_curr.used.append(revision_curr.wikipedia_id)
relation.added = relation.added + 1
return (matched_words_prev, possible_vandalism)
d = Differ()
diff = list(d.compare(text_prev, text_curr))
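    # difflib.Differ codes: '  ' = token present in both texts, '- ' = only in
    # the previous text (deleted), '+ ' = only in the current text (added);
    # word_diff[2:] strips this two-character prefix below.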
for sentence_curr in unmatched_sentences_curr:
for word in sentence_curr.splitted:
curr_matched = False
pos = 0
while (pos < len(diff)):
word_diff = diff[pos]
if (word == word_diff[2:]):
if (word_diff[0] == ' '):
for word_prev in unmatched_words_prev:
if ((not word_prev.matched) and (word_prev.value == word)):
word_prev.used.append(revision_curr.wikipedia_id)
word_prev.matched = True
curr_matched = True
sentence_curr.words.append(word_prev)
matched_words_prev.append(word_prev)
diff[pos] = ''
pos = len(diff)+1
#if (word_prev.revision in relation.reintroduced.keys()):
# relation.reintroduced.update({word_prev.revision : relation.reintroduced[word_prev.revision] + 1 })
#else:
# relation.reintroduced.update({word_prev.revision : 1 })
break
elif (word_diff[0] == '-'):
for word_prev in unmatched_words_prev:
if ((not word_prev.matched) and (word_prev.value == word)):
word_prev.matched = True
matched_words_prev.append(word_prev)
diff[pos] = ''
word_prev.deleted.append(revision_curr.wikipedia_id)
if (revisions[word_prev.revision].contributor_id != revision_curr.contributor_id):
if (word_prev.revision in relation.deleted.keys()):
relation.deleted.update({word_prev.revision : relation.deleted[word_prev.revision] + 1 })
else:
relation.deleted.update({word_prev.revision : 1 })
else:
if (word_prev.revision in relation.self_deleted.keys()):
relation.self_deleted.update({word_prev.revision : relation.self_deleted[word_prev.revision] + 1 })
else:
relation.self_deleted.update({word_prev.revision : 1 })
break
elif (word_diff[0] == '+'):
curr_matched = True
word_curr = Word()
word_curr.value = word
word_curr.author_id = revision_curr.contributor_id
word_curr.author_name = revision_curr.contributor_name
word_curr.revision = revision_curr.wikipedia_id
word_curr.used.append(revision_curr.wikipedia_id)
sentence_curr.words.append(word_curr)
relation.added = relation.added + 1
diff[pos] = ''
pos = len(diff)+1
pos = pos + 1
if not(curr_matched):
word_curr = Word()
word_curr.value = word
word_curr.author_id = revision_curr.contributor_id
word_curr.author_name = revision_curr.contributor_name
word_curr.revision = revision_curr.wikipedia_id
word_curr.used.append(revision_curr.wikipedia_id)
sentence_curr.words.append(word_curr)
relation.added = relation.added + 1
return (matched_words_prev, possible_vandalism)
def printAllRevisions(order, revisions):
for (revision, vandalism) in order:
if not(vandalism):
printRevision(revisions[revision])
def printRevision(revision):
    print "Printing authorship for revision: ", revision.wikipedia_id
text = []
authors = []
for hash_paragraph in revision.ordered_paragraphs:
#print hash_paragraph
#text = ''
para = revision.paragraphs[hash_paragraph]
paragraph = para[-1]
#print paragraph.value
#print len(paragraph.sentences)
for hash_sentence in paragraph.ordered_sentences:
#print hash_sentence
sentence = paragraph.sentences[hash_sentence][-1]
#print sentence.words
for word in sentence.words:
#print word
#text = text + ' ' + unicode(word.value,'utf-8') + "@@" + str(word.revision)
text.append(word.value)
authors.append(word.revision)
print text
print authors
def printRevisionTrackAppearance(revision):
print "Printing authorship for revision: ", revision.wikipedia_id
text = []
authors = []
for hash_paragraph in revision.ordered_paragraphs:
#print hash_paragraph
#text = ''
para = revision.paragraphs[hash_paragraph]
paragraph = para[-1]
#print paragraph.value
#print len(paragraph.sentences)
for hash_sentence in paragraph.ordered_sentences:
#print hash_sentence
sentence = paragraph.sentences[hash_sentence][-1]
#print sentence.words
for word in sentence.words:
appeared = copy(word.used)
disappeared = copy(word.deleted)
changes = []
changes.append("+(" + str(appeared.pop(0))+")")
while len(disappeared) > 0:
d = disappeared.pop(0)
if (d > revision.wikipedia_id):
break
changes.append("-(" + str(d)+")")
while len(appeared) > 0:
a = appeared.pop(0)
if (a > d):
changes.append("+(" + str(a)+")")
break
#print word.used
#print word.deleted
print unicode(word.value,'utf-8') + "@@" + str(word.revision) + "@@" + str(changes)
text.append(word.value)
authors.append(word.revision)
#print text
#print authors
def printRelationships(relations, order):
print "Printing relationships"
header = ["revision", "author", "deleted(-)", "revert(-)", "reintroduced(+)", "redeleted(+)", "added", "total", "self-deleted", "self-revert", "self-reintroduced", "self-redeleted"]
print "\t".join(header)
for (revision, vandalism) in order:
if (vandalism):
continue
relation = relations[revision]
#print relation.author
print str(relation.revision) + "\t" + (relation.author).decode("utf-8") + "\t" + str(relation.deleted) + "\t" + str(relation.revert) + "\t" + str(relation.reintroduced) + "\t" + str(relation.redeleted) + "\t" + str(relation.added) + "\t" + str(relation.total_tokens) + "\t" + str(relation.self_deleted) + "\t" + str(relation.self_revert) + "\t" + str(relation.self_reintroduced) + "\t" + str(relation.self_redeleted)
def printJSON(relations, order):
deleted_values = {}
revert_values = {}
reintroduced_values = {}
redeleted_values = {}
for (revision, vandalism) in order:
if (vandalism):
continue
relation = relations[revision]
print str(relation.revision) + "\t" + str(relation.author) + "\t" + str(relation.deleted) + "\t" + str(relation.revert) + "\t" + str(relation.reintroduced) + "\t" + str(relation.redeleted) + "\t" + str(relation.added) + "\t" + str(relation.total_tokens)
def main(my_argv):
inputfile = ''
revision = None
output = ''
if (len(my_argv) <= 3):
try:
opts, _ = getopt.getopt(my_argv,"i:",["ifile="])
except getopt.GetoptError:
            print 'Usage: Wikiwho.py -i <inputfile> -o <output> [-r <revision_id>]'
exit(2)
else:
try:
opts, _ = getopt.getopt(my_argv,"i:o:r:",["ifile=","revision=", "output="])
except getopt.GetoptError:
            print 'Usage: Wikiwho.py -i <inputfile> -o <output> [-r <revision_id>]'
exit(2)
for opt, arg in opts:
if opt in ('-h', "--help"):
print "WikiWho: An algorithm for detecting attribution of authorship in revisioned content"
print
        print 'Usage: Wikiwho.py -i <inputfile> -o <output> [-r <revision_id>]'
        print "-i --ifile File to analyze"
        print "-o --output Type of output: <a> for authorship, <r> for relations"
print "-r --revision Revision to analyse. If not specified, the last revision is printed."
print "-h --help This help."
exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-r", "--revision"):
revision = arg
elif opt in ("-o", "--output"):
output = arg
return (inputfile,revision,output)
if __name__ == '__main__':
(file_name, revision, output) = main(argv[1:])
#print "Calculating authorship for:", file_name
time1 = time()
(revisions, order, relations) = analyseArticle(file_name)
time2 = time()
#pos = file_name.rfind("/")
#print file_name[pos+1: len(file_name)-len(".xml")], time2-time1
if (output == 'r'):
printRelationships(relations, order)
if (output == 'a'):
print "revision", revision
if (revision == 'all'):
printAllRevisions(order, revisions)
else:
printRevision(revisions[int(revision)])
#print "Execution time:", time2-time1
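    # Example invocations (hypothetical dump file name), matching the options
    # parsed in main():
    #   python Wikiwho.py -i article.xml -o a -r all   # authorship for every kept revision
    #   python Wikiwho.py -i article.xml -o r          # pairwise relationship table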
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests copying images to a Glance API server which uses a filesystem-
based storage backend.
"""
import hashlib
import tempfile
import time
import httplib2
from oslo_serialization import jsonutils
from oslo_utils import units
from six.moves import http_client
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.tests import functional
from glance.tests.functional.store_utils import get_http_uri
from glance.tests.functional.store_utils import setup_http
from glance.tests.utils import skip_if_disabled
FIVE_KB = 5 * units.Ki
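# i.e. 5 * 1024 = 5120 bytes of image payload, reused by every test below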
class TestCopyToFile(functional.FunctionalTest):
"""
Functional tests for copying images from the HTTP storage
backend to file
"""
def _do_test_copy_from(self, from_store, get_uri):
"""
Ensure we can copy from an external image in from_store.
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
# POST /images with public image to be stored in from_store,
# to stand in for the 'external' image
image_data = b"*" * FIVE_KB
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'external',
'X-Image-Meta-Store': from_store,
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True'}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(http_client.CREATED, response.status, content)
data = jsonutils.loads(content)
original_image_id = data['image']['id']
copy_from = get_uri(self, original_image_id)
# POST /images with public image copied from_store (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.CREATED, response.status, content)
data = jsonutils.loads(content)
copy_image_id = data['image']['id']
self.assertNotEqual(copy_image_id, original_image_id)
# GET image and make sure image content is as expected
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
def _await_status(expected_status):
for i in range(100):
time.sleep(0.01)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(http_client.OK, response.status)
if response['x-image-meta-status'] == expected_status:
return
self.fail('unexpected image status %s' %
response['x-image-meta-status'])
_await_status('active')
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(str(FIVE_KB), response['content-length'])
self.assertEqual(image_data, content)
self.assertEqual(hashlib.md5(image_data).hexdigest(),
hashlib.md5(content).hexdigest())
self.assertEqual(FIVE_KB, data['image']['size'])
self.assertEqual("copied", data['image']['name'])
# DELETE original image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
original_image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(http_client.OK, response.status)
        # GET the copied image again to make sure it does not depend on the
        # original image in from_store still existing
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(str(FIVE_KB), response['content-length'])
self.assertEqual(image_data, content)
self.assertEqual(hashlib.md5(image_data).hexdigest(),
hashlib.md5(content).hexdigest())
self.assertEqual(FIVE_KB, data['image']['size'])
self.assertEqual("copied", data['image']['name'])
# DELETE copied image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(http_client.OK, response.status)
self.stop_servers()
@skip_if_disabled
def test_copy_from_http_store(self):
"""
Ensure we can copy from an external image in HTTP store.
"""
self._do_test_copy_from('file', get_http_uri)
@skip_if_disabled
def test_copy_from_http_exists(self):
"""Ensure we can copy from an external image in HTTP."""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
copy_from = get_http_uri(self, 'foobar')
# POST /images with public image copied from HTTP (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.CREATED, response.status, content)
data = jsonutils.loads(content)
copy_image_id = data['image']['id']
self.assertEqual('queued', data['image']['status'], content)
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
def _await_status(expected_status):
for i in range(100):
time.sleep(0.01)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(http_client.OK, response.status)
if response['x-image-meta-status'] == expected_status:
return
self.fail('unexpected image status %s' %
response['x-image-meta-status'])
_await_status('active')
# GET image and make sure image content is as expected
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(str(FIVE_KB), response['content-length'])
self.assertEqual(b"*" * FIVE_KB, content)
self.assertEqual(hashlib.md5(b"*" * FIVE_KB).hexdigest(),
hashlib.md5(content).hexdigest())
# DELETE copied image
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(http_client.OK, response.status)
self.stop_servers()
@skip_if_disabled
def test_copy_from_http_nonexistent_location_url(self):
        # Ensure an HTTP 404 response is returned when trying to create an
        # image with a non-existent http location URL.
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
uri = get_http_uri(self, 'foobar')
copy_from = uri.replace('images', 'snafu')
# POST /images with public image copied from HTTP (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.NOT_FOUND, response.status, content)
expected = 'HTTP datastore could not find image at URI.'
self.assertIn(expected, content.decode())
self.stop_servers()
@skip_if_disabled
def test_copy_from_file(self):
"""
Ensure we can't copy from file
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
with tempfile.NamedTemporaryFile() as image_file:
image_file.write(b"XXX")
image_file.flush()
copy_from = 'file://' + image_file.name
# POST /images with public image copied from file (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.BAD_REQUEST, response.status, content)
expected = 'External sources are not supported: \'%s\'' % copy_from
msg = 'expected "%s" in "%s"' % (expected, content)
self.assertIn(expected, content.decode(), msg)
self.stop_servers()
@skip_if_disabled
def test_copy_from_swift_config(self):
"""
Ensure we can't copy from swift+config
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# POST /images with public image copied from file (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': 'swift+config://xxx'}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.BAD_REQUEST, response.status, content)
expected = 'External sources are not supported: \'swift+config://xxx\''
msg = 'expected "%s" in "%s"' % (expected, content)
self.assertIn(expected, content.decode(), msg)
self.stop_servers()
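# Illustrative sketch (not part of the test suite; host/port/URI below are
# placeholders): the v1 "copy-from" flow exercised above reduces to a single
# POST carrying image metadata headers plus X-Glance-API-Copy-From pointing
# at the source URI. The image is created in 'queued' state and the copy
# happens asynchronously.
def _post_copy_from_image(api_host, api_port, copy_from_uri, name='copied'):
    import httplib2
    headers = {'X-Image-Meta-Name': name,
               'X-Image-Meta-disk_format': 'raw',
               'X-Image-Meta-container_format': 'ovf',
               'X-Image-Meta-Is-Public': 'True',
               'X-Glance-API-Copy-From': copy_from_uri}
    path = "http://%s:%d/v1/images" % (api_host, api_port)
    # Returns the (response, content) pair from httplib2.
    return httplib2.Http().request(path, 'POST', headers=headers)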
|
|
import sh
import os
import json
import zipfile
__version__ = '0.0.2'
DEFAULT_PACKER_PATH = 'packer'
class Packer():
"""Packer interface using the `sh` module (http://amoffat.github.io/sh/)
"""
def __init__(self, packerfile, exc=None, only=None, vars=None,
vars_file=None, exec_path=DEFAULT_PACKER_PATH):
"""
:param string packerfile: Path to Packer template file
:param list exc: List of builders to exclude
:param list only: List of builders to include
:param dict vars: Key Value pairs of template variables
:param string vars_file: Path to variables file
:param string exec_path: Path to Packer executable
"""
self.packerfile = self._validate_argtype(packerfile, str)
self.vars_file = vars_file
if not os.path.isfile(self.packerfile):
raise OSError('packerfile not found at path: {0}'.format(
self.packerfile))
self.exc = self._validate_argtype(exc if exc else [], list)
self.only = self._validate_argtype(only if only else [], list)
self.vars = self._validate_argtype(vars if vars else {}, dict)
self.packer = sh.Command(exec_path)
def build(self, parallel=True, debug=False, force=False):
"""Executes a Packer build (`packer build`)
:param bool parallel: Run builders in parallel
:param bool debug: Run in debug mode
:param bool force: Force artifact output even if exists
"""
self.ccmd = self.packer.build
self._add_opt('-parallel=true' if parallel else None)
self._add_opt('-debug' if debug else None)
self._add_opt('-force' if force else None)
self._append_base_arguments()
self._add_opt(self.packerfile)
return self.ccmd()
def fix(self, to_file=None):
"""Implements the `packer fix` function
:param string to_file: File to output fixed template to
"""
self.ccmd = self.packer.fix
self._add_opt(self.packerfile)
result = self.ccmd()
if to_file:
with open(to_file, 'w') as f:
f.write(result.stdout)
result.fixed = json.loads(result.stdout)
return result
def inspect(self, mrf=True):
"""Inspects a Packer Templates file (`packer inspect -machine-readable`)
To return the output in a readable form, the `-machine-readable` flag
is appended automatically, afterwhich the output is parsed and returned
as a dict of the following format:
"variables": [
{
"name": "aws_access_key",
"value": "{{env `AWS_ACCESS_KEY_ID`}}"
},
{
"name": "aws_secret_key",
"value": "{{env `AWS_ACCESS_KEY`}}"
}
],
"provisioners": [
{
"type": "shell"
}
],
"builders": [
{
"type": "amazon-ebs",
"name": "amazon"
}
]
:param bool mrf: output in machine-readable form.
"""
self.ccmd = self.packer.inspect
self._add_opt('-machine-readable' if mrf else None)
self._add_opt(self.packerfile)
result = self.ccmd()
if mrf:
result.parsed_output = self._parse_inspection_output(
result.stdout)
return result
def push(self, create=True, token=False):
"""Implmenets the `packer push` function
UNTESTED! Must be used alongside an Atlas account
"""
self.ccmd = self.packer.push
self._add_opt('-create=true' if create else None)
self._add_opt('-token={0}'.format(token) if token else None)
self._add_opt(self.packerfile)
return self.ccmd()
def validate(self, syntax_only=False):
"""Validates a Packer Template file (`packer validate`)
If the validation failed, an `sh` exception will be raised.
:param bool syntax_only: Whether to validate the syntax only
without validating the configuration itself.
"""
self.ccmd = self.packer.validate
self._add_opt('-syntax-only' if syntax_only else None)
self._append_base_arguments()
self._add_opt(self.packerfile)
# as sh raises an exception rather than return a value when execution
# fails we create an object to return the exception and the validation
# state
try:
validation = self.ccmd()
validation.succeeded = validation.exit_code == 0
validation.error = None
except Exception as ex:
validation = ValidationObject()
validation.succeeded = False
validation.failed = True
validation.error = str(ex)
return validation
def version(self):
"""Returns Packer's version number (`packer version`)
As of v0.7.5, the output of `packer version` has the form
`Packer vX.Y.Z`. This method returns only the number, without the
`Packer v` prefix, so that you don't have to parse the version string
yourself.
"""
return self.packer.version().split('v')[1].rstrip('\n')
def _add_opt(self, option):
if option:
self.ccmd = self.ccmd.bake(option)
def _validate_argtype(self, arg, argtype):
if not isinstance(arg, argtype):
raise PackerException('{0} argument must be of type {1}'.format(
arg, argtype))
return arg
def _append_base_arguments(self):
"""Appends base arguments to packer commands.
-except, -only, -var and -var-file are appended to almost
all subcommands in packer. As such this can be called to add
these flags to the subcommand.
"""
if self.exc and self.only:
raise PackerException('Cannot provide both "except" and "only"')
elif self.exc:
self._add_opt('-except={0}'.format(self._joinc(self.exc)))
elif self.only:
self._add_opt('-only={0}'.format(self._joinc(self.only)))
for var, value in self.vars.items():
self._add_opt("-var '{0}={1}'".format(var, value))
if self.vars_file:
self._add_opt('-var-file={0}'.format(self.vars_file))
def _joinc(self, lst):
"""Returns a comma delimited string from a list"""
return str(','.join(lst))
def _joins(self, lst):
"""Returns a space delimited string from a list"""
return str(' '.join(lst))
def _parse_inspection_output(self, output):
"""Parses the machine-readable output `packer inspect` provides.
See the inspect method for more info.
This has been tested vs. Packer v0.7.5
"""
parts = {'variables': [], 'builders': [], 'provisioners': []}
for l in output.splitlines():
l = l.split(',')
if l[2].startswith('template'):
del l[0:2]
component = l[0]
if component == 'template-variable':
variable = {"name": l[1], "value": l[2]}
parts['variables'].append(variable)
elif component == 'template-builder':
builder = {"name": l[1], "type": l[2]}
parts['builders'].append(builder)
elif component == 'template-provisioner':
provisioner = {"type": l[1]}
parts['provisioners'].append(provisioner)
return parts
class Installer():
def __init__(self, packer_path, installer_path):
self.packer_path = packer_path
self.installer_path = installer_path
def install(self):
with open(self.installer_path, 'rb') as f:
zip = zipfile.ZipFile(f)
for path in zip.namelist():
zip.extract(path, self.packer_path)
exec_path = os.path.join(self.packer_path, 'packer')
if not self._verify(exec_path):
raise PackerException('packer installation failed. '
'Executable could not be found under: '
'{0}'.format(exec_path))
else:
return exec_path
def _verify(self, packer):
return os.path.isfile(packer)
class ValidationObject():
pass
class PackerException(Exception):
pass
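# Minimal usage sketch (assumptions: a Packer template at './template.json',
# the packer binary on PATH, and an 'aws_region' template variable; this
# helper is illustrative and not called anywhere in the module).
def _example_packer_usage():
    p = Packer('./template.json', vars={'aws_region': 'us-east-1'})
    validation = p.validate(syntax_only=False)
    if not validation.succeeded:
        raise PackerException('template invalid: {0}'.format(validation.error))
    print(p.version())               # e.g. '0.7.5'
    inspection = p.inspect(mrf=True)
    print(inspection.parsed_output)  # {'variables': [...], 'builders': [...], ...}
    return p.build(parallel=True, debug=False, force=False)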
|
|
"""Summary
"""
#
# Wrappers for Types in NVL
#
#
from ctypes import *
class WeldType(object):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "type"
def __hash__(self):
"""Summary
Returns:
TYPE: Description
"""
return hash(str(self))
def __eq__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return hash(other) == hash(self)
def __ne__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return hash(other) != hash(self)
@property
def ctype_class(self):
"""
Returns a class representing this type's ctype representation.
Raises:
NotImplementedError: Description
"""
raise NotImplementedError
class WeldChar(WeldType):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "i8"
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
return c_wchar_p
class WeldBit(WeldType):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "bool"
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
return c_bool
class WeldInt16(WeldType):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return 'i16'
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
return c_int16
class WeldInt(WeldType):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "i32"
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
return c_int
class WeldLong(WeldType):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "i64"
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
return c_long
class WeldFloat(WeldType):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "f32"
@property
def ctype_class(self):
return c_float
class WeldDouble(WeldType):
"""Summary
"""
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "f64"
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
return c_double
class WeldVec(WeldType):
"""Summary
Attributes:
elemType (TYPE): Description
"""
# Kind of a hack, but ctypes requires that the class instance returned is
# the same object. Every time we create a new Vec instance (templatized by
# type), we cache it here.
_singletons = {}
def __init__(self, elemType):
"""Summary
Args:
elemType (TYPE): Description
"""
self.elemType = elemType
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "vec[%s]" % str(self.elemType)
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
def vec_factory(elemType):
"""Summary
Args:
elemType (TYPE): Description
Returns:
TYPE: Description
"""
class Vec(Structure):
"""Summary
"""
_fields_ = [
("ptr", POINTER(elemType.ctype_class)),
("size", c_long),
]
return Vec
if self.elemType not in WeldVec._singletons:
WeldVec._singletons[self.elemType] = vec_factory(self.elemType)
return WeldVec._singletons[self.elemType]
class WeldStruct(WeldType):
"""Summary
Attributes:
field_types (TYPE): Description
"""
_singletons = {}
def __init__(self, field_types):
"""Summary
Args:
field_types (TYPE): Description
"""
assert False not in [isinstance(e, WeldType) for e in field_types]
self.field_types = field_types
def __str__(self):
"""Summary
Returns:
TYPE: Description
"""
return "{" + ",".join([str(f) for f in self.field_types]) + "}"
@property
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
def struct_factory(field_types):
"""Summary
Args:
field_types (TYPE): Description
Returns:
TYPE: Description
"""
class Struct(Structure):
"""Summary
"""
_fields_ = [(str(i), t.ctype_class)
for i, t in enumerate(field_types)]
return Struct
if frozenset(self.field_types) not in WeldStruct._singletons:
WeldStruct._singletons[
frozenset(self.field_types)] = struct_factory(self.field_types)
return WeldStruct._singletons[frozenset(self.field_types)]
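# Illustrative sketch: packing a Python list into the ctypes struct produced
# by WeldVec(WeldInt()).ctype_class. The helper name and the (vec, buffer)
# return convention are assumptions for demonstration, not part of the
# bindings above.
def _to_weld_vec_i32(values):
    vec_type = WeldVec(WeldInt())
    Vec = vec_type.ctype_class        # cached per element type via _singletons
    buf = (c_int * len(values))(*values)
    vec = Vec()
    vec.ptr = cast(buf, POINTER(c_int))
    vec.size = len(values)
    # The caller must keep `buf` alive for as long as `vec` is used.
    return vec, buf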
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test case for L{twisted.protocols.loopback}.
"""
from __future__ import division, absolute_import
from zope.interface import implementer
from twisted.python.compat import _PY3, intToBytes
from twisted.trial import unittest
from twisted.trial.util import suppress as SUPPRESS
from twisted.protocols import basic, loopback
from twisted.internet import defer
from twisted.internet.protocol import Protocol
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IAddress, IPushProducer, IPullProducer
from twisted.internet import reactor, interfaces
class SimpleProtocol(basic.LineReceiver):
def __init__(self):
self.conn = defer.Deferred()
self.lines = []
self.connLost = []
def connectionMade(self):
self.conn.callback(None)
def lineReceived(self, line):
self.lines.append(line)
def connectionLost(self, reason):
self.connLost.append(reason)
class DoomProtocol(SimpleProtocol):
i = 0
def lineReceived(self, line):
self.i += 1
if self.i < 4:
# by this point the connection should be closed,
# but just in case it isn't, we won't ever send 'Hello 4'
self.sendLine(b"Hello " + intToBytes(self.i))
SimpleProtocol.lineReceived(self, line)
if self.lines[-1] == b"Hello 3":
self.transport.loseConnection()
class LoopbackTestCaseMixin:
def testRegularFunction(self):
s = SimpleProtocol()
c = SimpleProtocol()
def sendALine(result):
s.sendLine(b"THIS IS LINE ONE!")
s.transport.loseConnection()
s.conn.addCallback(sendALine)
def check(ignored):
self.assertEqual(c.lines, [b"THIS IS LINE ONE!"])
self.assertEqual(len(s.connLost), 1)
self.assertEqual(len(c.connLost), 1)
d = defer.maybeDeferred(self.loopbackFunc, s, c)
d.addCallback(check)
return d
def testSneakyHiddenDoom(self):
s = DoomProtocol()
c = DoomProtocol()
def sendALine(result):
s.sendLine(b"DOOM LINE")
s.conn.addCallback(sendALine)
def check(ignored):
self.assertEqual(s.lines, [b'Hello 1', b'Hello 2', b'Hello 3'])
self.assertEqual(
c.lines, [b'DOOM LINE', b'Hello 1', b'Hello 2', b'Hello 3'])
self.assertEqual(len(s.connLost), 1)
self.assertEqual(len(c.connLost), 1)
d = defer.maybeDeferred(self.loopbackFunc, s, c)
d.addCallback(check)
return d
class LoopbackAsyncTestCase(LoopbackTestCaseMixin, unittest.TestCase):
loopbackFunc = staticmethod(loopback.loopbackAsync)
def test_makeConnection(self):
"""
Test that the client and server protocol both have makeConnection
invoked on them by loopbackAsync.
"""
class TestProtocol(Protocol):
transport = None
def makeConnection(self, transport):
self.transport = transport
server = TestProtocol()
client = TestProtocol()
loopback.loopbackAsync(server, client)
self.assertNotEqual(client.transport, None)
self.assertNotEqual(server.transport, None)
def _hostpeertest(self, get, testServer):
"""
Test one of the permutations of client/server host/peer.
"""
class TestProtocol(Protocol):
def makeConnection(self, transport):
Protocol.makeConnection(self, transport)
self.onConnection.callback(transport)
if testServer:
server = TestProtocol()
d = server.onConnection = Deferred()
client = Protocol()
else:
server = Protocol()
client = TestProtocol()
d = client.onConnection = Deferred()
loopback.loopbackAsync(server, client)
def connected(transport):
host = getattr(transport, get)()
self.assertTrue(IAddress.providedBy(host))
return d.addCallback(connected)
def test_serverHost(self):
"""
Test that the server gets a transport with a properly functioning
implementation of L{ITransport.getHost}.
"""
return self._hostpeertest("getHost", True)
def test_serverPeer(self):
"""
Like C{test_serverHost} but for L{ITransport.getPeer}
"""
return self._hostpeertest("getPeer", True)
def test_clientHost(self, get="getHost"):
"""
Test that the client gets a transport with a properly functioning
implementation of L{ITransport.getHost}.
"""
return self._hostpeertest("getHost", False)
def test_clientPeer(self):
"""
Like C{test_clientHost} but for L{ITransport.getPeer}.
"""
return self._hostpeertest("getPeer", False)
def _greetingtest(self, write, testServer):
"""
Test one of the permutations of write/writeSequence client/server.
@param write: The name of the method to test, C{"write"} or
C{"writeSequence"}.
"""
class GreeteeProtocol(Protocol):
bytes = b""
def dataReceived(self, bytes):
self.bytes += bytes
if self.bytes == b"bytes":
self.received.callback(None)
class GreeterProtocol(Protocol):
def connectionMade(self):
if write == "write":
self.transport.write(b"bytes")
else:
self.transport.writeSequence([b"byt", b"es"])
if testServer:
server = GreeterProtocol()
client = GreeteeProtocol()
d = client.received = Deferred()
else:
server = GreeteeProtocol()
d = server.received = Deferred()
client = GreeterProtocol()
loopback.loopbackAsync(server, client)
return d
def test_clientGreeting(self):
"""
Test that on a connection where the client speaks first, the server
receives the bytes sent by the client.
"""
return self._greetingtest("write", False)
def test_clientGreetingSequence(self):
"""
Like C{test_clientGreeting}, but use C{writeSequence} instead of
C{write} to issue the greeting.
"""
return self._greetingtest("writeSequence", False)
def test_serverGreeting(self, write="write"):
"""
Test that on a connection where the server speaks first, the client
receives the bytes sent by the server.
"""
return self._greetingtest("write", True)
def test_serverGreetingSequence(self):
"""
Like C{test_serverGreeting}, but use C{writeSequence} instead of
C{write} to issue the greeting.
"""
return self._greetingtest("writeSequence", True)
def _producertest(self, producerClass):
toProduce = list(map(intToBytes, range(0, 10)))
class ProducingProtocol(Protocol):
def connectionMade(self):
self.producer = producerClass(list(toProduce))
self.producer.start(self.transport)
class ReceivingProtocol(Protocol):
bytes = b""
def dataReceived(self, data):
self.bytes += data
if self.bytes == b''.join(toProduce):
self.received.callback((client, server))
server = ProducingProtocol()
client = ReceivingProtocol()
client.received = Deferred()
loopback.loopbackAsync(server, client)
return client.received
def test_pushProducer(self):
"""
Test a push producer registered against a loopback transport.
"""
@implementer(IPushProducer)
class PushProducer(object):
resumed = False
def __init__(self, toProduce):
self.toProduce = toProduce
def resumeProducing(self):
self.resumed = True
def start(self, consumer):
self.consumer = consumer
consumer.registerProducer(self, True)
self._produceAndSchedule()
def _produceAndSchedule(self):
if self.toProduce:
self.consumer.write(self.toProduce.pop(0))
reactor.callLater(0, self._produceAndSchedule)
else:
self.consumer.unregisterProducer()
d = self._producertest(PushProducer)
def finished(results):
(client, server) = results
self.assertFalse(
server.producer.resumed,
"Streaming producer should not have been resumed.")
d.addCallback(finished)
return d
def test_pullProducer(self):
"""
Test a pull producer registered against a loopback transport.
"""
@implementer(IPullProducer)
class PullProducer(object):
def __init__(self, toProduce):
self.toProduce = toProduce
def start(self, consumer):
self.consumer = consumer
self.consumer.registerProducer(self, False)
def resumeProducing(self):
self.consumer.write(self.toProduce.pop(0))
if not self.toProduce:
self.consumer.unregisterProducer()
return self._producertest(PullProducer)
def test_writeNotReentrant(self):
"""
L{loopback.loopbackAsync} does not call a protocol's C{dataReceived}
method while that protocol's transport's C{write} method is higher up
on the stack.
"""
class Server(Protocol):
def dataReceived(self, bytes):
self.transport.write(b"bytes")
class Client(Protocol):
ready = False
def connectionMade(self):
reactor.callLater(0, self.go)
def go(self):
self.transport.write(b"foo")
self.ready = True
def dataReceived(self, bytes):
self.wasReady = self.ready
self.transport.loseConnection()
server = Server()
client = Client()
d = loopback.loopbackAsync(client, server)
def cbFinished(ignored):
self.assertTrue(client.wasReady)
d.addCallback(cbFinished)
return d
def test_pumpPolicy(self):
"""
The callable passed as the value for the C{pumpPolicy} parameter to
L{loopbackAsync} is called with a L{_LoopbackQueue} of pending bytes
and a protocol to which they should be delivered.
"""
pumpCalls = []
def dummyPolicy(queue, target):
bytes = []
while queue:
bytes.append(queue.get())
pumpCalls.append((target, bytes))
client = Protocol()
server = Protocol()
finished = loopback.loopbackAsync(server, client, dummyPolicy)
self.assertEqual(pumpCalls, [])
client.transport.write(b"foo")
client.transport.write(b"bar")
server.transport.write(b"baz")
server.transport.write(b"quux")
server.transport.loseConnection()
def cbComplete(ignored):
self.assertEqual(
pumpCalls,
# The order here is somewhat arbitrary. The implementation
# happens to always deliver data to the client first.
[(client, [b"baz", b"quux", None]),
(server, [b"foo", b"bar"])])
finished.addCallback(cbComplete)
return finished
def test_identityPumpPolicy(self):
"""
L{identityPumpPolicy} is a pump policy which calls the target's
C{dataReceived} method once for each string in the queue passed to it.
"""
bytes = []
client = Protocol()
client.dataReceived = bytes.append
queue = loopback._LoopbackQueue()
queue.put(b"foo")
queue.put(b"bar")
queue.put(None)
loopback.identityPumpPolicy(queue, client)
self.assertEqual(bytes, [b"foo", b"bar"])
def test_collapsingPumpPolicy(self):
"""
L{collapsingPumpPolicy} is a pump policy which calls the target's
C{dataReceived} only once with all of the strings in the queue passed
to it joined together.
"""
bytes = []
client = Protocol()
client.dataReceived = bytes.append
queue = loopback._LoopbackQueue()
queue.put(b"foo")
queue.put(b"bar")
queue.put(None)
loopback.collapsingPumpPolicy(queue, client)
self.assertEqual(bytes, [b"foobar"])
class LoopbackTCPTestCase(LoopbackTestCaseMixin, unittest.TestCase):
loopbackFunc = staticmethod(loopback.loopbackTCP)
class LoopbackUNIXTestCase(LoopbackTestCaseMixin, unittest.TestCase):
loopbackFunc = staticmethod(loopback.loopbackUNIX)
if interfaces.IReactorUNIX(reactor, None) is None:
skip = "Current reactor does not support UNIX sockets"
elif _PY3:
skip = "UNIX sockets not supported on Python 3. See #6136"
|
|
# -*- encoding: utf-8 -*-
#
# Copyright 2015-2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import dci.app
from dci import dci_config
from dci.db import models
import tests.utils as utils
import tests.sso_tokens as sso_tokens
from passlib.apps import custom_app_context as pwd_context
import contextlib
import pytest
import sqlalchemy_utils.functions
from sqlalchemy.orm import sessionmaker
import uuid
@pytest.fixture(scope="session")
def engine(request):
utils.rm_upload_folder()
db_uri = utils.conf["SQLALCHEMY_DATABASE_URI"]
engine = dci_config.get_engine()
if not sqlalchemy_utils.functions.database_exists(db_uri):
sqlalchemy_utils.functions.create_database(db_uri)
utils.restore_db(engine)
return engine
@pytest.fixture
def session(engine):
return sessionmaker(bind=engine)()
@pytest.fixture
def empty_db(engine):
with contextlib.closing(engine.connect()) as con:
meta = models.metadata
trans = con.begin()
for table in reversed(meta.sorted_tables):
con.execute(table.delete())
trans.commit()
return True
@pytest.fixture
def reset_job_event(engine):
with contextlib.closing(engine.connect()) as con:
trans = con.begin()
con.execute("ALTER SEQUENCE jobs_events_id_seq RESTART WITH 1")
trans.commit()
return True
@pytest.fixture
def delete_db(request, engine, teardown_db_clean):
models.metadata.drop_all(engine)
engine.execute("DROP TABLE IF EXISTS alembic_version")
@pytest.fixture(scope="session", autouse=True)
def memoize_password_hash():
def memoize(func):
cache = {}
def helper(*args):
if args in cache:
return cache[args]
else:
value = func(*args)
cache[args] = value
return value
return helper
pwd_context.verify = memoize(pwd_context.verify)
pwd_context.hash = memoize(pwd_context.hash)
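# Aside (illustrative sketch, not used by the fixtures): for hashable
# arguments such as the password strings above, the hand-rolled memoize()
# helper is equivalent to functools.lru_cache.
def _lru_memoize(func):
    import functools
    return functools.lru_cache(maxsize=None)(func)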
@pytest.fixture
def teardown_db_clean(request, engine):
request.addfinalizer(lambda: utils.restore_db(engine))
@pytest.fixture
def fs_clean(request):
"""Clean test file upload directory"""
request.addfinalizer(utils.rm_upload_folder)
@pytest.fixture
def db_provisioning(empty_db, session):
utils.provision(session)
@pytest.fixture
def app(db_provisioning, engine, fs_clean):
app = dci.app.create_app()
app.testing = True
app.engine = engine
return app
@pytest.fixture
def admin(app):
return utils.generate_client(app, ("admin", "admin"))
@pytest.fixture
def unauthorized(app):
return utils.generate_client(app, ("bob", "bob"))
@pytest.fixture
def user(app):
return utils.generate_client(app, ("user", "user"))
@pytest.fixture
def user2(app):
return utils.generate_client(app, ("user2", "user2"))
@pytest.fixture
def rh_employee(app):
return utils.generate_client(app, ("rh_employee", "rh_employee"))
@pytest.fixture
def user_sso(app, access_token):
client = utils.generate_client(app, access_token=access_token)
# the first call creates the user in the database
client.get("/api/v1/users/me")
return client
@pytest.fixture
def user_id(admin):
user = admin.get("/api/v1/users?where=name:user")
user = admin.get("/api/v1/users/%s" % user.data["users"][0]["id"]).data
return str(user["user"]["id"])
@pytest.fixture
def user_no_team(admin):
r = admin.get("/api/v1/users?where=name:user_no_team")
return dict(r.data["users"][0])
@pytest.fixture
def epm(app):
return utils.generate_client(app, ("epm", "epm"))
@pytest.fixture
def epm_id(epm):
return epm.get("/api/v1/users/me").data["user"]["id"]
@pytest.fixture
def topic_id(admin, team_id, product):
data = {
"name": "topic_name",
"product_id": product["id"],
"component_types": ["type_1", "type_2", "type_3"],
}
topic = admin.post("/api/v1/topics", data=data).data
t_id = topic["topic"]["id"]
admin.post("/api/v1/topics/%s/teams" % t_id, data={"team_id": team_id})
return str(t_id)
@pytest.fixture
def topic(admin, team_user_id, product):
topic = admin.post(
"/api/v1/topics",
data={
"name": "OSP12",
"product_id": product["id"],
"component_types": ["puddle_osp"],
},
).data["topic"]
admin.post(
"/api/v1/components",
data={
"topic_id": topic["id"],
"name": "RH7-RHOS-12.0 2017-11-09.2",
"type": "puddle_osp",
},
)
admin.post("/api/v1/topics/%s/teams" % topic["id"], data={"team_id": team_user_id})
return topic
@pytest.fixture
def test_id(admin):
data = {"name": "pname"}
test = admin.post("/api/v1/tests", data=data).data
return str(test["test"]["id"])
@pytest.fixture
def team_id(admin, team_user_id):
team = admin.post("/api/v1/teams", data={"name": "pname"})
return str(team.data["team"]["id"])
@pytest.fixture
def team_product_id(admin):
team = admin.get("/api/v1/teams?where=name:product")
team = admin.get("/api/v1/teams/%s" % team.data["teams"][0]["id"]).data
return str(team["team"]["id"])
@pytest.fixture
def team_user_id(admin):
team = admin.get("/api/v1/teams?where=name:user").data["teams"][0]
return str(team["id"])
@pytest.fixture
def team_user_id2(admin):
team = admin.get("/api/v1/teams?where=name:user2").data["teams"][0]
return str(team["id"])
@pytest.fixture
def team_admin_id(admin):
team = admin.get("/api/v1/teams?where=name:admin").data["teams"][0]
return str(team["id"])
@pytest.fixture
def team_redhat_id(admin):
team = admin.get("/api/v1/teams?where=name:Red Hat").data["teams"][0]
return str(team["id"])
@pytest.fixture
def team_epm_id(admin):
team = admin.get("/api/v1/teams?where=name:EPM").data["teams"][0]
return str(team["id"])
@pytest.fixture
def topic_user(admin, user, team_user_id, team_user_id2, product):
data = {
"name": "topic_user_name",
"product_id": product["id"],
"component_types": ["type_1", "type_2", "type_3"],
}
topic = admin.post("/api/v1/topics", data=data).data["topic"]
admin.post("/api/v1/topics/%s/teams" % topic["id"], data={"team_id": team_user_id})
admin.post("/api/v1/topics/%s/teams" % topic["id"], data={"team_id": team_user_id2})
for i in range(1, 4):
admin.post(
"/api/v1/components",
data={"topic_id": topic["id"], "name": "comp%s" % i, "type": "type_%s" % i},
)
return topic
@pytest.fixture
def topic_user_id(topic_user):
return topic_user["id"]
@pytest.fixture
def remoteci_id(admin, team_id):
data = {"name": "pname", "team_id": team_id}
remoteci = admin.post("/api/v1/remotecis", data=data).data
return str(remoteci["remoteci"]["id"])
@pytest.fixture
def remoteci_user_api_secret(user, remoteci_user_id):
api_secret = user.get("/api/v1/remotecis/%s" % remoteci_user_id).data
return api_secret["remoteci"]["api_secret"]
@pytest.fixture
def remoteci_user(user, admin, team_user_id, topic_user_id):
data = {"name": "user remoteci", "team_id": team_user_id}
remoteci = user.post("/api/v1/remotecis", data=data).data
return remoteci["remoteci"]
@pytest.fixture
def remoteci_user_id(remoteci_user):
return str(remoteci_user["id"])
@pytest.fixture
def remoteci(admin, team_id):
data = {"name": "remoteci", "team_id": team_id}
return admin.post("/api/v1/remotecis", data=data).data["remoteci"]
@pytest.fixture
def remoteci_context(app, remoteci_user_id, remoteci_user_api_secret):
remoteci = {
"id": remoteci_user_id,
"api_secret": remoteci_user_api_secret,
"type": "remoteci",
}
return utils.generate_token_based_client(app, remoteci)
@pytest.fixture
def admin_remoteci_context(app, admin, team_admin_id):
admin_remoteci = admin.post(
"/api/v1/remotecis", data={"name": "admin remoteci", "team_id": team_admin_id}
).data["remoteci"]
return utils.generate_token_based_client(
app,
{
"id": admin_remoteci["id"],
"api_secret": admin_remoteci["api_secret"],
"type": "remoteci",
},
)
@pytest.fixture
def remoteci_configuration_user_id(user, remoteci_user_id, topic_user_id):
rc = user.post(
"/api/v1/remotecis/%s/configurations" % remoteci_user_id,
data={
"name": "cname",
"topic_id": topic_user_id,
"component_types": ["kikoo", "lol"],
"data": {"lol": "lol"},
},
).data
return str(rc["configuration"]["id"])
@pytest.fixture
def feeder_id(epm, team_user_id):
data = {"name": "feeder_osp", "team_id": team_user_id}
feeder = epm.post("/api/v1/feeders", data=data).data
return str(feeder["feeder"]["id"])
@pytest.fixture
def feeder_api_secret(epm, feeder_id):
api_secret = epm.get("/api/v1/feeders/%s" % feeder_id).data
return api_secret["feeder"]["api_secret"]
@pytest.fixture
def feeder_context(app, feeder_id, feeder_api_secret):
feeder = {"id": feeder_id, "api_secret": feeder_api_secret, "type": "feeder"}
return utils.generate_token_based_client(app, feeder)
def create_components(user, topic_id, component_types):
component_ids = []
for ct in component_types:
data = {"topic_id": topic_id, "name": "name-" + str(uuid.uuid4()), "type": ct}
cmpt = user.post("/api/v1/components", data=data).data
component_ids.append(str(cmpt["component"]["id"]))
return component_ids
@pytest.fixture
def components_ids(admin, topic_id):
component_types = ["type_1", "type_2", "type_3"]
return create_components(admin, topic_id, component_types)
@pytest.fixture
def components_user_ids(admin, topic_user_id):
component_types = ["type_1", "type_2", "type_3"]
return create_components(admin, topic_user_id, component_types)
@pytest.fixture
def job_admin(
admin, admin_remoteci_context, components_user_ids, topic_user_id, team_admin_id
):
admin.post(
"/api/v1/topics/%s/teams" % topic_user_id, data={"team_id": team_admin_id}
)
data = {"components_ids": components_user_ids, "topic_id": topic_user_id}
return admin_remoteci_context.post("/api/v1/jobs/schedule", data=data).data["job"]
@pytest.fixture
def job_user(remoteci_context, components_user_ids, topic_user_id):
data = {"components_ids": components_user_ids, "topic_id": topic_user_id}
job = remoteci_context.post("/api/v1/jobs/schedule", data=data).data
return job["job"]
@pytest.fixture
def job_user_id(job_user):
return job_user["id"]
@pytest.fixture
def jobstate_user_id(user, job_user_id):
data = {"job_id": job_user_id, "status": "running", "comment": "kikoolol"}
jobstate = user.post("/api/v1/jobstates", data=data).data
return jobstate["jobstate"]["id"]
@pytest.fixture
def file_user_id(user, jobstate_user_id, team_user_id):
headers = {"DCI-JOBSTATE-ID": jobstate_user_id, "DCI-NAME": "name"}
file = user.post("/api/v1/files", headers=headers, data="kikoolol").data
headers["team_id"] = team_user_id
headers["id"] = file["file"]["id"]
return file["file"]["id"]
@pytest.fixture
def file_job_user_id(user, job_user_id, team_user_id):
headers = {"DCI-JOB-ID": job_user_id, "DCI-NAME": "name"}
file = user.post("/api/v1/files", headers=headers, data="foobar").data
headers["team_id"] = team_user_id
headers["id"] = file["file"]["id"]
return file["file"]["id"]
@pytest.fixture
def feeder(admin, team_product_id):
data = {
"name": "random-name-feeder",
"team_id": team_product_id,
}
feeder = admin.post("/api/v1/feeders", data=data).data
return feeder["feeder"]
@pytest.fixture
def product_openstack(admin):
data = {
"name": "OpenStack",
"label": "OPENSTACK",
"description": "Red Hat OpenStack Platform",
}
return admin.post("/api/v1/products", data=data).data["product"]
@pytest.fixture
def product(admin):
return admin.get("/api/v1/products?where=label:AWSM").data["products"][0]
@pytest.fixture
def product2(admin):
return admin.get("/api/v1/products?where=label:BEST").data["products"][0]
@pytest.fixture
def access_token():
return sso_tokens.ACCESS_TOKEN_USER
@pytest.fixture
def access_token_rh_employee():
return sso_tokens.ACCESS_TOKEN_READ_ONLY_USER
@pytest.fixture(scope="session")
def cakeys():
k = (
"-----BEGIN RSA PRIVATE KEY-----\n"
"MIIJKAIBAAKCAgEAxVY2C4es4YwMtwwe6FKuybMZ8K8uWylF6AUurzFnp8mYObwT\n"
"IvM5W0es7qjdT7UowZBKC+wiCFfwG9O6HZJj62PW/PfHRJWbJZ6PaLPGj1J83qYN\n"
"SoSWIx340oUgzZnh0h3Yucqt634tjH+9nRq5YumLDKrcxryUSnGkFxv9jbx7yTUJ\n"
"Xl3QFu5pjoam78q6bbZjQapTFqmSoKNkhpflnLsxU1r27lmfWUj0chh1TBR0nCxk\n"
"dqCdafIl2MWCCJh3y459Qm6nbDBNrdDMpc+xluri/9o/MPWBk3amv7qvEzOn2DIx\n"
"H1n/nLqzsCeR86EzREemIk+259QQTWQqsiq1rghDl3CJB4DWHec0C5FLbOq9wQvV\n"
"S8J7UNKQrxcP3nnxa0iOGWYnoSzpfuB9uKIOtSMNJmPznFAiUbMbjRLACkWQlIHe\n"
"VyqqcEXLmERdpIQ8IoZPd6RLtc8g7UYjppMsbd342gcBqn+tskS5C3Ka7g6spYKh\n"
"Ct7TL3cmh5Tjghj9sTemUPsG8q9UVaUDIj5IHOg22kN+TSoa5DsoIES2x2ijgLXg\n"
"LMP9WtmfVVxK9mDKm9FVMwJuqFU2OjvELw/d3eKvfVTLZcZ647BYMxfUxGtj9R58\n"
"jxB0uL/nY4zhxwpgxLBq8Pv+x3MwLpGFOqAqJFO6q53l9d34pIetyuDEqqkCAwEA\n"
"AQKCAgAr7CttV46ASUbPO1Bpz3V+CXI9oxBDVCVuJjEk52815mYOe7Eat15N1d9E\n"
"46sWwbDHnJEOpElS6BfWacLkMCAzVW6VsaTVvCLjNa6f1FverLKEcBtHOcmdGV+3\n"
"o9AQUy7zMJd7iwQ5BUWoHwqaPEeFH4xGjoVDatfq1e57AkzmTkyTFU33hhP59qji\n"
"A1CG0O2727ut8vY5dhbf0F5gotCFmRi6f+W0WZhhLB7UgmMhQvBNjofx63/+A9qu\n"
"rA9sUFthoF56+dwj9YBkrrPOODND7xYFcpNcF1j29JLa2/d546Z5NXq/iq2dOeUi\n"
"0TvoKToa+YOd4XZJlWbnguMJ8v2q0bUdQFcJRcV155DxgqTtng7CAZyKd3AjPtE5\n"
"6+/WkZiMaBS6tJxBeUNSuanErMxpTshLukDZQbrKskn/PKL7Hy7Q04tYXDa1UB6M\n"
"qRMDxJB7+W4ct9dJ9lt4WxmNnnQz7TrzZxzX46i1o45+qDe1R8k5UL9vQ9vXwsE/\n"
"LYHsd4CwbyS2JXpFL/5m7yC6RrkEz2WF2ln5A/fHAW9Wl/6VP2MI05mv6gfYdIr5\n"
"MgkkR4NjucwBj5wK0CP+4+8Qyf+ZGwIBUMMjraIkGFvEFElapxgg8gxNfrHD7gfg\n"
"orwqJ1N55Ajs5ZVjbf14It+u0HfamAbE10++yqEm9H//CaTiAQKCAQEA5ByigRd4\n"
"639fYaLmMgEXTs5I+3D7eDYZt9YnT9fIsINqtvzuuhSI+nxfk/6rlxviJQ2S9yWQ\n"
"dLZDuNSfZxx9G74qaO0ShWSXp4T+jyA+y+E0skuWOR/KxGqzqUab9vdhO+D1oLfV\n"
"dDnnY4ILxPeLj6veAg56Qcc9X+dbHKcPiGgNC/A+Rj0qVvvaUKOcQqyAjCSyIXxF\n"
"PvDshyku+Ty2gAtu0MS2LcXZJDjLs4dGu7MQz5/POe6pjgTEIOy3IWLqKVzczNmR\n"
"4hKra2EgmMQ+Ls/Od4n77WL3bhGg9DTdChKdJyJOMXSCq5YsCEQnQfkodfq6/Amb\n"
"hhpkuVKZwwac6QKCAQEA3XZjYuGwna4CBKF3TWG9yREuYhguUF370ly7G9TL7enw\n"
"e100i/n/KWjApxAByInNy0TEHcSGfEiLMc9sDbQoaBuG7PEhIeSuSn6/D4olTUvq\n"
"F3C0Z27tT95HZZ43xBDszsKJOhNx8wepmbtbK7pHUfqQm1KqY5qiABAxojaxDWn+\n"
"8Q6W7HL4IPcmw9acFni2V/5WrWRfvT1TWEYxzWU65+wT0HWpGatqQVFyh0F6Yxe7\n"
"WnIG7v5qxtz1Sj6hqf5vEkN50fHI7XLOoMDe3RtRj8TZ50fyAvvOjkw1yHMf0Wsk\n"
"nTBCpN+CF6F74zNScITsfp+Cl9jXU6y7FR4/z84HwQKCAQEAhfePNJNtb5kRkkzS\n"
"NoHPh3e9AvaaqUHUntPFqK2I7qlvjeJD7cLLo5hRpaAGdCtrB+nN6xoDmZfFdBJj\n"
"P3JKw3VOgOriWrb2HesMeVAtsR0lDqU3p3rVYb9snjiatlMYpsr6VpZAZQ7wps8k\n"
"TFw5eXotWzXXdTQnBmDgcJZol+rL5rwERsn7SLSGxZ8g0UNwB14xw1qxbEKgFs0I\n"
"ClYutEqCnVc5yu4MFarJbzk+QFPsxpMLZ/GTYJXJ/bAn6RKnhP1Fq4UHmSbvx5N2\n"
"SmHORz3B+xBthT//IoR165X0TssZwnbyRzcu2sjKOVyVVbiXm5pSIF0gGoT7rJ8n\n"
"MJN8qQKCAQBnqsF/ShJ43TmInWTRTk2ez3Ic7SDQ8g2tPUdBEe2cIwQ1Wz37wDzX\n"
"T3fPPEj5bLhuzHPZU2N4ziSKXoRALfM0OJ6CT6WozflgWdBqH8qyUjT0YAey21Qv\n"
"LOfTA6srFpkjeCDwlKWklBOqKO/Wmk5Ea7xBWQL1uS7YRLxXKK7cjp+Oi7vOV0sb\n"
"c1YsGkvaoQsKSb6dT/0ZApn/Gmy5rwdSBUqJLGrJ31nP1aZ89gOqWzOSdQoV2fZ1\n"
"vHz+Ei9u+fFYZUmjI0FhFXrv+RjZ+63EVOuDvkPlbaYVKkuK14kvaK4s/qhTsWSe\n"
"VzM8+Ys/rJlf9J8XIaQ6QQMaMZzBU7qBAoIBABqsTioYbOJDR0OJXLy7ykiXOdcx\n"
"so7mek6YFRL+//9XlprLYDfoMVf0s0uWrJ9xY+Gcr9GIyOiKBrnWKVFrbibfZSvr\n"
"L9swaN82IotuiT9Mk7JKLWdRY0JLMC1XhfahRgg5wyukjct8mYJGcuY2vVvHmd6D\n"
"XuhVO0mlm0v/ScdBUvKQKjMOFLYXxHh/2a1mQD5coujJnn9iCA4Pf9xmLo6fG/Jy\n"
"xqrDef+lE+ow+uPJanueVDo9KcNBEa80f9KOzOwyb90zWfVYkvt1vMkOOsoVkvR/\n"
"qM1R5M9igUzsHGfIpY6jA0OR26gg2G+xcwPKCqeSUnmSbhE8LXHEyskc+Gs=\n"
"-----END RSA PRIVATE KEY-----"
)
c = (
"-----BEGIN CERTIFICATE-----\n"
"MIIF5jCCA86gAwIBAgIJAK1pLlYEf/ebMA0GCSqGSIb3DQEBCwUAMIGHMQswCQYD\n"
"VQQGEwJGUjEMMAoGA1UECAwDSURGMQ4wDAYDVQQHDAVQYXJpczEPMA0GA1UECgwG\n"
"UmVkSGF0MQwwCgYDVQQLDANEQ0kxETAPBgNVBAMMCERDSS1SZXBvMSgwJgYJKoZI\n"
"hvcNAQkBFhlkaXN0cmlidXRlZC1jaUByZWRoYXQuY29tMB4XDTE4MDMxNDEzMzY0\n"
"MFoXDTE5MDMxNDEzMzY0MFowgYcxCzAJBgNVBAYTAkZSMQwwCgYDVQQIDANJREYx\n"
"DjAMBgNVBAcMBVBhcmlzMQ8wDQYDVQQKDAZSZWRIYXQxDDAKBgNVBAsMA0RDSTER\n"
"MA8GA1UEAwwIRENJLVJlcG8xKDAmBgkqhkiG9w0BCQEWGWRpc3RyaWJ1dGVkLWNp\n"
"QHJlZGhhdC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDFVjYL\n"
"h6zhjAy3DB7oUq7Jsxnwry5bKUXoBS6vMWenyZg5vBMi8zlbR6zuqN1PtSjBkEoL\n"
"7CIIV/Ab07odkmPrY9b898dElZslno9os8aPUnzepg1KhJYjHfjShSDNmeHSHdi5\n"
"yq3rfi2Mf72dGrli6YsMqtzGvJRKcaQXG/2NvHvJNQleXdAW7mmOhqbvyrpttmNB\n"
"qlMWqZKgo2SGl+WcuzFTWvbuWZ9ZSPRyGHVMFHScLGR2oJ1p8iXYxYIImHfLjn1C\n"
"bqdsME2t0Mylz7GW6uL/2j8w9YGTdqa/uq8TM6fYMjEfWf+curOwJ5HzoTNER6Yi\n"
"T7bn1BBNZCqyKrWuCEOXcIkHgNYd5zQLkUts6r3BC9VLwntQ0pCvFw/eefFrSI4Z\n"
"ZiehLOl+4H24og61Iw0mY/OcUCJRsxuNEsAKRZCUgd5XKqpwRcuYRF2khDwihk93\n"
"pEu1zyDtRiOmkyxt3fjaBwGqf62yRLkLcpruDqylgqEK3tMvdyaHlOOCGP2xN6ZQ\n"
"+wbyr1RVpQMiPkgc6DbaQ35NKhrkOyggRLbHaKOAteAsw/1a2Z9VXEr2YMqb0VUz\n"
"Am6oVTY6O8QvD93d4q99VMtlxnrjsFgzF9TEa2P1HnyPEHS4v+djjOHHCmDEsGrw\n"
"+/7HczAukYU6oCokU7qrneX13fikh63K4MSqqQIDAQABo1MwUTAdBgNVHQ4EFgQU\n"
"MAt4tfiBDZ7koyZq8Ss0P+swDpQwHwYDVR0jBBgwFoAUMAt4tfiBDZ7koyZq8Ss0\n"
"P+swDpQwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAlqC46nbw\n"
"Y8UhIktSTInTrtX70Y9QF/Gl0weDoOgUNG/j9ReQPU1h/W3Ion4sZ4WO6Q3Y3jQ/\n"
"0+Ky0bJFpQZ0C3ssS65XdJC9fpLsiZY/HZUlgRuj7tnR7AjqHA6PBFNz83yGHZgq\n"
"GMA3LMq+FyY3K/FTj/NX5iaE2sJlDLu36yv6zcVnQwskkQ/rHoNmE3Prr3A7ZkLv\n"
"Ox73PpkiD7FySmTFiiz6i/CPUyx6Y9fZNhNSXvjg6F2tXYQWPJAEL0fWTKvywMML\n"
"tpIQnwz6hNaH5Z+O92X67LfJJtmoggNexmO/pbeGVNYPjyRllMcNNJq3GsABwzuA\n"
"7zIitCjqpw0RV40pSLv9oulqrS+tdMW55R/RxVCEx3L0H/L36K7IjXan5UkWQxlW\n"
"zi65LvYGgCU9d0CH7gUtyyRgJ1G7hAYbBqYOlMEjHdYYOGhGW4LVKSJ4QwPn+yHn\n"
"+GXELJTacwV0LVGcDPkqdWbt0KcyMukDFQXs5UikE3i+54783cmfZr3U5Gr/OCWZ\n"
"VikifhmBSl3sRfVm7YPW5pffAdACRDZVjZ6ro37x0JQ6jERuhaKe7sv3s0/gCWT5\n"
"XMFg+rftswcrSvxBpVNTUu5vPnXK3dWsM4noalVxh449ewlcruYh17Yt2KEwkB/+\n"
"4AMjw7GIwuUN1rZsqBbZ5tBHrRoR02IDcMA=\n"
"-----END CERTIFICATE-----"
)
cert = open("/tmp/ca.crt", "w")
cert.write(c)
cert.close()
key = open("/tmp/ca.key", "w")
key.write(k)
key.close()
@pytest.fixture
def RHELProduct(admin):
data = {"name": "RHEL", "label": "RHEL", "description": "Red Hat Entreprise Linux"}
return admin.post("/api/v1/products", data=data).data["product"]
@pytest.fixture
def RHEL80Topic(admin, RHELProduct):
data = {
"name": "RHEL-8.0",
"product_id": RHELProduct["id"],
"component_types": ["Compose"],
"export_control": True,
}
return admin.post("/api/v1/topics", data=data).data["topic"]
@pytest.fixture
def RHEL80Component(admin, RHEL80Topic):
data = {
"topic_id": RHEL80Topic["id"],
"name": "RHEL-8.0.0-20190926.n.0",
"type": "Compose",
}
return admin.post("/api/v1/components", data=data).data["component"]
@pytest.fixture
def RHEL81Topic(admin, RHELProduct):
data = {
"name": "RHEL-8.1",
"product_id": RHELProduct["id"],
"component_types": ["Compose"],
"export_control": False,
}
return admin.post("/api/v1/topics", data=data).data["topic"]
@pytest.fixture
def RHEL81Component(admin, RHEL81Topic):
data = {
"topic_id": RHEL81Topic["id"],
"name": "RHEL-8.1.0-20190926.n.0",
"type": "Compose",
}
return admin.post("/api/v1/components", data=data).data["component"]
@pytest.fixture
def cki_test_file(user, job_user):
headers = {
"DCI-JOB-ID": job_user["id"],
"DCI-NAME": "cki-result",
"DCI-MIME": "application/junit",
"Content-Disposition": "attachment; filename=cki-results.xml",
"Content-Type": "application/junit",
}
data = open("tests/data/cki-results.xml").read()
r = user.post("/api/v1/files", headers=headers, data=data)
return r.data["file"]
|
|
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import importutils
import six
from six import moves
_PY26 = sys.version_info[0:2] == (2, 6)
from neutron.openstack.common._i18n import _
from neutron.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
"keystonemiddleware=WARN", "routes.middleware=WARN",
"stevedore=WARN"]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def isEnabledFor(self, level):
if _PY26:
# This method was added in python 2.7; replicate the same logic
# here so that python 2.6 has this capability as well.
return self.logger.isEnabledFor(level)
else:
return super(BaseLoggerAdapter, self).isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
if six.PY3:
# In Python 3, the code fails because the 'manager' attribute
# cannot be found when using a LoggerAdapter as the
# underlying logger. Work around this issue.
self._logger.manager = self._logger.logger.manager
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(msg, six.text_type):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
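# Illustrative sketch (helper is an assumption, not called by this module):
# emitting JSON log records by attaching JSONFormatter to a stream handler.
def _example_json_logging(stream=None):
    handler = logging.StreamHandler(stream)
    handler.setFormatter(JSONFormatter(datefmt=_DEFAULT_LOG_DATE_FORMAT))
    logger = logging.getLogger('json-example')
    logger.addHandler(handler)
    logger.warning('something happened: %s', 'detail')
    return handler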
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except (moves.configparser.Error, KeyError) as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
default_log_levels=None):
# Just in case the caller is not setting the
# default_log_levels. This is insurance because
# we introduced the default_log_levels parameter
# later in a backwards-incompatible change
if default_log_levels is not None:
cfg.set_defaults(
log_opts,
default_log_levels=default_log_levels)
if logging_context_format_string is not None:
cfg.set_defaults(
log_opts,
logging_context_format_string=logging_context_format_string)
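# Illustrative sketch (values are assumptions): a consuming service would
# typically adjust these defaults before parsing its configuration and
# calling setup().
def _example_configure_logging(product_name='neutron'):
    set_defaults(default_log_levels=DEFAULT_LOG_LEVELS + ['paramiko=WARN'])
    setup(product_name)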
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
try:
handler = importutils.import_object(
"neutron.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
except ImportError:
handler = importutils.import_object(
"oslo.messaging.notify.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
if CONF.use_syslog:
try:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(facility=facility)
else:
syslog = logging.handlers.SysLogHandler(facility=facility)
log_root.addHandler(syslog)
except socket.error:
log_root.error('Unable to add syslog handler. Verify that syslog '
'is running.')
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(record.msg, six.text_type):
record.msg = six.text_type(record.msg)
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = CONF.logging_context_format_string
else:
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
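# Usage sketch (illustrative, not part of the original module): once
# _setup_logging_from_conf() has configured the root handlers, application
# code obtains a ContextAdapter from getLogger(), and WritableLogger can
# expose the same logger to libraries that expect a file-like object.
def _example_logging_usage():
    log = getLogger('neutron.example')
    log.info('plain logging through the ContextAdapter')
    stream = WritableLogger(log, level=logging.DEBUG)
    stream.write('line captured from a file-like consumer\n')
    return stream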
|
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import urllib
import xml.sax
import threading
import boto
from boto import handler
from boto.connection import AWSQueryConnection
from boto.sdb.domain import Domain, DomainMetaData
from boto.sdb.item import Item
from boto.exception import SDBResponseError
from boto.resultset import ResultSet
class ItemThread(threading.Thread):
def __init__(self, name, domain_name, item_names):
threading.Thread.__init__(self, name=name)
print 'starting %s with %d items' % (name, len(item_names))
self.domain_name = domain_name
self.conn = SDBConnection()
self.item_names = item_names
self.items = []
def run(self):
for item_name in self.item_names:
item = self.conn.get_attributes(self.domain_name, item_name)
self.items.append(item)
class SDBConnection(AWSQueryConnection):
APIVersion = '2007-11-07'
SignatureVersion = '2'
ResponseError = SDBResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='sdb.amazonaws.com', debug=0,
https_connection_factory=None):
AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory)
self.box_usage = 0.0
def build_name_value_list(self, params, attributes, replace=False):
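        # For example, attributes={'colour': ['blue', 'red'], 'size': '7'}
        # with replace=True is flattened (in sorted key order) into
        # Attribute.1.Name=colour, Attribute.1.Value=blue, Attribute.1.Replace=true,
        # Attribute.2.Name=colour, Attribute.2.Value=red, Attribute.2.Replace=true,
        # Attribute.3.Name=size, Attribute.3.Value=7, Attribute.3.Replace=true,
        # which is the wire format the SimpleDB PutAttributes call expects.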
keys = attributes.keys()
keys.sort()
i = 1
for key in keys:
value = attributes[key]
if isinstance(value, list):
for v in value:
params['Attribute.%d.Name'%i] = key
params['Attribute.%d.Value'%i] = v
if replace:
params['Attribute.%d.Replace'%i] = 'true'
i += 1
else:
params['Attribute.%d.Name'%i] = key
params['Attribute.%d.Value'%i] = value
if replace:
params['Attribute.%d.Replace'%i] = 'true'
i += 1
def build_name_list(self, params, attribute_names):
i = 1
attribute_names.sort()
for name in attribute_names:
params['Attribute.%d.Name'%i] = name
i += 1
def get_usage(self):
"""
Returns the BoxUsage accumulated on this SDBConnection object.
@rtype: float
@return: The accumulated BoxUsage of all requests made on the connection.
"""
return self.box_usage
def print_usage(self):
"""
Print the BoxUsage and approximate costs of all requests made on this connection.
"""
print 'Total Usage: %f compute seconds' % self.box_usage
cost = self.box_usage * 0.14
print 'Approximate Cost: $%f' % cost
def get_domain(self, domain_name, validate=True):
domain = Domain(self, domain_name)
if validate:
self.query(domain, '', max_items=1)
return domain
def lookup(self, domain_name, validate=True):
"""
Lookup an existing SimpleDB domain
@type domain_name: string
        @param domain_name: The name of the domain to look up
@rtype: L{Domain<boto.sdb.domain.Domain>} object or None
@return: The Domain object or None if the domain does not exist.
"""
try:
domain = self.get_domain(domain_name, validate)
        except SDBResponseError:
domain = None
return domain
def get_all_domains(self, max_domains=None, next_token=None):
params = {}
if max_domains:
params['MaxNumberOfDomains'] = max_domains
if next_token:
params['NextToken'] = next_token
return self.get_list('ListDomains', params, [('DomainName', Domain)])
def create_domain(self, domain_name):
"""
Create a SimpleDB domain.
@type domain_name: string
@param domain_name: The name of the new domain
@rtype: L{Domain<boto.sdb.domain.Domain>} object
@return: The newly created domain
"""
params = {'DomainName':domain_name}
d = self.get_object('CreateDomain', params, Domain)
d.name = domain_name
return d
def get_domain_and_name(self, domain_or_name):
if (isinstance(domain_or_name, Domain)):
return (domain_or_name, domain_or_name.name)
else:
return (self.get_domain(domain_or_name), domain_or_name)
def delete_domain(self, domain_or_name):
"""
Delete a SimpleDB domain.
@type domain_or_name: string or L{Domain<boto.sdb.domain.Domain>} object.
@param domain_or_name: Either the name of a domain or a Domain object
@rtype: bool
@return: True if successful
B{Note:} This will delete the domain and all items within the domain.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name}
return self.get_status('DeleteDomain', params)
def domain_metadata(self, domain_or_name):
"""
Get the Metadata for a SimpleDB domain.
@type domain_or_name: string or L{Domain<boto.sdb.domain.Domain>} object.
@param domain_or_name: Either the name of a domain or a Domain object
@rtype: L{DomainMetaData<boto.sdb.domain.DomainMetaData>} object
        @return: The DomainMetaData object describing the domain
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name}
d = self.get_object('DomainMetadata', params, DomainMetaData)
d.domain = domain
return d
def put_attributes(self, domain_or_name, item_name, attributes, replace=True):
"""
Store attributes for a given item in a domain.
@type domain_or_name: string or L{Domain<boto.sdb.domain.Domain>} object.
@param domain_or_name: Either the name of a domain or a Domain object
@type item_name: string
@param item_name: The name of the item whose attributes are being stored.
        @type attributes: dict or dict-like object
        @param attributes: The name/value pairs to store as attributes
        @type replace: bool
        @param replace: Whether the attribute values passed in will replace
                        existing values or will be added as additional values.
                        Defaults to True.
@rtype: bool
@return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name,
'ItemName' : item_name}
self.build_name_value_list(params, attributes, replace)
return self.get_status('PutAttributes', params)
def get_attributes(self, domain_or_name, item_name, attribute_names=None, item=None):
"""
Retrieve attributes for a given item in a domain.
@type domain_or_name: string or L{Domain<boto.sdb.domain.Domain>} object.
@param domain_or_name: Either the name of a domain or a Domain object
@type item_name: string
@param item_name: The name of the item whose attributes are being retrieved.
@type attribute_names: string or list of strings
@param attribute_names: An attribute name or list of attribute names. This
parameter is optional. If not supplied, all attributes
will be retrieved for the item.
@rtype: L{Item<boto.sdb.item.Item>}
@return: An Item mapping type containing the requested attribute name/values
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName' : domain_name,
'ItemName' : item_name}
if attribute_names:
if not isinstance(attribute_names, list):
attribute_names = [attribute_names]
self.build_list_params(params, attribute_names, 'AttributeName')
response = self.make_request('GetAttributes', params)
body = response.read()
if response.status == 200:
            if item is None:
item = Item(domain, item_name)
h = handler.XmlHandler(item, self)
xml.sax.parseString(body, h)
return item
else:
raise SDBResponseError(response.status, response.reason, body)
def delete_attributes(self, domain_or_name, item_name, attr_names=None):
"""
Delete attributes from a given item in a domain.
@type domain_or_name: string or L{Domain<boto.sdb.domain.Domain>} object.
@param domain_or_name: Either the name of a domain or a Domain object
@type item_name: string
@param item_name: The name of the item whose attributes are being deleted.
        @type attr_names: dict, list or L{Item<boto.sdb.item.Item>}
        @param attr_names: Either a list containing attribute names which will cause
all values associated with that attribute name to be deleted or
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
@rtype: bool
@return: True if successful
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name,
'ItemName' : item_name}
if attr_names:
if isinstance(attr_names, list):
self.build_name_list(params, attr_names)
elif isinstance(attr_names, dict) or isinstance(attr_names, Item):
self.build_name_value_list(params, attr_names)
return self.get_status('DeleteAttributes', params)
def query(self, domain_or_name, query='', max_items=None, next_token=None):
"""
Returns a list of item names within domain_name that match the query.
@type domain_or_name: string or L{Domain<boto.sdb.domain.Domain>} object.
@param domain_or_name: Either the name of a domain or a Domain object
@type query: string
@param query: The SimpleDB query to be performed.
@type max_items: int
@param max_items: The maximum number of items to return. If not
supplied, the default is None which returns all
items matching the query.
@rtype: ResultSet
@return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name,
'QueryExpression' : query}
if max_items:
params['MaxNumberOfItems'] = max_items
if next_token:
params['NextToken'] = next_token
return self.get_object('Query', params, ResultSet)
def query_with_attributes(self, domain_or_name, query='', attr_names=None,
max_items=None, next_token=None):
"""
Returns a set of Attributes for item names within domain_name that match the query.
@type domain_or_name: string or L{Domain<boto.sdb.domain.Domain>} object.
@param domain_or_name: Either the name of a domain or a Domain object
@type query: string
@param query: The SimpleDB query to be performed.
@type attr_names: list
@param attr_names: The name of the attributes to be returned.
If no attributes are specified, all attributes
will be returned.
@type max_items: int
@param max_items: The maximum number of items to return. If not
supplied, the default is None which returns all
items matching the query.
@rtype: ResultSet
@return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName':domain_name,
'QueryExpression' : query}
if max_items:
params['MaxNumberOfItems'] = max_items
if next_token:
params['NextToken'] = next_token
if attr_names:
self.build_list_params(params, attr_names, 'AttributeName')
return self.get_list('QueryWithAttributes', params, [('Item', Item)], parent=domain)
def threaded_query(self, domain_or_name, query='', max_items=None, next_token=None, num_threads=6):
"""
Returns a list of fully populated items that match the query provided.
The name/value pairs for all of the matching item names are retrieved in a number of separate
threads (specified by num_threads) to achieve maximum throughput.
The ResultSet that is returned has an attribute called next_token that can be used
to retrieve additional results for the same query.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
if max_items and num_threads > max_items:
num_threads = max_items
rs = self.query(domain_or_name, query, max_items, next_token)
threads = []
n = len(rs) / num_threads
for i in range(0, num_threads):
if i+1 == num_threads:
thread = ItemThread('Thread-%d' % i, domain_name, rs[n*i:])
else:
thread = ItemThread('Thread-%d' % i, domain_name, rs[n*i:n*(i+1)])
threads.append(thread)
thread.start()
del rs[0:]
for thread in threads:
thread.join()
for item in thread.items:
rs.append(item)
return rs
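# Usage sketch (illustrative only, not part of boto): a typical round trip
# against SimpleDB with this connection class, assuming AWS credentials are
# available via the environment or boto config. The domain, item and
# attribute values are made up for the example.
def _example_sdb_round_trip():
    conn = SDBConnection()
    domain = conn.create_domain('example_domain')
    conn.put_attributes(domain, 'item1', {'colour': 'blue', 'size': '7'})
    item = conn.get_attributes(domain, 'item1')
    conn.delete_domain(domain)
    return item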
|
|
"""
Virtuoso kernel for Jupyter.
Inspired by https://github.com/takluyver/bash_kernel
"""
from ipykernel.kernelbase import Kernel
from IPython.display import HTML, Image
from ipykernel import (
get_connection_file, get_connection_info, connect_qtconsole
)
import signal
from .shell import VirtuosoShell, VirtuosoExceptions
from pexpect import EOF
import colorama
import re
import time
import os
import zmq
__version__ = '0.2'
class VirtuosoKernel(Kernel):
"""
Kernel to connect virtuoso to Jupyter front-end
"""
implementation = 'virtuoso_kernel'
implementation_version = __version__
language = 'SKILL'
@property
def language_version(self):
"""
Language version
"""
return self._shell.language_version
@property
def language_info(self):
"""
Language info
"""
return {'name': 'SKILL',
'version': self.language_version,
'mimetype': 'text/x-skill',
'file_extension': '.il',
'pygments_lexer': 'skill',
'codemirror_mode': 'skill'}
@property
def banner(self):
"""
Shell's banner
"""
return self._shell.banner
_err_header = HTML('<span style="color:red; font-family:monospace">'
'Traceback:</span>')
def __init__(self, **kwargs):
super(VirtuosoKernel, self).__init__(**kwargs)
self._start_virtuoso()
colorama.init()
self._plot_re = re.compile(r'[^\w]?\w*?[pP]lot\w*\(')
self._exit_re = re.compile(r'^exit\W*')
self._cell_magic_re = re.compile(r'^%+(\S+)')
self._plt_width = 8.0
self._plt_height = 5.0
self._plt_resolution = 96
self._plt_file_name = None
# Start a new window to handle plots
#self._shell.run_raw("__win_id__ = awvCreatePlotWindow()")
def _handle_interrupt(self, signum, frame):
"""
Interrupt handler for the kernel
"""
self._shell.interrupt()
#self._shell._shell.sendline("]")
self._shell.wait_ready()
def _start_virtuoso(self):
"""
Start the virtuoso shell
"""
# Signal handlers are inherited by forked processes, and we can't
# easily reset it from the subprocess. Since kernelapp ignores SIGINT
# except in message handlers, we need to temporarily reset the SIGINT
# handler here so that virtuoso and its children are interruptible.
sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
self._shell = VirtuosoShell()
finally:
signal.signal(signal.SIGINT, sig)
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
"""
Execute the *code* block sent by the front-end.
"""
if code.strip() == '':
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
shell = self._shell
output = None
interrupted = False
exec_error = None
# Check for cell magic and handle magic
_magic_match = self._cell_magic_re.search(code)
if(_magic_match is not None):
_exec_status, _exec_message = self._handle_magics(
_magic_match.group(1), code)
if _exec_status is True:
return {'status': 'ok',
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {}}
else:
return {'status': 'error',
'execution_count': self.execution_count,
'ename': str('CellMagicError'),
'evalue': str(1),
'traceback': _exec_message['traceback']}
# Handle plots separately to display inline.
# If there is a 'plot(...)' command in the code,
# ask the shell to save a .png hardcopy at the end
# and display the image inline.
_plot_match = self._plot_re.search(code)
# If there is a plot request, clear the plot window first.
#if(_plot_match is not None):
# self._shell.run_raw("clearAll()")
if self._exit_re.search(code) is not None:
execute_content = {'execution_count': self.execution_count,
'data': {'text/plain': "Do '<ctrl>D' to exit"},
'metadata': {}}
self.send_response(self.iopub_socket, 'execute_result',
execute_content)
return {'status': 'abort', 'execution_count': self.execution_count}
try:
output = shell.run_cell(code.rstrip())
except (zmq.ZMQError, KeyboardInterrupt):
self._handle_interrupt(signal.SIGINT, None)
interrupted = True
output = shell.output
except EOF:
output = shell.output + '\r\nRestarting Virtuoso'
self._start_virtuoso()
except VirtuosoExceptions as vexcp:
exec_error = vexcp.value
output = shell.output
#if(_plot_match is not None):
# # Ask the shell to save a hardcopy
# self._plt_file_name = '/tmp/jupyter_virtuoso_%s.png' % \
# str(time.time())
# _plt_cmd = ('saveGraphImage(?window __win_id__ ?fileName "%s" '
# '?width %f ?height %f ?units "inch" '
# '?resolution %d ?resolutionUnits "pixels/in" '
# '?saveEachSubwindowSeparately nil)') %\
# (self._plt_file_name, self._plt_width, self._plt_height,
# self._plt_resolution)
# self._shell.run_raw(_plt_cmd)
# if(os.path.isfile(self._plt_file_name)):
# # Display this image inline
# _image = Image(filename=self._plt_file_name)
# display_content = {'source': "kernel",
# 'data': {'image/png':
# _image.data.encode('base64')},
# 'metadata': {}}
# self.send_response(self.iopub_socket, 'display_data',
# display_content)
# # Delete the hardcopy
# os.remove(self._plt_file_name)
if interrupted:
return {'status': 'abort', 'execution_count': self.execution_count}
if (not silent) and (output != ''):
execute_content = {'execution_count': self.execution_count,
'data': {'text/plain': output},
'metadata': {}}
self.send_response(self.iopub_socket, 'execute_result',
execute_content)
if exec_error is not None:
html_content = {'source': 'kernel', 'data': {'text/html':
self._err_header.data,
'text/plain':
(colorama.Fore.RED +
'Traceback:' +
colorama.Fore.RESET)
},
'metadata': {}}
self.send_response(self.iopub_socket, 'display_data', html_content)
# TODO: Need to get a proper traceback like in ultraTB
# tb_content = ["", 0, "", exec_error[2]]
tb_content = [exec_error[2]]
err_content = {'execution_count': self.execution_count,
'ename': str(exec_error[0]),
'evalue': str(exec_error[1]),
'traceback': tb_content}
self.send_response(self.iopub_socket, 'error', err_content)
return {'status': 'error',
'execution_count': self.execution_count,
'ename': str(exec_error[0]),
'evalue': str(exec_error[1]),
'traceback': tb_content}
else:
return {'status': 'ok',
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {}}
def do_complete(self, code, cursor_pos):
code = code[:cursor_pos]
default = {'matches': [],
'cursor_start': 0,
'cursor_end': cursor_pos,
'metadata': dict(),
'status': 'ok'}
if not code or code[-1] == ' ':
return default
_lines = code.splitlines(True)
if not _lines:
return default
_matches, _token = self._shell.get_matches(_lines[-1])
# when completing methods/attributes, _token is ''
_cstart = cursor_pos - len(_token)
if len(_matches) == 0:
return default
return {'matches': _matches,
'cursor_start': _cstart,
'cursor_end': cursor_pos,
'metadata': dict(),
'status': 'ok'}
def _html_introspection(self, info, keyword):
info = re.sub(r'(\?\w+)', r'<i>\1</i>', info, count=0)
info = re.sub(r'(%s)' % keyword, r'<b>\1</b>', info, count=0)
return HTML(info)
def do_inspect(self, code, cursor_pos, detail_level=0):
"""
Object introspection
"""
code = code[:cursor_pos]
default = {'status': 'ok',
'data': {},
'metadata': dict(),
'found': False}
if not code or code[-1] == ' ':
return default
_tokens = code.split()
if not _tokens:
return default
_token = _tokens[-1]
_info = self._shell.get_info(_token)
if len(_info) == 0:
return default
# 'text/html': HTML().data
# _html_info = self._html_introspection(_info, _token)
# _tt_info = self._pretty_introspection(_info, _token)
# return {'status': 'ok',
# 'data': {'text/html': _html_info.data,
# 'text/plain': _tt_info},
# 'metadata': dict(),
# 'found': True}
return {'status': 'ok',
'data': {'text/plain': _info},
'metadata': dict(),
'found': True}
def do_shutdown(self, restart):
"""
Shutdown the shell
"""
self._shell.shutdown(restart)
return {'restart': restart}
def _handle_magics(self, magic_code, code):
"""
Handle cell magics
"""
_exec_status = False
_content = None
err_content = None
if(magic_code == 'connect_info'):
try:
connection_file = get_connection_file()
_content = get_connection_info(unpack=False)
            except Exception as e:
                # 'error' was undefined here; log the failure and fall through
                # so the generic invalid-magic error path still returns a
                # (status, content) tuple to the caller.
                self.log.error("Could not get connection info: %r", e)
                _content = None
if(magic_code == 'history'):
_args = re.search(r'^%(\S+)(?:\s*)(\d*)', code)
self._shell.run_raw('history(' + _args.group(2) + ')')
_content = self._shell.output[:-1]
if(magic_code == 'help'):
_args = re.search(r'^%(\S+)(?:\s*)(\S*)', code)
_content = self._shell.get_info(_args.group(2))
if(magic_code == 'image'):
_args = re.search(r'^%(\S+)(?:\s*)(\S+)', code)
if _args is not None:
return self._show_image_inline(_args.group(2))
if(magic_code == 'flush'):
_content = ''
if(_content is not None):
execute_content = {'execution_count': self.execution_count,
'data': {'text/plain': _content},
'metadata': {}}
self.send_response(self.iopub_socket, 'execute_result',
execute_content)
_exec_status = True
else:
err_content = {'execution_count': self.execution_count,
'ename': str('CellMagicError'),
'evalue': str(1),
'traceback': ['Invalid cell magic']}
self.send_response(self.iopub_socket, 'error', err_content)
return _exec_status, err_content
def _show_image_inline(self, filename):
_exec_status = False
err_content = None
if(os.path.isfile(filename)):
# Display this image inline
_image = Image(filename=filename)
display_content = {'source': "kernel",
'data': {'image/png':
_image.data.encode('base64')},
'metadata': {}}
self.send_response(self.iopub_socket, 'display_data',
display_content)
_exec_status = True
else:
err_content = {'execution_count': self.execution_count,
'ename': str('CellMagicError'),
'evalue': str(2),
'traceback': ['Image not found']}
self.send_response(self.iopub_socket, 'error', err_content)
return _exec_status, err_content
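# Launch sketch (not part of the original file): wrapper kernels built on
# ipykernel are normally started through IPKernelApp, usually from a separate
# __main__ module or a kernelspec entry point; the guard below illustrates
# the standard pattern.
if __name__ == '__main__':
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=VirtuosoKernel)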
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from argparse import Namespace
from contextlib import contextmanager
from functools import wraps
import logging
import os
import shutil
import tempfile
import types
from testfixtures import LogCapture
from stevedore.extension import ExtensionManager, Extension
import haas
from ..haas_application import HaasApplication
from ..loader import Loader
from ..plugin_manager import PluginManager
from ..plugins.discoverer import Discoverer
from ..suite import TestSuite
from ..testing import unittest
from ..utils import cd
from .compat import mock
from . import builder
class MockLambda(object):
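    """Equality helper for mock assertions: compares equal to any callable
    that, when invoked like a result factory (stream, descriptions,
    verbosity), returns a unittest.TestResult. This lets tests match a
    lambda argument without relying on object identity.
    """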
def __eq__(self, other):
if isinstance(other, types.FunctionType):
result_class = other(None, None, 1)
if isinstance(result_class, unittest.TestResult):
return True
return False
def __ne__(self, other):
return not (other == self)
def with_patched_test_runner(fn):
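    """Test decorator: patches haas.haas_application.ResultCollector and
    haas.plugins.runner.BaseTestRunner, wires both into a testing
    PluginManager (alongside the real Discoverer), and appends
    (runner_class, result_cls, plugin_manager) to the wrapped test's
    positional arguments.
    """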
@wraps(fn)
def wrapper(*args):
with mock.patch('haas.haas_application.ResultCollector') as result_cls:
with mock.patch(
'haas.plugins.runner.BaseTestRunner') as runner_class:
environment_manager = ExtensionManager.make_test_instance(
[], namespace=PluginManager.ENVIRONMENT_HOOK,
)
result_handler = Extension(
'default', None, result_cls, None)
env_managers = [
(PluginManager.ENVIRONMENT_HOOK, environment_manager),
(
PluginManager.RESULT_HANDLERS,
ExtensionManager.make_test_instance(
[result_handler],
namespace=PluginManager.RESULT_HANDLERS),
),
]
runner = Extension('default', None, runner_class, None)
discoverer = Extension('default', None, Discoverer, None)
driver_managers = [
(
PluginManager.TEST_DISCOVERY,
ExtensionManager.make_test_instance(
[discoverer],
namespace=PluginManager.TEST_DISCOVERY),
),
(
PluginManager.TEST_RUNNER,
ExtensionManager.make_test_instance(
[runner], namespace=PluginManager.TEST_RUNNER),
),
]
plugin_manager = PluginManager.testing_plugin_manager(
hook_managers=env_managers,
driver_managers=driver_managers)
args_ = args + (runner_class, result_cls, plugin_manager,)
return fn(*args_)
return wrapper
class TestHaasApplication(unittest.TestCase):
def _run_with_arguments(self, runner_class, result_class, *args, **kwargs):
plugin_manager = kwargs.get('plugin_manager')
runner = mock.Mock()
runner_class.from_args.return_value = runner
result = mock.Mock()
result.wasSuccessful = mock.Mock()
result_class.return_value = result
run_method = mock.Mock(return_value=result)
runner.run = run_method
app = HaasApplication(['argv0'] + list(args))
app.run(plugin_manager=plugin_manager)
return run_method, result
@contextmanager
def _basic_test_fixture(self):
package_name = 'first'
module = builder.Module(
'test_something.py',
(
builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
),
),
)
fixture = builder.Directory(
'top',
(
builder.Package(package_name, (module,)),
),
)
tempdir = tempfile.mkdtemp(prefix='haas-tests-')
try:
fixture.create(tempdir)
top_level = os.path.join(tempdir, fixture.name)
with cd(top_level):
yield package_name
finally:
shutil.rmtree(tempdir)
@with_patched_test_runner
def test_main_default_arguments(self, runner_class, result_class,
plugin_manager):
# When
with self._basic_test_fixture() as package_name:
run, result = self._run_with_arguments(
runner_class, result_class, plugin_manager=plugin_manager)
suite = Discoverer(Loader()).discover(package_name)
# Then
self.assertEqual(runner_class.from_args.call_count, 1)
args = runner_class.from_args.call_args
args, kwargs = args
ns, dest = args
self.assertIsInstance(ns, Namespace)
self.assertEqual(dest, 'runner_')
self.assertEqual(ns.verbosity, 1)
self.assertFalse(ns.failfast)
self.assertFalse(ns.buffer)
run.assert_called_once_with(result, suite)
result.wasSuccessful.assert_called_once_with()
@with_patched_test_runner
def test_main_quiet(self, runner_class, result_class, plugin_manager):
# When
with self._basic_test_fixture() as package_name:
run, result = self._run_with_arguments(
runner_class, result_class, '-q',
plugin_manager=plugin_manager)
suite = Discoverer(Loader()).discover(package_name)
# Then
args = runner_class.from_args.call_args
args, kwargs = args
ns, dest = args
self.assertIsInstance(ns, Namespace)
self.assertEqual(ns.verbosity, 0)
self.assertFalse(ns.failfast)
self.assertFalse(ns.buffer)
run.assert_called_once_with(result, suite)
result.wasSuccessful.assert_called_once_with()
@mock.patch('sys.stdout')
@mock.patch('sys.stderr')
@mock.patch('haas.plugins.runner.BaseTestRunner')
def test_main_quiet_and_verbose_not_allowed(self,
runner_class, stdout, stderr):
with self.assertRaises(SystemExit):
self._run_with_arguments(runner_class, mock.Mock(), '-q', '-v')
@with_patched_test_runner
def test_main_verbose(self, runner_class, result_class, plugin_manager):
# When
with self._basic_test_fixture() as package_name:
run, result = self._run_with_arguments(
runner_class, result_class, '-v',
plugin_manager=plugin_manager)
suite = Discoverer(Loader()).discover(package_name)
# Then
args = runner_class.from_args.call_args
args, kwargs = args
ns, dest = args
self.assertIsInstance(ns, Namespace)
self.assertEqual(ns.verbosity, 2)
self.assertFalse(ns.failfast)
self.assertFalse(ns.buffer)
run.assert_called_once_with(result, suite)
result.wasSuccessful.assert_called_once_with()
@with_patched_test_runner
def test_main_failfast(self, runner_class, result_class, plugin_manager):
# When
with self._basic_test_fixture() as package_name:
run, result = self._run_with_arguments(
runner_class, result_class, '-f',
plugin_manager=plugin_manager)
suite = Discoverer(Loader()).discover(package_name)
# Then
args = runner_class.from_args.call_args
args, kwargs = args
ns, dest = args
self.assertIsInstance(ns, Namespace)
self.assertEqual(ns.verbosity, 1)
self.assertTrue(ns.failfast)
self.assertFalse(ns.buffer)
run.assert_called_once_with(result, suite)
result.wasSuccessful.assert_called_once_with()
@with_patched_test_runner
def test_main_buffer(self, runner_class, result_class, plugin_manager):
# When
with self._basic_test_fixture() as package_name:
run, result = self._run_with_arguments(
runner_class, result_class, '-b',
plugin_manager=plugin_manager)
suite = Discoverer(Loader()).discover(package_name)
# Then
args = runner_class.from_args.call_args
args, kwargs = args
ns, dest = args
self.assertIsInstance(ns, Namespace)
self.assertEqual(ns.verbosity, 1)
self.assertFalse(ns.failfast)
self.assertTrue(ns.buffer)
run.assert_called_once_with(result, suite)
result.wasSuccessful.assert_called_once_with()
@mock.patch('logging.getLogger')
@with_patched_test_runner
def test_with_logging(self, get_logger, runner_class, result_class,
plugin_manager):
# Given
logger = mock.Mock()
get_logger.side_effect = lambda name: logger
with self._basic_test_fixture() as package_name:
run, result = self._run_with_arguments(
runner_class, result_class, '--log-level', 'debug',
plugin_manager=plugin_manager)
suite = Discoverer(Loader()).discover(package_name)
# Then
get_logger.assert_called_once_with(haas.__name__)
logger.setLevel.assert_called_once_with(logging.DEBUG)
args = runner_class.from_args.call_args
args, kwargs = args
ns, dest = args
self.assertIsInstance(ns, Namespace)
self.assertEqual(ns.verbosity, 1)
self.assertFalse(ns.failfast)
self.assertFalse(ns.buffer)
run.assert_called_once_with(result, suite)
result.wasSuccessful.assert_called_once_with()
@mock.patch('logging.getLogger')
@with_patched_test_runner
def test_with_logging_uppercase_loglevel(self, get_logger,
runner_class, result_class,
plugin_manager):
# Given
logger = mock.Mock()
get_logger.side_effect = lambda name: logger
with self._basic_test_fixture() as package_name:
run, result = self._run_with_arguments(
runner_class, result_class, '--log-level', 'WARNING',
plugin_manager=plugin_manager)
suite = Discoverer(Loader()).discover(package_name)
# Then
get_logger.assert_called_once_with(haas.__name__)
logger.setLevel.assert_called_once_with(logging.WARNING)
args = runner_class.from_args.call_args
args, kwargs = args
ns, dest = args
self.assertIsInstance(ns, Namespace)
self.assertEqual(ns.verbosity, 1)
self.assertFalse(ns.failfast)
self.assertFalse(ns.buffer)
run.assert_called_once_with(result, suite)
result.wasSuccessful.assert_called_once_with()
@mock.patch('sys.stdout')
@mock.patch('sys.stderr')
@mock.patch('coverage.coverage')
@mock.patch('haas.plugins.runner.BaseTestRunner')
def test_with_coverage_plugin(self, runner_class, coverage,
stdout, stderr):
# When
run, result = self._run_with_arguments(
runner_class, mock.Mock(), '--with-coverage')
# Then
coverage.assert_called_once_with()
def test_failfast(self):
def test_should_cause_early_stop(self1):
self1.fail()
def test_cause_failure(self1):
print('Did I fail?')
self.fail('Failfast test did not abort test run')
cls_dict = {
'test_should_cause_early_stop': test_should_cause_early_stop,
'test_cause_failure': test_cause_failure,
}
test_cls = type(str('TestFailfast'), (unittest.TestCase,), cls_dict)
suite = TestSuite(
[
TestSuite(
[
test_cls('test_should_cause_early_stop'),
test_cls('test_cause_failure'),
],
),
TestSuite(
[
test_cls('test_cause_failure'),
],
),
],
)
self.assertEqual(suite.countTestCases(), 3)
result = unittest.TestResult()
result.failfast = True
suite.run(result)
self.assertEqual(result.testsRun, 1)
@with_patched_test_runner
def test_multiple_start_directories(self, runner_class, result_class,
plugin_manager):
# Given
module = builder.Module(
'test_something.py',
(
builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
),
),
)
fixture = builder.Directory(
'top',
(
builder.Package('first', (module,)),
builder.Package('second', (module,)),
),
)
tempdir = tempfile.mkdtemp(prefix='haas-tests-')
try:
fixture.create(tempdir)
top_level = os.path.join(tempdir, fixture.name)
# When
with cd(top_level):
run, result = self._run_with_arguments(
runner_class, result_class, '-t', top_level, 'first',
'second', plugin_manager=plugin_manager,
)
loader = Loader()
suite1 = Discoverer(loader).discover('first', top_level)
suite2 = Discoverer(loader).discover('second', top_level)
suite = loader.create_suite((suite1, suite2))
# Then
run.assert_called_once_with(result, suite)
finally:
shutil.rmtree(tempdir)
@with_patched_test_runner
def test_multiple_start_directories_non_package(self, runner_class,
result_class,
plugin_manager):
# Given
module = builder.Module(
'test_something.py',
(
builder.Class(
'TestSomething',
(
builder.Method('test_method'),
),
),
),
)
fixture = builder.Directory(
'top',
(
builder.Package('first', (module,)),
builder.Directory('second', (module,)),
),
)
tempdir = tempfile.mkdtemp(prefix='haas-tests-')
try:
fixture.create(tempdir)
top_level = os.path.join(tempdir, fixture.name)
# When/Then
with cd(top_level):
with self.assertRaises(ImportError):
run, result = self._run_with_arguments(
runner_class, result_class, '-t', top_level, 'first',
'second', plugin_manager=plugin_manager)
finally:
shutil.rmtree(tempdir)
@with_patched_test_runner
def test_logging_propagate(self, runner_class, result_class,
plugin_manager):
# Given
logger = haas.logger
message = 'test log message'
# When
with LogCapture() as root_logging:
with LogCapture(haas.__name__) as haas_logging:
logger.info(message)
# Then
root_logging.check()
haas_logging.check(
(haas.__name__, logging.getLevelName(logging.INFO), message))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.contrib.data.python.ops import gen_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops as core_gen_dataset_ops
# TODO(rohanj): Add a python class that constructs resource in the __init__
# method and provides a get_next() that calls the prefetch op.
def function_buffering_resource(string_arg,
target_device,
f,
buffer_size,
container="",
shared_name=None,
name=None):
if shared_name is None:
shared_name = ""
return gen_dataset_ops.function_buffering_resource(
string_arg=string_arg,
target_device=target_device,
shared_name=shared_name,
f=f,
buffer_size=buffer_size,
container=container,
name=name)
def function_buffering_resource_get_next(function_buffer_resource,
output_types,
name=None):
return gen_dataset_ops.function_buffering_resource_get_next(
function_buffer_resource=function_buffer_resource,
output_types=output_types,
name=name)
def function_buffering_resource_reset(function_buffer_resource, name=None):
return gen_dataset_ops.function_buffering_resource_reset(
function_buffer_resource=function_buffer_resource, name=name)
# pylint: disable=protected-access
class _PrefetchToDeviceIterator(object):
"""A replacement for @{tf.data.Iterator} that prefetches to another device.
Args:
input_dataset: The input dataset
one_shot: If true, we make a one shot iterator that's already initialized.
device: A fully specified device string where we want to prefetch to
buffer_size: Size of the prefetching buffer.
shared_name: (Optional.) If non-empty, the returned iterator will be
shared under the given name across multiple sessions that share the
same devices (e.g. when using a remote server).
Returns:
An Iterator type object.
"""
def __init__(self,
input_dataset,
one_shot,
device,
buffer_size,
shared_name=None):
self._input_dataset = input_dataset
self._get_next_call_count = 0
self._one_shot = one_shot
if shared_name is None:
shared_name = ""
if self._one_shot:
self._input_iterator = input_dataset.make_one_shot_iterator()
else:
self._input_iterator = iterator_ops.Iterator.from_structure(
self._input_dataset.output_types, self._input_dataset.output_shapes,
shared_name, self._input_dataset.output_classes)
input_iterator_handle = self._input_iterator.string_handle()
@function.Defun(dtypes.string)
def _prefetch_fn(handle):
"""Prefetches one element from `input_iterator`."""
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, self._input_iterator.output_types,
self._input_iterator.output_shapes,
self._input_iterator.output_classes)
ret = remote_iterator.get_next()
return nest.flatten(sparse.serialize_sparse_tensors(ret))
iterator_device = gen_dataset_ops.iterator_get_device(
self._input_iterator._iterator_resource)
with ops.device(device):
self._buffering_resource = function_buffering_resource(
f=_prefetch_fn,
target_device=iterator_device,
string_arg=input_iterator_handle,
buffer_size=buffer_size,
shared_name=shared_name)
if not self._one_shot:
reset_op = function_buffering_resource_reset(self._buffering_resource)
with ops.control_dependencies([reset_op]):
self._initializer = self._input_iterator.make_initializer(
self._input_dataset)
def get_next(self, name=None):
"""See @{tf.data.Iterator.get_next}."""
self._get_next_call_count += 1
if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)
flat_ret = gen_dataset_ops.function_buffering_resource_get_next(
self._buffering_resource,
output_types=nest.flatten(sparse.as_dense_types(
self.output_types, self.output_classes)), name=name)
ret = sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self.output_types, flat_ret),
self.output_types, self.output_shapes, self.output_classes)
for tensor, shape in zip(
nest.flatten(ret), nest.flatten(self.output_shapes)):
if isinstance(tensor, ops.Tensor):
tensor.set_shape(shape)
return ret
@property
def initializer(self):
if self._one_shot:
raise NotImplementedError("Can't initialize a one_shot_iterator")
return self._initializer
@property
def output_classes(self):
return self._input_dataset.output_classes
@property
def output_shapes(self):
return self._input_dataset.output_shapes
@property
def output_types(self):
return self._input_dataset.output_types
class _PrefetchToDeviceEagerIterator(iterator_ops.EagerIterator):
"""A replacement for @{tf.data.Iterator} that prefetches to another device.
Args:
input_dataset: The input dataset
one_shot: If true, we make a one shot iterator that's already initialized.
device: A fully specified device string where we want to prefetch to
buffer_size: Size of the prefetching buffer.
shared_name: (Optional.) If non-empty, the returned iterator will be
shared under the given name across multiple sessions that share the
same devices (e.g. when using a remote server).
Returns:
An Iterator type object.
"""
def __init__(self,
input_dataset,
device,
buffer_size):
with ops.device("/device:CPU:0"):
super(_PrefetchToDeviceEagerIterator, self).__init__(input_dataset)
input_iterator_handle = core_gen_dataset_ops.iterator_to_string_handle(
self._resource)
self._device = device
@function.Defun(dtypes.string)
def _prefetch_fn(handle):
"""Prefetches one element from `input_iterator`."""
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, self.output_types, self.output_shapes, self.output_classes)
ret = remote_iterator.get_next()
return nest.flatten(sparse.serialize_sparse_tensors(ret))
_prefetch_fn.add_to_graph(None)
with ops.device(device):
self._buffering_resource = function_buffering_resource(
f=_prefetch_fn,
target_device=gen_dataset_ops.iterator_get_device(self._resource),
string_arg=input_iterator_handle,
buffer_size=buffer_size,
shared_name=iterator_ops._generate_shared_name(
"function_buffer_resource"))
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
# This runs in sync mode as iterators use an error status to communicate
# that there is no more data to iterate over.
# TODO(b/77291417): Fix
with context.execution_mode(context.SYNC):
with ops.device(self._device):
ret = gen_dataset_ops.function_buffering_resource_get_next(
function_buffer_resource=self._buffering_resource,
output_types=self._flat_output_types)
return sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self._output_types, ret), self._output_types,
self._output_shapes, self._output_classes)
# pylint: enable=protected-access
class _PrefetchToDeviceDataset(dataset_ops.Dataset):
"""A `Dataset` whose iterator prefetches elements to another device."""
def __init__(self, input_dataset, device, buffer_size):
self._input_dataset = input_dataset
self._device = device
self._buffer_size = buffer_size if buffer_size is not None else 1
# The static analysis cannot tell that the eager iterator's superclass has
# a `next()` method.
# pylint: disable=non-iterator-returned
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
The returned iterator implements the Python iterator protocol and therefore
can only be used in eager mode.
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
return _PrefetchToDeviceEagerIterator(self._input_dataset, self._device,
self._buffer_size)
else:
raise RuntimeError("dataset.__iter__() is only supported when eager "
"execution is enabled.")
# pylint: enable=non-iterator-returned
def make_one_shot_iterator(self):
if context.executing_eagerly():
return _PrefetchToDeviceEagerIterator(self._input_dataset, self._device,
self._buffer_size)
else:
return _PrefetchToDeviceIterator(self._input_dataset, one_shot=True,
device=self._device,
buffer_size=self._buffer_size)
def make_initializable_iterator(self, shared_name=None):
return _PrefetchToDeviceIterator(
self._input_dataset,
one_shot=False,
device=self._device,
buffer_size=self._buffer_size,
shared_name=shared_name)
def _as_variant_tensor(self):
# TODO(mrry): Raise this error earlier (e.g. when one of the Dataset
    # transformation methods is called).
# TODO(mrry): Investigate support for chaining further transformations after
# the prefetch, including GPU support.
raise NotImplementedError("`prefetch_to_device()` must be the last "
"transformation in a dataset pipeline.")
@property
def output_types(self):
return self._input_dataset.output_types
@property
def output_shapes(self):
return self._input_dataset.output_shapes
@property
def output_classes(self):
return self._input_dataset.output_classes
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a @{tf.data.Dataset}, the
transformation must be the final `Dataset` in the input pipeline.
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
"""
def _apply_fn(dataset):
return _PrefetchToDeviceDataset(dataset, device, buffer_size)
return _apply_fn
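# Usage sketch (illustrative only, using this contrib-era API): the returned
# transformation must be the final stage of the pipeline and is attached with
# `Dataset.apply`. The device string and pipeline below are examples, not part
# of the original module.
def _example_prefetch_pipeline():
  dataset = dataset_ops.Dataset.range(100).batch(10)
  dataset = dataset.apply(prefetch_to_device("/device:GPU:0", buffer_size=1))
  # In graph mode the iterator's get_next() yields tensors already staged on
  # the target device.
  return dataset.make_one_shot_iterator().get_next()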
|
|
"""
Simulation with Analytic FDEM Solutions
=======================================
Here, the module *SimPEG.electromagnetics.analytics.FDEM* is used to simulate
harmonic electric and magnetic field for both electric and magnetic dipole
sources in a wholespace.
"""
#########################################################################
# Import modules
# --------------
#
import numpy as np
from SimPEG import utils
from SimPEG.electromagnetics.analytics.FDEM import (
ElectricDipoleWholeSpace,
MagneticDipoleWholeSpace,
)
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
#####################################################################
# Magnetic Fields for a Magnetic Dipole Source
# --------------------------------------------
#
# Here, we compute the magnetic fields for a harmonic magnetic dipole
# source in the z direction. Based on the geometry of the problem, we
# expect magnetic fields in the x and z directions, but none in the y
# direction.
#
# Defining the magnetic dipole location and frequency
source_location = np.r_[0, 0, 0]
frequency = 1e3
# Defining observation locations (avoid placing observation at source)
x = np.arange(-100.5, 100.5, step=1.0)
y = np.r_[0]
z = x
observation_locations = utils.ndgrid(x, y, z)
# Define wholespace conductivity
sig = 1e-2
# Compute the fields
Hx, Hy, Hz = MagneticDipoleWholeSpace(
observation_locations,
source_location,
sig,
frequency,
moment="Z",
fieldType="h",
mu_r=1,
eps_r=1,
)
# Plot
fig = plt.figure(figsize=(14, 5))
hxplt = Hx.reshape(x.size, z.size)
hzplt = Hz.reshape(x.size, z.size)
ax1 = fig.add_subplot(121)
absH = np.sqrt(Hx.real ** 2 + Hy.real ** 2 + Hz.real ** 2)
pc1 = ax1.pcolor(x, z, absH.reshape(x.size, z.size), norm=LogNorm())
ax1.streamplot(x, z, hxplt.real, hzplt.real, color="k", density=1)
ax1.set_xlim([x.min(), x.max()])
ax1.set_ylim([z.min(), z.max()])
ax1.set_title("Real Component")
ax1.set_xlabel("x")
ax1.set_ylabel("z")
cb1 = plt.colorbar(pc1, ax=ax1)
cb1.set_label("Re[H] (A/m)")
ax2 = fig.add_subplot(122)
absH = np.sqrt(Hx.imag ** 2 + Hy.imag ** 2 + Hz.imag ** 2)
pc2 = ax2.pcolor(x, z, absH.reshape(x.size, z.size), norm=LogNorm())
ax2.streamplot(x, z, hxplt.imag, hzplt.imag, color="k", density=1)
ax2.set_xlim([x.min(), x.max()])
ax2.set_ylim([z.min(), z.max()])
ax2.set_title("Imaginary Component")
ax2.set_xlabel("x")
ax2.set_ylabel("z")
cb2 = plt.colorbar(pc2, ax=ax2)
cb2.set_label("Im[H] (A/m)")
#####################################################################
# Electric Fields for a Magnetic Dipole Source
# --------------------------------------------
#
# Here, we compute the electric fields for a harmonic magnetic dipole
# source in the y direction. Based on the geometry of the problem, we
# expect rotational electric fields in the x and z directions, but none in the y
# direction.
#
# Defining the magnetic dipole location and frequency
source_location = np.r_[0, 0, 0]
frequency = 1e3
# Defining observation locations (avoid placing observation at source)
x = np.arange(-100.5, 100.5, step=1.0)
y = np.r_[0]
z = x
observation_locations = utils.ndgrid(x, y, z)
# Define wholespace conductivity
sig = 1e-2
# Predict the fields
Ex, Ey, Ez = MagneticDipoleWholeSpace(
observation_locations,
source_location,
sig,
frequency,
moment="Y",
fieldType="e",
mu_r=1,
eps_r=1,
)
# Plot
fig = plt.figure(figsize=(14, 5))
explt = Ex.reshape(x.size, z.size)
ezplt = Ez.reshape(x.size, z.size)
ax1 = fig.add_subplot(121)
absE = np.sqrt(Ex.real ** 2 + Ey.real ** 2 + Ez.real ** 2)
pc1 = ax1.pcolor(x, z, absE.reshape(x.size, z.size), norm=LogNorm())
ax1.streamplot(x, z, explt.real, ezplt.real, color="k", density=1)
ax1.set_xlim([x.min(), x.max()])
ax1.set_ylim([z.min(), z.max()])
ax1.set_title("Real Component")
ax1.set_xlabel("x")
ax1.set_ylabel("z")
cb1 = plt.colorbar(pc1, ax=ax1)
cb1.set_label("Re[E] (V/m)")
ax2 = fig.add_subplot(122)
absE = np.sqrt(Ex.imag ** 2 + Ey.imag ** 2 + Ez.imag ** 2)
pc2 = ax2.pcolor(x, z, absE.reshape(x.size, z.size), norm=LogNorm())
ax2.streamplot(x, z, explt.imag, ezplt.imag, color="k", density=1)
ax2.set_xlim([x.min(), x.max()])
ax2.set_ylim([z.min(), z.max()])
ax2.set_title("Imaginary Component")
ax2.set_xlabel("x")
ax2.set_ylabel("z")
cb2 = plt.colorbar(pc2, ax=ax2)
cb2.set_label("Im[E] (V/m)")
#####################################################################
# Electric Field from a Harmonic Electric Current Dipole Source
# -------------------------------------------------------------
#
# Here, we compute the electric fields for a harmonic electric current dipole
# source in the z direction. Based on the geometry of the problem, we
# expect electric fields in the x and z directions, but none in the y
# direction.
#
# Defining electric dipole location and frequency
source_location = np.r_[0, 0, 0]
frequency = 1e3
# Defining observation locations (avoid placing observation at source)
x = np.arange(-100.5, 100.5, step=1.0)
y = np.r_[0]
z = x
observation_locations = utils.ndgrid(x, y, z)
# Define wholespace conductivity
sig = 1e-2
# Predict the fields
Ex, Ey, Ez = ElectricDipoleWholeSpace(
observation_locations,
source_location,
sig,
frequency,
moment=[0, 0, 1],
fieldType="e",
mu_r=1,
eps_r=1,
)
# Plot
fig = plt.figure(figsize=(14, 5))
explt = Ex.reshape(x.size, z.size)
ezplt = Ez.reshape(x.size, z.size)
ax1 = fig.add_subplot(121)
absE = np.sqrt(Ex.real ** 2 + Ey.real ** 2 + Ez.real ** 2)
pc1 = ax1.pcolor(x, z, absE.reshape(x.size, z.size), norm=LogNorm())
ax1.streamplot(x, z, explt.real, ezplt.real, color="k", density=1)
ax1.set_xlim([x.min(), x.max()])
ax1.set_ylim([z.min(), z.max()])
ax1.set_title("Real Component")
ax1.set_xlabel("x")
ax1.set_ylabel("z")
cb1 = plt.colorbar(pc1, ax=ax1)
cb1.set_label("Re[E] (V/m)")
ax2 = fig.add_subplot(122)
absE = np.sqrt(Ex.imag ** 2 + Ey.imag ** 2 + Ez.imag ** 2)
pc2 = ax2.pcolor(x, z, absE.reshape(x.size, z.size), norm=LogNorm())
ax2.streamplot(x, z, explt.imag, ezplt.imag, color="k", density=1)
ax2.set_xlim([x.min(), x.max()])
ax2.set_ylim([z.min(), z.max()])
ax2.set_title("Imaginary Component")
ax2.set_xlabel("x")
ax2.set_ylabel("z")
cb2 = plt.colorbar(pc2, ax=ax2)
cb2.set_label("Im[E] (V/m)")
#####################################################################
# Magnetic Field from a Harmonic Electric Dipole Source
# -----------------------------------------------------
#
# Here, we compute the magnetic fields for a harmonic electric current dipole
# source in the y direction. Based on the geometry of the problem, we
# expect rotational magnetic fields in the x and z directions, but no fields
# in the y direction.
#
# Defining electric dipole location and frequency
source_location = np.r_[0, 0, 0]
frequency = 1e3
# Defining observation locations (avoid placing observation at source)
x = np.arange(-100.5, 100.5, step=1.0)
y = np.r_[0]
z = x
observation_locations = utils.ndgrid(x, y, z)
# Define wholespace conductivity
sig = 1e-2
# Predict the fields
Hx, Hy, Hz = ElectricDipoleWholeSpace(
observation_locations,
source_location,
sig,
frequency,
moment=[0, 1, 0],
fieldType="h",
mu_r=1,
eps_r=1,
)
# Plot
fig = plt.figure(figsize=(14, 5))
hxplt = Hx.reshape(x.size, z.size)
hzplt = Hz.reshape(x.size, z.size)
ax1 = fig.add_subplot(121)
absH = np.sqrt(Hx.real ** 2 + Hy.real ** 2 + Hz.real ** 2)
pc1 = ax1.pcolor(x, z, absH.reshape(x.size, z.size), norm=LogNorm())
ax1.streamplot(x, z, hxplt.real, hzplt.real, color="k", density=1)
ax1.set_xlim([x.min(), x.max()])
ax1.set_ylim([z.min(), z.max()])
ax1.set_title("Real Component")
ax1.set_xlabel("x")
ax1.set_ylabel("z")
cb1 = plt.colorbar(pc1, ax=ax1)
cb1.set_label("Re[H] (A/m)")
ax2 = fig.add_subplot(122)
absH = np.sqrt(Hx.imag ** 2 + Hy.imag ** 2 + Hz.imag ** 2)
pc2 = ax2.pcolor(x, z, absH.reshape(x.size, z.size), norm=LogNorm())
ax2.streamplot(x, z, hxplt.imag, hzplt.imag, color="k", density=1)
ax2.set_xlim([x.min(), x.max()])
ax2.set_ylim([z.min(), z.max()])
ax2.set_title("Imaginary Component")
ax2.set_xlabel("x")
ax2.set_ylabel("z")
cb2 = plt.colorbar(pc2, ax=ax2)
cb2.set_label("Im[H] (A/m)")
|
|
# encoding: utf-8
"""
Tests of the AnalogSignal class
"""
from __future__ import division
try:
import unittest2 as unittest
except ImportError:
import unittest
from neo.core.analogsignal import AnalogSignal
import numpy
import quantities as pq
import pickle
from neo.test.tools import assert_arrays_almost_equal, assert_arrays_equal
import os
V = pq.V
mV = pq.mV
uV = pq.uV
Hz = pq.Hz
kHz = pq.kHz
ms = pq.ms
nA = pq.nA
pA = pq.pA
class TestConstructor(unittest.TestCase):
def test__create_from_list(self):
data = range(10)
rate = 1000*Hz
a = AnalogSignal(data, sampling_rate=rate, units="mV")
self.assertEqual(a.t_start, 0*ms)
self.assertEqual(a.t_stop, len(data)/rate)
self.assertEqual(a[9], 9000*uV)
def test__create_from_numpy_array(self):
data = numpy.arange(10.0)
rate = 1*kHz
a = AnalogSignal(data, sampling_rate=rate, units="uV")
self.assertEqual(a.t_start, 0*ms)
self.assertEqual(a.t_stop, data.size/rate)
self.assertEqual(a[9], 0.009*mV)
def test__create_from_quantities_array(self):
data = numpy.arange(10.0) * mV
rate = 5000*Hz
a = AnalogSignal(data, sampling_rate=rate)
self.assertEqual(a.t_start, 0*ms)
self.assertEqual(a.t_stop, data.size/rate)
self.assertEqual(a[9], 0.009*V)
def test__create_from_quantities_array_with_inconsistent_units_should_raise_ValueError(self):
data = numpy.arange(10.0) * mV
self.assertRaises(ValueError, AnalogSignal, data, sampling_rate=1*kHz, units="nA")
def test__create_with_copy_true_should_return_copy(self):
data = numpy.arange(10.0) * mV
rate = 5000*Hz
a = AnalogSignal(data, copy=True, sampling_rate=rate)
data[3] = 99*mV
self.assertNotEqual(a[3], 99*mV)
def test__create_with_copy_false_should_return_view(self):
data = numpy.arange(10.0) * mV
rate = 5000*Hz
a = AnalogSignal(data, copy=False, sampling_rate=rate)
data[3] = 99*mV
self.assertEqual(a[3], 99*mV)
def test__create_with_additional_argument(self):
a = AnalogSignal([1,2,3], units="mV", sampling_rate=1*kHz, file_origin='crack.txt', ratname='Nicolas')
self.assertEqual(a.annotations, {'ratname':'Nicolas'})
# This one is universally recommended and handled by BaseNeo
self.assertEqual(a.file_origin, 'crack.txt')
# signal must be 1D - should raise Exception if not 1D
class TestProperties(unittest.TestCase):
def setUp(self):
self.t_start = [0.0*ms, 100*ms, -200*ms]
self.rates = [1*kHz, 420*Hz, 999*Hz]
self.data = [numpy.arange(10.0)*nA, numpy.arange(-100.0, 100.0, 10.0)*mV,
numpy.random.uniform(size=100)*uV]
self.signals = [AnalogSignal(D, sampling_rate=r, t_start=t)
for r,D,t in zip(self.rates, self.data, self.t_start)]
def test__t_stop(self):
for i in range(3):
self.assertEqual(self.signals[i].t_stop,
self.t_start[i] + self.data[i].size/self.rates[i])
def test__duration(self):
for signal in self.signals:
self.assertAlmostEqual(signal.duration,
signal.t_stop - signal.t_start,
delta=1e-15)
def test__sampling_period(self):
for signal, rate in zip(self.signals, self.rates):
self.assertEqual(signal.sampling_period, 1/rate)
def test__times(self):
for i in range(3):
assert_arrays_almost_equal(self.signals[i].times,
numpy.arange(self.data[i].size)/self.rates[i] + self.t_start[i],
1e-12*ms)
class TestArrayMethods(unittest.TestCase):
def setUp(self):
self.signal = AnalogSignal(numpy.arange(10.0), units="nA", sampling_rate=1*kHz)
def test__slice_should_return_AnalogSignal(self):
sub = self.signal[3:8]
self.assertIsInstance(sub, AnalogSignal)
self.assertEqual(sub.size, 5)
self.assertEqual(sub.sampling_period, self.signal.sampling_period)
self.assertEqual(sub.sampling_rate, self.signal.sampling_rate)
self.assertEqual(sub.t_start,
self.signal.t_start+3*sub.sampling_period)
self.assertEqual(sub.t_stop,
sub.t_start + 5*sub.sampling_period)
# Test other attributes were copied over (in this case, defaults)
self.assertEqual(sub.file_origin, self.signal.file_origin)
self.assertEqual(sub.name, self.signal.name)
self.assertEqual(sub.description, self.signal.description)
self.assertEqual(sub.annotations, self.signal.annotations)
sub = self.signal[3:8]
self.assertEqual(sub.file_origin, self.signal.file_origin)
self.assertEqual(sub.name, self.signal.name)
self.assertEqual(sub.description, self.signal.description)
self.assertEqual(sub.annotations, self.signal.annotations)
def test__slice_with_attributes(self):
# Set attributes, slice, test that they are copied
self.signal.file_origin = 'crack.txt'
self.signal.name = 'sig'
self.signal.description = 'a signal'
self.signal.annotate(ratname='Georges')
# slice
sub = self.signal[3:8]
# tests from other slice test
self.assertIsInstance(sub, AnalogSignal)
self.assertEqual(sub.size, 5)
self.assertEqual(sub.sampling_period, self.signal.sampling_period)
self.assertEqual(sub.sampling_rate, self.signal.sampling_rate)
self.assertEqual(sub.t_start,
self.signal.t_start+3*sub.sampling_period)
self.assertEqual(sub.t_stop,
sub.t_start + 5*sub.sampling_period)
# Test other attributes were copied over (in this case, set by user)
self.assertEqual(sub.file_origin, self.signal.file_origin)
self.assertEqual(sub.name, self.signal.name)
self.assertEqual(sub.description, self.signal.description)
self.assertEqual(sub.annotations, self.signal.annotations)
self.assertEqual(sub.annotations, {'ratname': 'Georges'})
def test__getitem_should_return_single_quantity(self):
self.assertEqual(self.signal[0], 0*nA)
self.assertEqual(self.signal[9], 9*nA)
self.assertRaises(IndexError, self.signal.__getitem__, 10)
def test_comparison_operators(self):
assert_arrays_equal(self.signal >= 5*nA,
numpy.array([False, False, False, False, False, True, True, True, True, True]))
assert_arrays_equal(self.signal >= 5*pA,
numpy.array([False, True, True, True, True, True, True, True, True, True]))
def test__comparison_with_inconsistent_units_should_raise_Exception(self):
self.assertRaises(ValueError, self.signal.__gt__, 5*mV)
def test_simple_statistics(self):
self.assertEqual(self.signal.max(), 9*nA)
self.assertEqual(self.signal.min(), 0*nA)
self.assertEqual(self.signal.mean(), 4.5*nA)
class TestEquality(unittest.TestCase):
def test__signals_with_different_data_complement_should_be_non_equal(self):
signal1 = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=1*kHz)
signal2 = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=2*kHz)
self.assertNotEqual(signal1, signal2)
class TestCombination(unittest.TestCase):
def test__adding_a_constant_to_a_signal_should_preserve_data_complement(self):
signal = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=1*kHz, name="foo")
signal_with_offset = signal + 65*mV
self.assertEqual(signal[9], 9*mV)
self.assertEqual(signal_with_offset[9], 74*mV)
for attr in "t_start", "sampling_rate":
self.assertEqual(getattr(signal, attr),
getattr(signal_with_offset, attr))
def test__adding_two_consistent_signals_should_preserve_data_complement(self):
signal1 = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=1*kHz)
signal2 = AnalogSignal(numpy.arange(10.0, 20.0), units="mV", sampling_rate=1*kHz)
sum = signal1 + signal2
assert_arrays_equal(sum, AnalogSignal(numpy.arange(10.0, 30.0, 2.0), units="mV", sampling_rate=1*kHz))
def test__adding_signals_with_inconsistent_data_complement_should_raise_Exception(self):
signal1 = AnalogSignal(numpy.arange(10.0), units="mV", t_start=0.0*ms, sampling_rate=1*kHz)
signal2 = AnalogSignal(numpy.arange(10.0), units="mV", t_start=100.0*ms, sampling_rate=0.5*kHz)
self.assertRaises(Exception, signal1.__add__, signal2)
def test__subtracting_a_constant_from_a_signal_should_preserve_data_complement(self):
signal = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=1*kHz, name="foo")
signal_with_offset = signal - 65*mV
self.assertEqual(signal[9], 9*mV)
self.assertEqual(signal_with_offset[9], -56*mV)
for attr in "t_start", "sampling_rate":
self.assertEqual(getattr(signal, attr),
getattr(signal_with_offset, attr))
def test__subtracting_a_signal_from_a_constant_should_return_a_signal(self):
signal = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=1*kHz, name="foo")
signal_with_offset = 10*mV - signal
self.assertEqual(signal[9], 9*mV)
self.assertEqual(signal_with_offset[9], 1*mV)
for attr in "t_start", "sampling_rate":
self.assertEqual(getattr(signal, attr),
getattr(signal_with_offset, attr))
def test__multiplying_a_signal_by_a_constant_should_preserve_data_complement(self):
signal = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=1*kHz, name="foo")
amplified_signal = signal * 2
self.assertEqual(signal[9], 9*mV)
self.assertEqual(amplified_signal[9], 18*mV)
for attr in "t_start", "sampling_rate":
self.assertEqual(getattr(signal, attr),
getattr(amplified_signal, attr))
def test__dividing_a_signal_by_a_constant_should_preserve_data_complement(self):
signal = AnalogSignal(numpy.arange(10.0), units="mV", sampling_rate=1*kHz, name="foo")
amplified_signal = signal/0.5
self.assertEqual(signal[9], 9*mV)
self.assertEqual(amplified_signal[9], 18*mV)
for attr in "t_start", "sampling_rate":
self.assertEqual(getattr(signal, attr),
getattr(amplified_signal, attr))
class TestFunctions(unittest.TestCase):
def test__pickle(self):
a = AnalogSignal([1,2,3,4],sampling_period=1*pq.ms,units=pq.S)
a.annotations['index'] = 2
f = open('./pickle','wb')
pickle.dump(a,f)
f.close()
f = open('./pickle','rb')
try:
b = pickle.load(f)
except ValueError:
b = None
assert_arrays_equal(a, b)
f.close()
os.remove('./pickle')
if __name__ == "__main__":
unittest.main()
|
|
# stdlib
from typing import List
# third party
import pytest
# syft absolute
import syft as sy
from syft.experimental_flags import flags
np = pytest.importorskip("numpy")
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_remote_numpy_array(
    arrow_backend: bool, root_client: sy.VirtualMachineClient
) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
# syft absolute
from syft.lib.numpy.array import SUPPORTED_BOOL_TYPES
from syft.lib.numpy.array import SUPPORTED_DTYPES
from syft.lib.numpy.array import SUPPORTED_FLOAT_TYPES
from syft.lib.numpy.array import SUPPORTED_INT_TYPES
test_arrays: List[np.ndarray] = [] # type: ignore
for dtype in SUPPORTED_DTYPES:
# test their bounds
if dtype in SUPPORTED_BOOL_TYPES:
if arrow_backend:
continue
lower = False
upper = True
mid = False
elif dtype in SUPPORTED_INT_TYPES:
bounds = np.iinfo(dtype)
lower = bounds.min
upper = bounds.max
mid = upper + lower # type: ignore
if lower == 0:
mid = round(mid / 2) # type: ignore
elif dtype in SUPPORTED_FLOAT_TYPES:
bounds = np.finfo(dtype)
lower = bounds.min
upper = bounds.max
mid = upper + lower # type: ignore
test_arrays.append(np.array([lower, mid, upper], dtype=dtype))
for test_array in test_arrays:
remote_array = test_array.send(root_client)
received_array = remote_array.get()
assert all(test_array == received_array)
assert test_array.dtype == received_array.dtype
# Attributes test
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_shape(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([1, 2, 3, 4])
x_ptr = x.send(root_client)
shape_ptr = x_ptr.shape
local_shape_val = x.shape
shape_val = shape_ptr.get()
assert shape_val == (4,)
assert local_shape_val == shape_val
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_strides(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int32)
x_ptr = x.send(root_client)
strides_ptr = x_ptr.strides
local_strides_val = x.strides
strides_val = strides_ptr.get()
assert strides_val == (20, 4)
assert local_strides_val == strides_val
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_ndim(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.zeros((2, 3, 4))
x_ptr = x.send(root_client)
ndim_ptr = x_ptr.ndim
local_ndim_val = x.ndim
ndim_val = ndim_ptr.get()
assert ndim_val == 3
assert local_ndim_val == ndim_val
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_size(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.zeros((3, 5, 2))
x_ptr = x.send(root_client)
size_ptr = x_ptr.size
local_size_val = x.size
size_val = size_ptr.get()
assert size_val == 30
assert local_size_val == size_val
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_itemsize(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([1, 2, 3], dtype=np.float64)
x_ptr = x.send(root_client)
itemsize_ptr = x_ptr.itemsize
local_itemsize_val = x.itemsize
itemsize_val = itemsize_ptr.get()
assert itemsize_val == 8
assert local_itemsize_val == itemsize_val
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_nbytes(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.zeros((3, 5, 2))
x_ptr = x.send(root_client)
nbytes_ptr = x_ptr.nbytes
local_nbytes_val = x.nbytes
nbytes_val = nbytes_ptr.get()
assert nbytes_val == 240
assert local_nbytes_val == nbytes_val
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_transpose(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([1, 2, 3])
x_ptr = x.send(root_client)
T_ptr = x_ptr.T
local_T_val = x.T
T_val = T_ptr.get()
assert (T_val == np.array([1, 2, 3])).all()
assert (local_T_val == T_val).all()
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_item(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([6, 8, 4, 7])
x_ptr = x.send(root_client)
item_ptr = x_ptr.item(3)
local_item_val = x.item(3)
item_val = item_ptr.get()
assert item_val == 7
assert local_item_val == item_val
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_byteswap(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([1, 256, 8755], dtype=np.int16)
x_ptr = x.send(root_client)
byteswap_ptr = x_ptr.byteswap(inplace=True)
local_byteswap_val = x.byteswap(inplace=True)
byteswap_val = byteswap_ptr.get()
y = np.array([256, 1, 13090], dtype=np.int16)
assert (byteswap_val == y).all()
assert (local_byteswap_val == byteswap_val).all()
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_copy(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([1, 2, 3])
x_ptr = x.send(root_client)
copy_ptr = x_ptr.copy()
local_copy = x.copy()
copy_val = copy_ptr.get()
y = np.array([1, 2, 3])
assert (copy_val == y).all()
assert (local_copy == copy_val).all()
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_view(arrow_backend: bool, root_client: sy.VirtualMachineClient) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([(1, 2, 3)])
x_ptr = x.send(root_client)
view_ptr = x_ptr.view()
local_view = x.view()
view_val = view_ptr.get()
y = np.array(
[[1, 2, 3]],
)
assert (view_val == y).all()
assert (local_view == view_val).all()
@pytest.mark.vendor(lib="numpy")
@pytest.mark.parametrize(
"dtype", [np.bool_, np.int8, np.uint8, np.int32, np.uint32, np.int64, np.uint64]
)
@pytest.mark.parametrize("arrow_backend", [False, True])
def test_serde(
arrow_backend: bool, dtype: np.dtype, root_client: sy.VirtualMachineClient
) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
x = np.array([1, 0, 3], dtype=dtype)
x_ptr = x.send(root_client)
y = x_ptr.get()
assert x.dtype == y.dtype
assert (x == y).all()
|
|
"""Module for reading, writing, compressing and converting files.
Please note, some of the functions in this module were created and tested using
VTK 5. VTK 6 introduced a number of backwards-incompatible changes, including
replacing 'SetInput()' with 'SetInputData()' and 'SetInputConnection'.
"""
import glob
import os
import csv
import vtk
import gzip
import StringIO
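# The writer helpers below pass data with 'SetInput()', which exists only in
# VTK 5. A minimal compatibility sketch (an assumption, not part of the
# original module): prefer 'SetInputData()' when the bound VTK exposes it
# (VTK 6+) and fall back to 'SetInput()' otherwise.
def _set_writer_input(writer, data):
    """Attach 'data' to a VTK writer regardless of VTK version."""
    if hasattr(writer, 'SetInputData'):
        writer.SetInputData(data)  # VTK 6 and later
    else:
        writer.SetInput(data)  # VTK 5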
def compress(path='test.vtp'):
"""Compress file with gzip."""
with open(path, 'rb') as ifile:
with gzip.open(path + '.gz', 'wb') as ofile:
ofile.writelines(ifile)
def decompress(path='test.vtp.gz'):
"""Decompress file with gzip."""
with gzip.open(path, 'rb') as ifile:
with open(path[:-3], 'wb') as ofile:
ofile.write(ifile.read())
def csv_to_list(path):
"""Convert CSV-file to a nested list of strings."""
with open(path, 'rb') as f:
reader = csv.reader(f)
return list(reader)
def csv_to_dict(path):
"""Create nested dictionary from csv file. Workaround for when pandas is
unavailable and you want to select 2D array elements with row and column
names rather than integers.
* First row is used for column names
* First column is used for row names.
* Access data from dictionary x using x['rowname']['columnname']
* Extract all row names with x.keys()
* Extract all column names with x.values()[0].keys()
Note: Expects '\n' as newline character.
"""
x = {}
with open(path, 'rb') as f:
header = f.next().strip().split(',')[1:]
for line in f:
row = line.strip().split(',')
x[row[0]] = dict(
(header[i], v) for i, v in enumerate(row[1:]))
return x
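# Hypothetical usage sketch for csv_to_dict (file name and contents are
# illustrative assumptions): for a file 'sizes.csv' containing
#     name,height,width
#     box1,10,20
# csv_to_dict('sizes.csv')['box1']['width'] returns the string '20'.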
def listdir(path, match='*', dirname=False, extension=False):
"""List all files and folders in specified directory.
Args:
path: Path to directory.
match: Specify file name pattern according to rules used by Unix
shell. For instance, 'match=*.pdf' gives you a list of names of all
the pdf-files in 'path'.
dirname (bool): Include whole path name.
extension (bool): Include file extension.
"""
items = glob.glob(os.path.join(path, match))
if not dirname:
items = [os.path.basename(item) for item in items]
if not extension:
items = [os.path.splitext(item)[0] for item in items]
return items
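# Hypothetical usage sketch for listdir (the directory and pattern are
# assumptions): base names of all VTP files in a folder, then the same list
# with full paths and extensions kept.
#     names = listdir('/path/to/surfaces', match='*.vtp')
#     paths = listdir('/path/to/surfaces', match='*.vtp', dirname=True,
#                     extension=True)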
def readvti(path):
"""Read VTI-file, i.e. image in VTK XML format."""
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName(path)
reader.Update()
return reader.GetOutput()
def readvtk(path, datatype='polydata'):
"""Read VTK-file.
Args:
path: Path to file.
type: 'imagedata', 'polydata', 'unstructeredgrid'
"""
if datatype=='imagedata':
reader = vtk.vtkStructuredPointsReader()
elif datatype=='polydata':
reader = vtk.vtkPolyDataReader()
elif datatype=='unstructeredgrid':
reader = vtk.vtkUnstructuredGridReader()
    else:
        raise ValueError("Invalid datatype: '%s'" % datatype)
reader.SetFileName(path)
reader.Update()
return reader.GetOutput()
def readvtp(path, dataarrays=True):
"""Read VTP-file, i.e. polydata in VTK XML format.
Args:
dataarrays (bool): Include point and cell data.
"""
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(path)
reader.Update()
    if not dataarrays:
for i in range(reader.GetNumberOfPointArrays()):
arrayname = reader.GetPointArrayName(i)
reader.SetPointArrayStatus(arrayname, 0)
for i in range(reader.GetNumberOfCellArrays()):
arrayname = reader.GetCellArrayName(i)
            reader.SetCellArrayStatus(arrayname, 0)
reader.Update()
return reader.GetOutput()
def readvtu(path):
"""Read VTU-file, i.e. unstructured grid in VTK XML format."""
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(path)
reader.Update()
return reader.GetOutput()
def replacestring(lines, tag, value):
"""Replace string in list of strings.
Args:
lines: List of strings.
tag: String to replace.
value: String with which to replace 'tag'.
"""
output = []
for line in lines:
line = line.replace(tag, value)
output.append(line)
return output
def writepoints(points, filename):
"""Write points as VTP-file."""
polydata = vtk.vtkPolyData()
cellarray = vtk.vtkCellArray()
for i in range(points.GetNumberOfPoints()):
cellarray.InsertNextCell(1)
cellarray.InsertCellPoint(i)
polydata.SetPoints(points)
polydata.SetVerts(cellarray)
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(filename)
writer.SetInput(polydata)
writer.Write()
def writevti(image, path):
"""Write VTI-files, i.e. images in VTK XML format."""
writer = vtk.vtkXMLImageDataWriter()
writer.SetInput(image)
writer.SetFileName(path)
writer.Write()
def writevtp(polydata, path):
"""Write VTP-files, i.e. polydata in VTK XML format."""
writer = vtk.vtkXMLPolyDataWriter()
writer.SetInput(polydata)
writer.SetFileName(path)
writer.Write()
def writevtu(grid, path):
"""Write VTU-files, i.e. unstructured grids in VTK XML format."""
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetInput(grid)
writer.SetFileName(path)
writer.Write()
#-------------------------------------------------------------------------------
# CFX
#-------------------------------------------------------------------------------
def cfx2vtp(inputfile, outputfile, surface=True, ascii=False):
"""Convert polydata exported from CFX-Post to VTP.
Args:
surface (bool): Convert surface or line polydata.
ascii (bool): Return VTP file in ASCII format.
Export surface in CFX-Post with following options:
* file extension: csv
* export geometry information: line and face connectivity
* (optional) select variable(s)
* vector display: scalar
* separator: comma space
* include header
"""
f = open(inputfile, 'rb')
# derive data size from csv file
if surface:
for i, line in enumerate(f):
if line.strip() == '[Data]':
datalinenumber = i
if line.strip() == '[Faces]':
faceslinenumber = i
lastlinenumber = i
numberofnodes = faceslinenumber - datalinenumber - 3
numberofelements = lastlinenumber - faceslinenumber - 1
else:
for i, line in enumerate(f):
if line.strip() == '[Data]':
datalinenumber = i
if line.strip() == '[Lines]':
lineslinenumber = i
numberofnodes = lineslinenumber - datalinenumber - 3
# obtain list of variables names
f.seek(0)
for i in range(datalinenumber + 2):
arrayline = f.readline()
arraynames = arrayline.strip().split(', ')
arraynames[0:3] = []
# define polydata
points = vtk.vtkPoints()
cells = vtk.vtkCellArray()
points.SetNumberOfPoints(numberofnodes)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(cells) if surface else polydata.SetLines(cells)
for arrayname in arraynames:
array = vtk.vtkDoubleArray()
array.SetName(arrayname)
array.SetNumberOfTuples(numberofnodes)
polydata.GetPointData().AddArray(array)
# parse through the rest of the file using the csv module
reader = csv.reader(f)
# assign x,y,z coordinates and variable values to points
for i in range(numberofnodes):
dataline = reader.next()
point = [float(dataline[0]), float(dataline[1]), float(dataline[2])]
points.SetPoint(i, point)
for j in range(len(arraynames)):
dataarray = polydata.GetPointData().GetArray(arraynames[j])
dataarray.SetComponent(i, 0, float(dataline[j + 3]))
# skip element '[Faces]' (or '[Lines]') in csv-file
reader.next()
reader.next()
if surface:
# obtain and set connectivity
cellids = vtk.vtkIdList()
for i in range(numberofelements):
facesline = reader.next()
cellids.Initialize()
for j in range(len(facesline)):
cellids.InsertNextId(int(facesline[j]))
cells.InsertNextCell(cellids)
else:
# obtain connectivity
connectivitylist = []
for row in reader:
row = [int(item) for item in row]
connectivitylist.append(row)
connectivitylist = filter(None, connectivitylist)
# rearrange connectivity
linecounter = 0
for i in range(len(connectivitylist)):
if i == 0:
connectivity = [connectivitylist[i]]
elif connectivitylist[i][0] == connectivitylist[i - 1][1]:
connectivity[linecounter].append(connectivitylist[i][1])
else:
connectivity.append([])
linecounter += 1
connectivity[linecounter].append(connectivitylist[i][0])
connectivity[linecounter].append(connectivitylist[i][1])
# set connectivity
cellids = vtk.vtkIdList()
for i in range(len(connectivity)):
cellids.Initialize()
for j in range(len(connectivity[i])):
cellids.InsertNextId(int(connectivity[i][j]))
cells.InsertNextCell(cellids)
f.close()
# write vtk polydata
writer = vtk.vtkXMLPolyDataWriter()
writer.SetInput(polydata)
if ascii: writer.SetDataModeToAscii()
writer.SetFileName(outputfile)
writer.Write()
def vtp2cfx(inputfile, outputfile, surface=True):
"""Convert VTP polydata to format that can be imported into CFX-Post.
Args:
surface (bool): Convert surface or line polydata.
"""
# read vtp file
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(inputfile)
reader.Update()
polydata = reader.GetOutput()
# read names of data arrays
arraynames = []
dataarrays = polydata.GetPointData()
numberofdataarrays = dataarrays.GetNumberOfArrays()
for i in range(numberofdataarrays):
array = dataarrays.GetArray(i)
arrayname = array.GetName()
arraynames.append(arrayname)
# append names of data arrays to header and write header
f = open(outputfile, 'wb')
header = "\n[Name]\nSEGMENT\n\n[Data]\nX [ m ], Y [ m ], Z [ m ]"
for i in range(numberofdataarrays):
header += ", " + arraynames[i]
header += "\n"
f.write(header)
# write values of x,y,z and data arrays row by row
for i in range(polydata.GetNumberOfPoints()):
point = polydata.GetPoint(i)
line = str(point[0]) + ', ' + str(point[1]) + ', ' + str(point[2])
for arrayname in arraynames:
array = dataarrays.GetArray(arrayname)
line += ', ' + str(array.GetComponent(i, 0))
line += '\n'
f.write(line)
# write list of connectivity
if surface:
line = '\n[Faces]\n'
f.write(line)
for i in range(polydata.GetNumberOfCells()):
cellpointids = polydata.GetCell(i).GetPointIds()
line = ''
for j in range(cellpointids.GetNumberOfIds()):
if (j > 0):
line += ', '
line += str(cellpointids.GetId(j))
line += '\n'
f.write(line)
else:
line = '\n[Lines]\n'
f.write(line)
for i in range(polydata.GetNumberOfCells()):
cellpointids = polydata.GetCell(i).GetPointIds()
line = ''
for j in range(cellpointids.GetNumberOfIds() - 1):
line += (str(cellpointids.GetId(j)) + ', ' +
str(cellpointids.GetId(j + 1)) + '\n')
f.write(line)
# add blank line to mimic exact same file structure as CFX-generated
# csv-file
line = '\n'
f.write(line)
f.close()
|
|
import pytz
from django.apps import apps
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from framework.analytics import increment_user_activity_counters
from osf.models.node_relation import NodeRelation
from osf.models.nodelog import NodeLog
from osf.models.tag import Tag
from website.exceptions import NodeStateError
from website import settings
class Versioned(models.Model):
"""A Model mixin class that saves delta versions."""
@classmethod
def _sig_pre_delete(cls, instance, *args, **kwargs):
"""dispatch the pre_delete method to a regular instance method. """
return instance.sig_pre_delete(*args, **kwargs)
@classmethod
def _sig_post_delete(cls, instance, *args, **kwargs):
"""dispatch the post_delete method to a regular instance method. """
return instance.sig_post_delete(*args, **kwargs)
@classmethod
def _sig_pre_save(cls, instance, *args, **kwargs):
"""dispatch the pre_save method to a regular instance method. """
return instance.sig_pre_save(*args, **kwargs)
@classmethod
def _sig_post_save(cls, instance, *args, **kwargs):
"""dispatch the post_save method to a regular instance method. """
return instance.sig_post_save(*args, **kwargs)
@classmethod
def connect(cls, signal):
"""Connect a django signal with this model."""
# List all signals you want to connect with here:
from django.db.models.signals import (pre_save, post_save, pre_delete, post_delete)
sig_handler = {
pre_save: cls._sig_pre_save,
post_save: cls._sig_post_save,
pre_delete: cls._sig_pre_delete,
post_delete: cls._sig_post_delete,
}[signal]
signal.connect(sig_handler, sender=cls)
class Meta:
abstract = True
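# Hedged usage sketch for Versioned.connect (the subclass is an assumption,
# not part of this module): a concrete model implements the sig_* instance
# methods and then registers itself for the Django signals it cares about.
#     from django.db.models.signals import post_save
#
#     class VersionedThing(Versioned):
#         def sig_post_save(self, *args, **kwargs):
#             pass  # record a delta version here
#
#     VersionedThing.connect(post_save)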
class Loggable(models.Model):
# TODO: This should be in the NodeLog model
def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True, request=None):
AbstractNode = apps.get_model('osf.AbstractNode')
user = None
if auth:
user = auth.user
elif request:
user = request.user
params['node'] = params.get('node') or params.get('project') or self._id
original_node = AbstractNode.load(params.get('node'))
log = NodeLog(
action=action, user=user, foreign_user=foreign_user,
params=params, node=self, original_node=original_node
)
if log_date:
log.date = log_date
log.save()
if self.logs.count() == 1:
self.date_modified = log.date.replace(tzinfo=pytz.utc)
else:
self.date_modified = self.logs.first().date
if save:
self.save()
if user and not self.is_collection:
increment_user_activity_counters(user._primary_key, action, log.date.isoformat())
return log
class Meta:
abstract = True
class Taggable(models.Model):
tags = models.ManyToManyField('Tag', related_name='%(class)s_tagged')
def add_tag(self, tag, auth=None, save=True, log=True, system=False):
if not system and not auth:
raise ValueError('Must provide auth if adding a non-system tag')
if not isinstance(tag, Tag):
tag_instance, created = Tag.all_tags.get_or_create(name=tag, system=system)
else:
tag_instance = tag
if not self.tags.filter(id=tag_instance.id).exists():
self.tags.add(tag_instance)
# TODO: Logging belongs in on_tag_added hook
if log:
self.add_tag_log(tag_instance, auth)
if save:
self.save()
self.on_tag_added(tag_instance)
return tag_instance
def add_system_tag(self, tag, save=True):
if isinstance(tag, Tag) and not tag.system:
raise ValueError('Non-system tag passed to add_system_tag')
return self.add_tag(tag=tag, auth=None, save=save, log=False, system=True)
def add_tag_log(self, *args, **kwargs):
raise NotImplementedError('Logging requires that add_tag_log method is implemented')
def on_tag_added(self, tag):
pass
class Meta:
abstract = True
class AddonModelMixin(models.Model):
# from addons.base.apps import BaseAddonConfig
settings_type = None
ADDONS_AVAILABLE = sorted([config for config in apps.get_app_configs() if config.name.startswith('addons.') and
config.label != 'base'])
class Meta:
abstract = True
@classmethod
def get_addon_key(cls, config):
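        # Derive a distinct power-of-two key from the add-on's position in
        # ADDONS_AVAILABLE (index 0 -> 2, index 1 -> 4, and so on).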
return 2 << cls.ADDONS_AVAILABLE.index(config)
@property
def addons(self):
return self.get_addons()
def get_addons(self):
return filter(None, [
self.get_addon(config.short_name)
for config in self.ADDONS_AVAILABLE
])
def get_oauth_addons(self):
# TODO: Using hasattr is a dirty hack - we should be using issubclass().
# We can't, because importing the parent classes here causes a
# circular import error.
return [
addon for addon in self.get_addons()
if hasattr(addon, 'oauth_provider')
]
def has_addon(self, addon_name, deleted=False):
return bool(self.get_addon(addon_name, deleted=deleted))
def get_addon_names(self):
return [each.short_name for each in self.get_addons()]
def get_or_add_addon(self, name, *args, **kwargs):
addon = self.get_addon(name)
if addon:
return addon
return self.add_addon(name, *args, **kwargs)
def get_addon(self, name, deleted=False):
try:
settings_model = self._settings_model(name)
except LookupError:
return None
if not settings_model:
return None
try:
settings_obj = settings_model.objects.get(owner=self)
if not settings_obj.deleted or deleted:
return settings_obj
except ObjectDoesNotExist:
pass
return None
def add_addon(self, addon_name, auth=None, override=False, _force=False):
"""Add an add-on to the node.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
        :param bool override: For shell use only; allows adding of system addons
:param bool _force: For migration testing ONLY. Do not set to True
in the application, or else projects will be allowed to have
duplicate addons!
:return bool: Add-on was added
"""
if not override and addon_name in settings.SYSTEM_ADDED_ADDONS[self.settings_type]:
return False
# Reactivate deleted add-on if present
addon = self.get_addon(addon_name, deleted=True)
if addon:
if addon.deleted:
addon.undelete(save=True)
return addon
if not _force:
return False
config = apps.get_app_config('addons_{}'.format(addon_name))
model = self._settings_model(addon_name, config=config)
ret = model(owner=self)
ret.on_add()
ret.save() # TODO This doesn't feel right
return ret
def config_addons(self, config, auth=None, save=True):
"""Enable or disable a set of add-ons.
:param dict config: Mapping between add-on names and enabled / disabled
statuses
"""
for addon_name, enabled in config.iteritems():
if enabled:
self.add_addon(addon_name, auth)
else:
self.delete_addon(addon_name, auth)
if save:
self.save()
def delete_addon(self, addon_name, auth=None, _force=False):
"""Delete an add-on from the node.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool _force: For migration testing ONLY. Do not set to True
in the application, or else projects will be allowed to delete
mandatory add-ons!
:return bool: Add-on was deleted
"""
addon = self.get_addon(addon_name)
if not addon:
return False
if self.settings_type in addon.config.added_mandatory and not _force:
raise ValueError('Cannot delete mandatory add-on.')
if getattr(addon, 'external_account', None):
addon.deauthorize(auth=auth)
addon.delete(save=True)
return True
def _settings_model(self, addon_model, config=None):
if not config:
config = apps.get_app_config('addons_{}'.format(addon_model))
return getattr(config, '{}_settings'.format(self.settings_type))
class NodeLinkMixin(models.Model):
class Meta:
abstract = True
def add_node_link(self, node, auth, save=True):
"""Add a node link to a node.
:param Node node: Node to add
:param Auth auth: Consolidated authorization
:param bool save: Save changes
:return: Created pointer
"""
# Fail if node already in nodes / pointers. Note: cast node and node
# to primary keys to test for conflicts with both nodes and pointers
# contained in `self.nodes`.
if NodeRelation.objects.filter(parent=self, child=node, is_node_link=True).exists():
raise ValueError(
'Link to node {0} already exists'.format(node._id)
)
if self.is_registration:
raise NodeStateError('Cannot add a node link to a registration')
# If a folder, prevent more than one pointer to that folder.
# This will prevent infinite loops on the project organizer.
if node.is_collection and node.linked_from.exists():
raise ValueError(
'Node link to folder {0} already exists. '
'Only one node link to any given folder allowed'.format(node._id)
)
if node.is_collection and node.is_bookmark_collection:
raise ValueError(
'Node link to bookmark collection ({0}) not allowed.'.format(node._id)
)
# Append node link
node_relation, created = NodeRelation.objects.get_or_create(
parent=self,
child=node,
is_node_link=True
)
# Add log
if hasattr(self, 'add_log'):
self.add_log(
action=NodeLog.NODE_LINK_CREATED,
params={
'parent_node': self.parent_id,
'node': self._id,
'pointer': {
'id': node._id,
'url': node.url,
'title': node.title,
'category': node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
return node_relation
add_pointer = add_node_link # For v1 compat
def rm_node_link(self, node_relation, auth):
"""Remove a pointer.
:param Pointer pointer: Pointer to remove
:param Auth auth: Consolidated authorization
"""
AbstractNode = apps.get_model('osf.AbstractNode')
node_rel = None
if isinstance(node_relation, NodeRelation):
try:
node_rel = self.node_relations.get(is_node_link=True, id=node_relation.id)
except NodeRelation.DoesNotExist:
raise ValueError('Node link does not belong to the requested node.')
elif isinstance(node_relation, AbstractNode):
try:
node_rel = self.node_relations.get(is_node_link=True, child__id=node_relation.id)
except NodeRelation.DoesNotExist:
raise ValueError('Node link does not belong to the requested node.')
if node_rel is not None:
node_rel.delete()
node = node_rel.child
# Add log
if hasattr(self, 'add_log'):
self.add_log(
action=NodeLog.POINTER_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': node._id,
'url': node.url,
'title': node.title,
'category': node.category,
},
},
auth=auth,
save=False,
)
rm_pointer = rm_node_link # For v1 compat
@property
def nodes_pointer(self):
"""For v1 compat"""
return self.linked_nodes
def get_points(self, folders=False, deleted=False):
query = self.linked_from
if not folders:
query = query.exclude(type='osf.collection')
if not deleted:
query = query.exclude(is_deleted=True)
return list(query.all())
def fork_node_link(self, node_relation, auth, save=True):
"""Replace a linked node with a fork.
:param NodeRelation node_relation:
:param Auth auth:
:param bool save:
:return: Forked node
"""
# Fail if pointer not contained in `nodes`
try:
node = self.node_relations.get(is_node_link=True, id=node_relation.id).child
except NodeRelation.DoesNotExist:
raise ValueError('Node link {0} not in list'.format(node_relation._id))
# Fork node to which current nodelink points
forked = node.fork_node(auth)
if forked is None:
raise ValueError('Could not fork node')
if hasattr(self, 'add_log'):
# Add log
self.add_log(
NodeLog.NODE_LINK_FORKED,
params={
'parent_node': self.parent_id,
'node': self._id,
'pointer': {
'id': node._id,
'url': node.url,
'title': node.title,
'category': node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
# Return forked content
return forked
fork_pointer = fork_node_link # For v1 compat
class CommentableMixin(object):
"""Abstract class that defines the interface for models that have comments attached to them."""
@property
def target_type(self):
""" The object "type" used in the OSF v2 API. E.g. Comment objects have the type 'comments'."""
raise NotImplementedError
@property
def root_target_page(self):
"""The page type associated with the object/Comment.root_target.
E.g. For a NodeWikiPage, the page name is 'wiki'."""
raise NotImplementedError
is_deleted = False
def belongs_to_node(self, node_id):
"""Check whether an object (e.g. file, wiki, comment) is attached to the specified node."""
raise NotImplementedError
def get_extra_log_params(self, comment):
"""Return extra data to pass as `params` to `Node.add_log` when a new comment is
created, edited, deleted or restored."""
return {}
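# Minimal sketch (an assumption, not part of the codebase) of what a concrete
# CommentableMixin implementation supplies: the v2 API type, the page the
# root target lives on, and a node-membership check.
#     class ExampleCommentable(CommentableMixin):
#         @property
#         def target_type(self):
#             return 'examples'
#
#         @property
#         def root_target_page(self):
#             return 'example'
#
#         def belongs_to_node(self, node_id):
#             return node_id == self.node_id  # hypothetical attribute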
|
|
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication, SessionAuthentication, MultiAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.validation import Validation
from tastypie.exceptions import BadRequest
from tastypie.utils import trailing_slash
from tastypie.cache import NoCache
from django.core.urlresolvers import resolve
from django.conf.urls import url
from django.core.validators import validate_ipv4_address, validate_ipv6_address
from django.db.models import Q
from django.contrib.auth.models import User
from main.models import Zone, Record, View, record_type_choices, zone_type_choices, ServerGroup, Server, ServerConfig
from dns.dns_system_actions import GenerateZone, SaveConfig, CloneZone
import re
class RecordValidation(Validation):
def is_valid(self, bundle, request=None):
if not bundle.data:
return {'__all__': 'No parameters passed'}
errors = {}
record_type = int(bundle.data['record_type'])
host = bundle.data['host']
answer = bundle.data['answer']
zone = bundle.data['zone']
if 'record_id' in bundle.data:
record_id = int(bundle.data['record_id'])
else:
record_id = False
view, args, kwargs = resolve(zone)
zone_pk = kwargs['pk']
record_type_text = dict(record_type_choices).get(record_type)
if self.if_duplicate(host, answer, zone_pk, record_id):
errors['duplicate'] = ['Duplicated host and answer']
if self.if_same_host(host, record_type_text, zone_pk, record_id):
errors['duplicate'] = ['Same host detected. RFC violation.']
if record_type_text == 'A':
try:
validate_ipv4_address(answer)
except:
errors['answer'] = ['Should be IPv4 address']
if not self.if_hostname(host):
errors['host'] = ['Should be valid hostname']
elif record_type_text == 'AAAA':
try:
validate_ipv6_address(answer)
except:
errors['answer'] = ['Should be IPv6 address']
if not self.if_hostname(host) or host == '@':
errors['host'] = ['Should be valid hostname']
elif record_type_text == 'CNAME':
if not self.if_fqdn(answer):
errors['answer'] = ['Should be valid FQDN']
if not self.if_hostname(host):
errors['host'] = ['Should be valid hostname']
elif record_type_text == 'NS':
if not self.if_fqdn(answer):
errors['answer'] = ['Should be valid FQDN']
if not self.if_hostname(host):
errors['host'] = ['Should be valid hostname']
elif record_type_text == 'MX':
if not self.if_fqdn(answer):
errors['answer'] = ['Should be valid FQDN']
if not self.if_hostname(host):
errors['host'] = ['Should be valid hostname']
elif record_type_text == 'PTR':
if not self.if_fqdn(answer):
errors['answer'] = ['Should be valid FQDN']
return errors
def if_fqdn(self, hostname):
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
else:
return False
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
def if_hostname(self, hostname):
if len(hostname) > 255:
return False
if hostname[-1] == ".":
return False
if hostname == '@' or hostname == '*':
return True
if re.match('^\*\..+$', hostname):
hostname = hostname.lstrip('*.')
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
def if_duplicate(self, host, answer, zone_pk, record_id):
if record_id:
rf = Record.objects.filter(~Q(pk=record_id),zone=zone_pk, host=host, answer=answer)
else:
rf = Record.objects.filter(zone=zone_pk, host=host, answer=answer)
if rf:
return True
return False
def if_same_host(self, host, record_type_text, zone_pk, record_id):
record_type_list = []
if record_type_text == 'CNAME':
record_type_list.append(self.get_record_type_id_by_text('A'))
record_type_list.append(self.get_record_type_id_by_text('CNAME'))
elif record_type_text == 'A':
record_type_list.append(self.get_record_type_id_by_text('CNAME'))
elif record_type_text == 'PTR':
record_type_list.append(self.get_record_type_id_by_text('PTR'))
else:
return False
if record_id:
rf = Record.objects.filter(~Q(pk=record_id),zone=zone_pk, host=host, record_type__in=record_type_list)
else:
rf = Record.objects.filter(zone=zone_pk, host=host, record_type__in=record_type_list)
if rf:
return True
return False
def get_record_type_id_by_text(self, record_type_text):
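        # Reverse lookup: map the human-readable record type (e.g. 'A') back
        # to its integer key in record_type_choices.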
return list(dict(record_type_choices).keys())[list(dict(record_type_choices).values()).index(record_type_text)]
class ServerResource(ModelResource):
class Meta:
queryset = Server.objects.all()
resource_name = 'server'
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
class ServerGroupResource(ModelResource):
servers = fields.ManyToManyField(ServerResource, "servers", null=False, related_name="servergroup", full=True)
class Meta:
queryset = ServerGroup.objects.all()
resource_name = 'servergroup'
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
class ServerConfigResource(ModelResource):
group = fields.ForeignKey(ServerGroupResource, "group", full=True)
class Meta:
queryset = ServerConfig.objects.all()
resource_name = 'serverconfig'
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/apply/(?P<config_id>[0-9]+)%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('apply_config'), name="api_record_apply_config"),
]
def apply_config(self, request, **kwargs):
apply_config = {}
# self.method_check(request, allowed=['get'])
config_id = kwargs['config_id']
message = []
try:
sc = SaveConfig(config_id)
message = sc.applyConfig()
except Exception as e:
raise BadRequest(str(e))
apply_config['apply'] = message
return self.create_response(request, apply_config)
class ViewResource(ModelResource):
class Meta:
queryset = View.objects.all()
resource_name = 'view'
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
class ZoneResource(ModelResource):
view = fields.ForeignKey(ViewResource, 'view', full=True)
class Meta:
queryset = Zone.objects.all()
resource_name = 'zone'
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
always_return_data = True
filtering = {
"zone": ALL,
}
max_limit = None
def dehydrate_type(self, bundle):
bundle.data['type'] = dict(zone_type_choices).get(bundle.data['type'])
return bundle.data['type']
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/clone%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('clone'), name="api_zone_clone"),
]
def clone(self, request, **kwargs):
clone = {}
message = []
data = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
try:
cz = CloneZone(data['zone_id'], data['view_id'])
message = cz.cloneZone()
except Exception as e:
raise BadRequest(str(e))
clone['clone'] = message
return self.create_response(request, clone)
class RecordResource(ModelResource):
zone = fields.ForeignKey(ZoneResource, 'zone', full=True)
class Meta:
queryset = Record.objects.all()
resource_name = 'record'
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
validation = RecordValidation()
always_return_data = True
filtering = {
"zone": ALL_WITH_RELATIONS,
"host": ALL,
"answer": ALL,
"record_type": ALL
}
max_limit = None
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/generate/(?P<zone_id>[0-9]+)%s$" %
(self._meta.resource_name, trailing_slash()),
self.wrap_view('generate'), name="api_record_generate"),
]
def generate(self, request, **kwargs):
generate = {}
self.method_check(request, allowed=['get'])
zone_id = kwargs['zone_id']
message = []
try:
gz = GenerateZone(zone_id)
gz.updateSerial()
message = gz.printZone()
except Exception as e:
raise BadRequest(str(e))
generate['generate'] = message
return self.create_response(request, generate)
def hydrate_host(self, bundle):
bundle.data['success'] = 1
        if 'host' not in bundle.data or not bundle.data['host']:
bundle.data['host'] = '@'
return bundle
def hydrate_ttl(self, bundle):
        if 'ttl' not in bundle.data or not bundle.data['ttl']:
bundle.data['ttl'] = 600
return bundle
def hydrate_record_type(self, bundle):
if 'record_type_text' in bundle.data:
try:
bundle.data['record_type'] = list(dict(record_type_choices).keys())[list(dict(record_type_choices).values()).index(bundle.data['record_type_text'])]
except:
return bundle
return bundle
def dehydrate_record_type(self, bundle):
bundle.data['record_type_text'] = dict(record_type_choices).get(bundle.data['record_type'])
return bundle.data['record_type']
class DjangoUserResource(ModelResource):
class Meta:
queryset = User.objects.all()
resource_name = "users"
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
always_return_data = True
list_allowed_methods = ['get']
excludes = ['email', 'is_active', 'password', 'last_login', '_password', 'is_staff', 'id', 'date_joined']
class RecordHistoryResource(ModelResource):
zone = fields.ForeignKey(ZoneResource, 'zone', full=True)
history_user = fields.ForeignKey(DjangoUserResource, 'history_user', full=True)
class Meta:
queryset = Record.history.all()
resource_name = 'rhistory'
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication())
authorization = DjangoAuthorization()
always_return_data = True
list_allowed_methods = ['get']
filtering = {
"zone": ALL_WITH_RELATIONS,
"host": ALL,
"answer": ALL,
"record_type": ALL
}
max_limit = 100
def dehydrate_record_type(self, bundle):
bundle.data['record_type_text'] = dict(record_type_choices).get(bundle.data['record_type'])
return bundle.data['record_type']
def dehydrate_history_date(self, bundle):
bundle.data['history_date'] = bundle.data['history_date'].strftime('%Y-%m-%d %H:%M:%S')
return bundle.data['history_date']
|
|
#!/usr/bin/python
# Copyright (C) 2014 Harun Emektar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# http://fuse.sourceforge.net/wiki/index.php/FusePython
import fuse
import HTMLParser
import stat
import errno
import urllib2
import os
from time import time, strptime
fuse.fuse_python_api = (0, 2)
class WebDirParser(HTMLParser.HTMLParser):
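    # Parses an Apache-style "Index of ..." directory listing page and
    # collects each entry name together with rough size/time metadata.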
class Stat():
def __init__(self, isDir):
if isDir:
self.st_mode = stat.S_IFDIR | 0555
self.st_size = 4096
else:
self.st_mode = stat.S_IFREG | 0444
self.st_size = 0
self.st_time = int(time())
def isDir(self):
return self.st_mode & stat.S_IFDIR
def __init__(self):
HTMLParser.HTMLParser.__init__(self);
self.path=""
self.entries={}
self._curTag=""
self._entriesStarted = False
self._lastFile = None
def handle_starttag(self, tag, attr):
self._curTag = tag
def handle_endtag(self, tag):
self._curTag = ""
if tag == "pre":
self._lastFile = None
def handle_data(self, data):
print "handle_data", data
if self._curTag == "h1":
self.path=data[len("Index of "):]
elif self._curTag == "a" and data == "Parent Directory":
self._entriesStarted = True
elif self._curTag == "a" and self._entriesStarted:
isDir = len(data.split("/")) > 1
self.entries[ data.split("/")[0] ] = WebDirParser.Stat(isDir)
self._lastFile = data.split("/")[0]
elif self._entriesStarted and self._lastFile:
attr = data.strip().split()
print attr
if len(attr) == 3:
if not self.entries[self._lastFile].isDir():
size = attr[-1]
isize = 0
if size[-1] in "KMG":
isize = float(size[0:-1])
if size[-1] == "K":
isize *= 1024
elif size[-1] == "M":
isize *= 1024 * 1024
elif size[-1] == "G":
isize *= 1024 * 1024 * 1024
isize = int(isize)
else:
isize = int(size)
self.entries[self._lastFile].st_size = isize
strtime = attr[0] + " " + attr[1]
time = strptime(strtime, '%d-%b-%Y %H:%M')
self.entries[self._lastFile].st_time = time;
class WebFSStat(fuse.Stat):
def __init__(self, isDir=True):
if isDir:
self.st_mode = stat.S_IFDIR | 0555
else:
self.st_mode = stat.S_IFREG | 0444
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 2
self.st_uid = 0
self.st_gid = 0
self.st_size = 4096
self.st_atime = int(time())
self.st_mtime = self.st_atime
self.st_ctime = self.st_atime
def isDir(self):
return self.st_mode & stat.S_IFDIR
class ResourceNotFound(Exception):
pass
class WebFSProxy():
def __init__(self, rootURL):
self._rootURL = rootURL
class WebFS(fuse.Fuse):
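    # Read-only FUSE filesystem backed by the HTTP directory listings under
    # _rootURL; directory entries are scraped with WebDirParser and file
    # contents are fetched with HTTP Range requests.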
def __init__(self, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self._rootURL = "http://old-releases.ubuntu.com/"
self._rootDirs = ("releases", "ubuntu")
self._latestDirEntries = {}
def readdir(self, path, offset):
print path, offset
dirents = [ ".", ".."]
if path == "/":
dirents += self._rootDirs
else:
url = self._rootURL + path
webDir = urllib2.urlopen(url)
if webDir.getcode() != 200 and webDir.getcode() != 301:
return -errno.ENOENT
parser = WebDirParser()
for line in webDir:
parser.feed(line)
dirents += parser.entries.keys()
self._latestDirEntries[path] = parser.entries
retEnt = []
for r in dirents:
retEnt += [ fuse.Direntry(r) ]
return retEnt
def read(self, path, size, offset):
print "reading ", path
request = urllib2.Request(self._rootURL + path, headers={"Range": "bytes=" + str(offset) + "-" + str(offset + size)})
res = urllib2.urlopen(request)
content = res.read(size)
return content
def _isDir(self, path):
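        # Issue a 1-byte Range request and treat an HTML response as a
        # directory listing; anything else is considered a regular file.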
request = urllib2.Request(self._rootURL + path, headers={"Range": "bytes=0-0"})
info = urllib2.urlopen(request)
returnCode = info.getcode()
print "return code for", path, returnCode
if returnCode != 200 and returnCode != 301 and returnCode != 206:
raise ResourceNotFound()
contentType = info.info().getheaders("Content-Type")[0]
print "content type of ", path, contentType
retval = contentType.find("html") != -1
try:
self._latestDirEntries[os.path.dirname(path)][os.path.basename(path)] = WebFSStat(retval)
except KeyError:
self._latestDirEntries[os.path.dirname(path)] = {os.path.basename(path) : WebFSStat(retval)}
return retval
def getattr(self, path):
st = WebFSStat()
if path == "/":
return st
if len(path.split("/")) == 2 and path.split("/")[1] not in self._rootDirs:
print path, "doesnt exist"
return -errno.ENOENT
        isDir = True
try:
dirlist = self._latestDirEntries[os.path.dirname(path)]
print "entry found",
isDir = dirlist[os.path.basename(path)].isDir()
st = dirlist[os.path.basename(path)]
print "isDir ", str(isDir)
except KeyError:
# figure out type
try:
isDir = self._isDir(path)
except ResourceNotFound:
return -errno.ENOENT
if not isDir:
st.st_mode = stat.S_IFREG | 0555
return st
def chmod ( self, path, mode ):
print '*** chmod', path, oct(mode)
return -errno.EPERM
def chown ( self, path, uid, gid ):
print '*** chown', path, uid, gid
return -errno.EPERM
def fsync ( self, path, isFsyncFile ):
print '*** fsync', path, isFsyncFile
return 0
def link ( self, targetPath, linkPath ):
print '*** link', targetPath, linkPath
return -errno.EPERM
def mkdir ( self, path, mode ):
print '*** mkdir', path, oct(mode)
return -errno.EPERM
def mknod ( self, path, mode, dev ):
print '*** mknod', path, oct(mode), dev
return -errno.EPERM
def open ( self, path, flags ):
if path == "/":
return 0
if len(path.split("/")) == 2 and path.split("/")[1] not in self._rootDirs:
print path, "doesnt exist"
return -errno.ENOENT
        isDir = True
        st = WebFSStat()
try:
dirlist = self._latestDirEntries[os.path.dirname(path)]
print "entry found",
isDir = dirlist[os.path.basename(path)].isDir()
st = dirlist[os.path.basename(path)]
print "isDir ", str(isDir)
except KeyError:
# figure out type
try:
isDir = self._isDir(path)
except ResourceNotFound:
return -errno.ENOENT
if not isDir:
st.st_mode = stat.S_IFREG | 0555
request = urllib2.Request(self._rootURL + path, headers={"Range": "bytes=0-0"})
info = urllib2.urlopen(request)
rng = info.info().getheaders("Content-Range")[0]
print "range::", rng
rng = rng.split("/")[1]
self._latestDirEntries[os.path.dirname(path)][os.path.basename(path)].st_size=int(rng)
return 0
def readlink ( self, path ):
print '*** readlink', path
return 0
def release ( self, path, flags ):
print '*** release', path, flags
return 0
def rename ( self, oldPath, newPath ):
print '*** rename', oldPath, newPath
return -errno.EPERM
def rmdir ( self, path ):
print '*** rmdir', path
return -errno.EPERM
def statfs ( self ):
print '*** statfs'
return 0
def symlink ( self, targetPath, linkPath ):
print '*** symlink', targetPath, linkPath
return -errno.EPERM
def truncate ( self, path, size ):
print '*** truncate', path, size
return -errno.EPERM
def unlink ( self, path ):
print '*** unlink', path
return -errno.EPERM
def utime ( self, path, times ):
print '*** utime', path, times
return -errno.EPERM
def write ( self, path, buf, offset ):
print '*** write', path, buf, offset
return -errno.EPERM
def main():
webFS = WebFS()
webFS.parse(errex=1)
webFS.main()
if __name__ == "__main__":
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from oslo.config import cfg
import six
from muranorepository.openstack.common.gettextutils import _ # noqa
from muranorepository.openstack.common import importutils
from muranorepository.openstack.common import jsonutils
from muranorepository.openstack.common import local
from muranorepository.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
msg_fmt = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""Initiates Timeout object.
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
:param rpc_method_name: The name of the rpc method being
called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
class DuplicateMessageError(RPCException):
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
msg_fmt = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
        This method must be called when the connection will no longer be used.
        It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
topic, except that it is part of a set of such consumers (the
"pool") which may run in parallel. Every pool of workers will
receive a given message, but only one worker in the pool will
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
:param proxy: The object that will handle all incoming messages.
:param pool_name: String containing the name of the pool of workers
"""
raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers.
Uses given topic from the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
def _fix_passwords(d):
"""Sanitizes the password fields in the dictionary."""
for k in d.iterkeys():
if k.lower().find('password') != -1:
d[k] = '<SANITIZED>'
elif k.lower() in SANITIZE:
d[k] = '<SANITIZED>'
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
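# Illustrative sketch (not part of the original module): what _safe_log() does to
# sensitive keys before they reach the real logging call. The capturing lambda
# and the _example_* name are made up for illustration.
def _example_safe_log():
    captured = []
    _safe_log(lambda msg, data: captured.append((msg, data)),
              'received %s',
              {'auth_token': 'secret', 'admin_password': 'hunter2', 'user': 'bob'})
    _, data = captured[0]
    assert data['auth_token'] == '<SANITIZED>'
    assert data['admin_password'] == '<SANITIZED>'
    assert data['user'] == 'bob'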
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
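# Illustrative sketch (not part of the original module): the dynamic-type trick
# used above -- subclassing the exception type at runtime so that str() shows the
# remote message/traceback -- applied to a locally defined stand-in exception.
# It works only for user-defined exceptions, which is why the code above catches
# TypeError for core Python exceptions.
def _example_remote_type_swap():
    class FakeError(Exception):
        pass
    failure = FakeError('boom')
    message = 'boom\nTraceback (most recent call last): ...'
    str_override = lambda self: message
    new_type = type('FakeError' + _REMOTE_POSTFIX, (FakeError,),
                    {'__str__': str_override, '__unicode__': str_override})
    failure.__class__ = new_type
    assert str(failure) == message
    assert isinstance(failure, FakeError)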
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
local.store.context = self
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
# TODO(russellb) This method is a bit of a nova-ism. It makes
# some assumptions about the data in the request context sent
# across rpc, while the rest of this class does not. We could get
# rid of this if we changed the nova code that uses this to
# convert the RpcContext back to its native RequestContext doing
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
context = self.deepcopy()
context.values['is_admin'] = True
context.values.setdefault('roles', [])
if 'admin' not in context.values['roles']:
context.values['roles'].append('admin')
if read_deleted is not None:
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""Encapsulates actual exception expected to be hit by a RPC proxy object.
Merely instantiating it records the current exception information, which
will be passed back to the RPC client without exceptional logging.
"""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer.
"""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
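# Illustrative sketch (not part of the original module): how a manager method
# might use the client_exceptions() decorator. The function name and ValueError
# are made up for illustration; an expected error raised inside the decorated
# function surfaces as ClientException instead of being logged as a server fault.
@client_exceptions(ValueError)
def _example_expected_failure(fail):
    if fail:
        raise ValueError('expected, caller-induced error')
    return 'ok'
# _example_expected_failure(False) returns 'ok'; _example_expected_failure(True)
# raises ClientException, whose _exc_info still carries the original ValueError.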
def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.
:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
version_parts = version.split('.')
imp_version_parts = imp_version.split('.')
if int(version_parts[0]) != int(imp_version_parts[0]): # Major
return False
if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
return False
return True
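# Illustrative sketch (not part of the original module): the rule implemented by
# version_is_compatible() is "same major version, and the requested minor must
# not exceed the implemented minor".
def _example_version_compatibility():
    assert version_is_compatible('2.3', '2.1')      # older minor requested: ok
    assert version_is_compatible('2.3', '2.3')      # exact match: ok
    assert not version_is_compatible('2.3', '2.4')  # newer minor than implemented
    assert not version_is_compatible('2.3', '1.9')  # different major version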
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
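# Illustrative sketch (not part of the original module): round-trips a payload
# through the 2.0 envelope described in the module docstring. It assumes
# jsonutils serializes plain dicts the same way the stdlib json module does.
def _example_envelope_roundtrip():
    payload = {'method': 'ping', 'args': {'x': 1}}
    envelope = serialize_msg(payload)
    assert envelope[_VERSION_KEY] == _RPC_ENVELOPE_VERSION
    assert deserialize_msg(envelope) == payload
    # A dict without the envelope keys is treated as a pre-envelope (1.0-style)
    # message and returned unchanged.
    assert deserialize_msg({'method': 'ping'}) == {'method': 'ping'}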
|
|
import re
import os
import time
import urllib
import urllib2
import urlparse
import cStringIO
import collections
import pkg_resources
from setuptools.package_index import PackageIndex
from pkgtools.pypi import PyPIXmlRpc, PyPIJson, real_name
from pyg.core import Version, args_manager
from pyg.utils import name, ext, right_egg, version_egg, is_windows
from pyg.log import logger
__all__ = ['PREFERENCES', 'ReqManager', 'get_versions', 'get_links', \
'highest_version', 'request']
## This constant holds the preferred order of file extensions (download priority)
PREFERENCES = ('.tar.gz', '.tar.bz2', '.zip', '.egg')
if is_windows():
PREFERENCES = ('.exe', '.msi') + PREFERENCES
def get_versions(req):
'''
Return all versions the given requirement can match.
    For example, if the requirement is `pyg>=0.6` it will return [0.6, 0.7].
    When a package has no files on PyPI (but has at least one release) we have
    to look for versions manually, with regular expressions.
`req` should be a Requirement object (from pyg.core).
'''
if req.is_dev:
return iter((Version('dev'),))
_version_re = r'{0}-([\d\w.-]*).*'
name = req.name
pypi = PyPIXmlRpc()
versions = map(Version, pypi.package_releases(name, True))
## Slow way: we need to search versions by ourselves
if not versions:
_vre = re.compile(_version_re.format(name), re.I)
data = request((args_manager['install']['packages_url']+'/{0}').format(name))
versions = map(Version, set(v.strip('.') for v in _vre.findall(data)))
return (v for v in versions if req.match(v))
def highest_version(req):
'''Return the highest version the given requirement can match.'''
return max(get_versions(req))
def request(url):
'''Perform a GET request to `url`.'''
return urllib2.urlopen(url).read()
def convert_bytes(bytes):
bytes = float(bytes)
if bytes >= 1099511627776:
terabytes = bytes / 1099511627776
size = '{0:.1f} Tb'.format(terabytes)
elif bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = '{0:.1f} Gb'.format(gigabytes)
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = '{0:.1f} Mb'.format(megabytes)
elif bytes >= 1024:
kilobytes = bytes / 1024
size = '{0:.1f} Kb'.format(kilobytes)
else:
size = '{0:.1f} b'.format(bytes)
return size
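# Illustrative sketch (not part of the original module): expected output of
# convert_bytes() at a few magnitudes.
def _example_convert_bytes():
    assert convert_bytes(512) == '512.0 b'
    assert convert_bytes(2048) == '2.0 Kb'
    assert convert_bytes(5 * 1048576) == '5.0 Mb'
    assert convert_bytes(3 * 1073741824) == '3.0 Gb'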
def format_time(seconds):
if seconds == '':
return ''
    hours, minutes = seconds // 3600, (seconds % 3600) // 60
    seconds -= int(3600 * hours + 60 * minutes)
if minutes:
if hours:
return '{0:02d}h {1:02d}m {2:02d}s remaining'.format(*map(int, [hours, minutes, seconds]))
return '{0:02d}m {1:02d}s remaining'.format(*map(int, [minutes, seconds]))
return '{0:02d}s remaining'.format(int(seconds))
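# Illustrative sketch (not part of the original module): expected strings from
# format_time() for a few durations, using the hour/minute decomposition above.
def _example_format_time():
    assert format_time(42) == '42s remaining'
    assert format_time(95) == '01m 35s remaining'
    assert format_time(3725) == '01h 02m 05s remaining'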
def download(url, msg, add_remaining=True):
def hook(blocks, block_size, total_size):
'''
        Callback function for `urllib.urlretrieve` that is called when the connection
        is created and then once for each block.
        Display the amount of data transferred so far and its percentage.
        Use sys.stdout.write() instead of "print,", because it allows one more
        symbol at the end of the line without a linefeed on Windows.
:param blocks: Number of blocks transferred so far.
:param block_size: Size of each block in bytes.
:param total_size: Total size of the HTTP object in bytes. Can be -1 if server doesn't return it.
'''
if block_size > total_size:
logger.info('\r{0} [100% - {1}]', msg, convert_bytes(total_size), addn=False)
return
downloaded = block_size * blocks
ratio = downloaded / float(total_size)
## When the last block makes the downloaded size greater than the total size
if ratio > 1:
ratio = 1
downloaded = total_size
## Calculate elapsed and remaining time
elapsed = func() - starttime
speed = downloaded / float(elapsed)
try:
remaining = (total_size - downloaded) / float(speed)
except ZeroDivisionError:
remaining = ''
if ratio == 1:
## When we finish the download we want this string to hide
remaining = ''
if add_remaining:
logger.info('\r{0} [{1:.0%} - {2} / {3}] {4}', msg, ratio, convert_bytes(downloaded),
convert_bytes(total_size), format_time(remaining), addn=False)
else:
logger.info('\r{0} [{1:.0%} - {2} / {3}]', msg, ratio, convert_bytes(downloaded),
convert_bytes(total_size), addn=False)
if is_windows():
## On Windows time.clock should be more precise.
func = time.clock
else:
func = time.time
starttime = func()
path = urllib.urlretrieve(url, reporthook=hook)[0]
logger.newline()
    # Open in binary mode so archive contents are not mangled on Windows.
    with open(path, 'rb') as f:
        return cStringIO.StringIO(f.read())
class ReqManager(object):
_pkg_re = re.compile('(?P<package_name>[\w][\w\d]+)-' # alphanumeric / underscore + alphanumeric / digit / underscore
'(?P<version>\d[\d\w.]+)' # digit + digit / dot / alphanumeric
'.*?' # anything
'(?P<ext>\.(?:tar\.gz|tar\.bz2|zip|egg|tgz))' # the extension
)
def __init__(self, req, pref=None):
self.req = req
self.req.name = self.name = real_name(self.req.name)
if self.req.op == '==': ## LOL
self.package_manager = PyPIJson(self.name, self.req.version)
else:
hi = highest_version(self.req)
self.req.version = hi
self.package_manager = PyPIJson(self.name, hi)
url = args_manager['install']['index_url'] + '/' + self.package_manager.URL.split('/pypi/', 1)[1]
self.package_manager.URL = url
self._set_prefs(pref)
self.downloaded_name = None
self.downloaded_version = None
def _set_prefs(self, pref):
if pref is None:
pref = PREFERENCES
pref = list(pref)
if len(pref) < len(PREFERENCES):
for p in PREFERENCES:
if p not in pref:
pref.append(p)
self.pref = pref
def _setuptools_find(self):
def _get_all(url):
match = self._pkg_re.search(url)
if match is None:
return None, None, None
return map(match.group, ('package_name', 'version', 'ext'))
def _remove_query(url):
return urlparse.urlunsplit(urlparse.urlsplit(url)[:3] + ('',) * 2)
def _get_version(filename):
            ## A bit hacky, but there is no clean solution because some packages
            ## are in the form {package_name}-{version}-{something_else}-{?pyx.y}.{ext}
            ## and we cannot predict where the version is in that mess.
_version_re = re.compile(r'[\d\w.]*')
parts = name(filename).split('-')
for part in parts:
match = _version_re.search(part)
if match is not None:
return match.group()
logger.warn('Warning: did not find any files on PyPI')
found = []
for link in get_links(str(self.req), args_manager['install']['packages_url']):
package_name = _remove_query(link).split('/')[-1]
version = _get_version(package_name)
e = ext(package_name)
if package_name is None or version is None:
package_name, version, e = _get_all(link)
found.append((version, package_name, None, link, e))
return found
def find(self):
if self.req.is_dev:
links = get_links(str(self.req))
return [('dev', self.req.name, None, link, ext(link)) for link in links]
return list(self.package_manager.find()) or self._setuptools_find()
def files(self):
files = collections.defaultdict(list)
for release in self.find():
e = release[-1]
            if e not in self.pref:
                logger.debug('debug: Skipping {0}, unknown extension', release[-2])
                continue
            files[e].append(release[:-1])
return files
def download(self, dest):
""" you can set dest to None, it will executed a try run """
if dest:
dest = os.path.abspath(dest)
files = self.files()
downloaded = []
## We need a placeholder because of the nested for loops
success = False
for p in self.pref:
if success:
break
if not files[p]:
logger.warn('{0} files not found. Continue searching...', p)
continue
for v, name, hash, url in files[p]:
if success:
break
if p == '.egg' and not right_egg(name):
logger.info('Found egg file for another Python version: {0}. Continue searching...', version_egg(name))
continue
if dest:
try:
data = download(url, 'Retrieving data for {0}'.format(self.name)).getvalue()
except (urllib2.URLError, urllib2.HTTPError) as e:
logger.debug('urllib2 error: {0}', e.args)
continue
if not data:
logger.debug('debug: Request failed')
continue
if not os.path.exists(dest):
os.makedirs(dest)
try:
# Fix for packages with no version in the name
if '-' not in name:
name = '{0}-{1}{2}'.format(name, v, p)
logger.info('Writing data into {0}', name)
                        with open(os.path.join(dest, name), 'wb') as f:
                            f.write(data)
except (IOError, OSError):
logger.debug('debug: Error while writing data')
continue
downloaded.append({'url': url, 'hash': hash})
logger.success('{0} downloaded successfully', self.name)
success = True
self.downloaded_name = name
self.downloaded_version = v
return downloaded
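# Illustrative sketch (not part of the original module): what ReqManager._pkg_re
# extracts from a typical sdist filename.
def _example_pkg_re():
    match = ReqManager._pkg_re.search('pyg-0.7.tar.gz')
    assert match.group('package_name') == 'pyg'
    assert match.group('version') == '0.7'
    assert match.group('ext') == '.tar.gz'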
class PygPackageIndex(PackageIndex):
'''
    Pyg's own PackageIndex, derived from setuptools' one. This PackageIndex does
    not download any files; it only crawls the index looking for links available
    for download.
'''
def __init__(self, *a, **k):
PackageIndex.__init__(self, *a, **k)
self.urls = set()
def _download_to(self, url, filename):
self.urls.add(url)
return
def download(self, spec, tmpdir=None):
self.urls.add(spec)
return
def get_links(package, index_url=None):
## Correction for standard installations when index_url looks standard
## http://pypi.python.org/pypi.
if index_url is None:
index_url = args_manager['install']['packages_url']
logger.info('Looking for packages at {0}', index_url)
urls = set()
package_index = PygPackageIndex(index_url)
req = pkg_resources.Requirement.parse(str(package))
for source in (True, False):
package_index.fetch_distribution(req, None, force_scan=True, \
source=source, develop_ok=False)
for url in package_index.urls:
## PackageIndex looks for local distributions too, and we
## don't want that.
if url.startswith(('http', 'https')):
urls.add(urlparse.urldefrag(url)[0])
return urls
## OLD! We are using Json to interoperate with pypi.
## We use it only if we don't find any files with the Json API
## UPDATE: Now we use PyPIJson (from pkgtools) in combination with get_links
## (from setuptools).
##
## Old link finder we used to retrieve packages' links.
## Now we use setuptools' PackageIndex and PyPI Json API.
## (above)
##
##
#######################################################################
#
#class LinkFinder(object):
#
# INDEX = None
# FILE = r'href\s?=\s?("|\')(?P<file>.*{0}-{1}\.(?:tar\.gz|tar\.bz2|zip|egg))(?:\1)'
# LINK = r'<a\s?href="(?P<href>[^"]+)"\srel="(?P<rel>[^"]+)">(?P<version>[\d\.]+[\w^.]+)(?P<name>[^\<]+)</a><br/>'
# SIMPLE_LINK = r'<a\shref="(?P<href>[^"]+)">(?P<name>[^<]+)</a>'
#
# def __init__(self, package_name, index=None):
# self.package_name = package_name
# if index is None:
# index = 'http://pypi.python.org/simple/'
# if not index.endswith('/'):
# index += '/'
# self.INDEX = index
#
# def _check_link(self, link, version):
# '''
# Check whether the link is good or not. The link must satisfy the following conditions:
#
# * It have to end with a right extension (.tar.gz, .tar.bz2, .zip, or .egg).
# * It have to be the newest (i.e. the version must be the one specified).
# '''
#
# base = link.split('/')[-1]
# e = ext(base)
# if e not in ('.tar.gz', '.tar.bz2', '.zip', '.egg'):
# return False
# return '{0}-{1}{2}'.format(self.package_name, version, e) == base
#
# def find_best_link(self):
# data = request('{0}{1}'.format(self.INDEX, self.package_name))
# d = {}
# for href, name in re.compile(self.SIMPLE_LINK).findall(data):
# e = ext(name)
# if e in ('.tar', '.tar.gz', '.tar.bz2', '.zip'):
# version = name.split('-')[-1][:-len(e)]
# elif e in ('.exe', '.msi'):
# version = '.'.join(name.split('-')[-2].split('.')[:-1])
# else:
# try:
# version = pkg_resources.Distribution.from_filename(name).version
# except ValueError:
# logger.debug('debug: Failed to find version for {0}, continuing...', name)
# continue
# if not href.startswith('http'):
# href = '/'.join([self.INDEX, self.package_name, href])
# d[Version(version)] = href
# for href, rel, version, name in re.compile(self.LINK).findall(data):
# if rel == 'download':
# d[Version(version)] = href
#
# ## Find highest version and returns its link
# try:
# v = max(d)
# return v, d[v]
# except ValueError:
# return None, None
#
# def find_files(self, url, version):
# url = url + '/'[:not url.endswith('/')]
# base = '{0}://{1}/'.format(*urlparse.urlparse(url)[:2])
# logger.info('Reading {0}', url)
#
# ## This is horrible, but there is no alternative...
# ## We cannot use standard regex because on external sites HTML can be
# ## different and we would run up against problems.
# data = request(url).split('</a>')
# links = set()
# for item in data:
# if 'href' in item:
# i = item.index('href="')
# item = item[i + 6:]
# link = item[:item.index('">')]
# if not link.startswith('http'):
# link = base + link
# links.add(link)
# return [l for l in links if self._check_link(l, version)]
#
# def find(self):
# version, link = self.find_best_link()
# if version is None:
# raise PygError('Error: did not find any files')
# link = urlparse.urldefrag(link)[0]
# if ext(link) in PREFERENCES:
# return [link]
# return self.find_files(link, version)
|
|
# -*- coding: utf-8 -*-
import logging
import httplib as http
import math
from itertools import islice
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException, ValidationValueError
from framework import status
from framework.utils import iso8601format
from framework.mongo import StoredObject
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo.utils import from_mongo, get_or_http_error
from website import language
from website.util import paths
from website.util import rubeus
from website.exceptions import NodeStateError
from website.project import clean_template_name, new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public,
must_be_contributor,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
)
from website.util.permissions import ADMIN, READ, WRITE
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, get_pointer_parent, NodeUpdateError
from website.project.forms import NewNodeForm
from website.models import Node, Pointer, WatchConfig, PrivateLink
from website import settings
from website.views import _render_nodes, find_dashboard
from website.profile import utils
from website.project import new_folder
from website.util.sanitize import strip_html
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def edit_node(auth, node, **kwargs):
post_data = request.json
edited_field = post_data.get('name')
value = strip_html(post_data.get('value', ''))
if edited_field == 'title':
try:
node.set_title(value, auth=auth)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
elif edited_field == 'description':
node.set_description(value, auth=auth)
node.save()
return {'status': 'success'}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
user = auth.user
data = request.get_json()
title = strip_html(data.get('title'))
title = title.strip()
category = data.get('category', 'project')
template = data.get('template')
description = strip_html(data.get('description'))
new_project = {}
if template:
original_node = Node.load(template)
changes = {
'title': title,
'category': category,
'template_node': original_node,
}
if description:
changes['description'] = description
project = original_node.use_as_template(
auth=auth,
changes={
template: changes,
}
)
else:
try:
project = new_node(category, title, user, description)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
new_project = _view_project(project, auth)
return {
'projectUrl': project.url,
'newNode': new_project['node'] if new_project else None
}, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
new_node = node.use_as_template(
auth=auth,
changes=dict(),
)
return {'url': new_node.url}, http.CREATED, None
##############################################################################
# New Folder
##############################################################################
@must_be_valid_project
@must_be_logged_in
def folder_new_post(auth, node, **kwargs):
user = auth.user
title = request.json.get('title')
if not node.is_folder:
raise HTTPError(http.BAD_REQUEST)
folder = new_folder(strip_html(title), user)
folders = [folder]
try:
_add_pointers(node, folders, auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {
'projectUrl': '/dashboard/',
}, http.CREATED
@collect_auth
def add_folder(auth, **kwargs):
data = request.get_json()
node_id = data.get('node_id')
node = get_or_http_error(Node, node_id)
user = auth.user
title = strip_html(data.get('title'))
if not node.is_folder:
raise HTTPError(http.BAD_REQUEST)
folder = new_folder(
title, user
)
folders = [folder]
try:
_add_pointers(node, folders, auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}, 201, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
form = NewNodeForm(request.form)
user = auth.user
if form.validate():
try:
node = new_node(
title=strip_html(form.title.data),
user=user,
category=form.category.data,
parent=node,
)
except ValidationValueError as e:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long=e.message)
)
message = (
'Your component was created successfully. You can keep working on the component page below, '
'or return to the <u><a href="{url}">Project Page</a></u>.'
).format(url=node.url)
status.push_status_message(message, 'info')
return {
'status': 'success',
}, 201, None, node.url
else:
status.push_errors_to_status(form.errors)
raise HTTPError(http.BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
user = auth.user
prompts = node.callback('before_fork', user=user)
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_FORK_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
prompts = []
for addon in node.get_addons():
if 'node' in addon.config.configs:
if addon.to_json(auth.user)['addon_full_name']:
prompts.append(addon.to_json(auth.user)['addon_full_name'])
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def node_fork_page(auth, node, **kwargs):
if settings.DISK_SAVING_MODE:
raise HTTPError(
http.METHOD_NOT_ALLOWED,
redirect_url=node.url
)
try:
fork = node.fork_node(auth)
except PermissionsError:
raise HTTPError(
http.FORBIDDEN,
redirect_url=node.url
)
return fork.url
@must_be_valid_project
@must_be_contributor_or_public
def node_registrations(auth, node, **kwargs):
return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_contributor_or_public
def node_forks(auth, node, **kwargs):
return _view_project(node, auth, primary=True)
@must_be_valid_project
@must_be_logged_in
@must_be_contributor
def node_setting(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
addons_enabled = []
addon_enabled_settings = []
for addon in node.get_addons():
addons_enabled.append(addon.config.short_name)
if 'node' in addon.config.configs:
config = addon.to_json(auth.user)
# inject the MakoTemplateLookup into the template context
# TODO inject only short_name and render fully client side
config['template_lookup'] = addon.config.template_lookup
addon_enabled_settings.append(config)
addon_enabled_settings = sorted(addon_enabled_settings, key=lambda addon: addon['addon_full_name'].lower())
ret['addon_categories'] = settings.ADDON_CATEGORIES
ret['addons_available'] = sorted([
addon
for addon in settings.ADDONS_AVAILABLE
if 'node' in addon.owners
and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node']
], key=lambda addon: addon.full_name.lower())
ret['addons_enabled'] = addons_enabled
ret['addon_enabled_settings'] = addon_enabled_settings
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
ret['addon_js'] = collect_node_config_js(node.get_addons())
ret['comments'] = {
'level': node.comment_level,
}
ret['categories'] = Node.CATEGORY_MAP
ret['categories'].update({
'project': 'Project'
})
return ret
def collect_node_config_js(addons):
"""Collect webpack bundles for each of the addons' node-cfg.js modules. Return
the URLs for each of the JS modules to be included on the node addons config page.
:param list addons: List of node's addon config records.
"""
js_modules = []
for addon in addons:
js_path = paths.resolve_addon_path(addon.config, 'node-cfg.js')
if js_path:
js_modules.append(js_path)
return js_modules
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
node.config_addons(request.json, auth)
@must_be_valid_project
@must_have_permission(READ)
def node_contributors(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
ret['contributors'] = utils.serialize_contributors(node.contributors, node)
ret['adminContributors'] = utils.serialize_contributors(node.admin_contributors, node, admin=True)
return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
comment_level = request.json.get('commentLevel')
if not comment_level:
node.comment_level = None
elif comment_level in ['public', 'private']:
node.comment_level = comment_level
else:
raise HTTPError(http.BAD_REQUEST)
node.save()
##############################################################################
# View Project
##############################################################################
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
def view_project(auth, node, **kwargs):
primary = '/api/v1' not in request.path
ret = _view_project(node, auth, primary=primary)
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
# Collect the URIs to the static assets for addons that have widgets
ret['addon_widget_js'] = list(collect_addon_js(
node,
filename='widget-cfg.js',
config_entry='widget'
))
ret.update(rubeus.collect_addon_assets(node))
return ret
# Expand/Collapse
@must_be_valid_project
@must_be_contributor_or_public
def expand(auth, node, **kwargs):
node.expand(user=auth.user)
return {}, 200, None
@must_be_valid_project
@must_be_contributor_or_public
def collapse(auth, node, **kwargs):
node.collapse(user=auth.user)
return {}, 200, None
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
"""Reorders the components in a project's component list.
:param-json list new_list: List of strings that include node IDs and
node type delimited by ':'.
"""
# TODO(sloria): Change new_list parameter to be an array of objects
# {
# 'newList': {
# {'key': 'abc123', 'type': 'node'}
# }
# }
new_list = [
tuple(n.split(':'))
for n in request.json.get('new_list', [])
]
nodes_new = [
StoredObject.get_collection(schema).load(key)
for key, schema in new_list
]
valid_nodes = [
n for n in node.nodes
if not n.is_deleted
]
deleted_nodes = [
n for n in node.nodes
if n.is_deleted
]
if len(valid_nodes) == len(nodes_new) and set(valid_nodes) == set(nodes_new):
node.nodes = nodes_new + deleted_nodes
node.save()
return {}
logger.error('Got invalid node list in reorder components')
raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics(auth, node, **kwargs):
if not (node.can_edit(auth) or node.is_public):
raise HTTPError(http.FORBIDDEN)
return _view_project(node, auth, primary=True)
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_before_set_public(node, **kwargs):
prompt = node.callback('before_make_public')
anonymous_link_warning = any(private_link.anonymous for private_link in node.private_links_active)
if anonymous_link_warning:
prompt.append('Anonymized view-only links <b>DO NOT</b> anonymize '
'contributors after a project or component is made public.')
return {
'prompts': prompt
}
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
permissions = kwargs.get('permissions')
if permissions is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.set_privacy(permissions, auth)
except NodeStateError as e:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=e.message
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.watch(watch_config)
except ValueError: # Node is already being watched
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': len(node.watchconfig__watched)
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.unwatch(watch_config)
except ValueError: # Node isn't being watched
raise HTTPError(http.BAD_REQUEST)
return {
'status': 'success',
'watchCount': len(node.watchconfig__watched)
}
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(auth, node, **kwargs):
'''View for toggling watch mode for a node.'''
# TODO: refactor this, watch_post, unwatch_post (@mambocab)
user = auth.user
watch_config = WatchConfig(
node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False)
)
try:
if user.is_watching(node):
user.unwatch(watch_config)
else:
user.watch(watch_config)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': len(node.watchconfig__watched),
'watched': user.is_watching(node)
}
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
    # node.update() checks the key list node.WRITABLE_WHITELIST, which only allows
    # contributors with write permission to modify the category, title, and description.
try:
return {
'updated_fields': {
key: getattr(node, key)
for key in
node.update(request.get_json(), auth=auth)
}
}
except NodeUpdateError as e:
raise HTTPError(400, data=dict(
message_short="Failed to update attribute '{0}'".format(e.key),
message_long=e.reason
))
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
"""Remove component, and recursively remove its children. If node has a
parent, add log and redirect to parent; else redirect to user dashboard.
"""
try:
node.remove_node(auth)
except NodeStateError as e:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Error',
'message_long': 'Could not delete component: ' + e.message
},
)
node.save()
message = '{} deleted'.format(
node.project_or_component.capitalize()
)
status.push_status_message(message)
parent = node.parent_node
if parent and parent.can_view(auth):
redirect_url = node.node__parent[0].url
else:
redirect_url = '/dashboard/'
return {
'url': redirect_url,
}
@must_have_permission(ADMIN)
@must_not_be_registration
def delete_folder(auth, node, **kwargs):
"""Remove folder node
"""
if node is None:
raise HTTPError(http.BAD_REQUEST)
if not node.is_folder or node.is_dashboard:
raise HTTPError(http.BAD_REQUEST)
try:
node.remove_node(auth)
except NodeStateError as e:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Error',
'message_long': 'Could not delete component: ' + e.message
},
)
return {}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
link_id = request.json['private_link_id']
try:
link = PrivateLink.load(link_id)
link.is_deleted = True
link.save()
except ModularOdmException:
raise HTTPError(http.NOT_FOUND)
# TODO: Split into separate functions
def _render_addon(node):
widgets = {}
configs = {}
js = []
css = []
for addon in node.get_addons():
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, user):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = node.get_wiki_page('home', None)
if not node.has_permission(user, 'write'):
return has_wiki and wiki_page and wiki_page.html(node)
else:
return has_wiki
def _view_project(node, auth, primary=False):
"""Build a JSON object containing everything needed to render
project.view.mako.
"""
user = auth.user
parent = node.parent_node
if user:
dashboard = find_dashboard(user)
dashboard_id = dashboard._id
in_dashboard = dashboard.pointing_at(node._primary_key) is not None
else:
in_dashboard = False
dashboard_id = ''
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
anonymous = has_anonymous_link(node, auth)
widgets, configs, js, css = _render_addon(node)
redirect_url = node.url + '?view_only=None'
# Before page load callback; skip if not primary call
if primary:
for addon in node.get_addons():
messages = addon.before_page_load(node, user) or []
for message in messages:
status.push_status_message(message, dismissible=False)
data = {
'node': {
'id': node._primary_key,
'title': node.title,
'category': node.category_display,
'category_short': node.category,
'node_type': node.project_or_component,
'description': node.description or '',
'url': node.url,
'api_url': node.api_url,
'absolute_url': node.absolute_url,
'redirect_url': redirect_url,
'display_absolute_url': node.display_absolute_url,
'update_url': node.api_url_for('update_node'),
'in_dashboard': in_dashboard,
'is_public': node.is_public,
'is_archiving': node.archiving,
'date_created': iso8601format(node.date_created),
'date_modified': iso8601format(node.logs[-1].date) if node.logs else '',
'tags': [tag._primary_key for tag in node.tags],
'children': bool(node.nodes),
'is_registration': node.is_registration,
'is_retracted': node.is_retracted,
'pending_retraction': node.pending_retraction,
'retracted_justification': getattr(node.retraction, 'justification', None),
'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
'pending_embargo': node.pending_embargo,
'registered_from_url': node.registered_from.url if node.is_registration else '',
'registered_date': iso8601format(node.registered_date) if node.is_registration else '',
'root_id': node.root._id,
'registered_meta': [
{
'name_no_ext': from_mongo(meta),
'name_clean': clean_template_name(meta),
}
for meta in node.registered_meta or []
],
'registration_count': len(node.node__registrations),
'is_fork': node.is_fork,
'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
'fork_count': len(node.forks),
'templated_count': len(node.templated_list),
'watched_count': len(node.watchconfig__watched),
'private_links': [x.to_json() for x in node.private_links_active],
'link': view_only_link,
'anonymous': anonymous,
'points': len(node.get_points(deleted=False, folders=False)),
'piwik_site_id': node.piwik_site_id,
'comment_level': node.comment_level,
'has_comments': bool(getattr(node, 'commented', [])),
'has_children': bool(getattr(node, 'commented', False)),
'identifiers': {
'doi': node.get_identifier_value('doi'),
'ark': node.get_identifier_value('ark'),
},
},
'parent_node': {
'exists': parent is not None,
'id': parent._primary_key if parent else '',
'title': parent.title if parent else '',
'category': parent.category_display if parent else '',
'url': parent.url if parent else '',
'api_url': parent.api_url if parent else '',
'absolute_url': parent.absolute_url if parent else '',
'registrations_url': parent.web_url_for('node_registrations') if parent else '',
'is_public': parent.is_public if parent else '',
'is_contributor': parent.is_contributor(user) if parent else '',
'can_view': parent.can_view(auth) if parent else False
},
'user': {
'is_contributor': node.is_contributor(user),
'is_admin_parent': parent.is_admin_parent(user) if parent else False,
'can_edit': (node.can_edit(auth)
and not node.is_registration),
'has_read_permissions': node.has_permission(user, 'read'),
'permissions': node.get_permissions(user) if user else [],
'is_watching': user.is_watching(node) if user else False,
'piwik_token': user.piwik_token if user else '',
'id': user._id if user else None,
'username': user.username if user else None,
'fullname': user.fullname if user else '',
'can_comment': node.can_comment(auth),
'show_wiki_widget': _should_show_wiki_widget(node, user),
'dashboard_id': dashboard_id,
},
'badges': _get_badge(user),
# TODO: Namespace with nested dicts
'addons_enabled': node.get_addon_names(),
'addons': configs,
'addon_widgets': widgets,
'addon_widget_js': js,
'addon_widget_css': css,
'node_categories': Node.CATEGORY_MAP,
}
return data
def _get_badge(user):
if user:
badger = user.get_addon('badges')
if badger:
return {
'can_award': badger.can_award,
'badges': badger.get_badges_json()
}
return {}
def _get_children(node, auth, indent=0):
children = []
for child in node.nodes_primary:
if not child.is_deleted and child.can_edit(auth):
children.append({
'id': child._primary_key,
'title': child.title,
'indent': indent,
'is_public': child.is_public,
'parent_id': child.parent_id,
})
children.extend(_get_children(child, auth, indent + 1))
return children
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
data = {
'node': {
'absolute_url': node.absolute_url,
'private_links': [x.to_json() for x in node.private_links_active],
}
}
return data
@collect_auth
@must_be_valid_project
def get_editable_children(auth, node, **kwargs):
if not node.can_edit(auth):
return
children = _get_children(node, auth)
return {
'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
'children': children,
}
def _get_user_activity(node, auth, rescale_ratio):
# Counters
total_count = len(node.logs)
# Note: It's typically much faster to find logs of a given node
# attached to a given user using node.logs.find(...) than by
# loading the logs into Python and checking each one. However,
# using deep caching might be even faster down the road.
if auth.user:
ua_count = node.logs.find(Q('user', 'eq', auth.user)).count()
else:
ua_count = 0
non_ua_count = total_count - ua_count # base length of blue bar
# Normalize over all nodes
try:
ua = ua_count / rescale_ratio * 100
except ZeroDivisionError:
ua = 0
try:
non_ua = non_ua_count / rescale_ratio * 100
except ZeroDivisionError:
non_ua = 0
return ua_count, ua, non_ua
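# Illustrative sketch (not part of the original module): the normalization above
# with plain numbers, independent of any Node or auth objects. With 20 of 40
# logs authored by the user and a rescale_ratio of 80, both the user bar and the
# "other contributors" bar come out at 25% of the rescale baseline.
def _example_activity_normalization():
    total_count, ua_count, rescale_ratio = 40, 20, 80.0
    ua = ua_count / rescale_ratio * 100
    non_ua = (total_count - ua_count) / rescale_ratio * 100
    assert (ua, non_ua) == (25.0, 25.0)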
@must_be_valid_project
def get_recent_logs(node, **kwargs):
logs = list(reversed(node.logs._to_primary_keys()))[:3]
return {'logs': logs}
def _get_summary(node, auth, rescale_ratio, primary=True, link_id=None, show_path=False):
# TODO(sloria): Refactor this or remove (lots of duplication with _view_project)
summary = {
'id': link_id if link_id else node._id,
'primary': primary,
'is_registration': node.is_registration,
'is_fork': node.is_fork,
'is_retracted': node.is_retracted,
'pending_retraction': node.pending_retraction,
'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
'pending_embargo': node.pending_embargo,
'archiving': node.archiving,
}
if node.can_view(auth):
summary.update({
'can_view': True,
'can_edit': node.can_edit(auth),
'primary_id': node._id,
'url': node.url,
'primary': primary,
'api_url': node.api_url,
'title': node.title,
'category': node.category,
'node_type': node.project_or_component,
'is_registration': node.is_registration,
'anonymous': has_anonymous_link(node, auth),
'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_registration
else None,
'nlogs': None,
'ua_count': None,
'ua': None,
'non_ua': None,
'addons_enabled': node.get_addon_names(),
'is_public': node.is_public,
'parent_title': node.parent_node.title if node.parent_node else None,
'parent_is_public': node.parent_node.is_public if node.parent_node else False,
'show_path': show_path
})
if rescale_ratio:
ua_count, ua, non_ua = _get_user_activity(node, auth, rescale_ratio)
summary.update({
'nlogs': len(node.logs),
'ua_count': ua_count,
'ua': ua,
'non_ua': non_ua,
})
else:
summary['can_view'] = False
# TODO: Make output format consistent with _view_project
return {
'summary': summary,
}
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_summary(auth, node, **kwargs):
rescale_ratio = kwargs.get('rescale_ratio')
if rescale_ratio is None and request.args.get('rescale_ratio'):
try:
rescale_ratio = float(request.args.get('rescale_ratio'))
except (TypeError, ValueError):
raise HTTPError(http.BAD_REQUEST)
primary = kwargs.get('primary')
link_id = kwargs.get('link_id')
show_path = kwargs.get('show_path', False)
return _get_summary(
node, auth, rescale_ratio, primary=primary, link_id=link_id, show_path=show_path
)
@must_be_contributor_or_public
def get_children(auth, node, **kwargs):
user = auth.user
if request.args.get('permissions'):
perm = request.args['permissions'].lower().strip()
nodes = [
each
for each in node.nodes
if perm in each.get_permissions(user) and not each.is_deleted
]
else:
nodes = [
each
for each in node.nodes
if not each.is_deleted
]
return _render_nodes(nodes, auth)
@must_be_contributor_or_public
def get_folder_pointers(auth, node, **kwargs):
if not node.is_folder:
return []
nodes = [
each.resolve()._id
for each in node.nodes
if each is not None and not each.is_deleted and not each.primary
]
return nodes
@must_be_contributor_or_public
def get_forks(auth, node, **kwargs):
return _render_nodes(nodes=node.forks, auth=auth)
@must_be_contributor_or_public
def get_registrations(auth, node, **kwargs):
registrations = [n for n in node.node__registrations if not n.is_deleted] # get all registrations, including archiving
return _render_nodes(registrations, auth)
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
""" creata a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [Node.load(node_id) for node_id in node_ids]
has_public_node = any(node.is_public for node in nodes)
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
if anonymous and has_public_node:
status.push_status_message(
'Anonymized view-only links <b>DO NOT</b> '
'anonymize contributors of public project or component.'
)
return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
new_name = request.json.get('value', '')
private_link_id = request.json.get('pk', '')
private_link = PrivateLink.load(private_link_id)
if private_link:
private_link.name = new_name
private_link.save()
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
title = node.title
if node.is_registration:
title += ' (registration)'
first_author = node.visible_contributors[0]
return {
'id': node._id,
'title': title,
'firstAuthor': first_author.family_name or first_author.given_name or first_author.full_name,
'etal': len(node.visible_contributors) > 1,
}
@must_be_logged_in
def search_node(auth, **kwargs):
"""
"""
# Get arguments
node = Node.load(request.json.get('nodeId'))
include_public = request.json.get('includePublic')
size = float(request.json.get('size', '5').strip())
page = request.json.get('page', 0)
query = request.json.get('query', '').strip()
start = (page * size)
if not query:
return {'nodes': []}
# Build ODM query
title_query = Q('title', 'icontains', query)
not_deleted_query = Q('is_deleted', 'eq', False)
visibility_query = Q('contributors', 'eq', auth.user)
no_folders_query = Q('is_folder', 'eq', False)
if include_public:
visibility_query = visibility_query | Q('is_public', 'eq', True)
odm_query = title_query & not_deleted_query & visibility_query & no_folders_query
# Exclude current node from query if provided
if node:
nin = [node._id] + node.node_ids
odm_query = (
odm_query &
Q('_id', 'nin', nin)
)
nodes = Node.find(odm_query)
count = nodes.count()
pages = math.ceil(count / size)
return {
'nodes': [
_serialize_node_search(each)
for each in islice(nodes, start, start + size)
if each.contributors
],
'total': count,
'pages': pages,
'page': page
}
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def move_pointers(auth):
"""Move pointer from one node to another node.
"""
from_node_id = request.json.get('fromNodeId')
to_node_id = request.json.get('toNodeId')
pointers_to_move = request.json.get('pointerIds')
if from_node_id is None or to_node_id is None or pointers_to_move is None:
raise HTTPError(http.BAD_REQUEST)
from_node = Node.load(from_node_id)
to_node = Node.load(to_node_id)
if to_node is None or from_node is None:
raise HTTPError(http.BAD_REQUEST)
for pointer_to_move in pointers_to_move:
pointer_id = from_node.pointing_at(pointer_to_move)
pointer_node = Node.load(pointer_to_move)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
from_node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
from_node.save()
try:
_add_pointers(to_node, [pointer_node], auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}, 200, None
@collect_auth
def add_pointer(auth):
"""Add a single pointer to a node using only JSON parameters
"""
to_node_id = request.json.get('toNodeID')
pointer_to_move = request.json.get('pointerID')
if not (to_node_id and pointer_to_move):
raise HTTPError(http.BAD_REQUEST)
pointer = Node.load(pointer_to_move)
to_node = Node.load(to_node_id)
try:
_add_pointers(to_node, [pointer], auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
"""Add pointers to a node.
"""
node_ids = request.json.get('nodeIds')
if not node_ids:
raise HTTPError(http.BAD_REQUEST)
nodes = [
Node.load(node_id)
for node_id in node_ids
]
try:
_add_pointers(node, nodes, auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
# TODO: since this is a DELETE request, it shouldn't use the request body;
# put the pointer id in the URL instead
pointer_id = request.json.get('pointerId')
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer_from_folder(auth, node, pointer_id, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer_id = node.pointing_at(pointer_id)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointers_from_folder(auth, node, **kwargs):
"""Remove multiple pointers from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
pointer_ids = request.json.get('pointerIds')
if pointer_ids is None:
raise HTTPError(http.BAD_REQUEST)
for pointer_id in pointer_ids:
pointer_id = node.pointing_at(pointer_id)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
"""Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
or not present in `nodes`.
"""
pointer_id = request.json.get('pointerId')
pointer = Pointer.load(pointer_id)
if pointer is None:
# TODO: Change this to 404?
raise HTTPError(http.BAD_REQUEST)
try:
node.fork_pointer(pointer, auth=auth, save=True)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
def abbrev_authors(node):
lead_author = node.visible_contributors[0]
ret = lead_author.family_name or lead_author.given_name or lead_author.fullname
if len(node.visible_contributor_ids) > 1:
ret += ' et al.'
return ret
def serialize_pointer(pointer, auth):
node = get_pointer_parent(pointer)
if node.can_view(auth):
return {
'id': node._id,
'url': node.url,
'title': node.title,
'authorShort': abbrev_authors(node),
}
return {
'url': None,
'title': 'Private Component',
'authorShort': 'Private Author(s)',
}
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
"""View that returns the pointers for a project."""
# exclude folders
return {'pointed': [
serialize_pointer(each, auth)
for each in node.pointed
if not get_pointer_parent(each).is_folder
]}
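# Sketch of the response shape produced by get_pointed(), following
# serialize_pointer() above (the values are hypothetical):
#
#   {"pointed": [
#       {"id": "xyz34", "url": "/xyz34/", "title": "Shared project",
#        "authorShort": "Smith et al."},
#       {"url": None, "title": "Private Component",
#        "authorShort": "Private Author(s)"}
#   ]}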
|
|
"""
NetLogger interactions with the Python logging module.
"""
__rcsid__ = "$Id: logutil.py 772 2008-05-23 22:59:22Z dang $"
import logging
import logging.handlers
import optparse
import os
import sys
import time
import traceback
import types
from Pegasus.netlogger import nlapi
from Pegasus.netlogger.nlapi import Level
from Pegasus.netlogger.version import *
# extra logging levels
TRACE = logging.DEBUG - 1
# Top-level qualified name for netlogger
PROJECT_NAMESPACE = "netlogger"
# Global holder of the "correct" NetLogger class
# to use when instantiating new loggers
_logger_class = None
def setLoggerClass(clazz):
"""Set the class used by NetLogger logging"""
global _logger_class
_logger_class = clazz
# consistent with new naming style
set_logger_class = setLoggerClass
def get_logger(filename):
"""
Return a NetLogger logger with a qualified name based on the provided
filename. This method is intended to be called by scripts and
modules by passing in their own __file__ as filename after already
having initialized the logging module via the NL OptionParser or
some equivalent action.
If the logger name starts with a '.', it will be taken as-is, with
the leading '.' stripped.
Otherwise, the logger will be rooted at PROJECT_NAMESPACE.
Parameters:
filename - The full filename of the NL script or module requesting
a logger, i.e. __file__
"""
if filename == "":
qname = ""
elif filename[0] == ".":
qname = filename
else:
qname = ".".join(_modlist(filename))
return _logger(qname)
def get_root_logger():
"""Return root for all NetLogger loggers."""
return _logger("") # logging.getLogger(PROJECT_NAMESPACE)
def _logger(qualname):
"""
Return a logger based on the provided qualified name
Prepend PROJECT_NAMESPACE if not already there, unless
qualified name starts with a '.'.
"""
# Mess with qualified name
if not qualname:
qualname = PROJECT_NAMESPACE
elif qualname[0] == ".":
qualname = qualname[1:]
elif not qualname.startswith(PROJECT_NAMESPACE):
qualname = PROJECT_NAMESPACE + "." + qualname
# Swap in "my" logger class, create logger, swap back out
orig_class = logging.getLoggerClass()
logging.setLoggerClass(_logger_class)
logger = logging.getLogger(qualname)
logging.setLoggerClass(orig_class)
# Return "my" new logger instance
return logger
def _modlist(path):
"""
Return a list of module names based on the path provided. The
expected path list will be rooted at either "netlogger" or
"scripts" so won't contain either as one of the module names. Any
trailing python extension is also trimmed.
"""
if path == "/":
return []
head, tail = os.path.split(path)
# ignore python extensions
if tail.endswith(".py") or tail.endswith(".pyc"):
tail = os.path.splitext(tail)[0]
# stop if at top of source tree
if tail in ("netlogger", "scripts"):
return []
# stop if at root of path
if head == "" or head == ".":
return [tail]
# otherwise continue
return _modlist(head) + [tail]
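# Worked example of the name mapping performed by _modlist() and get_logger(),
# assuming a hypothetical module file inside the source tree:
#
#   _modlist("/src/netlogger/analysis/loader.py")  ->  ["analysis", "loader"]
#   get_logger("/src/netlogger/analysis/loader.py")
#       -> logger with qualified name "netlogger.analysis.loader"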
class DoesLogging:
"""Mix-in class that creates the attribute 'log', setting its qualified
name to the name of the module and class.
"""
def __init__(self, name=None):
if name is None:
if self.__module__ != "__main__":
name = "{}.{}".format(self.__module__, self.__class__.__name__)
else:
name = self.__class__.__name__
self.log = _logger(name)
# cache whether log is debug or higher in a flag to
# lower overhead of debugging statements
self._dbg = self.log.isEnabledFor(logging.DEBUG)
self._trace = self.log.isEnabledFor(TRACE)
class BPLogger(logging.Logger):
"""Logger class that writes Best-Practices formatted logs.
Usage:
The arguments are not exactly the same as for the Logger in
the logging module. Instead they consist of an event name
and keywords that are name=value pairs for the event contents.
An exception to this is the exc() or exception() method,
which takes an Exception instance as its second argument
in addition to the event name.
Example:
log.info("my.event", value=88.7, units="seconds")
# output
# ts=2009-07-24T20:18:04.775650Z event=netlogger.my.event level=INFO units=seconds value=88.700000
"""
def __init__(self, qualname):
self._qualname = qualname
self._format = nlapi.Log(newline=False, level=nlapi.Level.ALL)
logging.Logger.__init__(self, qualname)
def set_meta(self, **kw):
"""Set metadata to be logged with every event, e.g.
an identifier or host name.
"""
self._format.setMeta(None, **kw)
def log(self, level, nl_level, event, exc_info=None, **kwargs):
ts = time.time()
if self._qualname:
event = self._qualname + "." + event
# replace '__' with '.'
kwargs = {key.replace("__", "."): value for key, value in kwargs.items()}
# format as BP
msg = self._format(event, ts, nl_level, **kwargs)
logging.Logger.log(self, level, msg, exc_info=exc_info)
def trace(self, event, **kwargs):
if self.isEnabledFor(TRACE):
self.log(TRACE, Level.TRACE, event, **kwargs)
def debug(self, event, **kwargs):
if self.isEnabledFor(logging.DEBUG):
self.log(logging.DEBUG, Level.DEBUG, event, **kwargs)
def info(self, event, **kwargs):
self.log(logging.INFO, Level.INFO, event, **kwargs)
def warning(self, event, **kwargs):
self.log(logging.WARN, Level.WARN, event, **kwargs)
warn = warning
def error(self, event, **kwargs):
self.log(logging.ERROR, Level.ERROR, event, **kwargs)
def critical(self, event, **kwargs):
self.log(logging.CRITICAL, Level.FATAL, event, **kwargs)
def exception(self, event, err, **kwargs):
estr = traceback.format_exc()
estr = " | ".join(e.strip() for e in estr.split("\n"))
self.log(
logging.ERROR,
Level.ERROR,
event,
msg=str(err),
status=-1,
traceback=estr,
**kwargs,
)
exc = exception
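# Minimal sketch of set_meta() combined with the BP line format shown in the
# class docstring (the "host" field is hypothetical; exact field ordering and
# value formatting are determined by nlapi.Log):
#
#   log = _logger("my")
#   log.set_meta(host="worker-1")
#   log.info("event", value=88.7)
#   # -> ts=...Z event=netlogger.my.event level=INFO host=worker-1 value=88.700000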
class BPSysLogger(BPLogger):
"""This is a hack that prepends a header to the
output of BPLogger in order to work-around some bug with the
Python SysLogHandler and Ubuntu rsylog that otherwise splits
out the first section of the timestamp as part of the header.
"""
header = "netlogger" # header prefix
def __init__(self, qualname):
BPLogger.__init__(self, qualname)
self._orig_format = self._format
self._format = self.syslog_format
self._hdr = self.header + ": "
def set_meta(self, **kw):
"""See set_meta() in superclass.
Repeated here because superclass method accesses a protected
attribute that was modified in the constructor.
"""
self._orig_format.setMeta(None, **kw)
def flush(self):
self._orig_format.flush()
def syslog_format(self, *arg, **kw):
return self._hdr + self._orig_format(*arg, **kw)
###############################################
## Set BPLogger as default logging class
###############################################
setLoggerClass(BPLogger)
class PrettyBPLogger(BPLogger):
"""Logger class that writes 'pretty' Best-Practices formatted logs.
This is a variation on BP format. Stack traces logged with the
method exc() or exception() will be in their original form.
Usage:
See Usage notes for BPLogger.
Example:
log.info("my.event", value=88.7, units="seconds")
# output
# 2009-07-24T20:18:04.716913Z INFO netlogger.my.event - units=seconds,value=88.7
"""
def __init__(self, qualname):
BPLogger.__init__(self, qualname)
self._format = nlapi.Log(newline=False, level=nlapi.Level.ALL, pretty=True)
def exception(self, event, err, **kwargs):
tbstr = traceback.format_exc()
self.log(logging.ERROR, Level.ERROR, event, traceback=tbstr, **kwargs)
exc = exception
class RawBPLogger(logging.Logger):
"""Logger class that does not modify the message, just leaves
it as a 'raw' dictionary. This is useful for network communication
that is just going to pickle the event anyways.
"""
def log(self, level, nl_level, event, exc_info=None, **kwargs):
ts = time.time()
if self._qualname:
event = self._qualname + "." + event
# replace '__' with '.'
kwargs = {key.replace("__", "."): value for key, value in kwargs.items()}
# build msg dictionary
msg = {"event": event, "ts": ts, "level": nl_level}
msg.update(kwargs)
# 'write' out
logging.Logger.log(self, level, msg, exc_info=exc_info)
class FakeBPLogger(logging.Logger):
def __init__(self, qualname):
logging.Logger.__init__(self, qualname)
def set_meta(self, **kw):
pass
def log(self, level, nl_level, event, **kwargs):
pass
def trace(self, event, **kwargs):
pass
def debug(self, event, **kwargs):
pass
def info(self, event, **kwargs):
pass
def warning(self, event, **kwargs):
pass
warn = warning
def error(self, event, **kwargs):
pass
def critical(self, event, **kwargs):
pass
def exception(self, event, err, **kwargs):
pass
exc = exception
def profile(func):
"""decorator for start,end event function profiling with netlogger."""
if os.getenv("NETLOGGER_ON", False) in ("off", "0", "no", "false", "", False):
return func
if not isinstance(func, types.FunctionType):
return func
if func.__module__ == "__main__":
f = func.__globals__["__file__"] or "unknown"
event = "%s" % os.path.splitext(os.path.basename(f))[0]
log = _logger("script")
log.set_meta(file=f, pid=os.getpid(), ppid=os.getppid(), gpid=os.getgid())
else:
event = "%s" % func.__name__
log = _logger("%s" % func.__module__)
log.set_meta(pid=os.getpid(), ppid=os.getppid(), gpid=os.getgid())
def nl_profile_func(*args, **kw):
log.debug("%s.start" % event)
try:
v = func(*args, **kw)
except Exception:
log.error("%s.end" % event)
raise
log.debug("%s.end" % event)
return v
return nl_profile_func
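# Usage sketch for the @profile decorator (the function below is hypothetical;
# NETLOGGER_ON must be set to something other than "off"/"0"/"no"/"false"/""
# for the wrapping to take effect):
#
#   @profile
#   def load_data(path):
#       ...
#
# Each call then emits "<event>.start" and "<event>.end" DEBUG events, or an
# "<event>.end" ERROR event if the call raises.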
def profile_result(func):
"""decorator for start,end event function profiling with netlogger.
return value is logged as result.
"""
if os.getenv("NETLOGGER_ON", False) in ("off", "0", "no", "false", "", False):
return func
if not isinstance(func, types.FunctionType):
return func
if func.__module__ == "__main__":
f = func.__globals__["__file__"] or "unknown"
event = "%s" % os.path.splitext(os.path.basename(f))[0]
log = _logger("script")
log.set_meta(file=f, pid=os.getpid(), ppid=os.getppid(), gpid=os.getgid())
else:
event = "%s" % func.__name__
log = _logger("%s" % func.__module__)
log.set_meta(pid=os.getpid(), ppid=os.getppid(), gpid=os.getgid())
def nl_profile_func(*args, **kw):
log.debug("%s.start" % event)
try:
v = func(*args, **kw)
except Exception:
log.error("%s.end" % event)
raise
log.debug("%s.end" % event, result=v)
return v
return nl_profile_func
class Profiler(type):
"""metaclass that will wrap all user defined methods with start and end event logs.
Currently wrapping only instancemethod type.
Variables:
profiler_skip_methods: list of methods profiler will skip
profile_skip_all: profiler will not wrap any methods
_log: Logging object to use with class
Usage:
class MyClass:
__metaclass__ = Profiler
profiler_skip_methods = ['__init__', 'getsecret']
profiler_skip_all = False
"""
profiler_skip_methods = ["__init__"]
profiler_skip_all = False
@staticmethod
def __profile_method(func):
"""decorator for start,end event method profiling with netlogger
skips any classmethod or staticmethod types.
"""
if not isinstance(func, types.FunctionType):
return func
event = "%s" % func.__name__
def nl_profile_method(self, *args, **kw):
self._log.debug("%s.start" % event)
try:
v = func(self, *args, **kw)
except Exception:
self._log.error("%s.end" % event)
raise
self._log.debug("%s.end" % event)
return v
return nl_profile_method
def __new__(cls, classname, bases, classdict):
if os.getenv("NETLOGGER_ON", False) in ("off", "0", "no", "false", "", False):
setLoggerClass(FakeBPLogger)
classdict["_log"] = _logger(
"{}.{}".format(classdict["__module__"], classname)
)
return type.__new__(cls, classname, bases, classdict)
classdict["_log"] = log = _logger(
"{}.{}".format(classdict["__module__"], classname)
)
log.set_meta(pid=os.getpid(), ppid=os.getppid(), gpid=os.getgid())
keys = []
if not classdict.get("profiler_skip_all", cls.profiler_skip_all):
keys = [
k
for k in classdict.keys()
if isinstance(classdict[k], types.FunctionType)
and k
not in classdict.get("profiler_skip_methods", cls.profiler_skip_methods)
]
for k in keys:
classdict[k] = cls.__profile_method(classdict[k])
return type.__new__(cls, classname, bases, classdict)
class MethodProfiler(Profiler):
"""metaclass that will wrap all user defined methods with start and end event logs.
Currently wrapping only instancemethod type.
"""
profiler_skip_all = False
class BasicProfiler(Profiler):
"""metaclass does not wrap methods with 'start' and 'end' tags, to do that use 'Profiler'.
Useful for classes where one only wants to do 'precision' logging.
"""
profiler_skip_all = True
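# Note: the Usage block in the Profiler docstring uses the Python 2
# "__metaclass__" attribute. Under Python 3 the metaclass is passed as a class
# keyword instead; a minimal sketch (class and method names are hypothetical):
#
#   class Worker(metaclass=MethodProfiler):
#       def run(self):
#           ...   # wrapped: emits "run.start" / "run.end" DEBUG events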
class OptionParser(optparse.OptionParser):
"""Set logging 'tude for scripts.
Usage:
parser = NLOptionParser(..., can_be_daemon=True/False)
# add rest of options to 'parser'...
# This next line sets up logging as a side-effect
parser.parse_args()
# rest of program ..
*******************************************************
| Pseudo-code description of logic to determine which |
| types of logs to produce, and where to send them |
*******************************************************
Variables:
D - daemon mode [True | False]
L - log file [Missing | Empty | filename]
Logic:
if (D) then
case (L = Missing)
error!
case (L = Empty)
error!
case (L = filename)
stderr -> filename
BP logs -> filename
else
case (L = Missing)
stderr -> stderr
Pretty logs -> stderr
case (L = Empty)
stderr -> stderr
BP logs -> stderr
case (L = filename)
stderr -> stderr
BP logs -> filename
*******************************************************
| Pseudo-code description of logic for verbosity |
*******************************************************
Variables:
V - verbosity [0 .. N]
Q - quiet [True | False]
Logic:
if (Q) then
case (V > 0)
error!
else
set verbosity -> OFF
else
case V = 0
set verbosity -> WARN
case V = 1
set verbosity -> INFO
case V = 2
set verbosity -> DEBUG
case V >= 3
set verbosity -> TRACE
"""
# Attribute (option parser 'dest') names
DEST_LOG = "log_file"
DEST_VERBOSE = "verbose"
DEST_QUIET = "quiet"
DEST_DAEMON = "daemon"
DEST_ROT = "log_rotate"
# Option names, by attribute
OPTIONS = {
DEST_LOG: ("-L", "--log"),
DEST_VERBOSE: ("-v", "--verbose"),
DEST_QUIET: ("-q", "--quiet"),
DEST_DAEMON: (None, "--daemon"),
DEST_ROT: ("-R", "--logrotate"),
}
# Verbosity (number of -v's) to logging level
VBMAP = (logging.WARN, logging.INFO, logging.DEBUG, TRACE)
def __init__(self, can_be_daemon=False, **kwargs):
"""Add logging-related command-line options
to an option parser.
Parameters:
can_be_daemon - if True, add an option for daemonizing
kwargs - additional keywords for OptionParser.
The 'version' argument will override the default
version
"""
if "version" not in kwargs:
version_str = "%prog, NetLogger Toolkit version: {}\n {}".format(
NL_VERSION, NL_CREATE_DATE,
)
version_str += "\n\n" + NL_COPYRIGHT
kwargs["version"] = version_str
optparse.OptionParser.__init__(self, **kwargs)
self._dmn = can_be_daemon
def _add_options(self):
group = optparse.OptionGroup(self, "Logging options")
if self._dmn:
self.add_option(
self.OPTIONS[self.DEST_DAEMON][1],
action="store_true",
dest=self.DEST_DAEMON,
default=False,
help="run in daemon mode",
)
logfile_default = "required"
else:
logfile_default = "default=stderr"
group.add_option(
self.OPTIONS[self.DEST_LOG][0],
self.OPTIONS[self.DEST_LOG][1],
default=None,
action="store",
dest=self.DEST_LOG,
metavar="FILE",
help="write logs to FILE (%s)" % logfile_default,
)
group.add_option(
self.OPTIONS[self.DEST_ROT][0],
self.OPTIONS[self.DEST_ROT][1],
default=None,
action="store",
dest=self.DEST_ROT,
metavar="TIME",
help="rotate logs at an interval (<N>d or <N>h or <N>m)",
)
group.add_option(
self.OPTIONS[self.DEST_VERBOSE][0],
self.OPTIONS[self.DEST_VERBOSE][1],
action="count",
default=0,
dest=self.DEST_VERBOSE,
help="more verbose logging",
)
group.add_option(
self.OPTIONS[self.DEST_QUIET][0],
self.OPTIONS[self.DEST_QUIET][1],
action="store_true",
default=False,
dest=self.DEST_QUIET,
help="quiet mode, no logging",
)
self.add_option_group(group)
def check_required(self, opt):
"""Simplify checks for required values.
The default value for a required option must be None.
The easiest way to achieve this is not to provide a default.
Call error() if the required option is not present.
"""
option = self.get_option(opt)
# Assumes the option's 'default' is set to None!
if getattr(self.values, option.dest) is None:
self.error("%s option not supplied" % option)
def parse_args(self, args=None):
"""Process command-line options.
Parameters:
args - same as OptionParser.parse_args
Return:
True if all went well, False if not
Post-conditions:
If the return was True, logging levels and handlers
are properly set for qualified name 'netlogger'.
Otherwise, an error will be reported via the
'error' method of the parser passed to the constructor.
"""
if args is None:
args = sys.argv[1:]
self._add_options()
options, pargs = optparse.OptionParser.parse_args(self, args)
# Where and in what format to write logs
if self._dmn:
is_daemon = getattr(options, self.DEST_DAEMON)
else:
is_daemon = False
logfile = getattr(options, self.DEST_LOG, None)
logrot = getattr(options, self.DEST_ROT, None)
if ((not logfile) or logfile == "-") and logrot:
self.error("Log rotation requires a logfile")
if logrot:
if len(logrot) < 1:
self.error("Bad log rotation interval, too short")
tm_unit = logrot[-1].lower()
if tm_unit not in ("h", "m", "d"):
self.error("Bad log rotation unit '%s' " "not in m,h,d" % tm_unit)
try:
tm_interval = int(logrot[:-1])
except ValueError:
self.error("Log rotation value '%s' must be an integer" % logrot[:-1])
do_logrot = True
_tfrh = logging.handlers.TimedRotatingFileHandler
else:
do_logrot = False
log = logging.getLogger(PROJECT_NAMESPACE)
# Set handler and logger class
handler = None
if is_daemon:
if logfile is None or logfile == "" or logfile == "-": # missing/empty
self.error("log file is required in daemon mode")
return # defensive
else:
# stderr and BP logs -> logfile
setLoggerClass(BPLogger)
logfile = logfile.strip()
try:
if do_logrot:
handler = _tfrh(logfile, when=tm_unit, interval=tm_interval)
else:
handler = logging.FileHandler(logfile)
except OSError as err:
self.error("Cannot open log file '{}': {}".format(logfile, err))
sys.stderr = handler.stream
handler.setFormatter(logging.Formatter("%(message)s"))
else:
if logfile is None or logfile == "": # missing
# Pretty-BP logs -> stderr
setLoggerClass(PrettyBPLogger)
handler = logging.StreamHandler()
elif logfile.strip() == "-": # empty
# BP logs -> stderr
setLoggerClass(BPLogger)
handler = logging.StreamHandler()
else:
# BP logs -> logfile
logfile = logfile.strip()
setLoggerClass(BPLogger)
try:
if do_logrot:
handler = _tfrh(logfile, when=tm_unit, interval=tm_interval)
else:
handler = logging.FileHandler(logfile)
except OSError as err:
self.error("Cannot open log file '{}': {}".format(logfile, err))
handler.setFormatter(logging.Formatter("%(message)s"))
if handler:
log.addHandler(handler)
# Verbosity level
quiet = getattr(options, self.DEST_QUIET, False)
# delattr(options, self.DEST_QUIET)
vb = getattr(options, self.DEST_VERBOSE, 0)
# delattr(options, self.DEST_VERBOSE)
if quiet and (vb > 0):
self.error("quiet and verbosity options conflict")
return # defensive
if quiet:
log.setLevel(logging.CRITICAL + 1)
else:
log.setLevel(self.VBMAP[min(vb, len(self.VBMAP) - 1)])
# Return remaining options and args to caller
return options, pargs
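# Usage sketch tying OptionParser to the logging setup described in its
# docstring (the script option and file names are hypothetical):
#
#   parser = OptionParser(can_be_daemon=True)
#   parser.add_option("--input", dest="input")
#   options, args = parser.parse_args()
#   log = get_logger(__file__)
#   log.info("startup", input=options.input)
#
# Running the script with "-L /var/log/myscript.log -v -v" would then write
# BP-formatted logs to that file at DEBUG verbosity.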
|
|
import itertools
import math
import numpy as np
from scipy import ndimage as ndi
from scipy.ndimage import filters as ndif
from collections import OrderedDict
from ..exposure import histogram
from .._shared.utils import assert_nD, warn
from ..transform import integral_image
from .. import util
from skimage import dtype_limits, img_as_ubyte
__all__ = ['try_all_threshold',
'threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata',
'threshold_li',
'threshold_minimum',
'threshold_mean',
'threshold_niblack',
'threshold_sauvola',
'threshold_triangle']
def _try_all(image, methods=None, figsize=None, num_cols=2, verbose=True):
"""Returns a figure comparing the outputs of different methods.
Parameters
----------
image : (N, M) ndarray
Input image.
methods : dict, optional
Names and associated functions.
Functions must take and return an image.
figsize : tuple, optional
Figure size (in inches).
num_cols : int, optional
Number of columns.
verbose : bool, optional
Print function name for each method.
Returns
-------
fig, ax : tuple
Matplotlib figure and axes.
"""
from matplotlib import pyplot as plt
num_rows = math.ceil((len(methods) + 1.) / num_cols)
num_rows = int(num_rows) # Python 2.7 support
fig, ax = plt.subplots(num_rows, num_cols, figsize=figsize,
sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = ax.ravel()
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Original')
i = 1
for name, func in methods.items():
ax[i].imshow(func(image), cmap=plt.cm.gray)
ax[i].set_title(name)
i += 1
if verbose:
print(func.__orifunc__)
for a in ax:
a.axis('off')
fig.tight_layout()
return fig, ax
def try_all_threshold(image, figsize=(8, 5), verbose=True):
"""Returns a figure comparing the outputs of different thresholding methods.
Parameters
----------
image : (N, M) ndarray
Input image.
figsize : tuple, optional
Figure size (in inches).
verbose : bool, optional
Print function name for each method.
Returns
-------
fig, ax : tuple
Matplotlib figure and axes.
Notes
-----
The following algorithms are used:
* isodata
* li
* mean
* minimum
* otsu
* triangle
* yen
Examples
--------
>>> from skimage.data import text
>>> fig, ax = try_all_threshold(text(), figsize=(10, 6), verbose=False)
"""
def thresh(func):
"""
A wrapper function to return a thresholded image.
"""
def wrapper(im):
return im > func(im)
try:
wrapper.__orifunc__ = func.__orifunc__
except AttributeError:
wrapper.__orifunc__ = func.__module__ + '.' + func.__name__
return wrapper
# Global algorithms.
methods = OrderedDict({'Isodata': thresh(threshold_isodata),
'Li': thresh(threshold_li),
'Mean': thresh(threshold_mean),
'Minimum': thresh(threshold_minimum),
'Otsu': thresh(threshold_otsu),
'Triangle': thresh(threshold_triangle),
'Yen': thresh(threshold_yen)})
return _try_all(image, figsize=figsize,
methods=methods, verbose=verbose)
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
mode='reflect', param=None):
"""Applies an adaptive threshold to an array.
Also known as local or dynamic thresholding where the threshold value is
the weighted mean for the local neighborhood of a pixel subtracted by a
constant. Alternatively the threshold can be determined dynamically by a
given function using the 'generic' method.
Parameters
----------
image : (N, M) ndarray
Input image.
block_size : int
Odd size of pixel neighborhood which is used to calculate the
threshold value (e.g. 3, 5, 7, ..., 21, ...).
method : {'generic', 'gaussian', 'mean', 'median'}, optional
Method used to determine adaptive threshold for local neighbourhood in
weighted mean image.
* 'generic': use custom function (see `param` parameter)
* 'gaussian': apply gaussian filter (see `param` parameter for custom\
sigma value)
* 'mean': apply arithmetic mean filter
* 'median': apply median rank filter
By default the 'gaussian' method is used.
offset : float, optional
Constant subtracted from weighted mean of neighborhood to calculate
the local threshold value. Default offset is 0.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
Default is 'reflect'.
param : {int, function}, optional
Either specify sigma for 'gaussian' method or function object for
'generic' method. This function takes the flat array of the local
neighbourhood as a single argument and returns the calculated
threshold for the centre pixel.
Returns
-------
threshold : (N, M) ndarray
Thresholded binary image
References
----------
.. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold
Examples
--------
>>> from skimage.data import camera
>>> image = camera()[:50, :50]
>>> binary_image1 = threshold_adaptive(image, 15, 'mean')
>>> func = lambda arr: arr.mean()
>>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
"""
if block_size % 2 == 0:
raise ValueError("The kwarg ``block_size`` must be odd! Given "
"``block_size`` {0} is even.".format(block_size))
assert_nD(image, 2)
thresh_image = np.zeros(image.shape, 'double')
if method == 'generic':
ndi.generic_filter(image, param, block_size,
output=thresh_image, mode=mode)
elif method == 'gaussian':
if param is None:
# automatically determine sigma which covers > 99% of distribution
sigma = (block_size - 1) / 6.0
else:
sigma = param
ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode)
elif method == 'mean':
mask = 1. / block_size * np.ones((block_size,))
# separation of filters to speedup convolution
ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode)
ndi.convolve1d(thresh_image, mask, axis=1,
output=thresh_image, mode=mode)
elif method == 'median':
ndi.median_filter(image, block_size, output=thresh_image, mode=mode)
return image > (thresh_image - offset)
def threshold_otsu(image, nbins=256):
"""Return threshold value based on Otsu's method.
Parameters
----------
image : (N, M) ndarray
Grayscale input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
Raises
------
ValueError
If `image` only contains a single grayscale value.
References
----------
.. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_otsu(image)
>>> binary = image <= thresh
Notes
-----
The input image must be grayscale.
"""
if len(image.shape) > 2 and image.shape[-1] in (3, 4):
msg = "threshold_otsu is expected to work correctly only for " \
"grayscale images; image shape {0} looks like an RGB image"
warn(msg.format(image.shape))
# Check if the image is multi-colored or not
if image.min() == image.max():
raise ValueError("threshold_otsu is expected to work with images "
"having more than one color. The input image seems "
"to have just one color {0}.".format(image.min()))
hist, bin_centers = histogram(image.ravel(), nbins)
hist = hist.astype(float)
# class probabilities for all possible thresholds
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of `weight1`/`mean1` should pair with zero values in
# `weight2`/`mean2`, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
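# The quantity maximized above is Otsu's between-class variance,
#
#     sigma_b^2(t) = w1(t) * w2(t) * (mu1(t) - mu2(t))**2
#
# where w1/w2 are the class probabilities (weight1/weight2) and mu1/mu2 the
# class means (mean1/mean2) of the pixels below and above threshold t.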
def threshold_yen(image, nbins=256):
"""Return threshold value based on Yen's method.
Parameters
----------
image : (N, M) ndarray
Input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
for Automatic Multilevel Thresholding" IEEE Trans. on Image
Processing, 4(3): 370-378. DOI:10.1109/83.366472
.. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165, DOI:10.1117/1.1631315
http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
.. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_yen(image)
>>> binary = image <= thresh
"""
hist, bin_centers = histogram(image.ravel(), nbins)
# On blank images (e.g. filled with 0) with int dtype, `histogram()`
# returns `bin_centers` containing only one value. Speed up with it.
if bin_centers.size == 1:
return bin_centers[0]
# Calculate probability mass function
pmf = hist.astype(np.float32) / hist.sum()
P1 = np.cumsum(pmf) # Cumulative normalized histogram
P1_sq = np.cumsum(pmf ** 2)
# Get cumsum calculated from end of squared array:
P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1]
# P2_sq is shifted by +1 relative to P1; using P1[:-1] here helps avoid
# '-inf' values in crit. The ImageJ Yen implementation replaces those
# values with zero instead.
crit = np.log(((P1_sq[:-1] * P2_sq[1:]) ** -1) *
(P1[:-1] * (1.0 - P1[:-1])) ** 2)
return bin_centers[crit.argmax()]
def threshold_isodata(image, nbins=256, return_all=False):
"""Return threshold value(s) based on ISODATA method.
Histogram-based threshold, known as Ridler-Calvard method or inter-means.
Threshold values returned satisfy the following equality:
`threshold = (image[image <= threshold].mean() +`
`image[image > threshold].mean()) / 2.0`
That is, returned thresholds are intensities that separate the image into
two groups of pixels, where the threshold intensity is midway between the
mean intensities of these groups.
For integer images, the above equality holds to within one; for floating-
point images, the equality holds to within the histogram bin-width.
Parameters
----------
image : (N, M) ndarray
Input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
return_all: bool, optional
If False (default), return only the lowest threshold that satisfies
the above equality. If True, return all valid thresholds.
Returns
-------
threshold : float or int or array
Threshold value(s).
References
----------
.. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
iterative selection method"
IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
DOI:10.1109/TSMC.1978.4310039
.. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165,
http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
DOI:10.1117/1.1631315
.. [3] ImageJ AutoThresholder code,
http://fiji.sc/wiki/index.php/Auto_Threshold
Examples
--------
>>> from skimage.data import coins
>>> image = coins()
>>> thresh = threshold_isodata(image)
>>> binary = image > thresh
"""
hist, bin_centers = histogram(image.ravel(), nbins)
# image only contains one unique value
if len(bin_centers) == 1:
if return_all:
return bin_centers
else:
return bin_centers[0]
hist = hist.astype(np.float32)
# csuml and csumh contain the count of pixels in that bin or lower, and
# in all bins strictly higher than that bin, respectively
csuml = np.cumsum(hist)
csumh = np.cumsum(hist[::-1])[::-1] - hist
# intensity_sum contains the total pixel intensity from each bin
intensity_sum = hist * bin_centers
# l and h contain average value of all pixels in that bin or lower, and
# in all bins strictly higher than that bin, respectively.
# Note that since exp.histogram does not include empty bins at the low or
# high end of the range, csuml and csumh are strictly > 0, except in the
# last bin of csumh, which is zero by construction.
# So no worries about division by zero in the following lines, except
# for the last bin, but we can ignore that because no valid threshold
# can be in the top bin. So we just patch up csumh[-1] to not cause 0/0
# errors.
csumh[-1] = 1
l = np.cumsum(intensity_sum) / csuml
h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh
# isodata finds threshold values that meet the criterion t = (l + h) / 2
# where l is the mean of all pixels <= t and h is the mean of all pixels
# > t, as calculated above. So we are looking for places where
# (l + h) / 2 equals the intensity value for which those l and h figures
# were calculated -- which is, of course, the histogram bin centers.
# We only require this equality to be within the precision of the bin
# width, of course.
all_mean = (l + h) / 2.0
bin_width = bin_centers[1] - bin_centers[0]
# Look only at thresholds that are below the actual all_mean value,
# for consistency with the threshold being included in the lower pixel
# group. Otherwise can get thresholds that are not actually fixed-points
# of the isodata algorithm. For float images, this matters less, since
# there really can't be any guarantees anymore anyway.
distances = all_mean - bin_centers
thresholds = bin_centers[(distances >= 0) & (distances < bin_width)]
if return_all:
return thresholds
else:
return thresholds[0]
def threshold_li(image):
"""Return threshold value based on adaptation of Li's Minimum Cross Entropy method.
Parameters
----------
image : (N, M) ndarray
Input image.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
Pattern Recognition, 26(4): 617-625
DOI:10.1016/0031-3203(93)90115-D
.. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
DOI:10.1016/S0167-8655(98)00057-9
.. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165
DOI:10.1117/1.1631315
.. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_li(image)
>>> binary = image > thresh
"""
# Make sure image has more than one value
if np.all(image == image.flat[0]):
raise ValueError("threshold_li is expected to work with images "
"having more than one value. The input image seems "
"to have just one value {0}.".format(image.flat[0]))
# Copy to ensure input image is not modified
image = image.copy()
# Requires positive image (because of log(mean))
immin = np.min(image)
image -= immin
imrange = np.max(image)
tolerance = 0.5 * imrange / 256
# Calculate the mean gray-level
mean = np.mean(image)
# Initial estimate
new_thresh = mean
old_thresh = new_thresh + 2 * tolerance
# Stop the iterations when the difference between the
# new and old threshold values is less than the tolerance
while abs(new_thresh - old_thresh) > tolerance:
old_thresh = new_thresh
threshold = old_thresh + tolerance # range
# Calculate the means of background and object pixels
mean_back = image[image <= threshold].mean()
mean_obj = image[image > threshold].mean()
temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))
if temp < 0:
new_thresh = temp - tolerance
else:
new_thresh = temp + tolerance
return threshold + immin
def threshold_minimum(image, nbins=256, bias='min', max_iter=10000):
"""Return threshold value based on minimum method.
The histogram of the input `image` is computed and smoothed until there are
only two maxima. Then the minimum in between is the threshold value.
Parameters
----------
image : (M, N) ndarray
Input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
bias : {'min', 'mid', 'max'}, optional
'min', 'mid', 'max' return lowest, middle, or highest pixel value
with minimum histogram value.
max_iter: int, optional
Maximum number of iterations to smooth the histogram.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
Raises
------
RuntimeError
If unable to find two local maxima in the histogram or if the
smoothing takes more than 1e4 iterations.
References
----------
.. [1] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell images",
Annals of the New York Academy of Sciences 128: 1035-1053
DOI:10.1111/j.1749-6632.1965.tb11715.x
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_minimum(image)
>>> binary = image > thresh
"""
def find_local_maxima(hist):
# We can't use scipy.signal.argrelmax
# as it fails on plateaus
maximums = list()
direction = 1
for i in range(hist.shape[0] - 1):
if direction > 0:
if hist[i + 1] < hist[i]:
direction = -1
maximums.append(i)
else:
if hist[i + 1] > hist[i]:
direction = 1
return maximums
if bias not in ('min', 'mid', 'max'):
raise ValueError("Unknown bias: {0}".format(bias))
hist, bin_centers = histogram(image.ravel(), nbins)
smooth_hist = np.copy(hist)
for counter in range(max_iter):
smooth_hist = ndif.uniform_filter1d(smooth_hist, 3)
maximums = find_local_maxima(smooth_hist)
if len(maximums) < 3:
break
if len(maximums) != 2:
raise RuntimeError('Unable to find two maxima in histogram')
elif counter == max_iter - 1:
raise RuntimeError('Maximum iteration reached for histogram '
'smoothing')
# Find lowest point between the maxima, biased to the low end (min)
minimum = smooth_hist[maximums[0]]
threshold = maximums[0]
for i in range(maximums[0], maximums[1]+1):
if smooth_hist[i] < minimum:
minimum = smooth_hist[i]
threshold = i
if bias == 'min':
return bin_centers[threshold]
else:
upper_bound = threshold
while smooth_hist[upper_bound] == smooth_hist[threshold]:
upper_bound += 1
upper_bound -= 1
if bias == 'max':
return bin_centers[upper_bound]
elif bias == 'mid':
return bin_centers[(threshold + upper_bound) // 2]
def threshold_mean(image):
"""Return threshold value based on the mean of grayscale values.
Parameters
----------
image : (N, M[, ..., P]) ndarray
Grayscale input image.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] C. A. Glasbey, "An analysis of histogram-based thresholding
algorithms," CVGIP: Graphical Models and Image Processing,
vol. 55, pp. 532-537, 1993.
DOI:10.1006/cgip.1993.1040
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_mean(image)
>>> binary = image > thresh
"""
return np.mean(image)
def threshold_triangle(image, nbins=256):
"""Return threshold value based on the triangle algorithm.
Parameters
----------
image : (N, M[, ..., P]) ndarray
Grayscale input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] Zack, G. W., Rogers, W. E. and Latt, S. A., 1977,
Automatic Measurement of Sister Chromatid Exchange Frequency,
Journal of Histochemistry and Cytochemistry 25 (7), pp. 741-753
DOI:10.1177/25.7.70454
.. [2] ImageJ AutoThresholder code,
http://fiji.sc/wiki/index.php/Auto_Threshold
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_triangle(image)
>>> binary = image > thresh
"""
# nbins is ignored for integer arrays
# so, we recalculate the effective nbins.
hist, bin_centers = histogram(image.ravel(), nbins)
nbins = len(hist)
# Find peak, lowest and highest gray levels.
arg_peak_height = np.argmax(hist)
peak_height = hist[arg_peak_height]
arg_low_level, arg_high_level = np.where(hist>0)[0][[0, -1]]
# Flip is True if left tail is shorter.
flip = arg_peak_height - arg_low_level < arg_high_level - arg_peak_height
if flip:
hist = hist[::-1]
arg_low_level = nbins - arg_high_level - 1
arg_peak_height = nbins - arg_peak_height - 1
# If flip == True, arg_high_level becomes incorrect
# but we don't need it anymore.
del(arg_high_level)
# Set up the coordinate system.
width = arg_peak_height - arg_low_level
x1 = np.arange(width)
y1 = hist[x1 + arg_low_level]
# Normalize.
norm = np.sqrt(peak_height**2 + width**2)
peak_height /= norm
width /= norm
# Maximize the length.
# The ImageJ implementation includes an additional constant when calculating
# the length, but here we omit it as it does not affect the location of the
# minimum.
length = peak_height * x1 - width * y1
arg_level = np.argmax(length) + arg_low_level
if flip:
arg_level = nbins - arg_level - 1
return bin_centers[arg_level]
def _mean_std(image, w):
"""Return local mean and standard deviation of each pixel using a
neighborhood defined by a rectangular window with size w times w.
The algorithm uses integral images to speedup computation. This is
used by threshold_niblack and threshold_sauvola.
Parameters
----------
image : ndarray
Input image.
w : int
Odd window size (e.g. 3, 5, 7, ..., 21, ...).
Returns
-------
m : 2-D array of same size of image with local mean values.
s : 2-D array of same size of image with local standard
deviation values.
References
----------
.. [1] F. Shafait, D. Keysers, and T. M. Breuel, "Efficient
implementation of local adaptive thresholding techniques
using integral images." in Document Recognition and
Retrieval XV, (San Jose, USA), Jan. 2008.
DOI:10.1117/12.767755
"""
if w == 1 or w % 2 == 0:
raise ValueError(
"Window size w = %s must be odd and greater than 1." % w)
padded = np.pad(image.astype('float'), (2, 1), mode='reflect')
padded_sq = padded * padded
integral = integral_image(padded)
integral_sq = integral_image(padded_sq)
kern = np.zeros((w + 1,) * image.ndim)
for indices in itertools.product(*([[0, -1]] * image.ndim)):
kern[indices] = (-1) ** (image.ndim % 2 != np.sum(indices) % 2)
sum_full = ndi.correlate(integral, kern, mode='constant')
m = util.crop(sum_full, (2, 1)) / (w * w)
sum_sq_full = ndi.correlate(integral_sq, kern, mode='constant')
g2 = util.crop(sum_sq_full, (2, 1)) / (w * w)
s = np.sqrt(g2 - m * m)
return m, s
def threshold_niblack(image, window_size=15, k=0.2):
"""Applies Niblack local threshold to an array.
A threshold T is calculated for every pixel in the image using the
following formula:
T = m(x,y) - k * s(x,y)
where m(x,y) and s(x,y) are the mean and standard deviation of
pixel (x,y) neighborhood defined by a rectangular window with size w
times w centered around the pixel. k is a configurable parameter
that weights the effect of standard deviation.
Parameters
----------
image: (N, M) ndarray
Grayscale input image.
window_size : int, optional
Odd size of pixel neighborhood window (e.g. 3, 5, 7...).
k : float, optional
Value of parameter k in threshold formula.
Returns
-------
threshold : (N, M) ndarray
Threshold mask. All pixels with an intensity higher than
this value are assumed to be foreground.
Notes
-----
This algorithm is originally designed for text recognition.
References
----------
.. [1] Niblack, W (1986), An introduction to Digital Image
Processing, Prentice-Hall.
Examples
--------
>>> from skimage import data
>>> image = data.page()
>>> binary_image = threshold_niblack(image, window_size=7, k=0.1)
"""
m, s = _mean_std(image, window_size)
return m - k * s
def threshold_sauvola(image, window_size=15, k=0.2, r=None):
"""Applies Sauvola local threshold to an array. Sauvola is a
modification of Niblack technique.
In the original method a threshold T is calculated for every pixel
in the image using the following formula:
T = m(x,y) * (1 + k * ((s(x,y) / R) - 1))
where m(x,y) and s(x,y) are the mean and standard deviation of
pixel (x,y) neighborhood defined by a rectangular window with size w
times w centered around the pixel. k is a configurable parameter
that weights the effect of standard deviation.
R is the maximum standard deviation of a greyscale image.
Parameters
----------
image: (N, M) ndarray
Grayscale input image.
window_size : int, optional
Odd size of pixel neighborhood window (e.g. 3, 5, 7...).
k : float, optional
Value of the positive parameter k.
r : float, optional
Value of R, the dynamic range of standard deviation.
If None, set to half of the image dtype range.
Returns
-------
threshold : (N, M) ndarray
Threshold mask. All pixels with an intensity higher than
this value are assumed to be foreground.
Notes
-----
This algorithm is originally designed for text recognition.
References
----------
.. [1] J. Sauvola and M. Pietikainen, "Adaptive document image
binarization," Pattern Recognition 33(2),
pp. 225-236, 2000.
DOI:10.1016/S0031-3203(99)00055-2
Examples
--------
>>> from skimage import data
>>> image = data.page()
>>> binary_sauvola = threshold_sauvola(image,
... window_size=15, k=0.2)
"""
if r is None:
imin, imax = dtype_limits(image, clip_negative=False)
r = 0.5 * (imax - imin)
m, s = _mean_std(image, window_size)
return m * (1 + k * ((s / r) - 1))
|
|
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for interacting with llvm-profdata
This script is taken from the chromium build tools and is synced
manually on an as-needed basis:
https://source.chromium.org/chromium/chromium/src/+/master:testing/merge_scripts/code_coverage/merge_lib.py
"""
import logging
import multiprocessing
import os
import re
import shutil
import subprocess
_DIR_SOURCE_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
_JAVA_PATH = os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'jdk', 'current',
'bin', 'java')
logging.basicConfig(
format='[%(asctime)s %(levelname)s] %(message)s', level=logging.DEBUG)
def _call_profdata_tool(profile_input_file_paths,
profile_output_file_path,
profdata_tool_path,
retries=3):
"""Calls the llvm-profdata tool.
Args:
profile_input_file_paths: A list of relative paths to the files that
are to be merged.
profile_output_file_path: The path to the merged file to write.
profdata_tool_path: The path to the llvm-profdata executable.
retries: Number of times to retry the merge after dropping profiles that
appear malformed in the error output.
Returns:
A list of paths to profiles that had to be excluded to get the merge to
succeed, suspected of being corrupted or malformed.
Raises:
CalledProcessError: An error occurred merging profiles.
"""
try:
subprocess_cmd = [
profdata_tool_path, 'merge', '-o', profile_output_file_path,
'-sparse=true'
]
subprocess_cmd.extend(profile_input_file_paths)
# Redirecting stderr is required because when error happens, llvm-profdata
# writes the error output to stderr and our error handling logic relies on
# that output.
output = subprocess.check_output(subprocess_cmd, stderr=subprocess.STDOUT)
logging.info('Merge succeeded with output: %r', output)
except subprocess.CalledProcessError as error:
if len(profile_input_file_paths) > 1 and retries >= 0:
logging.warning('Merge failed with error output: %r', error.output)
# The output of the llvm-profdata command will include the path of
# malformed files, such as
# `error: /.../default.profraw: Malformed instrumentation profile data`
invalid_profiles = [
f for f in profile_input_file_paths if f in error.output
]
if not invalid_profiles:
logging.info(
'Merge failed, but wasn\'t able to figure out the culprit invalid '
'profiles from the output, so skip retry and bail out.')
raise error
valid_profiles = list(
set(profile_input_file_paths) - set(invalid_profiles))
if valid_profiles:
logging.warning(
'Following invalid profiles are removed as they were mentioned in '
'the merge error output: %r', invalid_profiles)
logging.info('Retry merging with the remaining profiles: %r',
valid_profiles)
return invalid_profiles + _call_profdata_tool(
valid_profiles, profile_output_file_path, profdata_tool_path,
retries - 1)
logging.error('Failed to merge profiles, return code (%d), output: %r' %
(error.returncode, error.output))
raise error
logging.info('Profile data is created as: "%r".', profile_output_file_path)
return []
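# The merge invocation assembled above is equivalent to running, e.g.:
#
#   llvm-profdata merge -o <profile_output_file_path> -sparse=true \
#       shard0.profraw shard1.profraw ...
#
# (shard file names are hypothetical). Any input named in the error output is
# dropped and the merge is retried with the remaining profiles.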
def _get_profile_paths(input_dir,
input_extension,
input_filename_pattern='.*'):
"""Finds all the profiles in the given directory (recursively)."""
paths = []
for dir_path, _sub_dirs, file_names in os.walk(input_dir):
paths.extend([
os.path.join(dir_path, fn)
for fn in file_names
if fn.endswith(input_extension) and re.search(input_filename_pattern,fn)
])
return paths
def _validate_and_convert_profraws(profraw_files, profdata_tool_path):
"""Validates and converts profraws to profdatas.
For each given .profraw file in the input, this method first validates it by
trying to convert it to an indexed .profdata file, and if the validation and
conversion succeeds, the generated .profdata file will be included in the
output, otherwise, won't.
This method is mainly used to filter out invalid profraw files.
Args:
profraw_files: A list of .profraw paths.
profdata_tool_path: The path to the llvm-profdata executable.
Returns:
A tuple:
A list of converted .profdata files of *valid* profraw files.
A list of *invalid* profraw files.
A list of profraw files that have counter overflows.
"""
for profraw_file in profraw_files:
if not profraw_file.endswith('.profraw'):
raise RuntimeError('%r is expected to be a .profraw file.' % profraw_file)
cpu_count = multiprocessing.cpu_count()
counts = max(10, cpu_count - 5) # Use 10+ processes, but leave 5 cpu cores.
pool = multiprocessing.Pool(counts)
output_profdata_files = multiprocessing.Manager().list()
invalid_profraw_files = multiprocessing.Manager().list()
counter_overflows = multiprocessing.Manager().list()
for profraw_file in profraw_files:
logging.info('Converting profraw file: %r', profraw_file)
pool.apply_async(
_validate_and_convert_profraw,
(profraw_file, output_profdata_files, invalid_profraw_files,
counter_overflows, profdata_tool_path))
pool.close()
pool.join()
# Remove inputs, as they won't be needed and they can be pretty large.
for input_file in profraw_files:
os.remove(input_file)
return list(output_profdata_files), list(invalid_profraw_files), list(
counter_overflows)
def _validate_and_convert_profraw(profraw_file, output_profdata_files,
invalid_profraw_files, counter_overflows,
profdata_tool_path):
output_profdata_file = profraw_file.replace('.profraw', '.profdata')
subprocess_cmd = [
profdata_tool_path, 'merge', '-o', output_profdata_file, '-sparse=true',
profraw_file
]
profile_valid = False
counter_overflow = False
validation_output = None
# 1. Determine if the profile is valid.
try:
# Redirecting stderr is required because when error happens, llvm-profdata
# writes the error output to stderr and our error handling logic relies on
# that output.
logging.info('Converting %r to %r', profraw_file, output_profdata_file)
validation_output = subprocess.check_output(
subprocess_cmd, stderr=subprocess.STDOUT)
logging.info('Validating and converting %r to %r succeeded with output: %r',
profraw_file, output_profdata_file, validation_output)
if 'Counter overflow' in validation_output:
counter_overflow = True
else:
profile_valid = True
except subprocess.CalledProcessError as error:
logging.warning('Validating and converting %r to %r failed with output: %r',
profraw_file, output_profdata_file, error.output)
validation_output = error.output
# 2. Add the profile to the appropriate list(s).
if profile_valid:
output_profdata_files.append(output_profdata_file)
else:
invalid_profraw_files.append(profraw_file)
if counter_overflow:
counter_overflows.append(profraw_file)
# 3. Log appropriate message
if not profile_valid:
template = 'Bad profile: %r, output: %r'
if counter_overflow:
template = 'Counter overflow: %r, output: %r'
logging.warning(template, profraw_file, validation_output)
# 4. Delete profdata for invalid profiles if present.
if os.path.exists(output_profdata_file):
# The output file may be created before llvm-profdata determines the
# input is invalid. Delete it so that it does not leak and affect other
# merge scripts.
os.remove(output_profdata_file)
def merge_java_exec_files(input_dir, output_path, jacococli_path):
"""Merges generated .exec files to output_path.
Args:
input_dir (str): The path to traverse to find input files.
output_path (str): Where to write the merged .exec file.
jacococli_path: The path to jacococli.jar.
Raises:
CalledProcessError: merge command failed.
"""
exec_input_file_paths = _get_profile_paths(input_dir, '.exec')
if not exec_input_file_paths:
logging.info('No exec file found under %s', input_dir)
return
cmd = [_JAVA_PATH, '-jar', jacococli_path, 'merge']
cmd.extend(exec_input_file_paths)
cmd.extend(['--destfile', output_path])
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
logging.info('Merge succeeded with output: %r', output)
def merge_profiles(input_dir,
output_file,
input_extension,
profdata_tool_path,
input_filename_pattern='.*'):
"""Merges the profiles produced by the shards using llvm-profdata.
Args:
input_dir (str): The path to traverse to find input profiles.
output_file (str): Where to write the merged profile.
input_extension (str): File extension to look for in the input_dir.
e.g. '.profdata' or '.profraw'
profdata_tool_path: The path to the llvm-profdata executable.
input_filename_pattern (str): The regex pattern of input filename. Should be
a valid regex pattern if present.
Returns:
The list of profiles that had to be excluded to get the merge to
succeed and a list of profiles that had a counter overflow.
"""
profile_input_file_paths = _get_profile_paths(input_dir,
input_extension,
input_filename_pattern)
invalid_profraw_files = []
counter_overflows = []
if input_extension == '.profraw':
profile_input_file_paths, invalid_profraw_files, counter_overflows = (
_validate_and_convert_profraws(profile_input_file_paths,
profdata_tool_path))
logging.info('List of converted .profdata files: %r',
profile_input_file_paths)
logging.info((
'List of invalid .profraw files that failed to validate and convert: %r'
), invalid_profraw_files)
if counter_overflows:
logging.warning('There were %d profiles with counter overflows',
len(counter_overflows))
# The list of input files could be empty in the following scenarios:
# 1. The test target is pure Python scripts test which doesn't execute any
# C/C++ binaries, such as devtools_type_check.
# 2. The test target executes binaries and dumps coverage profile data
#    files, however, all of them turned out to be invalid.
if not profile_input_file_paths:
    logging.info('There are no valid profraw/profdata files to merge; '
                 'skipping invocation of the profdata tool.')
return invalid_profraw_files, counter_overflows
invalid_profdata_files = _call_profdata_tool(
profile_input_file_paths=profile_input_file_paths,
profile_output_file_path=output_file,
profdata_tool_path=profdata_tool_path)
# Remove inputs when merging profraws as they won't be needed and they can be
# pretty large. If the inputs are profdata files, do not remove them as they
# might be used again for multiple test types coverage.
if input_extension == '.profraw':
for input_file in profile_input_file_paths:
os.remove(input_file)
return invalid_profraw_files + invalid_profdata_files, counter_overflows
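# Illustrative sketch (not part of the original script): merging raw profiles
# dumped by test shards into a single .profdata file. The paths are
# hypothetical; the return values mirror the docstring above.
def _example_merge_profraws():
  invalid_profiles, overflowed_profiles = merge_profiles(
      input_dir='/tmp/profraw_shards',
      output_file='/tmp/coverage.profdata',
      input_extension='.profraw',
      profdata_tool_path='/tmp/llvm-profdata')
  return invalid_profiles, overflowed_profiles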
# We want to retry shards that contain one or more profiles that cannot be
# merged (typically due to corruption described in crbug.com/937521).
def get_shards_to_retry(bad_profiles):
bad_shard_ids = set()
def is_task_id(s):
# Swarming task ids are 16 hex chars. The pythonic way to validate this is
# to cast to int and catch a value error.
try:
      assert len(s) == 16, 'Swarming task IDs are expected to be of length 16'
_int_id = int(s, 16)
return True
except (AssertionError, ValueError):
return False
for profile in bad_profiles:
# E.g. /b/s/w/ir/tmp/t/tmpSvBRii/44b643576cf39f10/profraw/default-1.profraw
_base_path, task_id, _profraw, _filename = os.path.normpath(profile).rsplit(
os.path.sep, 3)
# Since we are getting a task_id from a file path, which is less than ideal,
# do some checking to at least verify that the snippet looks like a valid
# task id.
assert is_task_id(task_id)
bad_shard_ids.add(task_id)
return bad_shard_ids
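# Illustrative sketch (not part of the original script): the path layout
# expected by get_shards_to_retry, using the example path from the comment
# above.
def _example_get_shards_to_retry():
  bad_profiles = [
      '/b/s/w/ir/tmp/t/tmpSvBRii/44b643576cf39f10/profraw/default-1.profraw'
  ]
  return get_shards_to_retry(bad_profiles)  # -> set(['44b643576cf39f10'])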
|
|
"""
FormWizard class -- implements a multi-page form, validating between each
step and storing the form's state as HTML hidden fields so that no state is
stored on the server side.
"""
import cPickle as pickle
from django import forms
from django.conf import settings
from django.contrib.formtools.utils import security_hash, form_hmac
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.crypto import constant_time_compare
from django.utils.hashcompat import md5_constructor
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
class FormWizard(object):
# The HTML (and POST data) field name for the "step" variable.
step_field_name="wizard_step"
# METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################
def __init__(self, form_list, initial=None):
"""
Start a new wizard with a list of forms.
form_list should be a list of Form classes (not instances).
"""
self.form_list = form_list[:]
self.initial = initial or {}
# Dictionary of extra template context variables.
self.extra_context = {}
# A zero-based counter keeping track of which step we're in.
self.step = 0
def __repr__(self):
return "step: %d\nform_list: %s\ninitial_data: %s" % (self.step, self.form_list, self.initial)
def get_form(self, step, data=None):
"Helper method that returns the Form instance for the given step."
return self.form_list[step](data, prefix=self.prefix_for_step(step), initial=self.initial.get(step, None))
def num_steps(self):
"Helper method that returns the number of steps."
# You might think we should just set "self.num_steps = len(form_list)"
# in __init__(), but this calculation needs to be dynamic, because some
# hook methods might alter self.form_list.
return len(self.form_list)
def _check_security_hash(self, token, request, form):
expected = self.security_hash(request, form)
if constant_time_compare(token, expected):
return True
else:
# Fall back to Django 1.2 method, for compatibility with forms that
# are in the middle of being used when the upgrade occurs. However,
# we don't want to do this fallback if a subclass has provided their
# own security_hash method - because they might have implemented a
# more secure method, and this would punch a hole in that.
# PendingDeprecationWarning <- left here to remind us that this
# compatibility fallback should be removed in Django 1.5
FormWizard_expected = FormWizard.security_hash(self, request, form)
if expected == FormWizard_expected:
# They didn't override security_hash, do the fallback:
old_expected = security_hash(request, form)
return constant_time_compare(token, old_expected)
else:
return False
@method_decorator(csrf_protect)
def __call__(self, request, *args, **kwargs):
"""
Main method that does all the hard work, conforming to the Django view
interface.
"""
if 'extra_context' in kwargs:
self.extra_context.update(kwargs['extra_context'])
current_step = self.determine_step(request, *args, **kwargs)
self.parse_params(request, *args, **kwargs)
# Sanity check.
if current_step >= self.num_steps():
raise Http404('Step %s does not exist' % current_step)
# Process the current step. If it's valid, go to the next step or call
# done(), depending on whether any steps remain.
if request.method == 'POST':
form = self.get_form(current_step, request.POST)
else:
form = self.get_form(current_step)
if form.is_valid():
# Validate all the forms. If any of them fail validation, that
# must mean the validator relied on some other input, such as
# an external Web site.
# It is also possible that validation might fail under certain
# attack situations: an attacker might be able to bypass previous
# stages, and generate correct security hashes for all the
# skipped stages by virtue of:
# 1) having filled out an identical form which doesn't have the
# validation (and does something different at the end),
# 2) or having filled out a previous version of the same form
# which had some validation missing,
# 3) or previously having filled out the form when they had
# more privileges than they do now.
#
            # Since the hashes only take into account values, and not other
            # validation the form might do, we must re-do validation
# now for security reasons.
current_form_list = [self.get_form(i, request.POST) for i in range(current_step)]
for i, f in enumerate(current_form_list):
if not self._check_security_hash(request.POST.get("hash_%d" % i, ''), request, f):
return self.render_hash_failure(request, i)
if not f.is_valid():
return self.render_revalidation_failure(request, i, f)
else:
self.process_step(request, f, i)
# Now progress to processing this step:
self.process_step(request, form, current_step)
next_step = current_step + 1
if next_step == self.num_steps():
                return self.done(request, current_form_list + [form])
else:
form = self.get_form(next_step)
self.step = current_step = next_step
return self.render(form, request, current_step)
def render(self, form, request, step, context=None):
"Renders the given Form object, returning an HttpResponse."
old_data = request.POST
prev_fields = []
if old_data:
hidden = forms.HiddenInput()
# Collect all data from previous steps and render it as HTML hidden fields.
for i in range(step):
old_form = self.get_form(i, old_data)
hash_name = 'hash_%s' % i
prev_fields.extend([bf.as_hidden() for bf in old_form])
prev_fields.append(hidden.render(hash_name, old_data.get(hash_name, self.security_hash(request, old_form))))
return self.render_template(request, form, ''.join(prev_fields), step, context)
# METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################
def prefix_for_step(self, step):
"Given the step, returns a Form prefix to use."
return str(step)
def render_hash_failure(self, request, step):
"""
Hook for rendering a template if a hash check failed.
step is the step that failed. Any previous step is guaranteed to be
valid.
This default implementation simply renders the form for the given step,
but subclasses may want to display an error message, etc.
"""
return self.render(self.get_form(step), request, step, context={'wizard_error': _('We apologize, but your form has expired. Please continue filling out the form from this page.')})
def render_revalidation_failure(self, request, step, form):
"""
Hook for rendering a template if final revalidation failed.
        It is highly unlikely that this point would ever be reached, but see
the comment in __call__() for an explanation.
"""
return self.render(form, request, step)
def security_hash(self, request, form):
"""
Calculates the security hash for the given HttpRequest and Form instances.
Subclasses may want to take into account request-specific information,
such as the IP address.
"""
return form_hmac(form)
def determine_step(self, request, *args, **kwargs):
"""
Given the request object and whatever *args and **kwargs were passed to
__call__(), returns the current step (which is zero-based).
Note that the result should not be trusted. It may even be a completely
invalid number. It's not the job of this method to validate it.
"""
if not request.POST:
return 0
try:
step = int(request.POST.get(self.step_field_name, 0))
except ValueError:
return 0
return step
def parse_params(self, request, *args, **kwargs):
"""
        Hook for setting some state, given the request object and whatever
        *args and **kwargs were passed to __call__().
This is called at the beginning of __call__().
"""
pass
def get_template(self, step):
"""
Hook for specifying the name of the template to use for a given step.
Note that this can return a tuple of template names if you'd like to
use the template system's select_template() hook.
"""
return 'forms/wizard.html'
def render_template(self, request, form, previous_fields, step, context=None):
"""
Renders the template for the given step, returning an HttpResponse object.
Override this method if you want to add a custom context, return a
different MIME type, etc. If you only need to override the template
name, use get_template() instead.
The template will be rendered with the following context:
step_field -- The name of the hidden field containing the step.
step0 -- The current step (zero-based).
step -- The current step (one-based).
step_count -- The total number of steps.
form -- The Form instance for the current step (either empty
or with errors).
previous_fields -- A string representing every previous data field,
plus hashes for completed forms, all in the form of
hidden fields. Note that you'll need to run this
through the "safe" template filter, to prevent
auto-escaping, because it's raw HTML.
"""
context = context or {}
context.update(self.extra_context)
return render_to_response(self.get_template(step), dict(context,
step_field=self.step_field_name,
step0=step,
step=step + 1,
step_count=self.num_steps(),
form=form,
previous_fields=previous_fields
), context_instance=RequestContext(request))
def process_step(self, request, form, step):
"""
Hook for modifying the FormWizard's internal state, given a fully
validated Form object. The Form is guaranteed to have clean, valid
data.
This method should *not* modify any of that data. Rather, it might want
to set self.extra_context or dynamically alter self.form_list, based on
previously submitted forms.
Note that this method is called every time a page is rendered for *all*
submitted steps.
"""
pass
# METHODS SUBCLASSES MUST OVERRIDE ########################################
def done(self, request, form_list):
"""
Hook for doing something with the validated data. This is responsible
for the final processing.
form_list is a list of Form instances, each containing clean, valid
data.
"""
raise NotImplementedError("Your %s class has not defined a done() method, which is required." % self.__class__.__name__)
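# Illustrative sketch (not part of the original module): a minimal FormWizard
# subclass. 'ContactForm', 'ConfirmForm' and 'forms/done.html' are hypothetical
# names used only for this example.
class _ExampleContactWizard(FormWizard):
    def done(self, request, form_list):
        # form_list holds one validated Form instance per step.
        data = {}
        for form in form_list:
            data.update(form.cleaned_data)
        # Hand the merged data to whatever processing is needed, then render a
        # confirmation page.
        return render_to_response('forms/done.html', {'data': data},
                                  context_instance=RequestContext(request))
# Usage (e.g. in urls.py): _ExampleContactWizard([ContactForm, ConfirmForm])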
|
|
import ast
import copy
import os
import sys
import unittest
from vistrails.core.configuration import ConfigurationObject, ConfigField, ConfigPath, ConfigURL, get_vistrails_persistent_configuration, get_vistrails_temp_configuration
from vistrails.core.modules.vistrails_module import Module, NotCacheable, ModuleError
from vistrails.core.modules.config import IPort, OPort, ModuleSettings
import vistrails.core.system
class OutputMode(object):
mode_type = None
priority = -1
@classmethod
def can_compute(cls):
return False
def compute_output(self, output_module, configuration=None):
raise NotImplementedError("Subclass of OutputMode should implement "
"this")
# Ideally, these are globally and locally configurable so that we use
# global settings if nothing is set locally (e.g. output directory)
class OutputModeConfig(dict):
mode_type = None
_fields = []
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
for k, v in self.iteritems():
if not self.has_field(k):
raise ValueError('Field "%s" is not declared for class "%s"' %
(k, self.__class__.__name__))
@classmethod
def ensure_field_dict(cls):
if '_field_dict' not in cls.__dict__:
if '_fields' in cls.__dict__:
cls._field_dict = dict((f.name, f) for f in cls._fields)
else:
cls._field_dict = {}
@classmethod
def has_field(cls, k):
cls_list = [cls]
while len(cls_list) > 0:
c = cls_list.pop(0)
if issubclass(c, OutputModeConfig):
c.ensure_field_dict()
if k in c._field_dict:
return True
cls_list.extend(c.__bases__)
return False
@classmethod
def get_field(cls, k):
cls_list = [cls]
while len(cls_list) > 0:
c = cls_list.pop(0)
if issubclass(c, OutputModeConfig):
c.ensure_field_dict()
if k in c._field_dict:
return c._field_dict[k]
cls_list.extend(c.__bases__)
return None
@classmethod
def get_all_fields(cls):
field_dicts = []
cls_list = [cls]
while len(cls_list) > 0:
c = cls_list.pop(0)
if issubclass(c, OutputModeConfig):
c.ensure_field_dict()
field_dicts.append(c._field_dict)
cls_list.extend(c.__bases__)
field_dict = {}
for fd in reversed(field_dicts):
field_dict.update(fd)
fields = field_dict.values()
fields.sort()
return fields
@classmethod
def get_default(cls, k):
f = cls.get_field(k)
if f is not None:
return f.default_val
return None
@classmethod
def has_from_config(cls, config, k):
if hasattr(cls, 'mode_type'):
mode_type = cls.mode_type
if config.has(mode_type):
subconfig = getattr(config, mode_type)
if subconfig.has(k):
return True
return False
@classmethod
def get_from_config(cls, config, k):
if hasattr(cls, 'mode_type'):
mode_type = cls.mode_type
if config.has(mode_type):
subconfig = getattr(config, mode_type)
if subconfig.has(k):
return getattr(subconfig, k)
return None
@classmethod
def has_override(cls, k):
config = get_vistrails_temp_configuration().outputSettings
return cls.has_from_config(config, k)
@classmethod
def get_override(cls, k):
config = get_vistrails_temp_configuration().outputSettings
str_val = cls.get_from_config(config, k)
return cls.get_field(k).from_string(str_val)
@classmethod
def has_global_setting(cls, k):
config = get_vistrails_persistent_configuration().outputDefaultSettings
return cls.has_from_config(config, k)
@classmethod
def get_global_setting(cls, k):
config = get_vistrails_persistent_configuration().outputDefaultSettings
return cls.get_from_config(config, k)
@classmethod
def has_config_setting(cls, k):
return cls.has_override(k) or cls.has_global_setting(k)
def __setitem__(self, k, v):
if not self.has_field(k):
raise ValueError('Setting "%s" is not declared for class "%s"' %
(k, self.__class__.__name__))
dict.__setitem__(self, k, v)
def __getitem__(self, k):
if self.has_override(k):
return self.get_override(k)
try:
return dict.__getitem__(self, k)
except KeyError, e:
if self.has_global_setting(k):
return self.get_global_setting(k)
else:
if self.has_field(k):
return self.get_default(k)
raise e
def __hasitem__(self, k):
return (self.has_field(k) or dict.__hasitem__(self, k) or
self.has_override(k) or self.has_global_setting(k))
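# Illustrative sketch (not part of the original module): declaring a config
# class with one field. 'example' and 'verbose' are hypothetical names.
class _ExampleModeConfig(OutputModeConfig):
    mode_type = "example"
    _fields = [ConfigField('verbose', False, bool)]
# _ExampleModeConfig.has_field('verbose') -> True
# _ExampleModeConfig.get_default('verbose') -> False
# When reading a value via __getitem__ (see above), a per-run override in
# outputSettings wins over an instance value, which wins over a persistent
# outputDefaultSettings entry, which wins over the field default.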
class OutputModule(NotCacheable, Module):
_input_ports = [IPort('value', "Variant"),
IPort('mode_type', "String"),
IPort('configuration', "Dictionary")]
_settings = ModuleSettings(abstract=True)
# configuration is a dictionary of dictionaries where root-level
# keys are mode_types and the inner dictionaries are
# workflow-specific settings
# want to have mode inheritance here...
@classmethod
def ensure_mode_dict(cls):
if '_output_modes_dict' not in cls.__dict__:
if '_output_modes' in cls.__dict__:
cls._output_modes_dict = \
dict((mcls.mode_type, (mcls, mcls.priority))
for mcls in cls._output_modes)
else:
cls._output_modes_dict = {}
@classmethod
def register_output_mode(cls, mode_cls, priority=None):
if mode_cls.mode_type is None:
raise ValueError("mode_cls.mode_type must not be None")
if priority is None:
priority = mode_cls.priority
cls.ensure_mode_dict()
if '_output_modes' not in cls.__dict__:
cls._output_modes = []
cls._output_modes.append(mode_cls)
cls._output_modes_dict[mode_cls.mode_type] = (mode_cls, priority)
@classmethod
def set_mode_priority(cls, mode_type, priority):
cls.ensure_mode_dict()
if mode_type not in cls._output_modes_dict:
raise ValueError('mode_type "%s" is not set for this module' %
mode_type)
        # The registered entries are (mode_cls, priority) tuples, so rebuild
        # the tuple rather than assigning to an immutable item.
        cls._output_modes_dict[mode_type] = \
            (cls._output_modes_dict[mode_type][0], priority)
@classmethod
def get_mode_class(cls, mode_type):
cls_list = [cls]
while len(cls_list) > 0:
c = cls_list.pop(0)
if issubclass(c, OutputModule):
c.ensure_mode_dict()
if mode_type in c._output_modes_dict:
return c._output_modes_dict[mode_type][0]
cls_list.extend(c.__bases__)
return None
@classmethod
def get_sorted_mode_list(cls):
cls_list = [cls]
idx = 0
while idx < len(cls_list):
for c in cls_list[idx].__bases__:
if issubclass(c, OutputModule):
c.ensure_mode_dict()
cls_list.append(c)
idx += 1
mode_dict = {}
for c in reversed(cls_list):
mode_dict.update(c._output_modes_dict)
mode_list = [c for c, _ in reversed(sorted(mode_dict.itervalues(),
key=lambda x: x[1]))]
return mode_list
@classmethod
def get_mode_tree(cls):
cls_list = [cls]
idx = 0
while idx < len(cls_list):
for c in cls_list[idx].__bases__:
if issubclass(c, OutputModule):
c.ensure_mode_dict()
cls_list.append(c)
idx += 1
        # Collect the registered modes across the class hierarchy, letting the
        # most derived classes take precedence (mirrors get_sorted_mode_list).
        mode_tree = {}
        for c in reversed(cls_list):
            c.ensure_mode_dict()
            mode_tree.update(c._output_modes_dict)
        return mode_tree
def get_mode_config(self, mode_cls):
mode_config_cls = mode_cls.config_cls
mode_config_dict = {}
configuration = self.force_get_input('configuration')
if configuration is not None:
# want to search through all mode classes in case we have
# base class settings that should trump
cls_list = [mode_config_cls]
mode_config_cls_list = []
while len(cls_list) > 0:
c = cls_list.pop(0)
if issubclass(c, OutputModeConfig):
mode_config_cls_list.append(c)
cls_list.extend(c.__bases__)
mode_config_cls_list.reverse()
for mode_config_cls in mode_config_cls_list:
for k, v in configuration.iteritems():
if k == mode_config_cls.mode_type:
mode_config_dict.update(v)
mode_config = mode_config_cls(mode_config_dict)
return mode_config
def compute(self):
mode_cls = None
self.ensure_mode_dict()
if self.has_input("mode_type"):
# use user-specified mode_type
mode_type = self.get_input("mode_type")
mode_cls = self.get_mode_class(mode_type)
if mode_cls is None:
raise ModuleError(self, 'Cannot output in mode "%s" because '
'that mode has not been defined' % mode_type)
else:
            # FIXME should have user-settable priorities!
# determine mode_type based on registered modes by priority,
# checking if each is possible
for mcls in self.get_sorted_mode_list():
if mcls.can_compute():
mode_cls = mcls
break
if mode_cls is None:
raise ModuleError(self, "No output mode is valid, output cannot "
"be generated")
mode_config = self.get_mode_config(mode_cls)
mode = mode_cls()
self.annotate({"output_mode": mode.mode_type})
mode.compute_output(self, mode_config)
class StdoutModeConfig(OutputModeConfig):
mode_type = "stdout"
_fields = []
class StdoutMode(OutputMode):
mode_type = "stdout"
priority = 2
config_cls = StdoutModeConfig
@classmethod
def can_compute(cls):
return True
class FileModeConfig(OutputModeConfig):
mode_type = "file"
_fields = [ConfigField('file', None, ConfigPath),
ConfigField('basename', None, str),
ConfigField('prefix', None, str),
ConfigField('suffix', None, str),
ConfigField('dir', None, ConfigPath),
ConfigField('series', False, bool),
ConfigField('overwrite', True, bool),
ConfigField('seriesPadding', 3, int),
ConfigField('seriesStart', 0, int),
ConfigField('format', None, str)]
class FileMode(OutputMode):
mode_type = "file"
priority = 1
config_cls = FileModeConfig
formats = []
# need to reset this after each execution!
series_next = 0
@classmethod
def can_compute(cls):
return True
@classmethod
def get_formats(cls):
formats = []
cls_list = [cls]
while len(cls_list) > 0:
c = cls_list.pop(0)
if issubclass(c, FileMode):
if 'formats' in c.__dict__:
return c.formats
cls_list.extend(c.__bases__)
return []
def get_format(self, configuration=None):
format_map = {'png': 'png',
'jpeg': 'jpg',
'jpg': 'jpg',
'tif': 'tif',
'tiff': 'tif'}
if configuration is not None and 'format' in configuration:
conf_format = configuration['format']
if conf_format.lower() in format_map:
return format_map[conf_format.lower()]
return conf_format
# default is the first listed if it exists
format_list = self.get_formats()
if len(format_list) > 0:
return format_list[0]
return None
def get_series_num(self):
retval = FileMode.series_next
FileMode.series_next += 1
return retval
# FIXME should add format into this computation
def get_filename(self, configuration, full_path=None, filename=None,
dirname=None, basename=None, prefix=None, suffix=None,
overwrite=True, series=False, series_padding=3):
# if prefix/suffix/series are overridden, want to use them
# instead of name...
if full_path is None:
# use file if overridden or none of the file params are
# overridden and the file is not None
overwrite = configuration['overwrite']
if (configuration.has_override('file') or
(not (configuration.has_override('basename') or
configuration.has_override('prefix') or
configuration.has_override('suffix') or
configuration.has_override('dir') or
configuration.has_override('series') or
configuration.has_override('seriesPadding') or
configuration.has_override('seriesStart')) and
'file' in configuration and
configuration['file'] is not None)):
full_path = configuration['file']
else:
if configuration['basename'] is not None:
basename = configuration['basename']
if configuration['prefix'] is not None:
prefix = configuration['prefix']
if configuration['suffix'] is not None:
suffix = configuration['suffix']
if configuration['dir'] is not None:
dirname = configuration['dir']
if configuration['series'] is not None:
series = configuration['series']
if configuration['seriesPadding'] is not None:
series_padding = configuration['seriesPadding']
if full_path is None:
# should any of these necessitate series=True?
if basename is None:
basename = 'vt_out'
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
if dirname is None:
# FIXME should unify with VisTrails output
# directory global! should check for abspath (if
# not, use relative to global output directory)
dirname = ''
# seriesPadding and series have defaults so no
# need to default them
if not overwrite and series:
# need to find first open slot
full_path = None
while full_path is None or os.path.exists(full_path):
series_str = (("%%0%dd" % series_padding) %
self.get_series_num())
full_path = os.path.join(dirname, "%s%s%s%s" %
(prefix, basename,
series_str, suffix))
else:
if series:
series_str = (("%%0%dd" % series_padding) %
self.get_series_num())
else:
series_str = ""
full_path = os.path.join(dirname, "%s%s%s%s" %
(prefix, basename, series_str,
suffix))
if not overwrite and os.path.exists(full_path):
raise IOError('File "%s" exists and overwrite is False' % full_path)
return full_path
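# Illustrative sketch (not part of the original module): how get_filename
# composes a series of output paths when no explicit 'file' is configured.
# The directory and names are hypothetical, and reading config values assumes
# a VisTrails application/configuration has been initialized (see the
# __main__ block at the end of this file).
def _example_get_filename():
    config = FileModeConfig({'basename': 'plot', 'suffix': '.png',
                             'dir': '/tmp/vt_output', 'series': True})
    # Successive calls yield e.g. '/tmp/vt_output/plot000.png',
    # '/tmp/vt_output/plot001.png', ... because the series counter advances.
    return FileMode().get_filename(config)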
class FileToFileMode(FileMode):
def compute_output(self, output_module, configuration=None):
old_fname = output_module.get_input('value').name
full_path = self.get_filename(configuration)
# we know we are in overwrite mode because it would have been
# flagged otherwise
if os.path.exists(full_path):
try:
os.remove(full_path)
except OSError, e:
raise ModuleError(output_module,
('Could not delete existing '
'path "%s"' % full_path))
try:
vistrails.core.system.link_or_copy(old_fname, full_path)
except OSError, e:
msg = "Could not create file '%s': %s" % (full_path, e)
raise ModuleError(output_module, msg)
class FileToStdoutMode(StdoutMode):
def compute_output(self, output_module, configuration=None):
fname = output_module.get_input('value').name
with open(fname, 'r') as f:
for line in f:
sys.stdout.write(line)
class GenericToStdoutMode(StdoutMode):
def compute_output(self, output_module, configuration=None):
value = output_module.get_input('value')
print >>sys.stdout, value
class GenericToFileMode(FileMode):
def compute_output(self, output_module, configuration=None):
value = output_module.get_input('value')
filename = self.get_filename(configuration)
with open(filename, 'w') as f:
print >>f, value
class GenericOutput(OutputModule):
_settings = ModuleSettings(configure_widget="vistrails.gui.modules.output_configuration:OutputModuleConfigurationWidget")
_output_modes = [GenericToStdoutMode, GenericToFileMode]
class FileOutput(OutputModule):
_settings = ModuleSettings(configure_widget="vistrails.gui.modules.output_configuration:OutputModuleConfigurationWidget")
# should set file as a higher priority here...
_input_ports = [('value', 'File')]
_output_modes = [FileToStdoutMode, FileToFileMode]
class ImageFileModeConfig(FileModeConfig):
mode_type = "imageFile"
_fields = [ConfigField('width', 800, int),
ConfigField('height', 600, int)]
class ImageFileMode(FileMode):
config_cls = ImageFileModeConfig
mode_type = "imageFile"
class RichTextOutput(OutputModule):
_settings = ModuleSettings(configure_widget="vistrails.gui.modules.output_configuration:OutputModuleConfigurationWidget")
# need specific spreadsheet richtext mode here
pass
_modules = [OutputModule, GenericOutput, FileOutput]
# need to put WebOutput, ImageOutput, RichTextOutput, SVGOutput, etc. elsewhere
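# Illustrative sketch (not part of the original module): the 'configuration'
# input port of an OutputModule is a dict keyed by mode_type. The directory and
# file names are hypothetical.
def _example_file_output_configuration():
    configuration = {'file': {'dir': '/tmp/vt_output',
                              'basename': 'result',
                              'suffix': '.txt'}}
    # FileOutput registers FileToStdoutMode (priority 2) and FileToFileMode
    # (priority 1); selecting 'file' resolves to FileToFileMode.
    mode_cls = FileOutput.get_mode_class('file')
    return mode_cls, configuration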
class TestOutputModeConfig(unittest.TestCase):
def test_fields(self):
class AlteredFileModeConfig(FileModeConfig):
_fields = [ConfigField("newattr", 3, int)]
self.assertTrue(AlteredFileModeConfig.has_field("overwrite"))
self.assertTrue(AlteredFileModeConfig.has_field("newattr"))
def test_config(self):
config_obj = ConfigurationObject(file=ConfigurationObject(seriesStart=5))
self.assertTrue(FileModeConfig.has_from_config(config_obj,
"seriesStart"))
self.assertEqual(FileModeConfig.get_from_config(config_obj,
"seriesStart"), 5)
def test_subclass_config(self):
class AlteredFileModeConfig(FileModeConfig):
mode_type = "file"
_fields = [ConfigField("newattr", 3, int)]
config_obj = ConfigurationObject(file=ConfigurationObject(seriesStart=5))
self.assertEqual(AlteredFileModeConfig.get_from_config(config_obj,
"seriesStart"), 5)
def test_get_item(self):
config = FileModeConfig()
self.assertEqual(config["seriesStart"], 0)
def test_get_default(self):
self.assertEqual(FileModeConfig.get_default("seriesStart"), 0)
if __name__ == '__main__':
import vistrails.core.application
app = vistrails.core.application.init()
unittest.main()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.contrib.auth import password_validation
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core.password_hashers import get_dovecot_schemes
from modoboa.core.password_hashers.base import PasswordHasher
from modoboa.lib import fields as lib_fields
from modoboa.lib.form_utils import (
HorizontalRadioSelect, SeparatorField, YesNoField
)
from modoboa.parameters import forms as param_forms, tools as param_tools
from . import constants
def enabled_applications():
"""Return the list of installed extensions."""
from modoboa.core.extensions import exts_pool
result = [("user", _("User profile"))]
for extension in exts_pool.list_all():
if "topredirection_url" not in extension:
continue
result.append((extension["name"], extension["label"]))
return sorted(result, key=lambda e: e[0])
class GeneralParametersForm(param_forms.AdminParametersForm):
"""General parameters."""
app = "core"
sep1 = SeparatorField(label=ugettext_lazy("Authentication"))
authentication_type = forms.ChoiceField(
label=ugettext_lazy("Authentication type"),
choices=[("local", ugettext_lazy("Local")),
("ldap", "LDAP")],
initial="local",
help_text=ugettext_lazy("The backend used for authentication"),
widget=HorizontalRadioSelect()
)
password_scheme = forms.ChoiceField(
label=ugettext_lazy("Default password scheme"),
choices=[(hasher.name, ugettext_lazy(hasher.label))
for hasher in PasswordHasher.get_password_hashers()
if hasher().scheme in get_dovecot_schemes()],
initial="sha512crypt",
help_text=ugettext_lazy("Scheme used to crypt mailbox passwords"),
widget=forms.Select(attrs={"class": "form-control"})
)
rounds_number = forms.IntegerField(
label=ugettext_lazy("Rounds"),
initial=70000,
help_text=ugettext_lazy(
"Number of rounds to use (only used by sha256crypt and "
"sha512crypt). Must be between 1000 and 999999999, inclusive."
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
update_scheme = YesNoField(
label=ugettext_lazy("Update password scheme at login"),
initial=True,
help_text=ugettext_lazy(
"Update user password at login to use the default password scheme"
)
)
default_password = forms.CharField(
label=ugettext_lazy("Default password"),
initial="password",
help_text=ugettext_lazy(
"Default password for automatically created accounts.")
)
random_password_length = forms.IntegerField(
label=ugettext_lazy("Random password length"),
min_value=8,
initial=8,
help_text=ugettext_lazy(
"Length of randomly generated passwords.")
)
# LDAP specific settings
ldap_sep = SeparatorField(label=ugettext_lazy("LDAP settings"))
ldap_server_address = forms.CharField(
label=ugettext_lazy("Server address"),
initial="localhost",
help_text=ugettext_lazy(
"The IP address or the DNS name of the LDAP server"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_server_port = forms.IntegerField(
label=ugettext_lazy("Server port"),
initial=389,
help_text=ugettext_lazy("The TCP port number used by the LDAP server"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_secured = forms.ChoiceField(
label=ugettext_lazy("Use a secured connection"),
choices=constants.LDAP_SECURE_MODES,
initial="none",
help_text=ugettext_lazy(
"Use an SSL/STARTTLS connection to access the LDAP server")
)
ldap_auth_method = forms.ChoiceField(
label=ugettext_lazy("Authentication method"),
choices=[("searchbind", ugettext_lazy("Search and bind")),
("directbind", ugettext_lazy("Direct bind"))],
initial="searchbind",
help_text=ugettext_lazy("Choose the authentication method to use"),
widget=forms.Select(attrs={"class": "form-control"})
)
ldap_bind_dn = forms.CharField(
label=ugettext_lazy("Bind DN"),
initial="",
help_text=ugettext_lazy(
"The distinguished name to use when binding to the LDAP server. "
"Leave empty for an anonymous bind"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_bind_password = forms.CharField(
label=ugettext_lazy("Bind password"),
initial="",
help_text=ugettext_lazy(
"The password to use when binding to the LDAP server "
"(with 'Bind DN')"
),
widget=forms.PasswordInput(
attrs={"class": "form-control"}, render_value=True),
required=False
)
ldap_search_base = forms.CharField(
label=ugettext_lazy("Users search base"),
initial="",
help_text=ugettext_lazy(
"The distinguished name of the search base used to find users"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_search_filter = forms.CharField(
label=ugettext_lazy("Search filter"),
initial="(mail=%(user)s)",
help_text=ugettext_lazy(
"An optional filter string (e.g. '(objectClass=person)'). "
"In order to be valid, it must be enclosed in parentheses."
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_user_dn_template = forms.CharField(
label=ugettext_lazy("User DN template"),
initial="",
help_text=ugettext_lazy(
"The template used to construct a user's DN. It should contain "
"one placeholder (ie. %(user)s)"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_password_attribute = forms.CharField(
label=ugettext_lazy("Password attribute"),
initial="userPassword",
help_text=ugettext_lazy("The attribute used to store user passwords"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_is_active_directory = YesNoField(
label=ugettext_lazy("Active Directory"),
initial=False,
help_text=ugettext_lazy(
"Tell if the LDAP server is an Active Directory one")
)
ldap_admin_groups = forms.CharField(
label=ugettext_lazy("Administrator groups"),
initial="",
help_text=ugettext_lazy(
"Members of those LDAP Posix groups will be created as domain "
"administrators. Use ';' characters to separate groups."
),
required=False
)
ldap_group_type = forms.ChoiceField(
label=ugettext_lazy("Group type"),
initial="posixgroup",
choices=constants.LDAP_GROUP_TYPES,
help_text=ugettext_lazy(
"The LDAP group type to use with your directory."
)
)
ldap_groups_search_base = forms.CharField(
label=ugettext_lazy("Groups search base"),
initial="",
help_text=ugettext_lazy(
"The distinguished name of the search base used to find groups"
),
required=False
)
dash_sep = SeparatorField(label=ugettext_lazy("Dashboard"))
rss_feed_url = forms.URLField(
label=ugettext_lazy("Custom RSS feed"),
required=False,
help_text=ugettext_lazy(
"Display custom RSS feed to resellers and domain administrators"
)
)
hide_features_widget = YesNoField(
label=ugettext_lazy("Hide features widget"),
initial=False,
help_text=ugettext_lazy(
"Hide features widget for resellers and domain administrators"
)
)
notif_sep = SeparatorField(label=ugettext_lazy("Notifications"))
sender_address = lib_fields.UTF8EmailField(
label=_("Sender address"),
initial="[email protected]",
help_text=_(
"Email address used to send notifications."
)
)
api_sep = SeparatorField(label=ugettext_lazy("Public API"))
enable_api_communication = YesNoField(
label=ugettext_lazy("Enable communication"),
initial=True,
help_text=ugettext_lazy(
"Enable communication with Modoboa public API")
)
check_new_versions = YesNoField(
label=ugettext_lazy("Check new versions"),
initial=True,
help_text=ugettext_lazy(
"Automatically checks if a newer version is available")
)
send_statistics = YesNoField(
label=ugettext_lazy("Send statistics"),
initial=True,
help_text=ugettext_lazy(
"Send statistics to Modoboa public API "
"(counters and used extensions)")
)
sep3 = SeparatorField(label=ugettext_lazy("Miscellaneous"))
inactive_account_threshold = forms.IntegerField(
label=_("Inactive account threshold"),
initial=30,
help_text=_(
"An account with a last login date greater than this threshold "
"(in days) will be considered as inactive"
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
top_notifications_check_interval = forms.IntegerField(
label=_("Top notifications check interval"),
initial=30,
help_text=_(
"Interval between two top notification checks (in seconds)"
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
log_maximum_age = forms.IntegerField(
label=ugettext_lazy("Maximum log record age"),
initial=365,
help_text=ugettext_lazy("The maximum age in days of a log record"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
items_per_page = forms.IntegerField(
label=ugettext_lazy("Items per page"),
initial=30,
help_text=ugettext_lazy("Number of displayed items per page"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
default_top_redirection = forms.ChoiceField(
label=ugettext_lazy("Default top redirection"),
choices=[],
initial="user",
help_text=ugettext_lazy(
"The default redirection used when no application is specified"
),
widget=forms.Select(attrs={"class": "form-control"})
)
# Visibility rules
visibility_rules = {
"ldap_sep": "authentication_type=ldap",
"ldap_server_address": "authentication_type=ldap",
"ldap_server_port": "authentication_type=ldap",
"ldap_secured": "authentication_type=ldap",
"ldap_auth_method": "authentication_type=ldap",
"ldap_bind_dn": "ldap_auth_method=searchbind",
"ldap_bind_password": "ldap_auth_method=searchbind",
"ldap_search_base": "ldap_auth_method=searchbind",
"ldap_search_filter": "ldap_auth_method=searchbind",
"ldap_user_dn_template": "ldap_auth_method=directbind",
"ldap_password_attribute": "authentication_type=ldap",
"ldap_is_active_directory": "authentication_type=ldap",
"ldap_admin_groups": "authentication_type=ldap",
"ldap_group_type": "authentication_type=ldap",
"ldap_groups_search_base": "authentication_type=ldap",
"check_new_versions": "enable_api_communication=True",
"send_statistics": "enable_api_communication=True",
}
def __init__(self, *args, **kwargs):
super(GeneralParametersForm, self).__init__(*args, **kwargs)
self.fields["default_top_redirection"].choices = enabled_applications()
def clean_ldap_user_dn_template(self):
tpl = self.cleaned_data["ldap_user_dn_template"]
try:
tpl % {"user": "toto"}
except (KeyError, ValueError):
raise forms.ValidationError(_("Invalid syntax"))
return tpl
def clean_rounds_number(self):
value = self.cleaned_data["rounds_number"]
if value < 1000 or value > 999999999:
raise forms.ValidationError(_("Invalid rounds number"))
return value
def clean_default_password(self):
"""Check password complexity."""
value = self.cleaned_data["default_password"]
password_validation.validate_password(value)
return value
def clean(self):
"""Custom validation method
Depending on 'ldap_auth_method' value, we check for different
required parameters.
"""
super(GeneralParametersForm, self).clean()
cleaned_data = self.cleaned_data
if cleaned_data["authentication_type"] != "ldap":
return cleaned_data
if cleaned_data["ldap_auth_method"] == "searchbind":
required_fields = ["ldap_search_base", "ldap_search_filter"]
else:
required_fields = ["ldap_user_dn_template"]
for f in required_fields:
if f not in cleaned_data or cleaned_data[f] == u'':
self.add_error(f, _("This field is required"))
return cleaned_data
def to_django_settings(self):
"""Apply LDAP related parameters to Django settings.
Doing so, we can use the django_auth_ldap module.
"""
try:
import ldap
from django_auth_ldap.config import (
LDAPSearch, PosixGroupType, GroupOfNamesType)
ldap_available = True
except ImportError:
ldap_available = False
values = dict(param_tools.get_global_parameters("core"))
if not ldap_available or values["authentication_type"] != "ldap":
return
if not hasattr(settings, "AUTH_LDAP_USER_ATTR_MAP"):
setattr(settings, "AUTH_LDAP_USER_ATTR_MAP", {
"first_name": "givenName",
"email": "mail",
"last_name": "sn"
})
ldap_uri = "ldaps://" if values["ldap_secured"] == "ssl" else "ldap://"
ldap_uri += "%s:%s" % (
values["ldap_server_address"], values["ldap_server_port"])
setattr(settings, "AUTH_LDAP_SERVER_URI", ldap_uri)
if values["ldap_secured"] == "starttls":
setattr(settings, "AUTH_LDAP_START_TLS", True)
if values["ldap_group_type"] == "groupofnames":
setattr(settings, "AUTH_LDAP_GROUP_TYPE", GroupOfNamesType())
searchfilter = "(objectClass=groupOfNames)"
else:
setattr(settings, "AUTH_LDAP_GROUP_TYPE", PosixGroupType())
searchfilter = "(objectClass=posixGroup)"
setattr(settings, "AUTH_LDAP_GROUP_SEARCH", LDAPSearch(
values["ldap_groups_search_base"], ldap.SCOPE_SUBTREE,
searchfilter
))
if values["ldap_auth_method"] == "searchbind":
setattr(settings, "AUTH_LDAP_BIND_DN", values["ldap_bind_dn"])
setattr(
settings, "AUTH_LDAP_BIND_PASSWORD",
values["ldap_bind_password"]
)
search = LDAPSearch(
values["ldap_search_base"], ldap.SCOPE_SUBTREE,
values["ldap_search_filter"]
)
setattr(settings, "AUTH_LDAP_USER_SEARCH", search)
else:
setattr(
settings, "AUTH_LDAP_USER_DN_TEMPLATE",
values["ldap_user_dn_template"]
)
if values["ldap_is_active_directory"]:
if not hasattr(settings, "AUTH_LDAP_GLOBAL_OPTIONS"):
setattr(settings, "AUTH_LDAP_GLOBAL_OPTIONS", {
ldap.OPT_REFERRALS: False
})
else:
settings.AUTH_LDAP_GLOBAL_OPTIONS[ldap.OPT_REFERRALS] = False
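# Illustrative sketch (not part of the original module): with
# authentication_type == "ldap", ldap_secured == "ssl" and the field defaults
# above, to_django_settings() ends up defining, among others:
#   AUTH_LDAP_SERVER_URI = "ldaps://localhost:389"
#   AUTH_LDAP_USER_ATTR_MAP = {"first_name": "givenName",
#                              "email": "mail", "last_name": "sn"}
# Real deployments override the server address/port through the form fields.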
|
|
"""This module contains classes for creating ELFI graphs (`ElfiModel`).
The ElfiModel is a directed acyclic graph (DAG), whose nodes represent
parts of the inference task, for example the parameters to be inferred,
the simulator or a summary statistic.
https://en.wikipedia.org/wiki/Directed_acyclic_graph
"""
import inspect
import logging
import os
import pickle
import re
import uuid
from functools import partial
import numpy as np
import scipy.spatial
import elfi.client
from elfi.model.graphical_model import GraphicalModel
from elfi.model.utils import distance_as_discrepancy, rvs_from_distribution
from elfi.store import OutputPool
from elfi.utils import observed_name, random_seed, scipy_from_str
__all__ = [
'ElfiModel', 'ComputationContext', 'NodeReference', 'Constant', 'Operation', 'RandomVariable',
'Prior', 'Simulator', 'Summary', 'Discrepancy', 'Distance', 'AdaptiveDistance',
'get_default_model', 'set_default_model', 'new_model', 'load_model'
]
logger = logging.getLogger(__name__)
_default_model = None
def get_default_model():
"""Return the current default ``ElfiModel`` instance.
New nodes will be added to this model by default.
"""
global _default_model
if _default_model is None:
_default_model = ElfiModel()
return _default_model
def set_default_model(model=None):
"""Set the current default ``ElfiModel`` instance.
    New nodes will be placed in the given model by default.
Parameters
----------
model : ElfiModel, optional
If None, creates a new ``ElfiModel``.
"""
global _default_model
if model is None:
model = ElfiModel()
if not isinstance(model, ElfiModel):
        raise ValueError('{} is not an instance of ElfiModel'.format(model))
_default_model = model
def new_model(name=None, set_default=True):
"""Create a new ``ElfiModel`` instance.
In addition to making a new ElfiModel instance, this method sets the new instance as
the default for new nodes.
Parameters
----------
name : str, optional
set_default : bool, optional
Whether to set the newly created model as the current model.
"""
model = ElfiModel(name=name)
if set_default:
set_default_model(model)
return model
def load_model(name, prefix=None, set_default=True):
"""Load the pickled ElfiModel.
Assumes there exists a file "name.pkl" in the current directory. Also sets the loaded
model as the default model for new nodes.
Parameters
----------
name : str
Name of the model file to load (without the .pkl extension).
prefix : str
Path to directory where the model file is located, optional.
set_default : bool, optional
Set the loaded model as the default model. Default is True.
Returns
-------
ElfiModel
"""
model = ElfiModel.load(name, prefix=prefix)
if set_default:
set_default_model(model)
return model
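# Illustrative sketch (not part of this module): a save/load round trip with
# the helpers above. The model name and directory are hypothetical.
def _example_model_round_trip():
    m = new_model('my_model')      # also becomes the default model
    m.save(prefix='models')        # writes models/my_model.pkl
    return load_model('my_model', prefix='models')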
def random_name(length=4, prefix=''):
"""Generate a random string.
Parameters
----------
length : int, optional
prefix : str, optional
"""
return prefix + str(uuid.uuid4().hex[0:length])
# TODO: move to another file?
class ComputationContext:
"""Container object for key components for consistent computation results.
Attributes
----------
seed : int
batch_size : int
pool : OutputPool
num_submissions : int
Number of submissions using this context.
    sub_seed_cache : dict
        Caches the sub seed generation state variables.
Notes
-----
The attributes are immutable.
"""
def __init__(self, batch_size=None, seed=None, pool=None):
"""Set up a ComputationContext.
Parameters
----------
batch_size : int, optional
seed : int, None, 'global', optional
When None generates a random integer seed. When `'global'` uses the global
numpy random state. Only recommended for debugging.
pool : elfi.OutputPool, optional
Used for storing output.
"""
# Check pool context
if pool is not None and pool.has_context:
if batch_size is None:
batch_size = pool.batch_size
elif batch_size != pool.batch_size:
raise ValueError('Pool batch_size differs from the given batch_size!')
if seed is None:
seed = pool.seed
elif seed != pool.seed:
raise ValueError('Pool seed differs from the given seed!')
self._batch_size = batch_size or 1
self._seed = random_seed() if seed is None else seed
self._pool = pool
# Caches will not be used if they are not found from the caches dict
self.caches = {'executor': {}, 'sub_seed': {}}
# Count the number of submissions from this context
self.num_submissions = 0
if pool is not None and not pool.has_context:
self._pool.set_context(self)
@property
def pool(self):
"""Return the output pool."""
return self._pool
@property
def batch_size(self):
"""Return the batch size."""
return self._batch_size
@property
def seed(self):
"""Return the random seed."""
return self._seed
def callback(self, batch, batch_index):
"""Add the batch to pool.
Parameters
----------
batch : dict
batch_index : int
"""
if self._pool is not None:
self._pool.add_batch(batch, batch_index)
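# Illustrative sketch (not part of this module): a context whose outputs for
# the hypothetical nodes 'sim' and 'd' are stored in an OutputPool.
def _example_computation_context():
    pool = OutputPool(['sim', 'd'])
    context = ComputationContext(batch_size=100, seed=0, pool=pool)
    # The pool inherits this context, and the attributes are read-only.
    return context.batch_size, context.seed, context.pool is pool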
class ElfiModel(GraphicalModel):
"""A container for the inference model.
The ElfiModel is a directed acyclic graph (DAG), whose nodes represent
parts of the inference task, for example the parameters to be inferred,
the simulator or a summary statistic.
"""
def __init__(self, name=None, observed=None, source_net=None):
"""Initialize the inference model.
Parameters
----------
name : str, optional
observed : dict, optional
Observed data with node names as keys.
source_net : nx.DiGraph, optional
set_current : bool, optional
Sets this model as the current (default) ELFI model
"""
super(ElfiModel, self).__init__(source_net)
self.name = name or "model_{}".format(random_name())
self.observed = observed or {}
@property
def name(self):
"""Return name of the model."""
return self.source_net.graph['name']
@name.setter
def name(self, name):
"""Set the name of the model."""
self.source_net.graph['name'] = name
@property
def observed(self):
"""Return the observed data for the nodes in a dictionary."""
return self.source_net.graph['observed']
@observed.setter
def observed(self, observed):
"""Set the observed data of the model.
Parameters
----------
observed : dict
"""
if not isinstance(observed, dict):
raise ValueError("Observed data must be given in a dictionary with the node"
"name as the key")
self.source_net.graph['observed'] = observed
def generate(self, batch_size=1, outputs=None, with_values=None, seed=None):
"""Generate a batch of outputs.
This method is useful for testing that the ELFI graph works.
Parameters
----------
batch_size : int, optional
outputs : list, optional
with_values : dict, optional
You can specify values for nodes to use when generating data
seed : int, optional
Defaults to global numpy seed.
"""
if outputs is None:
outputs = self.source_net.nodes()
elif isinstance(outputs, str):
outputs = [outputs]
if not isinstance(outputs, list):
raise ValueError('Outputs must be a list of node names')
if seed is None:
seed = 'global'
pool = None
if with_values is not None:
pool = OutputPool(with_values.keys())
pool.add_batch(with_values, 0)
context = ComputationContext(batch_size, seed=seed, pool=pool)
client = elfi.client.get_client()
compiled_net = client.compile(self.source_net, outputs)
loaded_net = client.load_data(compiled_net, context, batch_index=0)
return client.compute(loaded_net)
def get_reference(self, name):
"""Return a new reference object for a node in the model.
Parameters
----------
name : str
"""
cls = self.get_node(name)['attr_dict']['_class']
return cls.reference(name, self)
def get_state(self, name):
"""Return the state of the node.
Parameters
----------
name : str
"""
return self.source_net.nodes[name]
def update_node(self, name, updating_name):
"""Update `node` with `updating_node` in the model.
The node with name `name` gets the state (operation), parents and observed
data (if applicable) of the updating_node. The updating node is then removed
from the graph.
Parameters
----------
name : str
updating_name : str
"""
update_observed = False
obs = None
if updating_name in self.observed:
update_observed = True
obs = self.observed.pop(updating_name)
super(ElfiModel, self).update_node(name, updating_name)
# Move data to the updated node
if update_observed:
self.observed[name] = obs
def remove_node(self, name):
"""Remove a node from the graph.
Parameters
----------
name : str
"""
if name in self.observed:
self.observed.pop(name)
super(ElfiModel, self).remove_node(name)
@property
def parameter_names(self):
"""Return a list of model parameter names in an alphabetical order."""
return sorted([n for n in self.nodes if '_parameter' in self.get_state(n)['attr_dict']])
@parameter_names.setter
def parameter_names(self, parameter_names):
"""Set the model parameter nodes.
For each node name in parameters, the corresponding node will be marked as being a
parameter node. Other nodes will be marked as not being parameter nodes.
Parameters
----------
parameter_names : list
A list of parameter names
"""
parameter_names = set(parameter_names)
for n in self.nodes:
state = self.get_state(n)['attr_dict']
if n in parameter_names:
parameter_names.remove(n)
state['_parameter'] = True
else:
if '_parameter' in state:
state.pop('_parameter')
if len(parameter_names) > 0:
raise ValueError('Parameters {} not found from the model'.format(parameter_names))
def copy(self):
"""Return a copy of the ElfiModel instance.
Returns
-------
ElfiModel
"""
kopy = super(ElfiModel, self).copy()
kopy.name = "{}_copy_{}".format(self.name, random_name())
return kopy
def save(self, prefix=None):
"""Save the current model to pickled file.
Parameters
----------
prefix : str, optional
Path to the directory under which to save the model. Default is the current working
directory.
"""
path = self.name + '.pkl'
if prefix is not None:
os.makedirs(prefix, exist_ok=True)
path = os.path.join(prefix, path)
pickle.dump(self, open(path, "wb"))
@classmethod
def load(cls, name, prefix):
"""Load the pickled ElfiModel.
Assumes there exists a file "name.pkl" in the current directory.
Parameters
----------
name : str
Name of the model file to load (without the .pkl extension).
prefix : str
Path to directory where the model file is located, optional.
Returns
-------
ElfiModel
"""
path = name + '.pkl'
if prefix is not None:
path = os.path.join(prefix, path)
return pickle.load(open(path, "rb"))
def __getitem__(self, node_name):
"""Return a new reference object for a node in the model.
Parameters
----------
node_name : str
"""
return self.get_reference(node_name)
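# Illustrative sketch (not part of this module): a tiny two-node model
# exercised with ElfiModel.generate(). The node names and the simulator
# function are hypothetical.
def _example_generate():
    m = ElfiModel()
    mu = Prior('uniform', 0, 2, model=m, name='mu')
    Simulator(lambda mu, batch_size=1, random_state=None:
              random_state.normal(mu, 1), mu, model=m, name='sim')
    # Returns a dict with a batch of 5 values for both 'mu' and 'sim'.
    return m.generate(batch_size=5, outputs=['mu', 'sim'])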
class InstructionsMapper:
@property
def state(self):
raise NotImplementedError()
@property
def uses_meta(self):
return self.state['attr_dict'].get('_uses_meta', False)
@uses_meta.setter
def uses_meta(self, val):
self.state['attr_dict']['_uses_meta'] = val
class NodeReference(InstructionsMapper):
"""A base class for node objects in the model.
A user of ELFI will typically use, e.g. `elfi.Prior` or `elfi.Simulator` to create
state dictionaries for nodes.
Each node has a state dictionary that describes how the node ultimately produces its
output (see module documentation for more details). The state is stored in the
`ElfiModel` so that serializing the model is straightforward. `NodeReference` and its
subclasses are convenience classes that make it easy to manipulate the state. They
only contain a reference to the corresponding state in the `ElfiModel`.
Examples
--------
::
elfi.Simulator(fn, arg1, ...)
creates a node to `self.model.source_net` with the following state dictionary::
dict(_operation=fn, _class=elfi.Simulator, ...)
    and adds an edge from arg1 to the new simulator node in the
`self.model.source_net`.
"""
def __init__(self, *parents, state=None, model=None, name=None):
"""Initialize a NodeReference.
Parameters
----------
parents : variable, optional
name : string, optional
If name ends in an asterisk '*' character, the asterisk will be replaced with
a random string and the name is ensured to be unique within the model.
state : dict, optional
model : elfi.ElfiModel, optional
Examples
--------
>>> node = NodeReference(name='name*') # doctest: +SKIP
>>> node.name # doctest: +SKIP
name_1f4rgh
"""
state = state or {}
state['_class'] = self.__class__
model = self._determine_model(model, parents)
name = self._give_name(name, model)
model.add_node(name, state)
self._init_reference(name, model)
self._add_parents(parents)
def _add_parents(self, parents):
for parent in parents:
if not isinstance(parent, NodeReference):
parent_name = self._new_name('_' + self.name)
parent = Constant(parent, name=parent_name, model=self.model)
self.model.add_edge(parent.name, self.name)
def _determine_model(self, model, parents):
if not isinstance(model, ElfiModel) and model is not None:
            raise ValueError('Invalid model passed {}'.format(model))
# Check that parents belong to the same model and inherit the model if needed
for p in parents:
if isinstance(p, NodeReference):
if model is None:
model = p.model
elif model != p.model:
raise ValueError('Parents are from different models!')
if model is None:
model = get_default_model()
return model
@property
def parents(self):
"""Get all positional parent nodes (inputs) of this node.
Returns
-------
parents : list
List of positional parents
"""
return [self.model[p] for p in self.model.get_parents(self.name)]
@classmethod
def reference(cls, name, model):
"""Construct a reference for an existing node in the model.
Parameters
----------
name : string
name of the node
model : ElfiModel
Returns
-------
        NodeReference instance
"""
instance = cls.__new__(cls)
instance._init_reference(name, model)
return instance
def become(self, other_node):
"""Make this node become the `other_node`.
The children of this node will be preserved.
Parameters
----------
other_node : NodeReference
"""
if other_node.model is not self.model:
raise ValueError('The other node belongs to a different model')
self.model.update_node(self.name, other_node.name)
# Update the reference class
_class = self.state.get('_class', NodeReference)
if not isinstance(self, _class):
self.__class__ = _class
# Update also the other node reference
other_node.name = self.name
other_node.model = self.model
def _init_reference(self, name, model):
"""Initialize all internal variables of the instance.
Parameters
----------
name : name of the node in the model
model : ElfiModel
"""
self.name = name
self.model = model
def generate(self, batch_size=1, with_values=None):
"""Generate output from this node.
Useful for testing.
Parameters
----------
batch_size : int, optional
with_values : dict, optional
"""
result = self.model.generate(batch_size, self.name, with_values=with_values)
return result[self.name]
def _give_name(self, name, model):
if name is not None:
if name[-1] == '*':
# Generate unique name
name = self._new_name(name[:-1], model)
return name
try:
name = self._inspect_name()
except BaseException:
logger.warning("Automatic name inspection failed, using a random name "
"instead. This may be caused by using an interactive Python "
"shell. You can provide a name parameter e.g. "
"elfi.Prior('uniform', name='nodename') to suppress this "
"warning.")
name = None
if name is None or model.has_node(name):
name = self._new_name(model=model)
return name
def _inspect_name(self):
"""Magic method that tries to infer the name from the code.
Does not work in interactive python shell.
"""
# Test if context info is available and try to give the same name as the variable
# Please note that this is only a convenience method which is not guaranteed to
# work in all cases. If you require a specific name, pass the name argument.
frame = inspect.currentframe()
if frame is None:
return None
# Frames are available
# Take the callers frame
frame = frame.f_back.f_back.f_back
info = inspect.getframeinfo(frame, 1)
# Skip super calls to find the assignment frame
while re.match(r'\s*super\(', info.code_context[0]):
frame = frame.f_back
info = inspect.getframeinfo(frame, 1)
# Match simple direct assignment with the class name, no commas or semicolons
# Also do not accept a name starting with an underscore
rex = r'\s*([^\W_][\w]*)\s*=\s*\w?[\w\.]*{}\('.format(self.__class__.__name__)
match = re.match(rex, info.code_context[0])
if match:
name = match.groups()[0]
return name
else:
return None
def _new_name(self, basename='', model=None):
model = model or self.model
if not basename:
basename = '_{}'.format(self.__class__.__name__.lower())
while True:
name = "{}_{}".format(basename, random_name())
if not model.has_node(name):
break
return name
@property
def state(self):
"""Return the state dictionary of the node."""
if self.model is None:
raise ValueError('{} {} is not initialized'.format(self.__class__.__name__, self.name))
return self.model.get_node(self.name)
def __getitem__(self, item):
"""Get item from the state dict of the node."""
return self.state[item]
def __setitem__(self, item, value):
"""Set item into the state dict of the node."""
self.state[item] = value
def __repr__(self):
"""Return a representation comprised of the names of the class and the node."""
return "{}(name='{}')".format(self.__class__.__name__, self.name)
def __str__(self):
"""Return the name of the node."""
return self.name
class StochasticMixin(NodeReference):
"""Define the inheriting node as stochastic.
Operations of stochastic nodes will receive a `random_state` keyword argument.
"""
def __init__(self, *parents, state, **kwargs):
# Flag that this node is stochastic
state['_stochastic'] = True
super(StochasticMixin, self).__init__(*parents, state=state, **kwargs)
class ObservableMixin(NodeReference):
"""Define the inheriting node as observable.
    Observable nodes accept an `observed` keyword argument. In addition, the compiled
    model will contain a sister node that holds the observed value or computes the
    observed value from the observed values of its parents.
"""
def __init__(self, *parents, state, observed=None, **kwargs):
# Flag that this node can be observed
state['_observable'] = True
super(ObservableMixin, self).__init__(*parents, state=state, **kwargs)
# Set the observed value
if observed is not None:
self.model.observed[self.name] = observed
@property
def observed(self):
obs_name = observed_name(self.name)
result = self.model.generate(0, obs_name)
return result[obs_name]
# User interface nodes
class Constant(NodeReference):
"""A node holding a constant value."""
def __init__(self, value, **kwargs):
"""Initialize a node holding a constant value.
Parameters
----------
value
The constant value of the node.
"""
state = dict(_output=value)
super(Constant, self).__init__(state=state, **kwargs)
class Operation(NodeReference):
"""A generic deterministic operation node."""
def __init__(self, fn, *parents, **kwargs):
"""Initialize a node that performs an operation.
Parameters
----------
fn : callable
The operation of the node.
"""
state = dict(_operation=fn)
super(Operation, self).__init__(*parents, state=state, **kwargs)
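# Hedged usage sketch (illustrative, not part of the library API): Constant wraps a
# fixed value and Operation wraps a deterministic callable as graph nodes. The node
# names below are made up, and the function is only defined here, never executed; it
# assumes the usual node semantics (parentless nodes attach to a model automatically).
def _constant_operation_usage_sketch():
    mu = Constant(2.0, name='mu_example')
    doubled = Operation(lambda m: 2 * m, mu, name='doubled_example')
    return doubled.generate(batch_size=3)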
class RandomVariable(StochasticMixin, NodeReference):
"""A node that draws values from a random distribution."""
def __init__(self, distribution, *params, size=None, **kwargs):
"""Initialize a node that represents a random variable.
Parameters
----------
distribution : str or scipy-like distribution object
params : params of the distribution
size : int, tuple or None, optional
Output size of a single random draw.
"""
state = dict(distribution=distribution, size=size, _uses_batch_size=True)
state['_operation'] = self.compile_operation(state)
super(RandomVariable, self).__init__(*params, state=state, **kwargs)
@staticmethod
def compile_operation(state):
"""Compile a callable operation that samples the associated distribution.
Parameters
----------
state : dict
"""
size = state['size']
distribution = state['distribution']
if not (size is None or isinstance(size, tuple)):
size = (size, )
# Note: sending the scipy distribution object also pickles the global numpy random
# state with it. If this needs to be avoided, the object needs to be constructed
# on the worker.
if isinstance(distribution, str):
distribution = scipy_from_str(distribution)
if not hasattr(distribution, 'rvs'):
raise ValueError("Distribution {} " "must implement a rvs method".format(distribution))
op = partial(rvs_from_distribution, distribution=distribution, size=size)
return op
@property
def distribution(self):
"""Return the distribution object."""
distribution = self.state['attr_dict']['distribution']
if isinstance(distribution, str):
distribution = scipy_from_str(distribution)
return distribution
@property
def size(self):
"""Return the size of the output from the distribution."""
return self['size']
def __repr__(self):
"""Return a string representation of the node."""
d = self.distribution
if isinstance(d, str):
name = "'{}'".format(d)
elif hasattr(d, 'name'):
name = "'{}'".format(d.name)
elif isinstance(d, type):
name = d.__name__
else:
name = d.__class__.__name__
return super(RandomVariable, self).__repr__()[0:-1] + ", {})".format(name)
class Prior(RandomVariable):
"""A parameter node of an ELFI graph."""
def __init__(self, distribution, *params, size=None, **kwargs):
"""Initialize a Prior.
Parameters
----------
distribution : str, object
Any distribution from `scipy.stats`, either as a string or an object. Objects
must implement at least an `rvs` method with signature
`rvs(*parameters, size, random_state)`. Can also be a custom distribution
object that implements at least an `rvs` method. Many of the algorithms also
require the `pdf` and `logpdf` methods to be available.
size : int, tuple or None, optional
Output size of a single random draw.
params
Parameters of the prior distribution
kwargs
Notes
-----
The parameters of the `scipy` distributions (typically `loc` and `scale`) must be
given as positional arguments.
Many algorithms (e.g. SMC) also require a `pdf` method for the distribution. In
general the definition of the distribution is a subset of
`scipy.stats.rv_continuous`.
Scipy distributions: https://docs.scipy.org/doc/scipy-0.19.0/reference/stats.html
"""
super(Prior, self).__init__(distribution, *params, size=size, **kwargs)
self['attr_dict']['_parameter'] = True
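# Hedged sketch (illustrative, not part of the library): the Prior docstring above
# states that a custom distribution object only needs an rvs(*params, size, random_state)
# method, and that many algorithms additionally use pdf/logpdf. The class below is a
# minimal example of such an object for a uniform distribution on [loc, loc+scale];
# the class name is made up.
class _UniformLikeDistributionSketch:
    @staticmethod
    def rvs(loc, scale, size=1, random_state=None):
        random_state = random_state or np.random
        return loc + scale * random_state.uniform(size=size)
    @staticmethod
    def pdf(x, loc, scale):
        return ((x >= loc) & (x <= loc + scale)) / scale
# It could then be used as e.g. Prior(_UniformLikeDistributionSketch, 0, 2).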
class Simulator(StochasticMixin, ObservableMixin, NodeReference):
"""A simulator node of an ELFI graph.
Simulator nodes are stochastic and may have observed data in the model.
"""
def __init__(self, fn, *params, **kwargs):
"""Initialize a Simulator.
Parameters
----------
fn : callable
Simulator function with a signature `sim(*params, batch_size, random_state)`
params
Input parameters for the simulator.
kwargs
"""
state = dict(_operation=fn, _uses_batch_size=True)
super(Simulator, self).__init__(*params, state=state, **kwargs)
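# Hedged sketch (illustrative, not part of the library): a simulator callable with the
# signature described in the docstring above, sim(*params, batch_size, random_state).
# A single parameter `mu` and a Gaussian observation model are assumed purely as an
# example; it returns one row of 10 "observations" per batch member.
def _gaussian_simulator_sketch(mu, batch_size=1, random_state=None):
    random_state = random_state or np.random
    loc = np.reshape(mu, (-1, 1))
    return random_state.normal(loc=loc, scale=1.0, size=(batch_size, 10))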
class Summary(ObservableMixin, NodeReference):
"""A summary node of an ELFI graph.
    Summary nodes are deterministic operations associated with the observed data. If their
    parents hold observed data, it will be automatically transformed.
"""
def __init__(self, fn, *parents, **kwargs):
"""Initialize a Summary.
Parameters
----------
fn : callable
Summary function with a signature `summary(*parents)`
parents
Input data for the summary function.
kwargs
"""
if not parents:
raise ValueError('This node requires that at least one parent is specified.')
state = dict(_operation=fn)
super(Summary, self).__init__(*parents, state=state, **kwargs)
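# Hedged sketch (illustrative, not part of the library): a summary callable with the
# signature summary(*parents) described above; it reduces each simulated (or observed)
# batch row to its mean.
def _mean_summary_sketch(y):
    return np.mean(y, axis=1)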
class Discrepancy(NodeReference):
"""A discrepancy node of an ELFI graph.
This class provides a convenience node for custom distance operations.
"""
def __init__(self, discrepancy, *parents, **kwargs):
"""Initialize a Discrepancy.
Parameters
----------
discrepancy : callable
Signature of the discrepancy function is of the form:
`discrepancy(summary_1, summary_2, ..., observed)`, where summaries are
arrays containing `batch_size` simulated values and observed is a tuple
(observed_summary_1, observed_summary_2, ...). The callable object should
return a vector of discrepancies between the simulated summaries and the
observed summaries.
*parents
Typically the summaries for the discrepancy function.
**kwargs
See Also
--------
elfi.Distance : creating common distance discrepancies.
"""
if not parents:
raise ValueError('This node requires that at least one parent is specified.')
state = dict(_operation=discrepancy, _uses_observed=True)
super(Discrepancy, self).__init__(*parents, state=state, **kwargs)
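# Hedged sketch (illustrative, not part of the library): a custom discrepancy callable
# following the signature described in the docstring above,
# discrepancy(summary_1, ..., observed), where `observed` is a tuple of the observed
# summaries. It returns one discrepancy value per batch member.
def _abs_diff_discrepancy_sketch(*summaries, observed):
    simulated = np.column_stack(summaries)   # batch_size x n_summaries
    obs = np.column_stack(observed)          # 1 x n_summaries
    return np.sum(np.abs(simulated - obs), axis=1)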
# TODO: add weights
class Distance(Discrepancy):
"""A convenience class for the discrepancy node."""
def __init__(self, distance, *summaries, **kwargs):
"""Initialize a distance node of an ELFI graph.
This class contains many common distance implementations through scipy.
Parameters
----------
distance : str, callable
            If a string, it must be a valid metric from `scipy.spatial.distance.cdist`.
            If a callable, the signature must be `distance(X, Y)`, where X is an n x m
array containing n simulated values (summaries) in rows and Y is a 1 x m array
that contains the observed values (summaries). The callable should return
a vector of distances between the simulated summaries and the observed
summaries.
*summaries
Summary nodes of the model.
**kwargs
Additional parameters may be required depending on the chosen distance.
See the scipy documentation. (The support is not exhaustive.)
ELFI-related kwargs are passed on to elfi.Discrepancy.
Examples
--------
>>> d = elfi.Distance('euclidean', summary1, summary2...) # doctest: +SKIP
>>> d = elfi.Distance('minkowski', summary, p=1) # doctest: +SKIP
Notes
-----
Your summaries need to be scalars or vectors for this method to work. The
summaries will be first stacked to a single 2D array with the simulated
summaries in the rows for every simulation and the distance is taken row
wise against the corresponding observed summary vector.
Scipy distances:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html # noqa
See Also
--------
elfi.Discrepancy : A general discrepancy node
"""
if not summaries:
raise ValueError("This node requires that at least one parent is specified.")
if isinstance(distance, str):
cdist_kwargs = dict(metric=distance)
if distance == 'wminkowski' and 'w' not in kwargs.keys():
raise ValueError('Parameter w must be specified for distance=wminkowski.')
elif distance == 'seuclidean' and 'V' not in kwargs.keys():
raise ValueError('Parameter V must be specified for distance=seuclidean.')
elif distance == 'mahalanobis' and 'VI' not in kwargs.keys():
raise ValueError('Parameter VI must be specified for distance=mahalanobis.')
# extract appropriate keyword arguments (depends on distance, not exhaustive!)
for key in ['p', 'w', 'V', 'VI']:
if key in kwargs.keys():
cdist_kwargs[key] = kwargs.pop(key)
dist_fn = partial(scipy.spatial.distance.cdist, **cdist_kwargs)
else:
dist_fn = distance
discrepancy = partial(distance_as_discrepancy, dist_fn)
super(Distance, self).__init__(discrepancy, *summaries, **kwargs)
# Store the original passed distance
self.state['distance'] = distance
class AdaptiveDistance(Discrepancy):
"""Euclidean (2-norm) distance calculation with adaptive scale.
Summary statistics are normalised to vary on similar scales.
References
----------
Prangle D (2017). Adapting the ABC Distance Function. Bayesian
Analysis 12(1):289-309, 2017.
https://projecteuclid.org/euclid.ba/1460641065
"""
def __init__(self, *summaries, **kwargs):
"""Initialize an AdaptiveDistance.
Parameters
----------
*summaries
Summary nodes of the model.
**kwargs
Notes
-----
Your summaries need to be scalars or vectors for this method to
work. The summaries will be first stacked to a single 2D array
with the simulated summaries in the rows for every simulation
and the distances are taken row wise against the corresponding
observed summary vector.
"""
if not summaries:
raise ValueError("This node requires that at least one parent is specified.")
discrepancy = partial(distance_as_discrepancy, self.nested_distance)
super(AdaptiveDistance, self).__init__(discrepancy, *summaries, **kwargs)
distance = partial(scipy.spatial.distance.cdist, metric='euclidean')
self.state['attr_dict']['distance'] = distance
self.init_state()
def init_state(self):
"""Initialise adaptive distance state."""
self.state['w'] = [None]
dist_fn = partial(self.state['attr_dict']['distance'], w=None)
self.state['distance_functions'] = [dist_fn]
self.state['store'] = 3 * [None]
self.init_adaptation_round()
def init_adaptation_round(self):
"""Initialise data stores to start a new adaptation round."""
if 'store' not in self.state:
self.init_state()
self.state['store'][0] = 0
self.state['store'][1] = 0
self.state['store'][2] = 0
def add_data(self, *data):
"""Add summaries data to update estimated standard deviation.
Parameters
----------
*data
Summary nodes output data.
Notes
-----
Standard deviation is computed with Welford's online algorithm.
"""
data = np.column_stack(data)
self.state['store'][0] += len(data)
delta_1 = data - self.state['store'][1]
self.state['store'][1] += np.sum(delta_1, axis=0) / self.state['store'][0]
delta_2 = data - self.state['store'][1]
self.state['store'][2] += np.sum(delta_1 * delta_2, axis=0)
self.state['scale'] = np.sqrt(self.state['store'][2]/self.state['store'][0])
def update_distance(self):
"""Update distance based on accumulated summaries data."""
weis = 1/self.state['scale']
self.state['w'].append(weis)
self.init_adaptation_round()
dist_fn = partial(self.state['attr_dict']['distance'], w=weis**2)
self.state['distance_functions'].append(dist_fn)
def nested_distance(self, u, v):
"""Compute distance between simulated and observed summaries.
Parameters
----------
u : ndarray
2D array with M x (num summaries) observations
v : ndarray
2D array with 1 x (num summaries) observations
Returns
-------
ndarray
2D array with M x (num distance functions) distances
"""
return np.column_stack([d(u, v) for d in self.state['distance_functions']])
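# Hedged numerical sketch (illustrative, not part of this module): add_data() above
# accumulates count / mean / M2 with a batched form of Welford's online algorithm.
# The helper below repeats that accumulation on plain 2D arrays so the resulting scale
# can be compared against np.std of the stacked data; the function name is made up.
def _welford_scale_sketch(batches):
    # batches: iterable of 2D arrays, each of shape (n_i, n_summaries)
    n, mean, m2 = 0, 0.0, 0.0
    for data in batches:
        data = np.atleast_2d(data)
        n += len(data)
        delta_1 = data - mean
        mean = mean + np.sum(delta_1, axis=0) / n
        delta_2 = data - mean
        m2 = m2 + np.sum(delta_1 * delta_2, axis=0)
    return np.sqrt(m2 / n)   # expected to agree with np.std(np.vstack(batches), axis=0)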
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spc
'''
We have orbitals
    Phi_1 = c_a * chi_a + c_b * chi_b
    Phi_2 = c_c * chi_c + c_d * chi_d
where chi_i are Gaussian-type basis functions
and c_i are expansion coefficients.
The electron density of molecular orbital Phi_1, Rho_1 = <Phi_1|Phi_1>,
can be expressed using auxiliary Gaussian basis functions
    rho_ab = chi_a * chi_b
    Rho_1  = sum_ab c_a*c_b*S_ab * rho_ab
           = sum_ab q_ab * rho_ab
where q_ab = c_a*c_b*S_ab is the charge of the auxiliary electron blob,
with S_ab being the overlap integral between the basis functions chi_a, chi_b.
We can use collective indices i=ab and j=cd:
    qi = Sab*ca*cb
    qj = Scd*cc*cd
The repulsion between blobs qi, qj is then (as computed in Coulomb() below)
    E_ij = qi*qj * erf( r / (sqrt(2)*s) ) / r,   with s^2 = si^2 + sj^2
'''
const_hbar_SI = 1.054571817e-34; #< [J.s] #6.582119569e-16 # [eV/s]
const_Me_SI = 9.10938356e-31; #< [kg]
const_e_SI = 1.602176620898e-19; #< [Coulomb]
const_eps0_SI = 8.854187812813e-12; #< [F.m = Coulomb/(Volt*m)]
const_eV_SI = 1.602176620898e-19; #< [J]
const_Angstroem_SI = 1.0e-10;
const_K_SI = const_hbar_SI*const_hbar_SI/const_Me_SI;
const_El_SI = const_e_SI*const_e_SI/(4.*np.pi*const_eps0_SI);
const_Ry_SI = 0.5 * const_El_SI*const_El_SI/const_K_SI;
const_Ry_eV = 13.6056925944;
const_El_eVA = const_El_SI/( const_e_SI*const_Angstroem_SI );
const_K_eVA = (const_El_eVA*const_El_eVA)/(2*const_Ry_eV);
const_Ke_eVA = const_K_eVA*1.5;
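# Hedged consistency check (illustrative, not from the original script): the hard-coded
# Rydberg energy in eV should agree with the value derived from the SI constants above.
def check_Rydberg_consistency():
    Ry_eV_from_SI = const_Ry_SI/const_eV_SI
    return Ry_eV_from_SI, const_Ry_eV   # both should be ~13.6057 eV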
def Coulomb( r, s ):
'''
double ir = 1./r; //(r+1.e-8);
double is = 1./s; //(s+1.e-8);
double r_s = r*is;
double r_2s = M_SQRT1_2 * r_s; // This is for charge-density blobs (assuming si,sj comes from charge denisty)
//double r_2s = r_s;
//double r_2s = M_SQRT2 * r_s; // This is for wavefunction blobs (assuming si,sj comes from wavefunction)
double e1 = ir * const_El_eVA;
double e2 = erf( r_2s ); // ToDo : this should be possible to compute together !!!
double g = exp( -r_2s*r_2s ) * const_F2;
double f1 = -e1*ir;
double f2 = g*is*0.5;
double e1f2 = e1*f2;
fr = (f1*e2 + e1f2)*ir;
fs = e1f2 *r_s * is;
return e1 * e2;
'''
# ToDo: maybe we can do without s=sqrt(s2) and r=sqrt(r2)
#constexpr const double const_F2 = -2.*sqrt(2./np.pi);
#const_F2 = M_2_SQRTPI * M_SQRT2;
M_SQRT2 = 1.41421356237
M_SQRT1_2 = 1/M_SQRT2
const_F2 = 2*np.sqrt(2/np.pi)
ir = 1./r #(r+1.e-8);
is_ = 1./s #(s+1.e-8);
r_s = r*is_
    r_2s = M_SQRT1_2 * r_s # This is for charge-density blobs (assuming si,sj comes from charge density)
#r_2s = r_s;
#r_2s = M_SQRT2 * r_s; # This is for wavefunction blobs (assuming si,sj comes from wavefunction)
e1 = ir * const_El_eVA
e2 = spc.erf( r_2s )
g = np.exp( -r_2s*r_2s ) * const_F2
f1 = -e1*ir
#f2 = g*is_ # This is for wavefunction blobs (assuming si,sj comes from wavefunction)
    f2 = g*is_*0.5 # This is for charge-density blobs (assuming si,sj comes from charge density)
e1f2 = e1*f2
fr = (f1*e2 + e1f2)*ir
fs = e1f2 *r_s * is_
E = e1 * e2
#for i in range(len(r)):
# print "Gauss::Coulomb r %g s %g E %g fr %g " %(r[i],s, E[i], fr[i] )
return E,fr,fs
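# Hedged sanity check (illustrative, not from the original script): Coulomb() returns a
# radial force factor fr such that dE/dr = fr*r. The helper below compares that analytic
# expression against a central finite difference of E at a single (r, s); the chosen
# values and step size are arbitrary.
def check_Coulomb_force( r0=1.3, s0=0.8, h=1e-5 ):
    Ep,_,_ = Coulomb( r0+h, s0 )
    Em,_,_ = Coulomb( r0-h, s0 )
    dEdr_num = (Ep-Em)/(2*h)
    E0,fr0,_ = Coulomb( r0, s0 )
    return dEdr_num, fr0*r0   # the two numbers should agree closely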
def product3D_s_deriv( si,pi, sj,pj ):
''' returns
S, p,
dSsi, dSsj,
dXsi, dXsj,
dXxi, dXxj,
dCsi, dCsj, dCr
'''
si2 = si*si
sj2 = sj*sj
s2 = si2 + sj2
is2 = 1/s2
is4 = is2*is2
sqrtis2 = np.sqrt(is2)
s = si*sj*sqrtis2 # size
p = pj*(si2*is2) + pi*(sj2*is2) # position
#X = ( si2*xj + sj2*xi )*inv;
inv3_2 = sqrtis2*is2
dSsi = sj*sj2*inv3_2
dSsj = si*si2*inv3_2
dp = pi-pj
dXsi = dp*(-2*si*sj2*is4)
dXsj = dp*( 2*sj*si2*is4)
dXxi = sj2*is2
dXxj = si2*is2
#r2 = dp.norm2()
r2 = dp*dp
a2 = 2.*(si*sj)*is2
a = np.sqrt(a2)
e1 = a2*a
e2 = np.exp( -r2*is2 )
f1 = 3.*a * (si2-sj2)*is4
f2 = 2.*e2 * r2*is4
dCsi = e1*f2*si - e2*f1*sj
dCsj = e1*f2*sj + e2*f1*si
C = e1*e2 # Overlap
dCr = C*(-2.*is2) # derivative is correct, tested !
    # TODO : How is it possible that derivative is the same as value (just rescaled) ????
    # (Answer sketch: the only r-dependence of C is the Gaussian factor exp(-r2*is2), whose
    #  derivative with respect to r2 is itself times -is2; the chain rule over r2 = dp*dp
    #  then contributes the factor dp applied below.)
#double logC = wxi*xi + wxj*xj - wx*X;
#double C = np.exp(-logC) * Ci * Cj
#try:
# for i in range(len(r2)):
# print "product3D_s_deriv r %g s %g S %g dS %g " %(np.sqrt(r2[i]),s, S[i], dCr[i] )
#except:
# pass
return C,s,p, dCr*dp, (dSsi,dXsi,dXxi,dCsi), (dSsj,dXsj,dXxj,dCsj)
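# Hedged sanity check (illustrative, not from the original script): the fourth return
# value of product3D_s_deriv() is the analytic derivative of the overlap C with respect
# to the blob position pi (dCr*dp). The helper below compares it against a central
# finite difference over pi; the parameter values and step size are arbitrary.
def check_overlap_deriv( si=1.0, sj=0.7, x=0.9, h=1e-5 ):
    Cp = product3D_s_deriv( si, x+h, sj, 0.0 )[0]
    Cm = product3D_s_deriv( si, x-h, sj, 0.0 )[0]
    dC_num = (Cp-Cm)/(2*h)
    dC_ana = product3D_s_deriv( si, x, sj, 0.0 )[3]
    return dC_num, dC_ana   # the two numbers should agree closely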
def checkNumDeriv( x, func, dfunc, name ):
    # NOTE: numDeriv() and plotVsNum() used below are not defined in this file; they
    # are assumed to come from the author's plotting/derivative utilities.
    dy = dfunc( x )
    y = func(x)
dynum,xnum = numDeriv( x, y )
#print y
#print "y.shape, ynum.shape ", y.shape, ynum.shape
plotVsNum( x,dy,dynum, name )
plt.plot(x, y,'-.', label=name+"_F" )
if __name__ == "__main__":
#s = np.arange( 0.1, 5.0, 0.05 )
#rs = np.arange( 0.1, 5.0, 0.05 )
#S = np.arange( 1.25, 5.0, 0.05 )
#r = 1.5 + 0.*s
ca = 1.0
cb = 1.0
cc = 1.0
cd = 1.0
sa = 1.0
sb = 1.0
sc = 1.0
sd = 1.0
dx = 0.1
xa = np.arange( 0.01, 3.0, dx )
xb = 0.0
xc = -1.5
xd = 0.0
xs_ = (xa[1:]+xa[:-1])*0.5
# overlaps
Sab, si, xab, dQab, dA, dB = product3D_s_deriv( sa,xa, sb,xb )
Scd, sj, xcd, dQcd, dC, dD = product3D_s_deriv( sc,xc, sd,xd )
# coulomb
s2 = si*si + sj*sj
s = np.sqrt(s2)
r = xab-xcd
e, fx, fs = Coulomb( r, s )
dXxi = dA[2] + xa*0
plt.plot( xa, Sab , label='Sab' )
plt.plot( xa, r , label='r' )
#plt.plot( xa, dQab, label='dSab_ana' )
#plt.plot( xs_, (Sab[1:]-Sab[:-1])/dx,':', label='dSab_num' )
qij = 4*Scd*Sab
#qij = Sab
dQij = 4*Scd*dQab
    # Q: Why don't we need derivatives of the charge ????
#Fx = -fx*0.5*dA[1] # This works for zero initial distance between blobs
Fx = fx*r*dXxi
Fpi = fx*r*qij # see
fxi = Fpi*dXxi
print "Scd, 4*Scd ", Scd, 4*Scd
print "For some reason each charge is scaled by 2.0"
E = e*qij
    F = fxi + e*dQij # total derivative F = dE/dx = d(e*qij)/dx
# Note: e,fx=de/dx are NOT multiplied by charge Qij
# Total force Fx = dE/dx = d(e*q)/dx = q*(de/dx) + e*(dq/dx)
for i in range(len(r)):
#print "Gauss::Coulomb r %g s %g E %g Fx %g fx %g " %(r[i], s, E[i], Fx[i], fx[i] )
#print "fromRho r %g s %g E %g Fx %g fx %g " %((xa-xb)[i], s, E[i], Fx[i], fx[i] )
#print "CoublombElement r %g s %g E %g fr %g qij %g frq %g fij %g" %((xa-xb)[i], s, e[i], fx[i], qij[i], (fx*qij)[i], (fx*qij*r)[i] )
#print "fromRho r %g s %g | E %g e %g qij %g(%g) | Fx %g Fpi %g dQij %g " %((xa-xb)[i], si, E[i],e[i]*2*Scd,qij[i],Sab[i], Fx[i], Fpi[i],dQij[i] )
print "fromRho r %g Eqi %g Cij %g | Fpi %g dXxi %g fxi %g Fxi %g " %((xa-xb)[i], e[i]*2*Scd, Sab[i], Fpi[i], dXxi[i], fxi[i], F[i] );
pass
# ==== Derivative of Coulomb term without considering changes of Charges
#plt.plot( xa, e , label='e' )
#plt.plot( xa, Fx, label='dedx_ana' )
#plt.plot( xs_, (e[1:]-e[:-1])/dx,':', label='dedx_num' )
# ==== Derivative of Coulomb term with considering the Charges
plt.plot( xa, E, label='E' )
plt.plot( xa, F, label='dEdx_ana' )
plt.plot( xs_, (E[1:]-E[:-1])/dx,':', label='dEdx_num', lw=3 )
plt.plot( xa, fxi, label='fxi' )
#plt.plot( xa, fx, label='fx' )
#plt.plot( xa, dXxi, label='dXxi' )
plt.grid()
plt.legend()
plt.show()