#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import ambari_simplejson as json  # simplejson is much faster than the Python 2.6 json module and provides the same function set.
import os
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries import functions
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY
stack_name = default("/hostLevelParams/stack_name", None)
# node hostname
hostname = config["hostname"]
# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)
# current host stack version
current_version = default("/hostLevelParams/current_version", None)
# When downgrading, 'version' and 'current_version' both point to the downgrade-target version;
# downgrade_from_version provides the source version the downgrade is happening from.
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
# Upgrade direction
upgrade_direction = default("/commandParams/upgrade_direction", None)
hadoop_bin_dir = "/usr/bin"
hadoop_home = '/usr'
hive_bin = '/usr/lib/hive/bin'
hive_lib = '/usr/lib/hive/lib/'
hive_var_lib = '/var/lib/hive'
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
# These tar folders were used in HDP 2.1
hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
hive_specific_configs_supported = False
hive_etc_dir_prefix = "/etc/hive"
limits_conf_dir = "/etc/security/limits.d"
hive_user_nofile_limit = default("/configurations/hive-env/hive_user_nofile_limit", "32000")
hive_user_nproc_limit = default("/configurations/hive-env/hive_user_nproc_limit", "16000")
# use the directories from status_params as they are already calculated for
# the correct version of HDP
hadoop_conf_dir = status_params.hadoop_conf_dir
hadoop_bin_dir = status_params.hadoop_bin_dir
webhcat_conf_dir = status_params.webhcat_conf_dir
hive_conf_dir = status_params.hive_conf_dir
hive_config_dir = status_params.hive_config_dir
hive_client_conf_dir = status_params.hive_client_conf_dir
hive_server_conf_dir = status_params.hive_server_conf_dir
hcat_conf_dir = '/etc/hive-hcatalog/conf'
config_dir = '/etc/hive-webhcat/conf'
hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
# Starting from HDP 2.3, DROP TABLE should be executed with the PURGE suffix
purge_tables = "false"
# --- Tarballs ---
webhcat_apps_dir = "/apps/webhcat"
# In HDP 2.1, the tarballs were copied from and to different locations.
# DON'T CHANGE THESE VARIABLE NAMES
hive_tar_source = hive_tar_file
pig_tar_source = pig_tar_file
hive_tar_dest_file = webhcat_apps_dir + "/hive.tar.gz"
pig_tar_dest_file = webhcat_apps_dir + "/pig.tar.gz"
hadoop_streaming_tar_source = hadoop_streaming_jars # this contains *
sqoop_tar_source = sqoop_tar_file # this contains *
hadoop_streaming_tar_dest_dir = webhcat_apps_dir
sqoop_tar_dest_dir = webhcat_apps_dir
tarballs_mode = 0755
execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
hive_metastore_user_passwd = unicode(hive_metastore_user_passwd) if not is_empty(hive_metastore_user_passwd) else hive_metastore_user_passwd
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
#HACK Temporarily use dbType=azuredb while invoking schematool
if hive_metastore_db_type == "mssql":
hive_metastore_db_type = "azuredb"
#users
hive_user = config['configurations']['hive-env']['hive_user']
#JDBC driver jar name
hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
# NOTE: dispatching on the JDBC driver class name is fragile because the class path
# can change between driver versions; keying off the database type would be more robust.
sqla_db_used = False
hive_previous_jdbc_jar_name = None
if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
jdbc_jar_name = "sqljdbc4.jar"
jdbc_symlink_name = "mssql-jdbc-driver.jar"
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
jdbc_jar_name = "mysql-connector-java.jar"
jdbc_symlink_name = "mysql-jdbc-driver.jar"
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
elif hive_jdbc_driver == "org.postgresql.Driver":
jdbc_jar_name = "postgresql-jdbc.jar"
jdbc_symlink_name = "postgresql-jdbc.jar"
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
jdbc_jar_name = "ojdbc.jar"
jdbc_symlink_name = "oracle-jdbc-driver.jar"
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
elif hive_jdbc_driver == "sap.jdbc4.sqlanywhere.IDriver":
jdbc_jar_name = "sajdbc4.jar"
jdbc_symlink_name = "sqlanywhere-jdbc-driver.tar.gz"
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
sqla_db_used = True
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
"org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
prepackaged_ojdbc_symlink = format("{hive_lib}/ojdbc6.jar")
templeton_port = config['configurations']['webhcat-site']['templeton.port']
#constants for type2 jdbc
jdbc_libs_dir = format("{hive_lib}/native/lib64")
lib_dir_available = os.path.exists(jdbc_libs_dir)
if sqla_db_used:
jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
downloaded_custom_connector = format("{tmp_dir}/sqla-client-jdbc.tar.gz")
libs_in_hive_lib = format("{jdbc_libs_dir}/*")
#common
hive_metastore_hosts = config['clusterHostInfo']['hive_metastore_host']
hive_metastore_host = hive_metastore_hosts[0]
hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
hive_server_hosts = config['clusterHostInfo']['hive_server_host']
hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
if hive_transport_mode.lower() == "http":
hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
else:
hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
# ssl options
hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
fs_root = config['configurations']['core-site']['fs.defaultFS']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
#hive_env
hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
#Default conf dir for client
hive_conf_dirs_list = [hive_client_conf_dir]
if hostname in hive_metastore_hosts or hostname in hive_server_hosts:
hive_conf_dirs_list.append(hive_server_conf_dir)
#hive-site
hive_database_name = config['configurations']['hive-env']['hive_database_name']
hive_database = config['configurations']['hive-env']['hive_database']
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh.j2'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
java_share_dir = '/usr/share/java'
driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
# Need this for yarn.nodemanager.recovery.dir in yarn-site
yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
target = format("{hive_lib}/{jdbc_jar_name}")
jars_in_hive_lib = format("{hive_lib}/*.jar")
source_jdbc_file = target
jdk_location = config['hostLevelParams']['jdk_location']
driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
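# driver_curl_source is the download URL for the JDBC driver, served from the Ambari
# server's resources under jdk_location; the downloaded jar is later placed into hive_lib.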
start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
start_metastore_path = format("{tmp_dir}/start_metastore_script")
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
# hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
hive_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
else:
hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
java64_home = config['hostLevelParams']['java_home']
##### MYSQL
db_name = config['configurations']['hive-env']['hive_database_name']
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
#### Metastore
# initialize the schema only if not in an upgrade/downgrade
init_metastore_schema = upgrade_direction is None
########## HCAT
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['hive-env']['hcat_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
hcat_env_sh_template = config['configurations']['hcat-env']['content']
#hive-log4j.properties.template
if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
log4j_props = config['configurations']['hive-log4j']['content']
else:
log4j_props = None
#webhcat-log4j.properties.template
if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
else:
log4j_webhcat_props = None
#hive-exec-log4j.properties.template
if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
else:
log4j_exec_props = None
daemon_name = status_params.daemon_name
process_name = status_params.process_name
hive_env_sh_template = config['configurations']['hive-env']['content']
hive_hdfs_user_dir = format("/user/{hive_user}")
hive_hdfs_user_mode = 0755
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
# Tez-related properties
tez_user = config['configurations']['tez-env']['tez_user']
# Tez jars
tez_local_api_jars = '/usr/lib/tez/tez*.jar'
tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
# Tez libraries
tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
if OSCheck.is_ubuntu_family():
mysql_configname = '/etc/mysql/my.cnf'
else:
mysql_configname = '/etc/my.cnf'
mysql_user = 'mysql'
# Hive security
hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
hive_use_existing_db = hive_database.startswith('Existing')
hive_exclude_packages = []
# Other packages (such as libmysql-java) also provide /usr/share/java/mysql-connector-java.jar;
# installing mysql-connector-java on top of them can cause package conflicts.
if hive_use_existing_db:
hive_exclude_packages = ['mysql-connector-java', 'mysql', 'mysql-server',
'mysql-community-release', 'mysql-community-server']
else:
if 'role' in config and config['role'] != "MYSQL_SERVER":
hive_exclude_packages = ['mysql', 'mysql-server', 'mysql-community-release',
'mysql-community-server']
if os.path.exists(mysql_jdbc_driver_jar):
hive_exclude_packages.append('mysql-connector-java')
hive_site_config = dict(config['configurations']['hive-site'])
########################################################
############# Atlas related params #####################
########################################################
#atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
#has_atlas = len(atlas_hosts) > 0
#classpath_addition = ""
#atlas_plugin_package = "atlas-metadata*-hive-plugin"
#atlas_ubuntu_plugin_package = "atlas-metadata.*-hive-plugin"
#
#if not has_atlas:
# hive_exclude_packages.append(atlas_plugin_package)
# hive_exclude_packages.append(atlas_ubuntu_plugin_package)
#else:
# # client.properties
# atlas_client_props = {}
# auth_enabled = config['configurations']['application-properties'].get(
# 'atlas.http.authentication.enabled', False)
# atlas_client_props['atlas.http.authentication.enabled'] = auth_enabled
# if auth_enabled:
# atlas_client_props['atlas.http.authentication.type'] = config['configurations']['application-properties'].get('atlas.http.authentication.type', 'simple')
#
#region Atlas Hooks
hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
enable_atlas_hook = default('/configurations/hive-env/hive.atlas.hook', False)
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
#endregion
########################################################
########### WebHCat related params #####################
########################################################
webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
templeton_pid_dir = status_params.hcat_pid_dir
webhcat_pid_file = status_params.webhcat_pid_file
templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
hcat_hdfs_user_dir = format("/user/{hcat_user}")
hcat_hdfs_user_mode = 0755
webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
webhcat_hdfs_user_mode = 0755
#for create_hdfs_directory
security_param = "true" if security_enabled else "false"
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
import functools
#create partial functions with common arguments for every HdfsResource call
#to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user = hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs
)
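# Illustrative call shape for the partial above (the non-pre-bound arguments are supplied
# at each call site in the stack scripts; names here are examples, not a fixed contract):
#   params.HdfsResource(webhcat_apps_dir, type="directory", action="create_on_execute",
#                       owner=webhcat_user, mode=0755)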
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
#ranger hive properties
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
sql_connector_jar = config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
xa_audit_db_name = config['configurations']['admin-properties']['audit_db_name']
xa_audit_db_user = config['configurations']['admin-properties']['audit_db_user']
xa_db_host = config['configurations']['admin-properties']['db_host']
repo_name = str(config['clusterName']) + '_hive'
jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
ranger_env = config['configurations']['ranger-env']
ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
if security_enabled:
hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
#For curl command in ranger plugin to get db connector
if has_ranger_admin:
enable_ranger_hive = (config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger')
repo_config_password = unicode(config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
ranger_jdbc_symlink_name = "mysql-jdbc-driver.jar"
ranger_jdbc_jar_name = "mysql-connector-java.jar"
audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "com.mysql.jdbc.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
ranger_jdbc_jar_name = "ojdbc6.jar"
ranger_jdbc_symlink_name = "oracle-jdbc-driver.jar"
colon_count = xa_db_host.count(':')
if colon_count == 2 or colon_count == 0:
audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
else:
audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
jdbc_driver = "oracle.jdbc.OracleDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
ranger_jdbc_jar_name = "postgresql.jar"
ranger_jdbc_symlink_name = "postgres-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "org.postgresql.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
ranger_jdbc_jar_name = "sqljdbc4.jar"
ranger_jdbc_symlink_name = "mssql-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
ranger_jdbc_jar_name = "sajdbc4.jar"
ranger_jdbc_symlink_name = "sqlanywhere-jdbc-driver.tar.gz"
audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}")
ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_symlink_name}")
ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}")
hive_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'jdbc.driverClassName': jdbc_driver_class_name,
'jdbc.url': format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url,
'commonNameForCertificate': common_name_for_certificate
}
hive_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(hive_ranger_plugin_config),
'description': 'hive repo',
'name': repo_name,
'repositoryType': 'hive',
'assetType': '3'
}
xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db'] if xml_configurations_supported else None
xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
ssl_keystore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
ssl_truststore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
#For SQLA explicitly disable audit to DB for Ranger
if xa_audit_db_flavor == 'sqla':
xa_audit_db_is_enabled = False
ranger_downloaded_custom_connector = None
ranger_previous_jdbc_jar_name = None
ranger_driver_curl_source = None
ranger_driver_curl_target = None
ranger_previous_jdbc_jar = None
# to get db connector related properties
if has_ranger_admin:
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}")
ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}")
ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}")
ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}")
sql_connector_jar = ''
# -*- coding: utf-8 -*-
from .. import base
from girder.models.collection import Collection
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.user import User
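# The assertions below inspect users, folders, items, files, and collections that the
# test harness pre-populates (presumably from a setup-database fixture) before this
# module runs; each test only verifies that the expected documents exist.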
class SetupDatabaseTestCase(base.TestCase):
def testAdmin(self):
admin = User().findOne({'login': 'admin'})
self.assertDictContains({
'firstName': 'First',
'lastName': 'Last',
'email': '[email protected]',
'admin': True
}, admin, 'Admin user')
folder = Folder().findOne({'parentId': admin['_id']})
self.assertDictContains({
'name': 'folder'
}, folder, 'imported folder')
item = Item().findOne({'folderId': folder['_id']})
self.assertDictContains({
'name': 'file.txt'
}, item, 'imported item')
file = File().findOne({'itemId': item['_id']})
self.assertDictContains({
'name': 'file.txt',
'mimeType': 'text/plain',
'size': 5
}, file, 'imported file')
def testUserDefaultFolders(self):
user = User().findOne({'login': 'defaultfolders'})
self.assertDictContains({
'firstName': 'User',
'lastName': 'One',
'admin': False
}, user, 'defaultFolders user')
folder = Folder().findOne({'parentId': user['_id'], 'name': 'Public'})
self.assertDictContains({
'public': True
}, folder, 'automatically created public folder')
folder = Folder().findOne({'parentId': user['_id'], 'name': 'Additional folder'})
self.assertDictContains({
'public': True
}, folder, 'manually created public folder')
self.assertDictContains({
'creatorId': user['_id']
}, folder, 'folder is created by expected user')
def testUserImportedFolders(self):
user = User().findOne({'login': 'importedfolders'})
self.assertDictContains({
'firstName': 'User',
'lastName': 'Two',
'admin': False
}, user, 'defaultFolders user')
folder = Folder().findOne({'parentId': user['_id']})
self.assertDictContains({
'name': 'folder'
}, folder, 'imported folder')
item = Item().findOne({'folderId': folder['_id']})
self.assertDictContains({
'name': 'file.txt'
}, item, 'imported item')
file = File().findOne({'itemId': item['_id']})
self.assertDictContains({
'name': 'file.txt',
'mimeType': 'text/plain',
'size': 5
}, file, 'imported file')
def testUserFolderWithAlternateCreator(self):
admin = User().findOne({'login': 'admin'})
user = User().findOne({'login': 'creatortest'})
self.assertDictContains({
'firstName': 'User',
'lastName': 'Three',
'admin': False
}, user, 'creatortest user')
folder = Folder().findOne({'parentId': user['_id']})
self.assertDictContains({
'name': 'Created by admin',
'creatorId': admin['_id']
}, folder, 'admin created folder')
def testManuallyCreatedCollection(self):
admin = User().findOne({'login': 'admin'})
user = User().findOne({'login': 'defaultfolders'})
collection = Collection().findOne({'name': 'Public Collection'})
self.assertDictContains({
'description': 'This is an example collection',
'public': True,
'creatorId': admin['_id']
}, collection, 'Public collection')
folder = Folder().findOne({'name': 'Folder 1', 'parentId': collection['_id']})
self.assertDictContains({
'description': 'This is a public folder',
'public': True,
'creatorId': admin['_id']
}, folder, 'Public folder')
item = Item().findOne(
{'name': 'Item 1', 'folderId': folder['_id']})
self.assertDictContains({
'description': 'This is an item',
'creatorId': admin['_id']
}, item, 'Item 1')
file = File().findOne({'name': 'File1.txt', 'itemId': item['_id']})
self.assertDictContains({
'mimeType': 'text/plain'
}, file, 'File1.txt')
file = File().findOne({'name': 'File2.txt', 'itemId': item['_id']})
self.assertDictContains({
'mimeType': 'application/json'
}, file, 'File2.txt')
folder = Folder().findOne({'name': 'Private folder', 'parentId': folder['_id']})
self.assertDictContains({
'description': 'Private folder in a public folder',
'public': False,
'creatorId': user['_id']
}, folder, 'Private folder')
def assertImported(self, parent):
admin = User().findOne({'login': 'admin'})
folder = Folder().findOne({'name': 'folder1', 'parentId': parent['_id']})
self.assertDictContains({
'creatorId': admin['_id']
}, folder, 'folder1')
item = Item().findOne(
{'name': 'emptyfile.txt', 'folderId': folder['_id']})
self.assertDictContains({
'creatorId': admin['_id']
}, item, 'emptyfile')
file = File().findOne({'itemId': item['_id']})
self.assertEqual(file['name'], 'emptyfile.txt')
item = Item().findOne(
{'name': 'file.txt', 'folderId': folder['_id']})
self.assertDictContains({
'creatorId': admin['_id']
        }, item, 'file.txt')
file = File().findOne({'itemId': item['_id']})
self.assertDictContains({
'name': 'file.txt',
'mimeType': 'text/plain',
'size': 5
}, file, 'file.txt')
folder = Folder().findOne({'name': 'folder2', 'parentId': folder['_id']})
self.assertDictContains({
'creatorId': admin['_id']
}, folder, 'folder2')
item = Item().findOne({'name': 'icon.png', 'folderId': folder['_id']})
self.assertDictContains({
'creatorId': admin['_id']
}, item, 'icon.png')
file = File().findOne({'itemId': item['_id']})
self.assertDictContains({
'name': 'icon.png',
'mimeType': 'image/png',
'size': 1494
}, file, 'icon.png')
def testImportedCollection(self):
admin = User().findOne({'login': 'admin'})
collection = Collection().findOne({'name': 'Imported collection'})
self.assertDictContains({
'creatorId': admin['_id']
}, collection, 'Public collection')
self.assertImported(collection)
def testImportedFolder(self):
admin = User().findOne({'login': 'admin'})
collection = Collection().findOne({'name': 'Imported folder collection'})
self.assertDictContains({
'creatorId': admin['_id']
}, collection, 'Imported folder collection')
folder = Folder().findOne({'name': 'Imported folder', 'parentId': collection['_id']})
self.assertDictContains({
'creatorId': admin['_id']
}, folder, 'imported folder root')
item = Item().findOne({'name': 'item.txt', 'folderId': folder['_id']})
self.assertDictContains({
'creatorId': admin['_id']
}, item, 'item.txt')
self.assertImported(folder)
def testYAMLAliases(self):
folderModel = Folder()
aliasedFolders = list(folderModel.find({'name': 'Common'}, force=True))
self.assertTrue(len(aliasedFolders) == 2)
for folder in aliasedFolders:
self.assertTrue(
len(list(folderModel.childItems(folder, force=True))) == 2
)
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import threading
import unittest
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
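# Toy (de)serializers for the tests below: serialization repeats the bytestring
# (x2 for requests, x3 for responses) and deserialization takes the matching slice,
# so a round trip reproduces the original payload while still exercising the hooks.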
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
class _Handler(object):
def __init__(self, control):
self._control = control
def handle_unary_unary(self, request, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
return request
def handle_unary_stream(self, request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
yield request
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
def handle_stream_unary(self, request_iterator, servicer_context):
if servicer_context is not None:
servicer_context.invocation_metadata()
self._control.control()
response_elements = []
for request in request_iterator:
self._control.control()
response_elements.append(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
return b''.join(response_elements)
def handle_stream_stream(self, request_iterator, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
for request in request_iterator:
self._control.control()
yield request
self._control.control()
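# Thin grpc.RpcMethodHandler whose attributes are filled in directly from the constructor
# arguments; only one of the four behavior callables is non-None for any given method.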
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming,
request_deserializer, response_serializer, unary_unary,
unary_stream, stream_unary, stream_stream):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = request_deserializer
self.response_serializer = response_serializer
self.unary_unary = unary_unary
self.unary_stream = unary_stream
self.stream_unary = stream_unary
self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
def __init__(self, handler):
self._handler = handler
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False, None, None,
self._handler.handle_unary_unary, None, None,
None)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None,
self._handler.handle_unary_stream, None, None)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None, None,
self._handler.handle_stream_unary, None)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True, None, None, None, None, None,
self._handler.handle_stream_stream)
else:
return None
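# Request iterator that yields `bytestring` `high` times and then raises, so the
# client-side request stream fails partway through the call.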
class FailAfterFewIterationsCounter(object):
def __init__(self, high, bytestring):
self._current = 0
self._high = high
self._bytestring = bytestring
def __iter__(self):
return self
def __next__(self):
if self._current >= self._high:
raise Exception("This is a deliberate failure in a unit test.")
else:
self._current += 1
return self._bytestring
def _unary_unary_multi_callable(channel):
return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
return channel.unary_stream(
_UNARY_STREAM,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
return channel.stream_unary(
_STREAM_UNARY,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
return channel.stream_stream(_STREAM_STREAM)
class InvocationDefectsTest(unittest.TestCase):
def setUp(self):
self._control = test_control.PauseFailControl()
self._handler = _Handler(self._control)
self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(self._server_pool)
port = self._server.add_insecure_port('[::]:0')
self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(0)
def testIterableStreamRequestBlockingUnaryResponse(self):
requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
multi_callable = _stream_unary_multi_callable(self._channel)
with self.assertRaises(grpc.RpcError):
response = multi_callable(
requests,
metadata=(
('test', 'IterableStreamRequestBlockingUnaryResponse'),))
def testIterableStreamRequestFutureUnaryResponse(self):
requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
multi_callable = _stream_unary_multi_callable(self._channel)
response_future = multi_callable.future(
requests,
metadata=(('test', 'IterableStreamRequestFutureUnaryResponse'),))
with self.assertRaises(grpc.RpcError):
response = response_future.result()
def testIterableStreamRequestStreamResponse(self):
requests = [b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)]
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
requests,
metadata=(('test', 'IterableStreamRequestStreamResponse'),))
with self.assertRaises(grpc.RpcError):
next(response_iterator)
def testIteratorStreamRequestStreamResponse(self):
requests_iterator = FailAfterFewIterationsCounter(
test_constants.STREAM_LENGTH // 2, b'\x07\x08')
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
requests_iterator,
metadata=(('test', 'IteratorStreamRequestStreamResponse'),))
with self.assertRaises(grpc.RpcError):
for _ in range(test_constants.STREAM_LENGTH // 2 + 1):
next(response_iterator)
if __name__ == '__main__':
unittest.main(verbosity=2)
"""This library brings support for forked_daapd to Home Assistant."""
import asyncio
from collections import defaultdict
import logging
from pyforked_daapd import ForkedDaapdAPI
from pylibrespot_java import LibrespotJavaAPI
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import MEDIA_TYPE_MUSIC
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.util.dt import utcnow
from .const import (
CALLBACK_TIMEOUT,
CONF_LIBRESPOT_JAVA_PORT,
CONF_MAX_PLAYLISTS,
CONF_TTS_PAUSE_TIME,
CONF_TTS_VOLUME,
DEFAULT_TTS_PAUSE_TIME,
DEFAULT_TTS_VOLUME,
DEFAULT_UNMUTE_VOLUME,
DOMAIN,
FD_NAME,
HASS_DATA_REMOVE_LISTENERS_KEY,
HASS_DATA_UPDATER_KEY,
KNOWN_PIPES,
PIPE_FUNCTION_MAP,
SIGNAL_ADD_ZONES,
SIGNAL_CONFIG_OPTIONS_UPDATE,
SIGNAL_UPDATE_DATABASE,
SIGNAL_UPDATE_MASTER,
SIGNAL_UPDATE_OUTPUTS,
SIGNAL_UPDATE_PLAYER,
SIGNAL_UPDATE_QUEUE,
SOURCE_NAME_CLEAR,
SOURCE_NAME_DEFAULT,
STARTUP_DATA,
SUPPORTED_FEATURES,
SUPPORTED_FEATURES_ZONE,
TTS_TIMEOUT,
)
_LOGGER = logging.getLogger(__name__)
WS_NOTIFY_EVENT_TYPES = ["player", "outputs", "volume", "options", "queue", "database"]
WEBSOCKET_RECONNECT_TIME = 30 # seconds
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up forked-daapd from a config entry."""
host = config_entry.data[CONF_HOST]
port = config_entry.data[CONF_PORT]
password = config_entry.data[CONF_PASSWORD]
forked_daapd_api = ForkedDaapdAPI(
async_get_clientsession(hass), host, port, password
)
forked_daapd_master = ForkedDaapdMaster(
clientsession=async_get_clientsession(hass),
api=forked_daapd_api,
ip_address=host,
api_port=port,
api_password=password,
config_entry=config_entry,
)
@callback
def async_add_zones(api, outputs):
zone_entities = []
for output in outputs:
zone_entities.append(ForkedDaapdZone(api, output, config_entry.entry_id))
async_add_entities(zone_entities, False)
remove_add_zones_listener = async_dispatcher_connect(
hass, SIGNAL_ADD_ZONES.format(config_entry.entry_id), async_add_zones
)
remove_entry_listener = config_entry.add_update_listener(update_listener)
if not hass.data.get(DOMAIN):
hass.data[DOMAIN] = {config_entry.entry_id: {}}
hass.data[DOMAIN][config_entry.entry_id] = {
HASS_DATA_REMOVE_LISTENERS_KEY: [
remove_add_zones_listener,
remove_entry_listener,
]
}
async_add_entities([forked_daapd_master], False)
forked_daapd_updater = ForkedDaapdUpdater(
hass, forked_daapd_api, config_entry.entry_id
)
await forked_daapd_updater.async_init()
hass.data[DOMAIN][config_entry.entry_id][
HASS_DATA_UPDATER_KEY
] = forked_daapd_updater
async def update_listener(hass, entry):
"""Handle options update."""
async_dispatcher_send(
hass, SIGNAL_CONFIG_OPTIONS_UPDATE.format(entry.entry_id), entry.options
)
class ForkedDaapdZone(MediaPlayerEntity):
"""Representation of a forked-daapd output."""
def __init__(self, api, output, entry_id):
"""Initialize the ForkedDaapd Zone."""
self._api = api
self._output = output
self._output_id = output["id"]
self._last_volume = DEFAULT_UNMUTE_VOLUME # used for mute/unmute
self._available = True
self._entry_id = entry_id
async def async_added_to_hass(self):
"""Use lifecycle hooks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_OUTPUTS.format(self._entry_id),
self._async_update_output_callback,
)
)
@callback
def _async_update_output_callback(self, outputs, _event=None):
new_output = next(
(output for output in outputs if output["id"] == self._output_id), None
)
self._available = bool(new_output)
if self._available:
self._output = new_output
self.async_write_ha_state()
@property
def unique_id(self):
"""Return unique ID."""
return f"{self._entry_id}-{self._output_id}"
@property
def should_poll(self) -> bool:
"""Entity pushes its state to HA."""
return False
async def async_toggle(self):
"""Toggle the power on the zone."""
if self.state == STATE_OFF:
await self.async_turn_on()
else:
await self.async_turn_off()
@property
def available(self) -> bool:
"""Return whether the zone is available."""
return self._available
async def async_turn_on(self):
"""Enable the output."""
await self._api.change_output(self._output_id, selected=True)
async def async_turn_off(self):
"""Disable the output."""
await self._api.change_output(self._output_id, selected=False)
@property
def name(self):
"""Return the name of the zone."""
return f"{FD_NAME} output ({self._output['name']})"
@property
def state(self):
"""State of the zone."""
if self._output["selected"]:
return STATE_ON
return STATE_OFF
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._output["volume"] / 100
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._output["volume"] == 0
async def async_mute_volume(self, mute):
"""Mute the volume."""
if mute:
if self.volume_level == 0:
return
self._last_volume = self.volume_level # store volume level to restore later
target_volume = 0
else:
target_volume = self._last_volume # restore volume level
await self.async_set_volume_level(volume=target_volume)
async def async_set_volume_level(self, volume):
"""Set volume - input range [0,1]."""
await self._api.set_volume(volume=volume * 100, output_id=self._output_id)
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORTED_FEATURES_ZONE
class ForkedDaapdMaster(MediaPlayerEntity):
"""Representation of the main forked-daapd device."""
def __init__(
self, clientsession, api, ip_address, api_port, api_password, config_entry
):
"""Initialize the ForkedDaapd Master Device."""
self._api = api
self._player = STARTUP_DATA[
"player"
] # _player, _outputs, and _queue are loaded straight from api
self._outputs = STARTUP_DATA["outputs"]
self._queue = STARTUP_DATA["queue"]
self._track_info = defaultdict(
str
) # _track info is found by matching _player data with _queue data
self._last_outputs = [] # used for device on/off
self._last_volume = DEFAULT_UNMUTE_VOLUME
self._player_last_updated = None
self._pipe_control_api = {}
self._ip_address = (
ip_address # need to save this because pipe control is on same ip
)
self._tts_pause_time = DEFAULT_TTS_PAUSE_TIME
self._tts_volume = DEFAULT_TTS_VOLUME
self._tts_requested = False
self._tts_queued = False
self._tts_playing_event = asyncio.Event()
self._on_remove = None
self._available = False
self._clientsession = clientsession
self._config_entry = config_entry
self.update_options(config_entry.options)
self._paused_event = asyncio.Event()
self._pause_requested = False
self._sources_uris = {}
self._source = SOURCE_NAME_DEFAULT
self._max_playlists = None
async def async_added_to_hass(self):
"""Use lifecycle hooks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_PLAYER.format(self._config_entry.entry_id),
self._update_player,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_QUEUE.format(self._config_entry.entry_id),
self._update_queue,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_OUTPUTS.format(self._config_entry.entry_id),
self._update_outputs,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_MASTER.format(self._config_entry.entry_id),
self._update_callback,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_CONFIG_OPTIONS_UPDATE.format(self._config_entry.entry_id),
self.update_options,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_DATABASE.format(self._config_entry.entry_id),
self._update_database,
)
)
@callback
def _update_callback(self, available):
"""Call update method."""
self._available = available
self.async_write_ha_state()
@callback
def update_options(self, options):
"""Update forked-daapd server options."""
if CONF_LIBRESPOT_JAVA_PORT in options:
self._pipe_control_api["librespot-java"] = LibrespotJavaAPI(
self._clientsession, self._ip_address, options[CONF_LIBRESPOT_JAVA_PORT]
)
if CONF_TTS_PAUSE_TIME in options:
self._tts_pause_time = options[CONF_TTS_PAUSE_TIME]
if CONF_TTS_VOLUME in options:
self._tts_volume = options[CONF_TTS_VOLUME]
if CONF_MAX_PLAYLISTS in options:
# sources not updated until next _update_database call
self._max_playlists = options[CONF_MAX_PLAYLISTS]
@callback
def _update_player(self, player, event):
self._player = player
self._player_last_updated = utcnow()
self._update_track_info()
if self._tts_queued:
self._tts_playing_event.set()
self._tts_queued = False
if self._pause_requested:
self._paused_event.set()
self._pause_requested = False
event.set()
@callback
def _update_queue(self, queue, event):
self._queue = queue
if (
self._tts_requested
and self._queue["count"] == 1
and self._queue["items"][0]["uri"].find("tts_proxy") != -1
):
self._tts_requested = False
self._tts_queued = True
if (
self._queue["count"] >= 1
and self._queue["items"][0]["data_kind"] == "pipe"
and self._queue["items"][0]["title"] in KNOWN_PIPES
): # if we're playing a pipe, set the source automatically so we can forward controls
self._source = f"{self._queue['items'][0]['title']} (pipe)"
self._update_track_info()
event.set()
@callback
def _update_outputs(self, outputs, event=None):
if event: # Calling without event is meant for zone, so ignore
self._outputs = outputs
event.set()
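    # _update_database rebuilds the selectable sources from the server's pipes and
    # playlists, mapping display names like "<title> (pipe)" / "<name> (playlist)" to URIs.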
@callback
def _update_database(self, pipes, playlists, event):
self._sources_uris = {SOURCE_NAME_CLEAR: None, SOURCE_NAME_DEFAULT: None}
if pipes:
self._sources_uris.update(
{
f"{pipe['title']} (pipe)": pipe["uri"]
for pipe in pipes
if pipe["title"] in KNOWN_PIPES
}
)
if playlists:
self._sources_uris.update(
{
f"{playlist['name']} (playlist)": playlist["uri"]
for playlist in playlists[: self._max_playlists]
}
)
event.set()
def _update_track_info(self): # run during every player or queue update
try:
self._track_info = next(
track
for track in self._queue["items"]
if track["id"] == self._player["item_id"]
)
except (StopIteration, TypeError, KeyError):
_LOGGER.debug("Could not get track info")
self._track_info = defaultdict(str)
@property
def unique_id(self):
"""Return unique ID."""
return self._config_entry.entry_id
@property
def should_poll(self) -> bool:
"""Entity pushes its state to HA."""
return False
@property
def available(self) -> bool:
"""Return whether the master is available."""
return self._available
async def async_turn_on(self):
"""Restore the last on outputs state."""
# restore state
await self._api.set_volume(volume=self._last_volume * 100)
if self._last_outputs:
futures = []
for output in self._last_outputs:
futures.append(
self._api.change_output(
output["id"],
selected=output["selected"],
volume=output["volume"],
)
)
await asyncio.wait(futures)
else: # enable all outputs
await self._api.set_enabled_outputs(
[output["id"] for output in self._outputs]
)
async def async_turn_off(self):
"""Pause player and store outputs state."""
await self.async_media_pause()
self._last_outputs = self._outputs
if any(output["selected"] for output in self._outputs):
await self._api.set_enabled_outputs([])
async def async_toggle(self):
"""Toggle the power on the device.
Default media player component method counts idle as off.
We consider idle to be on but just not playing.
"""
if self.state == STATE_OFF:
await self.async_turn_on()
else:
await self.async_turn_off()
@property
def name(self):
"""Return the name of the device."""
return f"{FD_NAME} server"
@property
def state(self):
"""State of the player."""
if self._player["state"] == "play":
return STATE_PLAYING
if self._player["state"] == "pause":
return STATE_PAUSED
if not any(output["selected"] for output in self._outputs):
return STATE_OFF
if self._player["state"] == "stop": # this should catch all remaining cases
return STATE_IDLE
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player["volume"] / 100
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._player["volume"] == 0
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._player["item_id"]
@property
def media_content_type(self):
"""Content type of current playing media."""
return self._track_info["media_kind"]
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._player["item_length_ms"] / 1000
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._player["item_progress_ms"] / 1000
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._player_last_updated
@property
def media_title(self):
"""Title of current playing media."""
# Use album field when data_kind is url
# https://github.com/ejurgensen/forked-daapd/issues/351
if self._track_info["data_kind"] == "url":
return self._track_info["album"]
return self._track_info["title"]
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._track_info["artist"]
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
# Use title field when data_kind is url
# https://github.com/ejurgensen/forked-daapd/issues/351
if self._track_info["data_kind"] == "url":
return self._track_info["title"]
return self._track_info["album"]
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self._track_info["album_artist"]
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self._track_info["track_number"]
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return self._player["shuffle"]
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORTED_FEATURES
@property
def source(self):
"""Name of the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return [*self._sources_uris]
async def async_mute_volume(self, mute):
"""Mute the volume."""
if mute:
if self.volume_level == 0:
return
self._last_volume = self.volume_level # store volume level to restore later
target_volume = 0
else:
target_volume = self._last_volume # restore volume level
await self._api.set_volume(volume=target_volume * 100)
async def async_set_volume_level(self, volume):
"""Set volume - input range [0,1]."""
await self._api.set_volume(volume=volume * 100)
async def async_media_play(self):
"""Start playback."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_play")
else:
await self._api.start_playback()
async def async_media_pause(self):
"""Pause playback."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_pause")
else:
await self._api.pause_playback()
async def async_media_stop(self):
"""Stop playback."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_stop")
else:
await self._api.stop_playback()
async def async_media_previous_track(self):
"""Skip to previous track."""
if self._use_pipe_control():
await self._pipe_call(
self._use_pipe_control(), "async_media_previous_track"
)
else:
await self._api.previous_track()
async def async_media_next_track(self):
"""Skip to next track."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_next_track")
else:
await self._api.next_track()
async def async_media_seek(self, position):
"""Seek to position."""
await self._api.seek(position_ms=position * 1000)
async def async_clear_playlist(self):
"""Clear playlist."""
await self._api.clear_queue()
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self._api.shuffle(shuffle)
@property
def media_image_url(self):
"""Image url of current playing media."""
if url := self._track_info.get("artwork_url"):
url = self._api.full_url(url)
return url
async def _save_and_set_tts_volumes(self):
if self.volume_level: # save master volume
self._last_volume = self.volume_level
self._last_outputs = self._outputs
if self._outputs:
await self._api.set_volume(volume=self._tts_volume * 100)
futures = []
for output in self._outputs:
futures.append(
self._api.change_output(
output["id"], selected=True, volume=self._tts_volume * 100
)
)
await asyncio.wait(futures)
async def _pause_and_wait_for_callback(self):
"""Send pause and wait for the pause callback to be received."""
self._pause_requested = True
await self.async_media_pause()
try:
await asyncio.wait_for(
self._paused_event.wait(), timeout=CALLBACK_TIMEOUT
) # wait for paused
except asyncio.TimeoutError:
self._pause_requested = False
self._paused_event.clear()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a URI."""
if media_type == MEDIA_TYPE_MUSIC:
saved_state = self.state # save play state
saved_mute = self.is_volume_muted
sleep_future = asyncio.create_task(
asyncio.sleep(self._tts_pause_time)
) # start timing now, but not exact because of fd buffer + tts latency
await self._pause_and_wait_for_callback()
await self._save_and_set_tts_volumes()
# save position
saved_song_position = self._player["item_progress_ms"]
saved_queue = (
self._queue if self._queue["count"] > 0 else None
) # stash queue
if saved_queue:
saved_queue_position = next(
i
for i, item in enumerate(saved_queue["items"])
if item["id"] == self._player["item_id"]
)
self._tts_requested = True
await sleep_future
await self._api.add_to_queue(uris=media_id, playback="start", clear=True)
try:
await asyncio.wait_for(
self._tts_playing_event.wait(), timeout=TTS_TIMEOUT
)
# we have started TTS, now wait for completion
await asyncio.sleep(
self._queue["items"][0]["length_ms"]
/ 1000 # player may not have updated yet so grab length from queue
+ self._tts_pause_time
)
except asyncio.TimeoutError:
self._tts_requested = False
_LOGGER.warning("TTS request timed out")
self._tts_playing_event.clear()
# TTS done, return to normal
await self.async_turn_on() # restore outputs and volumes
if saved_mute: # mute if we were muted
await self.async_mute_volume(True)
if self._use_pipe_control(): # resume pipe
await self._api.add_to_queue(
uris=self._sources_uris[self._source], clear=True
)
if saved_state == STATE_PLAYING:
await self.async_media_play()
else: # restore stashed queue
if saved_queue:
uris = ""
for item in saved_queue["items"]:
uris += item["uri"] + ","
await self._api.add_to_queue(
uris=uris,
playback="start",
playback_from_position=saved_queue_position,
clear=True,
)
await self._api.seek(position_ms=saved_song_position)
if saved_state == STATE_PAUSED:
await self.async_media_pause()
elif saved_state != STATE_PLAYING:
await self.async_media_stop()
else:
_LOGGER.debug("Media type '%s' not supported", media_type)
async def async_select_source(self, source):
"""Change source.
        The source name reflects whether the player is in default mode or
        pipe mode. Selecting a playlist or Clear loads the playlist or
        clears the queue, but the player ends up in default mode.
"""
if source == self._source:
return
if self._use_pipe_control(): # if pipe was playing, we need to stop it first
await self._pause_and_wait_for_callback()
self._source = source
if not self._use_pipe_control(): # playlist or clear ends up at default
self._source = SOURCE_NAME_DEFAULT
if self._sources_uris.get(source): # load uris for pipes or playlists
await self._api.add_to_queue(uris=self._sources_uris[source], clear=True)
elif source == SOURCE_NAME_CLEAR: # clear playlist
await self._api.clear_queue()
self.async_write_ha_state()
def _use_pipe_control(self):
"""Return which pipe control from KNOWN_PIPES to use."""
if self._source[-7:] == " (pipe)":
return self._source[:-7]
return ""
async def _pipe_call(self, pipe_name, base_function_name):
if self._pipe_control_api.get(pipe_name):
return await getattr(
self._pipe_control_api[pipe_name],
PIPE_FUNCTION_MAP[pipe_name][base_function_name],
)()
_LOGGER.warning("No pipe control available for %s", pipe_name)
class ForkedDaapdUpdater:
"""Manage updates for the forked-daapd device."""
def __init__(self, hass, api, entry_id):
"""Initialize."""
self.hass = hass
self._api = api
self.websocket_handler = None
self._all_output_ids = set()
self._entry_id = entry_id
async def async_init(self):
"""Perform async portion of class initialization."""
server_config = await self._api.get_request("config")
if websocket_port := server_config.get("websocket_port"):
self.websocket_handler = asyncio.create_task(
self._api.start_websocket_handler(
websocket_port,
WS_NOTIFY_EVENT_TYPES,
self._update,
WEBSOCKET_RECONNECT_TIME,
self._disconnected_callback,
)
)
else:
_LOGGER.error("Invalid websocket port")
def _disconnected_callback(self):
async_dispatcher_send(
self.hass, SIGNAL_UPDATE_MASTER.format(self._entry_id), False
)
async_dispatcher_send(
self.hass, SIGNAL_UPDATE_OUTPUTS.format(self._entry_id), []
)
async def _update(self, update_types):
"""Private update method."""
update_types = set(update_types)
update_events = {}
_LOGGER.debug("Updating %s", update_types)
if (
"queue" in update_types
): # update queue, queue before player for async_play_media
queue = await self._api.get_request("queue")
if queue:
update_events["queue"] = asyncio.Event()
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_QUEUE.format(self._entry_id),
queue,
update_events["queue"],
)
        # the order of the updates below doesn't matter
if not {"outputs", "volume"}.isdisjoint(update_types): # update outputs
outputs = await self._api.get_request("outputs")
if outputs:
outputs = outputs["outputs"]
update_events[
"outputs"
] = asyncio.Event() # only for master, zones should ignore
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_OUTPUTS.format(self._entry_id),
outputs,
update_events["outputs"],
)
self._add_zones(outputs)
if not {"database"}.isdisjoint(update_types):
pipes, playlists = await asyncio.gather(
self._api.get_pipes(), self._api.get_playlists()
)
update_events["database"] = asyncio.Event()
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_DATABASE.format(self._entry_id),
pipes,
playlists,
update_events["database"],
)
if not {"update", "config"}.isdisjoint(update_types): # not supported
_LOGGER.debug("update/config notifications neither requested nor supported")
if not {"player", "options", "volume"}.isdisjoint(
update_types
): # update player
player = await self._api.get_request("player")
if player:
update_events["player"] = asyncio.Event()
if update_events.get("queue"):
await update_events[
"queue"
].wait() # make sure queue done before player for async_play_media
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_PLAYER.format(self._entry_id),
player,
update_events["player"],
)
if update_events:
await asyncio.wait(
[asyncio.create_task(event.wait()) for event in update_events.values()]
) # make sure callbacks done before update
async_dispatcher_send(
self.hass, SIGNAL_UPDATE_MASTER.format(self._entry_id), True
)
def _add_zones(self, outputs):
outputs_to_add = []
for output in outputs:
if output["id"] not in self._all_output_ids:
self._all_output_ids.add(output["id"])
outputs_to_add.append(output)
if outputs_to_add:
async_dispatcher_send(
self.hass,
SIGNAL_ADD_ZONES.format(self._entry_id),
self._api,
outputs_to_add,
)
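# Hedged sketch (not part of the original module): entities receive the
# updates dispatched above via async_dispatcher_connect from
# homeassistant.helpers.dispatcher, roughly:
#
#     async def async_added_to_hass(self):
#         self.async_on_remove(
#             async_dispatcher_connect(
#                 self.hass,
#                 SIGNAL_UPDATE_PLAYER.format(self._entry_id),
#                 self._update_player,
#             )
#         )
#
# where _update_player is a hypothetical callback that applies the player dict
# and sets the asyncio.Event passed alongside it.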
|
|
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from utils import remap
from tool.kallisto_indexer import kallistoIndexerTool
from tool.kallisto_quant import kallistoQuantificationTool
# ------------------------------------------------------------------------------
class process_rnaseq(Workflow):
"""
    Functions for downloading and processing RNA-seq FastQ files. Files are
    downloaded from the European Nucleotide Archive (ENA) and then processed
    with Kallisto to quantify cDNA abundance.
"""
def __init__(self, configuration=None):
"""
Initialise the class
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("Processing RNA-Seq")
if configuration is None:
configuration = {}
self.configuration.update(configuration)
def run(self, input_files, metadata, output_files):
"""
Main run function for processing RNA-Seq FastQ data. Pipeline aligns
the FASTQ files to the genome using Kallisto. Kallisto is then also
used for peak calling to identify levels of expression.
Parameters
----------
files_ids : dict
List of file locations (genome FASTA, FASTQ_01, FASTQ_02 (for
paired ends))
metadata : dict
Required meta data
output_files : dict
List of output file locations
Returns
-------
outputfiles : list
List of locations for the output bam, bed and tsv files
Parameters
----------
input_files : list
List of file locations
metadata : list
Required meta data
output_files : list
List of output file locations
Returns
-------
outputfiles : dict
List of locations for the output index files
output_metadata : dict
Metadata about each of the files
"""
if "cdna_public" in input_files:
input_files["cdna"] = input_files.pop("cdna_public")
metadata["cdna"] = metadata.pop("cdna_public")
if "gff_public" in input_files:
input_files["gff"] = input_files.pop("gff_public")
metadata["gff"] = metadata.pop("gff_public")
# Index the cDNA
# This could get moved to the general tools section
k_index = kallistoIndexerTool(self.configuration)
logger.progress("Kallisto Indexer", status="RUNNING")
k_out, k_meta = k_index.run(
{"cdna": input_files["cdna"]},
{"cdna": metadata["cdna"]},
{"index": output_files["index"]}
)
logger.progress("Kallisto Indexer", status="DONE")
if "index" not in k_out:
logger.fatal("Kallisto: Index has not been generated")
return {}, {}
# Quantification
k_quant = kallistoQuantificationTool()
logger.progress("Kallisto Quant", status="RUNNING")
if "fastq2" not in input_files:
kq_input_files = {
"cdna": input_files["cdna"],
"fastq1": input_files["fastq1"],
"index": k_out["index"],
"gff": input_files["gff"],
}
kq_input_meta = {
"cdna": metadata["cdna"],
"fastq1": metadata["fastq1"],
"gff": metadata["gff"],
"index": k_meta["index"]
}
kq_files, kq_meta = k_quant.run(
kq_input_files,
kq_input_meta,
remap(
output_files,
"abundance_h5_file", "abundance_tsv_file",
"abundance_gff_file", "run_info_file"
)
)
elif "fastq2" in input_files:
kq_input_files = {
"cdna": input_files["cdna"],
"fastq1": input_files["fastq1"],
"fastq2": input_files["fastq2"],
"index": k_out["index"],
"gff": input_files["gff"],
}
kq_input_meta = {
"cdna": metadata["cdna"],
"fastq1": metadata["fastq1"],
"fastq2": metadata["fastq2"],
"index": k_meta["index"],
"gff": metadata["gff"],
}
kq_files, kq_meta = k_quant.run(
kq_input_files,
kq_input_meta,
remap(
output_files,
"abundance_h5_file", "abundance_tsv_file",
"abundance_gff_file", "run_info_file")
)
logger.progress("Kallisto Quant", status="DONE")
        try:
            kq_files["index"] = k_out["index"]
            kq_meta["index"] = k_meta["index"]
            # Record the original tool name and re-label each output as
            # coming from the process_rnaseq pipeline
            for result_key in ("index", "abundance_h5_file",
                               "abundance_tsv_file", "run_info_file"):
                tool_name = kq_meta[result_key].meta_data['tool']
                kq_meta[result_key].meta_data['tool_description'] = tool_name
                kq_meta[result_key].meta_data['tool'] = "process_rnaseq"
        except KeyError:
            logger.fatal("Kallisto failed")
return (kq_files, kq_meta)
# -----------------------------------------------------------------------------
def main_json(config, in_metadata, out_metadata):
"""
Alternative main function
    -------------------------
This function launches the app using configuration written in
two json files: config.json and input_metadata.json.
"""
# 1. Instantiate and launch the App
print("1. Instantiate and launch the App")
from apps.jsonapp import JSONApp
app = JSONApp()
result = app.launch(process_rnaseq,
config,
in_metadata,
out_metadata)
# 2. The App has finished
print("2. Execution finished; see " + out_metadata)
print(result)
return result
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set up the command line parameters
PARSER = argparse.ArgumentParser(
description="Parse RNA-seq for expression analysis")
PARSER.add_argument(
"--config", help="Configuration file")
PARSER.add_argument(
"--in_metadata", help="Location of input metadata file")
PARSER.add_argument(
"--out_metadata", help="Location of output metadata file")
PARSER.add_argument(
"--local", action="store_const", const=True, default=False)
# Get the matching parameters from the command line
ARGS = PARSER.parse_args()
CONFIG = ARGS.config
IN_METADATA = ARGS.in_metadata
OUT_METADATA = ARGS.out_metadata
LOCAL = ARGS.local
if LOCAL:
import sys
sys._run_from_cmdl = True # pylint: disable=protected-access
RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)
print(RESULTS)
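# Hedged usage sketch (file names are placeholders): a typical command-line
# invocation of this wrapper, using the argparse options defined above:
#
#     python process_rnaseq.py \
#         --config config.json \
#         --in_metadata input_metadata.json \
#         --out_metadata output_metadata.json \
#         --local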
|
|
#!/usr/bin/env python
# PyQt tutorial 13
import sys
import math
import random
from PySide import QtCore, QtGui
class LCDRange(QtGui.QWidget):
def __init__(self, text=None, parent=None):
if isinstance(text, QtGui.QWidget):
parent = text
text = None
QtGui.QWidget.__init__(self, parent)
self.init()
if text:
self.setText(text)
def init(self):
lcd = QtGui.QLCDNumber(2)
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slider.setRange(0, 99)
self.slider.setValue(0)
self.label = QtGui.QLabel()
self.label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
self.label.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
self.connect(self.slider, QtCore.SIGNAL("valueChanged(int)"),
lcd, QtCore.SLOT("display(int)"))
self.connect(self.slider, QtCore.SIGNAL("valueChanged(int)"),
self, QtCore.SIGNAL("valueChanged(int)"))
layout = QtGui.QVBoxLayout()
layout.addWidget(lcd)
layout.addWidget(self.slider)
layout.addWidget(self.label)
self.setLayout(layout)
self.setFocusProxy(self.slider)
def value(self):
return self.slider.value()
def setValue(self, value):
self.slider.setValue(value)
def text(self):
return self.label.text()
def setRange(self, minValue, maxValue):
if minValue < 0 or maxValue > 99 or minValue > maxValue:
QtCore.qWarning("LCDRange::setRange(%d, %d)\n"
"\tRange must be 0..99\n"
"\tand minValue must not be greater than maxValue" % (minValue, maxValue))
return
self.slider.setRange(minValue, maxValue)
def setText(self, text):
self.label.setText(text)
class CannonField(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.currentAngle = 45
self.currentForce = 0
self.timerCount = 0
self.autoShootTimer = QtCore.QTimer(self)
self.connect(self.autoShootTimer, QtCore.SIGNAL("timeout()"),
self.moveShot)
self.shootAngle = 0
self.shootForce = 0
self.target = QtCore.QPoint(0, 0)
self.gameEnded = False
self.setPalette(QtGui.QPalette(QtGui.QColor(250, 250, 200)))
self.setAutoFillBackground(True)
self.newTarget()
def angle(self):
return self.currentAngle
def setAngle(self, angle):
if angle < 5:
angle = 5
if angle > 70:
            angle = 70
if self.currentAngle == angle:
return
self.currentAngle = angle
self.update()
self.emit(QtCore.SIGNAL("angleChanged(int)"), self.currentAngle)
def force(self):
return self.currentForce
def setForce(self, force):
if force < 0:
force = 0
if self.currentForce == force:
return
        self.currentForce = force
self.emit(QtCore.SIGNAL("forceChanged(int)"), self.currentForce)
def shoot(self):
if self.isShooting():
return
self.timerCount = 0
self.shootAngle = self.currentAngle
self.shootForce = self.currentForce
self.autoShootTimer.start(5)
self.emit(QtCore.SIGNAL("canShoot(bool)"), False)
firstTime = True
def newTarget(self):
if CannonField.firstTime:
CannonField.firstTime = False
midnight = QtCore.QTime(0, 0, 0)
random.seed(midnight.secsTo(QtCore.QTime.currentTime()))
self.target = QtCore.QPoint(200 + random.randint(0, 190 - 1), 10 + random.randint(0, 255 - 1))
self.update()
def setGameOver(self):
if self.gameEnded:
return
if self.isShooting():
self.autoShootTimer.stop()
self.gameEnded = True
self.update()
def restartGame(self):
if self.isShooting():
self.autoShootTimer.stop()
self.gameEnded = False
self.update()
self.emit(QtCore.SIGNAL("canShoot(bool)"), True)
def moveShot(self):
region = QtGui.QRegion(self.shotRect())
self.timerCount += 1
shotR = self.shotRect()
if shotR.intersects(self.targetRect()):
self.autoShootTimer.stop()
self.emit(QtCore.SIGNAL("hit()"))
self.emit(QtCore.SIGNAL("canShoot(bool)"), True)
elif shotR.x() > self.width() or shotR.y() > self.height():
self.autoShootTimer.stop()
self.emit(QtCore.SIGNAL("missed()"))
self.emit(QtCore.SIGNAL("canShoot(bool)"), True)
else:
region = region.united(QtGui.QRegion(shotR))
self.update(region)
def paintEvent(self, event):
painter = QtGui.QPainter(self)
if self.gameEnded:
painter.setPen(QtCore.Qt.black)
painter.setFont(QtGui.QFont("Courier", 48, QtGui.QFont.Bold))
painter.drawText(self.rect(), QtCore.Qt.AlignCenter, "Game Over")
self.paintCannon(painter)
if self.isShooting():
self.paintShot(painter)
if not self.gameEnded:
self.paintTarget(painter)
def paintShot(self, painter):
        painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtCore.Qt.black)
painter.drawRect(self.shotRect())
def paintTarget(self, painter):
painter.setPen(QtCore.Qt.black)
painter.setBrush(QtCore.Qt.red)
painter.drawRect(self.targetRect())
barrelRect = QtCore.QRect(33, -4, 15, 8)
def paintCannon(self, painter):
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtCore.Qt.blue)
painter.save()
painter.translate(0, self.height())
painter.drawPie(QtCore.QRect(-35, -35, 70, 70), 0, 90 * 16)
painter.rotate(-self.currentAngle)
painter.drawRect(CannonField.barrelRect)
painter.restore()
def cannonRect(self):
result = QtCore.QRect(0, 0, 50, 50)
        result.moveBottomLeft(self.rect().bottomLeft())
return result
def shotRect(self):
gravity = 4.0
time = self.timerCount / 40.0
velocity = self.shootForce
radians = self.shootAngle * 3.14159265 / 180
velx = velocity * math.cos(radians)
vely = velocity * math.sin(radians)
x0 = (CannonField.barrelRect.right() + 5) * math.cos(radians)
y0 = (CannonField.barrelRect.right() + 5) * math.sin(radians)
x = x0 + velx * time
y = y0 + vely * time - 0.5 * gravity * time * time
result = QtCore.QRect(0, 0, 6, 6)
result.moveCenter(QtCore.QPoint(QtCore.qRound(x), self.height() - 1 - QtCore.qRound(y)))
return result
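    # Notes on shotRect() above (illustrative, with rounded numbers): the shot
    # follows the standard projectile formulas x(t) = x0 + v*cos(a)*t and
    # y(t) = y0 + v*sin(a)*t - g/2*t^2, where t = timerCount / 40.0 and
    # g = 4.0 in widget units. For example, with shootForce = 25 and
    # shootAngle = 45, velx = vely ~= 17.7. The y coordinate is flipped with
    # "height() - 1 - y" because widget coordinates grow downwards.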
def targetRect(self):
result = QtCore.QRect(0, 0, 20, 10)
result.moveCenter(QtCore.QPoint(self.target.x(), self.height() - 1 - self.target.y()))
return result
def gameOver(self):
return self.gameEnded
def isShooting(self):
return self.autoShootTimer.isActive()
class GameBoard(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
quit = QtGui.QPushButton("&Quit")
quit.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
self.connect(quit, QtCore.SIGNAL("clicked()"),
QtGui.qApp, QtCore.SLOT("quit()"))
angle = LCDRange("ANGLE")
angle.setRange(5, 70)
force = LCDRange("FORCE")
force.setRange(10, 50)
self.cannonField = CannonField()
self.connect(angle, QtCore.SIGNAL("valueChanged(int)"),
self.cannonField.setAngle)
self.connect(self.cannonField, QtCore.SIGNAL("angleChanged(int)"),
angle.setValue)
self.connect(force, QtCore.SIGNAL("valueChanged(int)"),
self.cannonField.setForce)
self.connect(self.cannonField, QtCore.SIGNAL("forceChanged(int)"),
force.setValue)
self.connect(self.cannonField, QtCore.SIGNAL("hit()"), self.hit)
self.connect(self.cannonField, QtCore.SIGNAL("missed()"), self.missed)
shoot = QtGui.QPushButton("&Shoot")
shoot.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
self.connect(shoot, QtCore.SIGNAL("clicked()"), self.fire)
self.connect(self.cannonField, QtCore.SIGNAL("canShoot(bool)"),
shoot, QtCore.SLOT("setEnabled(bool)"))
restart = QtGui.QPushButton("&New Game")
restart.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
self.connect(restart, QtCore.SIGNAL("clicked()"), self.newGame)
self.hits = QtGui.QLCDNumber(2)
self.shotsLeft = QtGui.QLCDNumber(2)
hitsLabel = QtGui.QLabel("HITS")
shotsLeftLabel = QtGui.QLabel("SHOTS LEFT")
topLayout = QtGui.QHBoxLayout()
topLayout.addWidget(shoot)
topLayout.addWidget(self.hits)
topLayout.addWidget(hitsLabel)
topLayout.addWidget(self.shotsLeft)
topLayout.addWidget(shotsLeftLabel)
topLayout.addStretch(1)
topLayout.addWidget(restart)
leftLayout = QtGui.QVBoxLayout()
leftLayout.addWidget(angle)
leftLayout.addWidget(force)
gridLayout = QtGui.QGridLayout()
gridLayout.addWidget(quit, 0, 0)
gridLayout.addLayout(topLayout, 0, 1)
gridLayout.addLayout(leftLayout, 1, 0)
gridLayout.addWidget(self.cannonField, 1, 1, 2, 1)
gridLayout.setColumnStretch(1, 10)
self.setLayout(gridLayout)
angle.setValue(60)
force.setValue(25)
angle.setFocus()
self.newGame()
def fire(self):
if self.cannonField.gameOver() or self.cannonField.isShooting():
return
self.shotsLeft.display(self.shotsLeft.intValue() - 1)
self.cannonField.shoot()
def hit(self):
self.hits.display(self.hits.intValue() + 1)
if self.shotsLeft.intValue() == 0:
self.cannonField.setGameOver()
else:
self.cannonField.newTarget()
def missed(self):
if self.shotsLeft.intValue() == 0:
self.cannonField.setGameOver()
def newGame(self):
self.shotsLeft.display(15)
self.hits.display(0)
self.cannonField.restartGame()
self.cannonField.newTarget()
app = QtGui.QApplication(sys.argv)
board = GameBoard()
board.setGeometry(100, 100, 500, 355)
board.show()
sys.exit(app.exec_())
|
|
"""Support for HomeMatic devices."""
from datetime import timedelta
from functools import partial
import logging
import socket
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, CONF_HOST, CONF_HOSTS, CONF_PASSWORD,
CONF_PLATFORM, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP, STATE_UNKNOWN)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'homematic'
SCAN_INTERVAL_HUB = timedelta(seconds=300)
SCAN_INTERVAL_VARIABLES = timedelta(seconds=30)
DISCOVER_SWITCHES = 'homematic.switch'
DISCOVER_LIGHTS = 'homematic.light'
DISCOVER_SENSORS = 'homematic.sensor'
DISCOVER_BINARY_SENSORS = 'homematic.binary_sensor'
DISCOVER_COVER = 'homematic.cover'
DISCOVER_CLIMATE = 'homematic.climate'
DISCOVER_LOCKS = 'homematic.locks'
ATTR_DISCOVER_DEVICES = 'devices'
ATTR_PARAM = 'param'
ATTR_CHANNEL = 'channel'
ATTR_ADDRESS = 'address'
ATTR_VALUE = 'value'
ATTR_INTERFACE = 'interface'
ATTR_ERRORCODE = 'error'
ATTR_MESSAGE = 'message'
ATTR_MODE = 'mode'
ATTR_TIME = 'time'
ATTR_UNIQUE_ID = 'unique_id'
ATTR_PARAMSET_KEY = 'paramset_key'
ATTR_PARAMSET = 'paramset'
EVENT_KEYPRESS = 'homematic.keypress'
EVENT_IMPULSE = 'homematic.impulse'
EVENT_ERROR = 'homematic.error'
SERVICE_VIRTUALKEY = 'virtualkey'
SERVICE_RECONNECT = 'reconnect'
SERVICE_SET_VARIABLE_VALUE = 'set_variable_value'
SERVICE_SET_DEVICE_VALUE = 'set_device_value'
SERVICE_SET_INSTALL_MODE = 'set_install_mode'
SERVICE_PUT_PARAMSET = 'put_paramset'
HM_DEVICE_TYPES = {
DISCOVER_SWITCHES: [
'Switch', 'SwitchPowermeter', 'IOSwitch', 'IPSwitch', 'RFSiren',
'IPSwitchPowermeter', 'HMWIOSwitch', 'Rain', 'EcoLogic',
'IPKeySwitchPowermeter', 'IPGarage', 'IPKeySwitch', 'IPMultiIO'],
DISCOVER_LIGHTS: ['Dimmer', 'KeyDimmer', 'IPKeyDimmer', 'IPDimmer',
'ColorEffectLight'],
DISCOVER_SENSORS: [
'SwitchPowermeter', 'Motion', 'MotionV2', 'RemoteMotion', 'MotionIP',
'ThermostatWall', 'AreaThermostat', 'RotaryHandleSensor',
'WaterSensor', 'PowermeterGas', 'LuxSensor', 'WeatherSensor',
'WeatherStation', 'ThermostatWall2', 'TemperatureDiffSensor',
'TemperatureSensor', 'CO2Sensor', 'IPSwitchPowermeter', 'HMWIOSwitch',
'FillingLevel', 'ValveDrive', 'EcoLogic', 'IPThermostatWall',
'IPSmoke', 'RFSiren', 'PresenceIP', 'IPAreaThermostat',
'IPWeatherSensor', 'RotaryHandleSensorIP', 'IPPassageSensor',
'IPKeySwitchPowermeter', 'IPThermostatWall230V', 'IPWeatherSensorPlus',
'IPWeatherSensorBasic', 'IPBrightnessSensor', 'IPGarage',
'UniversalSensor', 'MotionIPV2', 'IPMultiIO', 'IPThermostatWall2'],
DISCOVER_CLIMATE: [
'Thermostat', 'ThermostatWall', 'MAXThermostat', 'ThermostatWall2',
'MAXWallThermostat', 'IPThermostat', 'IPThermostatWall',
'ThermostatGroup', 'IPThermostatWall230V', 'IPThermostatWall2'],
DISCOVER_BINARY_SENSORS: [
'ShutterContact', 'Smoke', 'SmokeV2', 'Motion', 'MotionV2',
'MotionIP', 'RemoteMotion', 'WeatherSensor', 'TiltSensor',
'IPShutterContact', 'HMWIOSwitch', 'MaxShutterContact', 'Rain',
'WiredSensor', 'PresenceIP', 'IPWeatherSensor', 'IPPassageSensor',
'SmartwareMotion', 'IPWeatherSensorPlus', 'MotionIPV2', 'WaterIP',
'IPMultiIO', 'TiltIP', 'IPShutterContactSabotage'],
DISCOVER_COVER: ['Blind', 'KeyBlind', 'IPKeyBlind', 'IPKeyBlindTilt'],
DISCOVER_LOCKS: ['KeyMatic']
}
HM_IGNORE_DISCOVERY_NODE = [
'ACTUAL_TEMPERATURE',
'ACTUAL_HUMIDITY'
]
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS = {
'ACTUAL_TEMPERATURE': [
'IPAreaThermostat', 'IPWeatherSensor',
'IPWeatherSensorPlus', 'IPWeatherSensorBasic',
'IPThermostatWall', 'IPThermostatWall2'],
}
HM_ATTRIBUTE_SUPPORT = {
'LOWBAT': ['battery', {0: 'High', 1: 'Low'}],
'LOW_BAT': ['battery', {0: 'High', 1: 'Low'}],
'ERROR': ['error', {0: 'No'}],
'ERROR_SABOTAGE': ['sabotage', {0: 'No', 1: 'Yes'}],
'SABOTAGE': ['sabotage', {0: 'No', 1: 'Yes'}],
'RSSI_PEER': ['rssi_peer', {}],
'RSSI_DEVICE': ['rssi_device', {}],
'VALVE_STATE': ['valve', {}],
'LEVEL': ['level', {}],
'BATTERY_STATE': ['battery', {}],
'CONTROL_MODE': ['mode', {
0: 'Auto',
1: 'Manual',
2: 'Away',
3: 'Boost',
4: 'Comfort',
5: 'Lowering'
}],
'POWER': ['power', {}],
'CURRENT': ['current', {}],
'VOLTAGE': ['voltage', {}],
'OPERATING_VOLTAGE': ['voltage', {}],
'WORKING': ['working', {0: 'No', 1: 'Yes'}]
}
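# Illustration of the mapping above: a raw LOWBAT value of 1 is exposed as the
# attribute battery: 'Low', ERROR of 0 becomes error: 'No', while entries with
# an empty translation dict (e.g. RSSI_DEVICE) are passed through unchanged.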
HM_PRESS_EVENTS = [
'PRESS_SHORT',
'PRESS_LONG',
'PRESS_CONT',
'PRESS_LONG_RELEASE',
'PRESS',
]
HM_IMPULSE_EVENTS = [
'SEQUENCE_OK',
]
CONF_RESOLVENAMES_OPTIONS = [
'metadata',
'json',
'xml',
False
]
DATA_HOMEMATIC = 'homematic'
DATA_STORE = 'homematic_store'
DATA_CONF = 'homematic_conf'
CONF_INTERFACES = 'interfaces'
CONF_LOCAL_IP = 'local_ip'
CONF_LOCAL_PORT = 'local_port'
CONF_PORT = 'port'
CONF_PATH = 'path'
CONF_CALLBACK_IP = 'callback_ip'
CONF_CALLBACK_PORT = 'callback_port'
CONF_RESOLVENAMES = 'resolvenames'
CONF_JSONPORT = 'jsonport'
CONF_VARIABLES = 'variables'
CONF_DEVICES = 'devices'
CONF_PRIMARY = 'primary'
DEFAULT_LOCAL_IP = '0.0.0.0'
DEFAULT_LOCAL_PORT = 0
DEFAULT_RESOLVENAMES = False
DEFAULT_JSONPORT = 80
DEFAULT_PORT = 2001
DEFAULT_PATH = ''
DEFAULT_USERNAME = 'Admin'
DEFAULT_PASSWORD = ''
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_CHANNEL = 1
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'homematic',
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_ADDRESS): cv.string,
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Optional(ATTR_PARAM): cv.string,
vol.Optional(ATTR_UNIQUE_ID): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_INTERFACES, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES):
vol.In(CONF_RESOLVENAMES_OPTIONS),
vol.Optional(CONF_JSONPORT, default=DEFAULT_JSONPORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_CALLBACK_IP): cv.string,
vol.Optional(CONF_CALLBACK_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(
CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}},
vol.Optional(CONF_HOSTS, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
}},
vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string,
vol.Optional(CONF_LOCAL_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
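# Hedged configuration sketch (host names and credentials are placeholders):
# the schema above corresponds to configuration.yaml entries of this shape:
#
#     homematic:
#       interfaces:
#         rf:
#           host: 203.0.113.10
#           resolvenames: json
#           username: Admin
#           password: secret
#       hosts:
#         ccu2:
#           host: 203.0.113.10
#
# Options left out fall back to the DEFAULT_* values above (port 2001,
# json port 80, SSL disabled and so on).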
SCHEMA_SERVICE_VIRTUALKEY = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): cv.string,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_RECONNECT = vol.Schema({})
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema({
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1):
vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
})
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema({
vol.Required(ATTR_INTERFACE): cv.string,
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET): dict,
})
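# Hedged example of service data accepted by SCHEMA_SERVICE_PUT_PARAMSET
# (interface, address and parameter are placeholders):
#
#     interface: wireless
#     address: LEQ1234567:1
#     paramset_key: MASTER
#     paramset:
#       TEMPERATURE_COMFORT: 21.0
#
# Note that address and paramset_key are upper-cased by the schema before the
# call is forwarded to putParamset().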
def setup(hass, config):
"""Set up the Homematic component."""
from pyhomematic import HMConnection
conf = config[DOMAIN]
hass.data[DATA_CONF] = remotes = {}
hass.data[DATA_STORE] = set()
# Create hosts-dictionary for pyhomematic
for rname, rconfig in conf[CONF_INTERFACES].items():
remotes[rname] = {
'ip': socket.gethostbyname(rconfig.get(CONF_HOST)),
'port': rconfig.get(CONF_PORT),
'path': rconfig.get(CONF_PATH),
'resolvenames': rconfig.get(CONF_RESOLVENAMES),
'jsonport': rconfig.get(CONF_JSONPORT),
'username': rconfig.get(CONF_USERNAME),
'password': rconfig.get(CONF_PASSWORD),
'callbackip': rconfig.get(CONF_CALLBACK_IP),
'callbackport': rconfig.get(CONF_CALLBACK_PORT),
'ssl': rconfig.get(CONF_SSL),
'verify_ssl': rconfig.get(CONF_VERIFY_SSL),
'connect': True,
}
for sname, sconfig in conf[CONF_HOSTS].items():
remotes[sname] = {
'ip': socket.gethostbyname(sconfig.get(CONF_HOST)),
'port': DEFAULT_PORT,
'username': sconfig.get(CONF_USERNAME),
'password': sconfig.get(CONF_PASSWORD),
'connect': False,
}
# Create server thread
bound_system_callback = partial(_system_callback_handler, hass, config)
hass.data[DATA_HOMEMATIC] = homematic = HMConnection(
local=config[DOMAIN].get(CONF_LOCAL_IP),
localport=config[DOMAIN].get(CONF_LOCAL_PORT, DEFAULT_LOCAL_PORT),
remotes=remotes,
systemcallback=bound_system_callback,
interface_id='homeassistant'
)
# Start server thread, connect to hosts, initialize to receive events
homematic.start()
# Stops server when HASS is shutting down
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop)
# Init homematic hubs
entity_hubs = []
for hub_name in conf[CONF_HOSTS].keys():
entity_hubs.append(HMHub(hass, homematic, hub_name))
def _hm_service_virtualkey(service):
"""Service to handle virtualkey servicecalls."""
address = service.data.get(ATTR_ADDRESS)
channel = service.data.get(ATTR_CHANNEL)
param = service.data.get(ATTR_PARAM)
# Device not found
hmdevice = _device_from_servicecall(hass, service)
if hmdevice is None:
_LOGGER.error("%s not found for service virtualkey!", address)
return
# Parameter doesn't exist for device
if param not in hmdevice.ACTIONNODE:
_LOGGER.error("%s not datapoint in hm device %s", param, address)
return
# Channel doesn't exist for device
if channel not in hmdevice.ACTIONNODE[param]:
_LOGGER.error("%i is not a channel in hm device %s",
channel, address)
return
# Call parameter
hmdevice.actionNodeData(param, True, channel)
hass.services.register(
DOMAIN, SERVICE_VIRTUALKEY, _hm_service_virtualkey,
schema=SCHEMA_SERVICE_VIRTUALKEY)
def _service_handle_value(service):
"""Service to call setValue method for HomeMatic system variable."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
name = service.data[ATTR_NAME]
value = service.data[ATTR_VALUE]
if entity_ids:
entities = [entity for entity in entity_hubs if
entity.entity_id in entity_ids]
else:
entities = entity_hubs
if not entities:
_LOGGER.error("No HomeMatic hubs available")
return
for hub in entities:
hub.hm_set_variable(name, value)
hass.services.register(
DOMAIN, SERVICE_SET_VARIABLE_VALUE, _service_handle_value,
schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE)
def _service_handle_reconnect(service):
"""Service to reconnect all HomeMatic hubs."""
homematic.reconnect()
hass.services.register(
DOMAIN, SERVICE_RECONNECT, _service_handle_reconnect,
schema=SCHEMA_SERVICE_RECONNECT)
def _service_handle_device(service):
"""Service to call setValue method for HomeMatic devices."""
address = service.data.get(ATTR_ADDRESS)
channel = service.data.get(ATTR_CHANNEL)
param = service.data.get(ATTR_PARAM)
value = service.data.get(ATTR_VALUE)
# Device not found
hmdevice = _device_from_servicecall(hass, service)
if hmdevice is None:
_LOGGER.error("%s not found!", address)
return
hmdevice.setValue(param, value, channel)
hass.services.register(
DOMAIN, SERVICE_SET_DEVICE_VALUE, _service_handle_device,
schema=SCHEMA_SERVICE_SET_DEVICE_VALUE)
def _service_handle_install_mode(service):
"""Service to set interface into install mode."""
interface = service.data.get(ATTR_INTERFACE)
mode = service.data.get(ATTR_MODE)
time = service.data.get(ATTR_TIME)
address = service.data.get(ATTR_ADDRESS)
homematic.setInstallMode(interface, t=time, mode=mode, address=address)
hass.services.register(
DOMAIN, SERVICE_SET_INSTALL_MODE, _service_handle_install_mode,
schema=SCHEMA_SERVICE_SET_INSTALL_MODE)
def _service_put_paramset(service):
"""Service to call the putParamset method on a HomeMatic connection."""
interface = service.data.get(ATTR_INTERFACE)
address = service.data.get(ATTR_ADDRESS)
paramset_key = service.data.get(ATTR_PARAMSET_KEY)
# When passing in the paramset from a YAML file we get an OrderedDict
# here instead of a dict, so add this explicit cast.
# The service schema makes sure that this cast works.
paramset = dict(service.data.get(ATTR_PARAMSET))
_LOGGER.debug(
"Calling putParamset: %s, %s, %s, %s",
interface, address, paramset_key, paramset
)
homematic.putParamset(interface, address, paramset_key, paramset)
hass.services.register(
DOMAIN, SERVICE_PUT_PARAMSET, _service_put_paramset,
schema=SCHEMA_SERVICE_PUT_PARAMSET)
return True
def _system_callback_handler(hass, config, src, *args):
"""System callback handler."""
# New devices available at hub
if src == 'newDevices':
(interface_id, dev_descriptions) = args
interface = interface_id.split('-')[-1]
# Device support active?
if not hass.data[DATA_CONF][interface]['connect']:
return
addresses = []
for dev in dev_descriptions:
address = dev['ADDRESS'].split(':')[0]
if address not in hass.data[DATA_STORE]:
hass.data[DATA_STORE].add(address)
addresses.append(address)
# Register EVENTS
# Search all devices with an EVENTNODE that includes data
bound_event_callback = partial(_hm_event_handler, hass, interface)
for dev in addresses:
hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(dev)
if hmdevice.EVENTNODE:
hmdevice.setEventCallback(
callback=bound_event_callback, bequeath=True)
# Create HASS entities
if addresses:
for component_name, discovery_type in (
('switch', DISCOVER_SWITCHES),
('light', DISCOVER_LIGHTS),
('cover', DISCOVER_COVER),
('binary_sensor', DISCOVER_BINARY_SENSORS),
('sensor', DISCOVER_SENSORS),
('climate', DISCOVER_CLIMATE),
('lock', DISCOVER_LOCKS)):
# Get all devices of a specific type
found_devices = _get_devices(
hass, discovery_type, addresses, interface)
# When devices of this type are found
# they are setup in HASS and a discovery event is fired
if found_devices:
discovery.load_platform(hass, component_name, DOMAIN, {
ATTR_DISCOVER_DEVICES: found_devices
}, config)
# Homegear error message
elif src == 'error':
_LOGGER.error("Error: %s", args)
(interface_id, errorcode, message) = args
hass.bus.fire(EVENT_ERROR, {
ATTR_ERRORCODE: errorcode,
ATTR_MESSAGE: message
})
def _get_devices(hass, discovery_type, keys, interface):
"""Get the HomeMatic devices for given discovery_type."""
device_arr = []
for key in keys:
device = hass.data[DATA_HOMEMATIC].devices[interface][key]
class_name = device.__class__.__name__
metadata = {}
# Class not supported by discovery type
if class_name not in HM_DEVICE_TYPES[discovery_type]:
continue
# Load metadata needed to generate a parameter list
if discovery_type == DISCOVER_SENSORS:
metadata.update(device.SENSORNODE)
elif discovery_type == DISCOVER_BINARY_SENSORS:
metadata.update(device.BINARYNODE)
else:
metadata.update({None: device.ELEMENT})
# Generate options for 1...n elements with 1...n parameters
for param, channels in metadata.items():
if param in HM_IGNORE_DISCOVERY_NODE and class_name not in \
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS.get(param, []):
continue
# Add devices
_LOGGER.debug("%s: Handling %s: %s: %s",
discovery_type, key, param, channels)
for channel in channels:
name = _create_ha_id(
name=device.NAME, channel=channel, param=param,
count=len(channels)
)
unique_id = _create_ha_id(
name=key, channel=channel, param=param,
count=len(channels)
)
device_dict = {
CONF_PLATFORM: "homematic",
ATTR_ADDRESS: key,
ATTR_INTERFACE: interface,
ATTR_NAME: name,
ATTR_CHANNEL: channel,
ATTR_UNIQUE_ID: unique_id
}
if param is not None:
device_dict[ATTR_PARAM] = param
# Add new device
try:
DEVICE_SCHEMA(device_dict)
device_arr.append(device_dict)
except vol.MultipleInvalid as err:
_LOGGER.error("Invalid device config: %s",
str(err))
return device_arr
def _create_ha_id(name, channel, param, count):
"""Generate a unique entity id."""
# HMDevice is a simple device
if count == 1 and param is None:
return name
# Has multiple elements/channels
if count > 1 and param is None:
return "{} {}".format(name, channel)
# With multiple parameters on first channel
if count == 1 and param is not None:
return "{} {}".format(name, param)
# Multiple parameters with multiple channels
if count > 1 and param is not None:
return "{} {} {}".format(name, channel, param)
def _hm_event_handler(hass, interface, device, caller, attribute, value):
"""Handle all pyhomematic device events."""
try:
channel = int(device.split(":")[1])
address = device.split(":")[0]
hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(address)
except (TypeError, ValueError):
_LOGGER.error("Event handling channel convert error!")
return
# Return if not an event supported by device
if attribute not in hmdevice.EVENTNODE:
return
_LOGGER.debug("Event %s for %s channel %i", attribute,
hmdevice.NAME, channel)
# Keypress event
if attribute in HM_PRESS_EVENTS:
hass.bus.fire(EVENT_KEYPRESS, {
ATTR_NAME: hmdevice.NAME,
ATTR_PARAM: attribute,
ATTR_CHANNEL: channel
})
return
# Impulse event
if attribute in HM_IMPULSE_EVENTS:
hass.bus.fire(EVENT_IMPULSE, {
ATTR_NAME: hmdevice.NAME,
ATTR_CHANNEL: channel
})
return
_LOGGER.warning("Event is unknown and not forwarded")
def _device_from_servicecall(hass, service):
"""Extract HomeMatic device from service call."""
address = service.data.get(ATTR_ADDRESS)
interface = service.data.get(ATTR_INTERFACE)
if address == 'BIDCOS-RF':
address = 'BidCoS-RF'
if interface:
return hass.data[DATA_HOMEMATIC].devices[interface].get(address)
for devices in hass.data[DATA_HOMEMATIC].devices.values():
if address in devices:
return devices[address]
class HMHub(Entity):
"""The HomeMatic hub. (CCU2/HomeGear)."""
def __init__(self, hass, homematic, name):
"""Initialize HomeMatic hub."""
self.hass = hass
self.entity_id = "{}.{}".format(DOMAIN, name.lower())
self._homematic = homematic
self._variables = {}
self._name = name
self._state = None
# Load data
self.hass.helpers.event.track_time_interval(
self._update_hub, SCAN_INTERVAL_HUB)
self.hass.add_job(self._update_hub, None)
self.hass.helpers.event.track_time_interval(
self._update_variables, SCAN_INTERVAL_VARIABLES)
self.hass.add_job(self._update_variables, None)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return false. HomeMatic Hub object updates variables."""
return False
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
attr = self._variables.copy()
return attr
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:gradient"
def _update_hub(self, now):
"""Retrieve latest state."""
service_message = self._homematic.getServiceMessages(self._name)
state = None if service_message is None else len(service_message)
        # Has the state changed?
if self._state != state:
self._state = state
self.schedule_update_ha_state()
def _update_variables(self, now):
"""Retrieve all variable data and update hmvariable states."""
variables = self._homematic.getAllSystemVariables(self._name)
if variables is None:
return
state_change = False
for key, value in variables.items():
if key in self._variables and value == self._variables[key]:
continue
state_change = True
self._variables.update({key: value})
if state_change:
self.schedule_update_ha_state()
def hm_set_variable(self, name, value):
"""Set variable value on CCU/Homegear."""
if name not in self._variables:
_LOGGER.error("Variable %s not found on %s", name, self.name)
return
old_value = self._variables.get(name)
if isinstance(old_value, bool):
value = cv.boolean(value)
else:
value = float(value)
self._homematic.setSystemVariable(self.name, name, value)
self._variables.update({name: value})
self.schedule_update_ha_state()
class HMDevice(Entity):
"""The HomeMatic device base object."""
def __init__(self, config):
"""Initialize a generic HomeMatic device."""
self._name = config.get(ATTR_NAME)
self._address = config.get(ATTR_ADDRESS)
self._interface = config.get(ATTR_INTERFACE)
self._channel = config.get(ATTR_CHANNEL)
self._state = config.get(ATTR_PARAM)
self._unique_id = config.get(ATTR_UNIQUE_ID)
self._data = {}
self._homematic = None
self._hmdevice = None
self._connected = False
self._available = False
# Set parameter to uppercase
if self._state:
self._state = self._state.upper()
async def async_added_to_hass(self):
"""Load data init callbacks."""
await self.hass.async_add_job(self.link_homematic)
@property
def unique_id(self):
"""Return unique ID. HomeMatic entity IDs are unique by default."""
return self._unique_id.replace(" ", "_")
@property
def should_poll(self):
"""Return false. HomeMatic states are pushed by the XML-RPC Server."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def available(self):
"""Return true if device is available."""
return self._available
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
attr = {}
# Generate a dictionary with attributes
for node, data in HM_ATTRIBUTE_SUPPORT.items():
# Is an attribute and exists for this object
if node in self._data:
value = data[1].get(self._data[node], self._data[node])
attr[data[0]] = value
# Static attributes
attr['id'] = self._hmdevice.ADDRESS
attr['interface'] = self._interface
return attr
def link_homematic(self):
"""Connect to HomeMatic."""
if self._connected:
return True
# Initialize
self._homematic = self.hass.data[DATA_HOMEMATIC]
self._hmdevice = \
self._homematic.devices[self._interface][self._address]
self._connected = True
try:
# Initialize datapoints of this object
self._init_data()
self._load_data_from_hm()
# Link events from pyhomematic
self._subscribe_homematic_events()
self._available = not self._hmdevice.UNREACH
except Exception as err: # pylint: disable=broad-except
self._connected = False
_LOGGER.error("Exception while linking %s: %s",
self._address, str(err))
def _hm_event_callback(self, device, caller, attribute, value):
"""Handle all pyhomematic device events."""
_LOGGER.debug("%s received event '%s' value: %s", self._name,
attribute, value)
has_changed = False
# Is data needed for this instance?
if attribute in self._data:
# Did data change?
if self._data[attribute] != value:
self._data[attribute] = value
has_changed = True
# Availability has changed
if self.available != (not self._hmdevice.UNREACH):
self._available = not self._hmdevice.UNREACH
has_changed = True
        # If a data point or the availability changed, update HASS
if has_changed:
self.schedule_update_ha_state()
def _subscribe_homematic_events(self):
"""Subscribe all required events to handle job."""
channels_to_sub = set()
# Push data to channels_to_sub from hmdevice metadata
for metadata in (self._hmdevice.SENSORNODE, self._hmdevice.BINARYNODE,
self._hmdevice.ATTRIBUTENODE,
self._hmdevice.WRITENODE, self._hmdevice.EVENTNODE,
self._hmdevice.ACTIONNODE):
for node, channels in metadata.items():
# Data is needed for this instance
if node in self._data:
                    # Use the node's only channel, otherwise the configured one
if len(channels) == 1:
channel = channels[0]
else:
channel = self._channel
# Prepare for subscription
try:
channels_to_sub.add(int(channel))
except (ValueError, TypeError):
_LOGGER.error("Invalid channel in metadata from %s",
self._name)
# Set callbacks
for channel in channels_to_sub:
_LOGGER.debug(
"Subscribe channel %d from %s", channel, self._name)
self._hmdevice.setEventCallback(
callback=self._hm_event_callback, bequeath=False,
channel=channel)
def _load_data_from_hm(self):
"""Load first value from pyhomematic."""
if not self._connected:
return False
# Read data from pyhomematic
for metadata, funct in (
(self._hmdevice.ATTRIBUTENODE,
self._hmdevice.getAttributeData),
(self._hmdevice.WRITENODE, self._hmdevice.getWriteData),
(self._hmdevice.SENSORNODE, self._hmdevice.getSensorData),
(self._hmdevice.BINARYNODE, self._hmdevice.getBinaryData)):
for node in metadata:
if metadata[node] and node in self._data:
self._data[node] = funct(name=node, channel=self._channel)
return True
def _hm_set_state(self, value):
"""Set data to main datapoint."""
if self._state in self._data:
self._data[self._state] = value
def _hm_get_state(self):
"""Get data from main datapoint."""
if self._state in self._data:
return self._data[self._state]
return None
def _init_data(self):
"""Generate a data dict (self._data) from the HomeMatic metadata."""
# Add all attributes to data dictionary
for data_note in self._hmdevice.ATTRIBUTENODE:
self._data.update({data_note: STATE_UNKNOWN})
# Initialize device specific data
self._init_data_struct()
def _init_data_struct(self):
"""Generate a data dictionary from the HomeMatic device metadata."""
raise NotImplementedError
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from uimockup.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-server-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'uimockup'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from uimockup.uimockupImpl import uimockup # @IgnorePep8
impl_uimockup = uimockup(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
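# Illustration of the encoder above: json.dumps({1, 2, 3}, cls=JSONObjectEncoder)
# serialises the set as a JSON list such as "[1, 2, 3]" (element order is not
# guaranteed); objects exposing toJSONable() are serialised via that method.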
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except that the return value is a
        Python object instead of a JSON string. This method is mainly useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'uimockup'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_uimockup.Hisat2Call,
name='uimockup.Hisat2Call',
types=[dict])
self.method_authentication['uimockup.Hisat2Call'] = 'required'
self.rpc_service.add(impl_uimockup.status,
name='uimockup.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for uimockup ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, start the server on localhost on a system-assigned port in
    the main thread. Execution of the main thread stays in the server main
    loop until interrupted. To run the server in a separate process, and thus
    allow the stop_server method to be called, set newprocess=True; this also
    allows the port number to be returned (see the usage sketch after
    stop_server below).'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
_proc = None
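# Usage sketch for start_server/stop_server (illustrative; assumes this module
# has been imported):
#   port = start_server(host='localhost', port=0, newprocess=True)
#   ...   # issue JSON-RPC POSTs against http://localhost:<port>/
#   stop_server()
# With newprocess=False (the default) start_server blocks in serve_forever()
# until interrupted.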
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
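# Illustrative input file for process_async_cli (contents are an assumption,
# matching the fields the function reads and fills in):
#   {"method": "MyService.my_method", "params": [...],
#    "version": "1.1", "id": "12345"}
# The response or error envelope is serialized with JSONObjectEncoder into
# output_file_path, and a 500 exit code signals that an error occurred.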
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array, append
import copy
import json
from robofab.objects.objectsRF import RPoint
from robofab.world import OpenFont
from decomposeGlyph import decomposeGlyph
class FFont:
"Font wrapper for floating point operations"
def __init__(self,f=None):
self.glyphs = {}
self.hstems = []
self.vstems = []
self.kerning = {}
if isinstance(f,FFont):
#self.glyphs = [g.copy() for g in f.glyphs]
for key,g in f.glyphs.iteritems():
self.glyphs[key] = g.copy()
self.hstems = list(f.hstems)
self.vstems = list(f.vstems)
self.kerning = dict(f.kerning)
elif f != None:
self.copyFromFont(f)
def copyFromFont(self, f):
for g in f:
self.glyphs[g.name] = FGlyph(g)
self.hstems = [s for s in f.info.postscriptStemSnapH]
self.vstems = [s for s in f.info.postscriptStemSnapV]
self.kerning = f.kerning.asDict()
def copyToFont(self, f):
for g in f:
try:
gF = self.glyphs[g.name]
gF.copyToGlyph(g)
except:
                print "Copy to glyph failed for " + g.name
f.info.postscriptStemSnapH = self.hstems
f.info.postscriptStemSnapV = self.vstems
for pair in self.kerning:
f.kerning[pair] = self.kerning[pair]
def getGlyph(self, gname):
try:
return self.glyphs[gname]
except:
return None
def setGlyph(self, gname, glyph):
self.glyphs[gname] = glyph
def addDiff(self,b,c):
newFont = FFont(self)
for key,g in newFont.glyphs.iteritems():
gB = b.getGlyph(key)
gC = c.getGlyph(key)
try:
newFont.glyphs[key] = g.addDiff(gB,gC)
except:
print "Add diff failed for '%s'" %key
return newFont
class FGlyph:
"provides a temporary floating point compatible glyph data structure"
def __init__(self, g=None):
self.contours = []
self.width = 0.
self.components = []
self.anchors = []
if g != None:
self.copyFromGlyph(g)
def copyFromGlyph(self,g):
self.name = g.name
valuesX = []
valuesY = []
        self.width = len(valuesX)  # store the index of the width entry in dataX
valuesX.append(g.width)
for c in g.components:
self.components.append((len(valuesX), len(valuesY)))
valuesX.append(c.scale[0])
valuesY.append(c.scale[1])
valuesX.append(c.offset[0])
valuesY.append(c.offset[1])
for a in g.anchors:
self.anchors.append((len(valuesX), len(valuesY)))
valuesX.append(a.x)
valuesY.append(a.y)
for i in range(len(g)):
self.contours.append([])
for j in range (len(g[i].points)):
self.contours[i].append((len(valuesX), len(valuesY)))
valuesX.append(g[i].points[j].x)
valuesY.append(g[i].points[j].y)
self.dataX = array(valuesX, dtype=float)
self.dataY = array(valuesY, dtype=float)
def copyToGlyph(self,g):
g.width = self._derefX(self.width)
if len(g.components) == len(self.components):
for i in range(len(self.components)):
g.components[i].scale = (self._derefX(self.components[i][0] + 0, asInt=False),
self._derefY(self.components[i][1] + 0, asInt=False))
g.components[i].offset = (self._derefX(self.components[i][0] + 1),
self._derefY(self.components[i][1] + 1))
if len(g.anchors) == len(self.anchors):
for i in range(len(self.anchors)):
g.anchors[i].x = self._derefX( self.anchors[i][0])
g.anchors[i].y = self._derefY( self.anchors[i][1])
for i in range(len(g)) :
for j in range (len(g[i].points)):
g[i].points[j].x = self._derefX(self.contours[i][j][0])
g[i].points[j].y = self._derefY(self.contours[i][j][1])
def isCompatible(self, g):
return (len(self.dataX) == len(g.dataX) and
len(self.dataY) == len(g.dataY) and
len(g.contours) == len(self.contours))
def __add__(self,g):
if self.isCompatible(g):
newGlyph = self.copy()
newGlyph.dataX = self.dataX + g.dataX
newGlyph.dataY = self.dataY + g.dataY
return newGlyph
else:
print "Add failed for '%s'" %(self.name)
raise Exception
def __sub__(self,g):
if self.isCompatible(g):
newGlyph = self.copy()
newGlyph.dataX = self.dataX - g.dataX
newGlyph.dataY = self.dataY - g.dataY
return newGlyph
else:
print "Subtract failed for '%s'" %(self.name)
raise Exception
def __mul__(self,scalar):
newGlyph = self.copy()
newGlyph.dataX = self.dataX * scalar
newGlyph.dataY = self.dataY * scalar
return newGlyph
def scaleX(self,scalar):
newGlyph = self.copy()
if len(self.dataX) > 0:
newGlyph.dataX = self.dataX * scalar
for i in range(len(newGlyph.components)):
newGlyph.dataX[newGlyph.components[i][0]] = self.dataX[newGlyph.components[i][0]]
return newGlyph
    def shift(self, amount):
        newGlyph = self.copy()
        newGlyph.dataX = self.dataX + amount
for i in range(len(newGlyph.components)):
newGlyph.dataX[newGlyph.components[i][0]] = self.dataX[newGlyph.components[i][0]]
return newGlyph
def interp(self, g, v):
gF = self.copy()
if not self.isCompatible(g):
print "Interpolate failed for '%s'; outlines incompatible" %(self.name)
raise Exception
gF.dataX += (g.dataX - gF.dataX) * v.x
gF.dataY += (g.dataY - gF.dataY) * v.y
return gF
def copy(self):
ng = FGlyph()
ng.contours = list(self.contours)
ng.width = self.width
ng.components = list(self.components)
ng.anchors = list(self.anchors)
ng.dataX = self.dataX.copy()
ng.dataY = self.dataY.copy()
ng.name = self.name
return ng
def _derefX(self,id, asInt=True):
val = self.dataX[id]
return int(round(val)) if asInt else val
def _derefY(self,id, asInt=True):
val = self.dataY[id]
return int(round(val)) if asInt else val
def addDiff(self,gB,gC):
newGlyph = self + (gB - gC)
return newGlyph
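# Illustrative sketch: FGlyph supports element-wise arithmetic on compatible
# outlines, which is what the mixing code below relies on (names are
# assumptions):
#   gMix = gA.interp(gB, RPoint(0.5, 0.5))   # halfway between two masters
#   gNew = gA + (gB - gC)                    # same shape as addDiff above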
class Master:
def __init__(self, font=None, v=0, kernlist=None, overlay=None):
if isinstance(font, FFont):
self.font = None
self.ffont = font
elif isinstance(font,str):
self.openFont(font,overlay)
elif isinstance(font,Mix):
self.font = font
else:
self.font = font
self.ffont = FFont(font)
if isinstance(v,float) or isinstance(v,int):
self.v = RPoint(v, v)
else:
self.v = v
if kernlist != None:
kerns = [i.strip().split() for i in open(kernlist).readlines()]
self.kernlist = [{'left':k[0], 'right':k[1], 'value': k[2]}
for k in kerns
if not k[0].startswith("#")
and not k[0] == ""]
#TODO implement class based kerning / external kerning file
def openFont(self, path, overlayPath=None):
self.font = OpenFont(path)
for g in self.font:
size = len(g)
csize = len(g.components)
if (size > 0 and csize > 0):
decomposeGlyph(self.font, g.name)
if overlayPath != None:
overlayFont = OpenFont(overlayPath)
font = self.font
for overlayGlyph in overlayFont:
font.insertGlyph(overlayGlyph)
self.ffont = FFont(self.font)
class Mix:
def __init__(self,masters,v):
self.masters = masters
if isinstance(v,float) or isinstance(v,int):
self.v = RPoint(v,v)
else:
self.v = v
def getFGlyph(self, master, gname):
if isinstance(master.font, Mix):
            return master.font.mixGlyphs(gname)
return master.ffont.getGlyph(gname)
def getGlyphMasters(self,gname):
masters = self.masters
if len(masters) <= 2:
return self.getFGlyph(masters[0], gname), self.getFGlyph(masters[-1], gname)
def generateFFont(self):
ffont = FFont(self.masters[0].ffont)
for key,g in ffont.glyphs.iteritems():
ffont.glyphs[key] = self.mixGlyphs(key)
ffont.kerning = self.mixKerns()
return ffont
def generateFont(self, baseFont):
newFont = baseFont.copy()
        # TODO: fix stems code (self.mixStems(newFont))
for g in newFont:
gF = self.mixGlyphs(g.name)
if gF == None:
g.mark = True
else:
gF.copyToGlyph(g)
newFont.kerning.clear()
newFont.kerning.update(self.mixKerns() or {})
return newFont
def mixGlyphs(self,gname):
gA,gB = self.getGlyphMasters(gname)
try:
return gA.interp(gB,self.v)
except:
print "mixglyph failed for %s" %(gname)
if gA != None:
return gA.copy()
def getKerning(self, master):
if isinstance(master.font, Mix):
return master.font.mixKerns()
return master.ffont.kerning
def mixKerns(self):
masters = self.masters
kA, kB = self.getKerning(masters[0]), self.getKerning(masters[-1])
return interpolateKerns(kA, kB, self.v)
def narrowFLGlyph(g, gThin, factor=.75):
gF = FGlyph(g)
if not isinstance(gThin,FGlyph):
gThin = FGlyph(gThin)
gCondensed = gThin.scaleX(factor)
try:
gNarrow = gF + (gCondensed - gThin)
gNarrow.copyToGlyph(g)
except:
print "No dice for: " + g.name
def interpolate(a,b,v,e=0):
if e == 0:
return a+(b-a)*v
qe = (b-a)*v*v*v + a #cubic easing
le = a+(b-a)*v # linear easing
return le + (qe-le) * e
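# Worked example of the easing blend above (illustrative values):
#   a=0, b=10, v=0.5, e=1  ->  qe = 10*0.125 = 1.25, le = 5.0, result = 1.25
#   with e=0 the result is the purely linear 5.0; e blends between the two.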
def interpolateKerns(kA, kB, v):
# to yield correct kerning for Roboto output, we must emulate the behavior
# of old versions of this code; namely, take the kerning values of the first
# master instead of actually interpolating.
# old code:
# https://github.com/google/roboto/blob/7f083ac31241cc86d019ea6227fa508b9fcf39a6/scripts/lib/fontbuild/mix.py
# bug:
# https://github.com/google/roboto/issues/213
#kerns = {}
#for pair, val in kA.items():
# kerns[pair] = interpolate(val, kB.get(pair, 0), v.x)
#for pair, val in kB.items():
# lerped_val = interpolate(val, kA.get(pair, 0), 1 - v.x)
# if pair in kerns:
# assert abs(kerns[pair] - lerped_val) < 1e-6
# else:
# kerns[pair] = lerped_val
#return kerns
return dict(kA)
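# Usage sketch (illustrative; file names and weights are assumptions):
#   regular = Master("Roboto-Regular.ufo")
#   bold = Master("Roboto-Bold.ufo", 1.0)
#   medium = Mix([regular, bold], 0.5)
#   mediumFont = medium.generateFont(regular.font)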
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import py_utils
from systrace import trace_result
from systrace import tracing_agents
class FtraceAgentIo(object):
@staticmethod
def writeFile(path, data):
if FtraceAgentIo.haveWritePermissions(path):
with open(path, 'w') as f:
f.write(data)
else:
raise IOError('Cannot write to %s; did you forget sudo/root?' % path)
@staticmethod
def readFile(path):
with open(path, 'r') as f:
return f.read()
@staticmethod
def haveWritePermissions(path):
return os.access(path, os.W_OK)
FT_DIR = "/sys/kernel/debug/tracing/"
FT_CLOCK = FT_DIR + "trace_clock"
FT_BUFFER_SIZE = FT_DIR + "buffer_size_kb"
FT_TRACER = FT_DIR + "current_tracer"
FT_PRINT_TGID = FT_DIR + "options/print-tgid"
FT_TRACE_ON = FT_DIR + "tracing_on"
FT_TRACE = FT_DIR + "trace"
FT_TRACE_MARKER = FT_DIR + "trace_marker"
FT_OVERWRITE = FT_DIR + "options/overwrite"
all_categories = {
"sched": {
"desc": "CPU Scheduling",
"req": ["sched/sched_switch/", "sched/sched_wakeup/"]
},
"freq": {
"desc": "CPU Frequency",
"req": ["power/cpu_frequency/"],
"opt": ["power/clock_set_rate/", "clk/clk_set_rate/"]
},
"irq": {
"desc": "CPU IRQS and IPIS",
"req": ["irq/"],
"opt": ["ipi/"]
},
"workq": {
"desc": "Kernel workqueues",
"req": ["workqueue/"]
},
"memreclaim": {
"desc": "Kernel Memory Reclaim",
"req": ["vmscan/mm_vmscan_direct_reclaim_begin/",
"vmscan/mm_vmscan_direct_reclaim_end/",
"vmscan/mm_vmscan_kswapd_wake/",
"vmscan/mm_vmscan_kswapd_sleep/"]
},
"idle": {
"desc": "CPU Idle",
"req": ["power/cpu_idle/"]
},
"regulators": {
"desc": "Voltage and Current Regulators",
"req": ["regulator/"]
},
"disk": {
"desc": "Disk I/O",
"req": ["block/block_rq_issue/",
"block/block_rq_complete/"],
"opt": ["f2fs/f2fs_sync_file_enter/",
"f2fs/f2fs_sync_file_exit/",
"f2fs/f2fs_write_begin/",
"f2fs/f2fs_write_end/",
"ext4/ext4_da_write_begin/",
"ext4/ext4_da_write_end/",
"ext4/ext4_sync_file_enter/",
"ext4/ext4_sync_file_exit/"]
}
}
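# For reference: each "req"/"opt" entry above is expanded into an ftrace
# enable path by FtraceAgent._category_enable_paths below, e.g. the "sched"
# category writes "1" to
#   /sys/kernel/debug/tracing/events/sched/sched_switch/enable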
def try_create_agent(config):
if config.target != 'linux':
return None
return FtraceAgent(FtraceAgentIo)
def list_categories(_):
agent = FtraceAgent(FtraceAgentIo)
agent._print_avail_categories()
class FtraceConfig(tracing_agents.TracingConfig):
def __init__(self, ftrace_categories, target, trace_buf_size):
tracing_agents.TracingConfig.__init__(self)
self.ftrace_categories = ftrace_categories
self.target = target
self.trace_buf_size = trace_buf_size
def add_options(parser):
options = optparse.OptionGroup(parser, 'Ftrace options')
options.add_option('--ftrace-categories', dest='ftrace_categories',
help='Select ftrace categories with a comma-delimited '
'list, e.g. --ftrace-categories=cat1,cat2,cat3')
return options
def get_config(options):
return FtraceConfig(options.ftrace_categories, options.target,
options.trace_buf_size)
class FtraceAgent(tracing_agents.TracingAgent):
def __init__(self, fio=FtraceAgentIo):
"""Initialize a systrace agent.
Args:
config: The command-line config.
categories: The trace categories to capture.
"""
super(FtraceAgent, self).__init__()
self._fio = fio
self._config = None
self._categories = None
def _get_trace_buffer_size(self):
buffer_size = 4096
if ((self._config.trace_buf_size is not None)
and (self._config.trace_buf_size > 0)):
buffer_size = self._config.trace_buf_size
return buffer_size
def _fix_categories(self, categories):
"""
Applies the default category (sched) if there are no categories
in the list and removes unavailable categories from the list.
Args:
categories: List of categories.
"""
if not categories:
categories = ["sched"]
return [x for x in categories
if self._is_category_available(x)]
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
"""Start tracing.
"""
self._config = config
categories = self._fix_categories(config.ftrace_categories)
self._fio.writeFile(FT_BUFFER_SIZE,
str(self._get_trace_buffer_size()))
self._fio.writeFile(FT_CLOCK, 'global')
self._fio.writeFile(FT_TRACER, 'nop')
self._fio.writeFile(FT_OVERWRITE, "0")
# TODO: riandrews to push necessary patches for TGID option to upstream
# linux kernel
# self._fio.writeFile(FT_PRINT_TGID, '1')
for category in categories:
self._category_enable(category)
self._categories = categories # need to store list of categories to disable
print 'starting tracing.'
self._fio.writeFile(FT_TRACE, '')
self._fio.writeFile(FT_TRACE_ON, '1')
return True
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Collect the result of tracing.
This function will block while collecting the result. For sync mode, it
reads the data, e.g., from stdout, until it finishes. For async mode, it
blocks until the agent is stopped and the data is ready.
"""
self._fio.writeFile(FT_TRACE_ON, '0')
for category in self._categories:
self._category_disable(category)
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
# get the output
d = self._fio.readFile(FT_TRACE)
self._fio.writeFile(FT_BUFFER_SIZE, "1")
return trace_result.TraceResult('trace-data', d)
def SupportsExplicitClockSync(self):
return False
def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
# No implementation, but need to have this to support the API
# pylint: disable=unused-argument
return False
def _is_category_available(self, category):
if category not in all_categories:
return False
events_dir = FT_DIR + "events/"
req_events = all_categories[category]["req"]
for event in req_events:
event_full_path = events_dir + event + "enable"
if not self._fio.haveWritePermissions(event_full_path):
return False
return True
def _avail_categories(self):
ret = []
for event in all_categories:
if self._is_category_available(event):
ret.append(event)
return ret
def _print_avail_categories(self):
avail = self._avail_categories()
if len(avail):
print "tracing config:"
for category in self._avail_categories():
desc = all_categories[category]["desc"]
print "{0: <16}".format(category), ": ", desc
else:
print "No tracing categories available - perhaps you need root?"
def _category_enable_paths(self, category):
events_dir = FT_DIR + "events/"
req_events = all_categories[category]["req"]
for event in req_events:
event_full_path = events_dir + event + "enable"
yield event_full_path
if "opt" in all_categories[category]:
opt_events = all_categories[category]["opt"]
for event in opt_events:
event_full_path = events_dir + event + "enable"
if self._fio.haveWritePermissions(event_full_path):
yield event_full_path
def _category_enable(self, category):
for path in self._category_enable_paths(category):
self._fio.writeFile(path, "1")
def _category_disable(self, category):
for path in self._category_enable_paths(category):
self._fio.writeFile(path, "0")
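# Usage sketch (illustrative; mirrors how systrace drives its tracing agents):
#   config = FtraceConfig(['sched', 'freq'], 'linux', None)
#   agent = try_create_agent(config)
#   agent.StartAgentTracing(config)
#   ...   # run the workload of interest
#   agent.StopAgentTracing()
#   result = agent.GetResults()   # trace_result.TraceResult('trace-data', ...)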
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import decorators
import mock
class FakePlatform(object):
def GetOSName(self):
return 'os_name'
def GetOSVersionName(self):
return 'os_version_name'
class FakePossibleBrowser(object):
def __init__(self):
self.browser_type = 'browser_type'
self.platform = FakePlatform()
self.supports_tab_control = False
class FakeTest(object):
def SetEnabledStrings(self, enabled_strings):
# pylint: disable=W0201
self._enabled_strings = enabled_strings
def SetDisabledStrings(self, disabled_strings):
# pylint: disable=W0201
self._disabled_strings = disabled_strings
class TestDisableDecorators(unittest.TestCase):
def testDisabledStringOnFunction(self):
@decorators.Disabled('bar')
def Sum():
return 1 + 1
self.assertEquals({'bar'}, Sum._disabled_strings)
@decorators.Disabled('bar')
@decorators.Disabled('baz')
@decorators.Disabled('bart', 'baz')
def Product():
return 1 * 1
self.assertEquals({'bar', 'bart', 'baz'}, Product._disabled_strings)
def testDisabledStringOnClass(self):
@decorators.Disabled('windshield')
class Ford(object):
pass
self.assertEquals({'windshield'}, Ford._disabled_strings)
@decorators.Disabled('windows', 'Drive')
@decorators.Disabled('wheel')
@decorators.Disabled('windows')
class Honda(object):
pass
self.assertEquals({'wheel', 'Drive', 'windows'}, Honda._disabled_strings)
def testDisabledStringOnMethod(self):
class Ford(object):
@decorators.Disabled('windshield')
def Drive(self):
pass
self.assertEquals({'windshield'}, Ford().Drive._disabled_strings)
class Honda(object):
@decorators.Disabled('windows', 'Drive')
@decorators.Disabled('wheel')
@decorators.Disabled('windows')
def Drive(self):
pass
self.assertEquals({'wheel', 'Drive', 'windows'},
Honda().Drive._disabled_strings)
class TestEnableDecorators(unittest.TestCase):
def testEnabledStringOnFunction(self):
@decorators.Enabled('minus', 'power')
def Sum():
return 1 + 1
self.assertEquals({'minus', 'power'}, Sum._enabled_strings)
@decorators.Enabled('dot')
@decorators.Enabled('product')
@decorators.Enabled('product', 'dot')
def Product():
return 1 * 1
self.assertEquals({'dot', 'product'}, Product._enabled_strings)
def testEnabledStringOnClass(self):
@decorators.Enabled('windshield', 'light')
class Ford(object):
pass
self.assertEquals({'windshield', 'light'}, Ford._enabled_strings)
@decorators.Enabled('wheel', 'Drive')
@decorators.Enabled('wheel')
@decorators.Enabled('windows')
class Honda(object):
pass
self.assertEquals({'wheel', 'Drive', 'windows'}, Honda._enabled_strings)
def testEnabledStringOnMethod(self):
class Ford(object):
@decorators.Enabled('windshield')
def Drive(self):
pass
self.assertEquals({'windshield'}, Ford().Drive._enabled_strings)
class Honda(object):
@decorators.Enabled('windows', 'Drive')
@decorators.Enabled('wheel', 'Drive')
@decorators.Enabled('windows')
def Drive(self):
pass
self.assertEquals({'wheel', 'Drive', 'windows'},
Honda().Drive._enabled_strings)
class TestShouldSkip(unittest.TestCase):
def testEnabledStrings(self):
test = FakeTest()
possible_browser = FakePossibleBrowser()
# When no enabled_strings is given, everything should be enabled.
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['another_os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['os_version_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['os_name', 'another_os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['another_os_name', 'os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetEnabledStrings(['another_os_name', 'another_os_version_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
def testDisabledStrings(self):
test = FakeTest()
possible_browser = FakePossibleBrowser()
# When no disabled_strings is given, nothing should be disabled.
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['another_os_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['os_version_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['os_name', 'another_os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['another_os_name', 'os_name'])
self.assertTrue(decorators.ShouldSkip(test, possible_browser)[0])
test.SetDisabledStrings(['another_os_name', 'another_os_version_name'])
self.assertFalse(decorators.ShouldSkip(test, possible_browser)[0])
class TestDeprecation(unittest.TestCase):
@mock.patch('warnings.warn')
def testFunctionDeprecation(self, warn_mock):
@decorators.Deprecated(2015, 12, 1)
def Foo(x):
return x
Foo(1)
warn_mock.assert_called_with(
'Function Foo is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
@mock.patch('warnings.warn')
def testMethodDeprecated(self, warn_mock):
class Bar(object):
@decorators.Deprecated(2015, 12, 1, 'Testing only.')
def Foo(self, x):
return x
Bar().Foo(1)
warn_mock.assert_called_with(
'Function Foo is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. Testing only.\n', stacklevel=4)
@mock.patch('warnings.warn')
def testClassWithoutInitDefinedDeprecated(self, warn_mock):
@decorators.Deprecated(2015, 12, 1)
class Bar(object):
def Foo(self, x):
return x
Bar().Foo(1)
warn_mock.assert_called_with(
'Class Bar is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
@mock.patch('warnings.warn')
def testClassWithInitDefinedDeprecated(self, warn_mock):
@decorators.Deprecated(2015, 12, 1)
class Bar(object):
def __init__(self):
pass
def Foo(self, x):
return x
Bar().Foo(1)
warn_mock.assert_called_with(
'Class Bar is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
@mock.patch('warnings.warn')
def testInheritedClassDeprecated(self, warn_mock):
class Ba(object):
pass
@decorators.Deprecated(2015, 12, 1)
class Bar(Ba):
def Foo(self, x):
return x
class Baz(Bar):
pass
Baz().Foo(1)
warn_mock.assert_called_with(
'Class Bar is deprecated. It will no longer be supported on '
'December 01, 2015. Please remove it or switch to an alternative '
'before that time. \n', stacklevel=4)
def testReturnValue(self):
class Bar(object):
@decorators.Deprecated(2015, 12, 1, 'Testing only.')
def Foo(self, x):
return x
self.assertEquals(5, Bar().Foo(5))
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import iso8601
import netaddr
import six
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
class KeyTypeError(TypeError):
def __init__(self, expected, value):
super(KeyTypeError, self).__init__(
_('Key %(key)s must be of type %(expected)s not %(actual)s'
) % {'key': repr(value),
'expected': expected.__name__,
'actual': value.__class__.__name__,
})
class ElementTypeError(TypeError):
def __init__(self, expected, key, value):
super(ElementTypeError, self).__init__(
_('Element %(key)s:%(val)s must be of type %(expected)s'
' not %(actual)s'
) % {'key': key,
'val': repr(value),
'expected': expected,
'actual': value.__class__.__name__,
})
class AbstractFieldType(six.with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def coerce(self, obj, attr, value):
"""This is called to coerce (if possible) a value on assignment.
This method should convert the value given into the designated type,
or throw an exception if this is not possible.
:param:obj: The NovaObject on which an attribute is being set
:param:attr: The name of the attribute being set
:param:value: The value being set
:returns: A properly-typed value
"""
pass
@abc.abstractmethod
def from_primitive(self, obj, attr, value):
"""This is called to deserialize a value.
This method should deserialize a value from the form given by
to_primitive() to the designated type.
:param:obj: The NovaObject on which the value is to be set
:param:attr: The name of the attribute which will hold the value
:param:value: The serialized form of the value
:returns: The natural form of the value
"""
pass
@abc.abstractmethod
def to_primitive(self, obj, attr, value):
"""This is called to serialize a value.
This method should serialize a value to the form expected by
from_primitive().
:param:obj: The NovaObject on which the value is set
:param:attr: The name of the attribute holding the value
:param:value: The natural form of the value
:returns: The serialized form of the value
"""
pass
@abc.abstractmethod
def describe(self):
"""Returns a string describing the type of the field."""
pass
class FieldType(AbstractFieldType):
@staticmethod
def coerce(obj, attr, value):
return value
@staticmethod
def from_primitive(obj, attr, value):
return value
@staticmethod
def to_primitive(obj, attr, value):
return value
def describe(self):
return self.__class__.__name__
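# Illustrative sketch (not part of this module): a custom FieldType typically
# only needs to override coerce(); the pass-through from_primitive() and
# to_primitive() above are inherited. Names below are hypothetical.
#
#   class LowerCaseString(FieldType):
#       @staticmethod
#       def coerce(obj, attr, value):
#           return unicode(value).lower()
#
#   class LowerCaseStringField(AutoTypedField):
#       AUTO_TYPE = LowerCaseString()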
class UnspecifiedDefault(object):
pass
class Field(object):
def __init__(self, field_type, nullable=False, default=UnspecifiedDefault):
self._type = field_type
self._nullable = nullable
self._default = default
@property
def nullable(self):
return self._nullable
@property
def default(self):
return self._default
def _null(self, obj, attr):
if self.nullable:
return None
elif self._default != UnspecifiedDefault:
# NOTE(danms): We coerce the default value each time the field
# is set to None as our contract states that we'll let the type
# examine the object and attribute name at that time.
return self._type.coerce(obj, attr, self._default)
else:
raise ValueError(_("Field `%s' cannot be None") % attr)
def coerce(self, obj, attr, value):
"""Coerce a value to a suitable type.
This is called any time you set a value on an object, like:
foo.myint = 1
and is responsible for making sure that the value (1 here) is of
the proper type, or can be sanely converted.
This also handles the potentially nullable or defaultable
nature of the field and calls the coerce() method on a
FieldType to actually do the coercion.
:param:obj: The object being acted upon
:param:attr: The name of the attribute/field being set
:param:value: The value being set
:returns: The properly-typed value
"""
if value is None:
return self._null(obj, attr)
else:
return self._type.coerce(obj, attr, value)
def from_primitive(self, obj, attr, value):
"""Deserialize a value from primitive form.
This is responsible for deserializing a value from primitive
into regular form. It calls the from_primitive() method on a
FieldType to do the actual deserialization.
:param:obj: The object being acted upon
:param:attr: The name of the attribute/field being deserialized
:param:value: The value to be deserialized
:returns: The deserialized value
"""
if value is None:
return None
else:
return self._type.from_primitive(obj, attr, value)
def to_primitive(self, obj, attr, value):
"""Serialize a value to primitive form.
This is responsible for serializing a value to primitive
form. It calls to_primitive() on a FieldType to do the actual
serialization.
:param:obj: The object being acted upon
:param:attr: The name of the attribute/field being serialized
:param:value: The value to be serialized
:returns: The serialized value
"""
if value is None:
return None
else:
return self._type.to_primitive(obj, attr, value)
def describe(self):
"""Return a short string describing the type of this field."""
name = self._type.describe()
prefix = self.nullable and 'Nullable' or ''
return prefix + name
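# Illustrative example of how Field combines nullability and defaults with
# type coercion (object and attribute names are assumptions):
#   Field(Integer(), nullable=True).coerce(obj, 'count', '5')   # -> 5
#   Field(Integer(), nullable=True).coerce(obj, 'count', None)  # -> None
#   Field(Integer(), default=0).coerce(obj, 'count', None)      # -> 0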
class String(FieldType):
@staticmethod
def coerce(obj, attr, value):
# FIXME(danms): We should really try to avoid the need to do this
if isinstance(value, (six.string_types, int, long, float,
datetime.datetime)):
return unicode(value)
else:
raise ValueError(_('A string is required here, not %s') %
value.__class__.__name__)
class UUID(FieldType):
@staticmethod
def coerce(obj, attr, value):
# FIXME(danms): We should actually verify the UUIDness here
return str(value)
class Integer(FieldType):
@staticmethod
def coerce(obj, attr, value):
return int(value)
class Float(FieldType):
def coerce(self, obj, attr, value):
return float(value)
class Boolean(FieldType):
@staticmethod
def coerce(obj, attr, value):
return bool(value)
class DateTime(FieldType):
@staticmethod
def coerce(obj, attr, value):
if isinstance(value, six.string_types):
value = timeutils.parse_isotime(value)
elif not isinstance(value, datetime.datetime):
raise ValueError(_('A datetime.datetime is required here'))
if value.utcoffset() is None:
value = value.replace(tzinfo=iso8601.iso8601.Utc())
return value
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, timeutils.parse_isotime(value))
@staticmethod
def to_primitive(obj, attr, value):
return timeutils.isotime(value)
class IPAddress(FieldType):
@staticmethod
def coerce(obj, attr, value):
try:
return netaddr.IPAddress(value)
except netaddr.AddrFormatError as e:
raise ValueError(str(e))
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
@staticmethod
def to_primitive(obj, attr, value):
return str(value)
class IPV4Address(IPAddress):
@staticmethod
def coerce(obj, attr, value):
result = IPAddress.coerce(obj, attr, value)
if result.version != 4:
raise ValueError(_('Network "%s" is not valid') % value)
return result
class IPV6Address(IPAddress):
@staticmethod
def coerce(obj, attr, value):
result = IPAddress.coerce(obj, attr, value)
if result.version != 6:
raise ValueError(_('Network "%s" is not valid') % value)
return result
class CompoundFieldType(FieldType):
def __init__(self, element_type, **field_args):
self._element_type = Field(element_type, **field_args)
class List(CompoundFieldType):
def coerce(self, obj, attr, value):
if not isinstance(value, list):
raise ValueError(_('A list is required here'))
for index, element in enumerate(list(value)):
value[index] = self._element_type.coerce(
obj, '%s[%i]' % (attr, index), element)
return value
def to_primitive(self, obj, attr, value):
return [self._element_type.to_primitive(obj, attr, x) for x in value]
def from_primitive(self, obj, attr, value):
return [self._element_type.from_primitive(obj, attr, x) for x in value]
class Dict(CompoundFieldType):
def coerce(self, obj, attr, value):
if not isinstance(value, dict):
raise ValueError(_('A dict is required here'))
for key, element in value.items():
if not isinstance(key, six.string_types):
#NOTE(guohliu) In order to keep compatibility with python3
#we need to use six.string_types rather than basestring here,
#since six.string_types is a tuple, so we need to pass the
#real type in.
raise KeyTypeError(six.string_types[0], key)
value[key] = self._element_type.coerce(
obj, '%s["%s"]' % (attr, key), element)
return value
def to_primitive(self, obj, attr, value):
primitive = {}
for key, element in value.items():
primitive[key] = self._element_type.to_primitive(
obj, '%s["%s"]' % (attr, key), element)
return primitive
def from_primitive(self, obj, attr, value):
concrete = {}
for key, element in value.items():
concrete[key] = self._element_type.from_primitive(
obj, '%s["%s"]' % (attr, key), element)
return concrete
class Object(FieldType):
def __init__(self, obj_name, **kwargs):
self._obj_name = obj_name
super(Object, self).__init__(**kwargs)
def coerce(self, obj, attr, value):
try:
obj_name = value.obj_name()
except AttributeError:
obj_name = ""
if obj_name != self._obj_name:
raise ValueError(_('An object of type %s is required here') %
self._obj_name)
return value
@staticmethod
def to_primitive(obj, attr, value):
return value.obj_to_primitive()
@staticmethod
def from_primitive(obj, attr, value):
# FIXME(danms): Avoid circular import from base.py
from nova.objects import base as obj_base
return obj_base.NovaObject.obj_from_primitive(value, obj._context)
def describe(self):
return "Object<%s>" % self._obj_name
class NetworkModel(FieldType):
@staticmethod
def coerce(obj, attr, value):
if isinstance(value, network_model.NetworkInfo):
return value
elif isinstance(value, six.string_types):
# Hmm, do we need this?
return network_model.NetworkInfo.hydrate(value)
else:
raise ValueError(_('A NetworkModel is required here'))
@staticmethod
def to_primitive(obj, attr, value):
return value.json()
@staticmethod
def from_primitive(obj, attr, value):
return network_model.NetworkInfo.hydrate(value)
class CIDR(FieldType):
@staticmethod
def coerce(obj, attr, value):
try:
network, length = value.split('/')
except (ValueError, AttributeError):
raise ValueError(_('CIDR "%s" is not in proper form') % value)
try:
network = netaddr.IPAddress(network)
except netaddr.AddrFormatError:
raise ValueError(_('Network "%s" is not valid') % network)
try:
length = int(length)
assert (length >= 0)
except (ValueError, AssertionError):
raise ValueError(_('Netmask length "%s" is not valid') % length)
if ((network.version == 4 and length > 32) or
(network.version == 6 and length > 128)):
raise ValueError(_('Netmask length "%(length)s" is not valid '
'for IPv%(version)i address') %
{'length': length, 'version': network.version})
return value
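# Illustrative values for CIDR.coerce (assumptions, for documentation only):
#   '192.168.0.0/24'  -> accepted and returned unchanged
#   '192.168.0.0/33'  -> ValueError (netmask too long for IPv4)
#   'not-a-network'   -> ValueError (not in CIDR form)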
class AutoTypedField(Field):
AUTO_TYPE = None
def __init__(self, **kwargs):
super(AutoTypedField, self).__init__(self.AUTO_TYPE, **kwargs)
class StringField(AutoTypedField):
AUTO_TYPE = String()
class UUIDField(AutoTypedField):
AUTO_TYPE = UUID()
class IntegerField(AutoTypedField):
AUTO_TYPE = Integer()
class FloatField(AutoTypedField):
AUTO_TYPE = Float()
class BooleanField(AutoTypedField):
AUTO_TYPE = Boolean()
class DateTimeField(AutoTypedField):
AUTO_TYPE = DateTime()
class IPAddressField(AutoTypedField):
AUTO_TYPE = IPAddress()
class IPV4AddressField(AutoTypedField):
AUTO_TYPE = IPV4Address()
class IPV6AddressField(AutoTypedField):
AUTO_TYPE = IPV6Address()
class DictOfStringsField(AutoTypedField):
AUTO_TYPE = Dict(String())
class DictOfNullableStringsField(AutoTypedField):
AUTO_TYPE = Dict(String(), nullable=True)
class ListOfStringsField(AutoTypedField):
AUTO_TYPE = List(String())
class ObjectField(AutoTypedField):
def __init__(self, objtype, **kwargs):
self.AUTO_TYPE = Object(objtype)
super(ObjectField, self).__init__(**kwargs)
class ListOfObjectsField(AutoTypedField):
def __init__(self, objtype, **kwargs):
self.AUTO_TYPE = List(Object(objtype))
super(ListOfObjectsField, self).__init__(**kwargs)
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from nova.api.openstack.compute import availability_zone as az_v21
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute.legacy_v2.contrib import availability_zone \
as az_v2
from nova.api.openstack.compute.legacy_v2 import servers as servers_v2
from nova.api.openstack.compute import servers as servers_v21
from nova.api.openstack import extensions
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import servicegroup
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_service
from oslo_config import cfg
FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, disabled=None):
def __fake_service(binary, availability_zone,
created_at, updated_at, host, disabled):
return dict(test_service.fake_service,
binary=binary,
availability_zone=availability_zone,
available_zones=availability_zone,
created_at=created_at,
updated_at=updated_at,
host=host,
disabled=disabled)
if disabled:
return [__fake_service("nova-compute", "zone-2",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-scheduler", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", True)]
else:
return [__fake_service("nova-compute", "zone-1",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-sched", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", False)]
def fake_service_is_up(self, service):
return service['binary'] != u"nova-network"
def fake_set_availability_zones(context, services):
return services
def fake_get_availability_zones(context):
return ['nova'], []
CONF = cfg.CONF
class AvailabilityZoneApiTestV21(test.NoDBTestCase):
availability_zone = az_v21
def setUp(self):
super(AvailabilityZoneApiTestV21, self).setUp()
availability_zones.reset_cache()
self.stub_out('nova.db.service_get_all', fake_service_get_all)
self.stubs.Set(availability_zones, 'set_availability_zones',
fake_set_availability_zones)
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
self.controller = self.availability_zone.AvailabilityZoneController()
self.req = fakes.HTTPRequest.blank('')
def test_filtered_availability_zones(self):
zones = ['zone1', 'internal']
expected = [{'zoneName': 'zone1',
'zoneState': {'available': True},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones, True)
self.assertEqual(result, expected)
expected = [{'zoneName': 'zone1',
'zoneState': {'available': False},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones,
False)
self.assertEqual(result, expected)
def test_availability_zone_index(self):
resp_dict = self.controller.index(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 2)
self.assertEqual(zones[0]['zoneName'], u'zone-1')
self.assertTrue(zones[0]['zoneState']['available'])
self.assertIsNone(zones[0]['hosts'])
self.assertEqual(zones[1]['zoneName'], u'zone-2')
self.assertFalse(zones[1]['zoneState']['available'])
self.assertIsNone(zones[1]['hosts'])
def test_availability_zone_detail(self):
resp_dict = self.controller.detail(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 3)
timestamp = iso8601.parse_date("2012-12-26T14:45:25Z")
nova_network_timestamp = iso8601.parse_date("2012-12-26T14:45:24Z")
expected = [{'zoneName': 'zone-1',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-compute': {'active': True, 'available': True,
'updated_at': timestamp}}}},
{'zoneName': 'internal',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-sched': {'active': True, 'available': True,
'updated_at': timestamp}},
'fake_host-2': {
'nova-network': {
'active': True,
'available': False,
'updated_at': nova_network_timestamp}}}},
{'zoneName': 'zone-2',
'zoneState': {'available': False},
'hosts': None}]
self.assertEqual(expected, zones)
def test_availability_zone_detail_no_services(self):
expected_response = {'availabilityZoneInfo':
[{'zoneState': {'available': True},
'hosts': {},
'zoneName': 'nova'}]}
self.stubs.Set(availability_zones, 'get_availability_zones',
fake_get_availability_zones)
resp_dict = self.controller.detail(self.req)
self.assertThat(resp_dict,
matchers.DictMatches(expected_response))
class AvailabilityZoneApiTestV2(AvailabilityZoneApiTestV21):
availability_zone = az_v2
def setUp(self):
super(AvailabilityZoneApiTestV2, self).setUp()
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.non_admin_req = fakes.HTTPRequest.blank('')
def test_availability_zone_detail_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.detail, self.non_admin_req)
class ServersControllerCreateTestV21(test.TestCase):
base_url = '/v2/fake/'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestV21, self).setUp()
self.instance_cache_num = 0
self._set_up_controller()
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'availability_zone': 'nova',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
return instance
fake.stub_out_image_service(self)
self.stub_out('nova.db.instance_create', instance_create)
self.req = fakes.HTTPRequest.blank('')
def _set_up_controller(self):
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist',
'os-availability-zone',
'osapi_v21')
self.no_availability_zone_controller = servers_v21.ServersController(
extension_info=ext_info)
def _test_create_extra(self, params, controller):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
server.update(params)
body = dict(server=server)
server = controller.create(self.req, body=body).obj['server']
def test_create_instance_with_availability_zone_disabled(self):
params = {'availability_zone': 'foo'}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['availability_zone'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params, self.no_availability_zone_controller)
def _create_instance_with_availability_zone(self, zone_name):
def create(*args, **kwargs):
self.assertIn('availability_zone', kwargs)
self.assertEqual('nova', kwargs['availability_zone'])
return old_create(*args, **kwargs)
old_create = compute_api.API.create
self.stubs.Set(compute_api.API, 'create', create)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
'availability_zone': zone_name,
},
}
admin_context = context.get_admin_context()
db.service_create(admin_context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(admin_context,
{'name': 'agg1'}, {'availability_zone': 'nova'})
db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
return self.req, body
def test_create_instance_with_availability_zone(self):
zone_name = 'nova'
req, body = self._create_instance_with_availability_zone(zone_name)
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
def test_create_instance_with_invalid_availability_zone_too_long(self):
zone_name = 'a' * 256
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_too_short(self):
zone_name = ''
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_not_str(self):
zone_name = 111
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_without_availability_zone(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
res = self.controller.create(self.req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
def _set_up_controller(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-availability-zone': 'fake'}
self.controller = servers_v2.Controller(ext_mgr)
ext_mgr_no_az = extensions.ExtensionManager()
ext_mgr_no_az.extensions = {}
self.no_availability_zone_controller = servers_v2.Controller(
ext_mgr_no_az)
def test_create_instance_with_invalid_availability_zone_too_long(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
def test_create_instance_with_invalid_availability_zone_too_short(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
def test_create_instance_with_invalid_availability_zone_not_str(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with gcloud datastore queries."""
import base64
from gcloud._helpers import _ensure_tuple_or_list
from gcloud.datastore._generated import query_pb2 as _query_pb2
from gcloud.datastore import helpers
from gcloud.datastore.key import Key
class Query(object):
"""A Query against the Cloud Datastore.
This class serves as an abstraction for creating a query over data
stored in the Cloud Datastore.
:type client: :class:`gcloud.datastore.client.Client`
:param client: The client used to connect to datastore.
:type kind: string
:param kind: The kind to query.
:type project: string
:param project: The project associated with the query. If not passed,
uses the client's value.
:type namespace: string or None
:param namespace: The namespace to which to restrict results. If not
passed, uses the client's value.
:type ancestor: :class:`gcloud.datastore.key.Key` or None
:param ancestor: key of the ancestor to which this query's results are
restricted.
:type filters: sequence of (property_name, operator, value) tuples
:param filters: property filters applied by this query.
:type projection: sequence of string
:param projection: fields returned as part of query results.
:type order: sequence of string
:param order: field names used to order query results. Prepend '-'
to a field name to sort it in descending order.
:type group_by: sequence of string
:param group_by: field names used to group query results.
:raises: ValueError if ``project`` is not passed and no implicit
default is set.
"""
OPERATORS = {
'<=': _query_pb2.PropertyFilter.LESS_THAN_OR_EQUAL,
'>=': _query_pb2.PropertyFilter.GREATER_THAN_OR_EQUAL,
'<': _query_pb2.PropertyFilter.LESS_THAN,
'>': _query_pb2.PropertyFilter.GREATER_THAN,
'=': _query_pb2.PropertyFilter.EQUAL,
}
"""Mapping of operator strings and their protobuf equivalents."""
def __init__(self,
client,
kind=None,
project=None,
namespace=None,
ancestor=None,
filters=(),
projection=(),
order=(),
group_by=()):
self._client = client
self._kind = kind
self._project = project or client.project
self._namespace = namespace or client.namespace
self._ancestor = ancestor
self._filters = []
# Verify filters passed in.
for property_name, operator, value in filters:
self.add_filter(property_name, operator, value)
self._projection = _ensure_tuple_or_list('projection', projection)
self._order = _ensure_tuple_or_list('order', order)
self._group_by = _ensure_tuple_or_list('group_by', group_by)
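    # Illustrative construction (client, parent_key and property names are
    # assumptions):
    #   query = Query(client, kind='Task', ancestor=parent_key,
    #                 filters=[('done', '=', False)], order=['-created'])
    #   query.keys_only()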
@property
def project(self):
"""Get the project for this Query.
:rtype: str
"""
return self._project or self._client.project
@property
def namespace(self):
"""This query's namespace
:rtype: string or None
:returns: the namespace assigned to this query
"""
return self._namespace or self._client.namespace
@namespace.setter
def namespace(self, value):
"""Update the query's namespace.
:type value: string
"""
if not isinstance(value, str):
raise ValueError("Namespace must be a string")
self._namespace = value
@property
def kind(self):
"""Get the Kind of the Query.
:rtype: string
"""
return self._kind
@kind.setter
def kind(self, value):
"""Update the Kind of the Query.
:type value: string
:param value: updated kind for the query.
.. note::
The protobuf specification allows for ``kind`` to be repeated,
but the current implementation returns an error if more than
one value is passed. If the back-end changes in the future to
allow multiple values, this method will be updated to allow passing
either a string or a sequence of strings.
"""
if not isinstance(value, str):
raise TypeError("Kind must be a string")
self._kind = value
@property
def ancestor(self):
"""The ancestor key for the query.
:rtype: Key or None
"""
return self._ancestor
@ancestor.setter
def ancestor(self, value):
"""Set the ancestor for the query
:type value: Key
:param value: the new ancestor key
"""
if not isinstance(value, Key):
raise TypeError("Ancestor must be a Key")
self._ancestor = value
@ancestor.deleter
def ancestor(self):
"""Remove the ancestor for the query."""
self._ancestor = None
@property
def filters(self):
"""Filters set on the query.
:rtype: sequence of (property_name, operator, value) tuples.
"""
return self._filters[:]
def add_filter(self, property_name, operator, value):
"""Filter the query based on a property name, operator and a value.
Expressions take the form of::
.add_filter('<property>', '<operator>', <value>)
where property is a property stored on the entity in the datastore
and operator is one of ``OPERATORS``
(ie, ``=``, ``<``, ``<=``, ``>``, ``>=``)::
>>> from gcloud import datastore
>>> query = datastore.Query('Person')
>>> query.add_filter('name', '=', 'James')
>>> query.add_filter('age', '>', 50)
:type property_name: string
:param property_name: A property name.
:type operator: string
:param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``.
:type value: :class:`int`, :class:`str`, :class:`bool`,
:class:`float`, :class:`NoneType`,
:class:`datetime.datetime`
:param value: The value to filter on.
:raises: :class:`ValueError` if ``operator`` is not one of the
specified values, or if a filter names ``'__key__'`` but
passes an invalid value (a key is required).
"""
if self.OPERATORS.get(operator) is None:
error_message = 'Invalid expression: "%s"' % (operator,)
choices_message = 'Please use one of: =, <, <=, >, >=.'
raise ValueError(error_message, choices_message)
if property_name == '__key__' and not isinstance(value, Key):
raise ValueError('Invalid key: "%s"' % value)
self._filters.append((property_name, operator, value))
@property
def projection(self):
"""Fields names returned by the query.
:rtype: sequence of string
:returns: Names of fields in query results.
"""
return self._projection[:]
@projection.setter
def projection(self, projection):
"""Set the fields returned the query.
:type projection: string or sequence of strings
:param projection: Each value is a string giving the name of a
property to be included in the projection query.
"""
if isinstance(projection, str):
projection = [projection]
self._projection[:] = projection
def keys_only(self):
"""Set the projection to include only keys."""
self._projection[:] = ['__key__']
def key_filter(self, key, operator='='):
"""Filter on a key.
:type key: :class:`gcloud.datastore.key.Key`
:param key: The key to filter on.
:type operator: string
:param operator: (Optional) One of ``=``, ``<``, ``<=``, ``>``, ``>=``.
Defaults to ``=``.
"""
self.add_filter('__key__', operator, key)
@property
def order(self):
"""Names of fields used to sort query results.
:rtype: sequence of string
"""
return self._order[:]
@order.setter
def order(self, value):
"""Set the fields used to sort query results.
Sort fields will be applied in the order specified.
:type value: string or sequence of strings
:param value: Each value is a string giving the name of the
property on which to sort, optionally preceded by a
hyphen (-) to specify descending order.
Omitting the hyphen implies ascending order.
"""
if isinstance(value, str):
value = [value]
self._order[:] = value
@property
def group_by(self):
"""Names of fields used to group query results.
:rtype: sequence of string
"""
return self._group_by[:]
@group_by.setter
def group_by(self, value):
"""Set fields used to group query results.
:type value: string or sequence of strings
:param value: Each value is a string giving the name of a
property to use to group results together.
"""
if isinstance(value, str):
value = [value]
self._group_by[:] = value
def fetch(self, limit=None, offset=0, start_cursor=None, end_cursor=None,
client=None):
"""Execute the Query; return an iterator for the matching entities.
For example::
>>> from gcloud import datastore
>>> query = datastore.Query('Person')
>>> query.add_filter('name', '=', 'Sally')
>>> list(query.fetch())
[<Entity object>, <Entity object>, ...]
>>> list(query.fetch(1))
[<Entity object>]
:type limit: integer or None
:param limit: An optional limit passed through to the iterator.
:type offset: integer
:param offset: An optional offset passed through to the iterator.
:type start_cursor: bytes
:param start_cursor: An optional cursor passed through to the iterator.
:type end_cursor: bytes
:param end_cursor: An optional cursor passed through to the iterator.
:type client: :class:`gcloud.datastore.client.Client`
:param client: client used to connect to datastore.
If not supplied, uses the query's value.
:rtype: :class:`Iterator`
:raises: ValueError if ``client`` is not passed and no implicit
default has been set.
"""
if client is None:
client = self._client
return Iterator(
self, client, limit, offset, start_cursor, end_cursor)
class Iterator(object):
"""Represent the state of a given execution of a Query.
:type query: :class:`gcloud.datastore.query.Query`
:param query: Query object holding permanent configuration (i.e.
things that don't change with each page in
a results set).
:type client: :class:`gcloud.datastore.client.Client`
:param client: The client used to make a request.
:type limit: integer
:param limit: (Optional) Limit the number of results returned.
:type offset: integer
:param offset: (Optional) Defaults to 0. Offset used to begin
a query.
:type start_cursor: bytes
:param start_cursor: (Optional) Cursor to begin paging through
query results.
:type end_cursor: bytes
:param end_cursor: (Optional) Cursor to end paging through
query results.
"""
_NOT_FINISHED = _query_pb2.QueryResultBatch.NOT_FINISHED
_FINISHED = (
_query_pb2.QueryResultBatch.NO_MORE_RESULTS,
_query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT,
)
def __init__(self, query, client, limit=None, offset=0,
start_cursor=None, end_cursor=None):
self._query = query
self._client = client
self._limit = limit
self._offset = offset
self._start_cursor = start_cursor
self._end_cursor = end_cursor
self._page = self._more_results = None
def next_page(self):
"""Fetch a single "page" of query results.
Low-level API for fine control: the more convenient API is
to iterate on the current Iterator.
:rtype: tuple, (entities, more_results, cursor)
"""
pb = _pb_from_query(self._query)
start_cursor = self._start_cursor
if start_cursor is not None:
pb.start_cursor = base64.urlsafe_b64decode(start_cursor)
end_cursor = self._end_cursor
if end_cursor is not None:
pb.end_cursor = base64.urlsafe_b64decode(end_cursor)
if self._limit is not None:
pb.limit = self._limit
pb.offset = self._offset
transaction = self._client.current_transaction
query_results = self._client.connection.run_query(
query_pb=pb,
project=self._query.project,
namespace=self._query.namespace,
transaction_id=transaction and transaction.id,
)
# NOTE: `query_results` contains an extra value that we don't use,
# namely `skipped_results`.
#
# NOTE: The value of `more_results` is not currently useful because
# the back-end always returns an enum
# value of MORE_RESULTS_AFTER_LIMIT even if there are no more
# results. See
# https://github.com/GoogleCloudPlatform/gcloud-python/issues/280
# for discussion.
entity_pbs, cursor_as_bytes, more_results_enum = query_results[:3]
if cursor_as_bytes == b'':
self._start_cursor = None
else:
self._start_cursor = base64.urlsafe_b64encode(cursor_as_bytes)
self._end_cursor = None
if more_results_enum == self._NOT_FINISHED:
self._more_results = True
elif more_results_enum in self._FINISHED:
self._more_results = False
else:
raise ValueError('Unexpected value returned for `more_results`.')
self._page = [
helpers.entity_from_protobuf(entity)
for entity in entity_pbs]
return self._page, self._more_results, self._start_cursor
def __iter__(self):
"""Generator yielding all results matching our query.
:rtype: sequence of :class:`gcloud.datastore.entity.Entity`
"""
self.next_page()
while True:
for entity in self._page:
yield entity
if not self._more_results:
break
self.next_page()
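# --- Hedged usage sketch (added for illustration; not part of the original
# module). It shows how the low-level ``next_page`` API relates to simply
# iterating over an ``Iterator``. A configured ``client`` with valid
# credentials is assumed; the kind name 'Person' is illustrative only.
def _example_manual_pagination(client):
    """Page through query results by hand instead of iterating."""
    query = Query(client, kind='Person')
    iterator = Iterator(query, client, limit=20)
    entities = []
    while True:
        # next_page() returns (entities, more_results, cursor) for one batch.
        page, more_results, _cursor = iterator.next_page()
        entities.extend(page)
        if not more_results:
            break
    return entities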
def _pb_from_query(query):
"""Convert a Query instance to the corresponding protobuf.
:type query: :class:`Query`
:param query: The source query.
:rtype: :class:`gcloud.datastore._generated.query_pb2.Query`
:returns: A protobuf that can be sent to the protobuf API. N.b. that
it does not contain "in-flight" fields for ongoing query
executions (cursors, offset, limit).
"""
pb = _query_pb2.Query()
for projection_name in query.projection:
pb.projection.add().property.name = projection_name
if query.kind:
pb.kind.add().name = query.kind
composite_filter = pb.filter.composite_filter
composite_filter.operator = _query_pb2.CompositeFilter.AND
if query.ancestor:
ancestor_pb = helpers._prepare_key_for_request(
query.ancestor.to_protobuf())
# Filter on __key__ HAS_ANCESTOR == ancestor.
ancestor_filter = composite_filter.filter.add().property_filter
ancestor_filter.property.name = '__key__'
ancestor_filter.operator = _query_pb2.PropertyFilter.HAS_ANCESTOR
ancestor_filter.value.key_value.CopyFrom(ancestor_pb)
for property_name, operator, value in query.filters:
pb_op_enum = query.OPERATORS.get(operator)
# Add the specific filter
property_filter = composite_filter.filter.add().property_filter
property_filter.property.name = property_name
property_filter.operator = pb_op_enum
# Set the value to filter on based on the type.
if property_name == '__key__':
key_pb = value.to_protobuf()
property_filter.value.key_value.CopyFrom(
helpers._prepare_key_for_request(key_pb))
else:
helpers._set_protobuf_value(property_filter.value, value)
if not composite_filter.filter:
pb.ClearField('filter')
for prop in query.order:
property_order = pb.order.add()
if prop.startswith('-'):
property_order.property.name = prop[1:]
property_order.direction = property_order.DESCENDING
else:
property_order.property.name = prop
property_order.direction = property_order.ASCENDING
for group_by_name in query.group_by:
pb.group_by.add().name = group_by_name
return pb
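# --- Hedged sketch (illustrative only): how a configured Query maps onto the
# protobuf built by _pb_from_query(). The 'Person' kind and 'age' property
# are made-up names; a client is needed only to construct the Query object.
def _example_query_to_pb(client):
    query = Query(client, kind='Person', order=['-age'])
    query.add_filter('age', '>=', 18)
    pb = _pb_from_query(query)
    # pb.kind[0].name is 'Person'; the composite filter carries one
    # property_filter using GREATER_THAN_OR_EQUAL on 'age'; pb.order[0]
    # sorts on 'age' in DESCENDING direction.
    return pb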
|
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
# import PIL
import numpy as np
import scipy.sparse
import subprocess
import pickle
import math
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
from .imdb import imdb
from .imdb import ROOT_DIR
from . import ds_utils
from .voc_eval import voc_eval
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
class kittivoc(imdb):
def __init__(self, image_set, devkit_path=None):
imdb.__init__(self, 'kittivoc_' + image_set)
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = self._devkit_path
self._classes = ('__background__', # always index 0
'pedestrian', 'car', 'cyclist')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
self._remove_empty_samples()
# Default to roidb handler
#self._roidb_handler = self.selective_search_roidb
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
self._year = ''
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'use_diff' : False, # using difficult samples
'matlab_eval' : False,
'rpn_file' : None,
'min_size' : 2}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
:param index: filename stem, e.g. 000000
:return: absolute path to the image file
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'KITTIVOC')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest, aka, the annotations.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
if self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote ss roidb to {}'.format(cache_file))
return roidb
def rpn_roidb(self):
if self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(self._data_path,
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in range(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _remove_empty_samples(self):
"""
Remove images that contain no valid objects (only 'dontcare' or difficult annotations).
"""
print('Remove empty annotations: ', end=' ')
for i in range(len(self._image_index)-1, -1, -1):
index = self._image_index[i]
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
non_diff_objs = [
obj for obj in objs if \
int(obj.find('difficult').text) == 0 and obj.find('name').text.lower().strip() != 'dontcare']
num_objs = len(non_diff_objs)
if num_objs == 0:
print(index, end=' ')
self._image_index.pop(i)
print('Done. ')
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.int32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
# just the same as gt_classes
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
care_inds = np.empty((0), dtype=np.int32)
dontcare_inds = np.empty((0), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = max(float(bbox.find('xmin').text) - 1, 0)
y1 = max(float(bbox.find('ymin').text) - 1, 0)
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
class_name = obj.find('name').text.lower().strip()
if class_name != 'dontcare':
care_inds = np.append(care_inds, np.asarray([ix], dtype=np.int32))
if class_name == 'dontcare':
dontcare_inds = np.append(dontcare_inds, np.asarray([ix], dtype=np.int32))
boxes[ix, :] = [x1, y1, x2, y2]
continue
cls = self._class_to_ind[class_name]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
# deal with dontcare areas
dontcare_areas = boxes[dontcare_inds, :]
boxes = boxes[care_inds, :]
gt_classes = gt_classes[care_inds]
overlaps = overlaps[care_inds, :]
seg_areas = seg_areas[care_inds]
ishards = ishards[care_inds]
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_ishard' : ishards,
'dontcare_areas' : dontcare_areas,
'gt_overlaps' : overlaps,
'flipped' : False,
'seg_areas' : seg_areas}
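# Note added for clarity: each entry returned by _load_pascal_annotation()
# is a per-image dict with 'boxes' (N x 4, 0-based [x1, y1, x2, y2]),
# 'gt_classes' (N,), 'gt_ishard' (N,), 'dontcare_areas' (M x 4),
# 'gt_overlaps' (N x num_classes, sparse), 'flipped' (bool) and
# 'seg_areas' (N,).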
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
filedir = os.path.join(self._devkit_path, 'results', 'KITTI', 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1], # filename(stem), score
dets[k, 0] + 1, dets[k, 1] + 1, # x1, y1, x2, y2
dets[k, 2] + 1, dets[k, 3] + 1))
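# Hedged helper sketch (added for illustration, unused by the class): parses
# one line of the results format written above, "<stem> <score> <x1> <y1>
# <x2> <y2>" with 1-based pixel coordinates, back into 0-based values.
@staticmethod
def _parse_voc_result_line(line):
    fields = line.strip().split()
    index, score = fields[0], float(fields[1])
    x1, y1, x2, y2 = [float(v) - 1.0 for v in fields[2:6]]
    return index, score, (x1, y1, x2, y2)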
def _do_python_eval(self, output_dir = 'output'):
annopath = os.path.join(
self._devkit_path,
'Annotations', '{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'ImageSets', 'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imagesetfile, cls, cachedir,
ovthresh=0.5, use_07_metric = use_07_metric)
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
d = kittivoc('trainval')
res = d.roidb
from IPython import embed; embed()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for the MAML algorithm and network definitions."""
# pylint: disable=g-import-not-at-top
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
# Try conditional import before continuing rest of imports
try:
import special_grads # pylint: disable=unused-import
except KeyError as e:
print(
'WARN: Cannot define MaxPoolGrad, likely already defined for this version of tensorflow: %s'
% e,
file=sys.stderr)
from tensorflow.python.platform import flags
from utils import conv_block
from utils import mse
from utils import normalize
from utils import xent
FLAGS = flags.FLAGS
class MAML:
"""Creates MAML model."""
def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):
"""must call construct_model() after initializing MAML!"""
self.dim_input = dim_input
self.dim_output = dim_output
self.update_lr = FLAGS.update_lr
self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
self.classification = False
self.test_num_updates = test_num_updates
if FLAGS.label_smooth:
self.noises = 0.1 * tf.range(10, dtype=tf.float32)
if FLAGS.datasource == 'sinusoid':
self.dim_hidden = [40, 40]
self.loss_func = mse
self.forward = self.forward_fc
self.construct_weights = self.construct_fc_weights
elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'dclaw':
self.loss_func = xent
self.classification = True
if FLAGS.conv:
self.dim_hidden = FLAGS.num_filters
self.forward = self.forward_conv
self.construct_weights = self.construct_conv_weights
else:
self.dim_hidden = [256, 128, 64, 64]
self.forward = self.forward_fc
self.construct_weights = self.construct_fc_weights
if FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'dclaw':
self.channels = 3
else:
self.channels = 1
self.img_size = int(np.sqrt(self.dim_input / self.channels))
else:
raise ValueError('Unrecognized data source.')
def construct_model(self, input_tensors=None, prefix='metatrain_'):
"""Builds model for train / test based on prefix."""
# a: training data for inner gradient, b: test data for meta gradient
if input_tensors is None:
self.inputa = tf.placeholder(tf.float32)
self.inputb = tf.placeholder(tf.float32)
self.labela = tf.placeholder(tf.float32)
self.labelb = tf.placeholder(tf.float32)
else:
self.inputa = input_tensors['inputa']
self.inputb = input_tensors['inputb']
self.labela = input_tensors['labela']
self.labelb = input_tensors['labelb']
with tf.variable_scope('model', reuse=None) as training_scope:
if 'weights' in dir(self):
training_scope.reuse_variables()
weights = self.weights
else:
# Define the weights
self.weights = weights = self.construct_weights()
# outputbs[i] and lossesb[i] is the output and loss after
# i+1 gradient updates
lossesa, outputas, lossesb, outputbs = [], [], [], []
accuraciesa, accuraciesb = [], []
num_updates = max(self.test_num_updates, FLAGS.num_updates)
outputbs = [[]] * num_updates
lossesb = [[]] * num_updates
accuraciesb = [[]] * num_updates
def task_metalearn(inp, reuse=True):
"""Perform gradient descent for one task in the meta-batch."""
inputa, inputb, labela, labelb = inp
if 'train' in prefix and FLAGS.label_smooth:
# Sample discrete noise and run.
index = tf.random.uniform([], 0, 10, dtype=tf.int64)
noise = self.noises[index]
# 5-way classification
# 1 0 0 --> 0.9 0 0 --> 0.933 0.033 0.033
labela = (1.0 - noise) * labela + (noise / 5.0)
labelb = (1.0 - noise) * labelb + (noise / 5.0)
# Let's normalize just in case my math is off.
labela = labela / tf.reduce_sum(labela, axis=1, keepdims=True)
labelb = labelb / tf.reduce_sum(labelb, axis=1, keepdims=True)
task_outputbs, task_lossesb = [], []
if self.classification:
task_accuraciesb = []
task_outputa = self.forward(
inputa, weights, reuse=reuse) # only reuse on the first iter
task_lossa = self.loss_func(task_outputa, labela)
grads = tf.gradients(task_lossa, list(weights.values()))
if FLAGS.stop_grad:
grads = [tf.stop_gradient(grad) for grad in grads]
gradients = dict(zip(weights.keys(), grads))
fast_weights = dict(
zip(weights.keys(), [
weights[key] - self.update_lr * gradients[key]
for key in weights.keys()
]))
output = self.forward(inputb, fast_weights, reuse=True)
task_outputbs.append(output)
task_lossesb.append(self.loss_func(output, labelb))
for j in range(num_updates - 1):
loss = self.loss_func(
self.forward(inputa, fast_weights, reuse=True), labela)
grads = tf.gradients(loss, list(fast_weights.values()))
if FLAGS.stop_grad:
grads = [tf.stop_gradient(grad) for grad in grads]
gradients = dict(zip(fast_weights.keys(), grads))
fast_weights = dict(
zip(fast_weights.keys(), [
fast_weights[key] - self.update_lr * gradients[key]
for key in fast_weights.keys()
]))
output = self.forward(inputb, fast_weights, reuse=True)
task_outputbs.append(output)
task_lossesb.append(self.loss_func(output, labelb))
task_output = [task_outputa, task_outputbs, task_lossa, task_lossesb]
if self.classification:
task_accuracya = tf.contrib.metrics.accuracy(
tf.argmax(tf.nn.softmax(task_outputa), 1), tf.argmax(labela, 1))
for j in range(num_updates):
task_accuraciesb.append(
tf.contrib.metrics.accuracy(
tf.argmax(tf.nn.softmax(task_outputbs[j]), 1),
tf.argmax(labelb, 1)))
task_output.extend([task_accuracya, task_accuraciesb])
return task_output
if FLAGS.norm != 'None':
# to initialize the batch norm vars.
# might want to combine this, and not run idx 0 twice.
task_metalearn(
(self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]),
False)
out_dtype = [
tf.float32, [tf.float32] * num_updates, tf.float32,
[tf.float32] * num_updates
]
if self.classification:
out_dtype.extend([tf.float32, [tf.float32] * num_updates])
result = tf.map_fn(
task_metalearn,
elems=(self.inputa, self.inputb, self.labela, self.labelb),
dtype=out_dtype,
parallel_iterations=FLAGS.meta_batch_size)
if self.classification:
outputas, outputbs, lossesa, lossesb, accuraciesa, accuraciesb = result
else:
outputas, outputbs, lossesa, lossesb = result
## Performance & Optimization
if 'train' in prefix:
self.total_loss1 = total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(
FLAGS.meta_batch_size)
self.total_losses2 = total_losses2 = [
tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(num_updates)
]
# after the map_fn
self.outputas, self.outputbs = outputas, outputbs
if self.classification:
self.total_accuracy1 = total_accuracy1 = tf.reduce_sum(
accuraciesa) / tf.to_float(FLAGS.meta_batch_size)
self.total_accuracies2 = total_accuracies2 = [
tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(num_updates)
]
self.pretrain_op = tf.train.AdamOptimizer(
self.meta_lr).minimize(total_loss1)
if FLAGS.metatrain_iterations > 0:
optimizer = tf.train.AdamOptimizer(self.meta_lr)
self.gvs = gvs = optimizer.compute_gradients(
self.total_losses2[FLAGS.num_updates - 1])
if FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'dclaw':
gvs = [(tf.clip_by_value(grad, -10, 10), var) for grad, var in gvs]
self.metatrain_op = optimizer.apply_gradients(gvs)
else:
self.metaval_total_loss1 = total_loss1 = tf.reduce_sum(
lossesa) / tf.to_float(FLAGS.meta_batch_size)
self.metaval_total_losses2 = total_losses2 = [
tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(num_updates)
]
if self.classification:
self.metaval_total_accuracy1 = total_accuracy1 = tf.reduce_sum(
accuraciesa) / tf.to_float(FLAGS.meta_batch_size)
self.metaval_total_accuracies2 = total_accuracies2 = [
tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size)
for j in range(num_updates)
]
## Summaries
tf.summary.scalar(prefix + 'Pre-update loss', total_loss1)
if self.classification:
tf.summary.scalar(prefix + 'Pre-update accuracy', total_accuracy1)
for j in range(num_updates):
tf.summary.scalar(prefix + 'Post-update loss, step ' + str(j + 1),
total_losses2[j])
if self.classification:
tf.summary.scalar(prefix + 'Post-update accuracy, step ' + str(j + 1),
total_accuracies2[j])
### Network construction functions (fc networks and conv networks)
def construct_fc_weights(self):
"""Builds and returns variables for FC weights."""
weights = {}
weights['w1'] = tf.Variable(
tf.truncated_normal([self.dim_input, self.dim_hidden[0]], stddev=0.01))
weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden[0]]))
for i in range(1, len(self.dim_hidden)):
weights['w' + str(i + 1)] = tf.Variable(
tf.truncated_normal([self.dim_hidden[i - 1], self.dim_hidden[i]],
stddev=0.01))
weights['b' + str(i + 1)] = tf.Variable(tf.zeros([self.dim_hidden[i]]))
weights['w' + str(len(self.dim_hidden) + 1)] = tf.Variable(
tf.truncated_normal([self.dim_hidden[-1], self.dim_output],
stddev=0.01))
weights['b' + str(len(self.dim_hidden) + 1)] = tf.Variable(
tf.zeros([self.dim_output]))
return weights
def forward_fc(self, inp, weights, reuse=False):
"""Add fully conencted layers."""
hidden = normalize(
tf.matmul(inp, weights['w1']) + weights['b1'],
activation=tf.nn.relu,
reuse=reuse,
scope='0')
for i in range(1, len(self.dim_hidden)):
hidden = normalize(
tf.matmul(hidden, weights['w' + str(i + 1)]) +
weights['b' + str(i + 1)],
activation=tf.nn.relu,
reuse=reuse,
scope=str(i + 1))
return tf.matmul(hidden, weights['w' + str(len(self.dim_hidden) + 1)]
) + weights['b' + str(len(self.dim_hidden) + 1)]
def construct_conv_weights(self):
"""Builds and returns weights for conv layers."""
weights = {}
dtype = tf.float32
conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
k = 3
weights['conv1'] = tf.get_variable(
'conv1', [k, k, self.channels, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv2'] = tf.get_variable(
'conv2', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b2'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv3'] = tf.get_variable(
'conv3', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b3'] = tf.Variable(tf.zeros([self.dim_hidden]))
weights['conv4'] = tf.get_variable(
'conv4', [k, k, self.dim_hidden, self.dim_hidden],
initializer=conv_initializer,
dtype=dtype)
weights['b4'] = tf.Variable(tf.zeros([self.dim_hidden]))
if FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'dclaw':
# assumes max pooling
weights['w5'] = tf.get_variable(
'w5', [self.dim_hidden * 5 * 5, self.dim_output],
initializer=fc_initializer)
weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
else:
weights['w5'] = tf.Variable(
tf.random_normal([self.dim_hidden, self.dim_output]), name='w5')
weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
return weights
def forward_conv(self, inp, weights, reuse=False, scope=''):
"""Creates convolutional model."""
# reuse is for the normalization parameters.
channels = self.channels
inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])
hidden1 = conv_block(inp, weights['conv1'], weights['b1'], reuse,
scope + '0')
hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], reuse,
scope + '1')
hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], reuse,
scope + '2')
hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], reuse,
scope + '3')
if FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'dclaw':
# last hidden layer is 6x6x64-ish, reshape to a vector
hidden4 = tf.reshape(
hidden4,
[-1, np.prod([int(dim) for dim in hidden4.get_shape()[1:]])])
else:
hidden4 = tf.reduce_mean(hidden4, [1, 2])
return tf.matmul(hidden4, weights['w5']) + weights['b5']
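# --- Hedged sketch (plain NumPy, added for illustration): the inner loop of
# task_metalearn() above takes one SGD step on the support ("a") data to get
# fast weights, then scores them on the query ("b") data. The toy linear
# regression below mirrors that two-step structure; names and numbers are
# illustrative, not part of the MAML class.
def _maml_inner_step_sketch(w, xa, ya, xb, yb, update_lr=0.01):
    # Support-set MSE gradient: d/dw mean((xa.w - ya)^2)
    grad = 2.0 * xa.T.dot(xa.dot(w) - ya) / xa.shape[0]
    fast_w = w - update_lr * grad                 # one inner gradient step
    loss_b = np.mean((xb.dot(fast_w) - yb) ** 2)  # contribution to meta-loss
    return fast_w, loss_b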
|
|
# package org.apache.helix.manager.zk
#from org.apache.helix.manager.zk import *
#from java.io import File
#from java.util import ArrayList
#from java.util import Arrays
#from java.util import Collections
#from java.util import HashMap
#from java.util import List
#from java.util import Map
#from java.util.concurrent import ConcurrentHashMap
#from org.apache.log4j import Logger
#from org.apache.zookeeper.data import Stat
from org.apache.helix.DataAccessor import DataAccessor
from org.apache.helix.HelixException import HelixException
from org.apache.helix.HelixProperty import HelixProperty
from org.apache.helix.PropertyPathConfig import PropertyPathConfig
from org.apache.helix.PropertyType import PropertyType
from org.apache.helix.ZNRecord import ZNRecord
@Deprecated
class ZKDataAccessor(DataAccessor):
"""
Java modifiers:
private static
Type:
Logger
"""
logger = Logger.getLogger("ZKDataAccessor")
"""
Parameters:
String clusterName
ZkClient zkClient
"""
def __init__(self, clusterName, zkClient):
self._clusterName = clusterName
self._zkClient = zkClient
def setProperty(self, type, value, keys):
"""
Returns boolean
Parameters:
type: PropertyTypevalue: HelixPropertykeys: String
@Override
"""
if not value.isValid():
raise HelixException("The ZNRecord for " + type + " is not valid.")
return setProperty(type, value.getRecord(), keys)
def setProperty(self, type, value, keys):
"""
Returns boolean
Parameters:
type: PropertyTypevalue: ZNRecordkeys: String
@Override
"""
# String
path = PropertyPathConfig.getPath(type, _clusterName, keys)
# String
parent = File(path).getParent()
if not _zkClient.exists(parent):
_zkClient.createPersistent(parent, True)
if _zkClient.exists(path):
if type.isCreateOnlyIfAbsent():
return False
else:
ZKUtil.createOrUpdate(_zkClient, path, value, type.isPersistent(), False)
else:
try:
if type.isPersistent():
_zkClient.createPersistent(path, value)
else:
_zkClient.createEphemeral(path, value)
except Exception, e:
logger.warn("Exception while creating path:" + path + " Most likely due to race condition(Ignorable).", e)
return False
return True
def updateProperty(self, type, value, keys):
"""
Returns boolean
Parameters:
type: PropertyTypevalue: HelixPropertykeys: String
@Override
"""
return updateProperty(type, value.getRecord(), keys)
def updateProperty(self, type, value, keys):
"""
Returns boolean
Parameters:
type: PropertyTypevalue: ZNRecordkeys: String
@Override
"""
# String
path = PropertyPathConfig.getPath(type, _clusterName, keys)
if type.isUpdateOnlyOnExists():
ZKUtil.updateIfExists(_zkClient, path, value, type.isMergeOnUpdate())
else:
# String
parent = File(path).getParent()
if not _zkClient.exists(parent):
_zkClient.createPersistent(parent, True)
if not type.usePropertyTransferServer():
ZKUtil.createOrUpdate(_zkClient, path, value, type.isPersistent(), type.isMergeOnUpdate())
else:
ZKUtil.asyncCreateOrUpdate(_zkClient, path, value, type.isPersistent(), type.isMergeOnUpdate())
return True
def getProperty(self, clazz, type, keys):
"""
Returns T
Parameters:
clazz: Class<T>type: PropertyTypekeys: String
@Override
Parameterized: <T extends HelixProperty>
"""
return HelixProperty.convertToTypedInstance(clazz, getProperty(type, keys))
def getProperty(self, type, keys):
"""
Returns ZNRecord
Parameters:
type: PropertyTypekeys: String
@Override
"""
# String
path = PropertyPathConfig.getPath(type, _clusterName, keys)
if not type.isCached():
return _zkClient.readData(path, True)
else:
# int
len = keys.length
if len == 0:
return _zkClient.readData(path, True)
else:
# String[]
subkeys = Arrays.copyOfRange(keys, 0, len - 1)
# Map<String, ZNRecord>
newChilds = refreshChildValuesCache(type, subkeys)
return newChilds.get(keys[len - 1])
def removeProperty(self, type, keys):
"""
Returns boolean
Parameters:
type: PropertyTypekeys: String
@Override
"""
# String
path = PropertyPathConfig.getPath(type, _clusterName, keys)
return _zkClient.delete(path)
def getChildNames(self, type, keys):
"""
Returns List<String>
Parameters:
type: PropertyTypekeys: String
@Override
"""
# String
path = PropertyPathConfig.getPath(type, _clusterName, keys)
if _zkClient.exists(path):
return _zkClient.getChildren(path)
else:
return Collections.emptyList()
def getChildValues(self, clazz, type, keys):
"""
Returns List<T>
Parameters:
clazz: Class<T>type: PropertyTypekeys: String
@Override
Parameterized: <T extends HelixProperty>
"""
# List<ZNRecord>
newChilds = getChildValues(type, keys)
if newChilds.size() > 0:
return HelixProperty.convertToTypedList(clazz, newChilds)
return Collections.emptyList()
def getChildValues(self, type, keys):
"""
Returns List<ZNRecord>
Parameters:
type: PropertyTypekeys: String
@Override
"""
# String
path = PropertyPathConfig.getPath(type, _clusterName, keys)
if _zkClient.exists(path):
if not type.isCached():
return ZKUtil.getChildren(_zkClient, path)
else:
# Map<String, ZNRecord>
newChilds = refreshChildValuesCache(type, keys)
return ArrayList(newChilds.values())  # List<ZNRecord>
return Collections.emptyList()
def reset(self):
"""
Returns void
"""
_cache.clear()
def refreshChildValuesCache(self, type, keys):
"""
Returns Map<String, ZNRecord>
Parameters:
type: PropertyTypekeys: String
Java modifiers:
private
"""
if not type.isCached():
raise IllegalArgumentException("Type:" + type + " is NOT cached")
# String
path = PropertyPathConfig.getPath(type, _clusterName, keys)
# Map<String, ZNRecord>
newChilds = refreshChildValues(path, _cache.get(path))
if newChilds != None and newChilds.size() > 0:
_cache.put(path, newChilds)
return newChilds
else:
_cache.remove(path)
return Collections.emptyMap()
def refreshChildValues(self, parentPath, oldChildRecords):
"""
Returns Map<String, ZNRecord>
Parameters:
parentPath: StringoldChildRecords: Map<String, ZNRecord>
Java modifiers:
private
"""
# List<String>
childs = _zkClient.getChildren(parentPath)
if childs == None or childs.size() == 0:
return Collections.emptyMap()
# Stat
newStat = Stat()
# Map<String, ZNRecord>
newChildRecords = HashMap()
for child in childs:  # String child
# String
childPath = parentPath + "/" + child
if oldChildRecords == None or not oldChildRecords.containsKey(child):
# ZNRecord
record = _zkClient.readDataAndStat(childPath, newStat, True)
if record != None:
record.setVersion(newStat.getVersion())
newChildRecords.put(child, record)
else:
# ZNRecord
oldChild = oldChildRecords.get(child)
# int
oldVersion = oldChild.getVersion()
# long
oldCtime = oldChild.getCreationTime()
newStat = _zkClient.getStat(childPath)
if newStat != None:
if oldCtime < newStat.getCtime() or oldVersion < newStat.getVersion():
# ZNRecord
record = _zkClient.readDataAndStat(childPath, newStat, True)
if record != None:
record.setVersion(newStat.getVersion())
record.setCreationTime(newStat.getCtime())
record.setModifiedTime(newStat.getMtime())
newChildRecords.put(child, record)
else:
newChildRecords.put(child, oldChild)
return Collections.unmodifiableMap(newChildRecords)
def getChildValuesMap(self, clazz, type, keys):
"""
Returns Map<String, T>
Parameters:
clazz: Class<T>type: PropertyTypekeys: String
@Override
Parameterized: <T extends HelixProperty>
"""
# List<T>
list = getChildValues(clazz, type, keys)
return Collections.unmodifiableMap(HelixProperty.convertListToMap(list))
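# --- Hedged sketch (plain Python, added for illustration): the cache refresh
# in refreshChildValues() re-reads a child record only when its version has
# advanced, otherwise it keeps the cached copy. The dictionary-based function
# below mirrors that idea without ZooKeeper; every name here is illustrative.
def _refresh_children_sketch(old_records, current_versions, read_record):
    # old_records: {name: (version, record)} previously cached
    # current_versions: {name: version} as reported by the store
    # read_record: callable(name) -> record, standing in for a ZK read
    refreshed = {}
    for name, new_version in current_versions.items():
        cached = old_records.get(name)
        if cached is None or cached[0] < new_version:
            refreshed[name] = (new_version, read_record(name))
        else:
            refreshed[name] = cached
    return refreshed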
|
|
import glob
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
# TODO:
# 1. Set min/max value for every frame in AnimationScene
# See http://www.paraview.org/pipermail/paraview/2011-September/022682.html
#
LoadPlugin('/Users/schmitt/paraview/paraview/trunk/plugins/vtkLFMReader/build/libvtkLFMReader.dylib',ns=globals())
# Load LFM file(s)
#vtkLfmReaderObject = vtkLFMReader( FileNames=['/Users/schmitt/paraview/testData/LRs_mhd_1995-03-21T04-20-00Z.hdf'] )
#vtkLfmReaderObject = vtkLFMReader( FileNames=['/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_004.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_005.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_006.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_007.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_008.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_009.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_010.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_011.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_012.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_013.hdf'] )
#files = glob.glob('/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_*.hdf')
files = glob.glob('/Users/schmitt/paraview/testData/doctoredAnimation/orig/LMs_mhd_*.hdf')
files.sort()
vtkLfmReaderObject = vtkLFMReader(FileNames = files)
vtkLfmReaderObject.PointArrayStatus = []
vtkLfmReaderObject.CellArrayStatus = []
vtkLfmReaderObject.GridScaleFactor = 'Earth Radius: 6.5e8 cm'
vtkLfmReaderObject.CellArrayStatus = ['Plasma Density', 'Sound Speed', 'Velocity Vector','Magnetic Field Vector']
Show().Visibility = 0
##################
# Top-left panel #
########################################################################
# Orient the camera
TopLeftRenderView = GetRenderView()
TopLeftRenderView.CameraPosition = [-7.0, -70.0, 0.0]
TopLeftRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
TopLeftRenderView.CameraViewUp = [0.0, 0.0, 1.0]
TopLeftRenderView.CameraClippingRange = [122.35967717295773, 129.70814347061219]
TopLeftRenderView.CameraParallelScale = 218.48459610631258
TopLeftRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
# Add plane and map it to the dataset
XZVectorPlane = Plane()
XZVectorPlane.Origin = [-60.0, 0.0, -30.0]
XZVectorPlane.Point1 = [30.0, 0.0, -30.0]
XZVectorPlane.Point2 = [-60.0, 0.0, 30.0]
XZVectorPlane.XResolution = 20
XZVectorPlane.YResolution = 15
SetActiveSource(XZVectorPlane)
Show().Visibility = 0
# ResampleWithDataset
SetActiveSource(vtkLfmReaderObject)
XZField = ResampleWithDataset( Source=XZVectorPlane )
Show().Visibility = 0
# Render vector field
XZVectors = Glyph( GlyphType="Arrow", GlyphTransform="Transform2" )
XZVectors.SetScaleFactor = 9.0
XZVectors.Vectors = ['POINTS', 'Velocity Vector']
XZVectors.GlyphTransform = "Transform2"
XZVectors.GlyphType = "Arrow"
XZVectors.GlyphType.TipRadius = 0.04
XZVectors.GlyphType.TipLength = 0.15
XZVectors.GlyphType.ShaftRadius = 0.015
XZVectors.SetScaleFactor = 2.14564239898506e-07
DataRepresentation16 = Show()
DataRepresentation16.EdgeColor = [0.0, 0.0, 0.5019607843137255]
DataRepresentation16.ColorArrayName = ''
# XY cutplane for colormap
SetActiveSource(vtkLfmReaderObject)
XZSlice = Slice( SliceType="Plane" )
XZSlice.SliceOffsetValues = [0.0]
XZSlice.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
XZSlice.SliceType = "Plane"
XZSlice.SliceType.Normal = [0.0, 1.0, 0.0]
# Calculator for pressure
Pressure = Calculator()
Pressure.AttributeMode = 'point_data'
Pressure.Function = 'Plasma Density*4.7619e23*Sound Speed*Sound Speed*3.75e8'
Pressure.ResultArrayName = 'Pressure'
PressureRepresentation = Show()
PressureRepresentation.EdgeColor = [0.0, 0.0, 0.5000076295109483]
PressureRepresentation.ColorArrayName = 'Pressure'
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, NanColor=[0.498039, 0.498039, 0.498039], RGBPoints=[7.232339585875363e+19, 0.0, 0.0, 1.0, 3.964840999531023e+24, 1.0, 0.0, 0.0], VectorMode='Magnitude', ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a1_Pressure_PiecewiseFunction = CreatePiecewiseFunction()
PressureRepresentation.LookupTable = a1_Pressure_PVLookupTable
ScalarBarWidgetLog10Pressure = CreateScalarBar( Orientation='Horizontal', Title='Pressure', Position2=[0.5, 0.15], LabelFontSize=12, Enabled=1, TitleFontSize=12, Position=[0.25,0.85] )
TopLeftRenderView.Representations.append(ScalarBarWidgetLog10Pressure)
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, UseLogScale=1, RGBPoints=[1e+22, 0.0, 0.0, 1.0, 3.96484e+24, 1.0, 0.0, 0.0], LockScalarRange=1 )
TopLeftRenderView.CameraClippingRange = [119.96970760320372, 132.85099018726737]
ScalarBarWidgetLog10Pressure.LookupTable = a1_Pressure_PVLookupTable
# Describe the view
minValue = a1_Pressure_PVLookupTable.RGBPoints[0]
maxValue = a1_Pressure_PVLookupTable.RGBPoints[4]
TopLeftText = Text()
TopLeftText.Text = 'XZ (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(TopLeftText)
TextRep.Visibility = 1
###################
# Top-Right panel #
########################################################################
TopRightRenderView = CreateRenderView()
#TopRightRenderView.CameraPosition = [-9.54128751659703, -1.5694684006493071, 150.56293391130203]
TopRightRenderView.CameraPosition = [-7, 0.0, 70]
#TopRightRenderView.CameraFocalPoint = [-9.54128751659703, -1.5694684006493071, 0.0]
TopRightRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
TopRightRenderView.CameraViewUp = [0.0, 1.0, 0.0]
TopRightRenderView.CompressorConfig = 'vtkSquirtCompressor 0 3'
TopRightRenderView.UseLight = 1
TopRightRenderView.LightSwitch = 0
TopRightRenderView.RemoteRenderThreshold = 3.0
TopRightRenderView.ViewTime = 0.0
TopRightRenderView.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
TopRightRenderView.CenterAxesVisibility = 0
TopRightRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
TopRightRenderView.CameraParallelScale = 317.214749894812
TopRightRenderView.CameraClippingRange = [149.05730362328296, 152.8213791144487]
# XY Cutplane
SetActiveSource(vtkLfmReaderObject)
# Subtract Dipole
BzMinusDipole = Calculator()
BzMinusDipole.AttributeMode = 'point_data'
BzMinusDipole.Function = '(Magnetic Field Vector_Z*1e5)+(3.05e4*((coordsX^2+coordsY^2+coordsZ^2)^(-1.5))*(2-(3*(coordsX^2+coordsY^2))/(coordsX^2+coordsY^2+coordsZ^2)))'
BzMinusDipole.ResultArrayName = 'Bz-Dipole'
BzNoDipole = Slice( SliceType="Plane" )
BzNoDipole.SliceOffsetValues = [0.0]
BzNoDipole.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
BzNoDipole.SliceType = "Plane"
BzNoDipole.SliceType.Normal = [0.0, 0.0, 1.0]
DataRepresentation22 = Show()
DataRepresentation22.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation22.EdgeColor = [0.0, 0.0, 0.5019607843137255]
a1_BzDipole_PVLookupTable = GetLookupTableForArray( "Bz-Dipole", 1, RGBPoints=[-20.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.498039, 0.498039, 0.498039], ColorSpace='HSV', ScalarRangeInitialized=1.0, LockScalarRange=1 )
a1_BzDipole_PiecewiseFunction = CreatePiecewiseFunction()
DataRepresentation22.ColorArrayName = 'Bz-Dipole'
DataRepresentation22.LookupTable = a1_BzDipole_PVLookupTable
ScalarBarWidgetBzNoDipole = CreateScalarBar( Orientation='Horizontal',Title='Bz-Dipole', LabelFontSize=12,Position2=[0.5, 0.15], Enabled=1, TitleFontSize=12,Position=[0.25,0.85] )
TopRightRenderView.Representations.append(ScalarBarWidgetBzNoDipole)
a1_BzNoDip_PVLookupTable = GetLookupTableForArray( "Bz-Dipole", 1, UseLogScale=1 )
ScalarBarWidgetBzNoDipole.LookupTable = a1_BzNoDip_PVLookupTable
# Describe the view
minValue = a1_BzNoDip_PVLookupTable.RGBPoints[0]
maxValue = a1_BzNoDip_PVLookupTable.RGBPoints[4]
TopRightText = Text()
TopRightText.Text = 'XY (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(TopRightText)
TextRep.Visibility = 1
#####################
# Bottom-left panel #
########################################################################
SetActiveView(TopLeftRenderView)
BotLeftRenderView = CreateRenderView()
#BotLeftRenderView.CameraPosition = [0.0, 0.0, 116.77367590722402]
#BotLeftRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
#BotLeftRenderView.CameraViewUp = [0.0, 0.0, 1.0]
BotLeftRenderView.CameraPosition = [-7, 0.0, 70]
BotLeftRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
BotLeftRenderView.CameraViewUp = [0.0, 1.0, 0.0]
BotLeftRenderView.CameraParallelScale = 1.7320508075688772
BotLeftRenderView.CompressorConfig = 'vtkSquirtCompressor 0 3'
BotLeftRenderView.UseLight = 1
BotLeftRenderView.LightSwitch = 0
BotLeftRenderView.RemoteRenderThreshold = 3.0
BotLeftRenderView.CameraClippingRange = [111.82555065103759, 126.028886930742]
BotLeftRenderView.LODResolution = 50.0
BotLeftRenderView.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
BotLeftRenderView.CenterAxesVisibility = 0
BotLeftRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
# Add plane and map it to the dataset
XYVectorPlane = Plane()
XYVectorPlane.Origin = [-60.0, -30.0, 0.0]
XYVectorPlane.Point1 = [30.0, -30.0, 0.0]
XYVectorPlane.Point2 = [-60.0, 30.0, 0.0]
XYVectorPlane.XResolution = 20
XYVectorPlane.YResolution = 15
SetActiveSource(XYVectorPlane)
Show().Visibility = 0
RenameSource("XY Vector Plane", XYVectorPlane)
# ResampleWithDataset
SetActiveSource(vtkLfmReaderObject)
XYField = ResampleWithDataset( Source=XYVectorPlane )
Show().Visibility = 0
# Render vector field
XYVectors = Glyph( GlyphType="Arrow", GlyphTransform="Transform2" )
XYVectors.SetScaleFactor = 9.0
XYVectors.Vectors = ['POINTS', 'Velocity Vector']
XYVectors.GlyphTransform = "Transform2"
XYVectors.GlyphType = "Arrow"
XYVectors.GlyphType.TipRadius = 0.04
XYVectors.GlyphType.TipLength = 0.15
XYVectors.GlyphType.ShaftRadius = 0.015
XYVectors.SetScaleFactor = 2.14564239898506e-07
DataRepresentation16 = Show()
DataRepresentation16.EdgeColor = [0.0, 0.0, 0.5019607843137255]
DataRepresentation16.ColorArrayName = ''
# XY cutplane for colormap
SetActiveSource(vtkLfmReaderObject)
XYSlice = Slice( SliceType="Plane" )
XYSlice.SliceOffsetValues = [0.0]
XYSlice.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
XYSlice.SliceType = "Plane"
XYSlice.SliceType.Normal = [0.0, 0.0, 1.0]
# Calculator for pressure
Pressure = Calculator()
Pressure.AttributeMode = 'point_data'
Pressure.Function = 'Plasma Density*4.7619e23*Sound Speed*Sound Speed*3.75e8'
Pressure.ResultArrayName = 'Pressure'
PressureRepresentation = Show()
PressureRepresentation.EdgeColor = [0.0, 0.0, 0.5000076295109483]
PressureRepresentation.ColorArrayName = 'Pressure'
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, NanColor=[0.498039, 0.498039, 0.498039], RGBPoints=[7.232339585875363e+19, 0.0, 0.0, 1.0, 3.964840999531023e+24, 1.0, 0.0, 0.0], VectorMode='Magnitude', ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a1_Pressure_PiecewiseFunction = CreatePiecewiseFunction()
PressureRepresentation.LookupTable = a1_Pressure_PVLookupTable
ScalarBarWidgetLog10Pressure = CreateScalarBar( Orientation='Horizontal', Title='Pressure', Position2=[0.5, 0.15],LabelFontSize=12, Enabled=1, TitleFontSize=12,Position=[0.25,0.85] )
BotLeftRenderView.Representations.append(ScalarBarWidgetLog10Pressure)
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, UseLogScale=1,RGBPoints=[1e+22, 0.0, 0.0, 1.0, 3.96484e+24, 1.0, 0.0, 0.0], LockScalarRange=1 )
ScalarBarWidgetLog10Pressure.LookupTable = a1_Pressure_PVLookupTable
# Describe the view
minValue = a1_Pressure_PVLookupTable.RGBPoints[0]
maxValue = a1_Pressure_PVLookupTable.RGBPoints[4]
BotLeftText = Text()
BotLeftText.Text = 'XY (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(BotLeftText)
TextRep.Visibility = 1
######################
# Bottom-Right panel #
########################################################################
SetActiveView(TopRightRenderView)
BotRightRenderView = CreateRenderView()
#BotRightRenderView.CameraPosition = [-8.45319037422091, 0.7965184288563187, 127.82383156323988]
#BotRightRenderView.CameraFocalPoint = [-8.45319037422091, 0.7965184288563187, 0.0]
BotRightRenderView.CameraPosition = [-7, 0.0, 70]
BotRightRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
BotRightRenderView.CameraViewUp = [0.0, 1.0, 0.0]
BotRightRenderView.CompressorConfig = 'vtkSquirtCompressor 0 3'
BotRightRenderView.UseLight = 1
BotRightRenderView.LightSwitch = 0
BotRightRenderView.RemoteRenderThreshold = 3.0
BotRightRenderView.LODResolution = 50.0
BotRightRenderView.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
BotRightRenderView.CenterAxesVisibility = 0
BotRightRenderView.CameraClippingRange = [126.54559229870145, 129.7411902311656]
BotRightRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
BotRightRenderView.CameraParallelScale = 325.86109001049476
SetActiveSource(vtkLfmReaderObject)
# XY Cutplane
Slice5 = Slice( SliceType="Plane" )
Slice5.SliceOffsetValues = [0.0]
Slice5.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
Slice5.SliceType = "Plane"
Slice5.SliceType.Normal = [0.0, 0.0, 1.0]
DataRepresentation23 = Show()
DataRepresentation23.EdgeColor = [0.0, 0.0, 0.5000076295109483]
a3_VelocityVector_PVLookupTable = GetLookupTableForArray( "Velocity Vector", 3, NanColor=[0.498039, 0.498039, 0.498039], RGBPoints=[6236.560207233221, 0.0, 0.0, 1.0, 59331831.819066755, 1.0, 0.0, 0.0], VectorMode='Magnitude', ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a3_VelocityVector_PiecewiseFunction = CreatePiecewiseFunction()
DataRepresentation23.ColorArrayName = 'Velocity Vector'
DataRepresentation23.LookupTable = a3_VelocityVector_PVLookupTable
ScalarBarWidgetVelocity = CreateScalarBar( ComponentTitle='Magnitude', Orientation='Horizontal', Title='Velocity Vector', Position2=[0.5, 0.15], Enabled=1, LabelFontSize=12, TitleFontSize=12,Position=[0.25,0.85] )
BotRightRenderView.Representations.append(ScalarBarWidgetVelocity)
a3_VelocityVector_PVLookupTable = GetLookupTableForArray( "Velocity Vector", 3, RGBPoints=[0.0, 0.0, 0.0, 1.0, 50000000.0, 1.0, 0.0, 0.0], LockScalarRange=1 )
ScalarBarWidgetVelocity.LookupTable = a3_VelocityVector_PVLookupTable
# Describe the view
minValue = a3_VelocityVector_PVLookupTable.RGBPoints[0]
maxValue = a3_VelocityVector_PVLookupTable.RGBPoints[4]
BotRightText = Text()
BotRightText.Text = 'XY (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(BotRightText)
TextRep.Visibility = 1
#################################
# Global visualization settings #
########################################################################
AnimationScene = GetAnimationScene()
AnimationScene.ViewModules = [ TopLeftRenderView, TopRightRenderView, BotLeftRenderView, BotRightRenderView ]
#WriteAnimation('/Users/schmitt/paraview/scripts/testAnimation.jpg', Magnification=1, Quality=2, FrameRate=1.000000)
Render()
#WriteImage('/Users/schmitt/paraview/scripts/LRs_mhd_1995-03-21T04-20-00Z.png')
#### Animate from 1st time step to last
###AnimationScene.StartTime = vtkLfmReaderObject.TimestepValues.GetData()[0]
###AnimationScene.EndTime = vtkLfmReaderObject.TimestepValues.GetData()[-1]
###
###for idx, cur_time in enumerate(vtkLfmReaderObject.TimestepValues.GetData()):
### AnimationScene.AnimationTime = cur_time
### vtkLfmReaderObject.UpdatePipelineInformation()
###
### WriteImage("testAnimation_topLeft_%03d.png" % idx, TopLeftRenderView);
### #WriteImage("testAnimation_topright_%03d.png" % idx, TopRightRenderView);
### #WriteImage("testAnimation_botLeft_%03d.png" % idx, BotLeftRenderView);
### #WriteImage("testAnimation_botRight_%03d.png" % idx, BotRightRenderView);
|
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility to provide exploration of policy definition files.
Allows read-only access to policy definition files. The library
creates a Policy object, which has filters containing terms.
Unlike policy.py, this library does not expand the tokens it reads.
TODO: This library is currently incomplete, and does not allow access to
every argument of a policy term.
"""
__author__ = '[email protected] (Tony Watson)'
import naming
class FileOpenError(Exception):
"""Trouble opening a file."""
class Filter(object):
"""Simple filter with a name a list of terms."""
def __init__(self, filtername=''):
self.name = filtername
self.term = []
def __str__(self):
rval = []
title = 'Filter: %s' % str(self.name)
rval.append('\n%s' % title)
rval.append('-' * len(title))
for term in self.term:
rval.append(str(term))
return '\n\n'.join(rval)
class Term(object):
"""Simple term with a name a list of attributes."""
def __init__(self, termname=''):
self.name = termname
self.source = []
self.destination = []
self.sport = []
self.dport = []
self.action = []
self.option = []
self.protocol = []
def __str__(self):
rval = []
rval.append(' Term: %s' % self.name)
rval.append(' Source-address:: %s' % ' '.join(self.source))
rval.append(' Destination-address:: %s' % ' '.join(self.destination))
rval.append(' Source-port:: %s' % ' '.join(self.sport))
rval.append(' Destination-port:: %s' % ' '.join(self.dport))
rval.append(' Protocol:: %s' % ' '.join(self.protocol))
rval.append(' Option:: %s' % ' '.join(self.option))
rval.append(' Action:: %s' % ' '.join(self.action))
return '\n'.join(rval)
class Policy(object):
"""Holds basic attributes of an unexpanded policy definition file."""
def __init__(self, filename, defs_data=None):
"""Build policy object and naming definitions from provided filenames.
Args:
filename: location of a .pol file
defs_data: location of naming definitions directory, if any
"""
self.defs = naming.Naming(defs_data)
self.filter = []
try:
self.data = open(filename, 'r').readlines()
except IOError, error_info:
info = str(filename) + ' cannot be opened'
raise FileOpenError('%s\n%s' % (info, error_info))
indent = 0
in_header = False
in_term = False
filt = Filter()
term = Term()
in_string = False
for line in self.data:
words = line.strip().split()
quotes = len(line.split('"')) + 1
if quotes % 2:  # odd number of double-quote characters on this line
in_string = not in_string  # toggle the in-string state
if not in_string:
if '{' in words:
indent += 1
if words:
if words[0] == 'header':
in_header = True
if words[0] == 'term':
in_term = True
term = Term(words[1])
if in_header and words[0] == 'target::':
if filt.name != words[2]: # avoid empty dupe filters due to
filt = Filter(words[2]) # multiple target header lines
if in_term:
if words[0] == 'source-address::':
term.source.extend(words[1:])
if words[0] == 'destination-address::':
term.destination.extend(words[1:])
if words[0] == 'source-port::':
term.sport.extend(words[1:])
if words[0] == 'destination-port::':
term.dport.extend(words[1:])
if words[0] == 'action::':
term.action.extend(words[1:])
if words[0] == 'protocol::':
term.protocol.extend(words[1:])
if words[0] == 'option::':
term.option.extend(words[1:])
if '}' in words:
indent -= 1
if in_header:
self.filter.append(filt)
in_header = False
if in_term:
filt.term.append(term)
in_term = False
def __str__(self):
return '\n'.join(str(next) for next in self.filter)
def Matches(self, src=None, dst=None, dport=None, sport=None,
filtername=None):
"""Return list of term names that match specific attributes.
Args:
src: source ip address '12.1.1.1'
dst: destination ip address '10.1.1.1'
dport: any port/protocol combo, such as '80/tcp' or '53/udp'
sport: any port/protocol combo, such as '80/tcp' or '53/udp'
filtername: a filter name or None to search all filters
Returns:
results: list of lists, each list is index to filter & term in the policy
Example:
p=policyreader.Policy('policy_path', 'definitions_path')
p.Matches(dst='209.85.216.5', dport='25/tcp')
[[0, 26]]
print p.filter[0].term[26].name
for match in p.Matches(dst='209.85.216.5'):
print p.filter[match[0]].term[match[1]].name
"""
rval = []
results = []
filter_list = []
dport_parents = None
sport_parents = None
destination_parents = None
source_parents = None
if dport:
dport_parents = self.defs.GetServiceParents(dport)
if sport:
sport_parents = self.defs.GetServiceParents(sport)
if dst:
destination_parents = self.defs.GetIpParents(dst)
try:
destination_parents.remove('ANY')
destination_parents.remove('RESERVED')
except ValueError:
pass # ignore and continue
if src:
source_parents = self.defs.GetIpParents(src)
try:
source_parents.remove('ANY')
source_parents.remove('RESERVED')
except ValueError:
pass # ignore and continue
if not filtername:
filter_list = self.filter
else:
for idx, next in enumerate(self.filter):
if filtername == next.name:
filter_list = [self.filter[idx]]
if not filter_list:
raise ValueError('invalid filter name: %s' % filtername)
for findex, xfilter in enumerate(filter_list):
mterms = []
mterms.append(set()) # dport
mterms.append(set()) # sport
mterms.append(set()) # dst
mterms.append(set()) # src
for tindex, term in enumerate(xfilter.term):
if dport_parents:
for token in dport_parents:
if token in term.dport:
mterms[0].add(tindex)
else:
mterms[0].add(tindex)
if sport_parents:
for token in sport_parents:
if token in term.sport:
mterms[1].add(tindex)
else:
mterms[1].add(tindex)
if destination_parents:
for token in destination_parents:
if token in term.destination:
mterms[2].add(tindex)
else:
mterms[2].add(tindex)
if source_parents:
for token in source_parents:
if token in term.source:
mterms[3].add(tindex)
else:
mterms[3].add(tindex)
rval.append(list(mterms[0] & mterms[1] & mterms[2] & mterms[3]))
for findex, fresult in enumerate(rval):
for next in list(fresult):
results.append([findex, next])
return results
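# --- Hedged usage sketch (not part of the original module) -------------------
# Exercises the read-only API above; the .pol file and definitions directory
# names are placeholders. The Matches() arguments follow its docstring example.
if __name__ == '__main__':
  sample = Policy('example.pol', 'example_defs')
  print sample
  for findex, tindex in sample.Matches(dst='209.85.216.5', dport='25/tcp'):
    print sample.filter[findex].term[tindex].name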
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
import re
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=" + self.vif_id + ", vif_mac=" +
self.vif_mac + ", port_name=" + self.port_name +
", ofport=" + str(self.ofport) + ", bridge_name =" +
self.switch.br_name)
class OVSBridge:
def __init__(self, br_name, root_helper):
self.br_name = br_name
self.root_helper = root_helper
self.re_id = self.re_compile_id()
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def re_compile_id(self):
external = 'external_ids\s*'
mac = 'attached-mac="(?P<vif_mac>([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"'
iface = 'iface-id="(?P<vif_id>[^"]+)"'
name = 'name\s*:\s"(?P<port_name>[^"]*)"'
port = 'ofport\s*:\s(?P<ofport>-?\d+)'
_re = ('%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }'
' \s+ %(name)s \s+ %(port)s' % {'external': external,
'mac': mac,
'iface': iface, 'name': name,
'port': port})
return re.compile(_re, re.M | re.X)
def run_vsctl(self, args):
full_args = ["ovs-vsctl", "--timeout=2"] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def reset_bridge(self):
self.run_vsctl(["--", "--if-exists", "del-br", self.br_name])
self.run_vsctl(["add-br", self.br_name])
def add_port(self, port_name):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
return self.get_port_ofport(port_name)
def delete_port(self, port_name):
self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
port_name])
def set_db_attribute(self, table_name, record, column, value):
args = ["set", table_name, record, "%s=%s" % (column, value)]
self.run_vsctl(args)
def clear_db_attribute(self, table_name, record, column):
args = ["clear", table_name, record, column]
self.run_vsctl(args)
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
try:
return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
def get_port_ofport(self, port_name):
return self.db_get_val("Interface", port_name, "ofport")
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id').strip('"')
def _build_flow_expr_arr(self, **kwargs):
flow_expr_arr = []
is_delete_expr = kwargs.get('delete', False)
if not is_delete_expr:
prefix = ("hard_timeout=%s,idle_timeout=%s,priority=%s" %
(kwargs.get('hard_timeout', '0'),
kwargs.get('idle_timeout', '0'),
kwargs.get('priority', '1')))
flow_expr_arr.append(prefix)
elif 'priority' in kwargs:
raise Exception(_("Cannot match priority on flow deletion"))
table = ('table' in kwargs and ",table=%s" %
kwargs['table'] or '')
in_port = ('in_port' in kwargs and ",in_port=%s" %
kwargs['in_port'] or '')
dl_type = ('dl_type' in kwargs and ",dl_type=%s" %
kwargs['dl_type'] or '')
dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" %
kwargs['dl_vlan'] or '')
dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or ''
proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
match = (table + in_port + dl_type + dl_vlan + dl_src + dl_dst +
(proto or ip) + nw_src + nw_dst + tun_id)
if match:
match = match[1:] # strip leading comma
flow_expr_arr.append(match)
return flow_expr_arr
def add_or_mod_flow_str(self, **kwargs):
if "actions" not in kwargs:
raise Exception(_("Must specify one or more actions"))
if "priority" not in kwargs:
kwargs["priority"] = "0"
flow_expr_arr = self._build_flow_expr_arr(**kwargs)
flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
flow_str = ",".join(flow_expr_arr)
return flow_str
def add_flow(self, **kwargs):
flow_str = self.add_or_mod_flow_str(**kwargs)
if self.defer_apply_flows:
self.deferred_flows['add'] += flow_str + '\n'
else:
self.run_ofctl("add-flow", [flow_str])
def mod_flow(self, **kwargs):
flow_str = self.add_or_mod_flow_str(**kwargs)
if self.defer_apply_flows:
self.deferred_flows['mod'] += flow_str + '\n'
else:
self.run_ofctl("mod-flows", [flow_str])
def delete_flows(self, **kwargs):
kwargs['delete'] = True
flow_expr_arr = self._build_flow_expr_arr(**kwargs)
if "actions" in kwargs:
flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
flow_str = ",".join(flow_expr_arr)
if self.defer_apply_flows:
self.deferred_flows['del'] += flow_str + '\n'
else:
self.run_ofctl("del-flows", [flow_str])
def defer_apply_on(self):
LOG.debug(_('defer_apply_on'))
self.defer_apply_flows = True
def defer_apply_off(self):
LOG.debug(_('defer_apply_off'))
for action, flows in self.deferred_flows.items():
if flows:
LOG.debug(_('Applying following deferred flows '
'to bridge %s'), self.br_name)
for line in flows.splitlines():
LOG.debug(_('%(action)s: %(flow)s'),
{'action': action, 'flow': line})
self.run_ofctl('%s-flows' % action, ['-'], flows)
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=constants.TYPE_GRE,
vxlan_udp_port=constants.VXLAN_UDP_PORT):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
self.set_db_attribute("Interface", port_name, "type", tunnel_type)
if tunnel_type == constants.TYPE_VXLAN:
# Only set the VXLAN UDP port if it's not the default
if vxlan_udp_port != constants.VXLAN_UDP_PORT:
self.set_db_attribute("Interface", port_name,
"options:dst_port",
vxlan_udp_port)
self.set_db_attribute("Interface", port_name, "options:remote_ip",
remote_ip)
self.set_db_attribute("Interface", port_name, "options:local_ip",
local_ip)
self.set_db_attribute("Interface", port_name, "options:in_key", "flow")
self.set_db_attribute("Interface", port_name, "options:out_key",
"flow")
return self.get_port_ofport(port_name)
def add_patch_port(self, local_name, remote_name):
self.run_vsctl(["add-port", self.br_name, local_name])
self.set_db_attribute("Interface", local_name, "type", "patch")
self.set_db_attribute("Interface", local_name, "options:peer",
remote_name)
return self.get_port_ofport(local_name)
def db_get_map(self, table, record, column):
output = self.run_vsctl(["get", table, record, column])
if output:
return self.db_str_to_map(output.rstrip("\n\r"))
return {}
def db_get_val(self, table, record, column):
output = self.run_vsctl(["get", table, record, column])
if output:
return output.rstrip("\n\r")
def db_str_to_map(self, full_str):
elems = full_str.strip("{}").split(", ")
ret = {}
for e in elems:
if e.find("=") == -1:
continue
arr = e.split("=")
ret[arr[0]] = arr[1].strip("\"")
return ret
def get_port_name_list(self):
res = self.run_vsctl(["list-ports", self.br_name])
if res:
return res.strip().split("\n")
return []
def get_port_stats(self, port_name):
return self.db_get_map("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': args, 'exception': e})
# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
for name in port_names:
external_ids = self.db_get_map("Interface", name, "external_ids")
ofport = self.db_get_val("Interface", name, "ofport")
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_set(self):
port_names = self.get_port_name_list()
edge_ports = set()
args = ['--format=json', '--', '--columns=name,external_ids',
'list', 'Interface']
result = self.run_vsctl(args)
if not result:
return edge_ports
for row in jsonutils.loads(result)['data']:
name = row[0]
if name not in port_names:
continue
external_ids = dict(row[1][1])
if "iface-id" in external_ids and "attached-mac" in external_ids:
edge_ports.add(external_ids['iface-id'])
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
edge_ports.add(iface_id)
return edge_ports
def get_vif_port_by_id(self, port_id):
args = ['--', '--columns=external_ids,name,ofport',
'find', 'Interface',
'external_ids:iface-id="%s"' % port_id]
result = self.run_vsctl(args)
if not result:
return
match = self.re_id.search(result)
try:
vif_mac = match.group('vif_mac')
vif_id = match.group('vif_id')
port_name = match.group('port_name')
ofport = int(match.group('ofport'))
return VifPort(port_name, ofport, vif_id, vif_mac, self)
except Exception as e:
LOG.info(_("Unable to parse regex results. Exception: %s"), e)
return
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def get_bridge_for_iface(root_helper, iface):
args = ["ovs-vsctl", "--timeout=2", "iface-to-br", iface]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Interface %s not found."), iface)
return None
def get_bridges(root_helper):
args = ["ovs-vsctl", "--timeout=2", "list-br"]
try:
return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e:
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
return []
def get_installed_ovs_usr_version(root_helper):
args = ["ovs-vsctl", "--version"]
try:
cmd = utils.execute(args, root_helper=root_helper)
ver = re.findall("\d+\.\d+", cmd)[0]
return ver
except Exception:
LOG.exception(_("Unable to retrieve OVS userspace version."))
def get_installed_ovs_klm_version():
args = ["modinfo", "openvswitch"]
try:
cmd = utils.execute(args)
for line in cmd.split('\n'):
if 'version: ' in line and 'srcversion' not in line:
ver = re.findall("\d+\.\d+", line)
return ver[0]
except Exception:
LOG.exception(_("Unable to retrieve OVS kernel module version."))
def get_bridge_external_bridge_id(root_helper, bridge):
args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
bridge, "bridge-id"]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Bridge %s not found."), bridge)
return None
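# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how the OVSBridge wrapper above is typically driven. The bridge name,
# port name and root helper below are illustrative only.
def _example_bridge_setup(root_helper='sudo'):
    br = OVSBridge('br-int', root_helper)
    ofport = br.add_port('tap0')  # hypothetical port name
    # Build and apply a simple NORMAL-action flow through the kwargs interface
    # understood by _build_flow_expr_arr().
    br.add_flow(priority=2, in_port=ofport, actions='normal')
    return br.count_flows()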
|
|
import logging
import math
import copy
import os
import pickle
import warnings
from collections.abc import Iterable
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.logger import AstropyUserWarning
import matplotlib.pyplot as plt
import stingray.utils as utils
from .utils import assign_value_if_none, is_string, order_list_of_arrays
from .gti import get_gti_from_all_extensions, load_gtis
_H5PY_INSTALLED = True
try:
import h5py
except ImportError:
_H5PY_INSTALLED = False
def rough_calibration(pis, mission):
"""Make a rough conversion betwenn PI channel and energy.
Only works for NICER, NuSTAR, and XMM.
Parameters
----------
pis: float or array of floats
PI channels in data
mission: str
Mission name
Returns
-------
energies : float or array of floats
Energy values
Examples
--------
>>> rough_calibration(0, 'nustar')
1.6
>>> # It's case-insensitive
>>> rough_calibration(1200, 'XMm')
1.2
>>> rough_calibration(10, 'asDf')
Traceback (most recent call last):
...
ValueError: Mission asdf not recognized
>>> rough_calibration(100, 'nicer')
1.0
"""
if mission.lower() == "nustar":
return pis * 0.04 + 1.6
elif mission.lower() == "xmm":
return pis * 0.001
elif mission.lower() == "nicer":
return pis * 0.01
raise ValueError(f"Mission {mission.lower()} not recognized")
def get_file_extension(fname):
"""Get the extension from the file name.
If g-zipped, add '.gz' to extension.
Examples
--------
>>> get_file_extension('ciao.tar')
'.tar'
>>> get_file_extension('ciao.tar.gz')
'.tar.gz'
>>> get_file_extension('ciao.evt.gz')
'.evt.gz'
>>> get_file_extension('ciao.a.tutti.evt.gz')
'.evt.gz'
"""
fname_root = fname.replace('.gz', '')
fname_root = os.path.splitext(fname_root)[0]
return fname.replace(fname_root, '')
def high_precision_keyword_read(hdr, keyword):
"""Read FITS header keywords, also if split in two.
In the case where the keyword is split in two, like
MJDREF = MJDREFI + MJDREFF
in some missions, this function returns the summed value. Otherwise, it
returns the content of the single keyword.
Parameters
----------
hdr : dict_like
The FITS header structure, or a dictionary
keyword : str
The key to read in the header
Returns
-------
value : long double
The value of the key, or ``None`` if something went wrong
"""
try:
value = np.longdouble(hdr[keyword])
return value
except KeyError:
pass
try:
if len(keyword) == 8:
keyword = keyword[:7]
value = np.longdouble(hdr[keyword + 'I'])
value += np.longdouble(hdr[keyword + 'F'])
return value
except KeyError:
return None
def _patch_mission_info(info, mission=None):
"""Add some information that is surely missing in xselect.mdb.
Examples
--------
>>> info = {'gti': 'STDGTI'}
>>> new_info = _patch_mission_info(info, mission=None)
>>> new_info['gti'] == info['gti']
True
>>> new_info = _patch_mission_info(info, mission="xmm")
>>> new_info['gti']
'STDGTI,GTI0'
"""
if mission is None:
return info
if mission.lower() == "xmm" and "gti" in info:
info["gti"] += ",GTI0"
return info
def read_mission_info(mission=None):
"""Search the relevant information about a mission in xselect.mdb."""
curdir = os.path.abspath(os.path.dirname(__file__))
fname = os.path.join(curdir, "datasets", "xselect.mdb")
# If HEADAS is defined, search for the most up-to-date version of the
# mission database
if os.getenv("HEADAS"):
hea_fname = os.path.join(os.getenv("HEADAS"), "bin", "xselect.mdb")
if os.path.exists(hea_fname):
fname = hea_fname
if mission is not None:
mission = mission.lower()
db = {}
with open(fname) as fobj:
for line in fobj.readlines():
line = line.strip()
if mission is not None and not line.lower().startswith(mission):
continue
if line.startswith("!") or line == "":
continue
allvals = line.split()
string = allvals[0]
value = allvals[1:]
if len(value) == 1:
value = value[0]
data = string.split(":")[:]
if mission is None:
if data[0] not in db:
db[data[0]] = {}
previous_db_step = db[data[0]]
else:
previous_db_step = db
data = data[1:]
for key in data[:-1]:
if key not in previous_db_step:
previous_db_step[key] = {}
previous_db_step = previous_db_step[key]
previous_db_step[data[-1]] = value
return _patch_mission_info(db, mission)
def _case_insensitive_search_in_list(string, list_of_strings):
"""Search for a string in a list of strings, in a case-insensitive way.
Example
-------
>>> _case_insensitive_search_in_list("a", ["A", "b"])
'A'
>>> _case_insensitive_search_in_list("a", ["c", "b"]) is None
True
"""
for s in list_of_strings:
if string.lower() == s.lower():
return s
return None
def _get_additional_data(lctable, additional_columns):
"""Get additional data from a FITS data table.
Parameters
----------
lctable: `astropy.io.fits.fitsrec.FITS_rec`
Data table
additional_columns: list of str
List of column names to retrieve from the table
Returns
-------
additional_data: dict
Dictionary associating to each additional column the content of the
table.
"""
additional_data = {}
if additional_columns is not None:
for a in additional_columns:
key = _case_insensitive_search_in_list(a, lctable._coldefs.names)
if key is not None:
additional_data[a] = np.array(lctable.field(key))
else:
warnings.warn('Column ' + a + ' not found')
additional_data[a] = np.zeros(len(lctable))
return additional_data
def get_key_from_mission_info(info, key, default, inst=None, mode=None):
"""Get the name of a header key or table column from the mission database.
Many entries in the mission database have default values that can be
altered for specific instruments or observing modes. Here, if there is a
definition for a given instrument and mode, we take that, otherwise we use
the default.
Parameters
----------
info : dict
Nested dictionary containing all the information for a given mission.
It can be nested, e.g. contain some info for a given instrument, and
for each observing mode of that instrument.
key : str
The key to read from the info dictionary
default : object
The default value. It can be of any type, depending on the expected
type for the entry.
Other parameters
----------------
inst : str
Instrument
mode : str
Observing mode
Returns
-------
retval : object
The wanted entry from the info dictionary
Examples
--------
>>> info = {'ecol': 'PI', "A": {"ecol": "BLA"}, "C": {"M1": {"ecol": "X"}}}
>>> get_key_from_mission_info(info, "ecol", "BU", inst="A", mode=None)
'BLA'
>>> get_key_from_mission_info(info, "ecol", "BU", inst="B", mode=None)
'PI'
>>> get_key_from_mission_info(info, "ecol", "BU", inst="A", mode="M1")
'BLA'
>>> get_key_from_mission_info(info, "ecol", "BU", inst="C", mode="M1")
'X'
>>> get_key_from_mission_info(info, "ghghg", "BU", inst="C", mode="M1")
'BU'
"""
filt_info = copy.deepcopy(info)
if inst is not None and inst in filt_info:
filt_info.update(info[inst])
filt_info.pop(inst)
if mode is not None and mode in filt_info:
filt_info.update(info[inst][mode])
filt_info.pop(mode)
if key in filt_info:
return filt_info[key]
return default
def lcurve_from_fits(
fits_file,
gtistring="GTI",
timecolumn="TIME",
ratecolumn=None,
ratehdu=1,
fracexp_limit=0.9,
outfile=None,
noclobber=False,
outdir=None,
):
"""Load a lightcurve from a fits file.
.. note ::
FITS light curve handling is still under testing.
Absolute times might be incorrect depending on the light curve format.
Parameters
----------
fits_file : str
File name of the input light curve in FITS format
Returns
-------
data : dict
Dictionary containing all information needed to create a
:class:`stingray.Lightcurve` object
Other Parameters
----------------
gtistring : str
Name of the GTI extension in the FITS file
timecolumn : str
Name of the column containing times in the FITS file
ratecolumn : str
Name of the column containing rates in the FITS file
ratehdu : str or int
Name or index of the FITS extension containing the light curve
fracexp_limit : float
Minimum exposure fraction allowed
noclobber : bool
If True, do not overwrite existing files
"""
warnings.warn(
"""WARNING! FITS light curve handling is still under testing.
Absolute times might be incorrect."""
)
# TODO:
# treat consistently TDB, UTC, TAI, etc. This requires some documentation
# reading. For now, we assume TDB
from astropy.io import fits as pf
from astropy.time import Time
import numpy as np
from stingray.gti import create_gti_from_condition
lchdulist = pf.open(fits_file)
lctable = lchdulist[ratehdu].data
# Units of header keywords
tunit = lchdulist[ratehdu].header["TIMEUNIT"]
try:
mjdref = high_precision_keyword_read(
lchdulist[ratehdu].header, "MJDREF"
)
mjdref = Time(mjdref, scale="tdb", format="mjd")
except Exception:
mjdref = None
try:
instr = lchdulist[ratehdu].header["INSTRUME"]
except Exception:
instr = "EXTERN"
# ----------------------------------------------------------------
# Trying to comply with all different formats of fits light curves.
# It's a madness...
try:
tstart = high_precision_keyword_read(
lchdulist[ratehdu].header, "TSTART"
)
tstop = high_precision_keyword_read(lchdulist[ratehdu].header, "TSTOP")
except Exception: # pragma: no cover
raise (Exception("TSTART and TSTOP need to be specified"))
# For nulccorr light curves this would work
timezero = high_precision_keyword_read(
lchdulist[ratehdu].header, "TIMEZERO"
)
# Sometimes timezero is "from tstart", sometimes it's an absolute time.
# This tries to detect which case is this, and always consider it
# referred to tstart
timezero = assign_value_if_none(timezero, 0)
# for lcurve light curves this should instead work
if tunit == "d":
# TODO:
# Check this. For now, I assume TJD (JD - 2440000.5).
# This is likely wrong
timezero = Time(2440000.5 + timezero, scale="tdb", format="jd")
tstart = Time(2440000.5 + tstart, scale="tdb", format="jd")
tstop = Time(2440000.5 + tstop, scale="tdb", format="jd")
# if None, use the NuSTAR default MJDREF
mjdref = assign_value_if_none(
mjdref,
Time(
np.longdouble("55197.00076601852"), scale="tdb", format="mjd"
),
)
timezero = (timezero - mjdref).to("s").value
tstart = (tstart - mjdref).to("s").value
tstop = (tstop - mjdref).to("s").value
if timezero > tstart:
timezero -= tstart
time = np.array(lctable.field(timecolumn), dtype=np.longdouble)
if time[-1] < tstart:
time += timezero + tstart
else:
time += timezero
try:
dt = high_precision_keyword_read(lchdulist[ratehdu].header, "TIMEDEL")
if tunit == "d":
dt *= 86400
except Exception:
warnings.warn(
"Assuming that TIMEDEL is the median difference between the"
" light curve times",
AstropyUserWarning,
)
# Avoid NaNs
good = time == time
dt = np.median(np.diff(time[good]))
# ----------------------------------------------------------------
if ratecolumn is None:
for name in ["RATE", "RATE1", "COUNTS"]:
if name in lctable.names:
ratecolumn = name
break
else: # pragma: no cover
raise ValueError(
"None of the accepted rate columns were found in the file")
rate = np.array(lctable.field(ratecolumn), dtype=float)
errorcolumn = "ERROR"
if ratecolumn == "RATE1":
errorcolumn = "ERROR1"
try:
rate_e = np.array(lctable.field(errorcolumn), dtype=np.longdouble)
except Exception:
rate_e = np.zeros_like(rate)
if "RATE" in ratecolumn:
rate *= dt
rate_e *= dt
try:
fracexp = np.array(lctable.field("FRACEXP"), dtype=np.longdouble)
except Exception:
fracexp = np.ones_like(rate)
good_intervals = (
(rate == rate) * (fracexp >= fracexp_limit) * (fracexp <= 1)
)
rate[good_intervals] /= fracexp[good_intervals]
rate_e[good_intervals] /= fracexp[good_intervals]
rate[~good_intervals] = 0
try:
gtitable = lchdulist[gtistring].data
gti_list = np.array(
[
[a, b]
for a, b in zip(
gtitable.field("START"), gtitable.field("STOP")
)
],
dtype=np.longdouble,
)
except Exception:
gti_list = create_gti_from_condition(time, good_intervals)
lchdulist.close()
res = {"time": time,
"counts": rate,
"err": rate_e,
"gti": gti_list,
"mjdref": mjdref.mjd,
"dt": dt,
"instr": instr,
"header": lchdulist[ratehdu].header.tostring()}
return res
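# A hedged usage sketch (not part of the original module); the file name is a
# placeholder, and the keys match the dictionary assembled in lcurve_from_fits.
def _example_lcurve_from_fits(fits_file="lc.fits"):  # pragma: no cover
    lc_data = lcurve_from_fits(fits_file)
    return (lc_data["time"], lc_data["counts"], lc_data["err"],
            lc_data["gti"], lc_data["mjdref"], lc_data["dt"])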
def load_events_and_gtis(
fits_file,
additional_columns=None,
gtistring=None,
gti_file=None,
hduname=None,
column=None,
):
"""Load event lists and GTIs from one or more files.
Loads event list from HDU EVENTS of file fits_file, with Good Time
intervals. Optionally, returns additional columns of data from the same
HDU of the events.
Parameters
----------
fits_file : str
Other parameters
----------------
additional_columns: list of str, optional
A list of keys corresponding to the additional columns to extract from
the event HDU (ex.: ['PI', 'X'])
gtistring : str
Comma-separated list of accepted GTI extensions (default GTI,STDGTI),
with or without appended integer number denoting the detector
gti_file : str, default None
External GTI file
hduname : str or int, default 1
Name of the HDU containing the event list
column : str, default None
The column containing the time values. If None, we use the name
specified in the mission database, and if there is nothing there,
"TIME"
return_limits: bool, optional
Return the TSTART and TSTOP keyword values
Returns
-------
retvals : Object with the following attributes:
ev_list : array-like
Event times in Mission Epoch Time
gti_list: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
GTIs in Mission Epoch Time
additional_data: dict
A dictionary, where each key is the one specified in additional_columns.
The data are an array with the values of the specified column in the
fits file.
t_start : float
Start time in Mission Epoch Time
t_stop : float
Stop time in Mission Epoch Time
pi_list : array-like
Raw Instrument energy channels
cal_pi_list : array-like
Calibrated PI channels (those that can be easily converted to energy
values, regardless of the instrument setup.)
energy_list : array-like
Energy of each photon in keV (only for NuSTAR, NICER, XMM)
instr : str
Name of the instrument (e.g. EPIC-pn or FPMA)
mission : str
Name of the mission (e.g. XMM or NuSTAR)
mjdref : float
MJD reference time for the mission
header : str
Full header of the FITS file, for debugging purposes
detector_id : array-like, int
Detector id for each photon (e.g. each of the CCDs composing XMM's or
Chandra's instruments)
"""
from astropy.io import fits as pf
hdulist = pf.open(fits_file)
probe_header = hdulist[0].header
# Let's look for TELESCOP here. This is the most common keyword to be
# found in well-behaved headers. If it is not in header 0, I take this key
# and the remaining information from header 1.
if "TELESCOP" not in probe_header:
probe_header = hdulist[1].header
mission_key = "MISSION"
if mission_key not in probe_header:
mission_key = "TELESCOP"
mission = probe_header[mission_key].lower()
db = read_mission_info(mission)
instkey = get_key_from_mission_info(db, "instkey", "INSTRUME")
instr = mode = None
if instkey in probe_header:
instr = probe_header[instkey].strip()
modekey = get_key_from_mission_info(db, "dmodekey", None, instr)
if modekey is not None and modekey in probe_header:
mode = probe_header[modekey].strip()
gtistring = get_key_from_mission_info(db, "gti", "GTI,STDGTI", instr, mode)
if hduname is None:
hduname = get_key_from_mission_info(db, "events", "EVENTS", instr, mode)
if hduname not in hdulist:
warnings.warn(f'HDU {hduname} not found. Trying first extension')
hduname = 1
datatable = hdulist[hduname].data
header = hdulist[hduname].header
ephem = timeref = timesys = None
if "PLEPHEM" in header:
ephem = header["PLEPHEM"].strip().lstrip('JPL-').lower()
if "TIMEREF" in header:
timeref = header["TIMEREF"].strip().lower()
if "TIMESYS" in header:
timesys = header["TIMESYS"].strip().lower()
if column is None:
column = get_key_from_mission_info(db, "time", "TIME", instr, mode)
ev_list = np.array(datatable.field(column), dtype=np.longdouble)
detector_id = None
ckey = get_key_from_mission_info(db, "ccol", "NONE", instr, mode)
if ckey != "NONE" and ckey in datatable.columns.names:
detector_id = datatable.field(ckey)
det_number = None if detector_id is None else list(set(detector_id))
timezero = np.longdouble(0.)
if "TIMEZERO" in header:
timezero = np.longdouble(header["TIMEZERO"])
ev_list += timezero
t_start = ev_list[0]
t_stop = ev_list[-1]
if "TSTART" in header:
t_start = np.longdouble(header["TSTART"])
if "TSTOP" in header:
t_stop = np.longdouble(header["TSTOP"])
mjdref = np.longdouble(high_precision_keyword_read(header, "MJDREF"))
# Read and handle GTI extension
accepted_gtistrings = gtistring.split(",")
if gti_file is None:
# Select first GTI with accepted name
try:
gti_list = get_gti_from_all_extensions(
hdulist,
accepted_gtistrings=accepted_gtistrings,
det_numbers=det_number,
)
except Exception: # pragma: no cover
warnings.warn(
"No extensions found with a valid name. "
"Please check the `accepted_gtistrings` values.",
AstropyUserWarning,
)
gti_list = np.array([[t_start, t_stop]], dtype=np.longdouble)
else:
gti_list = load_gtis(gti_file, gtistring)
pi_col = get_key_from_mission_info(db, "ecol", "PI", instr, mode)
if additional_columns is None:
additional_columns = [pi_col]
if pi_col not in additional_columns:
additional_columns.append(pi_col)
# If data were already calibrated, use this!
if "energy" not in additional_columns:
additional_columns.append("energy")
additional_data = _get_additional_data(datatable, additional_columns)
hdulist.close()
# Sort event list
order = np.argsort(ev_list)
ev_list = ev_list[order]
if detector_id is not None:
detector_id = detector_id[order]
additional_data = order_list_of_arrays(additional_data, order)
pi = additional_data[pi_col].astype(np.float32)
cal_pi = pi
# EventReadOutput() is an empty class. We will assign a number of attributes to
# it, like the arrival times of photons, the energies, and some information
# from the header.
returns = EventReadOutput()
returns.ev_list = ev_list
returns.gti_list = gti_list
returns.pi_list = pi
returns.cal_pi_list = cal_pi
if "energy" in additional_data and np.any(additional_data["energy"] > 0.):
returns.energy_list = additional_data["energy"]
else:
try:
returns.energy_list = rough_calibration(cal_pi, mission)
except ValueError:
returns.energy_list = None
returns.instr = instr.lower()
returns.mission = mission.lower()
returns.mjdref = mjdref
returns.header = header.tostring()
returns.additional_data = additional_data
returns.t_start = t_start
returns.t_stop = t_stop
returns.detector_id = detector_id
returns.ephem = ephem
returns.timeref = timeref
returns.timesys = timesys
return returns
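# A hedged usage sketch (not part of the original module); the file name is a
# placeholder. The attributes used here are the ones documented in the
# load_events_and_gtis docstring above.
def _example_load_events(fits_file="events.fits"):  # pragma: no cover
    evdata = load_events_and_gtis(fits_file, additional_columns=["PI"])
    return evdata.ev_list, evdata.gti_list, evdata.energy_list, evdata.mjdref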
class EventReadOutput():
def __init__(self):
pass
def mkdir_p(path): # pragma: no cover
"""Safe ``mkdir`` function, found at [so-mkdir]_.
Parameters
----------
path : str
The absolute path to the directory to be created
Notes
-----
.. [so-mkdir] http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
import os
os.makedirs(path, exist_ok=True)
def read_header_key(fits_file, key, hdu=1):
"""Read the header key key from HDU hdu of the file ``fits_file``.
Parameters
----------
fits_file: str
The file name and absolute path to the event file.
key: str
The keyword to be read
Other Parameters
----------------
hdu : int
Index of the HDU extension from which the header key is to be read.
Returns
-------
value : object
The value stored under ``key`` in ``fits_file``
"""
hdulist = fits.open(fits_file, ignore_missing_end=True)
try:
value = hdulist[hdu].header[key]
except KeyError: # pragma: no cover
value = ''
hdulist.close()
return value
def ref_mjd(fits_file, hdu=1):
"""Read ``MJDREFF``, ``MJDREFI`` or, if failed, ``MJDREF``, from the FITS header.
Parameters
----------
fits_file : str
The file name and absolute path to the event file.
Other Parameters
----------------
hdu : int
Index of the HDU extension from which the header key is to be read.
Returns
-------
mjdref : numpy.longdouble
the reference MJD
"""
if isinstance(fits_file, Iterable) and\
not is_string(fits_file): # pragma: no cover
fits_file = fits_file[0]
logging.info("opening %s" % fits_file)
hdulist = fits.open(fits_file, ignore_missing_end=True)
ref_mjd_val = high_precision_keyword_read(hdulist[hdu].header, "MJDREF")
hdulist.close()
return ref_mjd_val
def common_name(str1, str2, default='common'):
"""Strip two strings of the letters not in common.
Filenames must be of the same length and differ only by a few letters.
Parameters
----------
str1 : str
str2 : str
Other Parameters
----------------
default : str
The string to return if ``common_str`` is empty
Returns
-------
common_str : str
A string containing the parts of the two names in common
"""
if not len(str1) == len(str2):
return default
common_str = ''
# Extract the MP root of the name (in case they're event files)
for i, letter in enumerate(str1):
if str2[i] == letter:
common_str += letter
# Remove leading and trailing underscores and dashes
common_str = common_str.rstrip('_').rstrip('-')
common_str = common_str.lstrip('_').lstrip('-')
if common_str == '':
common_str = default
logging.debug('common_name: %s %s -> %s' % (str1, str2, common_str))
return common_str
def split_numbers(number, shift=0):
"""
Split high precision number(s) into doubles.
You can specify the number of shifts to move the decimal point.
Parameters
----------
number: long double
The input high precision number which is to be split
Other parameters
----------------
shift: integer
Move the cut by `shift` decimal points to the right (left if negative)
Returns
-------
number_I: double
First part of high precision number
number_F: double
Second part of high precision number
Examples
--------
>>> n = 12.34
>>> i, f = split_numbers(n)
>>> i == 12
True
>>> np.isclose(f, 0.34)
True
>>> split_numbers(n, 2)
(12.34, 0.0)
>>> split_numbers(n, -1)
(10.0, 2.34)
"""
if isinstance(number, Iterable):
number = np.asarray(number)
number *= 10**shift
mods = [math.modf(n) for n in number]
number_F = [f for f, _ in mods]
number_I = [i for _, i in mods]
else:
number *= 10**shift
number_F, number_I = math.modf(number)
return np.double(number_I) / 10**shift, np.double(number_F) / 10**shift
def _save_pickle_object(object, filename):
"""
Save a class object in pickle format.
Parameters
----------
object: class instance
A class object whose attributes are saved in a
dictionary format
filename: str
Name of the file in which object is saved
"""
with open(filename, "wb") as f:
pickle.dump(object, f)
def _retrieve_pickle_object(filename):
"""
Retrieves a pickled class object.
Parameters
----------
filename: str
Name of the file in which object is saved
Returns
-------
data: class object
"""
with open(filename, "rb") as f:
return pickle.load(f)
def _save_hdf5_object(object, filename):
"""
Save a class object in hdf5 format.
Parameters
----------
object: class instance
A class object whose attributes are saved in a
dictionary format
filename: str
Name of the file in which object is saved
"""
items = vars(object)
attrs = [name for name in items if items[name] is not None]
with h5py.File(filename, 'w') as hf:
for attr in attrs:
data = items[attr]
# If data is a single number, store as an attribute.
if _isattribute(data):
if isinstance(data, np.longdouble):
data_I, data_F = split_numbers(data)
names = [attr + '_I', attr + '_F']
hf.attrs[names[0]] = data_I
hf.attrs[names[1]] = data_F
else:
hf.attrs[attr] = data
# If data is an array or list, create a dataset.
else:
try:
if isinstance(data[0], np.longdouble):
data_I, data_F = split_numbers(data)
names = [attr + '_I', attr + '_F']
hf.create_dataset(names[0], data=data_I)
hf.create_dataset(names[1], data=data_F)
else:
hf.create_dataset(attr, data=data)
except IndexError:
# To account for numpy arrays of type 'None' (0-d)
pass
def _retrieve_hdf5_object(filename):
"""
Retrieves an hdf5 format class object.
Parameters
----------
filename: str
The name of file with which object was saved
Returns
-------
data: dictionary
Loads the data from an hdf5 object file and returns
in dictionary format.
"""
with h5py.File(filename, 'r') as hf:
dset_keys = hf.keys()
attr_keys = hf.attrs.keys()
data = {}
dset_copy = list(dset_keys)[:]
for key in dset_keys:
# Make sure key hasn't been removed
if key in dset_copy:
# Longdouble case
if key[-2:] in ['_I', '_F']:
m_key = key[:-2]
# Add integer and float parts
data[m_key] = np.longdouble(hf[m_key + '_I'][()])
data[m_key] += np.longdouble(hf[m_key + '_F'][()])
# Remove integer and float parts from attributes
dset_copy.remove(m_key + '_I')
dset_copy.remove(m_key + '_F')
else:
data[key] = hf[key][()]
attr_copy = list(attr_keys)[:]
for key in attr_keys:
# Make sure key hasn't been removed
if key in attr_copy:
# Longdouble case
if key[-2:] in ['_I', '_F']:
m_key = key[:-2]
# Add integer and float parts
data[m_key] = np.longdouble(hf.attrs[m_key + '_I'])
data[m_key] += np.longdouble(hf.attrs[m_key + '_F'])
# Remove integer and float parts from attributes
attr_copy.remove(m_key + '_I')
attr_copy.remove(m_key + '_F')
else:
data[key] = hf.attrs[key]
return data
def _save_ascii_object(object, filename, fmt="%.18e", **kwargs):
"""
Save an array to a text file.
Parameters
----------
object : numpy.ndarray
An array with the data to be saved
filename : str
The file name to save to
fmt : str or sequence of strs, optional
Use for formatting of columns. See `numpy.savetxt` documentation
for details.
Other Parameters
----------------
kwargs : any keyword argument taken by `numpy.savetxt`
"""
try:
np.savetxt(filename, object, fmt=fmt, **kwargs)
except TypeError:
raise Exception("Formatting of columns not recognized! Use 'fmt' "
"to format columns including strings or mixed types!")
def _retrieve_ascii_object(filename, **kwargs):
"""
Helper function to retrieve ascii objects from file.
Uses astropy.Table for reading and storing the data.
Parameters
----------
filename : str
The name of the file with the data to be retrieved.
Other Parameters
----------------
usecols : {int | iterable}
The indices of the columns in the file to be returned.
By default, all columns will be returned
skiprows : int
The number of rows at the beginning to skip
By default, no rows will be skipped.
names : iterable
A list of column names to be attached to the columns.
By default, no column names are added, unless they are specified
in the file header and can be read by astropy.Table.read
automatically.
Returns
-------
data : astropy.Table object
An astropy.Table object with the data from the file
"""
if not isinstance(filename, str):
raise TypeError("filename must be string!")
if 'usecols' in list(kwargs.keys()):
if np.size(kwargs['usecols']) != 2:
raise ValueError("Need to define two columns")
usecols = kwargs["usecols"]
else:
usecols = None
if 'skiprows' in list(kwargs.keys()):
assert isinstance(kwargs["skiprows"], int)
skiprows = kwargs["skiprows"]
else:
skiprows = 0
if "names" in list(kwargs.keys()):
names = kwargs["names"]
else:
names = None
data = Table.read(filename, data_start=skiprows,
names=names, format="ascii")
if usecols is None:
return data
else:
colnames = np.array(data.colnames)
cols = colnames[usecols]
return data[cols]
def _save_fits_object(object, filename, **kwargs):
"""
Save a class object in fits format.
Parameters
----------
object: class instance
A class object whose attributes would be saved in a dictionary format.
filename: str
The file name to save to
Additional Keyword Parameters
-----------------------------
tnames: str iterable
The names of HDU tables. For instance, in case of eventlist,
tnames could be ['EVENTS', 'GTI']
colsassign: dictionary iterable
This indicates the correct tables to which to assign columns
to. If this is None or if a column is not provided, it/they will
be assigned to the first table.
For example, [{'gti':'GTI'}] indicates that gti values should be
stored in GTI table.
"""
tables = []
if 'colsassign' in list(kwargs.keys()):
colsassign = kwargs['colsassign']
iscolsassigned = True
else:
iscolsassigned = False
if 'tnames' in list(kwargs.keys()):
tables = kwargs['tnames']
else:
tables = ['MAIN']
items = vars(object)
attrs = [name for name in items if items[name] is not None]
cols = []
hdrs = []
for t in tables:
cols.append([])
hdrs.append(fits.Header())
for attr in attrs:
data = items[attr]
# Get the index of table to which column belongs
if iscolsassigned and attr in colsassign.keys():
index = tables.index(colsassign[attr])
else:
index = 0
# If data is a single number, store as metadata
if _isattribute(data):
if isinstance(data, np.longdouble):
# Longdouble case. Split and save integer and float parts
data_I, data_F = split_numbers(data)
names = [attr + '_I', attr + '_F']
hdrs[index][names[0]] = data_I
hdrs[index][names[1]] = data_F
else:
# Normal case. Save as it is
hdrs[index][attr] = data
# If data is an array or list, insert as table column
else:
try:
if isinstance(data[0], np.longdouble):
# Longdouble case. Split and save integer and float parts
data_I, data_F = split_numbers(data)
names = [attr + '_I', attr + '_F']
cols[index].append(
fits.Column(name=names[0],
format='D',
array=data_I))
cols[index].append(
fits.Column(name=names[1],
format='D',
array=data_F))
else:
# Normal case. Save as it is
cols[index].append(
fits.Column(name=attr,
format=_lookup_format(data[0]),
array=data))
except IndexError:
# To account for numpy arrays of type 'None' (0-d)
pass
tbhdu = fits.HDUList()
# Create binary tables
for i in range(0, len(tables)):
if len(cols[i]) > 0:
tbhdu.append(fits.BinTableHDU.from_columns(cols[i],
header=hdrs[i],
name=tables[i]))
tbhdu.writeto(filename)
def _retrieve_fits_object(filename, **kwargs):
"""
Retrieves a fits format class object.
Parameters
----------
filename: str
The name of file with which object was saved
Other Parameters
----------------
cols: str iterable
The names of columns to extract from fits tables.
Returns
-------
data: dictionary
Loads the data from a fits object file and returns
in dictionary format.
"""
data = {}
if 'cols' in list(kwargs.keys()):
cols = [col.upper() for col in kwargs['cols']]
else:
cols = []
with fits.open(filename, memmap=False, ignore_missing_end=True) as hdulist:
fits_cols = []
# Get columns from all tables
for i in range(1, len(hdulist)):
fits_cols.append([h.upper() for h in hdulist[i].data.names])
for c in cols:
for i in range(0, len(fits_cols)):
# .upper() is used because `fits` stores values in upper case
hdr_keys = [h.upper() for h in hdulist[i + 1].header.keys()]
# Longdouble case. Check for columns
if c + '_I' in fits_cols[i] or c + '_F' in fits_cols[i]:
if c not in data.keys():
data[c] = np.longdouble(hdulist[i + 1].data[c + '_I'])
data[c] += np.longdouble(hdulist[i + 1].data[c + '_F'])
# Longdouble case. Check for header keys
if c + '_I' in hdr_keys or c + '_F' in hdr_keys:
if c not in data.keys():
data[c] = \
np.longdouble(hdulist[i + 1].header[c + '_I'])
data[c] += \
np.longdouble(hdulist[i + 1].header[c + '_F'])
# Normal case. Check for columns
elif c in fits_cols[i]:
data[c] = hdulist[i + 1].data[c]
# Normal case. Check for header keys
elif c in hdr_keys:
data[c] = hdulist[i + 1].header[c]
hdulist.close()
return data
def _lookup_format(var):
"""
Looks up relevant format in fits.
Parameters
----------
var : object
An object to look up in the table
Returns
-------
lookup : str
The str describing the type of ``var``
"""
lookup = {"<type 'int'>": "J", "<type 'float'>": "E",
"<type 'numpy.int64'>": "K", "<type 'numpy.float64'>": "D",
"<type 'numpy.float128'>": "D", "<type 'str'>": "30A",
"<type 'bool'": "L"}
form = type(var)
try:
return lookup[str(form)]
except KeyError:
# If an entry is not contained in lookup dictionary
return "D"
def _isattribute(data):
"""
Check if data is a single number or an array.
Parameters
----------
data : object
The object to be checked.
Returns
-------
bool
True if the data is a single number, False if it is an iterable.
"""
if isinstance(data, Iterable) and not isinstance(data, (str, bytes)):
return False
else:
return True
def write(input_, filename, format_='pickle', **kwargs):
"""
Save a class instance to file. For parameters depending on
``format_``, see individual function definitions.
Parameters
----------
object: a class instance
The object to be stored
filename: str
The name of the file to be created
format_: str
The format in which to store file. Formats supported
are ``pickle``, ``hdf5``, ``ascii`` or ``fits``
"""
if format_ == 'pickle':
_save_pickle_object(input_, filename)
elif format_ == 'hdf5':
if _H5PY_INSTALLED:
_save_hdf5_object(input_, filename)
else:
utils.simon('h5py not installed, using pickle instead '
'to save object.')
_save_pickle_object(input_, filename.split('.')[0] +
'.pickle')
elif format_ == 'ascii':
_save_ascii_object(input_, filename, **kwargs)
elif format_ == 'fits':
_save_fits_object(input_, filename, **kwargs)
else:
utils.simon('Format not understood.')
def read(filename, format_='pickle', **kwargs):
"""
Return a saved class instance.
Parameters
----------
filename: str
The name of the file to be retrieved.
format_: str
The format used to store file. Supported formats are
pickle, hdf5, ascii or fits.
Returns
-------
data : {``object`` | ``astropy.table`` | ``dict``}
* If ``format_`` is ``pickle``, an object is returned.
* If ``format_`` is ``ascii``, `astropy.table` object is returned.
* If ``format_`` is ``hdf5`` or ``fits``, a dictionary object is returned.
"""
if format_ == 'pickle':
return _retrieve_pickle_object(filename)
elif format_ == 'hdf5':
if _H5PY_INSTALLED:
return _retrieve_hdf5_object(filename)
else:
utils.simon('h5py not installed, cannot read an '
'hdf5 object.')
elif format_ == 'ascii':
return _retrieve_ascii_object(filename, **kwargs)
elif format_ == 'fits':
return _retrieve_fits_object(filename, **kwargs)
else:
utils.simon('Format not understood.')
def savefig(filename, **kwargs):
"""
Save a figure plotted by ``matplotlib``.
Note : This function is supposed to be used after the ``plot``
function. Otherwise it will save a blank image with no plot.
Parameters
----------
filename : str
The name of the image file. Extension must be specified in the
file name. For example filename with `.png` extension will give a
rasterized image while ``.pdf`` extension will give a vectorized
output.
kwargs : keyword arguments
Keyword arguments to be passed to ``savefig`` function of
``matplotlib.pyplot``. For example use `bbox_inches='tight'` to
remove the undesirable whitespace around the image.
"""
if not plt.fignum_exists(1):
utils.simon("use ``plot`` function to plot the image first and "
"then use ``savefig`` to save the figure.")
plt.savefig(filename, **kwargs)
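# A hedged usage sketch (not part of the original module): a write/read
# round-trip with the pickle backend, which needs no optional dependencies.
# The file name is a placeholder.
def _example_write_read_roundtrip(obj, filename="obj.pickle"):  # pragma: no cover
    write(obj, filename, format_='pickle')
    return read(filename, format_='pickle')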
|
|
import os
import sys
import subprocess
import datetime
import numpy as np
from astropy.io import ascii
from astropy.table import Table, Column, vstack
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy.coordinates import SkyCoord, ICRS
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from scipy.stats import norm, f
from scipy.odr import *
from scipy.optimize import minimize
from scipy.ndimage.filters import median_filter, gaussian_filter1d
from photutils import detect_sources, Background
# For debugging
import matplotlib.pyplot as plt
import pdb
# Add the AstroImage class
sys.path.append("C:\\Users\\Jordan\\Libraries\\python\\AstroImage")
from AstroImage import AstroImage
import image_tools
# This script will read in the background level estimated for each on-target
# image in the previous step. The background level in the dimmest parts of the
# on-target image will be directly computed, and the residual between the direct
# estimate and the interpolation will be stored. The distribution of these
# residuals will be used to estimate which interpolated background levels can be
# trusted.
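# A hedged sketch (not part of the original pipeline) of the residual test
# applied below: subtract the interpolated background from the directly
# measured one and flag values more than 5-sigma from the sigma-clipped mean.
def _residual_outlier_flags(directBkgs, interpBkgs):
    residuals = np.array(directBkgs) - np.array(interpBkgs)
    mean, median, stddev = sigma_clipped_stats(residuals)
    return np.abs(residuals - mean) > 5*stddev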
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of the PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_reduced'
# Build the path to the S3_Astrometry files
S3dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_data'
# This is the directory where the 2MASS tiles of the targets have been saved
# Go to "http://hachi.ipac.caltech.edu/" to download 2MASS tiles
TMASSdir = "C:\\Users\\Jordan\\Libraries\\python\\Mimir_pyPol\\2MASSimages"
# Setup new directory for background subtracted data
bkgSubDir = os.path.join(pyPol_data, 'bkgSubtracted')
if (not os.path.isdir(bkgSubDir)):
os.mkdir(bkgSubDir, 0o755)
# Read in Kokopelli mask generated in previous step
kokopelliMask = (AstroImage('kokopelliMask.fits').arr != 0)
# Read in the indexFile data and select the filenames
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
# Grab the file basenames for later use
fileIndexFileNames = np.array([os.path.basename(file1)
for file1 in fileIndex['Filename'].data])
# Modify the fileIndex to include rejections by residual value
if 'Background Cut' not in fileIndex.keys():
fileIndex.add_column(Column(name='Background Cut',
data = np.repeat(0, len(fileIndex))))
# Determine which parts of the fileIndex pertain to science images
useFiles = np.where(np.logical_and(fileIndex['Use'].data == 1,
fileIndex['Background'].data >= 0))
skipFiles = np.where(np.logical_or(fileIndex['Use'].data == 0,
fileIndex['Background'].data < 0))
# Cull the file index to only include files selected for use
fileIndex1 = fileIndex[useFiles]
fileIndex2 = fileIndex[skipFiles]
# Group files by target and waveband
groupFileIndex = fileIndex1.group_by(['PPOL Name'])
allFileList = []
allResidualList = []
# Loop through all the usable images and compute their residuals
for group in groupFileIndex.groups:
# Grab the current target information
thisTarget = str(np.unique(group['Target'].data)[0])
thisWaveband = str(np.unique(group['Waveband'].data)[0])
thisPPOLname = str(np.unique(group['PPOL Name'].data)[0])
# if thisPPOLname != 'NGC2023_H3': continue
print('\nProcessing images for')
print('\tPPOL Group : {0}'.format(thisPPOLname))
print('')
# Read in the 2MASS image
TMASSfile = os.path.join(TMASSdir, '_'.join([thisTarget, thisWaveband]) + '.fits')
TMASSimg = AstroImage(TMASSfile)
TMASSwcs = WCS(TMASSimg.header)
# Estimate the "nebula free" level
mean, median, stddev = sigma_clipped_stats(TMASSimg.arr.flatten())
bkgThresh = median - 0.5*stddev
# Find the "nebula free" pixels
bkgRegion = TMASSimg.arr < bkgThresh
neighborCount = np.zeros_like(bkgRegion, dtype=int)
for dx in range(-1,2):
for dy in range(-1,2):
neighborCount += np.roll(np.roll(bkgRegion, dy, axis = 0), dx, axis = 1)
    # Keep pixels where at least 5 of the 9 cells in the 3x3 neighborhood
    # (including the pixel itself) are background
    bkgRegion = neighborCount > 4
groupFileList = []
groupResidualList = []
for file1, interpBkg in zip(group['Filename'].data, group['Background'].data):
# Read in this image.
img = AstroImage(file1)
# See which pixels in this image map to background pixels
ny, nx = img.arr.shape
yy, xx = np.mgrid[0:ny, 0:nx]
wcs = WCS(img.header)
RAs, Decs = wcs.wcs_pix2world(xx, yy, 0)
Tx, Ty = TMASSwcs.wcs_world2pix(RAs, Decs, 0)
Tx, Ty = (Tx.round()).astype(int), (Ty.round()).astype(int)
# Grab the value of the TMASS background mask for each pixel
MimirBkgRegion = bkgRegion[Ty, Tx]
        # Get the indices of the background pixels
bkgInds = np.where(MimirBkgRegion)
bkgVals = img.arr[bkgInds]
# Compute the direct estimate of background level
mean, median, stddev = sigma_clipped_stats(bkgVals)
# Compute the residual level and store it in the list
thisResidual = mean - interpBkg
groupFileList.append(os.path.basename(file1))
groupResidualList.append(thisResidual)
# Place this residual list in the final total residual list
allFileList.extend(groupFileList)
allResidualList.extend(groupResidualList)
# Convert the lists to arrays
groupFileList = np.array(groupFileList)
groupResidualList = np.array(groupResidualList)
    # Check for outliers and mark residuals more than 5-sigma from the group's sigma-clipped mean
mean, median, stddev = sigma_clipped_stats(groupResidualList)
residMin, residMax = mean - 5*stddev, mean + 5*stddev
badInds = np.where(np.logical_or(groupResidualList < residMin,
groupResidualList > residMax))
# If some of these residuals are more than 5-sigma from the group mean, then
# mark them as bad background levels in the file index.
if len(badInds[0]) > 0:
# Select the file names of the bad backgrounds
badFiles = groupFileList[badInds]
# Grab the indices of these files in the fileIndex and mark them as bad
fileIndexInds = np.array([np.where(fileIndexFileNames == file1)[0][0]
for file1 in badFiles])
fileIndex['Background Cut'][fileIndexInds] = 1
# Convert the lists to arrays
allFileList = np.array(allFileList)
allResidualList = np.array(allResidualList)
# Now that we have the residuals for each group, plot them up as histograms
# Create a single plot with all groups lumped together
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
ax2.hist(allResidualList, 10, density=True, histtype='stepfilled', stacked=True)
plt.xlabel('Residual Counts')
plt.ylabel('Fraction of Fields')
# Prepare some statistics to annotate the plot
xmin, xmax = ax2.get_xlim()
ymin, ymax = ax2.get_ylim()
mean, median, stddev = sigma_clipped_stats(allResidualList)
# Mark the mean
ax2.axvline(mean, color='k', linewidth=2.0)
ax2.text(mean+0.02*(xmax-xmin), 0.95*ymax, 'mean', rotation='vertical')
# Mark the median
ax2.axvline(median, color='k', linewidth=2.0)
ax2.text(median-0.04*(xmax-xmin), 0.95*ymax, 'median', rotation='vertical')
# Mark the 5-sigma upper and lower limits
ax2.axvline(median - 5*stddev, color='k', linewidth=2.0)
ax2.axvline(median + 5*stddev, color='k', linewidth=2.0)
# Prepare the limits of the acceptable residual range
residMin, residMax = mean - 5*stddev, mean + 5*stddev
# Find any background levels that are outside the 5-sigma limits
badInds = np.where(np.logical_or(allResidualList < residMin,
allResidualList > residMax))
# If some of these residuals are more than 5-sigma from the overall mean, then
# mark them as bad background levels in the file index.
if len(badInds[0]) > 0:
# Select the file names of the bad backgrounds
badFiles = allFileList[badInds]
# Grab the indices of these files in the fileIndex and mark them as bad
fileIndexInds = np.array([np.where(fileIndexFileNames == file1)[0][0]
for file1 in badFiles])
fileIndex['Background Cut'][fileIndexInds] = 1
# Then save to disk
print('*************************************')
print('Writing all background levels to disk')
print('*************************************')
pdb.set_trace()
fileIndex.write(indexFile, format='csv')
print('Done!')
|
|
"""Support for Denon Network Receivers."""
import logging
import telnetlib
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Music station"
SUPPORT_DENON = (
SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
)
SUPPORT_MEDIA_MODES = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
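# Example configuration.yaml entry (illustrative; assumes this file is wired up
# as the "denon" media_player platform, and the host value is a placeholder):
#
#   media_player:
#     - platform: denon
#       host: 192.168.1.50
#       name: Music station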
NORMAL_INPUTS = {
"Cd": "CD",
"Dvd": "DVD",
"Blue ray": "BD",
"TV": "TV",
"Satellite / Cable": "SAT/CBL",
"Game": "GAME",
"Game2": "GAME2",
"Video Aux": "V.AUX",
"Dock": "DOCK",
}
MEDIA_MODES = {
"Tuner": "TUNER",
"Media server": "SERVER",
"Ipod dock": "IPOD",
"Net/USB": "NET/USB",
"Rapsody": "RHAPSODY",
"Napster": "NAPSTER",
"Pandora": "PANDORA",
"LastFM": "LASTFM",
"Flickr": "FLICKR",
"Favorites": "FAVORITES",
"Internet Radio": "IRADIO",
"USB/IPOD": "USB/IPOD",
}
# Sub-modes of 'NET/USB'
# {'USB': 'USB', 'iPod Direct': 'IPD', 'Internet Radio': 'IRP',
# 'Favorites': 'FVP'}
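# Illustrative request/response pairs for the telnet protocol parsed in
# update() below (the concrete values are examples, not guaranteed):
#
#   "PW?" -> "PWON" or "PWSTANDBY"           (power state)
#   "MV?" -> "MV45", "MVMAX 98", ...         (volume and maximum volume)
#   "MU?" -> "MUON" or "MUOFF"               (mute state)
#   "SI?" -> "SISAT/CBL", "SINET/USB", ...   (currently selected source)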
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Denon platform."""
denon = DenonDevice(config.get(CONF_NAME), config.get(CONF_HOST))
if denon.update():
add_entities([denon])
class DenonDevice(MediaPlayerDevice):
"""Representation of a Denon device."""
def __init__(self, name, host):
"""Initialize the Denon device."""
self._name = name
self._host = host
self._pwstate = "PWSTANDBY"
self._volume = 0
# Initial value 60dB, changed if we get a MVMAX
self._volume_max = 60
self._source_list = NORMAL_INPUTS.copy()
self._source_list.update(MEDIA_MODES)
self._muted = False
self._mediasource = ""
self._mediainfo = ""
self._should_setup_sources = True
def _setup_sources(self, telnet):
# NSFRN - Network name
nsfrn = self.telnet_request(telnet, "NSFRN ?")[len("NSFRN ") :]
if nsfrn:
self._name = nsfrn
# SSFUN - Configured sources with names
self._source_list = {}
for line in self.telnet_request(telnet, "SSFUN ?", all_lines=True):
source, configured_name = line[len("SSFUN") :].split(" ", 1)
self._source_list[configured_name] = source
# SSSOD - Deleted sources
for line in self.telnet_request(telnet, "SSSOD ?", all_lines=True):
source, status = line[len("SSSOD") :].split(" ", 1)
if status == "DEL":
for pretty_name, name in self._source_list.items():
if source == name:
del self._source_list[pretty_name]
break
@classmethod
def telnet_request(cls, telnet, command, all_lines=False):
"""Execute `command` and return the response."""
_LOGGER.debug("Sending: %s", command)
telnet.write(command.encode("ASCII") + b"\r")
lines = []
while True:
line = telnet.read_until(b"\r", timeout=0.2)
if not line:
break
lines.append(line.decode("ASCII").strip())
_LOGGER.debug("Received: %s", line)
if all_lines:
return lines
return lines[0] if lines else ""
def telnet_command(self, command):
"""Establish a telnet connection and sends `command`."""
telnet = telnetlib.Telnet(self._host)
_LOGGER.debug("Sending: %s", command)
telnet.write(command.encode("ASCII") + b"\r")
telnet.read_very_eager() # skip response
telnet.close()
def update(self):
"""Get the latest details from the device."""
try:
telnet = telnetlib.Telnet(self._host)
except OSError:
return False
if self._should_setup_sources:
self._setup_sources(telnet)
self._should_setup_sources = False
self._pwstate = self.telnet_request(telnet, "PW?")
for line in self.telnet_request(telnet, "MV?", all_lines=True):
if line.startswith("MVMAX "):
                # Only grab the two-digit max; ignore any half-dB digit
self._volume_max = int(line[len("MVMAX ") : len("MVMAX XX")])
continue
if line.startswith("MV"):
self._volume = int(line[len("MV") :])
self._muted = self.telnet_request(telnet, "MU?") == "MUON"
self._mediasource = self.telnet_request(telnet, "SI?")[len("SI") :]
if self._mediasource in MEDIA_MODES.values():
self._mediainfo = ""
answer_codes = [
"NSE0",
"NSE1X",
"NSE2X",
"NSE3X",
"NSE4",
"NSE5",
"NSE6",
"NSE7",
"NSE8",
]
for line in self.telnet_request(telnet, "NSE", all_lines=True):
self._mediainfo += line[len(answer_codes.pop(0)) :] + "\n"
else:
self._mediainfo = self.source
telnet.close()
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._pwstate == "PWSTANDBY":
return STATE_OFF
if self._pwstate == "PWON":
return STATE_ON
return None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / self._volume_max
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
return self._muted
@property
def source_list(self):
"""Return the list of available input sources."""
return sorted(list(self._source_list.keys()))
@property
def media_title(self):
"""Return the current media info."""
return self._mediainfo
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._mediasource in MEDIA_MODES.values():
return SUPPORT_DENON | SUPPORT_MEDIA_MODES
return SUPPORT_DENON
@property
def source(self):
"""Return the current input source."""
for pretty_name, name in self._source_list.items():
if self._mediasource == name:
return pretty_name
def turn_off(self):
"""Turn off media player."""
self.telnet_command("PWSTANDBY")
def volume_up(self):
"""Volume up media player."""
self.telnet_command("MVUP")
def volume_down(self):
"""Volume down media player."""
self.telnet_command("MVDOWN")
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.telnet_command("MV" + str(round(volume * self._volume_max)).zfill(2))
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.telnet_command("MU" + ("ON" if mute else "OFF"))
def media_play(self):
"""Play media player."""
self.telnet_command("NS9A")
def media_pause(self):
"""Pause media player."""
self.telnet_command("NS9B")
def media_stop(self):
"""Pause media player."""
self.telnet_command("NS9C")
def media_next_track(self):
"""Send the next track command."""
self.telnet_command("NS9D")
def media_previous_track(self):
"""Send the previous track command."""
self.telnet_command("NS9E")
def turn_on(self):
"""Turn the media player on."""
self.telnet_command("PWON")
def select_source(self, source):
"""Select input source."""
self.telnet_command("SI" + self._source_list.get(source))
|
|
#
# Copyright (c) 2013-present, Anoop Kunchukuttan
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import codecs, sys
from indicnlp.script import indic_scripts as si
import re
chillu_char_map= {
'\u0d7a': '\u0d23',
'\u0d7b': '\u0d28',
'\u0d7c': '\u0d30',
'\u0d7d': '\u0d32',
'\u0d7e': '\u0d33',
'\u0d7f': '\u0d15',
}
char_chillu_map= {}
for k,v in chillu_char_map.items():
char_chillu_map[v]=k
def normalize_malayalam(word):
word_mask=re.sub(r'[0-9]','0',word)
# instead of chillu characters, use consonant+halant
for chillu,char in chillu_char_map.items():
word=word.replace(chillu,'{}\u0d4d'.format(char))
word_mask=word_mask.replace(chillu,'41')
word_mask=re.sub(r'[^0-9]','0',word_mask)
return word, word_mask
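# Note on the mask returned above (convention inferred from the code):
# word_mask holds one digit per character of the normalized word, where
#   '0' marks an ordinary character,
#   '4' marks the consonant that replaced a chillu, and
#   '1' marks the halant appended after it,
# so denormalize_malayalam() can collapse each '41' pair back into a chillu.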
def denormalize_malayalam(word, word_mask):
word=list(word)
word_mask=list(word_mask)
## pattern 4
idx=0
while idx>=0:
try:
idx=word_mask.index('4',idx)
word[idx:idx+2]=char_chillu_map[word[idx]]
word_mask[idx:idx+2]='0'
start=idx
except ValueError as e:
break
return ''.join(word)
def normalize_punjabi(word):
word_mask=re.sub(r'[0-9]','0',word)
## replace tippi with anusvaar
word=word.replace('\u0a70','\u0a02')
word_mask=word_mask.replace('\u0a70','2')
    ## replace addak+consonant with consonant+halant+consonant
word=re.sub(r'\u0a71(.)','\\1\u0a4d\\1',word)
word_mask=re.sub(r'\u0a71(.)','311',word_mask)
word_mask=re.sub(r'[^0-9]','0',word_mask)
return word, word_mask
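# Note on the Punjabi mask (same convention, inferred from the code):
#   '2'   marks the anusvaar that replaced a tippi, and
#   '311' marks the consonant+halant+consonant sequence that replaced
#         addak+consonant,
# which denormalize_punjabi() uses to restore the original characters.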
def denormalize_punjabi(word, word_mask):
word=list(word)
word_mask=list(word_mask)
## pattern 2
idx=0
while idx>=0:
try:
idx=word_mask.index('2',idx)
word[idx]='\u0a70'
word_mask[idx]='0'
start=idx
except ValueError as e:
break
## pattern 3
idx=0
while idx>=0:
try:
idx=word_mask.index('3',idx)
word[idx:idx+3]='\u0a71{}'.format(word[idx])
word_mask[idx:idx+3]='00'
start=idx
except ValueError as e:
break
return ''.join(word)
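# char_backoff keeps each syllable that is present in `vocab`; otherwise it
# backs off to the syllable's individual characters. With vocab=None the
# syllable list is returned unchanged.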
def char_backoff(syllables_list,vocab):
syllables_final=[]
if vocab is None:
syllables_final=syllables_list
else:
for s in syllables_list:
if s in vocab:
syllables_final.append(s)
else:
for x in s:
syllables_final.append(x)
return syllables_final
def orthographic_syllabify_improved(word,lang,vocab=None):
word_mask=['0']*len(word)
if lang=='ml':
word, word_mask = normalize_malayalam(word)
word=word
elif lang=='pa':
word, word_mask = normalize_punjabi(word)
p_vectors=[si.get_phonetic_feature_vector(c,lang) for c in word]
syllables=[]
syllables_mask=[]
for i in range(len(word)):
v=p_vectors[i]
syllables.append(word[i])
syllables_mask.append(word_mask[i])
### simplified syllabification
#if i+1<len(word) and \
# (not si.is_valid(p_vectors[i+1]) or si.is_misc(p_vectors[i+1])):
# syllables.append(u' ')
# syllables_mask.append(u'0')
#elif not si.is_valid(v) or si.is_misc(v) or si.is_vowel(v):
# syllables.append(u' ')
# syllables_mask.append(u'0')
#elif i+1<len(word) and \
# (si.is_consonant(v) or si.is_nukta(v)) and \
# (si.is_consonant(p_vectors[i+1]) or si.is_anusvaar(p_vectors[i+1])):
# syllables.append(u' ')
# syllables_mask.append(u'0')
#### better syllabification
if i+1<len(word) and (not si.is_valid(p_vectors[i+1]) or si.is_misc(p_vectors[i+1])):
syllables.append(' ')
syllables_mask.append('0')
elif not si.is_valid(v) or si.is_misc(v) :
syllables.append(' ')
syllables_mask.append('0')
elif si.is_vowel(v):
anu_nonplos= ( i+2<len(word) and \
si.is_anusvaar(p_vectors[i+1]) and \
not si.is_plosive(p_vectors[i+2])\
)
anu_eow= ( i+2==len(word) and \
si.is_anusvaar(p_vectors[i+1]) )
if not(anu_nonplos or anu_eow):
syllables.append(' ')
syllables_mask.append('0')
elif i+1<len(word) and \
(si.is_consonant(v) or si.is_nukta(v)):
if si.is_consonant(p_vectors[i+1]):
syllables.append(' ')
syllables_mask.append('0')
elif si.is_vowel(p_vectors[i+1]) and \
not si.is_dependent_vowel(p_vectors[i+1]):
syllables.append(' ')
syllables_mask.append('0')
elif si.is_anusvaar(p_vectors[i+1]):
anu_nonplos= ( i+2<len(word) and \
not si.is_plosive(p_vectors[i+2])\
)
anu_eow= i+2==len(word)
if not(anu_nonplos or anu_eow):
syllables.append(' ')
syllables_mask.append('0')
syllables_mask=''.join(syllables_mask)
syllables=''.join(syllables)
#assert len(syllables_mask) == len(syllables)
#assert syllables_mask.find('01') == -1
if syllables_mask.find('01') >= 0:
print('Warning')
if lang=='ml':
syllables = denormalize_malayalam(syllables,syllables_mask)
elif lang=='pa':
syllables = denormalize_punjabi(syllables,syllables_mask)
syllables_list = syllables.strip().split(' ')
return(char_backoff(syllables_list,vocab))
def orthographic_syllabify(word,lang,vocab=None):
p_vectors=[si.get_phonetic_feature_vector(c,lang) for c in word]
syllables=[]
for i in range(len(word)):
v=p_vectors[i]
syllables.append(word[i])
### simplified syllabification
#if i+1<len(word) and \
# (not si.is_valid(p_vectors[i+1]) or si.is_misc(p_vectors[i+1])):
# syllables.append(u' ')
#elif not si.is_valid(v) or si.is_misc(v) or si.is_vowel(v):
# syllables.append(u' ')
#elif i+1<len(word) and \
# (si.is_consonant(v) or si.is_nukta(v)) and \
# (si.is_consonant(p_vectors[i+1]) or si.is_anusvaar(p_vectors[i+1])):
# syllables.append(u' ')
#### better syllabification
if i+1<len(word) and (not si.is_valid(p_vectors[i+1]) or si.is_misc(p_vectors[i+1])):
syllables.append(' ')
elif not si.is_valid(v) or si.is_misc(v) :
syllables.append(' ')
elif si.is_vowel(v):
anu_nonplos= ( i+2<len(word) and \
si.is_anusvaar(p_vectors[i+1]) and \
not si.is_plosive(p_vectors[i+2])\
)
anu_eow= ( i+2==len(word) and \
si.is_anusvaar(p_vectors[i+1]) )
if not(anu_nonplos or anu_eow):
syllables.append(' ')
elif i+1<len(word) and \
(si.is_consonant(v) or si.is_nukta(v)):
if si.is_consonant(p_vectors[i+1]):
syllables.append(' ')
elif si.is_vowel(p_vectors[i+1]) and \
not si.is_dependent_vowel(p_vectors[i+1]):
syllables.append(' ')
elif si.is_anusvaar(p_vectors[i+1]):
anu_nonplos= ( i+2<len(word) and \
not si.is_plosive(p_vectors[i+2])\
)
anu_eow= i+2==len(word)
if not(anu_nonplos or anu_eow):
syllables.append(' ')
syllables_list = ''.join(syllables).strip().split(' ')
return(char_backoff(syllables_list,vocab))
def orthographic_simple_syllabify(word,lang,vocab=None):
p_vectors=[si.get_phonetic_feature_vector(c,lang) for c in word]
syllables=[]
for i in range(len(word)):
v=p_vectors[i]
syllables.append(word[i])
## simplified syllabification
if i+1<len(word) and \
(not si.is_valid(p_vectors[i+1]) or si.is_misc(p_vectors[i+1])):
syllables.append(' ')
elif not si.is_valid(v) or si.is_misc(v) or si.is_vowel(v):
syllables.append(' ')
elif i+1<len(word) and \
(si.is_consonant(v) or si.is_nukta(v)) and \
(si.is_consonant(p_vectors[i+1]) or si.is_anusvaar(p_vectors[i+1])):
syllables.append(' ')
syllables_list = ''.join(syllables).strip().split(' ')
return(char_backoff(syllables_list,vocab))
|
|
import unittest
from ctypes import *
from ctypes.test import need_symbol
from struct import calcsize
import _ctypes_test
import test.support
class SubclassesTest(unittest.TestCase):
def test_subclass(self):
class X(Structure):
_fields_ = [("a", c_int)]
class Y(X):
_fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
def test_subclass_delayed(self):
class X(Structure):
pass
self.assertEqual(sizeof(X), 0)
X._fields_ = [("a", c_int)]
class Y(X):
pass
self.assertEqual(sizeof(Y), sizeof(X))
Y._fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
class StructureTestCase(unittest.TestCase):
formats = {"c": c_char,
"b": c_byte,
"B": c_ubyte,
"h": c_short,
"H": c_ushort,
"i": c_int,
"I": c_uint,
"l": c_long,
"L": c_ulong,
"q": c_longlong,
"Q": c_ulonglong,
"f": c_float,
"d": c_double,
}
def test_simple_structs(self):
for code, tp in self.formats.items():
class X(Structure):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("c%c0%c" % (code, code)), code))
def test_unions(self):
for code, tp in self.formats.items():
class X(Union):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("%c" % (code)), code))
def test_struct_alignment(self):
class X(Structure):
_fields_ = [("x", c_char * 3)]
self.assertEqual(alignment(X), calcsize("s"))
self.assertEqual(sizeof(X), calcsize("3s"))
class Y(Structure):
_fields_ = [("x", c_char * 3),
("y", c_int)]
self.assertEqual(alignment(Y), alignment(c_int))
self.assertEqual(sizeof(Y), calcsize("3si"))
class SI(Structure):
_fields_ = [("a", X),
("b", Y)]
self.assertEqual(alignment(SI), max(alignment(Y), alignment(X)))
self.assertEqual(sizeof(SI), calcsize("3s0i 3si 0i"))
class IS(Structure):
_fields_ = [("b", Y),
("a", X)]
        self.assertEqual(alignment(IS), max(alignment(X), alignment(Y)))
self.assertEqual(sizeof(IS), calcsize("3si 3s 0i"))
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), alignment(X))
self.assertEqual(sizeof(XX), calcsize("3s 3s 0s"))
def test_empty(self):
# I had problems with these
#
# Although these are pathological cases: Empty Structures!
class X(Structure):
_fields_ = []
class Y(Union):
_fields_ = []
# Is this really the correct alignment, or should it be 0?
self.assertTrue(alignment(X) == alignment(Y) == 1)
self.assertTrue(sizeof(X) == sizeof(Y) == 0)
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), 1)
self.assertEqual(sizeof(XX), 0)
def test_fields(self):
        # Test the offset and size attributes of Structure/Union fields.
class X(Structure):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, sizeof(c_int))
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
class X(Union):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, 0)
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
# XXX Should we check nested data types also?
# offset is always relative to the class...
def test_packed(self):
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 1
self.assertEqual(sizeof(X), 9)
self.assertEqual(X.b.offset, 1)
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 2
self.assertEqual(sizeof(X), 10)
self.assertEqual(X.b.offset, 2)
import struct
longlong_size = struct.calcsize("q")
longlong_align = struct.calcsize("bq") - longlong_size
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 4
self.assertEqual(sizeof(X), min(4, longlong_align) + longlong_size)
self.assertEqual(X.b.offset, min(4, longlong_align))
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 8
self.assertEqual(sizeof(X), min(8, longlong_align) + longlong_size)
self.assertEqual(X.b.offset, min(8, longlong_align))
d = {"_fields_": [("a", "b"),
("b", "q")],
"_pack_": -1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
@test.support.cpython_only
def test_packed_c_limits(self):
# Issue 15989
import _testcapi
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.INT_MAX + 1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.UINT_MAX + 2}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
def test_initializers(self):
class Person(Structure):
_fields_ = [("name", c_char*6),
("age", c_int)]
self.assertRaises(TypeError, Person, 42)
self.assertRaises(ValueError, Person, b"asldkjaslkdjaslkdj")
self.assertRaises(TypeError, Person, "Name", "HI")
# short enough
self.assertEqual(Person(b"12345", 5).name, b"12345")
# exact fit
self.assertEqual(Person(b"123456", 5).name, b"123456")
# too long
self.assertRaises(ValueError, Person, b"1234567", 5)
def test_conflicting_initializers(self):
class POINT(Structure):
_fields_ = [("phi", c_float), ("rho", c_float)]
# conflicting positional and keyword args
self.assertRaisesRegex(TypeError, "phi", POINT, 2, 3, phi=4)
self.assertRaisesRegex(TypeError, "rho", POINT, 2, 3, rho=4)
# too many initializers
self.assertRaises(TypeError, POINT, 2, 3, 4)
def test_keyword_initializers(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
pt = POINT(1, 2)
self.assertEqual((pt.x, pt.y), (1, 2))
pt = POINT(y=2, x=1)
self.assertEqual((pt.x, pt.y), (1, 2))
def test_invalid_field_types(self):
class POINT(Structure):
pass
self.assertRaises(TypeError, setattr, POINT, "_fields_", [("x", 1), ("y", 2)])
def test_invalid_name(self):
# field name must be string
def declare_with_name(name):
class S(Structure):
_fields_ = [(name, c_int)]
self.assertRaises(TypeError, declare_with_name, b"x")
def test_intarray_fields(self):
class SomeInts(Structure):
_fields_ = [("a", c_int * 4)]
# can use tuple to initialize array (but not list!)
self.assertEqual(SomeInts((1, 2)).a[:], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::-1], [0, 0, 2, 1])
self.assertEqual(SomeInts((1, 2)).a[::2], [1, 0])
self.assertEqual(SomeInts((1, 2)).a[1:5:6], [2])
self.assertEqual(SomeInts((1, 2)).a[6:4:-1], [])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[:], [1, 2, 3, 4])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[::], [1, 2, 3, 4])
# too long
        # XXX Should this raise ValueError instead of RuntimeError?
self.assertRaises(RuntimeError, SomeInts, (1, 2, 3, 4, 5))
def test_nested_initializers(self):
# test initializing nested structures
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
p = Person(b"Someone", (b"1234", b"5678"), 5)
self.assertEqual(p.name, b"Someone")
self.assertEqual(p.phone.areacode, b"1234")
self.assertEqual(p.phone.number, b"5678")
self.assertEqual(p.age, 5)
@need_symbol('c_wchar')
def test_structures_with_wchar(self):
class PersonW(Structure):
_fields_ = [("name", c_wchar * 12),
("age", c_int)]
p = PersonW("Someone \xe9")
self.assertEqual(p.name, "Someone \xe9")
self.assertEqual(PersonW("1234567890").name, "1234567890")
self.assertEqual(PersonW("12345678901").name, "12345678901")
# exact fit
self.assertEqual(PersonW("123456789012").name, "123456789012")
#too long
self.assertRaises(ValueError, PersonW, "1234567890123")
def test_init_errors(self):
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
cls, msg = self.get_except(Person, b"Someone", (1, 2))
self.assertEqual(cls, RuntimeError)
self.assertEqual(msg,
"(Phone) <class 'TypeError'>: "
"expected bytes, int found")
cls, msg = self.get_except(Person, b"Someone", (b"a", b"b", b"c"))
self.assertEqual(cls, RuntimeError)
if issubclass(Exception, object):
self.assertEqual(msg,
"(Phone) <class 'TypeError'>: too many initializers")
else:
self.assertEqual(msg, "(Phone) TypeError: too many initializers")
def test_huge_field_name(self):
# issue12881: segfault with large structure field names
def create_class(length):
class S(Structure):
_fields_ = [('x' * length, c_int)]
for length in [10 ** i for i in range(0, 8)]:
try:
create_class(length)
except MemoryError:
# MemoryErrors are OK, we just don't want to segfault
pass
def get_except(self, func, *args):
try:
func(*args)
except Exception as detail:
return detail.__class__, str(detail)
@unittest.skip('test disabled')
def test_subclass_creation(self):
meta = type(Structure)
# same as 'class X(Structure): pass'
# fails, since we need either a _fields_ or a _abstract_ attribute
cls, msg = self.get_except(meta, "X", (Structure,), {})
self.assertEqual((cls, msg),
(AttributeError, "class must define a '_fields_' attribute"))
def test_abstract_class(self):
class X(Structure):
_abstract_ = "something"
# try 'X()'
cls, msg = self.get_except(eval, "X()", locals())
self.assertEqual((cls, msg), (TypeError, "abstract class"))
def test_methods(self):
## class X(Structure):
## _fields_ = []
self.assertIn("in_dll", dir(type(Structure)))
self.assertIn("from_address", dir(type(Structure)))
self.assertIn("in_dll", dir(type(Structure)))
def test_positional_args(self):
# see also http://bugs.python.org/issue5042
class W(Structure):
_fields_ = [("a", c_int), ("b", c_int)]
class X(W):
_fields_ = [("c", c_int)]
class Y(X):
pass
class Z(Y):
_fields_ = [("d", c_int), ("e", c_int), ("f", c_int)]
z = Z(1, 2, 3, 4, 5, 6)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 2, 3, 4, 5, 6))
z = Z(1)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 0, 0, 0, 0, 0))
self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7))
def test_pass_by_value(self):
# This should mirror the structure in Modules/_ctypes/_ctypes_test.c
class X(Structure):
_fields_ = [
('first', c_ulong),
('second', c_ulong),
('third', c_ulong),
]
s = X()
s.first = 0xdeadbeef
s.second = 0xcafebabe
s.third = 0x0bad1dea
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_large_struct_update_value
func.argtypes = (X,)
func.restype = None
func(s)
self.assertEqual(s.first, 0xdeadbeef)
self.assertEqual(s.second, 0xcafebabe)
self.assertEqual(s.third, 0x0bad1dea)
class PointerMemberTestCase(unittest.TestCase):
def test(self):
# a Structure with a POINTER field
class S(Structure):
_fields_ = [("array", POINTER(c_int))]
s = S()
# We can assign arrays of the correct type
s.array = (c_int * 3)(1, 2, 3)
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
# The following are bugs, but are included here because the unittests
# also describe the current behaviour.
#
# This fails with SystemError: bad arg to internal function
# or with IndexError (with a patch I have)
s.array[0] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [42, 2, 3])
s.array[0] = 1
## s.array[1] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
def test_none_to_pointer_fields(self):
class S(Structure):
_fields_ = [("x", c_int),
("p", POINTER(c_int))]
s = S()
s.x = 12345678
s.p = None
self.assertEqual(s.x, 12345678)
class TestRecursiveStructure(unittest.TestCase):
def test_contains_itself(self):
class Recursive(Structure):
pass
try:
Recursive._fields_ = [("next", Recursive)]
except AttributeError as details:
self.assertIn("Structure or union cannot contain itself",
str(details))
else:
self.fail("Structure or union cannot contain itself")
def test_vice_versa(self):
class First(Structure):
pass
class Second(Structure):
pass
First._fields_ = [("second", Second)]
try:
Second._fields_ = [("first", First)]
except AttributeError as details:
self.assertIn("_fields_ is final", str(details))
else:
self.fail("AttributeError not raised")
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import benchmark
from telemetry.core import discover
from telemetry.internal.util import bootstrap
from telemetry.internal.util import command_line
from telemetry.internal.util import path
from telemetry.internal.util import path_set
DEPS_FILE = 'bootstrap_deps'
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(os.path.realpath(os.path.join(
path.GetChromiumSrcDir(), os.pardir, deps_path))
for deps_path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s' % module_path)
# Load the module to inherit its sys.path modifications.
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
finder = modulefinder.ModuleFinder()
finder.run_script(module_path)
# Filter for only imports in Chromium.
for module in finder.modules.itervalues():
# If it's an __init__.py, module.__path__ gives the package's folder.
module_path = module.__path__[0] if module.__path__ else module.__file__
if not module_path:
continue
module_path = os.path.realpath(module_path)
if not path.IsSubpath(module_path, path.GetChromiumSrcDir()):
continue
yield module_path
def FindPageSetDependencies(base_dir):
logging.info('Finding page sets in %s' % base_dir)
# Add base_dir to path so our imports relative to base_dir will work.
sys.path.append(base_dir)
tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
index_by_class_name=True)
for test_class in tests.itervalues():
test_obj = test_class()
# Ensure the test's default options are set if needed.
parser = optparse.OptionParser()
test_obj.AddCommandLineArgs(parser, None)
options = optparse.Values()
for k, v in parser.get_default_values().__dict__.iteritems():
options.ensure_value(k, v)
# Page set paths are relative to their runner script, not relative to us.
path.GetBaseDir = lambda: base_dir
# TODO: Loading the page set will automatically download its Cloud Storage
# deps. This is really expensive, and we don't want to do this by default.
story_set = test_obj.CreateStorySet(options)
# Add all of its serving_dirs as dependencies.
for serving_dir in story_set.serving_dirs:
yield serving_dir
def FindExcludedFiles(files, options):
# Define some filters for files.
def IsHidden(path_string):
for pathname_component in path_string.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path_string):
return os.path.splitext(path_string)[1] == '.pyc'
def IsInCloudStorage(path_string):
return os.path.exists(path_string + '.sha1')
def MatchesExcludeOptions(path_string):
for pattern in options.exclude:
if (fnmatch.fnmatch(path_string, pattern) or
fnmatch.fnmatch(os.path.basename(path_string), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for file_path in files:
if any(condition(file_path) for condition in exclude_conditions):
yield file_path
def FindDependencies(target_paths, options):
# Verify arguments.
for target_path in target_paths:
if not os.path.exists(target_path):
raise ValueError('Path does not exist: %s' % target_path)
dependencies = path_set.PathSet()
# Including Telemetry's major entry points will (hopefully) include Telemetry
# and all its dependencies. If the user doesn't pass any arguments, we just
# have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(), 'telemetry', 'benchmark_runner.py')))
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(),
'telemetry', 'testing', 'run_tests.py')))
dependencies |= FindBootstrapDependencies(path.GetTelemetryDir())
# Add dependencies.
for target_path in target_paths:
base_dir = os.path.dirname(os.path.realpath(target_path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(target_path)
if options.include_page_set_data:
dependencies |= FindPageSetDependencies(base_dir)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(target_paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(path.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for dependency_path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(dependency_path, base_dir))
zip_file.write(dependency_path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for target_path in target_paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(target_path)))
link_info.create_system = 3 # Unix attributes.
      # 0100000 is the regular-file type bit, 0777 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(target_path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser, _):
parser.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed).')
parser.add_option(
'-p', '--include-page-set-data', action='store_true', default=False,
help='Scan tests for page set data and include them.')
parser.add_option(
'-e', '--exclude', action='append', default=[],
help='Exclude paths matching EXCLUDE. Can be used multiple times.')
parser.add_option(
'-z', '--zip',
help='Store files in a zip archive at ZIP.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args, _):
if args.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
def Run(self, args):
target_paths = args.positional_args
dependencies = FindDependencies(target_paths, args)
if args.zip:
ZipDependencies(target_paths, dependencies, args)
print 'Zip archive written to %s.' % args.zip
else:
print '\n'.join(sorted(dependencies))
return 0
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testDistributeStrategy(self):
v = variables.VariableV1(0.0)
self.assertIsNone(v._distribute_strategy)
@test_util.run_v1_only("b/120545219")
def testInitialization(self):
with self.cached_session():
var0 = variables.VariableV1(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual("Variable", var0._shared_name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.VariableV1(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual("Variable_1", var1._shared_name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var0)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testInitializationOrder(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
# Currently have to set the shape manually for Add.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
@test_util.run_deprecated_v1
def testCyclicInitializer(self):
with self.cached_session():
cyclic = control_flow_ops.while_loop(
cond=lambda i: i < 10,
body=lambda i: i + 1,
loop_vars=(constant_op.constant(0),))
initial_value = variables._try_guard_against_uninitialized_dependencies(
"test", cyclic)
self.assertIs(initial_value, cyclic)
def testIterable(self):
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
@test_util.run_deprecated_v1
def testAssignments(self):
with self.cached_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
self.assertAllClose(1.0, self.evaluate(var))
self.assertAllClose(-1.0, self.evaluate(minus_one))
self.assertAllClose(-1.0, self.evaluate(var))
self.assertAllClose(4.0, self.evaluate(four))
self.assertAllClose(4.0, self.evaluate(var))
@test_util.run_deprecated_v1
def testResourceAssignments(self):
with self.session(use_gpu=True):
var = resource_variable_ops.ResourceVariable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
self.assertAllClose(1.0, self.evaluate(var))
self.evaluate(minus_one)
self.assertAllClose(-1.0, self.evaluate(var))
self.evaluate(four)
self.assertAllClose(4.0, self.evaluate(var))
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
var = variables.Variable(np.zeros(shape=[1, 1]))
with self.assertRaisesRegex(ValueError, "shape.*and.*are incompatible"):
var.assign(np.zeros(shape=[2, 2]))
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = variables.Variable(np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.disable_tfrt("GetHostSize() is not expected to be called with "
"string type. b/156761465")
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(
initial_value=array_ops.zeros((0,), dtype=dtypes.string),
name="foo",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self.evaluate(variables.local_variables_initializer())
old_value = array.value()
copy_op = array.assign(old_value)
self.assertEqual([], list(self.evaluate(copy_op)))
def _countUpToTest(self, dtype):
with self.cached_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
self.assertEqual(1, self.evaluate(var))
self.assertEqual(1, self.evaluate(count_up_to))
self.assertEqual(2, self.evaluate(var))
self.assertEqual(2, self.evaluate(count_up_to))
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
@test_util.run_deprecated_v1
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
@test_util.run_deprecated_v1
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
@test_util.run_v1_only("b/120545219")
def testControlDepsNone(self):
with self.cached_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
        # d gets the control dep.
d = constant_op.constant(2.0)
# variables do not.
var_x = variables.VariableV1(2.0)
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access
@test_util.run_v1_only("b/120545219")
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
      # Create a variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = variables.Variable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegex(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
with self.cached_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
def testCachingDevice(self):
with self.cached_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
@test_util.run_deprecated_v1
def testCollections(self):
with self.cached_session():
var_x = variables.VariableV1(2.0)
var_y = variables.VariableV1(2.0, trainable=False)
var_z = variables.VariableV1(2.0, trainable=True)
var_t = variables.VariableV1(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
@test_util.run_deprecated_v1
def testCollectionsWithScope(self):
with self.cached_session():
with ops.name_scope("scope_1"):
var_x = variables.VariableV1(2.0)
with ops.name_scope("scope_2"):
var_y = variables.VariableV1(2.0)
self.assertEqual([var_x, var_y], variables.global_variables())
self.assertEqual([var_x], variables.global_variables("scope_1"))
self.assertEqual([var_y], variables.global_variables("scope_2"))
self.assertEqual([var_x, var_y], variables.trainable_variables())
self.assertEqual([var_x], variables.trainable_variables("scope_1"))
self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
@test_util.run_deprecated_v1
def testOperators(self):
with self.cached_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
var_m = variables.Variable([[2.0, 3.0]])
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
self.assertAllClose([-1.0], self.evaluate(rsub))
self.assertAllClose([20.0], self.evaluate(mul))
self.assertAllClose([20.0], self.evaluate(rmul))
self.assertAllClose([0.2], self.evaluate(div))
self.assertAllClose([5.0], self.evaluate(rdiv))
self.assertAllClose([-2.0], self.evaluate(neg))
self.assertAllClose([2.0], self.evaluate(abs_v))
self.assertAllClose([True], self.evaluate(lt))
self.assertAllClose([False], self.evaluate(rlt))
self.assertAllClose([True], self.evaluate(le))
self.assertAllClose([True], self.evaluate(rle))
self.assertAllClose([False], self.evaluate(gt))
self.assertAllClose([True], self.evaluate(rgt))
self.assertAllClose([True], self.evaluate(ge))
self.assertAllClose([True], self.evaluate(rge))
self.assertAllClose([6], self.evaluate(mod))
self.assertAllClose([3], self.evaluate(rmod))
self.assertAllClose([True, False], self.evaluate(and_v))
self.assertAllClose([True, True], self.evaluate(or_v))
self.assertAllClose([True, False], self.evaluate(xor_v))
self.assertAllClose([False, True], self.evaluate(invert_v))
self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
self.assertAllClose([[80.0]], self.evaluate(matmul))
self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testColocation(self):
with ops.device("/job:ps"):
var = variables.VariableV1(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.cached_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
@test_util.run_v1_only("b/120545219")
def testNoRefDataRace(self):
with self.cached_session():
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
with self.cached_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = variables.Variable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = variables.Variable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = variables.Variable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
def testSynchronizationAndAggregationSaved(self):
with ops.Graph().as_default():
original_variable = variables.Variable(
initial_value=constant_op.constant(10.0),
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
original_variable.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
original_variable.aggregation)
laundered = variables.Variable(
variable_def=original_variable.to_proto())
self.assertEqual(
variables.VariableSynchronization.NONE,
laundered.synchronization)
self.assertEqual(variables.VariableAggregationV2.ONLY_FIRST_REPLICA,
laundered.aggregation)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testRepr(self):
var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
self.assertEqual(
"<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
repr(var))
def testVariableNamesPreserveNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope("foo"):
v = variables.Variable(0.0, name="bar")
self.assertEqual(v.name, "foo/bar:0")
with ops.get_default_graph().as_default():
create_variable()
@parameterized.parameters(variables.VariableV1, variables.Variable)
def testTrainableVariable(self, cls):
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2], name="v")
w = variables.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
self.evaluate(v.initializer)
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
self.evaluate(v.initializer) # not strictly necessary
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(v.initializer)
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], partitioned_variable.get_shape())
self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegex(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
def testPartitionedVariableAssignments(self):
with ops.Graph().as_default(), self.cached_session():
v0 = variables.Variable(initial_value=[0.0])
v1 = variables.Variable(initial_value=[1.0])
v2 = variables.Variable(initial_value=[20.0])
v3 = variables.Variable(initial_value=[30.0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
v3._set_save_slice_info(
variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
partitions = [2]
      # Here variable_list is already ordered as [v0, v1], matching their
      # slice info offsets.
pv_0 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0, v1],
partitions=partitions)
pv_1 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v2, v3],
partitions=partitions)
deltas_a = constant_op.constant([1.0, 2.0])
deltas_b = constant_op.constant([3.0, 4.0])
ones = array_ops.ones([2])
plus_delta = pv_0.assign_add(deltas_a)
minus_delta = pv_0.assign_sub(deltas_b)
assign_ones = pv_0.assign(ones)
c_0 = constant_op.constant([2.0])
c_1 = constant_op.constant([3.0])
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
def testV1V2Equal(self):
v1 = variables.VariableAggregation
v2 = variables.VariableAggregationV2
self.assertEqual(v1.NONE, v2.NONE)
self.assertEqual(v1.SUM, v2.SUM)
self.assertEqual(v1.MEAN, v2.MEAN)
self.assertEqual(v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v2.NONE, v1.NONE)
self.assertEqual(v2.SUM, v1.SUM)
self.assertEqual(v2.MEAN, v1.MEAN)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_REPLICA)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_TOWER)
self.assertEqual(hash(v1.NONE), hash(v2.NONE))
self.assertEqual(hash(v1.SUM), hash(v2.SUM))
self.assertEqual(hash(v1.MEAN), hash(v2.MEAN))
self.assertEqual(hash(v1.ONLY_FIRST_REPLICA), hash(v2.ONLY_FIRST_REPLICA))
self.assertEqual(hash(v1.ONLY_FIRST_TOWER), hash(v2.ONLY_FIRST_REPLICA))
if __name__ == "__main__":
test.main()
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for creating and manipulating SON, the Serialized Ocument Notation.
Regular dictionaries can be used instead of SON objects, but not when the order
of keys is important. A SON object can be used just like a normal Python
dictionary."""
import copy
class SON(dict):
"""SON data.
A subclass of dict that maintains ordering of keys and provides a
few extra niceties for dealing with SON. SON objects can be
converted to and from BSON.
The mapping from Python types to BSON types is as follows:
=================================== ============= ===================
Python Type BSON Type Supported Direction
=================================== ============= ===================
None null both
bool boolean both
int number (int) both
float number (real) both
string string py -> bson
unicode string both
list array both
dict / `SON` object both
datetime.datetime [#dt]_ [#dt2]_ date both
compiled re regex both
`bson.binary.Binary` binary both
`bson.objectid.ObjectId` oid both
`bson.dbref.DBRef` dbref both
None undefined bson -> py
unicode code bson -> py
`bson.code.Code` code py -> bson
unicode symbol bson -> py
=================================== ============= ===================
Note that to save binary data it must be wrapped as an instance of
`bson.binary.Binary`. Otherwise it will be saved as a BSON string
and retrieved as unicode.
.. [#dt] datetime.datetime instances will be rounded to the nearest
millisecond when saved
    .. [#dt2] all datetime.datetime instances are treated as *naive*. Clients
       should always use UTC.
"""
def __init__(self, data=None, **kwargs):
self.__keys = []
dict.__init__(self)
self.update(data)
self.update(kwargs)
def __repr__(self):
result = []
for key in self.__keys:
result.append("(%r, %r)" % (key, self[key]))
return "SON([%s])" % ", ".join(result)
def __setitem__(self, key, value):
if key not in self:
self.__keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self.__keys.remove(key)
dict.__delitem__(self, key)
def keys(self):
return list(self.__keys)
def copy(self):
other = SON()
other.update(self)
return other
    # TODO: this is all from UserDict.DictMixin. It could probably be made more
# efficient.
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
return key in self.keys()
def __contains__(self, key):
return key in self.keys()
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError("pop expected at most 2 arguments, got "\
+ repr(1 + len(args)))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError('container is empty')
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __cmp__(self, other):
if isinstance(other, SON):
return cmp((dict(self.iteritems()), self.keys()),
(dict(other.iteritems()), other.keys()))
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
def to_dict(self):
"""Convert a SON document to a normal Python dictionary instance.
This is trickier than just *dict(...)* because it needs to be
recursive.
"""
def transform_value(value):
if isinstance(value, list):
return [transform_value(v) for v in value]
if isinstance(value, SON):
value = dict(value)
if isinstance(value, dict):
for k, v in value.iteritems():
value[k] = transform_value(v)
return value
return transform_value(dict(self))
def __deepcopy__(self, memo):
out = SON()
for k, v in self.iteritems():
out[k] = copy.deepcopy(v, memo)
return out
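# The ordered-dict behaviour described in the class docstring above is easiest
# to see with a small sketch. The demo function below is illustrative only and
# is not part of the original module; its keys and values are made up.
def _son_key_order_demo():
    """Show that SON preserves insertion order and converts back to dict."""
    doc = SON()
    doc["b"] = 2
    doc["a"] = 1
    doc["c"] = {"nested": True}
    # keys() reflects insertion order rather than hash order.
    assert doc.keys() == ["b", "a", "c"]
    # to_dict() recursively converts nested SON/dict values to plain dicts.
    plain = doc.to_dict()
    assert isinstance(plain, dict) and plain["c"] == {"nested": True}
    return doc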
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import getpass
import logging
import os
import shutil
import signal
import subprocess
import socket
import tempfile
import textwrap
import time
from desktop.lib.python_util import find_unused_port
import hadoop
from hadoop.mini_cluster import write_config
from hadoop.job_tracker import LiveJobTracker
from desktop.lib.paths import get_run_root
_shared_cluster = None
LOG = logging.getLogger(__name__)
STARTUP_DEADLINE = 60.0
CLEANUP_TMP_DIR = os.environ.get('MINI_CLUSTER_CLEANUP', 'true')
class PseudoHdfs4(object):
"""Run HDFS and MR2 locally, in pseudo-distributed mode"""
def __init__(self):
self._tmpdir = tempfile.mkdtemp(prefix='tmp_hue_')
os.chmod(self._tmpdir, 0755)
self._superuser = getpass.getuser()
self._fs = None
self._jt = None
self._mr2_env = None
self._log_dir = None
self._dfs_http_port = None
self._dfs_http_address = None
self._namenode_port = None
self._fs_default_name = None
self._rm_port = None
self._nn_proc = None
self._dn_proc = None
self._rm_proc = None
self._nm_proc = None
self._hs_proc = None
self._fqdn = socket.getfqdn()
self._core_site = None
self._hdfs_site = None
self._mapred_site = None
self.shutdown_hook = None
def __str__(self):
return "PseudoHdfs5 (%(name)s) at %(dir)s --- MR2 (%(mapreduce)s) at http://%(fqdn)s:%(port)s" % {
'name': self._fs_default_name,
'dir': self._tmpdir,
'mapreduce': self.mapred_job_tracker,
'fqdn': self._fqdn,
'port': self._rm_port
}
@property
def superuser(self):
return self._superuser
@property
def mr2_env(self):
return self._mr2_env
@property
def log_dir(self):
return self._log_dir
@property
def fs_default_name(self):
return self._fs_default_name
@property
def namenode_port(self):
return self._namenode_port
@property
def dfs_http_address(self):
return self._dfs_http_address
@property
def dfs_http_port(self):
return self._dfs_http_port
@property
def mapred_job_tracker(self):
return "%s:%s" % (self._fqdn, self._rm_port,)
@property
def hadoop_conf_dir(self):
return self._tmppath('conf')
@property
def fs(self):
if self._fs is None:
if self._dfs_http_address is None:
LOG.warn("Attempt to access uninitialized filesystem")
return None
self._fs = hadoop.fs.webhdfs.WebHdfs("http://%s/webhdfs/v1" % (self._dfs_http_address,), self.fs_default_name)
return self._fs
@property
def jt(self):
if self._jt is None:
self._jt = LiveJobTracker(self._fqdn, 0)
return self._jt
def stop(self):
def _kill_proc(name, proc):
try:
while proc is not None and proc.poll() is None:
os.kill(proc.pid, signal.SIGKILL)
LOG.info('Stopping %s pid %s' % (name, proc.pid,))
time.sleep(0.5)
except Exception, ex:
LOG.exception('Failed to stop pid %s. You may want to do it manually: %s' % (proc.pid, ex))
_kill_proc('NameNode', self._nn_proc)
_kill_proc('DataNode', self._dn_proc)
_kill_proc('ResourceManager', self._rm_proc)
_kill_proc('Nodemanager', self._nm_proc)
_kill_proc('HistoryServer', self._hs_proc)
self._nn_proc = None
self._dn_proc = None
self._rm_proc = None
self._nm_proc = None
self._hs_proc = None
if CLEANUP_TMP_DIR == 'false':
LOG.info('Skipping cleanup of temp directory "%s"' % (self._tmpdir,))
else:
LOG.info('Cleaning up temp directory "%s". Use "export MINI_CLUSTER_CLEANUP=false" to avoid.' % (self._tmpdir,))
shutil.rmtree(self._tmpdir, ignore_errors=True)
if self.shutdown_hook is not None:
self.shutdown_hook()
def _tmppath(self, filename):
return os.path.join(self._tmpdir, filename)
def _logpath(self, filename):
return os.path.join(self._log_dir, filename)
def start(self):
LOG.info("Using temporary directory: %s" % (self._tmpdir,))
if not os.path.exists(self.hadoop_conf_dir):
os.mkdir(self.hadoop_conf_dir)
self._log_dir = self._tmppath('logs')
if not os.path.exists(self._log_dir):
os.mkdir(self._log_dir)
self._local_dir = self._tmppath('local')
if not os.path.exists(self._local_dir):
os.mkdir(self._local_dir)
self._write_hadoop_metrics_conf(self.hadoop_conf_dir)
self._write_core_site()
self._write_hdfs_site()
self._write_yarn_site()
self._write_mapred_site()
# More stuff to setup in the environment
env = {
'YARN_HOME': get_run_root('ext/hadoop/hadoop'),
'HADOOP_COMMON_HOME': get_run_root('ext/hadoop/hadoop'),
'HADOOP_MAPRED_HOME': get_run_root('ext/hadoop/hadoop'),
'HADOOP_HDFS_HOME': get_run_root('ext/hadoop/hadoop'),
'HADOOP_CONF_DIR': self.hadoop_conf_dir,
'YARN_CONF_DIR': self.hadoop_conf_dir,
'HADOOP_HEAPSIZE': '128',
'HADOOP_LOG_DIR': self._log_dir,
'USER': self.superuser,
'LANG': "en_US.UTF-8",
'PATH': os.environ['PATH'],
}
if "JAVA_HOME" in os.environ:
env['JAVA_HOME'] = os.environ['JAVA_HOME']
LOG.debug("Hadoop Environment:\n" + "\n".join([ str(x) for x in sorted(env.items()) ]))
# Format HDFS
self._format(self.hadoop_conf_dir, env)
# Run them
self._nn_proc = self._start_daemon('namenode', self.hadoop_conf_dir, env)
self._dn_proc = self._start_daemon('datanode', self.hadoop_conf_dir, env)
# Make sure they're running
deadline = time.time() + STARTUP_DEADLINE
while not self._is_hdfs_ready(env):
if time.time() > deadline:
self.stop()
raise RuntimeError('%s is taking too long to start' % (self,))
time.sleep(5)
# Start MR2
self._start_mr2(env)
# Create HDFS directories
if not self.fs.exists('/tmp'):
      self.fs.do_as_superuser(self.fs.mkdir, '/tmp', 01777)
self.fs.do_as_superuser(self.fs.chmod, '/tmp', 01777)
self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn', 01777)
self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn', 01777)
self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging', 01777)
self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging', 01777)
self.fs.do_as_superuser(self.fs.mkdir, '/tmp/hadoop-yarn/staging/history', 01777)
self.fs.do_as_superuser(self.fs.chmod, '/tmp/hadoop-yarn/staging/history', 01777)
self.fs.do_as_superuser(self.fs.mkdir, '/var/log/hadoop-yarn/apps', 01777)
self.fs.do_as_superuser(self.fs.chmod, '/var/log/hadoop-yarn/apps', 01777)
self.fs.do_as_user('test', self.fs.create_home_dir, '/user/test')
self.fs.do_as_user('hue', self.fs.create_home_dir, '/user/hue')
def _start_mr2(self, env):
LOG.info("Starting MR2")
self._mr2_env = env.copy()
LOG.debug("MR2 Environment:\n" + "\n".join([ str(x) for x in sorted(self.mr2_env.items()) ]))
# Run YARN
self._rm_proc = self._start_daemon('resourcemanager', self.hadoop_conf_dir, self.mr2_env, self._get_yarn_bin(self.mr2_env))
self._nm_proc = self._start_daemon('nodemanager', self.hadoop_conf_dir, self.mr2_env, self._get_yarn_bin(self.mr2_env))
self._hs_proc = self._start_daemon('historyserver', self.hadoop_conf_dir, self.mr2_env, self._get_mapred_bin(self.mr2_env))
# Give them a moment to actually start
time.sleep(1)
# Make sure they're running
deadline = time.time() + STARTUP_DEADLINE
while not self._is_mr2_ready(self.mr2_env):
if time.time() > deadline:
self.stop()
raise RuntimeError('%s is taking too long to start' % (self,))
time.sleep(5)
def _format(self, conf_dir, env):
args = (self._get_hdfs_bin(env), '--config', conf_dir, 'namenode', '-format')
LOG.info('Formatting HDFS: %s' % (args,))
stdout = tempfile.TemporaryFile()
stderr = tempfile.TemporaryFile()
try:
ret = subprocess.call(args, env=env, stdout=stdout, stderr=stderr)
if ret != 0:
stdout.seek(0)
stderr.seek(0)
raise RuntimeError('Failed to format namenode\n''=== Stdout ===:\n%s\n''=== Stderr ===:\n%s' % (stdout.read(), stderr.read()))
finally:
stdout.close()
stderr.close()
def _log_exit(self, proc_name, exit_code):
LOG.info('%s exited with %s' % (proc_name, exit_code))
LOG.debug('--------------------- STDOUT:\n' + file(self._logpath(proc_name + '.stdout')).read())
LOG.debug('--------------------- STDERR:\n' + file(self._logpath(proc_name + '.stderr')).read())
def _is_hdfs_ready(self, env):
if self._nn_proc.poll() is not None:
self._log_exit('namenode', self._nn_proc.poll())
return False
if self._dn_proc.poll() is not None:
self._log_exit('datanode', self._dn_proc.poll())
return False
# Run a `dfsadmin -report' against it
dfsreport = subprocess.Popen((self._get_hdfs_bin(env), 'dfsadmin', '-report'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
ret = dfsreport.wait()
if ret != 0:
LOG.debug('DFS not ready yet.\n%s\n%s' % (dfsreport.stderr.read(), dfsreport.stdout.read()))
return False
# Check that the DN is servicing
report_out = dfsreport.stdout.read()
if 'Live datanodes (1)' in report_out:
return True
LOG.debug('Waiting for DN to come up .................\n%s' % (report_out,))
return False
def _is_mr2_ready(self, env):
if self._rm_proc.poll() is not None:
self._log_exit('resourcemanager', self._rm_proc.poll())
return False
if self._nm_proc.poll() is not None:
self._log_exit('nodemanager', self._nm_proc.poll())
return False
if self._hs_proc.poll() is not None:
self._log_exit('historyserver', self._hs_proc.poll())
return False
# Run a `hadoop job -list all'
list_all = subprocess.Popen(
(self._get_mapred_bin(env), 'job', '-list', 'all'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
ret = list_all.wait()
if ret == 0:
return True
    LOG.debug('MR2 not ready yet.\n%s\n%s' % (list_all.stderr.read(), list_all.stdout.read()))
return False
def _start_daemon(self, proc_name, conf_dir, env, hadoop_bin=None):
if hadoop_bin is None:
hadoop_bin = self._get_hadoop_bin(env)
args = (hadoop_bin, '--config', conf_dir, proc_name)
LOG.info('Starting Hadoop cluster daemon: %s' % (args,))
stdout = file(self._logpath(proc_name + ".stdout"), 'w')
stderr = file(self._logpath(proc_name + ".stderr"), 'w')
return subprocess.Popen(args=args, stdout=stdout, stderr=stderr, env=env)
def _get_hadoop_bin(self, env):
try:
return env['HADOOP_BIN']
except KeyError:
return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'hadoop')
def _get_mapred_bin(self, env):
try:
return env['MAPRED_BIN']
except KeyError:
return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'mapred')
def _get_yarn_bin(self, env):
try:
return env['YARN_BIN']
except KeyError:
return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'yarn')
def _get_hdfs_bin(self, env):
try:
return env['HDFS_BIN']
except KeyError:
return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'hdfs')
def _write_hdfs_site(self):
self._dfs_http_port = find_unused_port()
self._dfs_http_address = '%s:%s' % (self._fqdn, self._dfs_http_port)
hdfs_configs = {
'dfs.webhdfs.enabled': 'true',
'dfs.http.address': self._dfs_http_address,
'dfs.namenode.safemode.extension': 1,
'dfs.namenode.safemode.threshold-pct': 0,
'dfs.datanode.address': '%s:0' % self._fqdn,
'dfs.datanode.http.address': '0.0.0.0:0', # Work around webhdfs redirect bug -- bind to all interfaces
'dfs.datanode.ipc.address': '%s:0' % self._fqdn,
'dfs.replication': 1,
'dfs.safemode.min.datanodes': 1,
'dfs.namenode.fs-limits.min-block-size': '1000',
'dfs.permissions': 'true'
}
self._hdfs_site = self._tmppath('conf/hdfs-site.xml')
write_config(hdfs_configs, self._hdfs_site)
def _write_core_site(self):
self._namenode_port = find_unused_port()
self._fs_default_name = 'hdfs://%s:%s' % (self._fqdn, self._namenode_port,)
core_configs = {
'fs.default.name': self._fs_default_name,
'hadoop.security.authorization': 'true',
'hadoop.security.authentication': 'simple',
'hadoop.proxyuser.hue.hosts': '*',
'hadoop.proxyuser.hue.groups': '*',
'hadoop.proxyuser.oozie.hosts': '*',
'hadoop.proxyuser.oozie.groups': '*',
'hadoop.proxyuser.%s.hosts' % (getpass.getuser(),): '*',
'hadoop.proxyuser.%s.groups' % (getpass.getuser(),): '*',
'hadoop.tmp.dir': self._tmppath('hadoop_tmp_dir'),
'fs.trash.interval': 10
}
self._core_site = self._tmppath('conf/core-site.xml')
write_config(core_configs, self._core_site)
def _write_yarn_site(self):
self._rm_resource_port = find_unused_port()
self._rm_port = find_unused_port()
self._rm_scheduler_port = find_unused_port()
self._rm_admin_port = find_unused_port()
self._rm_webapp_port = find_unused_port()
self._nm_port = find_unused_port()
self._nm_webapp_port = find_unused_port()
yarn_configs = {
'yarn.resourcemanager.resource-tracker.address': '%s:%s' % (self._fqdn, self._rm_resource_port,),
'yarn.resourcemanager.address': '%s:%s' % (self._fqdn, self._rm_port,),
'yarn.resourcemanager.scheduler.address': '%s:%s' % (self._fqdn, self._rm_scheduler_port,),
'yarn.resourcemanager.scheduler.class': 'org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler',
'yarn.resourcemanager.admin.address': '%s:%s' % (self._fqdn, self._rm_admin_port,),
'yarn.resourcemanager.webapp.address': '%s:%s' % (self._fqdn, self._rm_webapp_port,),
'yarn.log-aggregation-enable': 'true',
'yarn.dispatcher.exit-on-error': 'true',
'yarn.nodemanager.local-dirs': self._local_dir,
'yarn.nodemanager.log-dirs': self._logpath('yarn-logs'),
'yarn.nodemanager.remote-app-log-dir': '/var/log/hadoop-yarn/apps',
'yarn.nodemanager.localizer.address' : '%s:%s' % (self._fqdn, self._nm_port,),
'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
'yarn.nodemanager.webapp.address': '%s:%s' % (self._fqdn, self._nm_webapp_port,),
'yarn.app.mapreduce.am.staging-dir': '/tmp/hadoop-yarn/staging',
'yarn.application.classpath':
'''$HADOOP_CONF_DIR,
$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*,
$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*''',
}
self._yarn_site = self._tmppath('conf/yarn-site.xml')
write_config(yarn_configs, self._tmppath('conf/yarn-site.xml'))
def _write_mapred_site(self):
self._jh_port = find_unused_port()
self._jh_web_port = find_unused_port()
self._mr_shuffle_port = find_unused_port()
mapred_configs = {
'mapred.job.tracker': '%s:%s' % (self._fqdn, self._rm_port,),
'mapreduce.framework.name': 'yarn',
'mapreduce.jobhistory.address': '%s:%s' % (self._fqdn, self._jh_port,),
'mapreduce.jobhistory.webapp.address': '%s:%s' % (self._fqdn, self._jh_web_port,),
'mapreduce.task.tmp.dir': self._tmppath('tasks'),
'mapreduce.shuffle.port': self._mr_shuffle_port,
}
self._mapred_site = self._tmppath('conf/mapred-site.xml')
write_config(mapred_configs, self._tmppath('conf/mapred-site.xml'))
def _write_hadoop_metrics_conf(self, conf_dir):
f = file(os.path.join(conf_dir, "hadoop-metrics.properties"), "w")
try:
f.write(textwrap.dedent("""
dfs.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
mapred.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
jvm.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
rpc.class=org.apache.hadoop.metrics.spi.NoEmitMetricsContext
"""))
finally:
f.close()
def shared_cluster():
global _shared_cluster
if _shared_cluster is None:
cluster = PseudoHdfs4()
atexit.register(cluster.stop)
try:
cluster.start()
except Exception, ex:
LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))
fqdn = socket.getfqdn()
webhdfs_url = "http://%s:%s/webhdfs/v1" % (fqdn, cluster.dfs_http_port,)
closers = [
hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),
hadoop.conf.YARN_CLUSTERS['default'].HOST.set_for_testing(fqdn),
hadoop.conf.YARN_CLUSTERS['default'].PORT.set_for_testing(cluster._rm_port),
hadoop.conf.YARN_CLUSTERS['default'].RESOURCE_MANAGER_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
hadoop.conf.YARN_CLUSTERS['default'].PROXY_API_URL.set_for_testing('http://%s:%s' % (cluster._fqdn, cluster._rm_webapp_port,)),
hadoop.conf.YARN_CLUSTERS['default'].HISTORY_SERVER_API_URL.set_for_testing('%s:%s' % (cluster._fqdn, cluster._jh_web_port,)),
]
old = hadoop.cluster.clear_caches()
def restore_config():
hadoop.cluster.restore_caches(old)
for x in closers:
x()
cluster.shutdown_hook = restore_config
_shared_cluster = cluster
return _shared_cluster
"""
Manual start from the Hue shell.
build/env/bin/hue shell
>
from hadoop import pseudo_hdfs4
pseudo_hdfs4.main()
>
exit() # To shutdown cleanly
"""
def main():
logging.basicConfig(level=logging.DEBUG)
cluster = PseudoHdfs4()
cluster.start()
print "%s running" % (cluster,)
print "fs.default.name=%s" % (cluster.fs_default_name,)
print "dfs.http.address=%s" % (cluster.dfs_http_address,)
print "jobtracker.thrift.port=%s" % (cluster.jt_thrift_port,)
print "mapred.job.tracker=%s" % (cluster.mapred_job_tracker,)
from IPython.Shell import IPShellEmbed
IPShellEmbed()()
cluster.stop()
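# A minimal sketch of how a test might consume the shared cluster above. This
# helper is illustrative only (it is not part of Hue's test suite) and sticks
# to attributes and filesystem calls already exercised in this module
# (fs.exists, fs.mkdir via do_as_superuser, fs_default_name, mapred_job_tracker).
def _example_shared_cluster_use():
  cluster = shared_cluster()
  # start() has already created /tmp, so the WebHdfs client is reachable.
  assert cluster.fs.exists('/tmp')
  if not cluster.fs.exists('/tmp/example'):
    cluster.fs.do_as_superuser(cluster.fs.mkdir, '/tmp/example', 01777)
  print "fs.default.name=%s" % (cluster.fs_default_name,)
  print "mapred.job.tracker=%s" % (cluster.mapred_job_tracker,)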
#!/usr/bin/env python
#
# $Id: //depot/main/platform/kosmosfs/scripts.solaris/kfsfsck.py#0 $
#
# Copyright 2008 Quantcast Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# KFS fsck
#
import os,sys,os.path,getopt
import socket,threading,popen2
import tempfile
import time
import re
from ConfigParser import ConfigParser
from time import strftime
# Global dict holding chunk info.
gChunkMap = {}
# Global list of live chunk servers
gUpServers = []
class UpServer:
"""Keep track of an up server state"""
def __init__(self, info):
if isinstance(info, str):
serverInfo = info.split(',')
# order here is host, port, total, used, util, nblocks, last heard
for i in xrange(len(serverInfo)):
s = serverInfo[i].split('=')
setattr(self, s[0].strip(), s[1].strip())
if hasattr(self, 's'):
setattr(self, 'host', self.s)
delattr(self, 's')
if hasattr(self, 'p'):
setattr(self, 'port', self.p)
delattr(self, 'p')
self.down = 0
self.retiring = 0
def __cmp__(self, other):
""" Order by IP"""
return cmp(socket.inet_aton(self.host), socket.inet_aton(other.host))
class ChunkInfo:
"""Structure to hold information about chunk, its hosts and sizes on them"""
def __init__(self, chunkID, fileID, numServers):
self.chunkID = chunkID
self.fileID = fileID
self.numServers = numServers
self.chunkHosts = []
def addChunkHostInfo(self, chunkHostInfo):
self.chunkHosts.append(chunkHostInfo)
def printIDs(self):
print self.chunkID, self.fileID, self.numServers
def updateChunkSize(self, chunkSize, host, port):
for chunkHostInfo in self.chunkHosts:
if ((chunkHostInfo.host == host) and (chunkHostInfo.port == port)):
chunkHostInfo.chunkSize = chunkSize
def printChunkInfo(self):
outline = []
outline.append (self.chunkID)
outline.append (self.fileID)
outline.append (self.numServers)
for chunkHostInfo in self.chunkHosts:
chunkHostInfo.printChunkHostInfo(outline)
return ' '.join(outline)
class ChunkHostInfo:
"""Structure used by ChunkInfo to define a host holding a chunk"""
def __init__(self, host, port, rack):
self.host = host
self.port = port
self.rack = rack
self.chunkSize = 0
def printChunkHostInfo(self, outline):
outline.append (self.host)
outline.append (self.port)
outline.append (self.rack)
outline.append (str(self.chunkSize))
def updateChunkSize(self, chunkSize):
self.chunkSize = chunkSize
class ServerLocation:
def __init__(self, **kwds):
self.__dict__.update(kwds)
self.status = 0
def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):
""" Read dumpMetaFile, chunkSizeFile and generate outFile"""
dump = open (dumpMetaFile, "r")
chunk = open (chunkSizeFile, "r")
out = open (outFile, "w")
cline = ""
cline = chunk.readline()
cline = cline.rstrip("\n")
while dump:
dline = dump.readline()
if not dline:
break
dline = dline.rstrip("\n")
# Split line parts
dlineParts = dline.split(' ')
# Read lines from chunkSize
numEntries = int(dlineParts[2])
entries = []
for i in range(numEntries):
entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])
#entries[i][0] = dlineParts[i*3 + 3]
#entries[i][1] = dlineParts[i*3 + 4]
#entries[i][2] = dlineParts[i*3 + 5]
#entries[i][3] = 0
while True:
clineParts = cline.split(' ')
if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):
for i in range(numEntries):
if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):
entries[i][3] = clineParts[2]
else:
break
cline = chunk.readline()
cline = cline.rstrip("\n")
if not cline:
break
# Print output
out.write(dlineParts[0]+" "+dlineParts[1]+" "+dlineParts[2]+" ")
for i in range(numEntries):
out.write(str(entries[i][3])+" "+entries[i][0]+" "+entries[i][1]+" "+entries[i][2]+" ")
out.write("\n")
out.close()
def saveToFile(fileName):
"""Save gChunkMap to file which could be used by emulator code"""
outfile = open (fileName, "w")
chunkInfoKeys = gChunkMap.keys()
chunkInfoKeys.sort()
for chunkInfo in chunkInfoKeys:
c = gChunkMap[chunkInfo]
outfile.write(c.printChunkInfo())
outfile.write("\n");
def loadMetaChunkToServerMap (fileName):
"""Read metaserver chunkmap.txt and build gChunkMap hash"""
if not os.path.exists(fileName):
print "File ", fileName, " does not exists"
sys.exit(1)
infile = open (fileName, "r")
count = 0
while infile:
count = count + 1
line = infile.readline()
if not line:
break
print "DEBUGME : processing line %s, %d" % (line, count)
lineParts = line.split(' ')
gChunkMap[lineParts[0]] = ChunkInfo(lineParts[0], lineParts[1], lineParts[2])
# Add a ChunkHostInfo
numServers = int(lineParts[2])
for i in range(numServers):
i = i * 3
gChunkMap[lineParts[0]].addChunkHostInfo(ChunkHostInfo(lineParts[i+3], lineParts[i+4], lineParts[i+5]))
def processUpNodes(nodes):
"""Helper function to process live chunk server nodes"""
global gUpServers
servers = nodes.split('\t')
gUpServers = [UpServer(c) for c in servers if c != '']
def dumpChunkMap(chunkServer):
"""Helper function to send DUMP_CHUNKMAP RPC to chunkServer and read
the output sent over socket"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((chunkServer.node, chunkServer.port))
req = "DUMP_CHUNKMAP\r\nVersion: KFS/1.0\r\nCseq: 1\r\n\r\n"
sock.send(req)
sockIn = sock.makefile('r')
contentLength = 0
seenLength = 0
for line in sockIn:
if line.find('OK') == 0:
continue
if line.find('Cseq') == 0:
continue
if line.find('Status') == 0:
continue
if line.find('\r\n') == 0:
continue
if line.find('Content-length') == 0:
line = line.rstrip("\n")
lineParts = line.split(' ')
contentLength = int(lineParts[1])
else:
seenLength = seenLength + len(line)
line = line.rstrip("\n")
lineParts = line.split(' ')
if gChunkMap.has_key(lineParts[0]):
gChunkMap[lineParts[0]].updateChunkSize(lineParts[2], chunkServer.node, str(chunkServer.port))
if (seenLength == contentLength):
break
def ping(metaServer):
"""Helper function to send PING to meta server and populate list of
live chunk server list"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((metaServer.node, metaServer.port))
req = "PING\r\nVersion: KFS/1.0\r\nCseq: 1\r\n\r\n"
sock.send(req)
sockIn = sock.makefile('r')
for line in sockIn:
if line.find('Down Servers:') == 0:
if (len(line.split(':')) < 2):
break
pos = line.find(':')
downNodes = line[pos+1:].strip()
break
if line.find('Servers:') != 0:
continue
nodes = line.split(':')[1].strip()
processUpNodes(nodes)
gUpServers.sort()
sock.close()
def dumpMetaServerChunkMap(metaServer, dumpMetaFile, defaultMetaFile, defaultCheckPoint):
"""Helper function to send DUMP_METASERVERCHUNKMAP to meta server
and populate list of live chunk server list"""
# Get latest checkpoint file
# Gzip latest file and copy it locally
print "Compressing latest checkpoint %s on %s" % (defaultCheckPoint, metaServer.node)
if not os.path.exists("./checkpointdir"):
command = "mkdir ./checkpointdir"
os.system(command)
command = "ssh -o StrictHostKeyChecking=no %s gzip -c %s > ./checkpointdir/latest.gz" % (metaServer.node, defaultCheckPoint)
os.system(command)
#print "Copying latest checkpoint file %s.gz" % defaultCheckPoint
#command = "scp -o StrictHostKeyChecking=no %s:%s.gz ./checkpointdir" % (metaServer.node, defaultCheckPoint)
#os.system(command)
print "Uncompressing latest checkpoint ./checkpointdir/latest.gz"
command = "gunzip -f ./checkpointdir/latest.gz"
os.system(command)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((metaServer.node, metaServer.port))
req = "DUMP_CHUNKTOSERVERMAP\r\nVersion: KFS/1.0\r\nCseq: 1\r\n\r\n"
sock.send(req)
sockIn = sock.makefile('r')
for line in sockIn:
if line.find('OK') == 0:
continue
if line.find('Cseq') == 0:
continue
if line.find('Status') == 0:
continue
if line.find('\r\n') == 0:
break
sock.close()
    # Gzip the file and scp it over to dumpMetaFile.gz, then extract it
print "Compressing chunk map dump %s on %s" % (defaultMetaFile, metaServer.node)
command = "ssh -o StrictHostKeyChecking=no %s gzip -f %s" % (metaServer.node, defaultMetaFile)
os.system(command)
print "Copying chunk map dump %s.gz to %s.gz" % (defaultMetaFile, dumpMetaFile)
command = "scp -o StrictHostKeyChecking=no %s:%s.gz %s.gz" % (metaServer.node, defaultMetaFile, dumpMetaFile)
os.system(command)
print "Uncompressing chunk map dump %s.gz" % (dumpMetaFile)
command = "gunzip -f %s.gz" % dumpMetaFile
os.system(command)
print "Creating symlink chunkmap.txt to %s" % (dumpMetaFile)
command = "rm chunkmap.txt"
os.system(command)
command = "ln -s %s chunkmap.txt" % (dumpMetaFile)
os.system(command)
def usage():
print "Usage : ./kfsfsck --file machines.cfg [--machines machines] [--verbose] [--replicacheck --builddir builddir --networkdef networkdef [--checksize] [--lostfound] [--delete]]\n"
print "Example : ./kfsfsck -f machines.cfg"
print " Would ask metaserver to dump chunk map, get it locally "
print " and does basic replica checking displaying stats about "
print " Chunks"
print " : ./kfsfsck -f machines.cfg -s"
print " Would ping chunk servers to get chunk sizes and fill it "
print " in output file ./chunkListOutFile"
print " : ./kfsfsck -f machines.cfg -r -b ../build -n network.df"
print " Would also run replicachecker, which builds metaserver "
print " map and does replica verification. Optionally we could"
print " also move or delete files with missing blocks."
print " "
print " -f : kfs cluster config file"
print " -m : chunk server machine list"
print " -r : invoke replicachecker"
print " -b : build dir "
print " -n : network definition file"
print " -s : Checks replica sizes. This operation is very slow "
print " as it pings each chunk server to get replica sizes"
print " -l : move files with missing blocks to /lost+found "
print " -d : delete files with missing blocks"
print " -v : Verbose mode prints info about replicas on same rack"
def readChunkserversFile(machinesFn):
    '''Given a list of chunkserver node names, one per line, construct a config
    section for each chunkserver and add it to the config based on the defaults'''
global config
defaultChunkOptions = config.options("chunkserver_defaults")
for l in open(machinesFn, 'r'):
line = l.strip()
if (line.startswith('#')):
# ignore commented out node names
continue
section_name = "chunkserver_" + line
config.add_section(section_name)
config.set(section_name, "node", line)
for o in defaultChunkOptions:
config.set(section_name, o, config.get("chunkserver_defaults", o))
config.remove_section("chunkserver_defaults")
def updateChunkServerInfo(host, port, config):
    '''Given host, port read the config to get the list of all chunkDir
    directories. Do an ls to collect the chunkID <-> chunkSize mapping on host'''
sections = config.sections()
for s in sections:
if (s == 'metaserver'):
continue
node = config.get(s, 'node')
node = socket.gethostbyname(node)
if (node != host):
continue
chunkDir = config.get(s, 'chunkDir')
chunkDirs = chunkDir.split(' ')
for dir in chunkDirs:
command = "ssh %s ls -l %s" % (node, dir)
for line in os.popen(command).readlines():
line = line.rstrip('\n');
lineParts = line.split()
if (len(lineParts) == 9):
if ("lost+found" == lineParts[8]):
continue
chunkSize = lineParts[4]
chunkIDParts = lineParts[8].split('.')
chunkID = chunkIDParts[1]
if gChunkMap.has_key(chunkID):
gChunkMap[chunkID].updateChunkSize(chunkSize, host, str(port))
def fastFsck(dumpMetaFile, mytime, verbose):
    '''Execute fast fsck. This just checks consistency of chunks by looking
    at the chunkmap.txt dump from the metaserver'''
infile = open (dumpMetaFile, "r")
numChunks = 0
num2Copies = 0
num3Copies = 0
numUnderReplicated = 0
numOverReplicated = 0
numMissing = 0
status = "HEALTHY"
print "***********************************************";
while infile:
line = infile.readline()
if not line:
break
line = line.rstrip('\n')
lineParts = line.split()
cID = int(lineParts[0])
fileID = int(lineParts[1])
numReplicas = int(lineParts[2])
replicasInfo = lineParts[3:]
replicasSize = (len(replicasInfo)) / 3
numChunks = numChunks + 1
if (replicasSize == 0):
print "Chunk %d missing" % cID
numMissing = numMissing + 1
elif (replicasSize < 3):
print "Chunk %d under replicated having %d copies" % (cID, replicasSize)
numUnderReplicated = numUnderReplicated + 1
elif (replicasSize > 3):
if (verbose):
print "Chunk %d over replicated having %d copies" % (cID, replicasSize)
numOverReplicated = numOverReplicated + 1
elif ((getRack(replicasInfo[0])) == (getRack(replicasInfo[3])) == (getRack(replicasInfo[6]))):
print "Chunk %d has 3 copies on same rack %s, %s and %s" % (cID, replicasInfo[0], replicasInfo[3], replicasInfo[6])
num3Copies = num3Copies + 1
else:
for i in range(1, replicasSize):
if (getRack(replicasInfo[i*3]) == getRack(replicasInfo[(i-1)*3])):
if (verbose):
print "Chunk %d has 2 copies on same rack %s and %s" % (cID, replicasInfo[i*3], replicasInfo[(i-1)*3])
num2Copies = num2Copies + 1
if numMissing:
status = "CORRUPT"
# Print Summary
print "***********************************************"
print " KFS Summary (%s)" % mytime
print "***********************************************"
print " Num Chunks : %d" % numChunks
print " Num Missing Chunks : %d" % numMissing
print " Num UnderReplicated Chunks : %d" % numUnderReplicated
print " Num OverReplicated Chunks : %d" % numOverReplicated
print " Num 2 Replicas on same Rack : %d" % num2Copies
print " Num 3 Replicas on same Rack : %d" % num3Copies
print " Status : %s" % status
print "***********************************************"
def getRack(ipAddress):
ipParts = ipAddress.split('.')
return ipParts[2]
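# getRack() above treats the third octet of a dotted-quad IP address as the
# rack id; fastFsck() uses it to flag replicas that share a rack. A tiny
# illustrative helper (not used by the script itself):
def sameRack(ipA, ipB):
    """Return True if two replica IP addresses map to the same rack id."""
    return getRack(ipA) == getRack(ipB)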
def dumpChunkServerInfo(host, port, config, chunkListTempFile):
    '''Given host, port read the config to get the list of all chunkDir
    directories. Do an ls to collect the chunkID <-> chunkSize mapping on host'''
sections = config.sections()
for s in sections:
if (s == 'metaserver'):
continue
node = config.get(s, 'node')
node = socket.gethostbyname(node)
if (node != host):
continue
else:
chunkDir = config.get(s, 'chunkDir')
chunkDirs = chunkDir.split(' ')
for dir in chunkDirs:
command = "ssh %s ls -l %s" % (node, dir)
for line in os.popen(command).readlines():
line = line.rstrip('\n')
lineParts = line.split()
if (len(lineParts) == 9):
if ("lost+found" == lineParts[8]):
continue
chunkSize = lineParts[4]
chunkIDParts = lineParts[8].split('.')
chunkID = chunkIDParts[1]
fileID = chunkIDParts[0]
chunkListTempFile.write("%s %s %s %s %s\n" % (chunkID, fileID, chunkSize, host, port))
break
if __name__ == '__main__':
(opts, args) = getopt.getopt(sys.argv[1:], "f:vm:srb:n:ldh", ["file=", "verbose", "machines=", "checksize", "replicacheck", "builddir=", "networkdef=","lostfound","delete", "help"])
fileName = ""
now = time.localtime(time.time())
mytime = time.strftime("%y-%m-%d-%H-%M", now)
dumpMetaFile = "./chunkmap.txt"+mytime
outFile = "./chunkListOutFile"
missingBlocksFile = "./file_missing_blocks.txt"
machinesFile = ""
metaServerHost = ""
metaServerPort = 0
metaRunDir = ""
replicaCheckFlag = 0
buildDir = ""
kfsCpDir = "./checkpointdir"
networkDefFile = ""
emptyChunkSize = 1
fast = 0
verbose = 0
lostFound = 0
delete = 0
if not opts:
usage()
print "No options specified"
sys.exit(1)
for (o, a) in opts:
if o in ("-h", "--help"):
usage()
sys.exit(2)
if o in ("-f", "--file"):
fileName = a
elif o in ("-v", "--verbose"):
verbose = 1
elif o in ("-m", "--machines"):
machinesFile = a
elif o in ("-s", "--checksize"):
emptyChunkSize = 0
elif o in ("-r", "--replicacheck"):
replicaCheckFlag = 1
elif o in ("-b", "--builddir"):
buildDir = a
elif o in ("-n", "--networkdef"):
networkDefFile = a
elif o in ("-l", "--lostfound"):
lostFound = 1
elif o in ("-d", "--delete"):
delete = 1
if not os.path.exists(fileName):
print "Config file %s : doesn't exist\n" % fileName
sys.exit(1)
config = ConfigParser()
config.readfp(open(fileName, 'r'))
if machinesFile != "":
readChunkserversFile(machinesFile)
if (replicaCheckFlag == 1) and ((buildDir == "") or \
(networkDefFile == "")):
usage()
print "Missing Replica Checker options"
sys.exit(1)
if ((lostFound == 1) and (delete == 1)):
usage()
print "Please specify either --lostfound or --delete not both"
sys.exit(1)
sections = config.sections()
for s in sections:
if (s == 'metaserver'):
metaServerHost = config.get(s, 'node')
metaServerPort = int(config.get(s, 'baseport'))
metaRunDir = config.get(s, 'rundir')
    # MetaServerLocation. For now we assume we run it on the metaserver host.
metaServer = ServerLocation(node=metaServerHost, port=metaServerPort)
# Download meta server chunk dump
defaultMetaFile = metaRunDir + "/chunkmap.txt"
defaultCheckPoint = metaRunDir + "/bin/kfscp/latest"
print "Begin ChunkServerMap dump to %s on %s" % (defaultMetaFile, metaServerHost)
dumpMetaServerChunkMap(metaServer, dumpMetaFile, defaultMetaFile, defaultCheckPoint)
print "End ChunkServerMap dump to %s on %s" % (defaultMetaFile, metaServerHost)
if (replicaCheckFlag == 0):
fast = 1
if (fast == 1):
# Check fast fsck by looking at chunk map dump.
# Do not ping chunkservers or invoke replicachecker
print "Executing fast fsck parsing %s" % dumpMetaFile
fastFsck(dumpMetaFile, mytime, verbose)
sys.exit(0)
# ping to get list of Upservers
ping(metaServer)
print "Done pinging metaserver"
# Log details about chunk servers to a file
command = "rm ./chunkListTempFile";
os.system(command)
chunkListTempFile = open ("./chunkListTempFile", "w")
if (emptyChunkSize == 0):
# We ping chunk servers only if we need chunk size info
# For each upServer, collect chunkID->chunkSize and update gChunkMap
for upServer in gUpServers:
print "Listing chunk server %s, %s" % (upServer.host, upServer.port)
dumpChunkServerInfo(upServer.host, upServer.port, config, chunkListTempFile)
chunkListTempFile.close()
# Sort file and merge them
command = "sort -n -T . ./chunkListTempFile > ./chunkListTempFile.sort";
os.system(command)
command = "mv ./chunkListTempFile.sort ./chunkListTempFile";
os.system(command)
# Save final output to file
mergeAndSaveFile(dumpMetaFile, './chunkListTempFile', outFile)
print "Generated chunk map file : %s" % outFile
# If replicaCheckFlag is set, run replica checker using outFile
if (replicaCheckFlag == 1):
print "Running replica checker"
replicaChecker = buildDir + "/bin/emulator/replicachecker"
#command = "%s -c ./checkpointdir/ \
# -n ~/work/kfs/networkdef \
# -b %s" % (replicaChecker, outFile)
command = "%s -c %s -n %s -b %s" % (replicaChecker, kfsCpDir, networkDefFile, outFile)
if (emptyChunkSize == 0):
command = command + " -s "
if (verbose == 1):
command = command + " -v "
print command+"\n"
for line in os.popen(command).readlines():
print line.rstrip('\n')
# If --lostfound option specified, move files with missing blocks to
# /lost+found
# If --delete option specified, move files with missing blocks
if ((lostFound == 1) or (delete == 1)):
if not os.path.exists(missingBlocksFile):
sys.exit(0)
missingBlocksFiles = open(missingBlocksFile, "r")
kfsShell = buildDir + "/bin/tools/kfsshell"
while missingBlocksFiles:
line = missingBlocksFiles.readline()
if not line:
break
line = line.rstrip('\n')
if (lostFound == 1):
print "Moving %s to /lost+found%s" % (line, line)
command = "%s -s %s -p %s -q mkdir /lost+found" % (kfsShell, metaServer.node, metaServer.port)
os.system(command)
command = "%s -s %s -p %s -q mv %s /lost+found%s" % (kfsShell, metaServer.node, metaServer.port, line, line)
os.system(command)
elif (delete == 1):
print "Deleting %s" % (line)
command = "%s -s %s -p %s -q rm %s" % (kfsShell, metaServer.node, metaServer.port, line)
os.system(command)
sys.exit(0)
# Django settings for ancfindersite project.
import sys
sys.path.append("lib")
import os.path
RECAPTCHA_PUBLIC_KEY = '6LeYAO8SAAAAALEZqtnk4qm7hoh8Iwv_h4lZ3lSe'
RECAPTCHA_PRIVATE_KEY = '6LeYAO8SAAAAAICslEpPIpmMmkFiuNs_hrAzSRxx'
environment_file = '/home/ancfinder/environment.yaml'
if not os.path.exists(environment_file):
# Settings for local (not public) deployments.
print "Running a local deployment..."
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# For a simple setup when debugging, we'll hard-code these values.
SECRET_KEY = '7^^6oohvb%oc3$&4z^#vplkp(!@dy24nm$d6a2g9^w#imqpme8'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'database.sqlite').replace('\\','/'),
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = ["*"]
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../static') + '/'
else:
# Settings for a public deployment.
DEBUG = False
TEMPLATE_DEBUG = False
import yaml
with open(environment_file) as f:
env = yaml.load(f)
SECRET_KEY = env['DJANGO_SECRET_KEY']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': env['DATABASE_HOST'],
'PORT': int(env['DATABASE_PORT']),
'NAME': env['DATABASE_NAME'],
'USER': env['DATABASE_USERNAME'],
'PASSWORD': env['DATABASE_PASSWORD'],
}
}
ADMINS = env['ADMINS']
MANAGERS = ADMINS
EMAIL_HOST = env['SMTP_HOST']
EMAIL_HOST_USER = env['SMTP_USER']
EMAIL_HOST_PASSWORD = env['SMTP_PASSWORD']
EMAIL_USE_TLS = True
ALLOWED_HOSTS = ["*"] # anything unexpected will be filtered out by the http server
OPENID_TEMP_FOLDER = "/tmp/openid-ancfinder"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = env["STATIC_ROOT"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (os.path.join(os.path.dirname(__file__), 'static'),
#'/static/',
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ancfindersite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'ancfindersite.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
'django.core.context_processors.request',
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
'ancfindersite.views.TemplateContextProcessor',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
'bootstrapform',
'tinymce',
'registration',
'emailverification',
'ancfindersite',
'annotator',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
APP_NICE_SHORT_NAME = "ANCFinder.org"
SERVER_EMAIL = "ANCFinder.org <[email protected]>" # From: address on verification emails
REGISTRATION_ASK_USERNAME = True
SITE_ROOT_URL = "http://www.ancfinder.org"
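# --------------------------------------------------------------------------
# Illustration only (not part of the original settings): a sketch of the keys
# this module reads from /home/ancfinder/environment.yaml when running a
# public deployment. The key names mirror the env[...] lookups above; every
# value below is a placeholder.
#
#   DJANGO_SECRET_KEY: "a-long-random-string"
#   DATABASE_HOST: "localhost"
#   DATABASE_PORT: 5432
#   DATABASE_NAME: "ancfinder"
#   DATABASE_USERNAME: "ancfinder"
#   DATABASE_PASSWORD: "secret"
#   ADMINS: [["Site Admin", "[email protected]"]]
#   SMTP_HOST: "smtp.example.org"
#   SMTP_USER: "mailer"
#   SMTP_PASSWORD: "secret"
#   STATIC_ROOT: "/home/ancfinder/static/"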
|
|
# coding=utf-8
"""
Persistence.
"""
# Copyright (c) 2015 Stefan Braun
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and
# associated documentation files (the "Software"), to deal in the Software
# without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to
# whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
from tkinter import messagebox
import postgresql.driver.dbapi20 as dbapi
from postgresql.exceptions import UniqueError
from .cache import LRUCache
from .config import get_configuration
from .group import Group
from .picture import Picture
from .tag import Tag
_TAG_CACHE = LRUCache(get_configuration('cache.tags', 1000))
_PICTURE_CACHE = LRUCache(get_configuration('cache.pictures', 20000))
_GROUP_CACHE = LRUCache(get_configuration('cache.groups', 1000))
# This module global variable will hold the Persistence instance.
_DB = None
class UnknownEntityException(Exception):
"""Raised if requested entity does not exist."""
class DuplicateException(Exception):
"""Raised if duplicate objects shall be persisted."""
def __init__(self, duplicate, caused_by=None):
"""Initialize exception.
:param duplicate: duplicate item.
:param caused_by: the exception causing this one.
"""
super().__init__(duplicate)
self.duplicate = duplicate
self.caused_by = caused_by
class DBParameters:
"""Parameters describing database."""
def __init__(self, db_name, db_user, db_passwd, db_port):
self.name = db_name
self.user = db_user
self.passwd = db_passwd
self.port = db_port
@classmethod
def from_configuration(cls):
"""Create parameter instance based on configuration."""
name = get_configuration('db_name')
user = get_configuration('db_user')
passwd = get_configuration('db_passwd')
port = get_configuration('db_port')
return DBParameters(name, user, passwd, port)
def db_params():
"""Provide database parameters.
:return: parameters.
:rtype: DBParameters
"""
if _DB is None:
return None
return _DB.db_params
def create_db(db_=None):
"""Set the database to use.
:param db_: parameters or None to use configuration.
:type db_: DBParameters
:return: persistence instance.
:rtype: Persistence
"""
global _DB
if db_ is None:
db_ = DBParameters.from_configuration()
_DB = Persistence(db_)
def get_db():
"""Get connected persistence instance."""
if _DB is None:
# use configured database.
create_db()
return _DB
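# Usage sketch (illustrative, not part of the module): callers obtain the
# shared Persistence instance via get_db() instead of constructing one
# directly, e.g.
#
#   db = get_db()                          # connects using configured params
#   tag = db.retrieve_tag_by_name('x')     # 'x' is a made-up tag name
#   if tag is None:
#       db.add_tag(...)                    # may raise DuplicateException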
class Persistence:
"""Implementation of persistence."""
def __init__(self, db_parameters):
"""Initialize persistence mechanism.
:param db_parameters: database parameters.
:type db_parameters: DBParameters
"""
self.logger = logging.getLogger('picdb.db')
self.db_params = db_parameters
self.conn = None
self.connect()
def connect(self):
"""Connect to database."""
self.logger.debug('connecting to database ...')
self.conn = dbapi.connect(user=self.db_params.user,
database=self.db_params.name,
port=self.db_params.port,
password=self.db_params.passwd)
def close(self):
"""Close database."""
self.conn.close()
self.logger.debug('database connection closed.')
def execute_sql(self, stmt_, *args):
"""Execute the given SQL statement with arguments."""
try:
stmt = self.conn.prepare(stmt_)
try:
stmt(*args)
self.conn.commit()
return True
except UniqueError as uq_err:
self.conn.rollback()
self.logger.debug('duplicate: %s', stmt)
raise uq_err
except Exception as exc: # noqa
self.conn.rollback()
messagebox.showerror(title='Database Error',
message='{}'.format(exc))
return False
# -------- group related
def add_group(self, group):
"""Add a new group.
:param group: the group to add
:type group: Group
"""
self.logger.debug("Add group to DB: %s", group.name)
stmt = '''INSERT INTO groups (identifier, description, parent)
VALUES ($1, $2, $3)'''
parent = group.parent.key if group.parent is not None else None
try:
self.execute_sql(stmt, group.name, group.description, parent)
except UniqueError as uq_err:
raise DuplicateException(group, uq_err)
def update_group(self, series):
"""Update group record."""
self.logger.debug("Update series: %s", series.name)
stmt = "UPDATE groups SET identifier=$1, description=$2, " \
"parent=$3 " \
"WHERE id=$4"
self.execute_sql(stmt, series.name,
series.description,
series.parent.key if series.parent is not None else
None,
series.key)
def delete_group(self, group_):
"""Delete group and picture assignments."""
stmt_pics = """DELETE FROM picture2group WHERE "group"=$1"""
stmt_grp = "DELETE FROM groups WHERE id=$1"
self.execute_sql(stmt_pics, group_.key)
self.execute_sql(stmt_grp, group_.key)
def add_picture_to_group(self, picture, group_):
"""Add picture to a group.
:param picture: the picture
:type picture: Picture
:param group_: the group
:type group_: Group
"""
self.logger.debug(
"Adding picture %s to group_ %s.", str(picture), str(group_))
stmt = '''INSERT INTO picture2group VALUES($1, $2)'''
self.execute_sql(stmt, picture.key, group_.key)
def remove_picture_from_group(self, picture, group):
"""Remove picture from a series.
:param picture: the picture
:type picture: Picture
:param group: the group
:type group: Group
"""
self.logger.debug(
"Removing picture %s from series %s.", str(picture), str(group))
stmt = '''DELETE FROM picture2group WHERE picture=$1 AND "group"=$2'''
self.execute_sql(stmt, picture.key, group.key)
def retrieve_group_by_key(self, key):
"""Retrieve series by key.
:param key: the id of the series
:type key: int
:return: group.
:rtype: Group
"""
if key in _GROUP_CACHE:
return _GROUP_CACHE.get(key)
self.logger.debug("retrieve_group_by_key(%s)", str(key))
stmt = 'SELECT id, identifier, description, parent ' \
'FROM groups WHERE "id"=$1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(key)
if result is None:
return None
row = result[0]
return self._create_group(*(list(row)))
def retrieve_groups_by_name(self, name):
"""Retrieve groups by name.
:param name: the name of the group
:type name: str
:return: groups.
:rtype: [Group]
"""
self.logger.debug("retrieve_groups_by_name(%s)", name)
stmt = 'SELECT id, identifier, description, parent ' \
'FROM groups WHERE "identifier"=$1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(name)
records = [self._create_group(*row) for row in result]
return list(records)
def retrieve_groups_by_name_segment(self, name):
"""Retrieve groups by name segment using wildcards.
Example: name: 'a%'
:param name: the name of the group
:type name: str
:return: groups.
:rtype: [Group]
"""
self.logger.debug("retrieve_groups_by_name_segment(%s)", name)
stmt = 'SELECT id, identifier, description, parent ' \
'FROM groups WHERE "identifier"LIKE $1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(name)
records = [self._create_group(*row) for row in result]
return list(records)
def retrieve_all_groups(self):
"""Get all groups from database.
:return: groups.
:rtype: [Group]
"""
self.logger.debug("retrieve_all_groups()")
stmt = 'SELECT id, identifier, description, parent FROM groups'
stmt_ = self.conn.prepare(stmt)
result = stmt_()
records = [self._create_group(*row) for row in result]
return list(records)
def retrieve_pictures_for_group(self, group_):
"""Retrieve pictures assigned to given group.
:param group_: given group.
:type group_: Group
:return: pictures assigned to group
:rtype: [Picture]
"""
self.logger.debug("retrieve_pictures_for_group(%s)", str(group_))
stmt = 'SELECT id, identifier, path, description FROM pictures ' \
'WHERE id IN (SELECT ' \
'picture FROM picture2group WHERE "group"=$1)'
stmt_ = self.conn.prepare(stmt)
result = stmt_(group_.key)
records = [self._create_picture(*row) for row in result]
return list(records)
def retrieve_groups_for_picture(self, picture):
"""Retrieve all groups for given picture.
:param picture: the id of the picture
:return: groups.
:rtype: [Group]
"""
self.logger.debug("retrieve_groups_for_picture(%s)", str(picture))
stmt = 'SELECT id, identifier, description, parent FROM groups ' \
'WHERE id IN (SELECT ' \
'"group" FROM picture2group WHERE picture=$1)'
stmt_ = self.conn.prepare(stmt)
result = stmt_(picture.key)
records = [self._create_group(*row) for row in result]
return list(records)
def number_of_groups(self):
"""Provide number of groups currently in database."""
self.logger.debug("number_of_groups()")
stmt = 'SELECT count(*) FROM groups'
return self.conn.query.first(stmt)
def _create_group(self, key, identifier, description, parent_id):
"""Create a Group instance from raw database record info.
Creates parent object if required.
"""
try:
return _GROUP_CACHE.get(key)
except KeyError:
self.logger.debug(
"_create_group(%s, %s, ...)", str(key), identifier)
if parent_id is not None:
parent = self.retrieve_group_by_key(parent_id)
else:
parent = None
group = Group(key, identifier, description, parent=parent)
pictures = self.retrieve_pictures_for_group(group)
group.pictures = pictures
_GROUP_CACHE.put(key, group)
return group
# ------ picture related
def add_picture(self, picture):
"""Add a new picture.
:param picture: picture to add
:type picture: Picture
"""
self.logger.debug("add_picture(%s)", str(picture))
stmt = "INSERT INTO pictures (identifier, path, description) VALUES " \
"($1, $2, $3)"
try:
self.execute_sql(stmt, picture.name,
picture.path, picture.description)
except UniqueError as uq_err:
raise DuplicateException(picture, uq_err)
def update_picture(self, picture):
"""Update picture record."""
self.logger.debug("update_picture(%s)", str(picture))
stmt = "UPDATE pictures SET identifier=$1, path=$2, " \
"description=$3 WHERE id=$4"
self.execute_sql(stmt, picture.name,
picture.path,
picture.description,
picture.key)
def delete_picture(self, picture):
"""Delete given picture. Does also remove tag assignments."""
self.logger.debug("delete_picture(%s)", str(picture))
stmt_tags = "DELETE FROM picture2tag WHERE picture=$1"
stmt_pic = "DELETE FROM pictures WHERE id=$1"
self.execute_sql(stmt_tags, picture.key)
self.execute_sql(stmt_pic, picture.key)
def add_tag_to_picture(self, picture, tag):
"""Add tag to a picture.
:param picture: the picture
:type picture: Picture
:param tag: the tag
:type tag: Tag
"""
self.logger.debug(
"add_tag_to_picture(%s, %s)", repr(picture), repr(tag))
stmt = '''INSERT INTO picture2tag VALUES($1, $2)'''
self.execute_sql(stmt, picture.key, tag.key)
def remove_tag_from_picture(self, picture, tag):
"""Remove tag from given picture.
:param picture: the picture
:type picture: Picture
:param tag: the tag
:type tag: Tag
"""
self.logger.debug(
"remove_tag_from_picture(%s, %s)", repr(picture), repr(tag))
stmt = '''DELETE FROM picture2tag WHERE picture=$1 AND tag=$2'''
self.execute_sql(stmt, picture.key, tag.key)
def retrieve_picture_by_key(self, key):
"""Retrieve picture by key.
:param key: the id of the picture
:type key: int
:return: picture.
:rtype: Picture
"""
if key in _PICTURE_CACHE:
return _PICTURE_CACHE.get(key)
self.logger.debug("retrieve_picture_by_key(%s)", repr(key))
stmt = 'SELECT id, identifier, path, description ' \
'FROM pictures WHERE "id"=$1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(key)
if result is None:
return None
row = result[0]
return self._create_picture(*(list(row)))
def retrieve_picture_by_path(self, path):
"""Retrieve picture by path.
:param path: the path to the picture
:type path: str
:return: picture.
:rtype: Picture
"""
self.logger.debug('retrieve_picture_by_path(%s)', path)
stmt = 'SELECT id, identifier, path, description ' \
'FROM pictures WHERE "path"=$1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(path)
if result is None:
return None
row = result[0]
return self._create_picture(*(list(row)))
def retrieve_filtered_pictures(self, path, limit, groups, tags):
"""Retrieve picture by path segment using wildcards.
Example: Path: '%jpg'
:param path: the path to the picture
:type path: str
:param limit: maximum number of records to retrieve
:type limit: int
:param groups: limit result set based on given list of groups
:type groups: [Group]
:param tags: limit result set based on given list of tags
:type tags: [Tag]
:return: pictures matching given path.
:rtype: [Picture]
"""
self.logger.debug(
"retrieve_filtered_pictures(%s, %s, ...)", path, str(limit))
stmt_p = 'SELECT DISTINCT id, identifier, path, description ' \
'FROM pictures WHERE ' \
'"path" LIKE $1'
stmt_s = 'SELECT DISTINCT id, identifier, path, description ' \
'FROM pictures, picture2group WHERE ' \
'pictures.id=picture2group.picture AND ' \
'picture2group."group"={}'
stmt_t = 'SELECT DISTINCT id, identifier, path, description ' \
'FROM pictures, picture2tag WHERE ' \
'pictures.id=picture2tag.picture AND picture2tag.tag={}'
stmt = stmt_p
for item in groups:
stmt += ' INTERSECT ' + stmt_s.format(str(item.key))
for item in tags:
stmt += ' INTERSECT ' + stmt_t.format(str(item.key))
if limit is not None:
stmt += ' LIMIT {}'.format(limit)
self.logger.debug(stmt)
stmt_ = self.conn.prepare(stmt)
result = stmt_(path)
records = [self._create_picture(*row) for row in result]
records.sort()
return list(records)
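# Worked illustration (comment only): for a hypothetical call such as
#   retrieve_filtered_pictures('%.jpg', 50, [group_a], [tag_b])
# with group_a.key == 3 and tag_b.key == 7, the statement assembled above is
# roughly:
#   SELECT DISTINCT id, identifier, path, description FROM pictures
#     WHERE "path" LIKE $1
#   INTERSECT SELECT ... WHERE pictures.id=picture2group.picture
#     AND picture2group."group"=3
#   INTERSECT SELECT ... WHERE pictures.id=picture2tag.picture
#     AND picture2tag.tag=7
#   LIMIT 50
# and $1 is bound to '%.jpg' when the prepared statement is executed.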
def retrieve_tags_for_picture(self, picture):
"""Retrieve all tags for given picture.
:param picture: the picture to get the tags for
:type picture: Picture
:return: tags.
:rtype: [Tag]
"""
self.logger.debug("retrieve_tags_for_picture(%s)", repr(picture))
stmt = 'SELECT id, identifier, description, parent ' \
'FROM tags WHERE id IN (SELECT tag ' \
'FROM picture2tag WHERE picture=$1)'
stmt_ = self.conn.prepare(stmt)
result = stmt_(picture.key)
records = [self._create_tag(*row) for row in result]
return list(records)
def retrieve_pictures_by_tag(self, tag_):
"""Retrieve pictures which have tag assigned.
:param tag_: pictures shall have assigned this tag
:type tag_: Tag
:return: pictures with tag
:rtype: [Picture]
"""
self.logger.debug("retrieve_pictures_by_tag(%s)", repr(tag_))
stmt = 'SELECT id, identifier, path, description FROM pictures ' \
'WHERE id IN (SELECT ' \
'picture FROM picture2tag WHERE tag=$1)'
stmt_ = self.conn.prepare(stmt)
result = stmt_(tag_.key)
records = [self._create_picture(*row) for row in result]
return list(records)
def number_of_pictures(self):
"""Provide number of pictures currently in database."""
self.logger.debug('number_of_pictures()')
stmt = 'SELECT count(*) FROM pictures'
return self.conn.query.first(stmt)
def _create_picture(self, key, identifier, path, description):
"""Create a Picture instance from raw database record info.
Creates parent object if required.
"""
try:
return _PICTURE_CACHE.get(key)
except KeyError:
self.logger.debug(
"_create_picture(%s, %s, ...)", str(key), identifier)
picture = Picture(key, identifier, path, description)
tags = self.retrieve_tags_for_picture(picture)
picture.tags = tags
_PICTURE_CACHE.put(key, picture)
return picture
# ------ tag related
def add_tag(self, tag):
"""Add a new tag.
:param tag: tag to add
:type tag: Tag
"""
self.logger.debug("add_tag(%s)", repr(tag))
stmt = "INSERT INTO tags(identifier, description, parent) VALUES (" \
"$1, $2, $3)"
parent = tag.parent.key if tag.parent is not None else None
try:
self.execute_sql(stmt, tag.name, tag.description, parent)
except UniqueError as uq_err:
raise DuplicateException(tag, uq_err)
def update_tag(self, tag):
"""Update tag record."""
self.logger.debug("update_tag(%s)", repr(tag))
stmt = "UPDATE tags SET identifier=$1, description=$2, parent=$3 " \
"WHERE id=$4"
self.execute_sql(stmt, tag.name,
tag.description,
tag.parent.key if tag.parent is not None
else None,
tag.key)
def delete_tag(self, tag_):
"""Delete given tag and all its assignments."""
self.logger.debug("delete_tag(%s)", repr(tag_))
stmt = "DELETE FROM tags WHERE id=$1"
self.execute_sql(stmt, tag_.key)
def number_of_tags(self):
"""Provide number of tags currently in database."""
self.logger.debug("number_of_tags()")
stmt = 'SELECT count(*) FROM tags'
return self.conn.query.first(stmt)
def retrieve_all_tags(self):
"""Get all tags from database.
:return: tags.
:rtype: [Tag]
"""
self.logger.debug("retrieve_all_tags()")
stmt = 'SELECT id, identifier, description, parent FROM tags'
stmt_ = self.conn.prepare(stmt)
records = [self._create_tag(*row) for row in stmt_()]
return list(records)
def retrieve_tag_by_name(self, name):
"""Retrieve tag by name.
:param name: the name of the tag to retrieve
:type name: str
:return: tag or None if name is unknown.
:rtype: Tag
"""
self.logger.debug("retrieve_tag_by_name(%s)", name)
stmt = 'SELECT id, identifier, description, parent ' \
'FROM tags WHERE "identifier"=$1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(name)
if result is None:
return None
return self._create_tag(*(list(result[0])))
def retrieve_tags_by_name_segment(self, name):
"""Retrieve tags by name segment using wildcards.
Example: name: 'a%'
:param name: the name of the tag
:type name: str
:return: tags.
:rtype: [Tag]
"""
self.logger.debug("retrieve_tags_by_name_segment(%s)", name)
stmt = 'SELECT id, identifier, description, parent ' \
'FROM tags WHERE "identifier"LIKE $1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(name)
records = [self._create_tag(*row) for row in result]
return list(records)
def retrieve_tag_by_key(self, key):
"""Retrieve tag by key.
:param key: the id of the tag
:type key: int
:return: tag.
:rtype: Tag
"""
if key in _TAG_CACHE:
return _TAG_CACHE.get(key)
self.logger.debug("retrieve_tag_by_key(%s)", str(key))
stmt = 'SELECT id, identifier, description, parent FROM tags WHERE ' \
'"id"=$1'
stmt_ = self.conn.prepare(stmt)
result = stmt_(key)
if result is None:
return None
row = result[0]
return self._create_tag(*(list(row)))
def _create_tag(self, key, identifier, description, parent_id):
"""Create a Tag instance from raw database record info.
Creates parent object if required.
"""
try:
return _TAG_CACHE.get(key)
except KeyError:
self.logger.debug(
"_create_tag(%s, %s), ...", str(key), identifier)
if parent_id is not None:
parent = self.retrieve_tag_by_key(parent_id)
else:
parent = None
tag = Tag(key, identifier, description, parent=parent)
_TAG_CACHE.put(key, tag)
return tag
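# Usage sketch (illustrative only): duplicate inserts surface as
# DuplicateException wrapping the driver's UniqueError, so callers can do
# something like
#
#   try:
#       get_db().add_tag(some_tag)        # some_tag is a made-up Tag instance
#   except DuplicateException as dup:
#       logging.getLogger('picdb.db').warning('already stored: %s',
#                                             dup.duplicate)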
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras timeseries dataset utilities."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.preprocessing.timeseries_dataset_from_array', v1=[])
def timeseries_dataset_from_array(
data,
targets,
sequence_length,
sequence_stride=1,
sampling_rate=1,
batch_size=128,
shuffle=False,
seed=None,
start_index=None,
end_index=None):
"""Creates a dataset of sliding windows over a timeseries provided as array.
This function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
length of the sequences/windows, spacing between two sequence/windows, etc.,
to produce batches of timeseries inputs and targets.
Arguments:
data: Numpy array or eager tensor
containing consecutive data points (timesteps).
Axis 0 is expected to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have the same length as `data`. `targets[i]` should be the target
corresponding to the window that starts at index `i`
(see example 2 below).
Pass None if you don't have target data (in this case the dataset will
only yield the input data).
sequence_length: Length of the output sequences (in number of timesteps).
sequence_stride: Period between successive output sequences.
For stride `s`, output samples would
start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc.
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i], data[i + r], ... data[i + sequence_length]`
are used to create a sample sequence.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
seed: Optional int; random seed for shuffling.
start_index: Optional int; data points earlier (exclusive)
than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Optional int; data points later (exclusive) than `end_index`
will not be used in the output sequences.
This is useful to reserve part of the data for test or validation.
Returns:
A tf.data.Dataset instance. If `targets` was passed, the dataset yields
tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields
only `batch_of_sequences`.
Example 1:
Consider indices `[0, 1, ... 99]`.
With `sequence_length=10, sampling_rate=2, sequence_stride=3`,
`shuffle=False`, the dataset will yield batches of sequences
composed of the following indices:
```
First sequence: [0 2 4 6 8 10 12 14 16 18]
Second sequence: [3 5 7 9 11 13 15 17 19 21]
Third sequence: [6 8 10 12 14 16 18 20 22 24]
...
Last sequence: [78 80 82 84 86 88 90 92 94 96]
```
In this case the last 3 data points are discarded since no full sequence
can be generated to include them: the implementation reserves
`sequence_length * sampling_rate` timesteps after each start index, so the
next candidate sequence (which would have started at index 81) is not
generated.
Example 2: temporal regression. Consider an array `data` of scalar
values, of shape `(steps,)`. To generate a dataset that uses the past 10
timesteps to predict the next timestep, you would use:
```python
input_data = data[:-10]
targets = data[10:]
dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
input_data, targets, sequence_length=10)
for batch in dataset:
inputs, targets = batch
assert np.array_equal(inputs[0], data[:10]) # First sequence: steps [0-9]
assert np.array_equal(targets[0], data[10]) # Corresponding target: step 10
break
```
"""
# Validate the shape of data and targets
if targets is not None and len(targets) != len(data):
raise ValueError('Expected data and targets to have the same number of '
'time steps (axis 0) but got '
'shape(data) = %s; shape(targets) = %s.' %
(data.shape, targets.shape))
if start_index and (start_index < 0 or start_index >= len(data)):
raise ValueError('start_index must be higher than 0 and lower than the '
'length of the data. Got: start_index=%s '
'for data of length %s.' % (start_index, len(data)))
if end_index:
if start_index and end_index <= start_index:
raise ValueError('end_index must be higher than start_index. Got: '
'start_index=%s, end_index=%s.' %
(start_index, end_index))
if end_index >= len(data):
raise ValueError('end_index must be lower than the length of the data. '
'Got: end_index=%s' % (end_index,))
if end_index <= 0:
raise ValueError('end_index must be higher than 0. '
'Got: end_index=%s' % (end_index,))
# Validate strides
if sampling_rate <= 0 or sampling_rate >= len(data):
raise ValueError(
'sampling_rate must be higher than 0 and lower than '
'the length of the data. Got: '
'sampling_rate=%s for data of length %s.' % (sampling_rate, len(data)))
if sequence_stride <= 0 or sequence_stride >= len(data):
raise ValueError(
'sequence_stride must be higher than 0 and lower than '
'the length of the data. Got: sequence_stride=%s '
'for data of length %s.' % (sequence_stride, len(data)))
if start_index is None:
start_index = 0
if end_index is None:
end_index = len(data)
# Determine the lowest dtype to store start positions (to lower memory usage).
num_seqs = end_index - start_index - (sequence_length * sampling_rate) + 1
if num_seqs < 2147483647:
index_dtype = 'int32'
else:
index_dtype = 'int64'
# Generate start positions
start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype)
if shuffle:
if seed is None:
seed = np.random.randint(1e6)
rng = np.random.RandomState(seed)
rng.shuffle(start_positions)
sequence_length = math_ops.cast(sequence_length, dtype=index_dtype)
sampling_rate = math_ops.cast(sampling_rate, dtype=index_dtype)
positions_ds = dataset_ops.Dataset.from_tensors(start_positions).repeat()
# For each initial window position, generates indices of the window elements
indices = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(len(start_positions)), positions_ds)).map(
lambda i, positions: math_ops.range( # pylint: disable=g-long-lambda
positions[i],
positions[i] + sequence_length * sampling_rate,
sampling_rate),
num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = sequences_from_indices(data, indices, start_index, end_index)
if targets is not None:
indices = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(len(start_positions)), positions_ds)).map(
lambda i, positions: positions[i],
num_parallel_calls=dataset_ops.AUTOTUNE)
target_ds = sequences_from_indices(
targets, indices, start_index, end_index)
dataset = dataset_ops.Dataset.zip((dataset, target_ds))
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
return dataset
def sequences_from_indices(array, indices_ds, start_index, end_index):
dataset = dataset_ops.Dataset.from_tensors(array[start_index : end_index])
dataset = dataset_ops.Dataset.zip((dataset.repeat(), indices_ds)).map(
lambda steps, inds: array_ops.gather(steps, inds), # pylint: disable=unnecessary-lambda
num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
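# Usage sketch (illustrative; assumes TensorFlow is installed and reuses the
# data from Example 1 in the docstring above):
#
#   import numpy as np
#   data = np.arange(100)
#   ds = timeseries_dataset_from_array(
#       data, None, sequence_length=10, sequence_stride=3, sampling_rate=2,
#       batch_size=4, shuffle=False)
#   batch = next(iter(ds))
#   # batch[0] is [0 2 4 6 8 10 12 14 16 18], matching Example 1 above.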
|
|
""" interactive debugging with PDB, the Python Debugger. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pdb
import sys
from doctest import UnexpectedException
from _pytest import outcomes
from _pytest.config import hookimpl
def _validate_usepdb_cls(value):
try:
modname, classname = value.split(":")
except ValueError:
raise argparse.ArgumentTypeError(
"{!r} is not in the format 'modname:classname'".format(value)
)
try:
__import__(modname)
mod = sys.modules[modname]
# Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
parts = classname.split(".")
pdb_cls = getattr(mod, parts[0])
for part in parts[1:]:
pdb_cls = getattr(pdb_cls, part)
return pdb_cls
except Exception as exc:
raise argparse.ArgumentTypeError(
"could not get pdb class for {!r}: {}".format(value, exc)
)
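# Illustration (comment only): the validator resolves a dotted class path
# after the colon, so, assuming the named modules are importable,
#   _validate_usepdb_cls("pdb:Pdb")
# returns pdb.Pdb, and
#   _validate_usepdb_cls("IPython.terminal.debugger:TerminalPdb")
# returns IPython's TerminalPdb class.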
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--pdb",
dest="usepdb",
action="store_true",
help="start the interactive Python debugger on errors or KeyboardInterrupt.",
)
group._addoption(
"--pdbcls",
dest="usepdb_cls",
metavar="modulename:classname",
type=_validate_usepdb_cls,
help="start a custom interactive Python debugger on errors. "
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
)
group._addoption(
"--trace",
dest="trace",
action="store_true",
help="Immediately break when running each test.",
)
def pytest_configure(config):
pdb_cls = config.getvalue("usepdb_cls")
if not pdb_cls:
pdb_cls = pdb.Pdb
if config.getvalue("trace"):
config.pluginmanager.register(PdbTrace(), "pdbtrace")
if config.getvalue("usepdb"):
config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
pytestPDB._saved.append(
(pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config, pytestPDB._pdb_cls)
)
pdb.set_trace = pytestPDB.set_trace
pytestPDB._pluginmanager = config.pluginmanager
pytestPDB._config = config
pytestPDB._pdb_cls = pdb_cls
# NOTE: not using pytest_unconfigure, since it might get called although
# pytest_configure was not (if another plugin raises UsageError).
def fin():
(
pdb.set_trace,
pytestPDB._pluginmanager,
pytestPDB._config,
pytestPDB._pdb_cls,
) = pytestPDB._saved.pop()
config._cleanup.append(fin)
class pytestPDB(object):
""" Pseudo PDB that defers to the real pdb. """
_pluginmanager = None
_config = None
_pdb_cls = pdb.Pdb
_saved = []
_recursive_debug = 0
@classmethod
def _is_capturing(cls, capman):
if capman:
return capman.is_capturing()
return False
@classmethod
def _init_pdb(cls, *args, **kwargs):
""" Initialize PDB debugging, dropping any IO capturing. """
import _pytest.config
if cls._pluginmanager is not None:
capman = cls._pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend(in_=True)
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
if cls._recursive_debug == 0:
# Handle header similar to pdb.set_trace in py37+.
header = kwargs.pop("header", None)
if header is not None:
tw.sep(">", header)
else:
capturing = cls._is_capturing(capman)
if capturing:
if capturing == "global":
tw.sep(">", "PDB set_trace (IO-capturing turned off)")
else:
tw.sep(
">",
"PDB set_trace (IO-capturing turned off for %s)"
% capturing,
)
else:
tw.sep(">", "PDB set_trace")
class _PdbWrapper(cls._pdb_cls, object):
_pytest_capman = capman
_continued = False
def do_debug(self, arg):
cls._recursive_debug += 1
ret = super(_PdbWrapper, self).do_debug(arg)
cls._recursive_debug -= 1
return ret
def do_continue(self, arg):
ret = super(_PdbWrapper, self).do_continue(arg)
if cls._recursive_debug == 0:
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
capman = self._pytest_capman
capturing = pytestPDB._is_capturing(capman)
if capturing:
if capturing == "global":
tw.sep(">", "PDB continue (IO-capturing resumed)")
else:
tw.sep(
">",
"PDB continue (IO-capturing resumed for %s)"
% capturing,
)
capman.resume()
else:
tw.sep(">", "PDB continue")
cls._pluginmanager.hook.pytest_leave_pdb(
config=cls._config, pdb=self
)
self._continued = True
return ret
do_c = do_cont = do_continue
def set_quit(self):
"""Raise Exit outcome when quit command is used in pdb.
This is a bit of a hack - it would be better if BdbQuit
could be handled, but this would require to wrap the
whole pytest run, and adjust the report etc.
"""
super(_PdbWrapper, self).set_quit()
if cls._recursive_debug == 0:
outcomes.exit("Quitting debugger")
def setup(self, f, tb):
"""Suspend on setup().
Needed after do_continue resumed, and entering another
breakpoint again.
"""
ret = super(_PdbWrapper, self).setup(f, tb)
if not ret and self._continued:
# pdb.setup() returns True if the command wants to exit
# from the interaction: do not suspend capturing then.
if self._pytest_capman:
self._pytest_capman.suspend_global_capture(in_=True)
return ret
_pdb = _PdbWrapper(**kwargs)
cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)
else:
_pdb = cls._pdb_cls(**kwargs)
return _pdb
@classmethod
def set_trace(cls, *args, **kwargs):
"""Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing."""
frame = sys._getframe().f_back
_pdb = cls._init_pdb(*args, **kwargs)
_pdb.set_trace(frame)
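# Illustration (comment only): after pytest_configure above has replaced
# pdb.set_trace with pytestPDB.set_trace, a plain breakpoint inside a test is
# routed through this wrapper and suspends IO capturing first, e.g.
#
#   def test_something():
#       import pdb; pdb.set_trace()   # enters pytestPDB.set_trace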
class PdbInvoke(object):
def pytest_exception_interact(self, node, call, report):
capman = node.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stdout.write(err)
_enter_pdb(node, call.excinfo, report)
def pytest_internalerror(self, excrepr, excinfo):
tb = _postmortem_traceback(excinfo)
post_mortem(tb)
class PdbTrace(object):
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(self, pyfuncitem):
_test_pytest_function(pyfuncitem)
yield
def _test_pytest_function(pyfuncitem):
_pdb = pytestPDB._init_pdb()
testfunction = pyfuncitem.obj
pyfuncitem.obj = _pdb.runcall
if "func" in pyfuncitem._fixtureinfo.argnames: # noqa
raise ValueError("--trace can't be used with a fixture named func!")
pyfuncitem.funcargs["func"] = testfunction
new_list = list(pyfuncitem._fixtureinfo.argnames)
new_list.append("func")
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
def _enter_pdb(node, excinfo, rep):
# XXX we re-use the TerminalReporter's terminalwriter
# because this seems to avoid some encoding related troubles
# for not completely clear reasons.
tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
tw.line()
showcapture = node.config.option.showcapture
for sectionname, content in (
("stdout", rep.capstdout),
("stderr", rep.capstderr),
("log", rep.caplog),
):
if showcapture in (sectionname, "all") and content:
tw.sep(">", "captured " + sectionname)
if content[-1:] == "\n":
content = content[:-1]
tw.line(content)
tw.sep(">", "traceback")
rep.toterminal(tw)
tw.sep(">", "entering PDB")
tb = _postmortem_traceback(excinfo)
rep._pdbshown = True
post_mortem(tb)
return rep
def _postmortem_traceback(excinfo):
if isinstance(excinfo.value, UnexpectedException):
# A doctest.UnexpectedException is not useful for post_mortem.
# Use the underlying exception instead:
return excinfo.value.exc_info[2]
else:
return excinfo._excinfo[2]
def _find_last_non_hidden_frame(stack):
i = max(0, len(stack) - 1)
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
i -= 1
return i
def post_mortem(t):
class Pdb(pytestPDB._pdb_cls, object):
def get_stack(self, f, t):
stack, i = super(Pdb, self).get_stack(f, t)
if f is None:
i = _find_last_non_hidden_frame(stack)
return stack, i
p = Pdb()
p.reset()
p.interaction(None, t)
if p.quitting:
outcomes.exit("Quitting debugger")
|
|
#!/usr/bin/python
"""
PinkWave is a pentesting tool for linux which can be used to test (web)servers with Python scripts and Selenium.
usage ./pinkwave.py -t [target] -e [pathToExploit] [parameters]
for a full list, start help:
./pinkwave.py --help
use automate.py if you would like to run multiple tests via a JSON file.
"""
# Importing external libs
import os,sys,time,json
from os.path import isfile
import argparse
# Importing own libs
from lib.Browser import Browser
from lib.Pentest import Pentest
from lib.ShellParse import ShellParse
from lib.Macro import Macro
from extensions.Util import Util,vdkException
import extensions.Http as Http
from lib.PyExploit import PyExploit
import lib.colors as colors
from lib.PyExploit import PyExploitException
# Import HTTP server for logger/bouncer
import server.pinkserver as pinkserver
# Start browser based on config.ini settings
browser = None
def get_browser():
global browser
if browser is None:
browser = Browser(Util.getConfig("browser"),Util.getConfig("ssl-verify"),Util.getConfig("timeout"),Util.getConfig("debug"))
return browser
"""Display logo art and exploit info"""
def status():
exploitsC, pyC = count_exploits()
print ('''
%s oo dP \033[1;m
%s 88 \033[1;m
%s 88d888b. dP 88d888b. 88 .dP dP dP dP .d8888b. dP .dP .d8888b. \033[1;m
%s 88' `88 88 88' `88 88888" 88 88 88 88' `88 88 d8' 88ooood8 \033[1;m
%s 88. .88 88 88 88 88 `8b. 88.88b.88' 88. .88 88 .88' 88. ... \033[1;m
%s 88Y888P' dP dP dP dP `YP 8888P Y8P `88888P8 8888P' `88888P' \033[1;m
%s 88 \033[1;m
%s dP \033[1;m
%s -- -- +=[ %d Payloads \033[1;m
%s -- -- +=[ %d Python Scripts \033[1;m
\033[1;91m[W] Make sure your current directory (pwd) is the same as pinkwave.py .\033[1;m
''' % (colors.PURPLE,colors.PURPLE,colors.PURPLE, colors.PURPLE, colors.PURPLE, colors.PURPLE, colors.PURPLE, colors.PURPLE, colors.PURPLE, exploitsC, colors.PURPLE, pyC))
hosts = {
"Bouncer": Util.getBouncer(),
"Logger": Util.getLogger()
}
for key in hosts.iterkeys():
if Http.is_ok(hosts[key]):
print "%s[^] %s is up. %s%s" % (colors.GREEN,key,hosts[key],colors.COLOR_END)
else:
print "%s[!] %s is down. %s%s" % (colors.RED,key,hosts[key],colors.COLOR_END)
print "%s[!] Some exploits can't start:\033[1;m" % colors.YELLOW
print "%s[!] PinkServer offline - bouncer(Direct POST requests) / logger(XSS)%s" % (colors.RED,colors.COLOR_END)
exit(1)
def argumentParser():
parser = argparse.ArgumentParser(description='PinkWave is a pentesting tool for linux which can be used to test (web)servers with Python scripts and Selenium. Control the browser with an easy to use API to perform actions and detect vulnerabilities.')
argeParseData = []
argeParseData.append({
"short": "-s",
"long": "--status",
"help": "Display status of PinkWave",
"required": False})
argeParseData.append({
"short": "-t",
"long": "--target",
"help": "(Required) Remote target host",
"required": False})
argeParseData.append({
"short": "-rn",
"long": "--requestNames",
"help": "POST/GET names, comma seperated",
"required": False})
argeParseData.append({
"short": "-r",
"long": "--request",
"help": "(optional) Specify request type (GET,POST or POST/DIRECT)",
"required": False})
argeParseData.append({
"short": "-e",
"long": "--exploits",
"help": "(Required) Path to python exploit script",
"required": False})
argeParseData.append({
"short": "-m",
"long": "--macros",
"help": "(optional) Path(s) to python macro script(s), comma seperated, runs before the exploit",
"required": False})
argeParseData.append({
"short": "-cr",
"long": "--creds",
"help": 'User credentials, comma seperated',
"required": False})
argeParseData.append({
"short": "-po",
"long": "--ports",
"help": 'Expected ports for port scan',
"required": False})
argeParseData.append({
"short": "-sl",
"long": "--ssl",
"help": 'SSL ports for SSL stripping/cipher testing',
"required": False})
argeParseData.append({
"short" : "-wl",
"long" : "--wordlist",
"help" : "Provide a path to a line seperated wordlist file",
"required" : False})
for apData in argeParseData:
parser.add_argument(apData['short'], apData['long'], help=apData['help'], required=apData['required'])
return parser,argeParseData
def displayLogging(target,reportId):
print "[#] Logging to %s/report-%s.csv" % (Util.getReportDir(target),str(int(reportId)))
print ""
def pinkwave_shell(argsDict = {},closeBrowser=True):
# Execute exploit script
timeStart = time.time()
pentest = None
browser = None  # keep defined for the error handlers and cleanup below
# Single commands
if isExploitInterface():
# Displays info about exploit script
options_shell(sys.argv[2])
elif isMacroInterface():
# Execute macros
browser = get_browser()
browser.driver.delete_all_cookies()
macros = Util.strToArray(sys.argv[2])
for m in macros:
Macro(m).start()
# Status command
elif isStatusInterface():
# Display pinkwave/server status
status()
# Multiple commands
else:
# Load parameters via dictionary or parameters
if len(argsDict) != 0:
shellParse = ShellParse(argsDict)
else:
parser, apData = argumentParser()
args = vars(parser.parse_args())
args.pop("status")
shellParse = ShellParse(args)
# Display info if not loaded via automate.py
if closeBrowser:
status()
displayLogging(shellParse.target,shellParse.reportId)
try:
if shellParse.exploits is not None:
name = shellParse.exploits
print ""
print "[%s]: %s%s%s" % (
Util.getConfig("browser"), colors.YELLOW,name, colors.COLOR_END)
v = vars(shellParse)
print json.dumps(v, sort_keys = False, indent = 4)
browser = get_browser()
browser.driver.delete_all_cookies()
pentest = Pentest().create(browser, shellParse)
if pentest.macros == [] and pentest.target is None:
print "(-t) target is missing"
elif pentest.macros == [] and pentest.exploits == []:
print "(-e) exploits are required"
else:
pentest.start()
except vdkException as e:
print "%s[!] %s%s" % (colors.RED,e ,colors.COLOR_END)
if closeBrowser and browser is not None:
browser.close()
return False
except PyExploitException as e:
print "%s[!] PyExploitException error: %s%s" % (colors.RED,e ,colors.COLOR_END)
if closeBrowser and browser is not None:
browser.close()
return False
except Exception as e:
print "%s[!!!] Unknown error: %s%s" % (colors.RED,e ,colors.COLOR_END)
if closeBrowser and browser is not None:
browser.close()
raise # re-raise unknown errors so the failure is not silently swallowed.
print "test execution time: %d seconds" % (time.time() - timeStart)
if closeBrowser and browser is not None:
browser.close()
return True
"""
Counts exploits per type
"""
def count_exploits():
exploits = 0
pyFiles = 0
for root, subdirs, files in os.walk("exploits"):
for f in files:
if f.endswith(".dat"):
fPath = root + "/" + f
with open(fPath,"r") as cFile:
for line in cFile.readlines():
exploits += 1
elif f.endswith(".py") and not f.startswith('__init__'):
pyFiles += 1
return exploits,pyFiles
"""
View options for exploit script
"""
def options_shell(pathToExploit):
if not isfile(pathToExploit) and not isfile(pathToExploit + ".py"):
raise Exception("Exploit not found in path: %s" % pathToExploit)
pye = PyExploit(pathToExploit)
options = pye.options
print "%s[%s]%s > %sshow parameters%s" % (colors.GREEN, pathToExploit, colors.COLOR_END, colors.YELLOW, colors.COLOR_END)
print ""
print "Exploit options (%s)" % pathToExploit
print ""
for option in options:
parser, dictArray = argumentParser()
for d in dictArray:
if d['long'] == "--" + option:
print "[--%s] %s" % (option,d['help'])
dependencies = pye.dependencies
if len(dependencies) != 0:
print ""
print "Dependencies:"
for key in dependencies:
print "%s (%s)" % (key,dependencies[key])
print ""
print ""
def isExploitInterface():
return len(sys.argv) == 3 and (sys.argv[1] == "-e" or sys.argv[1] == "--exploits")
def isStatusInterface():
return len(sys.argv) == 2 and (sys.argv[1] == "-s" or sys.argv[1] == "--status")
def isHelpInterface():
return len(sys.argv) == 2 and (sys.argv[1] == "-h" or sys.argv[1] == "--help")
def isMacroInterface():
return len(sys.argv) == 3 and (sys.argv[1] == "-m" or sys.argv[1] == "--macros")
if __name__ == '__main__':
# Do not start server when viewing exploit info
if not isExploitInterface() and not isHelpInterface():
pinkserver.start(Util.getConfig("http-port"))
time.sleep(2)
# Will exit(1) when vulnerabilities are found
secure = pinkwave_shell()
if not secure:
exit(1)
|
|
"""
kombu.entity
================
Exchange and Queue declarations.
"""
from __future__ import absolute_import
from .abstract import MaybeChannelBound
from .exceptions import ContentDisallowed
from .serialization import prepare_accept_content
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE,
'persistent': PERSISTENT_DELIVERY_MODE}
__all__ = ['Exchange', 'Queue', 'binding']
def pretty_bindings(bindings):
return '[%s]' % (', '.join(map(str, bindings)))
class Exchange(MaybeChannelBound):
"""An Exchange declaration.
:keyword name: See :attr:`name`.
:keyword type: See :attr:`type`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword delivery_mode: See :attr:`delivery_mode`.
:keyword arguments: See :attr:`arguments`.
.. attribute:: name
Name of the exchange. Default is no name (the default exchange).
.. attribute:: type
*This description of AMQP exchange types was shamelessly stolen
from the blog post `AMQP in 10 minutes: Part 4`_ by
Rajith Attapattu. Reading this article is recommended if you're
new to amqp.*
"AMQP defines four default exchange types (routing algorithms) that
cover most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your broker
manual for more information about available exchange types.
* `direct` (*default*)
Direct match between the routing key in the message, and the
routing criteria used when a queue is bound to this exchange.
* `topic`
Wildcard match between the routing key and the routing pattern
specified in the exchange/queue binding. The routing key is
treated as zero or more words delimited by `"."` and
supports special wildcard characters. `"*"` matches a
single word and `"#"` matches zero or more words.
* `fanout`
Queues are bound to this exchange with no arguments. Hence any
message sent to this exchange will be forwarded to all queues
bound to this exchange.
* `headers`
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument
named "x-match" determines the matching algorithm, where
`"all"` implies an `AND` (all pairs must match) and
`"any"` implies `OR` (at least one pair must match).
:attr:`arguments` is used to specify the arguments.
.. _`AMQP in 10 minutes: Part 4`:
http://bit.ly/amqp-exchange-types
.. attribute:: channel
The channel the exchange is bound to (if bound).
.. attribute:: durable
Durable exchanges remain active when a server restarts. Non-durable
exchanges (transient exchanges) are purged when a server restarts.
Default is :const:`True`.
.. attribute:: auto_delete
If set, the exchange is deleted when all queues have finished
using it. Default is :const:`False`.
.. attribute:: delivery_mode
The default delivery mode used for messages. The value is an integer,
or alias string.
* 1 or `"transient"`
The message is transient, which means it is stored in
memory only and is lost if the server dies or restarts.
* 2 or `"persistent"` (*default*)
The message is persistent, which means the message is
stored both in memory and on disk, and is therefore
preserved if the server dies or restarts.
The default value is 2 (persistent).
.. attribute:: arguments
Additional arguments to specify when the exchange is declared.
"""
TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
name = ''
type = 'direct'
durable = True
auto_delete = False
passive = False
delivery_mode = PERSISTENT_DELIVERY_MODE
attrs = (
('name', None),
('type', None),
('arguments', None),
('durable', bool),
('passive', bool),
('auto_delete', bool),
('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m),
)
def __init__(self, name='', type='', channel=None, **kwargs):
super(Exchange, self).__init__(**kwargs)
self.name = name or self.name
self.type = type or self.type
self.maybe_bind(channel)
def __hash__(self):
return hash('E|%s' % (self.name, ))
def declare(self, nowait=False, passive=None):
"""Declare the exchange.
Creates the exchange on the broker.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
passive = self.passive if passive is None else passive
if self.name:
return self.channel.exchange_declare(
exchange=self.name, type=self.type, durable=self.durable,
auto_delete=self.auto_delete, arguments=self.arguments,
nowait=nowait, passive=passive,
)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False, **kwargs):
"""Binds the exchange to another exchange.
:keyword nowait: If set the server will not respond, and the call
will not block waiting for a response. Default is :const:`False`.
"""
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.exchange_bind(destination=self.name,
source=exchange,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def unbind_from(self, source='', routing_key='',
nowait=False, arguments=None):
"""Delete previously created exchange binding from the server."""
if isinstance(source, Exchange):
source = source.name
return self.channel.exchange_unbind(destination=self.name,
source=source,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def Message(self, body, delivery_mode=None, priority=None,
content_type=None, content_encoding=None,
properties=None, headers=None):
"""Create message instance to be sent with :meth:`publish`.
:param body: Message body.
:keyword delivery_mode: Set custom delivery mode. Defaults
to :attr:`delivery_mode`.
:keyword priority: Message priority, 0 to 9. (currently not
supported by RabbitMQ).
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword properties: Message properties.
:keyword headers: Message headers.
"""
properties = {} if properties is None else properties
dm = delivery_mode or self.delivery_mode
properties['delivery_mode'] = \
DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm
return self.channel.prepare_message(body,
properties=properties,
priority=priority,
content_type=content_type,
content_encoding=content_encoding,
headers=headers)
def publish(self, message, routing_key=None, mandatory=False,
immediate=False, exchange=None):
"""Publish message.
:param message: :meth:`Message` instance to publish.
:param routing_key: Routing key.
:param mandatory: Currently not supported.
:param immediate: Currently not supported.
"""
exchange = exchange or self.name
return self.channel.basic_publish(message,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
def delete(self, if_unused=False, nowait=False):
"""Delete the exchange declaration on server.
:keyword if_unused: Delete only if the exchange has no bindings.
Default is :const:`False`.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return self.channel.exchange_delete(exchange=self.name,
if_unused=if_unused,
nowait=nowait)
def binding(self, routing_key='', arguments=None, unbind_arguments=None):
return binding(self, routing_key, arguments, unbind_arguments)
def __eq__(self, other):
if isinstance(other, Exchange):
return (self.name == other.name and
self.type == other.type and
self.arguments == other.arguments and
self.durable == other.durable and
self.auto_delete == other.auto_delete and
self.delivery_mode == other.delivery_mode)
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return super(Exchange, self).__repr__(str(self))
def __str__(self):
return 'Exchange %s(%s)' % (self.name or repr(''), self.type)
@property
def can_cache_declaration(self):
return self.durable and not self.auto_delete
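# Usage sketch (illustrative, not part of this module): a typical
# declare/publish cycle with a bound exchange, assuming an open channel:
#
#   media = Exchange('media', type='direct', durable=True)
#   bound = media(channel)                     # bind to a channel
#   bound.declare()                            # exchange.declare on broker
#   msg = bound.Message(b'body', delivery_mode='persistent')
#   bound.publish(msg, routing_key='video')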
class binding(object):
"""Represents a queue or exchange binding.
:keyword exchange: Exchange to bind to.
:keyword routing_key: Routing key used as binding key.
:keyword arguments: Arguments for bind operation.
:keyword unbind_arguments: Arguments for unbind operation.
"""
def __init__(self, exchange=None, routing_key='',
arguments=None, unbind_arguments=None):
self.exchange = exchange
self.routing_key = routing_key
self.arguments = arguments
self.unbind_arguments = unbind_arguments
def declare(self, channel, nowait=False):
"""Declare destination exchange."""
if self.exchange and self.exchange.name:
ex = self.exchange(channel)
ex.declare(nowait=nowait)
def bind(self, entity, nowait=False):
"""Bind entity to this binding."""
entity.bind_to(exchange=self.exchange,
routing_key=self.routing_key,
arguments=self.arguments,
nowait=nowait)
def unbind(self, entity, nowait=False):
"""Unbind entity from this binding."""
entity.unbind_from(self.exchange,
routing_key=self.routing_key,
arguments=self.unbind_arguments,
nowait=nowait)
def __repr__(self):
return '<binding: %s>' % (self, )
def __str__(self):
return '%s->%s' % (self.exchange.name, self.routing_key)
class Queue(MaybeChannelBound):
"""A Queue declaration.
:keyword name: See :attr:`name`.
:keyword exchange: See :attr:`exchange`.
:keyword routing_key: See :attr:`routing_key`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword exclusive: See :attr:`exclusive`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword queue_arguments: See :attr:`queue_arguments`.
:keyword binding_arguments: See :attr:`binding_arguments`.
:keyword on_declared: See :attr:`on_declared`
.. attribute:: name
Name of the queue. Default is no name (default queue destination).
.. attribute:: exchange
The :class:`Exchange` the queue binds to.
.. attribute:: routing_key
The routing key (if any), also called *binding key*.
The interpretation of the routing key depends on
the :attr:`Exchange.type`.
* direct exchange
Matches if the routing key property of the message and
the :attr:`routing_key` attribute are identical.
* fanout exchange
Always matches, even if the binding does not have a key.
* topic exchange
Matches the routing key property of the message by a primitive
pattern matching scheme. The message routing key then consists
of words separated by dots (`"."`, like domain names), and
two special characters are available; star (`"*"`) and hash
(`"#"`). The star matches any word, and the hash matches
zero or more words. For example `"*.stock.#"` matches the
routing keys `"usd.stock"` and `"eur.stock.db"` but not
`"stock.nasdaq"`.
.. attribute:: channel
The channel the Queue is bound to (if bound).
.. attribute:: durable
Durable queues remain active when a server restarts.
Non-durable queues (transient queues) are purged if/when
a server restarts.
Note that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
Default is :const:`True`.
.. attribute:: exclusive
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
Default is :const:`False`.
.. attribute:: auto_delete
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
.. attribute:: queue_arguments
Additional arguments used when declaring the queue.
.. attribute:: binding_arguments
Additional arguments used when binding the queue.
.. attribute:: alias
Unused in Kombu, but applications can take advantage of this.
For example to give alternate names to queues with automatically
generated queue names.
.. attribute:: on_declared
Optional callback to be applied when the queue has been
declared (the ``queue_declare`` operation is complete).
This must be a function with a signature that accepts at least 3
positional arguments: ``(name, messages, consumers)``.
"""
ContentDisallowed = ContentDisallowed
name = ''
exchange = Exchange('')
routing_key = ''
durable = True
exclusive = False
auto_delete = False
no_ack = False
attrs = (
('name', None),
('exchange', None),
('routing_key', None),
('queue_arguments', None),
('binding_arguments', None),
('durable', bool),
('exclusive', bool),
('auto_delete', bool),
('no_ack', None),
('alias', None),
('bindings', list),
)
def __init__(self, name='', exchange=None, routing_key='',
channel=None, bindings=None, on_declared=None,
**kwargs):
super(Queue, self).__init__(**kwargs)
self.name = name or self.name
self.exchange = exchange or self.exchange
self.routing_key = routing_key or self.routing_key
self.bindings = set(bindings or [])
self.on_declared = on_declared
# allows Queue('name', [binding(...), binding(...), ...])
if isinstance(exchange, (list, tuple, set)):
self.bindings |= set(exchange)
if self.bindings:
self.exchange = None
# exclusive implies auto-delete.
if self.exclusive:
self.auto_delete = True
self.maybe_bind(channel)
def bind(self, channel):
on_declared = self.on_declared
bound = super(Queue, self).bind(channel)
bound.on_declared = on_declared
return bound
def __hash__(self):
return hash('Q|%s' % (self.name, ))
def when_bound(self):
if self.exchange:
self.exchange = self.exchange(self.channel)
def declare(self, nowait=False):
"""Declares the queue, the exchange and binds the queue to
the exchange."""
# - declare main binding.
if self.exchange:
self.exchange.declare(nowait)
self.queue_declare(nowait, passive=False)
if self.exchange and self.exchange.name:
self.queue_bind(nowait)
# - declare extra/multi-bindings.
for B in self.bindings:
B.declare(self.channel)
B.bind(self, nowait=nowait)
return self.name
def queue_declare(self, nowait=False, passive=False):
"""Declare queue on the server.
:keyword nowait: Do not wait for a reply.
:keyword passive: If set, the server will not create the queue.
The client can use this to check whether a queue exists
without modifying the server state.
"""
ret = self.channel.queue_declare(queue=self.name,
passive=passive,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.queue_arguments,
nowait=nowait)
if not self.name:
self.name = ret[0]
if self.on_declared:
self.on_declared(*ret)
return ret
def queue_bind(self, nowait=False):
"""Create the queue binding on the server."""
return self.bind_to(self.exchange, self.routing_key,
self.binding_arguments, nowait=nowait)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False):
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.queue_bind(queue=self.name,
exchange=exchange,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def get(self, no_ack=None, accept=None):
"""Poll the server for a new message.
Must return the message if a message was available,
or :const:`None` otherwise.
:keyword no_ack: If enabled the broker will automatically
ack messages.
:keyword accept: Custom list of accepted content types.
This method provides direct access to the messages in a
queue using a synchronous dialogue, designed for
specific types of applications where synchronous functionality
is more important than performance.
"""
no_ack = self.no_ack if no_ack is None else no_ack
message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
if message is not None:
m2p = getattr(self.channel, 'message_to_python', None)
if m2p:
message = m2p(message)
if message.errors:
message._reraise_error()
message.accept = prepare_accept_content(accept)
return message
def purge(self, nowait=False):
"""Remove all ready messages from the queue."""
return self.channel.queue_purge(queue=self.name,
nowait=nowait) or 0
def consume(self, consumer_tag='', callback=None,
no_ack=None, nowait=False):
"""Start a queue consumer.
Consumers last as long as the channel they were created on, or
until the client cancels them.
:keyword consumer_tag: Unique identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
:keyword no_ack: If enabled the broker will automatically ack
messages.
:keyword nowait: Do not wait for a reply.
:keyword callback: callback called for each delivered message
"""
if no_ack is None:
no_ack = self.no_ack
return self.channel.basic_consume(queue=self.name,
no_ack=no_ack,
consumer_tag=consumer_tag or '',
callback=callback,
nowait=nowait)
def cancel(self, consumer_tag):
"""Cancel a consumer by consumer tag."""
return self.channel.basic_cancel(consumer_tag)
def delete(self, if_unused=False, if_empty=False, nowait=False):
"""Delete the queue.
:keyword if_unused: If set, the server will only delete the queue
if it has no consumers. A channel error will be raised
if the queue has consumers.
:keyword if_empty: If set, the server will only delete the queue
if it is empty. If it is not empty a channel error will be raised.
:keyword nowait: Do not wait for a reply.
"""
return self.channel.queue_delete(queue=self.name,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait)
def queue_unbind(self, arguments=None, nowait=False):
return self.unbind_from(self.exchange, self.routing_key,
arguments, nowait)
def unbind_from(self, exchange='', routing_key='',
arguments=None, nowait=False):
"""Unbind queue by deleting the binding from the server."""
return self.channel.queue_unbind(queue=self.name,
exchange=exchange.name,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def __eq__(self, other):
if isinstance(other, Queue):
return (self.name == other.name and
self.exchange == other.exchange and
self.routing_key == other.routing_key and
self.queue_arguments == other.queue_arguments and
self.binding_arguments == other.binding_arguments and
self.durable == other.durable and
self.exclusive == other.exclusive and
self.auto_delete == other.auto_delete)
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
s = super(Queue, self).__repr__
if self.bindings:
return s('Queue {0.name} -> {bindings}'.format(
self, bindings=pretty_bindings(self.bindings),
))
return s(
'Queue {0.name} -> {0.exchange!r} -> {0.routing_key}'.format(
self))
@property
def can_cache_declaration(self):
return self.durable and not self.auto_delete
@classmethod
def from_dict(self, queue, **options):
binding_key = options.get('binding_key') or options.get('routing_key')
e_durable = options.get('exchange_durable')
if e_durable is None:
e_durable = options.get('durable')
e_auto_delete = options.get('exchange_auto_delete')
if e_auto_delete is None:
e_auto_delete = options.get('auto_delete')
q_durable = options.get('queue_durable')
if q_durable is None:
q_durable = options.get('durable')
q_auto_delete = options.get('queue_auto_delete')
if q_auto_delete is None:
q_auto_delete = options.get('auto_delete')
e_arguments = options.get('exchange_arguments')
q_arguments = options.get('queue_arguments')
b_arguments = options.get('binding_arguments')
bindings = options.get('bindings')
exchange = Exchange(options.get('exchange'),
type=options.get('exchange_type'),
delivery_mode=options.get('delivery_mode'),
routing_key=options.get('routing_key'),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return Queue(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get('exclusive'),
auto_delete=q_auto_delete,
no_ack=options.get('no_ack'),
queue_arguments=q_arguments,
binding_arguments=b_arguments,
bindings=bindings)
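# A minimal usage sketch for Queue (names are illustrative; assumes a connected
# kombu Connection called `conn`):
#
#     video_queue = Queue('video', exchange=Exchange('media', type='direct'),
#                         routing_key='video')
#     bound = video_queue(conn.default_channel)
#     bound.declare()             # declares the exchange, the queue and the binding
#     message = bound.get()       # basic_get; None if the queue is empty
#     bound.delete(if_empty=True)
#
# Queue.from_dict() builds an equivalent declaration from a flat options dict:
#
#     Queue.from_dict('video', exchange='media', exchange_type='direct',
#                     routing_key='video', durable=True)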
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Plecost: Wordpress vulnerabilities finder
#
# @url: http://iniqua.com/labs/
# @url: https://github.com/iniqua/plecost
#
# @author:Francisco J. Gomez aka ffranz (http://iniqua.com/)
# @author:Daniel Garcia aka cr0hn (http://www.cr0hn.com/me/)
#
# Copyright (c) 2015, Iniqua Team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This file contains reporting functions.
"""
import json
from abc import ABCMeta, abstractmethod
from os.path import splitext
from xml.etree import ElementTree as ET
from .exceptions import PlecostInvalidReportFormat
# --------------------------------------------------------------------------
# Abstract
# --------------------------------------------------------------------------
class Reporter(object, metaclass=ABCMeta):
"""Reporter abstract class"""
# ----------------------------------------------------------------------
def __init__(self, output_filename):
"""
:param output_filename: Output file name.
:type output_filename: str
"""
if not isinstance(output_filename, str):
raise TypeError("Expected basestring, got '%s' instead" % type(output_filename))
self.__output_filename = output_filename
# ----------------------------------------------------------------------
@property
def output_filename(self):
"""
:return: output file name.
:rtype: str
"""
return self.__output_filename
# ----------------------------------------------------------------------
# Abstract methods
# ----------------------------------------------------------------------
@abstractmethod
def generate(self, info):
"""
Generates content of report
:param info: PlecostResults instance
:type info: `PlecostResults`
:return: content of report
:rtype: object
"""
        raise NotImplementedError()
# ----------------------------------------------------------------------
@abstractmethod
def save(self, content):
"""
Save the the content of report into output_file
:param content: object with content
:type content: object
"""
        raise NotImplementedError()
# --------------------------------------------------------------------------
# Implementation
# --------------------------------------------------------------------------
class ReporterJSON(Reporter):
"""JSON reporter"""
# ----------------------------------------------------------------------
def generate(self, info):
"""
Generates content of report
:param info: PlecostResults instance
:type info: `PlecostResults`
"""
js_info = {}
# Set target
js_info["target"] = info.target
# Set time info
js_info["start_time"] = info.start_time.strftime("%H-%m-%Y %H:%M:%S")
js_info["end_time"] = info.end_time.strftime("%H-%m-%Y %H:%M:%S")
# WordPress info
js_info["wordpress"] = {
"current_version": info.wordpress_info.current_version,
"last_version": info.wordpress_info.latest_version,
"cves": [x for x in info.wordpress_info.vulnerabilities]
}
# Plugins info
js_info["plugins"] = []
for plugin in info.plugins:
json_plugin = {}
json_plugin["plugin_name"] = plugin.plugin_name
json_plugin["current_version"] = plugin.current_version
json_plugin["last_version"] = plugin.latest_version
json_plugin["url"] = plugin.plugin_uri
json_plugin["outdated"] = True if plugin.is_outdated else False
# Set CVE
json_plugin["cves"] = [cve for cve in plugin.cves]
# Set exploits
json_plugin["exploits"] = [exploit for exploit in plugin.exploits]
js_info["plugins"].append(json_plugin)
return js_info
# ----------------------------------------------------------------------
def save(self, content):
# Save to file
        with open(self.output_filename, "w") as f:
            json.dump(content, f)
# --------------------------------------------------------------------------
class ReporterXML(Reporter):
"""XML reporter"""
# ----------------------------------------------------------------------
def generate(self, info):
"""
Generates content of report
:param info: PlecostResults instance
:type info: `PlecostResults`
"""
root = ET.Element("libs")
# Set target
target = ET.SubElement(root, "target")
target.text = info.target
# Set time info
time_start = ET.SubElement(root, "start_time")
        time_start.text = info.start_time.strftime("%d-%m-%Y %H:%M:%S")
time_end = ET.SubElement(root, "end_time")
        time_end.text = info.end_time.strftime("%d-%m-%Y %H:%M:%S")
# WordPress info
wordpress = ET.SubElement(root, "wordpress")
wordpress.set("current_version", info.wordpress_info.current_version)
wordpress.set("last_version", info.wordpress_info.latest_version)
# Set CVE
if info.wordpress_info.vulnerabilities:
cves = ET.SubElement(wordpress, "cves")
for cve in info.wordpress_info.vulnerabilities:
xml_cve = ET.SubElement(cves, "cve")
xml_cve.text = cve
# Plugins info
plugins = ET.SubElement(root, "plugins")
for plugin in info.plugins:
xml_plugin = ET.SubElement(plugins, "plugin")
xml_plugin.text = plugin.plugin_name
xml_plugin.set("current_version", plugin.current_version)
xml_plugin.set("last_version", plugin.latest_version)
xml_plugin.set("url", plugin.plugin_uri)
xml_plugin.set("outdated", "Yes" if plugin.is_outdated else "No")
# Set CVE
if plugin.cves:
cves = ET.SubElement(xml_plugin, "cves")
for cve in plugin.cves:
xml_cve = ET.SubElement(cves, "cve")
xml_cve.text = cve
# Set exploits
            if plugin.exploits:
                exploits = ET.SubElement(xml_plugin, "exploits")
                for exploit in plugin.exploits:
                    xml_exploit = ET.SubElement(exploits, "exploit")
xml_exploit.text = exploit
return root
# ----------------------------------------------------------------------
def save(self, content):
# Save to file
tree = ET.ElementTree(content)
tree.write(self.output_filename, encoding="UTF-8")
# ----------------------------------------------------------------------
def get_reporter(filename):
"""
Select correct reporter by their extension.
:param filename: file name path.
    :type filename: str
    :return: Reporter subclass matching the file extension.
    :rtype: type
"""
reporters = dict(xml=ReporterXML,
json=ReporterJSON)
try:
extension = splitext(filename)[1][1:]
return reporters[extension]
except KeyError:
raise PlecostInvalidReportFormat("Report format '%s' not found." % extension)
__all__ = ["Reporter", "get_reporter"]
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from oslo_log import log as logging
import routes
import webob.dec
import webob.exc
from nova.api.openstack import wsgi
from nova.api import wsgi as base_wsgi
import nova.conf
from nova.i18n import translate
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
_status_to_type = {}
@staticmethod
def status_to_type(status):
if not FaultWrapper._status_to_type:
for clazz in walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
status, webob.exc.HTTPInternalServerError)()
def _error(self, inner, req):
LOG.exception("Caught error: %s", inner)
safe = getattr(inner, 'safe', False)
headers = getattr(inner, 'headers', None)
status = getattr(inner, 'code', 500)
if status is None:
status = 500
msg_dict = dict(url=req.url, status=status)
LOG.info("%(url)s returned with HTTP %(status)d", msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
# NOTE(johannes): We leave the explanation empty here on
# purpose. It could possibly have sensitive information
# that should not be returned back to the user. See
# bugs 868360 and 874472
# NOTE(eglynn): However, it would be over-conservative and
# inconsistent with the EC2 API to hide every exception,
# including those that are safe to expose, see bug 1021373
if safe:
user_locale = req.best_match_language()
inner_msg = translate(inner.message, user_locale)
outer.explanation = '%s: %s' % (inner.__class__.__name__,
inner_msg)
return wsgi.Fault(outer)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
return self._error(ex, req)
class LegacyV2CompatibleWrapper(base_wsgi.Middleware):
def _filter_request_headers(self, req):
"""For keeping same behavior with v2 API, ignores microversions
HTTP headers X-OpenStack-Nova-API-Version and OpenStack-API-Version
in the request.
"""
if wsgi.API_VERSION_REQUEST_HEADER in req.headers:
del req.headers[wsgi.API_VERSION_REQUEST_HEADER]
if wsgi.LEGACY_API_VERSION_REQUEST_HEADER in req.headers:
del req.headers[wsgi.LEGACY_API_VERSION_REQUEST_HEADER]
return req
def _filter_response_headers(self, response):
"""For keeping same behavior with v2 API, filter out microversions
HTTP header and microversions field in header 'Vary'.
"""
if wsgi.API_VERSION_REQUEST_HEADER in response.headers:
del response.headers[wsgi.API_VERSION_REQUEST_HEADER]
if wsgi.LEGACY_API_VERSION_REQUEST_HEADER in response.headers:
del response.headers[wsgi.LEGACY_API_VERSION_REQUEST_HEADER]
if 'Vary' in response.headers:
vary_headers = response.headers['Vary'].split(',')
filtered_vary = []
for vary in vary_headers:
vary = vary.strip()
if (vary == wsgi.API_VERSION_REQUEST_HEADER or
vary == wsgi.LEGACY_API_VERSION_REQUEST_HEADER):
continue
filtered_vary.append(vary)
if filtered_vary:
response.headers['Vary'] = ','.join(filtered_vary)
else:
del response.headers['Vary']
return response
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
req.set_legacy_v2()
req = self._filter_request_headers(req)
response = req.get_response(self.application)
return self._filter_response_headers(response)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
def connect(self, *args, **kargs):
# NOTE(vish): Default the format part of a route to only accept json
# and xml so it doesn't eat all characters after a '.'
# in the url.
kargs.setdefault('requirements', {})
if not kargs['requirements'].get('format'):
kargs['requirements']['format'] = 'json|xml'
return routes.Mapper.connect(self, *args, **kargs)
class ProjectMapper(APIMapper):
def _get_project_id_token(self):
        # NOTE(sdague): project_id parameter is only valid if it's hex
        # or hex + dashes (note, integers are a subset of this). This
        # is required to handle our overlapping routes issues.
        project_id_regex = r'[0-9a-f\-]+'
if CONF.osapi_v21.project_id_regex:
project_id_regex = CONF.osapi_v21.project_id_regex
return '{project_id:%s}' % project_id_regex
def resource(self, member_name, collection_name, **kwargs):
project_id_token = self._get_project_id_token()
if 'parent_resource' not in kwargs:
kwargs['path_prefix'] = '%s/' % project_id_token
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '%s/%s/:%s_id' % (
project_id_token,
p_collection,
p_member)
routes.Mapper.resource(
self,
member_name,
collection_name,
**kwargs)
# while we are in transition mode, create additional routes
# for the resource that do not include project_id.
if 'parent_resource' not in kwargs:
del kwargs['path_prefix']
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
def create_route(self, path, method, controller, action):
project_id_token = self._get_project_id_token()
# while we transition away from project IDs in the API URIs, create
# additional routes that include the project_id
self.connect('/%s%s' % (project_id_token, path),
conditions=dict(method=[method]),
controller=controller,
action=action)
self.connect(path,
conditions=dict(method=[method]),
controller=controller,
action=action)
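# A minimal usage sketch (controller name is illustrative): while in transition
# mode, create_route() registers each path twice, with and without project id:
#
#     mapper = ProjectMapper()
#     mapper.create_route('/servers', 'GET', servers_controller, 'index')
#     # connects '/{project_id:[0-9a-f\-]+}/servers' as well as '/servers'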
class PlainMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if 'parent_resource' in kwargs:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '%s/:%s_id' % (p_collection, p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo.utils import timeutils
from nova.api.openstack.compute.contrib import instance_usage_audit_log as ial
from nova.api.openstack.compute.plugins.v3 import instance_usage_audit_log as \
v21_ial
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_service
from nova import utils
service_base = test_service.fake_service
TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
dict(service_base, host='bar', topic='compute'),
dict(service_base, host='baz', topic='compute'),
dict(service_base, host='plonk', topic='compute'),
dict(service_base, host='wibble', topic='bogus'),
]
begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
# test data
TEST_LOGS1 = [
# all services done, no errors.
dict(host="plonk", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=23, message="test1"),
dict(host="baz", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=17, message="test2"),
dict(host="bar", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=10, message="test3"),
dict(host="foo", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=7, message="test4"),
]
TEST_LOGS2 = [
# some still running...
dict(host="plonk", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=23, message="test5"),
dict(host="baz", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=17, message="test6"),
dict(host="bar", period_beginning=begin2, period_ending=end2,
state="RUNNING", errors=0, task_items=10, message="test7"),
dict(host="foo", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=7, message="test8"),
]
TEST_LOGS3 = [
# some errors..
dict(host="plonk", period_beginning=begin3, period_ending=end3,
state="DONE", errors=0, task_items=23, message="test9"),
dict(host="baz", period_beginning=begin3, period_ending=end3,
state="DONE", errors=2, task_items=17, message="test10"),
dict(host="bar", period_beginning=begin3, period_ending=end3,
state="DONE", errors=0, task_items=10, message="test11"),
dict(host="foo", period_beginning=begin3, period_ending=end3,
state="DONE", errors=1, task_items=7, message="test12"),
]
def fake_task_log_get_all(context, task_name, begin, end,
host=None, state=None):
assert task_name == "instance_usage_audit"
if begin == begin1 and end == end1:
return TEST_LOGS1
if begin == begin2 and end == end2:
return TEST_LOGS2
if begin == begin3 and end == end3:
return TEST_LOGS3
raise AssertionError("Invalid date %s to %s" % (begin, end))
def fake_last_completed_audit_period(unit=None, before=None):
audit_periods = [(begin3, end3),
(begin2, end2),
(begin1, end1)]
if before is not None:
for begin, end in audit_periods:
if before > end:
return begin, end
raise AssertionError("Invalid before date %s" % (before))
return begin1, end1
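# Worked example: for before=2012-07-05 10:00:00 (the timestamp used in the
# show() tests below) the helper yields (begin1, end1), i.e. the
# 2012-07-04 06:00 -> 2012-07-05 06:00 period, since that is the most recent
# period whose end precedes `before`; with no `before` it also falls back to
# (begin1, end1).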
class InstanceUsageAuditLogTestV21(test.NoDBTestCase):
def setUp(self):
super(InstanceUsageAuditLogTestV21, self).setUp()
self.context = context.get_admin_context()
timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
self._set_up_controller()
self.host_api = self.controller.host_api
def fake_service_get_all(context, disabled):
self.assertIsNone(disabled)
return TEST_COMPUTE_SERVICES
self.stubs.Set(utils, 'last_completed_audit_period',
fake_last_completed_audit_period)
self.stubs.Set(db, 'service_get_all',
fake_service_get_all)
self.stubs.Set(db, 'task_log_get_all',
fake_task_log_get_all)
def _set_up_controller(self):
self.controller = v21_ial.InstanceUsageAuditLogController()
def tearDown(self):
super(InstanceUsageAuditLogTestV21, self).tearDown()
timeutils.clear_time_override()
def test_index(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
use_admin_context=True)
result = self.controller.index(req)
self.assertIn('instance_usage_audit_logs', result)
logs = result['instance_usage_audit_logs']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(0, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(4, logs['num_hosts_done'])
self.assertEqual(0, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
def test_index_non_admin(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
use_admin_context=False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_show(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-instance_usage_audit_log/show',
use_admin_context=True)
result = self.controller.show(req, '2012-07-05 10:00:00')
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(0, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(4, logs['num_hosts_done'])
self.assertEqual(0, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
def test_show_non_admin(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
use_admin_context=False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req, '2012-07-05 10:00:00')
def test_show_with_running(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-instance_usage_audit_log/show',
use_admin_context=True)
result = self.controller.show(req, '2012-07-06 10:00:00')
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(0, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(3, logs['num_hosts_done'])
self.assertEqual(1, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("3 of 4 hosts done. 0 errors.",
logs['overall_status'])
def test_show_with_errors(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-instance_usage_audit_log/show',
use_admin_context=True)
result = self.controller.show(req, '2012-07-07 10:00:00')
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEqual(57, logs['total_instances'])
self.assertEqual(3, logs['total_errors'])
self.assertEqual(4, len(logs['log']))
self.assertEqual(4, logs['num_hosts'])
self.assertEqual(4, logs['num_hosts_done'])
self.assertEqual(0, logs['num_hosts_running'])
self.assertEqual(0, logs['num_hosts_not_run'])
self.assertEqual("ALL hosts done. 3 errors.",
logs['overall_status'])
class InstanceUsageAuditLogTest(InstanceUsageAuditLogTestV21):
def _set_up_controller(self):
self.controller = ial.InstanceUsageAuditLogController()
|
|
import os, shutil, glob, winreg
from os import path
def mtime(f):
try:
stat = os.stat(f)
return stat.st_mtime
    except OSError:
return 0
def unglob(src, files):
out = []
for f in files:
if '*' in f:
for o in glob.iglob(path.join(src, f)):
out.append(o[len(src)+1:])
else: out.append(f)
return out
def copyFilesFlat(src, dst, files):
for __f in files:
f_s = __f
f_d = __f
if '|' in __f:
f_d, f_s = __f.split('|', 1)
s = path.join(src, f_s)
d = path.join(dst, f_d)
if path.isdir(s):
for root, dirs, files in os.walk(s):
try:
os.makedirs(d)
                except OSError:
pass
copyFilesFlat(s, d, dirs + files)
return
s_time = mtime(s)
d_time = mtime(d)
if s_time > d_time:
print(d)
try: os.mkdir(os.path.dirname(d))
            except OSError: pass
shutil.copy(s, d)
# HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Microsoft SDKs\Windows
def SDKs():
sdks = {}
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows") as key:
index = 0
while True:
try:
subkey = winreg.EnumKey(key, index)
index += 1
with winreg.OpenKey(key, subkey) as sub:
sdks[subkey] = winreg.QueryValueEx(sub, "InstallationFolder")[0]
            except OSError:
break
return sdks
def win_sdk():
sdks = SDKs()
sdk = None
sdk_vers = list(sdks.keys())
sdk_vers.sort()
for ver in sdk_vers:
signtool = path.join(sdks[ver], "Bin", "signtool.exe")
if path.exists(signtool):
sdk = path.join(sdks[ver], "Bin")
return sdk
def file_list(storage):
ret = []
for key in storage:
dir = storage[key]
for f in glob.iglob(path.join(key, "*")):
if path.isdir(f):
for root, dirs, files in os.walk(f):
for p in files:
s = path.join(root, p)
dest = path.join(root[len(key)+1:], p)
if dir != "":
dest = path.join(dir, dest)
ret.append((s, dest))
else:
dest = f[len(key)+1:]
if dir != "":
dest = path.join(dir, dest)
ret.append((f, dest))
return ret
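# A small sketch of what file_list() returns (hypothetical layout):
#
#     file_list({r'build\bin': '', r'build\extras': 'extras'})
#     # -> [(r'build\bin\app.exe', 'app.exe'),
#     #     (r'build\extras\icons\app.ico', r'extras\icons\app.ico'), ...]
#     # i.e. (source path, destination path relative to the install root),
#     # where the value of each storage entry is prepended to the destination.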
class component:
def __init__(self, cab_id, dir, dir_name, name, guid):
self.id = cab_id
self.dir = dir
self.dir_name = dir_name
self.name = name
self.guid = guid
self.files = []
def append(self, file, source):
self.files.append((file, source))
def print_out(self, out, links, depth = 1):
print('{}<Component Id="{}" Guid="{}">'.format(" " * depth, self.name, self.guid), file=out)
depth += 1
prefix = self.dir.replace(os.sep, '_').replace('-', '_')
if len(prefix):
prefix += '_'
keypath = ' KeyPath="yes"'
for file in self.files:
__id = '{}{}'.format(prefix, file[0].replace('.', '_'))
out.write('{}<File Id="{}" Name="{}" Source="{}" DiskId="{}"{} '.format(" " * depth, __id, file[0], file[1], self.id, keypath))
if file[0] in links:
print('>', file=out)
for link in links[file[0]]:
id = '{}_{}.Shortcut'.format(link[1], file[0].replace('.', '_'))
print('{} <Shortcut Id="{}" Directory="{}" Name="{}"'.format(" " * depth, id, link[1], link[0]), file=out)
print('{} WorkingDirectory="{}"'.format(" " * depth, self.dir_name), file=out)
print('{} Icon="{}" IconIndex="{}"'.format(" " * depth, link[2], link[3]), file=out)
if len(link) > 4: # Display name
print('{} DisplayResourceDll="[#{}]" DisplayResourceId="{}"'.format(" " * depth, __id, link[4]), file=out)
if len(link) > 5: # Infotip
print('{} DescriptionResourceDll="[#{}]" DescriptionResourceId="{}"'.format(" " * depth, __id, link[5]), file=out)
print('{} Advertise="yes" />'.format(" " * depth), file=out)
print('{}</File>'.format(" " * depth), file=out)
else:
print('/>', file=out)
keypath = ""
depth -= 1
print('{}</Component>'.format(" " * depth), file=out)
class directory:
def __init__(self, id, dir, name = None):
self.id = id
self.dir = dir
self.name = name
self.comps = {}
self.subs = {}
def append_here(self, cabId, file, source, comp_tmpl):
if cabId not in self.comps:
comp_id = (cabId, self.dir)
comp_name = None
comp_guid = 'PUT-GUID-HERE'
if comp_id in comp_tmpl:
comp_name, comp_guid = comp_tmpl[comp_id]
if comp_name is None:
comp_name = "{}.Component".format(self.dir.replace(os.sep, '_').replace('-', '_'))
self.comps[cabId] = component(cabId, self.dir, self.id, comp_name, comp_guid)
self.comps[cabId].append(file, source)
def append_deep(self, deep, cabId, file, source, comp_tmpl):
if len(deep) == 0:
self.append_here(cabId, file, source, comp_tmpl)
return
here = deep[0]
deep = deep[1:]
if here not in self.subs:
dir_here = path.join(self.dir, here)
if self.dir == "": dir_here = here
dir_id = dir_here.replace(os.sep, "_").replace('-', '_') + ".Directory"
self.subs[here] = directory(dir_id, path.join(self.dir, here), here)
self.subs[here].append_deep(deep, cabId, file, source, comp_tmpl)
def append(self, cabId, file, source, comp_tmpl):
file_path = path.dirname(file)
file = path.basename(file)
if file_path == "":
self.append_here(cabId, file, source, comp_tmpl)
return
self.append_deep(file_path.split(os.sep), cabId, file, source, comp_tmpl)
def print_out(self, out, links, depth = 1):
out.write(" " * depth)
if self.name is None:
print("<DirectoryRef Id='{}'>".format(self.id), file=out)
else:
print("<Directory Id='{}' Name='{}'>".format(self.id, self.name), file=out)
depth += 1
for comp in self.comps:
self.comps[comp].print_out(out, links, depth)
for d in self.subs:
self.subs[d].print_out(out, links, depth)
depth -= 1
out.write(" " * depth)
if self.name is None:
print("</DirectoryRef>", file=out)
else:
print("</Directory>", file=out)
def print_fragment(self, out, links):
print("""<?xml version="1.0" encoding="utf-8"?>
<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
<Fragment>""", file=out)
self.print_out(out, links, 2)
print(""" </Fragment>
</Wix>""", file=out)
def dir_cab(cabs, name):
for i in range(len(cabs)):
if name in cabs[i]:
return i + 1
return 0
def dir_from(file_path):
pair = file_path.split(os.sep, 1)
if len(pair) == 1: return ""
return pair[0]
def msi_fragment(cabs, comps, list):
ret = directory("INSTALLDIR", "")
for source, dest in list:
cab = dir_cab(cabs, dir_from(source))
ret.append(cab, dest, source, comps)
return ret
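# A minimal end-to-end sketch (inputs are hypothetical):
#
#     files = file_list({'bin': '', 'share': 'share'})
#     cabs = [['bin'], ['share']]    # sources under 'bin' -> cab 1, 'share' -> cab 2
#     comps = {(1, ''): ('Root.Component', 'PUT-GUID-HERE')}  # optional overrides
#     fragment = msi_fragment(cabs, comps, files)
#     with open('files.wxs', 'w', encoding='utf-8') as out:
#         fragment.print_fragment(out, links={})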
|
|
# -*- coding: utf-8 -*-
"""
Query classes for Tokyo Tyrant API implementation.
"""
import copy
import warnings
from protocol import TyrantProtocol
import utils
CACHE_CHUNK_SIZE = 1000
class Query(object):
"""
A lazy abstraction for queries via Tyrant protocol.
You will normally instantiate Query this way::
>>> from pyrant import Tyrant
>>> t = Tyrant(host='localhost', port=1983)
>>> query = t.query
.. note:: the results are cached in two ways. First, the complete list of
relevant keys is fetched and stored in the query object. Second, the
corresponding data is fetched in large chunks depending on what slices
or indices you request. Sometimes the chunks are not large enough and
we hit the database too many times. To minimize the overhead you may
want to increase the chunk size. You can use
:meth:`~pyrant.query.Query.set_chunk_size` for that purpose.
"""
def __init__(self, proto, db_type, literal=False, conditions=None,
columns=None, ms_type=None, ms_conditions=None):
if conditions:
assert isinstance(conditions, list) and \
all(isinstance(c, Condition) for c in conditions), \
'Expected a list of Condition instances, got %s' % conditions
self.literal = literal
self._conditions = conditions or []
self._ordering = Ordering()
self._proto = proto
self._db_type = db_type
self._columns = columns
self._ms_type = ms_type
self._ms_conditions = ms_conditions
# cache
self._cache = ResultCache(self)
#
# PYTHON MAGIC METHODS
#
def __and__(self, other):
return self.intersect(other)
def __contains__(self, key):
keys = self._cache.get_keys(self._do_search)
return key in keys
def __getitem__(self, k):
# Retrieve an item or slice from the set of results.
# XXX do we really need to cache the data? What if there are a couple
# millions items and the user wants to just iterate over them in
# order to calculate an aggregate? I think cache should be either
# kept small or turned off by default or completely removed.
# The user can always easily cache the data explicitly by keeping
# the results in a variable (i.e. "records = query[:]").
if isinstance(k, slice):
return self._get_slice(k)
elif isinstance(k, (int, long)):
return self._get_item(k)
else:
raise TypeError("Query indices must be integers")
def _get_slice(self, s):
        # Check slice integrity XXX check if this is still reasonable
for x in s.start, s.stop:
if x is not None and x < 0:
raise ValueError('Negative indexing is not supported')
if s.start and s.start == s.stop:
raise ValueError('Zero-length slices are not supported')
# retrieve and cache keys
self._cache.get_keys(self._do_search)
items = self._cache.get_items(s.start or 0, s.stop)
return list(items)
def _get_item(self, index):
if index < 0:
raise ValueError('Negative indexing is not supported')
# retrieve and cache keys
self._cache.get_keys(self._do_search)
item = self._cache.get_item(index)
if item is None:
raise IndexError
return item
def __len__(self):
return len(self[:])
def __or__(self, other):
return self.union(other)
def __repr__(self):
# Do the query using getitem
return str(self[:])
def __sub__(self, other):
return self.minus(other)
#
# PRIVATE METHODS
#
def _add_to_metasearch(self, other, operator):
"""
Returns a new Query object resulting from mapping `self` with ``other``
by applying the given ``operator`` which is one of the operators defined
in Tokyo Tyrant protocol: `TyrantProtocol.TDBMSUNION`,
`TyrantProtocol.TDBMSISECT` or `TyrantProtocol.TDBMSDIFF`.
"""
query = self._clone()
        assert isinstance(other, Query), "other must be a Query instance"
        assert query._ms_type in (None, operator), "You cannot mix union with intersect or minus"
if query._ms_conditions is None:
query._ms_conditions = []
other = other._clone()
query._ms_conditions.append(other._conditions)
query._ms_type = operator
return query
def _clone(self):
defaults = {
'literal': self.literal,
'conditions': [c._clone() for c in self._conditions],
'ms_type': self._ms_type,
}
if self._ms_conditions:
defaults.update(
ms_conditions = [[query._clone() for query in conds]
for conds in self._ms_conditions],
)
if self._columns:
defaults.update(columns=self._columns[:])
return Query(self._proto, self._db_type, **defaults)
def _do_search(self, conditions=None, limit=None, offset=None,
out=False, count=False, hint=False, columns=None):
"""
Returns keys of items that correspond to the Query instance.
"""
defaults = {
'out': out,
'count': count,
'hint': hint,
'conditions': conditions or [c.prepare() for c in self._conditions],
'limit': limit,
'offset': offset,
}
if columns:
defaults.update(columns=columns[:])
if self._ordering:
defaults.update(
order_column = self._ordering.name,
order_type = self._ordering.type,
)
if self._ms_conditions:
# update search conditions with metaseach conditions
defaults.update(
ms_type = self._ms_type,
ms_conditions = [
[condition.prepare() for condition in metasearch_conditions]
for metasearch_conditions in self._ms_conditions
]
)
return self._proto.search(**defaults)
def _filter(self, negate, args, kwargs):
query = self._clone()
# Iterate arguments. Should be instances of Q
for cond in args:
assert isinstance(cond, Condition), "Arguments must be instances of Q"
c = cond._clone()
c.negate = c.negate ^ negate
query._conditions.append(c)
# Generate Condition objects with arguments as needed
for name, expr in kwargs.iteritems():
c = Condition(name, expr)
c.negate = negate
query._conditions.append(c)
return query
def _to_python(self, elem):
return utils.to_python(elem, self._db_type)
#
# PUBLIC API
#
def columns(self, *names):
"""
Returns a list of items with only specified columns per item. Expects
names of columns to fetch. If none specified or '*' is in the names,
all available columns are fetched. Current query object is not
modified. Returned is a list of dictionaries, not a derivative query.
.. note:: primary keys are *not* returned along with data, so this is
not an equivalent for ``SELECT x`` of SQL.
Usage::
query.columns() # fetches whole items
query.columns('*') # same as above
query.columns('name', 'age') # only fetches data for these columns
.. warning:: results are not cached in any way.
This method does not retrieve "normal" cached items and filter their
contents; instead, it issues a modified search statement and retrieves
pre-filtered items directly from the database. This is much faster than
fetching and processing the whole bulk of data in Python.
"""
if '*' in names:
return self[:]
values = self._do_search(columns=names)
return [self._to_python(value) for value in values]
def count(self):
"""
Returns the number of matched items.
"""
return int(self._do_search(count=True)[0])
def delete(self, quick=False):
"""
Deletes all matched items from the database. Returns `True` on success
or `False` if the operation could not be performed.
.. warning:: current implementation is inefficient due to a bug on a
lower level (probably within Pyrant). The underlying function does
not tell us whether the operation was successful, so we perform an
additional query. This may substantially decrease performance in
some rare cases. A workaround is to use the param `quick`.
:param quick: if `True`, the method always returns `None` and does not
check whether the operation was successful. Useful if you call this
method *very* frequently. Default is `False`. Please note that this
param will be deprecated after the underlying code is fixed so the
method will always return a boolean.
"""
# FIXME this is broken: lower level always returns empty list, not sure why
response = self._do_search(out=True)
# assert 1 == len(response)
# return True if response[0] == 'true' else False
# XXX emulating the proper response
        # TODO: deprecate the `quick` param
if quick:
return not bool(self._do_search(count=True))
else:
return None
def exclude(self, *args, **kwargs):
"""
Antipode of :meth:`~pyrant.query.Query.filter`.
"""
return self._filter(True, args, kwargs)
def filter(self, *args, **kwargs): # TODO: provide full list of lookups
"""
Returns a clone of the Query object with given conditions applied.
Conditions can be specified as keyword arguments in this form::
t.query.filter(name__is='John', age__gte=50)
Supported keyword lookups and appropriate expression types are:
* `between`: (list of numbers)
* `contains`: (string or list of strings)
* `contains_any` (list of strings)
* `endswith`: (string)
* `exists`: (boolean)
* `gt`: (number)
* `gte`: (number)
* `in`: (list of strings or numbers)
* `is`: (string, list of strings or a number)
* `like`: (string or list of strings)
* `like_any`: (list of strings)
* `lt` (number)
* `lte` (number)
* `matches` (string)
* `search` (string)
* `startswith` (string)
If a column name is provided with no lookup, exact match (`is`) is
assumed.
Connect to a remote table database::
>>> t.table_enabled
True
Stuff some data into the storage::
>>> t['a'] = {'name': 'Foo', 'price': 1}
>>> t['b'] = {'name': 'Bar', 'price': 2}
>>> t['c'] = {'name': 'Foo', 'price': 3}
Find everything with price > 1::
>>> for k, v in t.query.filter(price__gt=1):
... print k
b
c
Find everything with name "Foo"::
>>> for k, v in t.query.filter(name='Foo'):
... print k
a
c
Chain queries::
>>> cheap_items = t.query.filter(price__lt=3)
>>> cheap_bars = cheap_items.filter(name='Bar')
>>> for k, v in cheap_items:
... print k
a
b
>>> for k, v in cheap_bars:
... print k
b
"""
return self._filter(False, args, kwargs)
def hint(self):
"""
Returns the hint string.
.. warning:: currently this executes the query and does not cache its
results. If you fetch the results before or after calling this
method, the search will be made twice.
"""
# TODO: the results should be cached and accessible via __getitem__
results = self._do_search(hint=True) # list of keys + hint string
return results[-1]
def intersect(self, other):
"""
Returns a Query instance with items matched by both this query and the
`other` one. Semantically equivalent to "a AND b".
"""
return self._add_to_metasearch(other, TyrantProtocol.TDBMSISECT)
def minus(self, other):
"""
Returns a Query instance with items matched by either this query or
the `other` but not both.
"""
return self._add_to_metasearch(other, TyrantProtocol.TDBMSDIFF)
def order_by(self, name, numeric=False):
"""
Defines order in which results should be retrieved.
:param name: the column name. If prefixed with ``-``, direction changes
from ascending (default) to descending.
:param numeric: if True, values are treated as numbers. Default is False.
Examples::
q.order_by('name') # ascending
q.order_by('-name') # descending
q.order_by('-price', numeric=True)
"""
query = self._clone()
# handle "name"/"-name"
if name.startswith('-'):
name = name[1:]
direction = Ordering.DESC
else:
direction = Ordering.ASC
query._ordering = Ordering(name, direction, numeric)
if self._ordering == query._ordering:
# provide link to existing cache
query._cache = self._cache
return query
def set_chunk_size(self, size=None):
"""
Sets cache chunk size. Makes sense only if the query has not been
executed yet.
:param size: an `int` (custom size) or `None` (default size).
Useful if you expect a really large number of results and want to cut
the number of database hits. In this case you will increase the chunk
size for given query object.
.. note:: any existing cache for this query will be dropped.
"""
self._cache = ResultCache(self, chunk_size=size)
def stat(self):
"""
Returns statistics on key usage.
"""
collected = {}
for _, data in self[:]:
for k in data:
collected[k] = collected.get(k, 0) + 1
return collected
def union(self, other):
"""
Returns a Query instance which items are matched either by this query
        or the `other` one or both of them. Semantically equivalent to "a OR b".
"""
return self._add_to_metasearch(other, TyrantProtocol.TDBMSUNION)
def values(self, key):
"""
Returns a list of unique values for given key.
"""
return list(set(d[key] for d in self.columns(key)))
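# A minimal usage sketch (assumes a table database behind `t`, as in the class
# docstring above; the column names are illustrative):
#
#     q = t.query.filter(price__gt=1).exclude(name='Bar')
#     q = q.order_by('-price', numeric=True)
#     q.count()           # number of matching records
#     q[:10]              # first ten (key, data) pairs, served via the cache
#     q.columns('name')   # only the 'name' column, bypassing the cache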
class Lookup(object):
"""
Lookup definition.
"""
has_custom_value = False
min_args = None
max_args = None
def __init__(self, constant, iterable=False, string=False, numeric=False,
boolean=False, value=None, min_args=None, max_args=None,
extra=None):
self.boolean = boolean
self.iterable = iterable
self.numeric = numeric
self.string = string
self.operator = getattr(TyrantProtocol, constant)
# custom value; only used if "has_custom_value" is True
self.value = value
if min_args or max_args:
assert iterable, 'number of arguments can be specified only for iterables'
self.min_args = min_args
self.max_args = max_args
# additional value processor; executed per item if value is iterable
self.extra = extra
def accepts(self, value):
"""
Returns True if given value is acceptable for this lookup definition.
"""
if self.iterable:
if not hasattr(value, '__iter__'):
return False
if value:
value = value[0]
if self.boolean:
if not isinstance(value, bool):
return False
if self.numeric:
if not isinstance(value, (int, float)):
try:
int(value)
except (ValueError, TypeError):
return False
if self.string:
if not isinstance(value, basestring):
return False
return True
def process_value(self, value):
if self.extra:
if hasattr(value, '__iter__'):
return [self.extra(v) for v in value]
else:
return self.extra(value)
else:
return value
def validate(self, value):
"""
        Checks that the value not only looks acceptable but is also valid. Returns
the value.
"""
if hasattr(value, '__iter__'):
if self.min_args and len(value) < self.min_args:
raise ValueError('expected at least %d arguments' % self.min_args)
if self.max_args and self.max_args < len(value):
raise ValueError('expected at most %d arguments' % self.max_args)
return value
class ExistanceLookup(Lookup):
has_custom_value = True
class Condition(object):
"""
Representation of a query condition. Maps lookups to protocol constants.
"""
# each lookup has 1..n definitions that can be used to a) check if the
# lookup suits the expression, and b) to construct the condition in terms
# of low-level API.
LOOKUP_DEFINITIONS = {
'between': [Lookup('RDBQCNUMBT', iterable=True, numeric=True,
min_args=2, max_args=2)],
'contains': [Lookup('RDBQCSTRINC', string=True),
Lookup('RDBQCSTRAND', iterable=True, string=True)],
'contains_any': [Lookup('RDBQCSTROR', iterable=True, string=True)],
'endswith': [Lookup('RDBQCSTREW', string=True)],
'exists': [ExistanceLookup('RDBQCSTRRX', boolean=True, value='')],
'gt': [Lookup('RDBQCNUMGT', numeric=True)],
'gte': [Lookup('RDBQCNUMGE', numeric=True)],
'in': [Lookup('RDBQCSTROREQ', iterable=True, string=True),
Lookup('RDBQCNUMOREQ', iterable=True, numeric=True)],
'is': [Lookup('RDBQCNUMEQ', numeric=True),
Lookup('RDBQCSTREQ')],
'like': [Lookup('RDBQCFTSPH', string=True,
extra=lambda v:v.lower()),
Lookup('RDBQCFTSAND', iterable=True, string=True,
extra=lambda v:v.lower())],
'like_any': [Lookup('RDBQCFTSOR', iterable=True, string=True,
extra=lambda v:v.lower())],
'lt': [Lookup('RDBQCNUMLT', numeric=True)],
'lte': [Lookup('RDBQCNUMLE', numeric=True)],
'matches': [Lookup('RDBQCSTRRX', string=True)],
'search': [Lookup('RDBQCFTSEX', string=True)],
'startswith': [Lookup('RDBQCSTRBW', string=True)],
}
# default lookup (if none provided by the user)
LOOKUP_DEFINITIONS[None] = LOOKUP_DEFINITIONS['is']
def __init__(self, lookup, expr, negate=False):
name, lookup = self._parse_lookup(lookup)
self.name = name
self.lookup = lookup
self.expr = expr
self.negate = negate
def __repr__(self): # pragma: nocover
return u'<%s %s%s %s>' % (self.name, ('not ' if self.negate else ''),
self.lookup, repr(self.expr))
def _clone(self):
return copy.copy(self)
def _parse_lookup(self, lookup):
"""
Expects lookup ("foo", "foo__contains").
Returns column name and the normalized operator name.
"""
if '__' in lookup:
col_name, op_name = lookup.split('__', 1)
else:
col_name, op_name = lookup, 'is'
return col_name, op_name
def prepare(self):
"""
Returns search-ready triple: column name, operator code, expression.
"""
if not self.lookup in self.LOOKUP_DEFINITIONS:
available_lookups = ', '.join(str(x) for x in self.LOOKUP_DEFINITIONS)
raise NameError('Unknown lookup "%s". Available are: %s' %
(self.lookup, available_lookups))
definitions = self.LOOKUP_DEFINITIONS[self.lookup]
for definition in definitions:
if definition.accepts(self.expr):
try:
value = definition.validate(self.expr)
except ValueError, e:
raise ValueError(u'Bad lookup %s__%s=%s: %s' % (
self.name,
self.lookup,
(self.expr if hasattr(self.expr,'__iter__') else u'"%s"'%self.expr),
unicode(e)))
op = definition.operator
# deal with negation: it can be external ("exclude(...)") or
# internal ("foo__exists=False")
negate = self.negate
if definition.has_custom_value:
if isinstance(value, bool) and not value:
# if the value is substituted and only provided to define
# the expected result of a test (yes/no), we must modify
# our internal negation state according to the value
negate = not negate
value = definition.value
else:
value = definition.process_value(value)
if negate:
op = op | TyrantProtocol.RDBQCNEGATE
# boolean values are stored as integers
value = utils.from_python(value)
# flatten list (TC can search tokens)
if hasattr(value, '__iter__'):
value = ', '.join(unicode(x) for x in value)
return self.name, op, value
raise ValueError(u'could not find a definition for lookup "%s" suitable'
u' for value "%s"' % (self.lookup, self.expr))
class Ordering(object):
"""
Representation of ordering policy for a query. Accepts column name,
sorting direction (ascending or descending) and sorting method
(alphabetic or numeric) and selects the appropriate protocol constant.
Default sorting settings are: ascending + alphabetic.
"""
ASC, DESC = 0, 1
ALPHABETIC, NUMERIC = 0, 1
PROTOCOL_MAP = {
DESC: {
NUMERIC: TyrantProtocol.RDBQONUMDESC,
ALPHABETIC: TyrantProtocol.RDBQOSTRDESC
},
ASC: {
NUMERIC: TyrantProtocol.RDBQONUMASC,
ALPHABETIC: TyrantProtocol.RDBQOSTRASC,
}
}
def __init__(self, name=None, direction=None, numeric=False):
self.name = name
self.direction = direction or self.ASC
self.method = self.NUMERIC if numeric else self.ALPHABETIC
def __eq__(self, other):
"""
Returns True if key attributes of compared instances are the same.
"""
if not isinstance(other, type(self)):
            raise TypeError('Expected %s instance, got %s' % (type(self), other))
for attr in 'name', 'direction', 'method':
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __nonzero__(self):
return bool(self.name)
def __repr__(self): # pragma: nocover
return u'<Order by %s (%s, %s)>' % (
self.name,
'desc' if self.direction else 'asc',
'numeric' if self.method else 'alphabetic',
)
@property
def type(self):
return self.PROTOCOL_MAP[self.direction][self.method]
class ResultCache(object):
"""
Represents query results. Implements result caching by chunks. Supports
slicing and access by item index. Intended to be used internally by
:class:`~pyrant.query.Query` objects.
"""
def __init__(self, query, chunk_size=None):
self.query = query
self.chunks = {}
self.keys = None
self.chunk_size = chunk_size or CACHE_CHUNK_SIZE
def get_keys(self, getter):
"""
Returns cached list of keys. If it is not yet defined, calls the
`getter` which must provide such list.
"""
assert hasattr(getter, '__call__'), (
'getter must be a callable, got %s' % getter)
if self.keys is None:
keys = getter()
assert hasattr(keys, '__iter__'), (
'getter must return an iterable, got %s' % keys)
self.keys = list(keys)
return self.keys
def get_item(self, index):
"""
Returns an item corresponding to current query and given index. Fills
related chunk of cache behind the scenes.
"""
chunk = self.get_chunk_number(index)
items = self.get_chunk_data(chunk) or []
start, _ = self.get_chunk_boundaries(chunk)
return items[index - start]
def get_items(self, start, stop=None):
"""
Generates a sequence of items corresponding to current query and given
slice boundaries. Fills related chunks of cache behind the scenes.
"""
if stop:
assert start < stop
chunk = self.get_chunk_number(start)
while 1:
chunk_start, chunk_stop = self.get_chunk_boundaries(chunk)
if stop and stop <= chunk_start:
raise StopIteration
data = self.get_chunk_data(chunk)
if data is None:
raise StopIteration
for i, item in enumerate(data):
if stop and stop <= chunk_start + i:
raise StopIteration
if start <= chunk_start + i:
yield item
chunk += 1
def get_chunk_number(self, index):
"""
        Returns the number of the chunk to which the given item index belongs. For
example, if chunk size is set to 10, item #5 will belong to chunk #0
and item with index #25 will be found in chunk #2.
"""
return index / self.chunk_size
def get_chunk_boundaries(self, number):
"""
Returns first and last item indices that belong to given chunk. For
example, if chunk size is set to 10, the first chunk will have
boundaries `(0, 9)`, the second -- `(10, 19)` and so on.
"""
start = number * self.chunk_size
stop = start + self.chunk_size - 1
return start, stop
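    # Worked example (illustration only): with a chunk size of 10, item #25
    # lives in chunk #2, whose boundaries are (20, 29).
    #
    #     cache = ResultCache(query, chunk_size=10)
    #     cache.get_chunk_number(25)        # -> 2
    #     cache.get_chunk_boundaries(2)     # -> (20, 29)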
def get_chunk_data(self, number):
"""
Returns a list of items that belong to given chunk. Hits the database
and fills chunk cache. If there are no items for the chunk, returns
`None`.
"""
# TODO: do not create empty chunks; check if right boundary is within
# keys length
        if number not in self.chunks:
# fill cache chunk
assert self.keys is not None, 'Cache keys must be filled by query'
start, stop = self.get_chunk_boundaries(number)
# make sure the chunk is not going to be empty
if len(self.keys) <= start:
return None
# get keys that correspond to the chunk
keys = self.keys[start:stop+1]
if not keys:
return None
# hit the database: retrieve values for these keys
pairs = self.query._proto.mget(keys)
# extend previously created empty list
prep = lambda k,v: (k, self.query._to_python(v))
self.chunks[number] = [prep(k,v) for k,v in pairs]
return self.chunks[number]
|
|
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.utils.text import slugify
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.utils import simplejson
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from django_countries.countries import OFFICIAL_COUNTRIES
from datetime import datetime
from .forms import (
RegistrationForm,
TournamentPageForm,
TournamentForm,
TournamentPlayerForm,
EmailPlayersForm,
TournamentRegistrationForm,
TournamentNewsItemForm)
from .models import (
TournamentPage,
TournamentPlayer,
NoTournamentSpotsException,
TournamentNewsItem)
from .utils.pdga import PDGARanking
FLIPPED_COUNTRIES = dict([(x, y) for y, x in OFFICIAL_COUNTRIES.items()])
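# Illustration (hypothetical entry): OFFICIAL_COUNTRIES maps ISO codes to
# upper-cased country names, so the flipped dict resolves a name back to its
# code, which is what check_pdga_number() relies on below.
#
#     FLIPPED_COUNTRIES.get('NORWAY')   # -> 'NO'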
@login_required
@csrf_exempt
def ajax_player_action(request):
t = request.tournament
# User must be admin
if not request.is_tournament_admin:
raise Http404
# We only accept POST requests
if request.method != 'POST':
raise Http404
# Our allowed actions for a player
allowed_actions = [
'waiting-list-accept',
'waiting-list-remove',
]
action = request.POST.get('action')
player_id = request.POST.get('tournamentplayer_id')
json_data = {
'success': False,
}
try:
player = request.tournament.tournamentplayer_set.get(
id=player_id)
except TournamentPlayer.DoesNotExist:
raise Http404
if action not in allowed_actions:
raise Http404
if action == 'waiting-list-remove':
player.delete()
json_data.update({
'success': True,
'removed': True})
if action == 'waiting-list-accept':
try:
player.accept_player()
except NoTournamentSpotsException:
json_data.update({'error': 'No available tournament spots'})
else:
json_data.update({
'success': True,
'removed': True})
# Add some counters
json_data.update({
'updater_data': {
'wildcard-spots': t.wildcard_spots,
'available-spots': t.get_available_spots(),
'waiting-list-count': t.get_waiting_list_count(),
'player-list-count': t.get_player_list_count(),
'max-players': t.max_players,
}
})
return HttpResponse(
simplejson.dumps(json_data),
mimetype='application/json')
def options(request):
return render(
request,
'tournament/admin/options.html')
def waiting_list(request, embed=False):
tournament = request.tournament
players = tournament.tournamentplayer_set.filter(
is_waiting_list=True).order_by('registered')
tmpl_dict = {
'players': players,
'is_embedded': embed,
'extends_tmpl': 'tournament/tournament_base.html',
'csrf': csrf(request),
}
if embed:
tmpl_dict.update({
            'extends_tmpl': 'tournament/embed_base.html'})
return render(
request,
'tournament/players/waiting-list.html',
tmpl_dict)
@login_required
def email_players(request):
t = request.tournament
if not request.is_tournament_admin:
raise Http404
tmpl_dict = {}
if request.method == 'POST':
form = EmailPlayersForm(request.POST)
if form.is_valid():
form.save(tournament=t)
messages.success(
request,
_('Email has been sent out according to your selections.'))
return HttpResponseRedirect(reverse(
'tournament-admin-email-players'))
else:
form = EmailPlayersForm()
if t.tournament_admin_email:
form.fields['sender'].initial = t.tournament_admin_email
form.fields['email_player_list'].label = \
'Email accepted players (%i)' % t.get_player_list_email_count()
form.fields['email_waiting_list'].label = \
'Email players on waiting list (%i)' % t.get_waiting_list_email_count()
tmpl_dict.update({
'form': form})
return render(
request,
'tournament/admin/email-players.html',
tmpl_dict)
@login_required
def edit_tournament(request):
if not request.is_tournament_admin:
raise Http404
if request.method == 'POST':
form = TournamentForm(
request.POST,
instance=request.tournament)
if form.is_valid():
t = form.save()
messages.success(
request,
_('Tournament has been updated.'))
return HttpResponseRedirect(reverse(
'tournament-admin-edit'))
else:
form = TournamentForm(
instance=request.tournament)
tmpl_dict = {
'form': form,
}
return render(
request,
'tournament/admin/edit-tournament.html',
tmpl_dict)
def player_edit_registration(request, tp_id):
# User must be admin
if not request.is_tournament_admin:
raise Http404
try:
tp = request.tournament.tournamentplayer_set.get(
id=tp_id)
except TournamentPlayer.DoesNotExist:
raise Http404
if request.method == 'POST':
form = TournamentRegistrationForm(
request.POST,
instance=tp)
if form.is_valid():
tp = form.save()
messages.success(
request,
_('Tournament registration has been updated.'))
return HttpResponseRedirect(reverse(
'tournament-registration-edit', args=[tp.id]))
else:
form = TournamentRegistrationForm(instance=tp)
tmpl_dict = {
'form': form,
'player': tp,
}
return render(
request,
'tournament/admin/edit-registration.html',
tmpl_dict)
def player_edit(request, tp_id):
# User must be admin
if not request.is_tournament_admin:
raise Http404
try:
tp = request.tournament.tournamentplayer_set.get(
id=tp_id)
except TournamentPlayer.DoesNotExist:
raise Http404
if request.method == 'POST':
form = TournamentPlayerForm(
request.POST,
instance=tp.player)
if form.is_valid():
player = form.save()
messages.success(
request,
_('Tournament player has been updated.'))
return HttpResponseRedirect(reverse(
'tournament-player-edit', args=[tp.id]))
else:
form = TournamentPlayerForm(instance=tp.player)
tmpl_dict = {
'form': form,
'player': tp,
}
return render(
request,
'tournament/admin/edit-player.html',
tmpl_dict)
def players(request, embed=False):
extends_tmpl = 'tournament/tournament_base.html'
if embed:
extends_tmpl = 'tournament/embed_base.html'
tmpl_dict = {
'csrf': csrf(request),
'extends_tmpl': extends_tmpl,
'is_embedded': embed,
}
return render(
request, 'tournament/players.html', tmpl_dict)
def check_pdga_number(request):
num = request.GET.get('pdga_number', False)
json_data = {
'success': False,
}
if num:
pdga = PDGARanking(num)
if pdga.rating:
# Make some checks on location to
# auto-detect country as well
country_code = None
if pdga.location:
country_search = pdga.location.split(
', ')[1].upper()
country_code = FLIPPED_COUNTRIES.get(country_search, None)
if country_code:
json_data.update({'country_code': country_code})
json_data.update({
'rating': pdga.rating,
'name': pdga.name,
'success': True})
return HttpResponse(
simplejson.dumps(json_data),
mimetype='application/json')
def registration_complete(request, embed=False):
extends_tmpl = 'tournament/tournament_base.html'
if embed:
extends_tmpl = 'tournament/embed_base.html'
tmpl_dict = {'extends_tmpl': extends_tmpl}
return render(
request,
'tournament/registration-complete.html',
tmpl_dict)
def registration(request, embed=False):
tournament = request.tournament
extends_tmpl = 'tournament/tournament_base.html'
if embed:
extends_tmpl = 'tournament/embed_base.html'
if request.method == 'POST':
form = RegistrationForm(
request.POST,
tournament=tournament)
if form.is_valid():
was_full = tournament.is_registration_full()
tp = form.save()
# Redirect to Paypal if payments is turned on
if tournament.enable_payments:
tp.is_pending_payment = True
tp.save()
request.session['last_tournamentplayer_id'] = tp.id
url = tp.get_paypal_redirect_url()
return HttpResponseRedirect(url)
if was_full:
tp.is_waiting_list = True
tp.save()
else:
tp.send_registration_email()
if embed:
return HttpResponseRedirect(reverse(
'tournament-registration-complete-embed'))
else:
return HttpResponseRedirect(reverse(
'tournament-registration-complete'))
else:
form = RegistrationForm(
tournament=tournament)
tmpl_dict = {
'form': form,
'extends_tmpl': extends_tmpl,
'is_embedded': embed,
}
if tournament.registration_stages:
tmpl_dict.update({
'current_stage': tournament.get_registration_stage()
})
response = render(
request, 'tournament/registration.html', tmpl_dict)
# We need this header so IE will allow third-party
# cookies (required for the embedded iframes)
response['P3P'] = "CP=\"CAO PSA OUR\""
return response
def paypal_return(request):
import paypalrestsdk
tournament = request.tournament
payment_id = request.GET.get('paymentId')
payer_id = request.GET.get('PayerID')
tp = tournament.tournamentplayer_set.get(
paypal_payment_id=payment_id)
tp.paypal_payer_id = payer_id
tp.save()
paypal_api = tp.get_paypal_api()
payment = paypalrestsdk.Payment.find(payment_id, api=paypal_api)
is_full = tournament.is_registration_full()
if payment.execute({'payer_id': payer_id}):
tp.is_pending_payment = False
tp.is_paid = True
if is_full:
tp.is_waiting_list = True
else:
tp.send_registration_email()
tp.save()
return HttpResponseRedirect(reverse(
'tournament-registration-complete'))
else:
return HttpResponse('Unable to execute payment')
def paypal_cancel(request):
tp_id = request.session.get('last_tournamentplayer_id', False)
if tp_id:
tp = request.tournament.tournamentplayer_set.get(id=tp_id)
tp.delete()
del request.session['last_tournamentplayer_id']
return render(
request, 'tournament/paypal_cancel.html')
raise Exception(request.session['last_tournamentplayer_id'])
pass
def index(request):
try:
page = request.tournament.tournamentpage_set.get(
slug='frontpage')
except TournamentPage.DoesNotExist:
page = None
news_items = request.tournament.tournamentnewsitem_set.filter(
is_published=True).order_by('-published')
tmpl_dict = {
'page': page,
'news_items': news_items,
}
return render(
request,
'tournament/index.html',
tmpl_dict)
def page_edit(request, slug):
page = get_object_or_404(
TournamentPage, slug=slug, tournament=request.tournament)
if request.method == 'POST':
form = TournamentPageForm(
request.POST, instance=page)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse(
'tournament-page', args=[page.slug, ]))
else:
form = TournamentPageForm(instance=page)
tmpl_dict = {
'page': page,
'form': form,
'sidebar': False,
}
return render(
request, 'tournament/page_edit.html', tmpl_dict)
def page(request, slug):
page = get_object_or_404(
TournamentPage, slug=slug, tournament=request.tournament)
tmpl_dict = {
'page': page
}
return render(
request, 'tournament/page.html', tmpl_dict)
def news_item(request, slug):
lookup_args = {
'slug': slug,
}
if not request.user.is_authenticated():
lookup_args.update({
'is_published': True})
try:
item = request.tournament.tournamentnewsitem_set.get(
**lookup_args)
except TournamentNewsItem.DoesNotExist:
raise Http404
tmpl_dict = {
'news_item': item,
}
return render(
request,
'tournament/news_item.html',
tmpl_dict)
def news_edit(request, slug=None):
if not request.user.is_authenticated():
return HttpResponse('No access!')
create_new = True
kwargs = {}
tmpl_dict = {}
if slug:
news_item = get_object_or_404(
TournamentNewsItem,
tournament=request.tournament,
slug=slug)
kwargs.update({'instance': news_item})
tmpl_dict.update({'news_item': news_item})
create_new = False
if request.method == 'POST':
form = TournamentNewsItemForm(request.POST, **kwargs)
if form.is_valid():
item = form.save(commit=False)
if create_new:
item.user = request.user
item.tournament = request.tournament
item.created = datetime.now()
item.slug = slugify(item.title)
if item.is_published and item.published is None:
item.published = datetime.now()
item.save()
return HttpResponseRedirect(reverse(
'tournament-news-item', args=[item.slug, ]))
else:
form = TournamentNewsItemForm(**kwargs)
tmpl_dict.update({
'form': form,
'sidebar': None,
})
return render(
request,
'tournament/news_item_edit.html',
tmpl_dict)
|
|
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
import math
import sys
import scipy
import scipy.optimize
from scipy.stats.mstats import mquantiles as quantiles
import scipy.stats
### New: added possibility to use emcee for MCMCs
try:
import emcee
# import acor
emcee_import = True
except ImportError:
print("Emcee and Acor not installed. Using Metropolis-Hastings algorithm for Markov Chain Monte Carlo simulations.")
emcee_import = False
from BayesPSD import utils
from BayesPSD import powerspectrum
### See if cutting-edge numpy is installed so I can use choice
try:
from numpy.random import choice
### if not, use hack
except ImportError:
choice = utils.choice_hack
class MarkovChainMonteCarlo(object):
"""
Markov Chain Monte Carlo for Bayesian QPO searches.
Either wraps around emcee, or uses the
Metropolis-Hastings sampler defined in this file.
Parameters
----------
x : {list, array-like}
        Independent variable, most likely the frequencies of the
periodogram in this context.
y : {list, array-like}
Dependent variable, most likely the powers of the
periodogram in this context.
lpost : Posterior object
An instance of the class Posterior or one of its subclasses;
defines the likelihood and priors to be used.
For periodograms, use
* posterior.PerPosterior for unbinned periodograms
* posterior.StackPerPosterior for binned/stacked periodograms
topt : {list, array-like}
Starting point for generating an initial set of parameter samples.
Should be in a region of high posterior, such that the chains
don't spend a long time exploring regions with low posterior mass.
If possible, make a MAP fit and use the MAP parameters here.
The length of topt needs to match the number of parameters used
in whatever function is stored in lpost.func
tcov: {array-like}
        The variances and covariances between the parameters used to generate an
initial set of parameter samples for all chains/walkers.
There are several options here: you can set large variances and no
covariances and effectively leave the Markov chains to explore
the prior mass until they converge. You can also use the inverse
Fisher information (as for example returned by bfgs) as covariance
matrix to make an initial guess. This usually works better in the sense
that it requires fewer steps of the Markov chains.
        tcov needs to have dimensions (k,k), where k is the number of parameters
taken by lpost.func
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
discard : {int, None}, optional, default None
The number of initial samples to discard from the Markov chain.
For emcee, the burn-in time is *always* 200 samples (additional to
whatever is set by niter).
For the Metropolis-Hastings algorithm, the number of initial samples
discarded is set by this variable.
If discard is None, then half of the samples are discarded as default.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
check_conv : boolean, optional, default True
If True, check for convergence of the Markov chains using check_convergence
method below.
NOTE: This was set up explicitly for Metropolis-Hastings. For emcee,
this might not necessarily produce easily interpretable results.
namestr : string, optional, default 'test'
a string to use for saving plots and output files
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
plot : boolean, optional, default True
If True, then save some useful plots; in particular,
convergence plots as well as a triangle plot showing
the posterior distributions
printobj : object, optional, default None
In theory, this allows the use of an alternative
to the standard print function in order to save
information to file etc.
NOTE: CURRENTLY DOESN'T WORK PROPERLY!
m : int, optional, default 1
If the input periodogram is the result of stacking
several individual periodograms, or the result of
binning adjacent frequencies into a coarser frequency
resolution, then the distribution to be used in the
likelihood function is different!
Set the number of periodograms averaged/stacked here.
"""
def __init__(self, x, y, lpost, topt, tcov,
covfactor=1.0,
niter=5000,
nchain=10,
discard=None,
parname = None,
check_conv = True,
namestr='test',
use_emcee=True,
plot=True,
printobj = None,
m=1):
self.m = m
self.x = x
self.y = y
self.plot = plot
print("<--- self.ps len MCMC: " + str(len(self.x)))
### set of optimal parameters from MLE fitting
self.topt = topt
print("mcobs topt: " + str(self.topt))
### covariances of fitted parameters
self.tcov = tcov*covfactor
print("mcobs tcov: " + str(self.tcov))
### number of iterations for MCMC algorithm
self.niter = niter
### number of MCMC chains to be computed
self.nchain = nchain
### Error in the fitted parameters
self.terr = np.sqrt(np.diag(tcov))
### function that was fitted
self.lpost = lpost
if discard == None:
discard = math.floor(niter/2.0)
mcall = []
### if emcee package is not installed, enforce Metropolis-Hastings implementation
if emcee_import == False:
print("Emcee not installed. Enforcing M-H algorithm!")
use_emcee = False
### if emcee should be used, then use code below
if use_emcee:
### number of walkers is the number of chains
nwalkers = self.nchain
### number of dimensions for the Gaussian (=number of parameters)
ndim = len(self.topt)
### sample random starting positions for each of the walkers
p0 = [np.random.multivariate_normal(self.topt,self.tcov) for i in xrange(nwalkers)]
### initialize sampler
sampler = emcee.EnsembleSampler(nwalkers,ndim, lpost, args=[False])
### run burn-in phase and reset sampler
pos, prob, state = sampler.run_mcmc(p0, 200)
sampler.reset()
### run actual MCMCs
sampler.run_mcmc(pos, niter, rstate0=state)
### list of all samples stored in flatchain
mcall = sampler.flatchain
            ### print mean acceptance rate for all walkers and autocorrelation times
print("The ensemble acceptance rate is: " + str(np.mean(sampler.acceptance_fraction)))
self.L = np.mean(sampler.acceptance_fraction)*len(mcall)
self.acceptance = np.mean(sampler.acceptance_fraction)
try:
self.acor = sampler.acor
print("The autocorrelation times are: " + str(sampler.acor))
except ImportError:
print("You can install acor: http://github.com/dfm/acor")
self.acor = None
except RuntimeError:
print("D was negative. No clue why that's the case! Not computing autocorrelation time ...")
self.acor = None
except:
print("Autocorrelation time calculation failed due to an unknown error: " + str(sys.exc_info()[0]) + ". Not computing autocorrelation time.")
self.acor = None
        ### if use_emcee == False, then use the MH algorithm as defined in the MetropolisHastings object below
else:
### loop over all chains
for i in range(nchain):
#t0 = topt + choice([2.0, 3.0, -3.0, -2.0], size=len(topt))*self.terr
### set up MarkovChain object
mcout = MetropolisHastings(topt, tcov, lpost, niter = niter, parname = parname, discard = discard)
### create actual chain
mcout.create_chain(self.x, self.y)
### make diagnostic plots
mcout.run_diagnostics(namestr = namestr +"_c"+str(i), parname=parname)
mcall.extend(mcout.theta)
self.L = mcout.L
mcall = np.array(mcall)
### check whether chains/walkers converged
if check_conv == True:
self.check_convergence(mcall, namestr, printobj = printobj)
### transpose list of parameter sets so that I have lists with one parameter each
self.mcall = mcall.transpose()
### make inferences from MCMC chain, plot to screen and save plots
self.mcmc_infer(namestr=namestr, printobj = printobj)
def check_convergence(self, mcall, namestr, printobj=None, use_emcee = True):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
### compute Rhat for all parameters
rh = self._rhat(mcall, printobj)
self.rhat = rh
plt.scatter(rh, np.arange(len(rh))+1.0 )
plt.axis([0.1,2,0.5,0.5+len(rh)])
plt.xlabel("$R_hat$")
plt.ylabel("Parameter")
plt.title('Rhat')
plt.savefig(namestr + '_rhat.png', format='png')
plt.close()
### compute 80% quantiles
ci0, ci1 = self._quantiles(mcall)
### set array with colours
### make sure there are enough colours available
colours_basic = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
cneeded = int(math.ceil(len(ci0[0])/7.0))
colours = []
for x in range(cneeded):
colours.extend(colours_basic)
### plot 80% quantiles
if self.plot:
plt.plot(0,0)
plt.axis([-2, 2, 0.5, 0.5+len(ci0)])
for j in range(self.nchain):
plt.hlines(y=[m+(j)/(4.0*self.nchain) for m in range(len(ci0))], xmin=[x[j] for x in ci0], xmax=[x[j] for x in ci1], color=colours[j])
#plt.hlines(y=[m+1.0+(1)/(4*self.nchain) for m in np.arange(len(ci0))], xmin=[x[1] for x in ci0], xmax=[x[1] for x in ci1], color=colours[j])
plt.xlabel("80% region (scaled)")
plt.ylabel("Parameter")
plt.title("80% quantiles")
plt.savefig(namestr + "_quantiles.png", format="png")
plt.close()
### auxiliary function used in check_convergence
### computes R_hat, which compares the variance inside chains to the variances between chains
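    ### In the notation used below: with L samples per chain, W is the mean of
    ### the within-chain variances and B = L * var(chain means). The marginal
    ### posterior variance estimate is mpv = ((L - 1)/L)*W + B/L and
    ### R_hat = sqrt(mpv/W); values close to 1 indicate converged chains.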
def _rhat(self, mcall, printobj = None):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
print("Computing Rhat. The closer to 1, the better!")
rh = []
### loop over parameters ###
for i,k in enumerate(self.topt):
### pick parameter out of array
tpar = np.array([t[i] for t in mcall])
### reshape back into array of niter*nchain dimensions
tpar = np.reshape(tpar, (self.nchain, len(tpar)/self.nchain))
### compute mean of variance of each chain
#### THIS DOESN'T WORK FOR SOME REASON! TAKES VARIANCE OF EACH ELEMENT!!!
### CHECK THIS!
sj = map(lambda y: np.var(y), tpar)
W = np.mean(sj)
### compute variance of means of each chain
mj = map(lambda y: np.mean(y), tpar)
### note: this assumes the discards
B = np.var(mj)*self.L
## now compute marginal posterior variance
mpv = ((float(self.L)-1.0)/float(self.L))*W + B/float(self.L)
### compute Rhat
rh.append(np.sqrt(mpv/W))
### print convergence message on screen:
print("The Rhat value for parameter " + str(i) + " is: " + str(rh[i]) + ".")
if rh[i] > 1.2:
print("*** HIGH Rhat! Check results! ***")
else:
print("Good Rhat. Hoorah!")
return rh
def _quantiles(self, mcall):
### empty lists for quantiles
ci0, ci1 = [], []
### loop over the parameters ###
for i,k in enumerate(self.topt):
print("I am on parameter: " + str(i))
### pick parameter out of array
tpar = np.array([t[i] for t in mcall])
### reshape back into array of niter*nchain dimensions
tpar = np.reshape(tpar, (self.nchain, len(tpar)/self.nchain))
### compute mean of variance of each chain
intv = map(lambda y: quantiles(y, prob=[0.1, 0.9]), tpar)
### quantiles will return a list with two elements for each
### chain: the 0.1 and 0.9 quantiles
### need to pick out these for each chain
c0 = np.array([x[0] for x in intv])
c1 = np.array([x[1] for x in intv])
### now compute the scale
scale = np.mean(c1-c0)/2.0
### compute means of each chain
mt = map(lambda y: np.mean(y), tpar)
### mean of means of all chains
offset = np.mean(mt)
### rescale quantiles (WHY??)
ci0.append((c0 - offset)/scale)
ci1.append((c1 - offset)/scale)
return ci0, ci1
def mcmc_infer(self, namestr='test', printobj = None):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
### covariance of the parameters from simulations
covsim = np.cov(self.mcall)
print("Covariance matrix (after simulations): \n")
print(str(covsim))
### calculate for each parameter its (posterior) mean and equal tail
### 90% (credible) interval from the MCMC
self.mean = map(lambda y: np.mean(y), self.mcall)
self.std = map(lambda y: np.std(y), self.mcall)
self.ci = map(lambda y: quantiles(y, prob=[0.05, 0.95]), self.mcall)
### print to screen
print("-- Posterior Summary of Parameters: \n")
print("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
print("---------------------------------------------\n")
for i in range(len(self.topt)):
print("theta[" + str(i) + "] \t " + str(self.mean[i]) + "\t" + str(self.std[i]) + "\t" + str(self.ci[i][0]) + "\t" + str(self.ci[i][1]) + "\n" )
### produce matrix scatter plots
N = len(self.topt) ### number of parameters
print("N: " + str(N))
n, bins, patches = [], [], []
if self.plot:
fig = plt.figure(figsize=(15,15))
plt.subplots_adjust(top=0.925, bottom=0.025, left=0.025, right=0.975, wspace=0.2, hspace=0.2)
for i in range(N):
for j in range(N):
xmin, xmax = self.mcall[j][:1000].min(), self.mcall[j][:1000].max()
ymin, ymax = self.mcall[i][:1000].min(), self.mcall[i][:1000].max()
ax = fig.add_subplot(N,N,i*N+j+1)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.ticklabel_format(style="sci", scilimits=(-2,2))
if i == j:
#pass
ntemp, binstemp, patchestemp = ax.hist(self.mcall[i][:1000], 30, normed=True, histtype='stepfilled')
n.append(ntemp)
bins.append(binstemp)
patches.append(patchestemp)
ax.axis([ymin, ymax, 0, max(ntemp)*1.2])
else:
ax.axis([xmin, xmax, ymin, ymax])
### make a scatter plot first
ax.scatter(self.mcall[j][:1000], self.mcall[i][:1000], s=7)
### then add contours
xmin, xmax = self.mcall[j][:1000].min(), self.mcall[j][:1000].max()
ymin, ymax = self.mcall[i][:1000].min(), self.mcall[i][:1000].max()
### Perform Kernel density estimate on data
try:
X,Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([self.mcall[j][:1000], self.mcall[i][:1000]])
kernel = scipy.stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.contour(X,Y,Z,7)
except ValueError:
print("Not making contours.")
plt.savefig(namestr + "_scatter.png", format='png')
plt.close()
return
#### POSTERIOR PREDICTIVE CHECKS ################
#
# Note: fpeak is calculated in mle.PerMaxLike.compute_stats
# and can be found in dictionary self.pl_r or self.bpl_r
#
## nsim [int] = number of simulations
## dist [str] = distribution, one of
## "exp": exponential distribution (=chi2_2), np.random.exponential
## "chisquare": chi^2 distribution with df degrees of freedom
## df [int] = degrees of freedom for chi^2 distribution
def simulate_periodogram(self, nsim=5000):
"""
Simulate periodograms from posterior samples of the
broadband noise model.
This method uses the results of an MCMC run to
pick samples from the posterior and use the function
stored in self.lpost.func to create a power spectral form.
In order to transform this into a model periodogram,
it picks for each frequency from an exponential distribution
with a shape parameter corresponding to the model power
at that frequency.
Parameters
----------
nsim : int, optional, default 5000
The number of periodograms to simulate. This number
must be smaller than the number of samples generated
during the MCMC run.
Returns
-------
fps : array-like
An array of shape (nsim, nfrequencies) with all
simulated periodograms.
"""
## the function to use is stored in lpost:
func = self.lpost.func
### number of simulations is either given by the user,
### or defined by the number of MCMCs run!
nsim = min(nsim,len(self.mcall[0]))
### shuffle MCMC parameters
theta = np.transpose(self.mcall)
#print "theta: " + str(len(theta))
np.random.shuffle(theta)
fps = []
percount = 1.0
for x in range(nsim):
### extract parameter set
ain = theta[x]
### compute model 'true' spectrum
mpower = func(self.x, *ain)
### define distribution
if self.m == 1:
#print("m = 1")
noise = np.random.exponential(size=len(self.x))
else:
#print("m = " + str(self.m))
noise = np.random.chisquare(2*self.m, size=len(self.x))/(2.0*self.m)
### add random fluctuations
mpower = mpower*noise
### save generated power spectrum in a PowerSpectrum object
mps = powerspectrum.PowerSpectrum()
mps.freq = self.x
mps.ps = mpower
mps.df = self.x[1] - self.x[0]
mps.n = 2.0*len(self.x)
mps.nphots = mpower[0]
mps.m = self.m
fps.append(mps)
return np.array(fps)
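    # Hypothetical usage sketch (assumes `mcobs` is a MarkovChainMonteCarlo
    # instance whose MCMC run has already completed):
    #
    #     fake_pers = mcobs.simulate_periodogram(nsim=1000)
    #     # each element is a powerspectrum.PowerSpectrum with .freq and .ps set
    #     max_powers = [np.max(fp.ps) for fp in fake_pers]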
#### MAKE A MARKOV CHAIN OBJECT ###
#
# QUESTION: How can I make an object with variable
# parameters?
#
#
#
# NEED TO THINK ABOUT HOW TO GET ATTRIBUTES!
#
class MetropolisHastings(object):
"""
Parameters
----------
topt : {list, array-like}
Starting point for generating an initial set of parameter samples.
Should be in a region of high posterior, such that the chains
don't spend a long time exploring regions with low posterior mass.
If possible, make a MAP fit and use the MAP parameters here.
The length of topt needs to match the number of parameters used
in whatever function is stored in lpost.func
tcov: {array-like}
        The variances and covariances between the parameters used to generate an
initial set of parameter samples for all chains/walkers.
There are several options here: you can set large variances and no
covariances and effectively leave the Markov chains to explore
the prior mass until they converge. You can also use the inverse
Fisher information (as for example returned by bfgs) as covariance
matrix to make an initial guess. This usually works better in the sense
that it requires fewer steps of the Markov chains.
        tcov needs to have dimensions (k,k), where k is the number of parameters
taken by lpost.func
lpost : Posterior object
An instance of the class Posterior or one of its subclasses;
defines the likelihood and priors to be used.
For periodograms, use
* posterior.PerPosterior for unbinned periodograms
* posterior.StackPerPosterior for binned/stacked periodograms
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
discard : {int, None}, optional, default None
The number of initial samples to discard from the Markov chain.
For emcee, the burn-in time is *always* 200 samples (additional to
whatever is set by niter).
For the Metropolis-Hastings algorithm, the number of initial samples
discarded is set by this variable.
If discard is None, then half of the samples are discarded as default.
"""
def __init__(self, topt, tcov, lpost, niter = 5000,
parname=None, discard=None):
self.niter = niter
self.topt = topt
self.tcov = tcov
self.terr = np.sqrt(np.diag(tcov))
self.t0 = topt + choice([2.0, 3.0, -3.0, -2.0], size=len(topt))*self.terr
self.lpost = lpost
self.terr = np.sqrt(np.diag(tcov))
if discard == None:
self.discard = int(niter/2)
else:
self.discard = int(discard)
if parname == None:
            self.parname = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'iota', 'kappa', 'lambda', 'mu']
else:
self.parname = parname
### set up MCMC chain
### possible distributions:
### - 'mvn': multi-variate normal (default)
### - 'stt': student t-test
def create_chain(self, x, y, topt=None, tcov = None, t0 = None, dist='mvn'):
        if topt is not None:
            self.topt = topt
        if tcov is not None:
            self.tcov = tcov
        if t0 is not None:
            self.t0 = t0
### set up distributions
if dist=='mvn':
dist = np.random.multivariate_normal
### set acceptance value to zero
accept = 0.0
### set up array
ttemp, logp = [], []
ttemp.append(self.t0)
#lpost = posterior.PerPosterior(self.ps, self.func)
logp.append(self.lpost(self.t0, neg=False))
for t in np.arange(self.niter-1)+1:
tprop = dist(ttemp[t-1], self.tcov)
pprop = self.lpost(tprop)#, neg=False)
logr = pprop - logp[t-1]
logr = min(logr, 0.0)
r= np.exp(logr)
update = choice([True, False], size=1, p=[r, 1.0-r])
if update:
ttemp.append(tprop)
logp.append(pprop)
if t > self.discard:
accept = accept + 1
else:
ttemp.append(ttemp[t-1])
logp.append(logp[t-1])
self.theta = ttemp[self.discard+1:]
self.logp = logp[self.discard+1:]
self.L = self.niter - self.discard
self.accept = accept/self.L
return
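    # Hypothetical usage sketch (illustration only; `x`, `y`, `map_pars`,
    # `inverse_fisher` and `lpost` are assumed to come from an earlier fit):
    #
    #     mh = MetropolisHastings(map_pars, inverse_fisher, lpost, niter=20000)
    #     mh.create_chain(x, y)
    #     mh.run_diagnostics(namestr='myqpo_c0')
    #
    # In create_chain above, a proposal is accepted with probability
    # r = exp(min(logr, 0)), i.e. min(1, exp(logr)), where logr is the
    # difference between the proposed and current log-posterior values.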
def run_diagnostics(self, namestr=None, parname=None, printobj = None):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
print("Markov Chain acceptance rate: " + str(self.accept) +".")
if namestr == None:
print("No file name string given for printing. Setting to 'test' ...")
namestr = 'test'
if parname == None:
            parname = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'iota', 'kappa', 'lambda', 'mu']
fig = plt.figure(figsize=(12,10))
adj =plt.subplots_adjust(hspace=0.4, wspace=0.4)
for i,th in enumerate(self.theta[0]):
ts = np.array([t[i] for t in self.theta])
p1 = plt.subplot(len(self.topt), 3, (i*3)+1)
p1 = plt.plot(ts)
plt.axis([0, len(ts), min(ts), max(ts)])
plt.xlabel("Number of draws")
plt.ylabel("parameter value")
plt.title("Time series for parameter " + str(parname[i]) + ".")
p2 = plt.subplot(len(self.topt), 3, (i*3)+2)
### plotting histogram
p2 = count, bins, ignored = plt.hist(ts, bins=10, normed=True)
bnew = np.arange(bins[0], bins[-1], (bins[-1]-bins[0])/100.0)
p2 = plt.plot(bnew, 1.0/(self.terr[i]*np.sqrt(2*np.pi))*np.exp(-(bnew - self.topt[i])**2.0/(2.0*self.terr[i]**2.0)), linewidth=2, color='r')
plt.xlabel('value of ' + str(parname[i]))
plt.ylabel('probability')
plt.title("Histogram for parameter " + str(parname[i]) + ".")
nlags = 30
p3 = plt.subplot(len(self.topt), 3, (i*3)+3)
acorr = autocorr(ts,nlags=nlags, norm=True)
p3 = plt.vlines(range(nlags), np.zeros(nlags), acorr, colors='black', linestyles='solid')
plt.axis([0.0, nlags, 0.0, 1.0])
plt.savefig(namestr + "_diag.png", format='png',orientation='landscape')
plt.close()
##############################################################
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for host-related functions (start, reboot, etc).
"""
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
class Host(object):
"""
Implements host related operations.
"""
def __init__(self, session):
self._session = session
def host_power_action(self, host, action):
"""Reboots or shuts down the host."""
host_mor = vm_util.get_host_ref(self._session)
LOG.debug(_("%(action)s %(host)s"), {'action': action, 'host': host})
if action == "reboot":
host_task = self._session._call_method(
self._session._get_vim(),
"RebootHost_Task", host_mor,
force=False)
elif action == "shutdown":
host_task = self._session._call_method(
self._session._get_vim(),
"ShutdownHost_Task", host_mor,
force=False)
elif action == "startup":
host_task = self._session._call_method(
self._session._get_vim(),
"PowerUpHostFromStandBy_Task", host_mor,
timeoutSec=60)
self._session._wait_for_task(host, host_task)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_mor = vm_util.get_host_ref(self._session)
LOG.debug(_("Set maintenance mod on %(host)s to %(mode)s"),
{'host': host, 'mode': mode})
if mode:
host_task = self._session._call_method(
self._session._get_vim(),
"EnterMaintenanceMode_Task",
host_mor, timeout=0,
evacuatePoweredOffVms=True)
else:
host_task = self._session._call_method(
self._session._get_vim(),
"ExitMaintenanceMode_Task",
host_mor, timeout=0)
self._session._wait_for_task(host, host_task)
def set_host_enabled(self, _host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
class HostState(object):
"""Manages information about the ESX host this compute
node is running on.
"""
def __init__(self, session, host_name):
super(HostState, self).__init__()
self._session = session
self._host_name = host_name
self._stats = {}
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first.
"""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Update the current state of the host.
"""
host_mor = vm_util.get_host_ref(self._session)
summary = self._session._call_method(vim_util,
"get_dynamic_property",
host_mor,
"HostSystem",
"summary")
if summary is None:
return
try:
ds = vm_util.get_datastore_ref_and_name(self._session)
except exception.DatastoreNotFound:
ds = (None, None, 0, 0)
data = {}
data["vcpus"] = summary.hardware.numCpuThreads
data["cpu_info"] = \
{"vendor": summary.hardware.vendor,
"model": summary.hardware.cpuModel,
"topology": {"cores": summary.hardware.numCpuCores,
"sockets": summary.hardware.numCpuPkgs,
"threads": summary.hardware.numCpuThreads}
}
data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
data["host_memory_free"] = data["host_memory_total"] - \
summary.quickStats.overallMemoryUsage
data["hypervisor_type"] = summary.config.product.name
data["hypervisor_version"] = summary.config.product.version
data["hypervisor_hostname"] = self._host_name
data["supported_instances"] = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self._stats = data
return data
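    # Shape of the dict returned by update_status() (values are hypothetical;
    # disk figures are in GB and memory in MB, per the conversions above):
    #
    #     {"vcpus": 16,
    #      "cpu_info": {"vendor": "...", "model": "...",
    #                   "topology": {"cores": 8, "sockets": 2, "threads": 16}},
    #      "disk_total": 1024, "disk_available": 800, "disk_used": 224,
    #      "host_memory_total": 65536, "host_memory_free": 50000,
    #      "hypervisor_type": "VMware ESXi", "hypervisor_version": "5.1.0",
    #      "hypervisor_hostname": "esx-host-01",
    #      "supported_instances": [("i686", "vmware", "hvm"),
    #                              ("x86_64", "vmware", "hvm")]}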
class VCState(object):
"""Manages information about the VC host this compute
node is running on.
"""
def __init__(self, session, host_name, cluster):
super(VCState, self).__init__()
self._session = session
self._host_name = host_name
self._cluster = cluster
self._stats = {}
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first.
"""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Update the current state of the host.
"""
host_mor = vm_util.get_host_ref(self._session, self._cluster)
if host_mor is None:
return
summary = self._session._call_method(vim_util,
"get_dynamic_property",
host_mor,
"HostSystem",
"summary")
if summary is None:
return
try:
ds = vm_util.get_datastore_ref_and_name(self._session,
self._cluster)
except exception.DatastoreNotFound:
ds = (None, None, 0, 0)
data = {}
data["vcpus"] = summary.hardware.numCpuThreads
data["cpu_info"] =\
{"vendor": summary.hardware.vendor,
"model": summary.hardware.cpuModel,
"topology": {"cores": summary.hardware.numCpuCores,
"sockets": summary.hardware.numCpuPkgs,
"threads": summary.hardware.numCpuThreads}
}
data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
data["host_memory_free"] = data["host_memory_total"] -\
summary.quickStats.overallMemoryUsage
data["hypervisor_type"] = summary.config.product.name
data["hypervisor_version"] = summary.config.product.version
data["hypervisor_hostname"] = self._host_name
data["supported_instances"] = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self._stats = data
return data
|
|
#! /usr/env/python
"""
Component that models 2D diffusion using an explicit finite-volume method.
Created July 2013 GT
Last updated May 2015 DEJH
"""
##############DEJH is unsure if the uplift is correctly (not) incorporated here
from __future__ import print_function
from landlab import ModelParameterDictionary, Component, FieldError
from landlab import create_and_initialize_grid
from landlab.core.model_parameter_dictionary import MissingKeyError
_ALPHA = 0.25 # time-step stability factor
#_VERSION = 'make_all_data'
#_VERSION = 'explicit'
_VERSION = 'pass_grid'
class LinearDiffuser(Component):
"""
This component implements linear diffusion of a field in the supplied
ModelGrid.
    This component requires the following parameters to be set in the input file,
*input_stream*, set in the component initialization:
'linear_diffusivity', the diffusivity to use
Optional inputs are:
'uplift_rate', if you want this component to include the uplift
internally
'dt', the model timestep (assumed constant)
'values_to_diffuse', a string giving the name of the grid field
containing the data to diffuse.
Supply *dt* to the diffuser through the diffuse() argument.
This allows you to set a dynamic timestep for this class.
If 'values_to_diffuse' is not provided, defaults to
'topographic__elevation'.
No particular units are necessary where they are not specified, as long as
all units are internally consistent.
The component takes *grid*, the ModelGrid object, and (optionally)
*current_time* and *input_stream*. If *current_time* is not set, it defaults
to 0.0. If *input_stream* is not set in instantiation of the class,
    :func:`initialize` with *input_stream* as an input must be called instead.
*Input_stream* is the filename of (& optionally, path to) the parameter
file.
At the moment, this diffuser can only work with constant diffusivity.
Spatially variable diffusivity hopefully coming soon.
The primary method of this class is :func:`diffuse`.
"""
_name = 'LinearDiffuser'
_input_var_names = set(['topographic__elevation',
])
#############################UPDATE ME
_output_var_names = set(['topographic__elevation',
'surface_gradient',
'unit_flux',
])
_var_units = {'topographic__elevation' : 'm',
'surface_gradient' : '-',
'unit_flux' : 'm**3/s',
}
_var_mapping = {'topographic__elevation' : 'node',
'surface_gradient' : 'link',
'unit_flux' : 'link',
}
_var_defs = {'topographic__elevation' : 'Land surface topographic elevation; can be overwritten in initialization',
'surface_gradient' : 'Gradient of surface, on links',
'unit_flux' : 'Volume flux per unit width along links',
}
def __init__(self, grid, input_stream=None, current_time=0.):
self._grid = grid
self.current_time = current_time
if input_stream:
self.initialize(input_stream)
else:
print('Ensure you call the initialize(input_stream) method before '
'running the model!')
def initialize(self, input_stream):
# Create a ModelParameterDictionary for the inputs
if type(input_stream)==ModelParameterDictionary:
inputs = input_stream
else:
inputs = ModelParameterDictionary(input_stream)
# Read input/configuration parameters
self.kd = inputs.read_float('linear_diffusivity')
try:
self.uplift_rate = inputs.read_float('uplift_rate')
except MissingKeyError:
self.uplift_rate = 0.
try:
self.values_to_diffuse = inputs.read_string('values_to_diffuse')
except MissingKeyError:
self.values_to_diffuse = 'topographic__elevation'
else:
            # switch the new field name into the class properties
for mysets in (self._input_var_names, self._output_var_names):
mysets.remove('topographic__elevation')
mysets.add(self.values_to_diffuse)
for mydicts in (self._var_units, self._var_mapping, self._var_defs):
mydicts[self.values_to_diffuse] = mydicts.pop('topographic__elevation')
try:
self.timestep_in = inputs.read_float('dt')
except MissingKeyError:
pass
# Create grid if one doesn't already exist
if self._grid is None:
self._grid = create_and_initialize_grid(input_stream)
# Set internal time step
# ..todo:
# implement mechanism to compute time-steps dynamically if grid is
# adaptive/changing
dx = self._grid.min_active_link_length() # smallest active link length
self.dt = _ALPHA*dx*dx/self.kd # CFL condition
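        # Worked example of the CFL condition above (illustrative numbers):
        # with _ALPHA = 0.25, dx = 10. m and kd = 0.01 m**2/yr,
        # dt = 0.25 * 10. * 10. / 0.01 = 2500. yr; longer external timesteps
        # are subdivided into steps of this size in diffuse().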
try:
self.tstep_ratio = self.timestep_in/self.dt
except AttributeError:
pass
# Get a list of interior cells
self.interior_cells = self._grid.get_core_cell_node_ids()
##DEJH bites the bullet and forces the 2015 style with fields
# # Here we're experimenting with different approaches: with
# # 'make_all_data', we create and manage all the data we need and embed
# # it all in the grid. With 'explicit', we require the caller/user to
# # provide data.
# if _VERSION=='make_all_data':
# #print('creating internal data')
# self.z = self._grid.add_zeros('node', 'landscape_surface__elevation')
# self.g = self._grid.add_zeros('active_link', 'landscape_surface__gradient') # surface gradients
# self.qs = self._grid.add_zeros('active_link','unit_sediment_flux') # unit sediment flux
# self.dqds = self._grid.add_zeros('node', 'sediment_flux_divergence') # sed flux derivative
# elif _VERSION=='explicit':
# pass
# else:
# # Create data arrays for variables that won't (?) be shared with other
# # components
# self.g = self._grid.create_active_link_array_zeros() # surface gradients
# self.qs = self._grid.create_active_link_array_zeros() # unit sediment flux
# self.dqds = self._grid.create_node_array_zeros() # sed flux derivative
self.z = self._grid.at_node[self.values_to_diffuse]
g = self._grid.zeros(centering='link')
qs = self._grid.zeros(centering='link')
try:
self.g = self._grid.add_field('link', 'surface__gradient', g, noclobber=True) #note this will object if this exists already
except FieldError:
pass #field exists, so no problem
try:
self.qs = self._grid.add_field('link', 'unit_flux', qs, noclobber=True)
except FieldError:
pass
#note all these terms are deliberately loose, as we won't always be dealing with topo
def input_timestep(self, timestep_in):
"""
Allows the user to set a dynamic (evolving) timestep manually as part of
a run loop.
"""
self.timestep_in = timestep_in
self.tstep_ratio = timestep_in/self.dt
def run_one_step_explicit(self, mg, z, g, qs, dqsds, dzdt, delt):
# Take the smaller of delt or built-in time-step size self.dt
dt = min(self.dt, delt)
# Calculate the gradients and sediment fluxes
g = mg.calculate_gradients_at_active_links(z)
qs = -self.kd*g
# Calculate the net deposition/erosion rate at each node
dqsds = mg.calculate_flux_divergence_at_nodes(qs)
# Calculate the total rate of elevation change
dzdt = self.uplift_rate - dqsds
# Update the elevations
z[self.interior_cells] = z[self.interior_cells] \
+ dzdt[self.interior_cells] * dt
# Update current time and return it
self.current_time += dt
return z, g, qs, dqsds, dzdt
def run_one_step_internal(self, delt):
# Take the smaller of delt or built-in time-step size self.dt
dt = min(self.dt, delt)
# Calculate the gradients and sediment fluxes
self.g = self._grid.calculate_gradients_at_active_links(self.z)
self.qs = -self.kd*self.g
# Calculate the net deposition/erosion rate at each node
self.dqsds = self._grid.calculate_flux_divergence_at_nodes(self.qs)
# Calculate the total rate of elevation change
dzdt = self.uplift_rate - self.dqsds
# Update the elevations
self.z[self.interior_cells] += dzdt[self.interior_cells] * dt
# Update current time and return it
self.current_time += dt
return self.current_time
def diffuse(self, dt, internal_uplift=False, num_uplift_implicit_comps = 1):
"""
This is the primary method of the class. Call it to perform an iteration
of the model. Takes *dt*, the current timestep.
The modelgrid must contain the field to diffuse, which defaults to
'topographic__elevation'. This can be overridden with the
values_to_diffuse property in the input file.
See the class docstring for a list of the other properties necessary
in the input file for this component to run.
To improve stability, this component can incorporate uplift into its
        internal mechanism. To use this, set *internal_uplift* to True. If you only have one component that requires this, do not add
uplift manually in your loop; this method will include uplift
automatically. If more than one of your components has this requirement,
set *num_uplift_implicit_comps* to the total number of components that
do.
You can suppress this behaviour by setting *internal_uplift* to False.
"""
# Take the smaller of delt or built-in time-step size self.dt
self.tstep_ratio = dt/self.dt
repeats = int(self.tstep_ratio//1.)
extra_time = self.tstep_ratio-repeats
z = self._grid.at_node[self.values_to_diffuse]
core_nodes = self._grid.get_core_cell_node_ids()
for i in xrange(repeats+1):
# Calculate the gradients and sediment fluxes
self.g[self._grid.active_links] = self._grid.calculate_gradients_at_active_links(z)
self.qs[self._grid.active_links] = -self.kd*self.g[self._grid.active_links]
# Calculate the net deposition/erosion rate at each node
self.dqsds = self._grid.calculate_flux_divergence_at_nodes(self.qs[self._grid.active_links])
# Calculate the total rate of elevation change
#dzdt = self.uplift_rate - self.dqsds
dzdt = - self.dqsds
# Update the elevations
timestep = self.dt
if i == (repeats):
timestep *= extra_time
else:
pass
if internal_uplift:
add_uplift = self.uplift_rate/num_uplift_implicit_comps
else:
add_uplift = 0.
self._grid.at_node[self.values_to_diffuse][core_nodes] += add_uplift + dzdt[core_nodes] * timestep
#check the BCs, update if fixed gradient
if self._grid.fixed_gradient_boundary_nodes:
self._grid.at_node[self.values_to_diffuse][self._grid.fixed_gradient_node_properties['boundary_node_IDs']] = self._grid.at_node[self.values_to_diffuse][self._grid.fixed_gradient_node_properties['anchor_node_IDs']] + self._grid.fixed_gradient_node_properties['values_to_add']
#return the grid
return self._grid
def run_until_explicit(self, mg, t, z, g, qs, dqsds, dzdt):
while self.current_time < t:
remaining_time = t - self.current_time
z, g, qs, dqsds, dzdt = self.run_one_step_explicit(mg, z, g, qs, dqsds, dzdt, remaining_time)
return z, g, qs, dqsds, dzdt
def run_until_internal(self, t):
while self.current_time < t:
remaining_time = t - self.current_time
self.run_one_step_internal(remaining_time)
def run_until(self, t): # this is just a temporary duplicate
while self.current_time < t:
remaining_time = t - self.current_time
self.run_one_step_internal(remaining_time)
def get_time_step(self):
"""
Returns time-step size.
"""
return self.dt
@property
def time_step(self):
"""
Returns time-step size (as a property).
"""
return self.dt
|
|
"""Configuration object and defaults setup
The PylonsConfig object is initialized in pylons projects inside the
:file:`config/environment.py` module. Importing the :data:`config`
object from module causes the PylonsConfig object to be created, and
setup in app-safe manner so that multiple apps being setup avoid
conflicts.
After importing :data:`config`, the project should then call
:meth:`~PylonsConfig.init_app` with the appropriate options to setup
the configuration. In the config data passed with
:meth:`~PylonsConfig.init_app`, various defaults are set for use with Paste
and Routes.
"""
import copy
import logging
import os
from paste.config import DispatchingConfig
from paste.deploy.converters import asbool
from webhelpers.mimehelper import MIMETypes
default_template_engine = 'mako'
request_defaults = dict(charset='utf-8', errors='replace',
decode_param_names=False, language='en-us')
response_defaults = dict(content_type='text/html',
charset='utf-8', errors='strict',
headers={'Cache-Control': 'no-cache',
'Pragma': 'no-cache'})
log = logging.getLogger(__name__)
config = DispatchingConfig()
class PylonsConfig(dict):
"""Pylons configuration object
The Pylons configuration object is a per-application instance
object that retains the information regarding the global and app
conf's as well as per-application instance specific data such as
the mapper, and the paths for this instance.
The config object is available in your application as the Pylons
global :data:`pylons.config`. For example::
from pylons import config
template_paths = config['pylons.paths']['templates']
    There are several useful keys of the config object that most people will
be interested in:
``pylons.paths``
A dict of absolute paths that were defined in the applications
``config/environment.py`` module.
``pylons.environ_config``
Dict of environ keys for where in the environ to pickup various
objects for registering with Pylons. If these are present then
PylonsApp will use them from environ rather than using default
middleware from Beaker. Valid keys are: ``session, cache``
``pylons.strict_tmpl_context``
Whether or not the ``tmpl_context`` object should throw an
attribute error when access is attempted to an attribute that
doesn't exist. Defaults to True.
``pylons.tmpl_context_attach_args``
        Whether or not Routes variables should automatically be
attached to the tmpl_context object when specified in a
controllers method.
``pylons.request_options``
A dict of Content-Type related default settings for new
instances of :class:`~pylons.controllers.util.Request`. May
contain the values ``charset`` and ``errors`` and
``decode_param_names``. Overrides the Pylons default values
specified by the ``request_defaults`` dict.
``pylons.response_options``
A dict of Content-Type related default settings for new
instances of :class:`~pylons.controllers.util.Response`. May
contain the values ``content_type``, ``charset`` and
``errors``. Overrides the Pylons default values specified by
the ``response_defaults`` dict.
``routes.map``
Mapper object used for Routing. Yes, it is possible to add
routes after your application has started running.
"""
defaults = {
'debug': False,
'pylons.package': None,
'pylons.paths': {'root': None,
'controllers': None,
'templates': [],
'static_files': None},
'pylons.environ_config': dict(session='beaker.session',
cache='beaker.cache'),
'pylons.app_globals': None,
'pylons.h': None,
'pylons.request_options': request_defaults.copy(),
'pylons.response_options': response_defaults.copy(),
'pylons.strict_tmpl_context': True,
'pylons.tmpl_context_attach_args': False,
}
def init_app(self, global_conf, app_conf, package=None, paths=None):
"""Initialize configuration for the application
        .. note::
This *must* be called at least once, as soon as possible
to setup all the configuration options.
``global_conf``
Several options are expected to be set for a Pylons web
application. They will be loaded from the global_config
which has the main Paste options. If ``debug`` is not
enabled as a global config option, the following option
*must* be set:
            * email_to - The email address to send the debug error to
The optional config options in this case are:
* smtp_server - The SMTP server to use, defaults to
'localhost'
* error_log - A logfile to write the error to
* error_subject_prefix - The prefix of the error email
subject
* from_address - Whom the error email should be from
``app_conf``
Defaults supplied via the [app:main] section from the Paste
config file. ``load_config`` only cares about whether a
'prefix' option is set, if so it will update Routes to
ensure URL's take that into account.
``package``
The name of the application package, to be stored in the
app_conf.
.. versionchanged:: 1.0
``template_engine`` option is no longer supported.
"""
log.debug("Initializing configuration, package: '%s'", package)
conf = global_conf.copy()
conf.update(app_conf)
conf.update(dict(app_conf=app_conf, global_conf=global_conf))
conf.update(self.pop('environment_load', {}))
if paths:
conf['pylons.paths'] = paths
conf['pylons.package'] = package
conf['debug'] = asbool(conf.get('debug'))
# Load the MIMETypes with its default types
MIMETypes.init()
# Ensure all the keys from defaults are present, load them if not
for key, val in copy.deepcopy(PylonsConfig.defaults).iteritems():
conf.setdefault(key, val)
# Load the errorware configuration from the Paste configuration file
# These all have defaults, and emails are only sent if configured and
# if this application is running in production mode
errorware = {}
errorware['debug'] = conf['debug']
if not errorware['debug']:
errorware['debug'] = False
errorware['error_email'] = conf.get('email_to')
errorware['error_log'] = conf.get('error_log', None)
errorware['smtp_server'] = conf.get('smtp_server',
'localhost')
errorware['error_subject_prefix'] = conf.get(
'error_subject_prefix', 'WebApp Error: ')
errorware['from_address'] = conf.get(
'from_address', conf.get('error_email_from',
'[email protected]'))
errorware['error_message'] = conf.get('error_message',
'An internal server error occurred')
# Copy in some defaults
if 'cache_dir' in conf:
conf.setdefault('beaker.session.data_dir',
os.path.join(conf['cache_dir'], 'sessions'))
conf.setdefault('beaker.cache.data_dir',
os.path.join(conf['cache_dir'], 'cache'))
conf['pylons.cache_dir'] = conf.pop('cache_dir',
conf['app_conf'].get('cache_dir'))
# Save our errorware values
conf['pylons.errorware'] = errorware
# Load conf dict into self
self.update(conf)
pylons_config = PylonsConfig()
# Push an empty config so all accesses to config at import time have something
# to look at and modify. This config will be merged with the app's when it's
# built in the paste.app_factory entry point.
pylons_config.update(copy.deepcopy(PylonsConfig.defaults))
config.push_process_config(pylons_config)
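# A minimal usage sketch (not called anywhere in this module) of how a
# paste.app_factory would typically drive init_app(). The package name,
# paths and ini values below are hypothetical placeholders.
def _example_init_app_usage():
    global_conf = {'debug': 'false', 'email_to': '[email protected]',
                   'smtp_server': 'localhost'}
    app_conf = {'cache_dir': '/tmp/myapp-cache'}
    paths = {'root': '/srv/myapp',
             'controllers': '/srv/myapp/controllers',
             'templates': ['/srv/myapp/templates'],
             'static_files': '/srv/myapp/public'}
    pylons_config.init_app(global_conf, app_conf, package='myapp', paths=paths)
    # After initialization the config behaves like a plain dict.
    return pylons_config['debug'], pylons_config['pylons.errorware']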
|
|
from django import forms
from django.forms import extras
import datetime
from mptt.forms import TreeNodeChoiceField
from education.utils import is_empty
from poll.models import Poll
from rapidsms.contrib.locations.models import Location
from generic.forms import ActionForm, FilterForm, ModuleForm
from rapidsms.models import Backend
from .reports import get_week_date, get_month_day_range
from .models import School, EmisReporter, ReportComment
from rapidsms_xforms.models import XFormSubmissionValue
from django.contrib.auth.models import Group, User
from django.db.models import Q
from uganda_common.forms import SMSInput
from django.conf import settings
from rapidsms_httprouter.models import Message
from django.contrib.sites.models import Site
from contact.models import MassText
from rapidsms.models import Connection, Contact
from script.models import Script
from unregister.models import Blacklist
date_range_choices = (
('w', 'Previous Calendar Week'), ('m', 'Previous Calendar Month'), ('q', 'Previous calendar quarter'),)
class DateRangeForm(forms.Form): # pragma: no cover
start = forms.IntegerField(required=True, widget=forms.HiddenInput())
end = forms.IntegerField(required=True, widget=forms.HiddenInput())
def clean(self):
cleaned_data = self.cleaned_data
start_ts = cleaned_data.get('start')
cleaned_data['start'] = datetime.datetime.fromtimestamp(float(start_ts))
end_ts = cleaned_data.get('end')
cleaned_data['end'] = datetime.datetime.fromtimestamp(float(end_ts))
return cleaned_data
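# A small illustrative sketch (hypothetical timestamps, not used elsewhere):
# the hidden 'start'/'end' inputs carry Unix epoch seconds, which clean()
# converts into datetime objects.
def _example_date_range_cleaning():
    form = DateRangeForm({'start': 1325376000, 'end': 1327968000})
    assert form.is_valid()
    # cleaned_data now holds datetime objects instead of raw timestamps
    return form.cleaned_data['start'], form.cleaned_data['end']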
AREAS = Location.tree.all().select_related('type')
class ReporterFreeSearchForm(FilterForm):
""" concrete implementation of filter form
TO DO: add ability to search for multiple search terms separated by 'or'
"""
search = forms.CharField(max_length=100, required=False, label="Free-form search",
help_text="Use 'or' to search for multiple names")
def filter(self, request, queryset):
search = self.cleaned_data['search']
if search == "":
pass
else:
if search[:3] == '256':
search = search[3:]
elif search[:1] == '0':
search = search[1:]
queryset = queryset.exclude(
connection__identity__in=Blacklist.objects.values_list('connection__identity', flat=True)
).filter(
Q(name__icontains=search) |
Q(reporting_location__name__icontains=search) |
Q(connection__identity__icontains=search) |
Q(schools__name__icontains=search)
).distinct()
return queryset
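# Note on the prefix handling above (illustrative values): a search for
# '256772123456' or '0772123456' is reduced to '772123456' before matching,
# so either the international or the local phone-number format finds the
# same reporter.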
class SchoolFilterForm(FilterForm):
""" filter form for emis schools """
school = forms.ChoiceField(choices=(('', '-----'), (-1, 'Has No School'),) + \
tuple(School.objects.filter(location__name__in= \
EmisReporter.objects.values_list(
'reporting_location__name',
flat=True)).values_list('pk',
'name').order_by(
'name')))
def filter(self, request, queryset):
school_pk = self.cleaned_data['school']
if school_pk == '':
return queryset
elif int(school_pk) == -1:
return queryset.filter(schools__name=None)
else:
return queryset.filter(schools=school_pk)
class LastReportingDateFilterForm(FilterForm):
""" filter form for emis reporter on reporting date """
from_date = forms.DateField()
to_date = forms.DateField(help_text='Select dates to filter by last reporting date.')
def filter(self, request, queryset):
if self.cleaned_data['to_date'] and self.cleaned_data['from_date']:
if self.cleaned_data['to_date'] < self.cleaned_data['from_date']:
date_range = [self.cleaned_data['to_date'], self.cleaned_data['from_date']]
else:
date_range = [self.cleaned_data['from_date'], self.cleaned_data['to_date']]
if queryset.model.__name__ == 'EmisReporter':
return queryset.filter(last_reporting_date__range=date_range).order_by('last_reporting_date')
if queryset.model.__name__ == 'Message':
return queryset.filter(date__range=date_range).order_by('date')
return queryset
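# Note: filter() above deliberately re-orders swapped bounds, so a user who
# picks from_date=2013-05-10 and to_date=2013-05-01 still gets the
# 2013-05-01..2013-05-10 range (dates here are illustrative).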
class PollFilterForm(FilterForm):
""" filter form for message on polls """
polls = forms.ChoiceField(choices=(('', '-----'),) + \
tuple(Poll.objects.all().values_list('pk', 'name').order_by('name')))
def filter(self, request, queryset):
poll = Poll.objects.get(id=self.cleaned_data['polls'])
if poll is not None:
return queryset.filter(poll_responses__poll=poll)
return queryset
class NewConnectionForm(forms.Form):
identity = forms.CharField(max_length=15, required=True, label="Primary contact information")
class EditReporterForm(forms.ModelForm):
class Meta:
model = EmisReporter
fields = ('name', 'gender', 'grade', 'reporting_location', 'groups', 'schools')
def __init__(self, *args, **kwargs):
super(EditReporterForm, self).__init__(*args, **kwargs)
instance = kwargs['instance']
data = kwargs.get('data')
self.fields['reporting_location'] = forms.ModelChoiceField(
queryset=Location.objects.filter(type='district').order_by('name'))
if instance and data:
edited_school = School.objects.none()
schools_in_reporting_location = School.objects.filter(location=instance.reporting_location)
if not is_empty(data.get('schools')):
edited_school = School.objects.filter(pk=data.get('schools'))
self.fields['schools'] = forms.ModelChoiceField(queryset=schools_in_reporting_location | edited_school)
elif instance.reporting_location is None:
if instance.schools.count() == 0:
self.fields['schools'] = forms.ModelChoiceField(queryset=School.objects.none(),
widget=forms.Select(attrs={'disabled': 'disabled'}))
else:
self.fields['schools'] = forms.ModelChoiceField(queryset=instance.schools.all())
else:
schools_in_reporting_location = School.objects.filter(location=instance.reporting_location)
if instance.schools.all().exists() and instance.schools.all()[0] not in schools_in_reporting_location:
self.fields['schools'] = forms.ModelChoiceField(
queryset=schools_in_reporting_location | instance.schools.all())
else:
self.fields['schools'] = forms.ModelChoiceField(queryset=schools_in_reporting_location)
self.fields['schools'].required = False
self.fields['gender'].required = False
self.fields['grade'].required = False
def clean(self):
data = self.cleaned_data
if data.get('schools') is not None and data['schools'].location != data.get('reporting_location'):
            self._errors['schools'] = self.error_class(['School should be from the same location as the reporting location'])
return data
def save(self, commit=True):
reporter_form = super(EditReporterForm, self).save(commit=False)
school = self.cleaned_data['schools']
if school:
schools = School.objects.filter(pk=school.pk)
reporter_form.schools = schools
else:
# remove all schools associated with this reporter
[reporter_form.schools.remove(sch) for sch in reporter_form.schools.all()]
groups = self.cleaned_data['groups']
if groups:
reporter_form.groups.clear()
group = Group.objects.get(pk=groups[0].pk)
reporter_form.groups.add(group)
else:
[reporter_form.groups.remove(grp) for grp in reporter_form.groups.all()]
        if commit:
            reporter_form.save()
        return reporter_form
class DistrictFilterForm(forms.Form):
""" filter form for districts """
locs = Location.objects.filter(
name__in=XFormSubmissionValue.objects.values_list('submission__connection__contact__reporting_location__name',
flat=True))
locs_list = []
for loc in locs:
if not Location.tree.root_nodes()[0].pk == loc.pk and loc.type.name == 'district':
locs_list.append((loc.pk, loc.name))
district = forms.ChoiceField(choices=(('', '-----'),) + tuple(locs_list))
class LimitedDistictFilterForm(FilterForm):
""" filter Emis Reporters on their districts """
locs = Location.objects.filter(
name__in=EmisReporter.objects.values_list('reporting_location__name', flat=True).distinct())
locs_list = []
for loc in locs:
if not Location.tree.root_nodes()[0].pk == loc.pk and loc.type.name == 'district':
locs_list.append((loc.pk, loc.name))
district = forms.ChoiceField(choices=(('', '-----'),) + tuple(locs_list))
def filter(self, request, queryset):
district_pk = self.cleaned_data['district']
if district_pk == '':
return queryset
elif int(district_pk) == -1:
return queryset.filter(reporting_location=None)
else:
try:
district = Location.objects.get(pk=district_pk)
except Location.DoesNotExist:
district = None
if district:
return queryset.filter(reporting_location__in=district.get_descendants(include_self=True))
else:
return queryset
class RolesFilterForm(FilterForm):
def __init__(self, data=None, **kwargs):
self.request = kwargs.pop('request')
if data:
forms.Form.__init__(self, data, **kwargs)
else:
forms.Form.__init__(self, **kwargs)
choices = ((-1, 'No Group'),) + tuple([(int(g.pk), g.name) for g in Group.objects.all().order_by('name')])
self.fields['groups'] = forms.ChoiceField(choices=choices, required=True)
def filter(self, request, queryset):
groups_pk = self.cleaned_data['groups']
if groups_pk == '-1':
return queryset
else:
return queryset.filter(groups=groups_pk)
class SchoolForm(forms.ModelForm):
class Meta:
model = School
fields = ('name', 'location')
def __init__(self, *args, **kwargs):
super(SchoolForm, self).__init__(*args, **kwargs)
self.fields['location'] = forms.ModelChoiceField(
queryset=Location.objects.filter(type='district').order_by('name'))
class FreeSearchSchoolsForm(FilterForm):
""" concrete implementation of filter form
TO DO: add ability to search for multiple search terms separated by 'or'
"""
search = forms.CharField(max_length=100, required=False, label="Free-form search",
help_text="Use 'or' to search for multiple names")
def filter(self, request, queryset):
search = self.cleaned_data['search']
if search == "":
return queryset
else:
return queryset.filter(Q(name__icontains=search)
| Q(emis_id__icontains=search)
| Q(location__name__icontains=search))
class SchoolDistictFilterForm(FilterForm):
""" filter Schools on their districts """
locs = Location.objects.filter(name__in=School.objects.values_list('location__name', flat=True)).order_by('name')
locs_list = []
for loc in locs:
if not Location.tree.root_nodes()[0].pk == loc.pk and loc.type.name == 'district':
locs_list.append((loc.pk, loc.name))
district = forms.ChoiceField(choices=(('', '-----'),) + tuple(locs_list))
def filter(self, request, queryset):
district_pk = self.cleaned_data['district']
if district_pk == '':
return queryset
elif int(district_pk) == -1:
            return queryset.filter(location=None)
else:
try:
district = Location.objects.get(pk=district_pk)
except Location.DoesNotExist:
district = None
if district:
return queryset.filter(location__in=district.get_descendants(include_self=True))
else:
return queryset
class ReportCommentForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())
class Meta:
model = ReportComment
def __init__(self, *args, **kwargs):
super(ReportCommentForm, self).__init__(*args, **kwargs)
self.fields['report_date'].required = False
self.fields['reporting_period'].required = False
def save(self, commit=True):
# do all that funky saving
report_comment = super(ReportCommentForm, self).save(commit=False)
reporting_period = self.cleaned_data.get('reporting_period', '')
today = datetime.datetime.now()
if reporting_period == 'wk':
report_comment.set_report_date(
get_week_date(depth=2)[0][0]
)
elif reporting_period == 'mo':
report_comment.set_report_date(
get_month_day_range(today)[0]
)
elif reporting_period == 't':
# TODO how best to set termly comments
pass
if commit:
report_comment.save()
return report_comment
class UserForm(forms.ModelForm):
location = forms.ModelChoiceField(
queryset=Location.objects.filter(type__in=["district", "country"]).order_by('name'), required=True)
password1 = forms.CharField(label="Password", widget=forms.PasswordInput, required=False)
password2 = forms.CharField(label="Password confirmation", widget=forms.PasswordInput,
help_text="Enter the same password as above, for verification.", required=False)
class Meta:
model = User
fields = ("username", "first_name", "last_name", "email", "groups", "password1", "password2")
def __init__(self, *args, **kwargs):
self.edit = kwargs.pop('edit', None)
super(UserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = ""
self.fields['groups'].required = True
self.fields['email'].help_text = "Optional field"
def clean_username(self):
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
if not self.edit:
raise forms.ValidationError("A user with that username already exists.")
else:
return username
def clean_password2(self):
password1 = self.cleaned_data.get("password1", "")
password2 = self.cleaned_data.get("password2", "")
if password1 == password2 and password2 == "" and self.edit:
return password2
elif password2 == "":
raise forms.ValidationError("This Field is Required")
if password1 != password2:
raise forms.ValidationError("The two password fields didn't match.")
return password2
def save(self, commit=True):
user = super(UserForm, self).save(commit=False)
if self.edit and self.cleaned_data["password1"] != "":
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class MassTextForm(ActionForm):
text = forms.CharField(max_length=160, required=True, widget=SMSInput())
action_label = 'Send Message'
def clean_text(self):
text = self.cleaned_data['text']
# replace common MS-word characters with SMS-friendly characters
for find, replace in [(u'\u201c', '"'),
(u'\u201d', '"'),
(u'\u201f', '"'),
(u'\u2018', "'"),
(u'\u2019', "'"),
(u'\u201B', "'"),
(u'\u2013', "-"),
(u'\u2014', "-"),
(u'\u2015', "-"),
(u'\xa7', "$"),
(u'\xa1', "i"),
(u'\xa4', ''),
(u'\xc4', 'A')]:
text = text.replace(find, replace)
return text
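    # Illustrative example of the normalisation above (hypothetical input):
    # u'\u201cOK\u201d \u2013 don\u2019t reply' becomes '"OK" - don\'t reply',
    # which keeps the message within the plain SMS character set.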
def perform(self, request, results):
if results is None or len(results) == 0:
return ('A message must have one or more recipients!', 'error')
if request.user and request.user.has_perm('auth.add_message'):
connections = \
list(Connection.objects.filter(contact__in=results).distinct())
text = self.cleaned_data.get('text', "")
text = text.replace('%', u'\u0025')
messages = Message.mass_text(text, connections)
MassText.bulk.bulk_insert(send_pre_save=False,
user=request.user,
text=text,
contacts=list(results))
masstexts = MassText.bulk.bulk_insert_commit(send_post_save=False, autoclobber=True)
masstext = masstexts[0]
if settings.SITE_ID:
masstext.sites.add(Site.objects.get_current())
return ('Message successfully sent to %d numbers' % len(connections), 'success',)
else:
return ("You don't have permission to send messages!", 'error',)
class SchoolMassTextForm(ActionForm):
text = forms.CharField(max_length=160, required=True, widget=SMSInput())
action_label = 'Send Message'
def clean_text(self):
text = self.cleaned_data['text']
# replace common MS-word characters with SMS-friendly characters
for find, replace in [(u'\u201c', '"'),
(u'\u201d', '"'),
(u'\u201f', '"'),
(u'\u2018', "'"),
(u'\u2019', "'"),
(u'\u201B', "'"),
(u'\u2013', "-"),
(u'\u2014', "-"),
(u'\u2015', "-"),
(u'\xa7', "$"),
(u'\xa1', "i"),
(u'\xa4', ''),
(u'\xc4', 'A')]:
text = text.replace(find, replace)
return text
def perform(self, request, results):
if results is None or len(results) == 0:
return ('A message must have one or more recipients!', 'error')
if request.user and request.user.has_perm('auth.add_message'):
reporters = []
for school in results:
for rep in school.emisreporter_set.filter(groups__name__in=['Teachers', 'Head Teachers']):
reporters.append(rep)
connections = \
list(Connection.objects.filter(contact__in=reporters).distinct())
text = self.cleaned_data.get('text', "")
text = text.replace('%', u'\u0025')
messages = Message.mass_text(text, connections)
MassText.bulk.bulk_insert(send_pre_save=False,
user=request.user,
text=text,
contacts=list(reporters))
masstexts = MassText.bulk.bulk_insert_commit(send_post_save=False, autoclobber=True)
masstext = masstexts[0]
if settings.SITE_ID:
masstext.sites.add(Site.objects.get_current())
return ('Message successfully sent to %d numbers' % len(connections), 'success',)
else:
return ("You don't have permission to send messages!", 'error',)
class ScriptsForm(forms.ModelForm):
date = forms.DateField(label="Schedule Date: ", widget=extras.widgets.SelectDateWidget(), required=False)
class Meta:
model = Script
fields = ("slug", "name", "enabled")
widgets = {
'slug': forms.HiddenInput(),
'name': forms.TextInput(attrs={'size': 60}),
'enabled': forms.CheckboxInput(attrs={'onclick': 'check_clicked(this);'})
}
class ResultForm(forms.Form):
from_date = forms.DateTimeField()
to_date = forms.DateTimeField()
|
|
from __future__ import division, absolute_import
import numpy as np
from copy import deepcopy
from fatiando import utils, gridder
from mesher import OblateEllipsoid
import oblate_ellipsoid
from numpy.testing import assert_almost_equal
from pytest import raises
# Local-geomagnetic field
F = 30000
inc = 2
dec = -27
gm = 1000 # geometrical factor
area = [-5.*gm, 5.*gm, -5.*gm, 5.*gm]
x, y, z = gridder.scatter(area, 300, z=0.)
axis_ref = gm # reference semi-axis
# Oblate ellipsoids used for testing
model = [OblateEllipsoid(x=-3*gm, y=-3*gm, z=3*axis_ref,
small_axis=0.6*axis_ref,
large_axis=axis_ref,
strike=78, dip=92, rake=135,
props={'principal susceptibilities': [0.7, 0.7,
0.7],
'susceptibility angles': [90., 47., 13.]}),
OblateEllipsoid(x=-gm, y=-gm, z=2.4*axis_ref,
small_axis=0.3*axis_ref,
large_axis=1.1*axis_ref,
strike=4, dip=10, rake=5,
props={'principal susceptibilities': [0.2, 0.15,
0.05],
'susceptibility angles': [180, 19, -8.],
'remanent magnetization': [3, -6, 35]}),
OblateEllipsoid(x=3*gm, y=3*gm, z=4*axis_ref,
small_axis=0.6*axis_ref,
large_axis=1.5*axis_ref,
strike=-58, dip=87, rake=49,
props={'remanent magnetization': [4.7, 39, 0]})]
def test_oblate_ellipsoid_force_prop():
"Test the oblate_ellipsoid code with an imposed physical property"
# forced physical property
pmag = utils.ang2vec(5, 43, -8)
# magnetic field produced by the ellipsoids
# with the forced physical property
bx = oblate_ellipsoid.bx(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
by = oblate_ellipsoid.by(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
bz = oblate_ellipsoid.bz(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
tf = oblate_ellipsoid.tf(x, y, z, model,
F, inc, dec, demag=False, pmag=pmag)
# constant factor
f = 3.71768
# magnetic field produced by the ellipsoids
# with the forced physical property multiplied by the constant factor
bx2 = oblate_ellipsoid.bx(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
by2 = oblate_ellipsoid.by(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
bz2 = oblate_ellipsoid.bz(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
tf2 = oblate_ellipsoid.tf(x, y, z, model,
F, inc, dec, demag=False, pmag=f*pmag)
# the fields must be proportional
assert_almost_equal(bx2, f*bx, decimal=10)
assert_almost_equal(by2, f*by, decimal=10)
assert_almost_equal(bz2, f*bz, decimal=10)
assert_almost_equal(tf2, f*tf, decimal=10)
# pmag not None requires demag not True
raises(AssertionError, oblate_ellipsoid.bx, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
raises(AssertionError, oblate_ellipsoid.by, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
raises(AssertionError, oblate_ellipsoid.bz, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
raises(AssertionError, oblate_ellipsoid.tf, x, y, z, model,
F, inc, dec, demag=True, pmag=pmag)
def test_oblate_ellipsoid_ignore_none():
"Oblate ellipsoid ignores model elements that are None"
# forced physical property
pmag = utils.ang2vec(7, -52, 13)
# copy of the original model
model_none = deepcopy(model)
# force an element of the copy to be None
model_none[1] = None
# magnetic field produced by the original model
# without the removed element
bx = oblate_ellipsoid.bx(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
by = oblate_ellipsoid.by(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
bz = oblate_ellipsoid.bz(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
tf = oblate_ellipsoid.tf(x, y, z, [model[0], model[2]],
F, inc, dec, demag=False, pmag=pmag)
# magnetic field produced by the copy
bx2 = oblate_ellipsoid.bx(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
by2 = oblate_ellipsoid.by(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
bz2 = oblate_ellipsoid.bz(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
tf2 = oblate_ellipsoid.tf(x, y, z, model_none,
F, inc, dec, demag=False, pmag=pmag)
assert_almost_equal(bx2, bx, decimal=15)
assert_almost_equal(by2, by, decimal=15)
assert_almost_equal(bz2, bz, decimal=15)
assert_almost_equal(tf2, tf, decimal=15)
def test_oblate_ellipsoid_missing_prop():
"Self-demagnetization requires specific properties"
# demag=True requires specific properties
raises(AssertionError, oblate_ellipsoid._bx, x, y, z, model[2],
F, inc, dec, demag=True)
raises(AssertionError, oblate_ellipsoid._by, x, y, z, model[2],
F, inc, dec, demag=True)
raises(AssertionError, oblate_ellipsoid._bz, x, y, z, model[2],
F, inc, dec, demag=True)
def test_oblate_ellipsoid_susceptibility_tensor_missing_prop():
"Susceptibility tensor requires specific properties"
suscep1 = model[0].susceptibility_tensor
suscep2 = model[1].susceptibility_tensor
suscep3 = model[2].susceptibility_tensor
assert suscep1 is not None
assert suscep2 is not None
assert suscep3 is None
def test_oblate_ellipsoid_demag_factors_sum():
"The summation of the demagnetizing factors must be equal to one"
n11, n22 = oblate_ellipsoid.demag_factors(model[0])
assert_almost_equal(n11+n22+n22, 1., decimal=15)
n11, n22 = oblate_ellipsoid.demag_factors(model[1])
assert_almost_equal(n11+n22+n22, 1., decimal=15)
n11, n22 = oblate_ellipsoid.demag_factors(model[2])
assert_almost_equal(n11+n22+n22, 1., decimal=15)
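def _oblate_demag_factors_sketch(small_axis, large_axis):
    """Independent cross-check sketch (standard textbook formula, not the
    oblate_ellipsoid module's own implementation): demagnetizing factors of
    an oblate spheroid with semi-axes large_axis = a = b > c = small_axis,
    in the SI convention where the three factors sum to one. Returns
    (n11, n22), with n22 the degenerate equatorial factor, so that
    n11 + 2*n22 == 1 and n11 > n22, as asserted in the tests above."""
    e = np.sqrt(1. - (small_axis/large_axis)**2)  # eccentricity
    # factor along the short (symmetry) axis
    n11 = (1./e**2)*(1. - np.sqrt(1. - e**2)*np.arcsin(e)/e)
    # the two equatorial factors share the remainder equally
    n22 = 0.5*(1. - n11)
    return n11, n22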
def test_oblate_ellipsoid_demag_factors_signal_order():
"Demagnetizing factors must be all positive and ordered"
n11, n22 = oblate_ellipsoid.demag_factors(model[0])
assert (n11 > 0) and (n22 > 0)
assert n11 > n22
n11, n22 = oblate_ellipsoid.demag_factors(model[1])
assert (n11 > 0) and (n22 > 0)
assert n11 > n22
n11, n22 = oblate_ellipsoid.demag_factors(model[2])
assert (n11 > 0) and (n22 > 0)
assert n11 > n22
def test_oblate_ellipsoid_self_demagnetization():
"Self-demagnetization decreases the magnetization intensity"
mag_with_demag = oblate_ellipsoid.magnetization(model[1],
F, inc, dec,
demag=True)
mag_without_demag = oblate_ellipsoid.magnetization(model[1],
F, inc, dec,
demag=False)
mag_with_demag_norm = np.linalg.norm(mag_with_demag, ord=2)
mag_without_demag_norm = np.linalg.norm(mag_without_demag, ord=2)
assert mag_with_demag_norm < mag_without_demag_norm
def test_oblate_ellipsoid_neglecting_self_demagnetization():
"The error in magnetization by negleting self-demagnetization is bounded"
# susceptibility tensor
k1, k2, k3 = model[0].props['principal susceptibilities']
strike, dip, rake = model[0].props['susceptibility angles']
# demagnetizing factors
n11, n22 = oblate_ellipsoid.demag_factors(model[0])
# maximum relative error in the resulting magnetization
max_error = k3*n11
# magnetizations calculated with and without self-demagnetization
mag_with_demag = oblate_ellipsoid.magnetization(model[0],
F, inc, dec,
demag=True)
mag_without_demag = oblate_ellipsoid.magnetization(model[0],
F, inc, dec,
demag=False)
# difference in magnetization
mag_diff = mag_with_demag - mag_without_demag
# computed norms
mag_with_demag_norm = np.linalg.norm(mag_with_demag, ord=2)
mag_diff_norm = np.linalg.norm(mag_diff, ord=2)
# computed error
computed_error = mag_diff_norm/mag_with_demag_norm
assert computed_error <= max_error
def test_oblate_ellipsoid_depolarization_tensor():
"The depolarization tensor must be symmetric"
ellipsoid = model[1]
x1, x2, x3 = oblate_ellipsoid.x1x2x3(x, y, z, ellipsoid)
lamb = oblate_ellipsoid._lamb(x1, x2, x3, ellipsoid)
denominator = oblate_ellipsoid._dlamb_aux(x1, x2, x3, ellipsoid, lamb)
dlamb_dx = oblate_ellipsoid._dlamb(x1, x2, x3, ellipsoid, lamb,
denominator, deriv='x')
dlamb_dy = oblate_ellipsoid._dlamb(x1, x2, x3, ellipsoid, lamb,
denominator, deriv='y')
dlamb_dz = oblate_ellipsoid._dlamb(x1, x2, x3, ellipsoid, lamb,
denominator, deriv='z')
h1 = oblate_ellipsoid._hv(ellipsoid, lamb, v='x')
h2 = oblate_ellipsoid._hv(ellipsoid, lamb, v='y')
h3 = oblate_ellipsoid._hv(ellipsoid, lamb, v='z')
g1 = oblate_ellipsoid._gv(ellipsoid, lamb, v='x')
g2 = oblate_ellipsoid._gv(ellipsoid, lamb, v='y')
g3 = oblate_ellipsoid._gv(ellipsoid, lamb, v='z')
a = ellipsoid.large_axis
b = ellipsoid.small_axis
cte = -0.5*a*b*b
    # elements of the depolarization tensor outside the ellipsoid
nxx = cte*(dlamb_dx*h1*x1 + g1)
nyy = cte*(dlamb_dy*h2*x2 + g2)
nzz = cte*(dlamb_dz*h3*x3 + g3)
nxy = cte*(dlamb_dx*h2*x2)
nyx = cte*(dlamb_dy*h1*x1)
nxz = cte*(dlamb_dx*h3*x3)
nzx = cte*(dlamb_dz*h1*x1)
nyz = cte*(dlamb_dy*h3*x3)
nzy = cte*(dlamb_dz*h2*x2)
trace = nxx+nyy+nzz
    # the trace must be zero
assert_almost_equal(trace, np.zeros_like(nxx), decimal=3)
# the depolarization is symmetric
assert_almost_equal(nxy, nyx, decimal=3)
assert_almost_equal(nxz, nzx, decimal=3)
assert_almost_equal(nyz, nzy, decimal=3)
def test_oblate_ellipsoid_isotropic_susceptibility():
"Isostropic susceptibility must be proportional to identity"
k1, k2, k3 = model[0].props['principal susceptibilities']
strike, dip, rake = model[0].props['susceptibility angles']
suscep = model[0].susceptibility_tensor
assert np.allclose(suscep, k1*np.identity(3))
def test_confocal_oblate_ellipsoids():
"Confocal bodies with properly scaled suscep produce the same field"
# Reference ellipsoid
    a, b = 400., 1000.  # semi-axes
chi = 1.2 # reference susceptibility
ellipsoid = OblateEllipsoid(0., 0., 1500., a, b, 45., 10., -30.,
{'principal susceptibilities': [chi,
chi,
chi],
'susceptibility angles': [0., 0., 0.]})
# Intensity of the local-geomagnetic field (in nT)
B0 = 23500.
# Direction parallel to the semi-axis a
_, inc, dec = utils.vec2ang(ellipsoid.transf_matrix.T[0])
# Magnetic moment of the reference ellipsoid
volume = ellipsoid.volume
mag = oblate_ellipsoid.magnetization(ellipsoid, B0,
inc, dec, demag=True)
moment = volume*mag
# Confocal ellipsoid
u = 2.0e6
a_confocal = np.sqrt(a*a + u)
b_confocal = np.sqrt(b*b + u)
xc = ellipsoid.x
yc = ellipsoid.y
zc = ellipsoid.z
strike = ellipsoid.strike
dip = ellipsoid.dip
rake = ellipsoid.rake
confocal_ellipsoid = OblateEllipsoid(xc, yc, zc,
a_confocal, b_confocal,
strike, dip, rake,
{'susceptibility angles':
[0., 0., 0.]})
n11, n22 = oblate_ellipsoid.demag_factors(confocal_ellipsoid)
H0 = B0/(4*np.pi*100)
volume_confocal = confocal_ellipsoid.volume
# Equivalent susceptibility
moment_norm = np.sqrt(np.sum(moment*moment))
chi_confocal = moment_norm/(volume_confocal*H0 - n11*moment_norm)
confocal_ellipsoid.addprop('principal susceptibilities',
[chi_confocal, chi_confocal, chi_confocal])
# Magnetic moment of the confocal ellipsoid
mag_confocal = oblate_ellipsoid.magnetization(confocal_ellipsoid, B0,
inc, dec, demag=True)
moment_confocal = volume_confocal*mag_confocal
# Total-field anomalies
tf = oblate_ellipsoid.tf(x, y, z, [ellipsoid], B0, inc, dec)
tf_confocal = oblate_ellipsoid.tf(x, y, z, [confocal_ellipsoid],
B0, inc, dec)
# Comparison between the moments and total-field anomalies
assert_almost_equal(moment, moment_confocal, decimal=5)
assert_almost_equal(tf, tf_confocal, decimal=12)
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 07 14:20:11 2018
@author: m.meliani
Adapted to new SMT version in march 2020 by Nathalie Bartoli
"""
import matplotlib
matplotlib.use("Agg")
import unittest
import numpy as np
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, GENN
from smt.applications.mfk import MFK, NestedLHS
from smt.applications.mfkpls import MFKPLS
from copy import deepcopy
print_output = False
class TestMFKPLS(SMTestCase):
def setUp(self):
self.nt = 100
self.ne = 100
self.ndim = 3
self.n_comp = 2
def test_mfkpls(self):
self.problems = ["exp", "tanh", "cos"]
for fname in self.problems:
prob = TensorProduct(ndim=self.ndim, func=fname)
sampling = FullFactorial(xlimits=prob.xlimits, clip=True)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
for i in range(self.ndim):
yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)
y_lf = 2 * prob(xt) + 2
x_lf = deepcopy(xt)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
# Modif MM
sm = MFKPLS()
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
# to test some options
sm.options["eval_noise"] = True
sm.options["optim_var"] = True
# modif MM
sm.options["n_comp"] = self.n_comp
sm.options["theta0"] = [1e-2] * self.n_comp
sm.set_training_values(xt, yt[:, 0])
sm.set_training_values(x_lf, y_lf[:, 0], name=0)
with Silence():
sm.train()
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
self.assert_error(t_error, 0.0, 1.5)
self.assert_error(e_error, 0.0, 1.5)
def test_mfkpls_derivs(self):
if self.ndim < 2:
print("To try test_mfkpls_derivs the dimension must be greater than 1")
prob = Sphere(ndim=self.ndim)
sampling = LHS(xlimits=prob.xlimits)
# Modif MM
nt = 100
np.random.seed(0)
xt = sampling(nt)
yt = prob(xt)
dyt = {}
for kx in range(prob.xlimits.shape[0]):
dyt[kx] = prob(xt, kx=kx)
y_lf = 2 * prob(xt) + 2
x_lf = deepcopy(xt)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
dye = {}
for kx in range(prob.xlimits.shape[0]):
dye[kx] = prob(xe, kx=kx)
print("n_comp mfkpls_deriv", self.n_comp)
# modif MM
sm = MFKPLS()
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
# modif MM
sm.options["n_comp"] = self.n_comp
sm.options["theta0"] = [1e-2] * self.n_comp
sm.set_training_values(xt, yt)
sm.set_training_values(x_lf, y_lf, name=0)
with Silence():
sm.train()
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
e_error0 = compute_rms_error(sm, xe, dye[0], 0)
e_error1 = compute_rms_error(sm, xe, dye[1], 1)
        if print_output:
            print(
                "%18.9e %18.9e %18.9e %18.9e"
                % (t_error, e_error, e_error0, e_error1)
            )
self.assert_error(e_error0, 0.0, 1e-1)
self.assert_error(e_error1, 0.0, 1e-1)
@staticmethod
def run_mfkpls_example():
import numpy as np
import matplotlib.pyplot as plt
from smt.applications.mfk import MFK, NestedLHS
from smt.applications.mfkpls import MFKPLS
# low fidelity model
def lf_function(x):
import numpy as np
return (
0.5 * ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
+ (x - 0.5) * 10.0
- 5
)
# high fidelity model
def hf_function(x):
import numpy as np
return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
# Problem set up
xlimits = np.array([[0.0, 1.0]])
xdoes = NestedLHS(nlevel=2, xlimits=xlimits)
xt_c, xt_e = xdoes(7)
# Evaluate the HF and LF functions
yt_e = hf_function(xt_e)
yt_c = lf_function(xt_c)
# choice of number of PLS components
ncomp = 1
sm = MFKPLS(n_comp=ncomp, theta0=np.array(ncomp * [1.0]))
# low-fidelity dataset names being integers from 0 to level-1
sm.set_training_values(xt_c, yt_c, name=0)
# high-fidelity dataset without name
sm.set_training_values(xt_e, yt_e)
# train the model
sm.train()
x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)
# query the outputs
y = sm.predict_values(x)
mse = sm.predict_variances(x)
derivs = sm.predict_derivatives(x, kx=0)
plt.figure()
plt.plot(x, hf_function(x), label="reference")
plt.plot(x, y, linestyle="-.", label="mean_gp")
plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")
plt.scatter(xt_c, yt_c, marker="*", color="g", label="LF doe")
plt.legend(loc=0)
plt.ylim(-10, 17)
plt.xlim(-0.1, 1.1)
plt.xlabel(r"$x$")
plt.ylabel(r"$y$")
plt.show()
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import importlib
threedcoin_hash = importlib.import_module('3dcoin_hash')  # module name starts with a digit, so a plain "import" is a SyntaxError
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
#hash1 = hashlib.sha256()
#hash1.update(blk_hdr)
#hash1_o = hash1.digest()
#hash2 = hashlib.sha256()
#hash2.update(hash1_o)
#hash2_o = hash2.digest()
#return hash2_o
	pow_hash = threedcoin_hash.getPoWHash(blk_hdr)
return pow_hash
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
				os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
outFname = self.settings['output_file']
else:
outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + outFname)
self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + inMagic.encode('hex'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
			hash_str = calc_hash_str(blk_hdr)
			self.hash_str = hash_str
			if not hash_str in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'cee2caff'
if 'genesis' not in settings:
settings['genesis'] = '00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
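# Illustrative CONFIG-FILE contents (paths are hypothetical; netmagic and
# genesis mirror the fallback values assigned above):
#
#   netmagic=cee2caff
#   genesis=00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c
#   input=/home/example/.3dcoincore/blocks
#   hashlist=hashlist.txt
#   output_file=/home/example/bootstrap.dat
#   max_out_sz=1000000000
#   split_timestamp=0
#   file_timestamp=0
#   out_of_order_cache_sz=100000000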
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
Note that the symbols that are exported to v1 tf.train namespace are also
exported to v2 in tf.estimator namespace. See
https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.client import timeline
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
from tensorflow.python.util.tf_export import tf_export
_HOOKS = "hooks"
_STEPS_PER_RUN_VAR = "steps_per_run"
class _HookTimer(object):
"""Base timer for determining when Hooks should trigger.
Should not be instantiated directly.
"""
def __init__(self):
pass
def reset(self):
"""Resets the timer."""
pass
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step."""
raise NotImplementedError
def update_last_triggered_step(self, step):
"""Update the last triggered time and step number.
Args:
step: The current step.
Returns:
A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number
of seconds between the current trigger and the last one (a float), and
`elapsed_steps` is the number of steps between the current trigger and
the last one. Both values will be set to `None` on the first trigger.
"""
raise NotImplementedError
def last_triggered_step(self):
"""Returns the last triggered time step or None if never triggered."""
raise NotImplementedError
@tf_export(v1=["train.SecondOrStepTimer"])
class SecondOrStepTimer(_HookTimer):
"""Timer that triggers at most once every N seconds or once every N steps.
This symbol is also exported to v2 in tf.estimator namespace. See
https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py
"""
def __init__(self, every_secs=None, every_steps=None):
self.reset()
self._every_secs = every_secs
self._every_steps = every_steps
if self._every_secs is None and self._every_steps is None:
raise ValueError("Either every_secs or every_steps should be provided.")
if (self._every_secs is not None) and (self._every_steps is not None):
raise ValueError("Can not provide both every_secs and every_steps.")
super(SecondOrStepTimer, self).__init__()
def reset(self):
self._last_triggered_step = None
self._last_triggered_time = None
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step.
Args:
step: Training step to trigger on.
Returns:
True if the difference between the current time and the time of the last
trigger exceeds `every_secs`, or if the difference between the current
step and the last triggered step exceeds `every_steps`. False otherwise.
"""
if self._last_triggered_step is None:
return True
if self._last_triggered_step == step:
return False
if self._every_secs is not None:
if time.time() >= self._last_triggered_time + self._every_secs:
return True
if self._every_steps is not None:
if step >= self._last_triggered_step + self._every_steps:
return True
return False
def update_last_triggered_step(self, step):
current_time = time.time()
if self._last_triggered_time is None:
elapsed_secs = None
elapsed_steps = None
else:
elapsed_secs = current_time - self._last_triggered_time
elapsed_steps = step - self._last_triggered_step
self._last_triggered_time = current_time
self._last_triggered_step = step
return (elapsed_secs, elapsed_steps)
def last_triggered_step(self):
return self._last_triggered_step
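def _second_or_step_timer_sketch():
  """Minimal illustration (not used by the hooks themselves) of the trigger
  contract: with every_steps=10 the timer fires on the first query, stays
  quiet until 10 more steps have elapsed, then fires again."""
  timer = SecondOrStepTimer(every_steps=10)
  assert timer.should_trigger_for_step(1)      # never triggered before
  timer.update_last_triggered_step(1)
  assert not timer.should_trigger_for_step(5)  # fewer than 10 steps elapsed
  assert timer.should_trigger_for_step(11)     # 10 steps elapsed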
class NeverTriggerTimer(_HookTimer):
"""Timer that never triggers."""
def should_trigger_for_step(self, step):
_ = step
return False
def update_last_triggered_step(self, step):
_ = step
return (None, None)
def last_triggered_step(self):
return None
@tf_export(v1=["train.LoggingTensorHook"])
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints the given tensors every N local steps, every N seconds, or at end.
The tensors will be printed to the log, with `INFO` severity. If you are not
seeing the logs, you might want to add the following line after your imports:
```python
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
```
Note that if `at_end` is True, `tensors` should not include any tensor
whose evaluation produces a side effect such as consuming additional inputs.
"""
def __init__(self,
tensors,
every_n_iter=None,
every_n_secs=None,
at_end=False,
formatter=None):
"""Initializes a `LoggingTensorHook`.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names, or
`iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
at_end: `bool` specifying whether to print the values of `tensors` at the
end of the run.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
self._tag_order = tensors
tensors = {item: item for item in tensors}
else:
self._tag_order = sorted(tensors.keys())
self._tensors = tensors
self._formatter = formatter
self._timer = (
NeverTriggerTimer() if only_log_at_end else SecondOrStepTimer(
every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
def begin(self):
self._timer.reset()
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {
tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()
}
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
if self._should_trigger:
return SessionRunArgs(self._current_tensors)
else:
return None
def _log_tensors(self, tensor_values):
original = np.get_printoptions()
np.set_printoptions(suppress=True)
elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
if self._formatter:
logging.info(self._formatter(tensor_values))
else:
stats = []
for tag in self._tag_order:
stats.append("%s = %s" % (tag, tensor_values[tag]))
if elapsed_secs is not None:
logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
else:
logging.info("%s", ", ".join(stats))
np.set_printoptions(**original)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._log_tensors(run_values.results)
self._iter_count += 1
def end(self, session):
if self._log_at_end:
values = session.run(self._current_tensors)
self._log_tensors(values)
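# A minimal usage sketch (hypothetical tensor names; assumes a graph that
# already defines a 'loss' tensor and a train_op):
#
#   logging_hook = LoggingTensorHook({"loss": "loss:0"}, every_n_iter=100)
#   with tf.compat.v1.train.MonitoredTrainingSession(hooks=[logging_hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)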
def get_or_create_steps_per_run_variable():
"""Gets or creates the steps_per_run variable.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each
device program execution and before the next execution.
The purpose of using a variable, rather than a constant, is to allow
  Estimator to adapt the device training iterations according to the final steps
specified by users. For example, if the user sets the steps_per_run as
4 and steps as 10 in Estimator.train(), the steps_per_run
variable will have the following value before each training run.
- 1-st execution: steps_per_run = 4
- 2-nd execution: steps_per_run = 4
- 3-rd execution: steps_per_run = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multi steps_per_run variables were found.
"""
graph = ops.get_default_graph()
collection_name = "{}_{}".format(_HOOKS, _STEPS_PER_RUN_VAR)
steps_per_run_vars = graph.get_collection(collection_name)
if len(steps_per_run_vars) == 1:
return steps_per_run_vars[0]
elif len(steps_per_run_vars) > 1:
raise RuntimeError("Multiple steps_per_run_var in collection.")
with variable_scope.variable_scope(_HOOKS, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_STEPS_PER_RUN_VAR,
initializer=init_ops.ones_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
class _MultiStepStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None, steps_per_run=1):
"""Initializes a `MultiStepStopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The steps_per_run variable
determines the number of iterations of the loop before returning to the CPU.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
steps_per_run: Number of steps executed per run call.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
if steps_per_run is None or steps_per_run < 1:
raise ValueError("steps_per_run should be greater than 0")
self._num_steps = num_steps
self._last_step = last_step
self._steps_per_run_initial_value = steps_per_run
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
self._steps_per_run_variable = get_or_create_steps_per_run_variable()
def _update_steps_per_run_variable(self, global_step, session):
steps = min(self._last_step - global_step,
self._steps_per_run_initial_value)
self._steps_per_run_variable.load(steps, session=session)
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
self._update_steps_per_run_variable(global_step, session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition in hook execution.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
self._update_steps_per_run_variable(global_step, run_context.session)
@tf_export(v1=["train.StopAtStepHook"])
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def after_create_session(self, session, coord):
if self._last_step is None:
global_step = session.run(self._global_step_tensor)
self._last_step = global_step + self._num_steps
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results + 1
if global_step >= self._last_step:
# Check latest global step to ensure that the targeted last step is
# reached. global_step read tensor is the value of global step
# before running the operation. We're not sure whether current session.run
# incremented the global_step or not. Here we're checking it.
step = run_context.session.run(self._global_step_tensor)
if step >= self._last_step:
run_context.request_stop()
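# A minimal usage sketch (assumes a graph with a global step and a train_op):
#
#   stop_hook = StopAtStepHook(num_steps=1000)
#   with tf.compat.v1.train.MonitoredTrainingSession(hooks=[stop_hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)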
@tf_export(v1=["train.CheckpointSaverListener"])
class CheckpointSaverListener(object):
"""Interface for listeners that take action before or after checkpoint save.
`CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is
triggered, and provides callbacks at the following points:
- before using the session
- before each call to `Saver.save()`
- after each call to `Saver.save()`
- at the end of session
To use a listener, implement a class and pass the listener to a
`CheckpointSaverHook`, as in this example:
```python
class ExampleCheckpointSaverListener(CheckpointSaverListener):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def before_save(self, session, global_step_value):
print('About to write a checkpoint')
def after_save(self, session, global_step_value):
print('Done writing checkpoint.')
if decided_to_stop_training():
return True
def end(self, session, global_step_value):
print('Done with the session.')
...
listener = ExampleCheckpointSaverListener()
saver_hook = tf.estimator.CheckpointSaverHook(
checkpoint_dir, listeners=[listener])
  with tf.compat.v1.train.MonitoredTrainingSession(
      chief_only_hooks=[saver_hook]):
...
```
A `CheckpointSaverListener` may simply take some action after every
checkpoint save. It is also possible for the listener to use its own schedule
to act less frequently, e.g. based on global_step_value. In this case,
implementors should implement the `end()` method to handle actions related to
the last checkpoint save. But the listener should not act twice if
`after_save()` already handled this last checkpoint save.
  A `CheckpointSaverListener` can request training to be stopped by returning
  True in `after_save`. Please note that, in a replicated distributed training
  setting, only the `chief` should use this behavior. Otherwise each worker
  will do its own evaluation, which may waste resources.
"""
def begin(self):
pass
def before_save(self, session, global_step_value):
pass
def after_save(self, session, global_step_value):
pass
def end(self, session, global_step_value):
pass
@tf_export(v1=["train.CheckpointSaverHook"])
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None,
save_graph_def=True):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
save_graph_def: Whether to save the GraphDef and MetaGraphDef to
`checkpoint_dir`. The GraphDef is saved after the session is created as
`graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as
`model.ckpt-*.meta`.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create CheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._save_graph_def = save_graph_def
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._save_graph_def:
      # We write the graph and saver_def here rather than in begin, since we
      # let other hooks change the graph and add variables in begin. The graph
      # is finalized after all begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
last_step = session.run(self._global_step_tensor)
if last_step != self._timer.last_triggered_step():
self._save(session, last_step)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step):
"""Saves the latest checkpoint, returns should_stop."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step,
write_meta_graph=self._save_graph_def)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
should_stop = False
for l in self._listeners:
if l.after_save(session, step):
logging.info(
"A CheckpointSaverListener requested that training be stopped. "
"listener: {}".format(l))
should_stop = True
return should_stop
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
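# --- Illustrative usage sketch (added commentary; not part of the original
# module). Mirrors the CheckpointSaverListener example above: the hook is
# built with a checkpoint directory and passed as a chief-only hook. The
# "/tmp/ckpt_example" path and the toy train op are hypothetical placeholders.
def _example_checkpoint_saver_hook_usage():
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  global_step = tf.compat.v1.train.get_or_create_global_step()
  train_op = tf.compat.v1.assign_add(global_step, 1)
  saver_hook = tf.compat.v1.train.CheckpointSaverHook(
      checkpoint_dir="/tmp/ckpt_example",  # hypothetical, must be writable
      save_steps=10)
  stop_hook = tf.compat.v1.train.StopAtStepHook(num_steps=50)
  with tf.compat.v1.train.MonitoredTrainingSession(
      chief_only_hooks=[saver_hook], hooks=[stop_hook]) as sess:
    while not sess.should_stop():
      sess.run(train_op)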
@tf_export(v1=["train.StepCounterHook"])
class StepCounterHook(session_run_hook.SessionRunHook):
"""Hook that counts steps per second."""
def __init__(self,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_steps and every_n_secs should be provided.")
self._timer = SecondOrStepTimer(
every_steps=every_n_steps, every_secs=every_n_secs)
self._summary_writer = summary_writer
self._output_dir = output_dir
self._last_global_step = None
self._steps_per_run = 1
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
self._summary_tag = training_util.get_global_step().op.name + "/sec"
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
steps_per_sec = elapsed_steps / elapsed_time
if self._summary_writer is not None:
summary = Summary(value=[
Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)
])
self._summary_writer.add_summary(summary, global_step)
logging.info("%s: %g", self._summary_tag, steps_per_sec)
def after_run(self, run_context, run_values):
_ = run_context
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
self._log_and_record(elapsed_steps, elapsed_time, global_step)
# Check whether the global step has been increased. Here, we do not use the
# timer.last_triggered_step as the timer might record a different global
# step value such that the comparison could be unreliable. For simplicity,
# we just compare the stale_global_step with previously recorded version.
if stale_global_step == self._last_global_step:
      # Log a warning for (at most) the first 5 occurrences where the global
      # step has not been increased. For some Optimizers the global step is,
      # by design, not increased on every run. For example,
      # SyncReplicasOptimizer doesn't increase the global step in the worker's
      # main train step.
logging.log_first_n(
logging.WARN,
"It seems that global step (tf.train.get_global_step) has not "
"been increased. Current value (could be stable): %s vs previous "
"value: %s. You could increase the global step by passing "
"tf.train.get_global_step() to Optimizer.apply_gradients or "
"Optimizer.minimize.", 5, stale_global_step, self._last_global_step)
self._last_global_step = stale_global_step
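# --- Illustrative usage sketch (added commentary; not part of the original
# module). StepCounterHook only needs a global step and, optionally, an output
# directory for the steps/sec summaries; "/tmp/train_logs" below is a
# hypothetical path and the assign_add op stands in for a real train op.
def _example_step_counter_hook_usage():
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  global_step = tf.compat.v1.train.get_or_create_global_step()
  train_op = tf.compat.v1.assign_add(global_step, 1)
  counter_hook = tf.compat.v1.train.StepCounterHook(
      every_n_steps=10, output_dir="/tmp/train_logs")
  stop_hook = tf.compat.v1.train.StopAtStepHook(num_steps=30)
  with tf.compat.v1.train.MonitoredTrainingSession(
      hooks=[counter_hook, stop_hook]) as sess:
    while not sess.should_stop():
      sess.run(train_op)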
@tf_export(v1=["train.NanLossDuringTrainingError"])
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
@tf_export(v1=["train.NanTensorHook"])
class NanTensorHook(session_run_hook.SessionRunHook):
"""Monitors the loss tensor and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes a `NanTensorHook`.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
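# --- Illustrative usage sketch (added commentary; not part of the original
# module). The hook fetches the given loss tensor on every run call and either
# raises NanLossDuringTrainingError or requests a clean stop. The placeholder
# loss fed with NaN below is a contrived stand-in for a real model loss.
def _example_nan_tensor_hook_usage():
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  global_step = tf.compat.v1.train.get_or_create_global_step()
  train_op = tf.compat.v1.assign_add(global_step, 1)
  loss = tf.compat.v1.placeholder(tf.float32, shape=[], name="loss")
  nan_hook = tf.compat.v1.train.NanTensorHook(loss, fail_on_nan_loss=False)
  with tf.compat.v1.train.MonitoredTrainingSession(hooks=[nan_hook]) as sess:
    # Feeding NaN makes the hook log a warning and request_stop().
    sess.run(train_op, feed_dict={loss: float("nan")})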
@tf_export(v1=["train.SummarySaverHook"])
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaverHook`.
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used if
no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output by
TF summary methods like `tf.compat.v1.summary.scalar` or
`tf.compat.v1.summary.merge_all`. It can be passed in as one tensor; if
more than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
if ((scaffold is None and summary_op is None) or
(scaffold is not None and summary_op is not None)):
raise ValueError(
"Exactly one of scaffold or summary_op must be provided.")
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = (
self._next_step is None or
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
stale_global_step = run_values.results["global_step"]
global_step = stale_global_step + 1
if self._next_step is None or self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
if self._next_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._timer.update_last_triggered_step(global_step)
if "summary" in run_values.results:
for summary in run_values.results["summary"]:
self._summary_writer.add_summary(summary, global_step)
self._next_step = global_step + 1
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _get_summary_op(self):
"""Fetches the summary op either from self._summary_op or self._scaffold.
Returns:
Returns a list of summary `Tensor`.
"""
summary_op = None
if self._summary_op is not None:
summary_op = self._summary_op
elif self._scaffold.summary_op is not None:
summary_op = self._scaffold.summary_op
if summary_op is None:
return None
if not isinstance(summary_op, list):
return [summary_op]
return summary_op
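# --- Illustrative usage sketch (added commentary; not part of the original
# module). Exactly one of `summary_op` / `scaffold` must be given; here the
# merged summary op is passed explicitly. "/tmp/summaries" is a hypothetical
# output directory and the scalar summary is a toy example.
def _example_summary_saver_hook_usage():
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  global_step = tf.compat.v1.train.get_or_create_global_step()
  train_op = tf.compat.v1.assign_add(global_step, 1)
  tf.compat.v1.summary.scalar("global_step_value",
                              tf.cast(global_step, tf.float32))
  summary_hook = tf.compat.v1.train.SummarySaverHook(
      save_steps=10,
      output_dir="/tmp/summaries",
      summary_op=tf.compat.v1.summary.merge_all())
  stop_hook = tf.compat.v1.train.StopAtStepHook(num_steps=30)
  with tf.compat.v1.train.MonitoredTrainingSession(
      hooks=[summary_hook, stop_hook]) as sess:
    while not sess.should_stop():
      sess.run(train_op)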
@tf_export(v1=["train.GlobalStepWaiterHook"])
class GlobalStepWaiterHook(session_run_hook.SessionRunHook):
"""Delays execution until global step reaches `wait_until_step`.
  This hook delays execution until the global step reaches `wait_until_step`.
  It is used to gradually start workers in distributed settings. One example
  usage would be setting `wait_until_step=int(K*log(task_id+1))`, assuming
  that task_id=0 is the chief.
"""
def __init__(self, wait_until_step):
"""Initializes a `GlobalStepWaiterHook`.
Args:
      wait_until_step: an `int`, the global step to wait for before allowing
        execution to proceed.
"""
self._wait_until_step = wait_until_step
def begin(self):
self._worker_is_started = False
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use _GlobalStepWaiterHook.")
def before_run(self, run_context):
if self._worker_is_started:
return None
if self._wait_until_step <= 0:
self._worker_is_started = True
return None
logging.info("Waiting for global step %d before starting training.",
self._wait_until_step)
last_logged_step = 0
while True:
current_step = run_context.session.run(self._global_step_tensor)
if current_step >= self._wait_until_step:
self._worker_is_started = True
return None
if current_step - last_logged_step > 1000:
logging.info(
"Waiting for global step %d before starting training. "
"Current step is %d.", self._wait_until_step, current_step)
last_logged_step = current_step
time.sleep(0.5)
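# --- Illustrative usage sketch (added commentary; not part of the original
# module). Demonstrates the staggered-start pattern from the docstring above;
# `task_id` and `K` are hypothetical values normally derived from the cluster
# configuration. With task_id=0 the wait step is 0 and the worker starts
# immediately; non-zero task ids block until the chief advances the global
# step.
def _example_global_step_waiter_usage(task_id=0, K=100):
  import math
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  global_step = tf.compat.v1.train.get_or_create_global_step()
  train_op = tf.compat.v1.assign_add(global_step, 1)
  wait_hook = tf.compat.v1.train.GlobalStepWaiterHook(
      wait_until_step=int(K * math.log(task_id + 1)))
  stop_hook = tf.compat.v1.train.StopAtStepHook(num_steps=10)
  with tf.compat.v1.train.MonitoredTrainingSession(
      hooks=[wait_hook, stop_hook]) as sess:
    while not sess.should_stop():
      sess.run(train_op)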
@tf_export(v1=["train.FinalOpsHook"])
class FinalOpsHook(session_run_hook.SessionRunHook):
"""A hook which evaluates `Tensors` at the end of a session."""
def __init__(self, final_ops, final_ops_feed_dict=None):
"""Initializes `FinalOpHook` with ops to run at the end of the session.
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running
        `final_ops`.
"""
self._final_ops = final_ops
self._final_ops_feed_dict = final_ops_feed_dict
self._final_ops_values = None
@property
def final_ops_values(self):
return self._final_ops_values
def end(self, session):
if self._final_ops is not None:
try:
self._final_ops_values = session.run(
self._final_ops, feed_dict=self._final_ops_feed_dict)
except (errors.OutOfRangeError, StopIteration) as e:
logging.warning(
"An OutOfRangeError or StopIteration exception is raised by the "
"code in FinalOpsHook. This typically means the Ops running by the "
"FinalOpsHook have a dependency back to some input source, which "
"should not happen. For example, for metrics in "
"tf.estimator.Estimator, all metrics functions return two Ops: "
"`value_op` and `update_op`. Estimator.evaluate calls the "
"`update_op` for each batch of the data in input source and, once "
"it is exhausted, it call the `value_op` to get the metric values. "
"The `value_op` here should have dependency back to variables "
"reading only, rather than reading another batch from input. "
"Otherwise, the `value_op`, executed by `FinalOpsHook`, triggers "
"another data reading, which ends OutOfRangeError/StopIteration. "
"Please fix that.")
raise e
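# --- Illustrative usage sketch (added commentary; not part of the original
# module). The hook evaluates `final_ops` once when the session ends and
# exposes the result via `final_ops_values`; the counter variable below is a
# toy stand-in for e.g. an eval metric `value_op`.
def _example_final_ops_hook_usage():
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  counter = tf.compat.v1.get_variable("final_counter", initializer=0)
  increment = tf.compat.v1.assign_add(counter, 1)
  final_hook = tf.compat.v1.train.FinalOpsHook(final_ops=counter)
  with tf.compat.v1.train.MonitoredSession(hooks=[final_hook]) as sess:
    for _ in range(3):
      sess.run(increment)
  # Evaluated by the hook's end() just before the session closed.
  return final_hook.final_ops_values  # -> 3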
@tf_export(v1=["train.FeedFnHook"])
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs `feed_fn` and sets the `feed_dict` accordingly."""
def __init__(self, feed_fn):
"""Initializes a `FeedFnHook`.
Args:
feed_fn: function that takes no arguments and returns `dict` of `Tensor`
to feed.
"""
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
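# --- Illustrative usage sketch (added commentary; not part of the original
# module). `feed_fn` is called before every run and its dict is merged into
# the feed_dict; the placeholder and the Python counter below stand in for a
# real input pipeline.
def _example_feed_fn_hook_usage():
  import itertools
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  x = tf.compat.v1.placeholder(tf.float32, shape=[], name="x")
  doubled = x * 2.0
  values = itertools.count(start=0.0, step=1.0)
  feed_hook = tf.compat.v1.train.FeedFnHook(feed_fn=lambda: {x: next(values)})
  with tf.compat.v1.train.MonitoredSession(hooks=[feed_hook]) as sess:
    for _ in range(3):
      print(sess.run(doubled))  # 0.0, 2.0, 4.0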
@tf_export(v1=["train.ProfilerHook"])
class ProfilerHook(session_run_hook.SessionRunHook):
"""Captures CPU/GPU profiling information every N steps or seconds.
This produces files called "timeline-<step>.json", which are in Chrome
Trace format.
For more information see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
"""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir="",
show_dataflow=True,
show_memory=False):
"""Initializes a hook that takes periodic profiling snapshots.
`options.run_metadata` argument of `tf.Session.Run` is used to collect
metadata about execution. This hook sets the metadata and dumps it in Chrome
Trace format.
Args:
save_steps: `int`, save profile traces every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int` or `float`, save profile traces every N seconds.
output_dir: `string`, the directory to save the profile traces to.
Defaults to the current directory.
show_dataflow: `bool`, if True, add flow events to the trace connecting
producers and consumers of tensors.
show_memory: `bool`, if True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
"""
self._output_file = os.path.join(output_dir, "timeline-{}.json")
self._file_writer = SummaryWriterCache.get(output_dir)
self._show_dataflow = show_dataflow
self._show_memory = show_memory
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
def begin(self):
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use ProfilerHook.")
def before_run(self, run_context):
self._request_summary = (
self._next_step is not None and
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
opts = (
config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
if self._request_summary else None)
return SessionRunArgs(requests, options=opts)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results["global_step"]
if self._next_step is None:
# Update the timer so that it does not activate until N steps or seconds
# have passed.
self._timer.update_last_triggered_step(stale_global_step)
global_step = stale_global_step + 1
if self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
self._timer.update_last_triggered_step(global_step)
self._save(global_step, self._output_file.format(global_step),
run_values.run_metadata.step_stats)
self._file_writer.add_run_metadata(run_values.run_metadata,
"step_%d" % global_step)
self._next_step = global_step + 1
def _save(self, step, save_path, step_stats):
logging.info("Saving timeline for %d into '%s'.", step, save_path)
with gfile.Open(save_path, "w") as f:
trace = timeline.Timeline(step_stats)
f.write(
trace.generate_chrome_trace_format(
show_dataflow=self._show_dataflow, show_memory=self._show_memory))
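# --- Illustrative usage sketch (added commentary; not part of the original
# module). Every `save_steps` steps the hook re-runs with FULL_TRACE options
# and writes "timeline-<step>.json" under `output_dir` (viewable in
# chrome://tracing); "/tmp/profile" is a hypothetical directory.
def _example_profiler_hook_usage():
  import tensorflow as tf
  tf.compat.v1.disable_eager_execution()
  global_step = tf.compat.v1.train.get_or_create_global_step()
  train_op = tf.compat.v1.assign_add(global_step, 1)
  profiler_hook = tf.compat.v1.train.ProfilerHook(
      save_steps=10, output_dir="/tmp/profile")
  stop_hook = tf.compat.v1.train.StopAtStepHook(num_steps=30)
  with tf.compat.v1.train.MonitoredTrainingSession(
      hooks=[profiler_hook, stop_hook]) as sess:
    while not sess.should_stop():
      sess.run(train_op)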
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
|
|
import copy
from Cython.Compiler import (ExprNodes, PyrexTypes, MemoryView,
ParseTreeTransforms, StringEncoding,
Errors)
from Cython.Compiler.ExprNodes import CloneNode, ProxyNode, TupleNode
from Cython.Compiler.Nodes import (FuncDefNode, CFuncDefNode, StatListNode,
DefNode)
class FusedCFuncDefNode(StatListNode):
"""
This node replaces a function with fused arguments. It deep-copies the
function for every permutation of fused types, and allocates a new local
scope for it. It keeps track of the original function in self.node, and
the entry of the original function in the symbol table is given the
'fused_cfunction' attribute which points back to us.
Then when a function lookup occurs (to e.g. call it), the call can be
dispatched to the right function.
node FuncDefNode the original function
nodes [FuncDefNode] list of copies of node with different specific types
py_func DefNode the fused python function subscriptable from
Python space
__signatures__ A DictNode mapping signature specialization strings
to PyCFunction nodes
resulting_fused_function PyCFunction for the fused DefNode that delegates
to specializations
fused_func_assignment Assignment of the fused function to the function name
defaults_tuple TupleNode of defaults (letting PyCFunctionNode build
defaults would result in many different tuples)
specialized_pycfuncs List of synthesized pycfunction nodes for the
specializations
code_object CodeObjectNode shared by all specializations and the
fused function
fused_compound_types All fused (compound) types (e.g. floating[:])
"""
__signatures__ = None
resulting_fused_function = None
fused_func_assignment = None
defaults_tuple = None
decorators = None
def __init__(self, node, env):
super(FusedCFuncDefNode, self).__init__(node.pos)
self.nodes = []
self.node = node
is_def = isinstance(self.node, DefNode)
if is_def:
# self.node.decorators = []
self.copy_def(env)
else:
self.copy_cdef(env)
# Perform some sanity checks. If anything fails, it's a bug
for n in self.nodes:
assert not n.entry.type.is_fused
assert not n.local_scope.return_type.is_fused
if node.return_type.is_fused:
assert not n.return_type.is_fused
if not is_def and n.cfunc_declarator.optional_arg_count:
assert n.type.op_arg_struct
node.entry.fused_cfunction = self
        # Copy the nodes, since AnalyseDeclarationsTransform will prepend
        # self.py_func to self.stats and we only want specialized
        # CFuncDefNodes in self.nodes.
self.stats = self.nodes[:]
def copy_def(self, env):
"""
Create a copy of the original def or lambda function for specialized
versions.
"""
fused_compound_types = PyrexTypes.unique(
[arg.type for arg in self.node.args if arg.type.is_fused])
permutations = PyrexTypes.get_all_specialized_permutations(fused_compound_types)
self.fused_compound_types = fused_compound_types
if self.node.entry in env.pyfunc_entries:
env.pyfunc_entries.remove(self.node.entry)
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
self._specialize_function_args(copied_node.args, fused_to_specific)
copied_node.return_type = self.node.return_type.specialize(
fused_to_specific)
copied_node.analyse_declarations(env)
# copied_node.is_staticmethod = self.node.is_staticmethod
# copied_node.is_classmethod = self.node.is_classmethod
self.create_new_local_scope(copied_node, env, fused_to_specific)
self.specialize_copied_def(copied_node, cname, self.node.entry,
fused_to_specific, fused_compound_types)
PyrexTypes.specialize_entry(copied_node.entry, cname)
copied_node.entry.used = True
env.entries[copied_node.entry.name] = copied_node.entry
if not self.replace_fused_typechecks(copied_node):
break
self.orig_py_func = self.node
self.py_func = self.make_fused_cpdef(self.node, env, is_def=True)
def copy_cdef(self, env):
"""
Create a copy of the original c(p)def function for all specialized
versions.
"""
permutations = self.node.type.get_all_specialized_permutations()
# print 'Node %s has %d specializations:' % (self.node.entry.name,
# len(permutations))
# import pprint; pprint.pprint([d for cname, d in permutations])
if self.node.entry in env.cfunc_entries:
env.cfunc_entries.remove(self.node.entry)
# Prevent copying of the python function
self.orig_py_func = orig_py_func = self.node.py_func
self.node.py_func = None
if orig_py_func:
env.pyfunc_entries.remove(orig_py_func.entry)
fused_types = self.node.type.get_fused_types()
self.fused_compound_types = fused_types
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
# Make the types in our CFuncType specific
type = copied_node.type.specialize(fused_to_specific)
entry = copied_node.entry
copied_node.type = type
entry.type, type.entry = type, entry
entry.used = (entry.used or
self.node.entry.defined_in_pxd or
env.is_c_class_scope or
entry.is_cmethod)
if self.node.cfunc_declarator.optional_arg_count:
self.node.cfunc_declarator.declare_optional_arg_struct(
type, env, fused_cname=cname)
copied_node.return_type = type.return_type
self.create_new_local_scope(copied_node, env, fused_to_specific)
# Make the argument types in the CFuncDeclarator specific
self._specialize_function_args(copied_node.cfunc_declarator.args,
fused_to_specific)
type.specialize_entry(entry, cname)
env.cfunc_entries.append(entry)
# If a cpdef, declare all specialized cpdefs (this
# also calls analyse_declarations)
copied_node.declare_cpdef_wrapper(env)
if copied_node.py_func:
env.pyfunc_entries.remove(copied_node.py_func.entry)
self.specialize_copied_def(
copied_node.py_func, cname, self.node.entry.as_variable,
fused_to_specific, fused_types)
if not self.replace_fused_typechecks(copied_node):
break
if orig_py_func:
self.py_func = self.make_fused_cpdef(orig_py_func, env,
is_def=False)
else:
self.py_func = orig_py_func
def _specialize_function_args(self, args, fused_to_specific):
for arg in args:
if arg.type.is_fused:
arg.type = arg.type.specialize(fused_to_specific)
if arg.type.is_memoryviewslice:
MemoryView.validate_memslice_dtype(arg.pos, arg.type.dtype)
def create_new_local_scope(self, node, env, f2s):
"""
Create a new local scope for the copied node and append it to
self.nodes. A new local scope is needed because the arguments with the
        fused types are already in the local scope, and we need the specialized
entries created after analyse_declarations on each specialized version
of the (CFunc)DefNode.
f2s is a dict mapping each fused type to its specialized version
"""
node.create_local_scope(env)
node.local_scope.fused_to_specific = f2s
# This is copied from the original function, set it to false to
# stop recursion
node.has_fused_arguments = False
self.nodes.append(node)
def specialize_copied_def(self, node, cname, py_entry, f2s, fused_types):
"""Specialize the copy of a DefNode given the copied node,
the specialization cname and the original DefNode entry"""
type_strings = [
PyrexTypes.specialization_signature_string(fused_type, f2s)
for fused_type in fused_types
]
node.specialized_signature_string = '|'.join(type_strings)
node.entry.pymethdef_cname = PyrexTypes.get_fused_cname(
cname, node.entry.pymethdef_cname)
node.entry.doc = py_entry.doc
node.entry.doc_cname = py_entry.doc_cname
def replace_fused_typechecks(self, copied_node):
"""
Branch-prune fused type checks like
if fused_t is int:
...
        Returns False if an error was issued, so that specialization can stop
        early and prevent a flood of errors; returns True otherwise.
"""
num_errors = Errors.num_errors
transform = ParseTreeTransforms.ReplaceFusedTypeChecks(
copied_node.local_scope)
transform(copied_node)
if Errors.num_errors > num_errors:
return False
return True
def _fused_instance_checks(self, normal_types, pyx_code, env):
"""
        Generate Cython code for instance checks, matching an object to
specialized types.
"""
if_ = 'if'
for specialized_type in normal_types:
# all_numeric = all_numeric and specialized_type.is_numeric
py_type_name = specialized_type.py_type_name()
specialized_type_name = specialized_type.specialization_string
pyx_code.context.update(locals())
pyx_code.put_chunk(
u"""
{{if_}} isinstance(arg, {{py_type_name}}):
dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'
""")
if_ = 'elif'
if not normal_types:
# we need an 'if' to match the following 'else'
pyx_code.putln("if 0: pass")
def _dtype_name(self, dtype):
if dtype.is_typedef:
return '___pyx_%s' % dtype
return str(dtype).replace(' ', '_')
def _dtype_type(self, dtype):
if dtype.is_typedef:
return self._dtype_name(dtype)
return str(dtype)
def _sizeof_dtype(self, dtype):
if dtype.is_pyobject:
return 'sizeof(void *)'
else:
return "sizeof(%s)" % self._dtype_type(dtype)
def _buffer_check_numpy_dtype_setup_cases(self, pyx_code):
"Setup some common cases to match dtypes against specializations"
if pyx_code.indenter("if dtype.kind in ('i', 'u'):"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_int")
pyx_code.dedent()
if pyx_code.indenter("elif dtype.kind == 'f':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_float")
pyx_code.dedent()
if pyx_code.indenter("elif dtype.kind == 'c':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_complex")
pyx_code.dedent()
if pyx_code.indenter("elif dtype.kind == 'O':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_object")
pyx_code.dedent()
match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
no_match = "dest_sig[{{dest_sig_idx}}] = None"
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types):
"""
Match a numpy dtype object to the individual specializations.
"""
self._buffer_check_numpy_dtype_setup_cases(pyx_code)
for specialized_type in specialized_buffer_types:
dtype = specialized_type.dtype
pyx_code.context.update(
itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
dtype=dtype,
specialized_type_name=specialized_type.specialization_string)
dtypes = [
(dtype.is_int, pyx_code.dtype_int),
(dtype.is_float, pyx_code.dtype_float),
(dtype.is_complex, pyx_code.dtype_complex)
]
for dtype_category, codewriter in dtypes:
if dtype_category:
cond = '{{itemsize_match}} and arg.ndim == %d' % (
specialized_type.ndim,)
if dtype.is_int:
cond += ' and {{signed_match}}'
if codewriter.indenter("if %s:" % cond):
# codewriter.putln("print 'buffer match found based on numpy dtype'")
codewriter.putln(self.match)
codewriter.putln("break")
codewriter.dedent()
def _buffer_parse_format_string_check(self, pyx_code, decl_code,
specialized_type, env):
"""
For each specialized type, try to coerce the object to a memoryview
slice of that type. This means obtaining a buffer and parsing the
format string.
TODO: separate buffer acquisition from format parsing
"""
dtype = specialized_type.dtype
if specialized_type.is_buffer:
axes = [('direct', 'strided')] * specialized_type.ndim
else:
axes = specialized_type.axes
memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes)
memslice_type.create_from_py_utility_code(env)
pyx_code.context.update(
coerce_from_py_func=memslice_type.from_py_function,
dtype=dtype)
decl_code.putln(
"{{memviewslice_cname}} {{coerce_from_py_func}}(object)")
pyx_code.context.update(
specialized_type_name=specialized_type.specialization_string,
sizeof_dtype=self._sizeof_dtype(dtype))
pyx_code.put_chunk(
u"""
# try {{dtype}}
if itemsize == -1 or itemsize == {{sizeof_dtype}}:
memslice = {{coerce_from_py_func}}(arg)
if memslice.memview:
__PYX_XDEC_MEMVIEW(&memslice, 1)
# print 'found a match for the buffer through format parsing'
%s
break
else:
__pyx_PyErr_Clear()
""" % self.match)
def _buffer_checks(self, buffer_types, pyx_code, decl_code, env):
"""
Generate Cython code to match objects to buffer specializations.
First try to get a numpy dtype object and match it against the individual
specializations. If that fails, try naively to coerce the object
to each specialization, which obtains the buffer each time and tries
to match the format string.
"""
from Cython.Compiler import ExprNodes
if buffer_types:
if pyx_code.indenter(u"else:"):
            # The first specialization that finds a match breaks out of the loop
if pyx_code.indenter(u"while 1:"):
pyx_code.put_chunk(
u"""
if numpy is not None:
if isinstance(arg, numpy.ndarray):
dtype = arg.dtype
elif (__pyx_memoryview_check(arg) and
isinstance(arg.base, numpy.ndarray)):
dtype = arg.base.dtype
else:
dtype = None
itemsize = -1
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
dtype_signed = kind == ord('i')
""")
pyx_code.indent(2)
pyx_code.named_insertion_point("numpy_dtype_checks")
self._buffer_check_numpy_dtype(pyx_code, buffer_types)
pyx_code.dedent(2)
for specialized_type in buffer_types:
self._buffer_parse_format_string_check(
pyx_code, decl_code, specialized_type, env)
pyx_code.putln(self.no_match)
pyx_code.putln("break")
pyx_code.dedent()
pyx_code.dedent()
else:
pyx_code.putln("else: %s" % self.no_match)
def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types):
"""
If we have any buffer specializations, write out some variable
declarations and imports.
"""
decl_code.put_chunk(
u"""
ctypedef struct {{memviewslice_cname}}:
void *memview
void __PYX_XDEC_MEMVIEW({{memviewslice_cname}} *, int have_gil)
bint __pyx_memoryview_check(object)
""")
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef {{memviewslice_cname}} memslice
cdef Py_ssize_t itemsize
cdef bint dtype_signed
cdef char kind
itemsize = -1
""")
pyx_code.imports.put_chunk(
u"""
try:
import numpy
except ImportError:
numpy = None
""")
seen_int_dtypes = set()
for buffer_type in all_buffer_types:
dtype = buffer_type.dtype
if dtype.is_typedef:
#decl_code.putln("ctypedef %s %s" % (dtype.resolve(),
# self._dtype_name(dtype)))
decl_code.putln('ctypedef %s %s "%s"' % (dtype.resolve(),
self._dtype_name(dtype),
dtype.declaration_code("")))
if buffer_type.dtype.is_int:
if str(dtype) not in seen_int_dtypes:
seen_int_dtypes.add(str(dtype))
pyx_code.context.update(dtype_name=self._dtype_name(dtype),
dtype_type=self._dtype_type(dtype))
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef bint {{dtype_name}}_is_signed
{{dtype_name}}_is_signed = <{{dtype_type}}> -1 < 0
""")
def _split_fused_types(self, arg):
"""
Specialize fused types and split into normal types and buffer types.
"""
specialized_types = PyrexTypes.get_specialized_types(arg.type)
# Prefer long over int, etc
# specialized_types.sort()
seen_py_type_names = set()
normal_types, buffer_types = [], []
for specialized_type in specialized_types:
py_type_name = specialized_type.py_type_name()
if py_type_name:
if py_type_name in seen_py_type_names:
continue
seen_py_type_names.add(py_type_name)
normal_types.append(specialized_type)
elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
buffer_types.append(specialized_type)
return normal_types, buffer_types
def _unpack_argument(self, pyx_code):
pyx_code.put_chunk(
u"""
# PROCESSING ARGUMENT {{arg_tuple_idx}}
if {{arg_tuple_idx}} < len(args):
arg = args[{{arg_tuple_idx}}]
elif '{{arg.name}}' in kwargs:
arg = kwargs['{{arg.name}}']
else:
{{if arg.default:}}
arg = defaults[{{default_idx}}]
{{else}}
raise TypeError("Expected at least %d arguments" % len(args))
{{endif}}
""")
def make_fused_cpdef(self, orig_py_func, env, is_def):
"""
This creates the function that is indexable from Python and does
runtime dispatch based on the argument types. The function gets the
arg tuple and kwargs dict (or None) and the defaults tuple
as arguments from the Binding Fused Function's tp_call.
"""
from Cython.Compiler import TreeFragment, Code, MemoryView, UtilityCode
# { (arg_pos, FusedType) : specialized_type }
seen_fused_types = set()
context = {
'memviewslice_cname': MemoryView.memviewslice_cname,
'func_args': self.node.args,
'n_fused': len([arg for arg in self.node.args]),
'name': orig_py_func.entry.name,
}
pyx_code = Code.PyxCodeWriter(context=context)
decl_code = Code.PyxCodeWriter(context=context)
decl_code.put_chunk(
u"""
cdef extern from *:
void __pyx_PyErr_Clear "PyErr_Clear" ()
""")
decl_code.indent()
pyx_code.put_chunk(
u"""
def __pyx_fused_cpdef(signatures, args, kwargs, defaults):
dest_sig = [{{for _ in range(n_fused)}}None,{{endfor}}]
if kwargs is None:
kwargs = {}
cdef Py_ssize_t i
# instance check body
""")
pyx_code.indent() # indent following code to function body
pyx_code.named_insertion_point("imports")
pyx_code.named_insertion_point("local_variable_declarations")
fused_index = 0
default_idx = 0
all_buffer_types = set()
for i, arg in enumerate(self.node.args):
if arg.type.is_fused and arg.type not in seen_fused_types:
seen_fused_types.add(arg.type)
context.update(
arg_tuple_idx=i,
arg=arg,
dest_sig_idx=fused_index,
default_idx=default_idx,
)
normal_types, buffer_types = self._split_fused_types(arg)
self._unpack_argument(pyx_code)
self._fused_instance_checks(normal_types, pyx_code, env)
self._buffer_checks(buffer_types, pyx_code, decl_code, env)
fused_index += 1
all_buffer_types.update(buffer_types)
if arg.default:
default_idx += 1
if all_buffer_types:
self._buffer_declarations(pyx_code, decl_code, all_buffer_types)
env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
pyx_code.put_chunk(
u"""
candidates = []
for sig in signatures:
match_found = False
for src_type, dst_type in zip(sig.strip('()').split('|'), dest_sig):
if dst_type is not None:
if src_type == dst_type:
match_found = True
else:
match_found = False
break
if match_found:
candidates.append(sig)
if not candidates:
raise TypeError("No matching signature found")
elif len(candidates) > 1:
raise TypeError("Function call with ambiguous argument types")
else:
return signatures[candidates[0]]
""")
fragment_code = pyx_code.getvalue()
# print decl_code.getvalue()
# print fragment_code
fragment = TreeFragment.TreeFragment(fragment_code, level='module')
ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root)
UtilityCode.declare_declarations_in_scope(decl_code.getvalue(),
env.global_scope())
ast.scope = env
ast.analyse_declarations(env)
py_func = ast.stats[-1] # the DefNode
self.fragment_scope = ast.scope
if isinstance(self.node, DefNode):
py_func.specialized_cpdefs = self.nodes[:]
else:
py_func.specialized_cpdefs = [n.py_func for n in self.nodes]
return py_func
def update_fused_defnode_entry(self, env):
copy_attributes = (
'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname',
'pymethdef_cname', 'doc', 'doc_cname', 'is_member',
'scope'
)
entry = self.py_func.entry
for attr in copy_attributes:
setattr(entry, attr,
getattr(self.orig_py_func.entry, attr))
self.py_func.name = self.orig_py_func.name
self.py_func.doc = self.orig_py_func.doc
env.entries.pop('__pyx_fused_cpdef', None)
if isinstance(self.node, DefNode):
env.entries[entry.name] = entry
else:
env.entries[entry.name].as_variable = entry
env.pyfunc_entries.append(entry)
self.py_func.entry.fused_cfunction = self
for node in self.nodes:
if isinstance(self.node, DefNode):
node.fused_py_func = self.py_func
else:
node.py_func.fused_py_func = self.py_func
node.entry.as_variable = entry
self.synthesize_defnodes()
self.stats.append(self.__signatures__)
def analyse_expressions(self, env):
"""
Analyse the expressions. Take care to only evaluate default arguments
once and clone the result for all specializations
"""
for fused_compound_type in self.fused_compound_types:
for fused_type in fused_compound_type.get_fused_types():
for specialization_type in fused_type.types:
if specialization_type.is_complex:
specialization_type.create_declaration_utility_code(env)
if self.py_func:
self.__signatures__.analyse_expressions(env)
self.py_func.analyse_expressions(env)
self.resulting_fused_function.analyse_expressions(env)
self.fused_func_assignment.analyse_expressions(env)
self.defaults = defaults = []
for arg in self.node.args:
if arg.default:
arg.default.analyse_expressions(env)
defaults.append(ProxyNode(arg.default))
else:
defaults.append(None)
for stat in self.stats:
stat.analyse_expressions(env)
if isinstance(stat, FuncDefNode):
for arg, default in zip(stat.args, defaults):
if default is not None:
arg.default = CloneNode(default).coerce_to(arg.type, env)
if self.py_func:
args = [CloneNode(default) for default in defaults if default]
self.defaults_tuple = TupleNode(self.pos, args=args)
self.defaults_tuple.analyse_types(env, skip_children=True)
self.defaults_tuple = ProxyNode(self.defaults_tuple)
self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object)
fused_func = self.resulting_fused_function.arg
fused_func.defaults_tuple = CloneNode(self.defaults_tuple)
fused_func.code_object = CloneNode(self.code_object)
for pycfunc in self.specialized_pycfuncs:
pycfunc.code_object = CloneNode(self.code_object)
pycfunc.analyse_types(env)
pycfunc.defaults_tuple = CloneNode(self.defaults_tuple)
def synthesize_defnodes(self):
"""
Create the __signatures__ dict of PyCFunctionNode specializations.
"""
if isinstance(self.nodes[0], CFuncDefNode):
nodes = [node.py_func for node in self.nodes]
else:
nodes = self.nodes
signatures = [
StringEncoding.EncodedString(node.specialized_signature_string)
for node in nodes]
keys = [ExprNodes.StringNode(node.pos, value=sig)
for node, sig in zip(nodes, signatures)]
values = [ExprNodes.PyCFunctionNode.from_defnode(node, True)
for node in nodes]
self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos,
zip(keys, values))
self.specialized_pycfuncs = values
for pycfuncnode in values:
pycfuncnode.is_specialization = True
def generate_function_definitions(self, env, code):
if self.py_func:
self.py_func.pymethdef_required = True
self.fused_func_assignment.generate_function_definitions(env, code)
for stat in self.stats:
if isinstance(stat, FuncDefNode) and stat.entry.used:
code.mark_pos(stat.pos)
stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
        # Note: all def function specializations are wrapped in PyCFunction
        # nodes in the self.__signatures__ dictnode.
for default in self.defaults:
if default is not None:
default.generate_evaluation_code(code)
if self.py_func:
self.defaults_tuple.generate_evaluation_code(code)
self.code_object.generate_evaluation_code(code)
for stat in self.stats:
code.mark_pos(stat.pos)
if isinstance(stat, ExprNodes.ExprNode):
stat.generate_evaluation_code(code)
else:
stat.generate_execution_code(code)
if self.__signatures__:
self.resulting_fused_function.generate_evaluation_code(code)
code.putln(
"((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" %
(self.resulting_fused_function.result(),
self.__signatures__.result()))
code.put_giveref(self.__signatures__.result())
self.fused_func_assignment.generate_execution_code(code)
# Dispose of results
self.resulting_fused_function.generate_disposal_code(code)
self.defaults_tuple.generate_disposal_code(code)
self.code_object.generate_disposal_code(code)
for default in self.defaults:
if default is not None:
default.generate_disposal_code(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
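# --- Illustrative sketch (added commentary; not part of the original module).
# A plain-Python model of the signature-matching loop that make_fused_cpdef()
# emits into __pyx_fused_cpdef: `signatures` maps specialization strings such
# as "int|float" to specialized functions, and `dest_sig` holds the type names
# matched for the actual arguments (None where any type is acceptable). The
# real generated code additionally unpacks arguments and fills dest_sig via
# the instance and buffer checks shown above.
def _example_fused_signature_dispatch(signatures, dest_sig):
    candidates = []
    for sig in signatures:
        match_found = False
        for src_type, dst_type in zip(sig.strip('()').split('|'), dest_sig):
            if dst_type is not None:
                if src_type == dst_type:
                    match_found = True
                else:
                    match_found = False
                    break
        if match_found:
            candidates.append(sig)
    if not candidates:
        raise TypeError("No matching signature found")
    if len(candidates) > 1:
        raise TypeError("Function call with ambiguous argument types")
    return signatures[candidates[0]]

# e.g. _example_fused_signature_dispatch({"int": f_int, "double": f_dbl},
#                                        ["int"]) would return f_int.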
|
|
import time
from unittest import mock
import pytest
from globus_sdk.authorizers.renewing import EXPIRES_ADJUST_SECONDS, RenewingAuthorizer
class MockRenewer(RenewingAuthorizer):
"""
Class that implements RenewingAuthorizer so that _get_token_response and
_extract_token_data can return known values for testing
"""
def __init__(self, token_data, **kwargs):
self.token_data = token_data
self.token_response = mock.Mock()
super().__init__(**kwargs)
def _get_token_response(self):
return self.token_response
def _extract_token_data(self, res):
return self.token_data
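# --- Illustrative sketch (added commentary; not part of the original test
# module). Shows the renewal flow the tests below exercise: the authorizer
# injects a Bearer token into an outgoing headers dict and, once expires_at
# has passed, transparently swaps in the token produced by
# _extract_token_data. All literal values here are arbitrary.
def _example_renewal_flow():
    fresh_token = {
        "expires_at_seconds": int(time.time()) + 1000,
        "access_token": "renewed_token",
    }
    authorizer = MockRenewer(
        fresh_token,
        access_token="initial_token",
        expires_at=int(time.time()) - 1,  # already expired, forces a renewal
    )
    headers = {}
    authorizer.set_authorization_header(headers)
    assert headers["Authorization"] == "Bearer renewed_token"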
ACCESS_TOKEN = "access_token_1"
@pytest.fixture
def expires_at():
return int(time.time()) + EXPIRES_ADJUST_SECONDS + 10
@pytest.fixture
def token_data():
return {
"expires_at_seconds": int(time.time()) + 1000,
"access_token": "access_token_2",
}
@pytest.fixture
def on_refresh():
return mock.Mock()
@pytest.fixture
def authorizer(on_refresh, token_data, expires_at):
return MockRenewer(
token_data,
access_token=ACCESS_TOKEN,
expires_at=expires_at,
on_refresh=on_refresh,
)
@pytest.fixture
def expired_authorizer(on_refresh, token_data, expires_at):
return MockRenewer(
token_data,
access_token=ACCESS_TOKEN,
expires_at=expires_at - 11,
on_refresh=on_refresh,
)
def test_init(token_data, expires_at):
"""
Creating a MockRenewer with partial data results in a new access token
being fetched, but complete data does not
"""
authorizer = MockRenewer(
token_data, access_token=ACCESS_TOKEN, expires_at=expires_at
)
assert authorizer.access_token == ACCESS_TOKEN
assert authorizer.access_token != token_data["access_token"]
authorizer = MockRenewer(token_data, access_token=ACCESS_TOKEN)
assert authorizer.access_token != ACCESS_TOKEN
assert authorizer.access_token == token_data["access_token"]
authorizer = MockRenewer(token_data, expires_at=expires_at)
assert authorizer.access_token != ACCESS_TOKEN
assert authorizer.access_token == token_data["access_token"]
def test_init_expiration_time(authorizer, expires_at):
# confirm initial value was adjusted automatically
assert authorizer.expires_at == expires_at - EXPIRES_ADJUST_SECONDS
@pytest.mark.parametrize("input_time", [0, 60, 120, 1200])
def test_set_expiration_time(input_time, authorizer, expires_at):
"""
Confirms expiration time is set earlier than input value for a buffer
"""
authorizer._set_expiration_time(input_time)
assert authorizer.expires_at == input_time - EXPIRES_ADJUST_SECONDS
def test_get_new_access_token(authorizer, token_data, on_refresh):
"""
    Calls _get_new_access_token, confirms that the mocked token data
    is used and that the mock on_refresh function is called.
"""
# take note of original access_token_hash
original_hash = authorizer.access_token_hash
# get new_access_token
authorizer._get_new_access_token()
# confirm side effects
assert authorizer.access_token == token_data["access_token"]
assert authorizer.expires_at == (
token_data["expires_at_seconds"] - EXPIRES_ADJUST_SECONDS
)
assert authorizer.access_token_hash != original_hash
on_refresh.assert_called_once()
def test_check_expiration_time_valid(authorizer):
"""
    Confirms nothing is done before the access_token expires.
"""
authorizer.check_expiration_time()
assert authorizer.access_token == ACCESS_TOKEN
def test_check_expiration_time_expired(expired_authorizer, token_data):
"""
    Confirms a new access_token is fetched after expiration
"""
expired_authorizer.check_expiration_time()
assert expired_authorizer.access_token == token_data["access_token"]
assert expired_authorizer.expires_at == (
token_data["expires_at_seconds"] - EXPIRES_ADJUST_SECONDS
)
def test_check_expiration_time_no_token(authorizer, token_data):
"""
    Confirms a new access_token is fetched if the old one is set to None
"""
authorizer.access_token = None
authorizer.check_expiration_time()
assert authorizer.access_token == token_data["access_token"]
assert authorizer.expires_at == (
token_data["expires_at_seconds"] - EXPIRES_ADJUST_SECONDS
)
def test_check_expiration_time_no_expiration(authorizer, token_data):
"""
    Confirms a new access_token is fetched if expires_at is set to None
"""
authorizer.expires_at = None
authorizer.check_expiration_time()
assert authorizer.access_token == token_data["access_token"]
assert authorizer.expires_at == (
token_data["expires_at_seconds"] - EXPIRES_ADJUST_SECONDS
)
def test_set_authorization_header(authorizer):
"""
Sets authorization header on a test dictionary, confirms expected value
"""
header_dict = {}
authorizer.set_authorization_header(header_dict)
assert header_dict["Authorization"] == "Bearer " + ACCESS_TOKEN
def test_set_authorization_header_existing(authorizer):
"""
Confirms that an existing Authorization field is overwritten
"""
header_dict = {"Header": "value", "Authorization": "previous_value"}
authorizer.set_authorization_header(header_dict)
assert header_dict["Authorization"] == "Bearer " + ACCESS_TOKEN
assert header_dict["Header"] == "value"
def test_set_authorization_header_expired(expired_authorizer, token_data):
"""
Sets the access_token to be expired, then sets authorization header
Confirms header value uses the new access_token.
"""
header_dict = {}
expired_authorizer.set_authorization_header(header_dict)
assert header_dict["Authorization"] == ("Bearer " + token_data["access_token"])
def test_set_authorization_header_no_token(authorizer, token_data):
"""
Sets the access_token to None, then sets authorization header
Confirms header value uses the new access_token.
"""
header_dict = {}
authorizer.access_token = None
authorizer.set_authorization_header(header_dict)
assert header_dict["Authorization"] == ("Bearer " + token_data["access_token"])
def test_set_authorization_header_no_expires(authorizer, token_data):
"""
Sets expires_at to None, then sets authorization header
Confirms header value uses the new access_token.
"""
header_dict = {}
authorizer.expires_at = None
authorizer.set_authorization_header(header_dict)
assert header_dict["Authorization"] == ("Bearer " + token_data["access_token"])
def test_handle_missing_authorization(authorizer):
"""
Confirms that RenewingAuthorizers will attempt to fix 401s
by treating their existing access_token as expired
"""
assert authorizer.handle_missing_authorization()
assert authorizer.expires_at is None
|
|
__author__ = 'chris'
from binascii import unhexlify
import mock
import nacl.signing
import nacl.hash
from txrudp import packet, connection, rudp, constants
from twisted.internet import udp, address, task
from twisted.trial import unittest
from dht.crawling import RPCFindResponse, NodeSpiderCrawl, ValueSpiderCrawl
from dht.node import Node, NodeHeap
from dht.utils import digest
from dht.storage import ForgetfulStorage
from dht.protocol import KademliaProtocol
from protos.objects import Value
from wireprotocol import OpenBazaarProtocol
from db.datastore import Database
from constants import ALPHA, KSIZE
class ValueSpiderCrawlTest(unittest.TestCase):
def setUp(self):
self.public_ip = '123.45.67.89'
self.port = 12345
self.own_addr = (self.public_ip, self.port)
self.addr1 = ('132.54.76.98', 54321)
self.addr2 = ('231.76.45.89', 15243)
self.addr3 = ("193.193.111.00", 99999)
self.clock = task.Clock()
connection.REACTOR.callLater = self.clock.callLater
self.proto_mock = mock.Mock(spec_set=rudp.ConnectionMultiplexer)
self.handler_mock = mock.Mock(spec_set=connection.Handler)
self.con = connection.Connection(
self.proto_mock,
self.handler_mock,
self.own_addr,
self.addr1
)
valid_key = "1a5c8e67edb8d279d1ae32fa2da97e236b95e95c837dc8c3c7c2ff7a7cc29855"
self.signing_key = nacl.signing.SigningKey(valid_key, encoder=nacl.encoding.HexEncoder)
verify_key = self.signing_key.verify_key
signed_pubkey = self.signing_key.sign(str(verify_key))
h = nacl.hash.sha512(signed_pubkey)
self.storage = ForgetfulStorage()
self.node = Node(unhexlify(h[:40]), self.public_ip, self.port, signed_pubkey, True)
self.db = Database(filepath=":memory:")
self.protocol = KademliaProtocol(self.node, self.storage, 20, self.db)
self.wire_protocol = OpenBazaarProtocol(self.own_addr)
self.wire_protocol.register_processor(self.protocol)
self.protocol.connect_multiplexer(self.wire_protocol)
self.handler = self.wire_protocol.ConnHandler([self.protocol], self.wire_protocol)
transport = mock.Mock(spec_set=udp.Port)
ret_val = address.IPv4Address('UDP', self.public_ip, self.port)
transport.attach_mock(mock.Mock(return_value=ret_val), 'getHost')
self.wire_protocol.makeConnection(transport)
self.node1 = Node(digest("id1"), self.addr1[0], self.addr1[1], digest("key1"), True)
self.node2 = Node(digest("id2"), self.addr2[0], self.addr2[1], digest("key2"), True)
self.node3 = Node(digest("id3"), self.addr3[0], self.addr3[1], digest("key3"), True)
def tearDown(self):
self.con.shutdown()
self.wire_protocol.shutdown()
def test_find(self):
self._connecting_to_connected()
self.wire_protocol[self.addr1] = self.con
self.wire_protocol[self.addr2] = self.con
self.wire_protocol[self.addr3] = self.con
self.protocol.router.addContact(self.node1)
self.protocol.router.addContact(self.node2)
self.protocol.router.addContact(self.node3)
node = Node(digest("s"))
nearest = self.protocol.router.findNeighbors(node)
spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
spider.find()
self.clock.advance(100 * constants.PACKET_TIMEOUT)
connection.REACTOR.runUntilCurrent()
self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
def test_nodesFound(self):
self._connecting_to_connected()
self.wire_protocol[self.addr1] = self.con
self.wire_protocol[self.addr2] = self.con
self.wire_protocol[self.addr3] = self.con
self.protocol.router.addContact(self.node1)
self.protocol.router.addContact(self.node2)
self.protocol.router.addContact(self.node3)
        # test response with uncontacted nodes
node = Node(digest("s"))
nearest = self.protocol.router.findNeighbors(node)
spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
spider._nodesFound(responses)
self.clock.advance(100 * constants.PACKET_TIMEOUT)
connection.REACTOR.runUntilCurrent()
self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
# test all been contacted
spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
for peer in spider.nearest.getUncontacted():
spider.nearest.markContacted(peer)
response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node2.id: response}
resp = spider._nodesFound(responses)
self.assertTrue(resp is None)
        # test a failed response (the unresponsive node is removed from nearest)
spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
response = (False, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
spider._nodesFound(responses)
self.assertTrue(len(spider.nearest) == 2)
# test got value
val = Value()
val.valueKey = digest("contractID")
val.serializedData = self.protocol.sourceNode.getProto().SerializeToString()
response = (True, ("value", val.SerializeToString()))
responses = {self.node3.id: response}
spider.nearestWithoutValue = NodeHeap(node, 1)
value = spider._nodesFound(responses)
self.assertEqual(value[0], val.SerializeToString())
def test_handleFoundValues(self):
self._connecting_to_connected()
self.wire_protocol[self.addr1] = self.con
self.protocol.router.addContact(self.node1)
self.protocol.router.addContact(self.node2)
self.protocol.router.addContact(self.node3)
node = Node(digest("s"))
nearest = self.protocol.router.findNeighbors(node)
spider = ValueSpiderCrawl(self.protocol, node, nearest, KSIZE, ALPHA)
val = Value()
val.valueKey = digest("contractID")
val.serializedData = self.node1.getProto().SerializeToString()
val1 = val.SerializeToString()
value = spider._handleFoundValues([(val1,)])
self.assertEqual(value[0], val.SerializeToString())
# test handle multiple values
val.serializedData = self.node2.getProto().SerializeToString()
val2 = val.SerializeToString()
found_values = [(val1,), (val1,), (val2,)]
self.assertEqual(spider._handleFoundValues(found_values), (val1,))
# test store value at nearest without value
spider.nearestWithoutValue.push(self.node1)
spider._handleFoundValues(found_values)
self.clock.advance(100 * constants.PACKET_TIMEOUT)
connection.REACTOR.runUntilCurrent()
self.assertTrue(len(self.proto_mock.send_datagram.call_args_list) > 1)
self.proto_mock.send_datagram.call_args_list = []
def _connecting_to_connected(self):
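"""Feed self.con a fake remote SYN packet so it moves from the connecting
to the connected state, record the expected sequence numbers, and reset
the mocks so each test starts with a clean send_datagram call history."""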
remote_synack_packet = packet.Packet.from_data(
42,
self.con.own_addr,
self.con.dest_addr,
ack=0,
syn=True
)
self.con.receive_packet(remote_synack_packet)
self.clock.advance(0)
connection.REACTOR.runUntilCurrent()
self.next_remote_seqnum = 43
m_calls = self.proto_mock.send_datagram.call_args_list
sent_syn_packet = packet.Packet.from_bytes(m_calls[0][0][0])
seqnum = sent_syn_packet.sequence_number
self.handler_mock.reset_mock()
self.proto_mock.reset_mock()
self.next_seqnum = seqnum + 1
class NodeSpiderCrawlTest(unittest.TestCase):
def setUp(self):
self.public_ip = '123.45.67.89'
self.port = 12345
self.own_addr = (self.public_ip, self.port)
self.addr1 = ('132.54.76.98', 54321)
self.addr2 = ('231.76.45.89', 15243)
self.addr3 = ("193.193.111.00", 99999)
self.clock = task.Clock()
connection.REACTOR.callLater = self.clock.callLater
self.proto_mock = mock.Mock(spec_set=rudp.ConnectionMultiplexer)
self.handler_mock = mock.Mock(spec_set=connection.Handler)
self.con = connection.Connection(
self.proto_mock,
self.handler_mock,
self.own_addr,
self.addr1
)
valid_key = "1a5c8e67edb8d279d1ae32fa2da97e236b95e95c837dc8c3c7c2ff7a7cc29855"
self.signing_key = nacl.signing.SigningKey(valid_key, encoder=nacl.encoding.HexEncoder)
verify_key = self.signing_key.verify_key
signed_pubkey = self.signing_key.sign(str(verify_key))
h = nacl.hash.sha512(signed_pubkey)
self.storage = ForgetfulStorage()
self.node = Node(unhexlify(h[:40]), self.public_ip, self.port, signed_pubkey, True)
self.db = Database(filepath=":memory:")
self.protocol = KademliaProtocol(self.node, self.storage, 20, self.db)
self.wire_protocol = OpenBazaarProtocol(self.own_addr)
self.wire_protocol.register_processor(self.protocol)
self.protocol.connect_multiplexer(self.wire_protocol)
self.handler = self.wire_protocol.ConnHandler([self.protocol], self.wire_protocol)
transport = mock.Mock(spec_set=udp.Port)
ret_val = address.IPv4Address('UDP', self.public_ip, self.port)
transport.attach_mock(mock.Mock(return_value=ret_val), 'getHost')
self.wire_protocol.makeConnection(transport)
self.node1 = Node(digest("id1"), self.addr1[0], self.addr1[1], digest("key1"), True)
self.node2 = Node(digest("id2"), self.addr2[0], self.addr2[1], digest("key2"), True)
self.node3 = Node(digest("id3"), self.addr3[0], self.addr3[1], digest("key3"), True)
def test_find(self):
self._connecting_to_connected()
self.wire_protocol[self.addr1] = self.con
self.wire_protocol[self.addr2] = self.con
self.wire_protocol[self.addr3] = self.con
self.protocol.router.addContact(self.node1)
self.protocol.router.addContact(self.node2)
self.protocol.router.addContact(self.node3)
node = Node(digest("s"))
nearest = self.protocol.router.findNeighbors(node)
spider = NodeSpiderCrawl(self.protocol, node, nearest, 20, 3)
spider.find()
self.clock.advance(100 * constants.PACKET_TIMEOUT)
connection.REACTOR.runUntilCurrent()
self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
def test_nodesFound(self):
self._connecting_to_connected()
self.wire_protocol[self.addr1] = self.con
self.wire_protocol[self.addr2] = self.con
self.wire_protocol[self.addr3] = self.con
self.protocol.router.addContact(self.node1)
self.protocol.router.addContact(self.node2)
self.protocol.router.addContact(self.node3)
node = Node(digest("s"))
nearest = self.protocol.router.findNeighbors(node)
spider = NodeSpiderCrawl(self.protocol, node, nearest, 20, 3)
response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
spider._nodesFound(responses)
self.clock.advance(100 * constants.PACKET_TIMEOUT)
connection.REACTOR.runUntilCurrent()
self.assertEqual(len(self.proto_mock.send_datagram.call_args_list), 4)
response = (True, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
nodes = spider._nodesFound(responses)
node_protos = []
for n in nodes:
node_protos.append(n.getProto())
self.assertTrue(self.node1.getProto() in node_protos)
self.assertTrue(self.node2.getProto() in node_protos)
self.assertTrue(self.node3.getProto() in node_protos)
response = (False, (self.node1.getProto().SerializeToString(), self.node2.getProto().SerializeToString(),
self.node3.getProto().SerializeToString()))
responses = {self.node1.id: response}
nodes = spider._nodesFound(responses)
node_protos = []
for n in nodes:
node_protos.append(n.getProto())
self.assertTrue(self.node2.getProto() in node_protos)
self.assertTrue(self.node3.getProto() in node_protos)
def _connecting_to_connected(self):
remote_synack_packet = packet.Packet.from_data(
42,
self.con.own_addr,
self.con.dest_addr,
ack=0,
syn=True
)
self.con.receive_packet(remote_synack_packet)
self.clock.advance(0)
connection.REACTOR.runUntilCurrent()
self.next_remote_seqnum = 43
m_calls = self.proto_mock.send_datagram.call_args_list
sent_syn_packet = packet.Packet.from_bytes(m_calls[0][0][0])
seqnum = sent_syn_packet.sequence_number
self.handler_mock.reset_mock()
self.proto_mock.reset_mock()
self.next_seqnum = seqnum + 1
class RPCFindResponseTest(unittest.TestCase):
def test_happened(self):
response = (True, ("value", "some_value"))
r = RPCFindResponse(response)
self.assertTrue(r.happened())
response = (False, ("value", "some_value"))
r = RPCFindResponse(response)
self.assertFalse(r.happened())
def test_hasValue(self):
response = (True, ("value", "some_value"))
r = RPCFindResponse(response)
self.assertTrue(r.hasValue())
response = (False, "a node")
r = RPCFindResponse(response)
self.assertFalse(r.hasValue())
def test_getValue(self):
response = (True, ("value", "some_value"))
r = RPCFindResponse(response)
self.assertEqual(r.getValue(), ("some_value",))
def test_getNodeList(self):
node1 = Node(digest("id1"), "127.0.0.1", 12345, signed_pubkey=digest("key1"), vendor=True)
node2 = Node(digest("id2"), "127.0.0.1", 22222, signed_pubkey=digest("key2"), vendor=True)
node3 = Node(digest("id3"), "127.0.0.1", 77777, signed_pubkey=digest("key3"))
response = (True, (node1.getProto().SerializeToString(), node2.getProto().SerializeToString(),
node3.getProto().SerializeToString(),
"sdfasdfsd"))
r = RPCFindResponse(response)
nodes = r.getNodeList()
self.assertEqual(nodes[0].getProto(), node1.getProto())
self.assertEqual(nodes[1].getProto(), node2.getProto())
self.assertEqual(nodes[2].getProto(), node3.getProto())
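# Note: as exercised above, an RPC find response is a 2-tuple of
# (happened_flag, payload), where payload is either ("value", <serialized
# value(s)>) or a tuple of serialized Node protobufs; the trailing
# "sdfasdfsd" entry exercises tolerance of malformed node data.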
|
|
import pytest
import json
import six
from textwrap import dedent
from nbformat.v4 import new_output
from ...preprocessors import DisplayAutoGrades
from .base import BaseTestPreprocessor
from .. import (
create_code_cell, create_text_cell)
@pytest.fixture
def preprocessor():
return DisplayAutoGrades()
@pytest.fixture
def stream():
return six.StringIO()
class TestDisplayAutoGrades(BaseTestPreprocessor):
def _add_error(self, cell):
cell.outputs.append(new_output(
"error",
ename="Error",
evalue="oh noes, an error occurred!",
traceback=["oh noes, an error occurred!"]
))
return cell
def test_indent(self, preprocessor):
# test normal indenting
assert preprocessor._indent("Hello, world!") == " Hello, world!"
assert preprocessor._indent("Hello,\n world!") == " Hello,\n world!"
# test truncation
preprocessor.width = 10
assert preprocessor._indent("Hello, world!") == " Hel..."
assert preprocessor._indent("Hello,\n world!") == " Hel...\n wo..."
# test that ansi escape sequences are removed and not counted towards
# the line width
assert preprocessor._indent("\x1b[30mHello, world!\x1b[0m") == " Hel..."
assert preprocessor._indent("\x1b[30mHello,\n world!\x1b[0m") == " Hel...\n wo..."
def test_print_changed(self, preprocessor, stream):
cell = create_code_cell()
preprocessor.stream = stream
preprocessor.width = 20
preprocessor._print_changed(cell)
expected = dedent(
"""
====================
The following cell has changed:
print("someth...
### BEGIN SOL...
print("hello"...
### END SOLUT...
"""
)
assert stream.getvalue() == expected
def test_print_error_code_cell(self, preprocessor, stream):
cell = create_code_cell()
preprocessor.stream = stream
preprocessor.width = 20
preprocessor._print_error(cell)
expected = dedent(
"""
====================
The following cell failed:
print("someth...
### BEGIN SOL...
print("hello"...
### END SOLUT...
The error was:
You did not p...
"""
)
assert stream.getvalue() == expected
def test_print_error_code_cell_error(self, preprocessor, stream):
cell = self._add_error(create_code_cell())
preprocessor.stream = stream
preprocessor.width = 20
preprocessor._print_error(cell)
expected = dedent(
"""
====================
The following cell failed:
print("someth...
### BEGIN SOL...
print("hello"...
### END SOLUT...
The error was:
oh noes, an e...
"""
)
assert stream.getvalue() == expected
def test_print_error_markdown_cell(self, preprocessor, stream):
cell = create_text_cell()
preprocessor.stream = stream
preprocessor.width = 20
preprocessor._print_error(cell)
expected = dedent(
"""
====================
The following cell failed:
this is the a...
The error was:
You did not p...
"""
)
assert stream.getvalue() == expected
def test_print_pass(self, preprocessor, stream):
cell = create_code_cell()
preprocessor.stream = stream
preprocessor.width = 20
preprocessor._print_pass(cell)
expected = dedent(
"""
====================
The following cell passed:
print("someth...
### BEGIN SOL...
print("hello"...
### END SOLUT...
"""
)
assert stream.getvalue() == expected
def test_print_num_changed_0(self, preprocessor, stream):
preprocessor.stream = stream
preprocessor._print_num_changed(0)
assert stream.getvalue() == ""
def test_print_num_changed_1(self, preprocessor, stream):
preprocessor.stream = stream
preprocessor._print_num_changed(1)
assert stream.getvalue().startswith("THE CONTENTS OF 1 TEST CELL(S) HAVE CHANGED!")
def test_print_num_failed(self, preprocessor, stream):
preprocessor.stream = stream
preprocessor._print_num_failed(0)
assert stream.getvalue() == "Success! Your notebook passes all the tests.\n"
def test_print_num_failed_1(self, preprocessor, stream):
preprocessor.stream = stream
preprocessor._print_num_failed(1)
assert stream.getvalue().startswith("VALIDATION FAILED ON 1 CELL(S)!")
def test_print_num_passed(self, preprocessor, stream):
preprocessor.stream = stream
preprocessor._print_num_passed(0)
assert stream.getvalue() == "Success! The notebook does not pass any tests.\n"
def test_print_num_passed_1(self, preprocessor, stream):
preprocessor.stream = stream
preprocessor._print_num_passed(1)
assert stream.getvalue().startswith("NOTEBOOK PASSED ON 1 CELL(S)!")
def test_submitted_unchanged(self, preprocessor, stream):
"""Does the validation fail on an unchanged notebook?"""
nb = self._read_nb("files/submitted-unchanged.ipynb")
preprocessor.stream = stream
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "VALIDATION FAILED ON 3 CELL(S)! If you submit your assignment as it is, you WILL NOT"
def test_submitted_changed(self, preprocessor, stream):
"""Does the validation pass on an changed notebook?"""
nb = self._read_nb("files/submitted-changed.ipynb")
preprocessor.stream = stream
preprocessor.preprocess(nb, {})
assert stream.getvalue() == "Success! Your notebook passes all the tests.\n"
def test_invert_submitted_unchanged(self, preprocessor, stream):
"""Does the inverted validation pass on an unchanged notebook?"""
nb = self._read_nb("files/submitted-unchanged.ipynb")
preprocessor.stream = stream
preprocessor.invert = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "NOTEBOOK PASSED ON 1 CELL(S)!"
def test_invert_submitted_changed(self, preprocessor, stream):
"""Does the inverted validation fail on a changed notebook?"""
nb = self._read_nb("files/submitted-changed.ipynb")
preprocessor.stream = stream
preprocessor.invert = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "NOTEBOOK PASSED ON 2 CELL(S)!"
def test_grade_cell_changed(self, preprocessor, stream):
"""Does the validate fail if a grade cell has changed?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "THE CONTENTS OF 1 TEST CELL(S) HAVE CHANGED! This might mean that even though the tests"
def test_grade_cell_changed_ignore_checksums(self, preprocessor, stream):
"""Does the validate pass if a grade cell has changed but we're ignoring checksums?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "Success! Your notebook passes all the tests."
def test_invert_grade_cell_changed(self, preprocessor, stream):
"""Does the validate fail if a grade cell has changed, even with --invert?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.invert = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "THE CONTENTS OF 1 TEST CELL(S) HAVE CHANGED! This might mean that even though the tests"
def test_invert_grade_cell_changed_ignore_checksums(self, preprocessor, stream):
"""Does the validate fail if a grade cell has changed with --invert and ignoring checksums?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.invert = True
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "NOTEBOOK PASSED ON 2 CELL(S)!"
def test_submitted_unchanged_ignore_checksums(self, preprocessor, stream):
"""Does the validation fail on an unchanged notebook with ignoring checksums?"""
nb = self._read_nb("files/submitted-unchanged.ipynb")
preprocessor.stream = stream
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment as it is, you WILL NOT"
def test_locked_cell_changed(self, preprocessor, stream):
"""Does the validate fail if a locked cell has changed?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "THE CONTENTS OF 2 TEST CELL(S) HAVE CHANGED! This might mean that even though the tests"
def test_locked_cell_changed_ignore_checksums(self, preprocessor, stream):
"""Does the validate pass if a locked cell has changed but we're ignoring checksums?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "VALIDATION FAILED ON 1 CELL(S)! If you submit your assignment as it is, you WILL NOT"
def test_invert_locked_cell_changed(self, preprocessor, stream):
"""Does the validate fail if a locked cell has changed, even with --invert?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.invert = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "THE CONTENTS OF 2 TEST CELL(S) HAVE CHANGED! This might mean that even though the tests"
def test_invert_locked_cell_changed_ignore_checksums(self, preprocessor, stream):
"""Does the validate fail if a locked cell has changed with --invert and ignoring checksums?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.invert = True
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
assert stream.getvalue().split("\n")[0] == "NOTEBOOK PASSED ON 1 CELL(S)!"
def test_submitted_unchanged_json(self, preprocessor, stream):
"""Does the validation fail on an unchanged notebook?"""
nb = self._read_nb("files/submitted-unchanged.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["failed"]
assert len(output["failed"]) == 3
assert output["failed"][0]["source"] == "assert a == 1"
assert output["failed"][1]["source"] == "YOUR ANSWER HERE"
assert output["failed"][1]["error"] == "You did not provide a response."
assert output["failed"][2]["source"] == "# YOUR CODE HERE\nraise NotImplementedError()"
def test_submitted_changed_json(self, preprocessor, stream):
"""Does the validation pass on an changed notebook?"""
nb = self._read_nb("files/submitted-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == []
def test_invert_submitted_unchanged_json(self, preprocessor, stream):
"""Does the inverted validation pass on an unchanged notebook?"""
nb = self._read_nb("files/submitted-unchanged.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.invert = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["passed"]
assert len(output["passed"]) == 1
assert output["passed"][0]["source"] == 'print("Success!")'
def test_invert_submitted_changed_json(self, preprocessor, stream):
"""Does the inverted validation fail on a changed notebook?"""
nb = self._read_nb("files/submitted-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.invert = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["passed"]
assert len(output["passed"]) == 2
assert output["passed"][0]["source"] == 'print("Success!")'
assert output["passed"][1]["source"] == 'assert a == 1'
def test_grade_cell_changed_json(self, preprocessor, stream):
"""Does the validate fail if a grade cell has changed?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["changed"]
assert len(output["changed"]) == 1
assert output["changed"][0]["source"] == '#assert a == 1'
def test_grade_cell_changed_ignore_checksums_json(self, preprocessor, stream):
"""Does the validate pass if a grade cell has changed but we're ignoring checksums?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == []
def test_invert_grade_cell_changed_json(self, preprocessor, stream):
"""Does the validate fail if a grade cell has changed, even with --invert?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.invert = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["changed"]
assert len(output["changed"]) == 1
assert output["changed"][0]["source"] == '#assert a == 1'
def test_invert_grade_cell_changed_ignore_checksums_json(self, preprocessor, stream):
"""Does the validate fail if a grade cell has changed with --invert and ignoring checksums?"""
nb = self._read_nb("files/submitted-grade-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.invert = True
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["passed"]
assert len(output["passed"]) == 2
assert output["passed"][0]["source"] == 'print("Success!")'
assert output["passed"][1]["source"] == '#assert a == 1'
def test_submitted_unchanged_ignore_checksums_json(self, preprocessor, stream):
"""Does the validation fail on an unchanged notebook with ignoring checksums?"""
nb = self._read_nb("files/submitted-unchanged.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["failed"]
assert len(output["failed"]) == 1
assert output["failed"][0]["source"] == 'assert a == 1'
def test_locked_cell_changed_json(self, preprocessor, stream):
"""Does the validate fail if a locked cell has changed?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["changed"]
assert len(output["changed"]) == 2
assert output["changed"][0]["source"] == '#print("Don\'t change this cell!")'
assert output["changed"][1]["source"] == "This cell shouldn't \nbe changed."
def test_locked_cell_changed_ignore_checksums_json(self, preprocessor, stream):
"""Does the validate pass if a locked cell has changed but we're ignoring checksums?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["failed"]
assert len(output["failed"]) == 1
assert output["failed"][0]["source"] == 'assert a == 1'
def test_invert_locked_cell_changed_json(self, preprocessor, stream):
"""Does the validate fail if a locked cell has changed, even with --invert?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.invert = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["changed"]
assert len(output["changed"]) == 2
assert output["changed"][0]["source"] == '#print("Don\'t change this cell!")'
assert output["changed"][1]["source"] == "This cell shouldn't \nbe changed."
def test_invert_locked_cell_changed_ignore_checksums_json(self, preprocessor, stream):
"""Does the validate fail if a locked cell has changed with --invert and ignoring checksums?"""
nb = self._read_nb("files/submitted-locked-cell-changed.ipynb")
preprocessor.stream = stream
preprocessor.as_json = True
preprocessor.invert = True
preprocessor.ignore_checksums = True
preprocessor.preprocess(nb, {})
output = json.loads(stream.getvalue())
assert list(output.keys()) == ["passed"]
assert len(output["passed"]) == 1
assert output["passed"][0]["source"] == 'print("Success!")'
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the heat engine RPC API.
"""
from heat.rpc import api
import heat.openstack.common.rpc.proxy
class EngineClient(heat.openstack.common.rpc.proxy.RpcProxy):
'''Client side of the heat engine rpc API.
API version history::
1.0 - Initial version.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
super(EngineClient, self).__init__(
topic=api.ENGINE_TOPIC,
default_version=self.BASE_RPC_API_VERSION)
def identify_stack(self, ctxt, stack_name):
"""
The identify_stack method returns the full stack identifier for a
single, live stack given the stack name.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to look up.
"""
return self.call(ctxt, self.make_msg('identify_stack',
stack_name=stack_name))
def list_stacks(self, ctxt):
"""
The list_stacks method returns the attributes of all stacks.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('list_stacks'))
def show_stack(self, ctxt, stack_identity):
"""
Return detailed information about one or all stacks.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to show, or None to
show all
"""
return self.call(ctxt, self.make_msg('show_stack',
stack_identity=stack_identity))
def create_stack(self, ctxt, stack_name, template, params, files, args):
"""
The create_stack method creates a new stack using the template
provided.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
"""
return self.call(ctxt,
self.make_msg('create_stack', stack_name=stack_name,
template=template,
params=params, files=files, args=args))
def update_stack(self, ctxt, stack_identity, template, params,
files, args):
"""
The update_stack method updates an existing stack based on the
provided template and parameters.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_identity: Identity of the stack you want to update.
:param template: Template to update the stack with.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
"""
return self.call(ctxt, self.make_msg('update_stack',
stack_identity=stack_identity,
template=template,
params=params,
files=files,
args=args))
def validate_template(self, ctxt, template):
"""
The validate_template method uses the stack parser to check
the validity of a template.
:param ctxt: RPC context.
:param template: Template of stack you want to create.
"""
return self.call(ctxt, self.make_msg('validate_template',
template=template))
def authenticated_to_backend(self, ctxt):
"""
Verify that the credentials in the RPC context are valid for the
current cloud backend.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('authenticated_to_backend'))
def get_template(self, ctxt, stack_identity):
"""
Get the template.
:param ctxt: RPC context.
:param stack_identity: Identity of the stack whose template you want.
"""
return self.call(ctxt, self.make_msg('get_template',
stack_identity=stack_identity))
def delete_stack(self, ctxt, stack_identity, cast=True):
"""
The delete_stack method deletes a given stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to delete.
:param cast: cast the message or use call (default: True)
"""
rpc_method = self.cast if cast else self.call
return rpc_method(ctxt,
self.make_msg('delete_stack',
stack_identity=stack_identity))
def list_resource_types(self, ctxt):
"""
Get a list of valid resource types.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('list_resource_types'))
def resource_schema(self, ctxt, type_name):
"""
Get the schema for a resource type.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('resource_schema',
type_name=type_name))
def generate_template(self, ctxt, type_name):
"""
Generate a template based on the specified type.
:param ctxt: RPC context.
:param type_name: The resource type name to generate a template for.
"""
return self.call(ctxt, self.make_msg('generate_template',
type_name=type_name))
def list_events(self, ctxt, stack_identity):
"""
The list_events method lists all events associated with a given stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to get events for.
"""
return self.call(ctxt, self.make_msg('list_events',
stack_identity=stack_identity))
def describe_stack_resource(self, ctxt, stack_identity, resource_name):
"""
Get detailed resource information about a particular resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: Name of the resource to show.
"""
return self.call(ctxt, self.make_msg('describe_stack_resource',
stack_identity=stack_identity,
resource_name=resource_name))
def find_physical_resource(self, ctxt, physical_resource_id):
"""
Return an identifier for the resource with the specified physical
resource ID.
:param ctxt: RPC context.
:param physical_resource_id: The physical resource ID to look up.
"""
return self.call(ctxt,
self.make_msg(
'find_physical_resource',
physical_resource_id=physical_resource_id))
def describe_stack_resources(self, ctxt, stack_identity, resource_name):
"""
Get detailed resource information about one or more resources.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: Name of the resource to look up.
"""
return self.call(ctxt, self.make_msg('describe_stack_resources',
stack_identity=stack_identity,
resource_name=resource_name))
def list_stack_resources(self, ctxt, stack_identity):
"""
List the resources belonging to a stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
"""
return self.call(ctxt, self.make_msg('list_stack_resources',
stack_identity=stack_identity))
def stack_suspend(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_suspend',
stack_identity=stack_identity))
def stack_resume(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_resume',
stack_identity=stack_identity))
def metadata_update(self, ctxt, stack_identity, resource_name, metadata):
"""
Update the metadata for the given resource.
"""
return self.call(ctxt, self.make_msg('metadata_update',
stack_identity=stack_identity,
resource_name=resource_name,
metadata=metadata))
def resource_signal(self, ctxt, stack_identity, resource_name, details):
"""
Generate an alarm on the resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: Name of the resource to signal.
:param details: the details of the signal.
"""
return self.call(ctxt, self.make_msg('resource_signal',
stack_identity=stack_identity,
resource_name=resource_name,
details=details))
def create_watch_data(self, ctxt, watch_name, stats_data):
'''
Create data points for a watch. This can be used by CloudWatch-style alarms
and WaitConditions, treating HA service events like any other CloudWatch data.
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm
:param stats_data: The data to post.
'''
return self.call(ctxt, self.make_msg('create_watch_data',
watch_name=watch_name,
stats_data=stats_data))
def show_watch(self, ctxt, watch_name):
"""
The show_watch method returns the attributes of one watch
or all watches if no watch_name is passed
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch',
watch_name=watch_name))
def show_watch_metric(self, ctxt, metric_namespace=None, metric_name=None):
"""
The show_watch_metric method returns the datapoints associated
with a specified metric, or all metrics if no metric_name is passed
:param ctxt: RPC context.
:param metric_namespace: Name of the namespace you want to see,
or None to see all
:param metric_name: Name of the metric you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch_metric',
metric_namespace=metric_namespace,
metric_name=metric_name))
def set_watch_state(self, ctxt, watch_name, state):
'''
Temporarily set the state of a given watch
:param ctxt: RPC context.
:param watch_name: Name of the watch
:param state: State (must be one defined in WatchRule class)
'''
return self.call(ctxt, self.make_msg('set_watch_state',
watch_name=watch_name,
state=state))
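# --- Usage sketch (illustrative assumption, not part of the original module) ---
# A minimal example of how an API-side caller might drive this client. The RPC
# context `ctxt` and the stack name are assumed to come from the surrounding
# request-handling code; the helper name is hypothetical.
def _example_show_and_delete(ctxt, stack_name):
    """Illustrative only: look up a stack by name, show it, then delete it."""
    engine = EngineClient()
    identity = engine.identify_stack(ctxt, stack_name)
    details = engine.show_stack(ctxt, identity)
    engine.delete_stack(ctxt, identity, cast=True)  # cast: do not wait for the result
    return details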
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple GPUs with synchronous updates.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from datetime import datetime
import os.path
import re
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_string('subset', 'train',
"""Either 'train' or 'validation'.""")
# Flags governing the hardware employed for running TensorFlow.
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
# Flags governing the type of training.
tf.app.flags.DEFINE_boolean('fine_tune', False,
"""If set, randomly initialize the final layer """
"""of weights in order to train the network on a """
"""new task.""")
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
"""If specified, restore this pretrained model """
"""before beginning any training.""")
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# for more guidance and discussion.
#
# With 8 Tesla K40's and a batch size = 256, the following setup achieves
# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).
# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.
tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
"""Initial learning rate.""")
tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
"""Epochs after which learning rate decays.""")
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
"""Learning rate decay factor.""")
# Constants dictating the RMSProp optimizer behaviour.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
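# --- Worked example (illustration only; the example image count is an assumption) ---
# The flags above turn "epochs per decay" into a step count inside train():
# with roughly 1.28M ImageNet training images and batch_size=256 there are
# about 5,000 batches per epoch, so num_epochs_per_decay=30.0 corresponds to
# decay_steps of roughly 150,000.
def _example_decay_steps(num_examples_per_epoch=1281167, batch_size=256,
                         num_epochs_per_decay=30.0):
  """Illustrative only: mirrors the decay_steps computation in train()."""
  num_batches_per_epoch = num_examples_per_epoch / batch_size
  return int(num_batches_per_epoch * num_epochs_per_decay)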
def _tower_loss(images, labels, num_classes, scope, reuse_variables=None):
"""Calculate the total loss on a single tower running the ImageNet model.
We perform 'batch splitting'. This means that we cut up a batch across
multiple GPUs. For instance, if the batch size = 32 and num_gpus = 2,
then each tower will operate on a batch of 16 images.
Args:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
num_classes: number of classes
scope: unique prefix string identifying the ImageNet tower, e.g.
'tower_0'.
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# When fine-tuning a model, we do not restore the logits but instead we
# randomly initialize the logits. The number of classes in the output of the
# logits layer is the number of classes in the specified Dataset.
restore_logits = not FLAGS.fine_tune
# Build inference Graph.
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
logits = inception.inference(images, num_classes, for_training=True,
restore_logits=restore_logits,
scope=scope)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
split_batch_size = images.get_shape().as_list()[0]
inception.loss(logits, labels, batch_size=split_batch_size)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
# Calculate the total loss for the current tower.
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on TensorBoard.
loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(loss_name +' (raw)', l)
tf.summary.scalar(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def _average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
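# --- Shape sketch (illustration of the averaging above) ---
# With two towers and two variables, tower_grads looks like:
#   [[(g0_t0, v0), (g1_t0, v1)],    # gradients computed on tower 0
#    [(g0_t1, v0), (g1_t1, v1)]]    # gradients computed on tower 1
# zip(*tower_grads) regroups the tuples per variable; each variable's gradients
# are stacked along a new leading 'tower' axis, mean-reduced, and paired with
# the first tower's reference to that (shared) variable.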
def train(dataset):
"""Train on dataset for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (dataset.num_examples_per_epoch() /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
momentum=RMSPROP_MOMENTUM,
epsilon=RMSPROP_EPSILON)
# Get images and labels for ImageNet and split the batch across GPUs.
assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
'Batch size must be divisible by number of GPUs')
split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
# Override the number of preprocessing threads to account for the increased
# number of GPU towers.
num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
images, labels = image_processing.distorted_inputs(
dataset,
num_preprocess_threads=num_preprocess_threads)
input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
# Split the batch of images and labels for towers.
images_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=images)
labels_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=labels)
# Calculate the gradients for each model tower.
tower_grads = []
reuse_variables = None
for i in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
# Force all Variables to reside on the CPU.
with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
# Calculate the loss for one tower of the ImageNet model. This
# function constructs the entire ImageNet model but shares the
# variables across all towers.
loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,
scope, reuse_variables)
# Reuse variables for the next tower.
reuse_variables = True
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Retain the Batch Normalization update operations only from the
# final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
scope)
# Calculate the gradients for the batch of data on this ImageNet
# tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = _average_gradients(tower_grads)
# Add summaries for the input processing and global_step.
summaries.extend(input_summaries)
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Track the moving averages of all trainable variables.
# Note that we maintain a "double-average" of the BatchNormalization
# global statistics. This is more complicated than need be but we employ
# this for backward-compatibility with our previous models.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY, global_step)
# Another possibility is to use tf.slim.get_variables().
variables_to_average = (tf.trainable_variables() +
tf.moving_average_variables())
variables_averages_op = variable_averages.apply(variables_to_average)
# Group all updates into a single train op.
batchnorm_updates_op = tf.group(*batchnorm_updates)
train_op = tf.group(apply_gradient_op, variables_averages_op,
batchnorm_updates_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
if FLAGS.pretrained_model_checkpoint_path:
assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
variables_to_restore = tf.get_collection(
slim.variables.VARIABLES_TO_RESTORE)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
print('%s: Pre-trained model restored from %s' %
(datetime.now(), FLAGS.pretrained_model_checkpoint_path))
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(
FLAGS.train_dir,
graph=sess.graph)
for step in range(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
examples_per_sec = FLAGS.batch_size / float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, duration))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 5000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
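# --- Driver sketch (assumption: the dataset wrapper lives in a separate module) ---
# train() only needs a dataset object exposing num_examples_per_epoch() and
# num_classes(); a hypothetical entry point might look like:
#
#     from inception.imagenet_data import ImagenetData  # assumed helper module
#     dataset = ImagenetData(subset=FLAGS.subset)
#     train(dataset)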
|
|
"""Base class extended by connection adapters. This extends the
connection.Connection class to encapsulate connection behavior but still
isolate socket and low level communication.
"""
import errno
import logging
import socket
import ssl
from pika import connection
from pika import exceptions
try:
SOL_TCP = socket.SOL_TCP
except AttributeError:
SOL_TCP = 6
LOGGER = logging.getLogger(__name__)
class BaseConnection(connection.Connection):
"""BaseConnection class that should be extended by connection adapters"""
# Use epoll's constants to keep life easy
READ = 0x0001
WRITE = 0x0004
ERROR = 0x0008
ERRORS_TO_IGNORE = [errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR]
DO_HANDSHAKE = True
WARN_ABOUT_IOLOOP = False
def __init__(self,
parameters=None,
on_open_callback=None,
on_open_error_callback=None,
on_close_callback=None,
ioloop=None,
stop_ioloop_on_close=True):
"""Create a new instance of the Connection object.
:param pika.connection.Parameters parameters: Connection parameters
:param method on_open_callback: Method to call on connection open
:param on_open_error_callback: Method to call if the connection can't
be opened
:type on_open_error_callback: method
:param method on_close_callback: Method to call on connection close
:param object ioloop: IOLoop object to use
:param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
:raises: RuntimeError
:raises: ValueError
"""
if parameters and not isinstance(parameters, connection.Parameters):
raise ValueError('Expected instance of Parameters, not %r' %
parameters)
# Let the developer know we could not import SSL
if parameters and parameters.ssl and not ssl:
raise RuntimeError("SSL specified but it is not available")
self.base_events = self.READ | self.ERROR
self.event_state = self.base_events
self.fd = None
self.ioloop = ioloop
self.socket = None
self.stop_ioloop_on_close = stop_ioloop_on_close
self.write_buffer = None
super(BaseConnection, self).__init__(parameters,
on_open_callback,
on_open_error_callback,
on_close_callback)
def add_timeout(self, deadline, callback_method):
"""Add the callback_method to the IOLoop timer to fire after deadline
seconds. Returns a handle to the timeout
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
:rtype: str
"""
return self.ioloop.add_timeout(deadline, callback_method)
def close(self, reply_code=200, reply_text='Normal shutdown'):
"""Disconnect from RabbitMQ. If there are any open channels, it will
attempt to close them prior to fully disconnecting. Channels which
have active consumers will attempt to send a Basic.Cancel to RabbitMQ
to cleanly stop the delivery of messages prior to closing the channel.
:param int reply_code: The code number for the close
:param str reply_text: The text reason for the close
"""
super(BaseConnection, self).close(reply_code, reply_text)
self._handle_ioloop_stop()
def remove_timeout(self, timeout_id):
"""Remove the timeout from the IOLoop by the ID returned from
add_timeout.
:param str timeout_id: The id of the timeout to remove
"""
self.ioloop.remove_timeout(timeout_id)
def _adapter_connect(self):
"""Connect to the RabbitMQ broker, returning True if connected
:rtype: bool
"""
# Get the addresses for the socket, supporting IPv4 & IPv6
try:
addresses = socket.getaddrinfo(self.params.host, self.params.port)
except socket.error as error:
LOGGER.critical('Could not get addresses to use: %s (%s)',
error, self.params.host)
return error
# If the socket is created and connected, continue on
error = "No socket addresses available"
for sock_addr in addresses:
error = self._create_and_connect_to_socket(sock_addr)
if not error:
return None
# Failed to connect
return error
def _adapter_disconnect(self):
"""Invoked if the connection is being told to disconnect"""
if hasattr(self, 'heartbeat') and self.heartbeat is not None:
self.heartbeat.stop()
if self.socket:
self.socket.close()
self.socket = None
self._check_state_on_disconnect()
self._handle_ioloop_stop()
self._init_connection_state()
def _check_state_on_disconnect(self):
"""Checks to see if we were in opening a connection with RabbitMQ when
we were disconnected and raises exceptions for the anticipated
exception types.
"""
if self.connection_state == self.CONNECTION_PROTOCOL:
LOGGER.error('Incompatible Protocol Versions')
raise exceptions.IncompatibleProtocolError
elif self.connection_state == self.CONNECTION_START:
LOGGER.error("Socket closed while authenticating indicating a "
"probable authentication error")
raise exceptions.ProbableAuthenticationError
elif self.connection_state == self.CONNECTION_TUNE:
LOGGER.error("Socket closed while tuning the connection indicating "
"a probable permission error when accessing a virtual "
"host")
raise exceptions.ProbableAccessDeniedError
elif self.is_open:
LOGGER.warning("Socket closed when connection was open")
elif not self.is_closed:
LOGGER.warning('Unknown state on disconnect: %i',
self.connection_state)
def _create_and_connect_to_socket(self, sock_addr_tuple):
"""Create socket and connect to it, using SSL if enabled."""
self.socket = socket.socket(sock_addr_tuple[0], socket.SOCK_STREAM, 0)
self.socket.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(self.params.socket_timeout)
# Wrap socket if using SSL
if self.params.ssl:
self.socket = self._wrap_socket(self.socket)
ssl_text = " with SSL"
else:
ssl_text = ""
LOGGER.info('Connecting to %s:%s%s',
sock_addr_tuple[4][0], sock_addr_tuple[4][1], ssl_text)
# Connect to the socket
try:
self.socket.connect(sock_addr_tuple[4])
except socket.timeout:
error = 'Connection to %s:%s failed: timeout' % (
sock_addr_tuple[4][0], sock_addr_tuple[4][1])
LOGGER.error(error)
return error
except socket.error as error:
error = 'Connection to %s:%s failed: %s' % (
sock_addr_tuple[4][0], sock_addr_tuple[4][1], error)
LOGGER.warning(error)
return error
# Handle SSL Connection Negotiation
if self.params.ssl and self.DO_HANDSHAKE:
try:
self._do_ssl_handshake()
except ssl.SSLError as error:
error = 'SSL connection to %s:%s failed: %s' % (
sock_addr_tuple[4][0], sock_addr_tuple[4][1], error)
LOGGER.error(error)
return error
# Made it this far
return None
def _do_ssl_handshake(self):
"""Perform SSL handshaking, copied from python stdlib test_ssl.py.
"""
if not self.DO_HANDSHAKE:
return
while True:
try:
self.socket.do_handshake()
break
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self.event_state = self.READ
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self.event_state = self.WRITE
else:
raise
self._manage_event_state()
def _get_error_code(self, error_value):
"""Get the error code from the error_value accounting for Python
version differences.
:rtype: int
"""
if not error_value:
return None
if hasattr(error_value, 'errno'): # Python >= 2.6
return error_value.errno
elif error_value is not None:
return error_value[0] # Python <= 2.5
return None
def _flush_outbound(self):
"""Call the state manager who will figure out that we need to write."""
self._manage_event_state()
def _handle_disconnect(self):
"""Called internally when the socket is disconnected already
"""
self._adapter_disconnect()
self._on_connection_closed(None, True)
def _handle_ioloop_stop(self):
"""Invoked when the connection is closed to determine if the IOLoop
should be stopped or not.
"""
if self.stop_ioloop_on_close and self.ioloop:
self.ioloop.stop()
elif self.WARN_ABOUT_IOLOOP:
LOGGER.warning('Connection is closed but not stopping IOLoop')
def _handle_error(self, error_value):
"""Internal error handling method. Here we expect a socket.error
coming in and will handle different socket errors differently.
:param int|object error_value: The inbound error
"""
if 'timed out' in str(error_value):
raise socket.timeout
error_code = self._get_error_code(error_value)
if not error_code:
LOGGER.critical("Tried to handle an error where no error existed")
return
# Ok errors, just continue what we were doing before
if error_code in self.ERRORS_TO_IGNORE:
LOGGER.debug("Ignoring %s", error_code)
return
# Socket is closed, so let's just fall through to the disconnect handling below
elif error_code in (errno.EBADF, errno.ECONNABORTED):
LOGGER.error("Socket is closed")
elif self.params.ssl and isinstance(error_value, ssl.SSLError):
if error_value.args[0] == ssl.SSL_ERROR_WANT_READ:
self.event_state = self.READ
elif error_value.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self.event_state = self.WRITE
else:
LOGGER.error("SSL Socket error on fd %d: %r",
self.socket.fileno(), error_value)
elif error_code == errno.EPIPE:
# Broken pipe, happens when connection reset
LOGGER.error("Socket connection was broken")
else:
# Haven't run into this one yet, log it.
LOGGER.error("Socket Error on fd %d: %s",
self.socket.fileno(), error_code)
# Disconnect from our IOLoop and let Connection know what's up
self._handle_disconnect()
def _handle_events(self, fd, events, error=None, write_only=False):
"""Handle IO/Event loop events, processing them.
:param int fd: The file descriptor for the events
:param int events: Events from the IO/Event loop
:param int error: Was an error specified
:param bool write_only: Only handle write events
"""
if not fd:
LOGGER.error('Received events on closed socket: %d', fd)
return
if events & self.WRITE:
self._handle_write()
self._manage_event_state()
if not write_only and (events & self.READ):
self._handle_read()
if write_only and (events & self.READ) and (events & self.ERROR):
LOGGER.error('BAD libc: Write-Only but Read+Error. '
'Assume socket disconnected.')
self._handle_disconnect()
if events & self.ERROR:
LOGGER.error('Error event %r, %r', events, error)
self._handle_error(error)
def _handle_read(self):
"""Read from the socket and call our on_data_available with the data."""
try:
if self.params.ssl:
data = self.socket.read(self._buffer_size)
else:
data = self.socket.recv(self._buffer_size)
except socket.timeout:
raise
except socket.error as error:
return self._handle_error(error)
# Empty data, should disconnect
if not data or data == 0:
LOGGER.error('Read empty data, calling disconnect')
return self._handle_disconnect()
# Pass the data into our top level frame dispatching method
self._on_data_available(data)
return len(data)
def _handle_write(self):
"""Handle any outbound buffer writes that need to take place."""
bytes_written = 0
if self.outbound_buffer:
frame = self.outbound_buffer.popleft()
try:
self.socket.sendall(frame)
bytes_written = len(frame)
except socket.timeout:
raise
except socket.error as error:
return self._handle_error(error)
return bytes_written
def _init_connection_state(self):
"""Initialize or reset all of our internal state variables for a given
connection. If we disconnect and reconnect, all of our state needs to
be wiped.
"""
super(BaseConnection, self)._init_connection_state()
self.fd = None
self.base_events = self.READ | self.ERROR
self.event_state = self.base_events
self.socket = None
def _manage_event_state(self):
"""Manage the bitmask for reading/writing/error which is used by the
io/event handler to specify when there is an event such as a read or
write.
"""
if self.outbound_buffer:
if not self.event_state & self.WRITE:
self.event_state |= self.WRITE
self.ioloop.update_handler(self.socket.fileno(),
self.event_state)
elif self.event_state & self.WRITE:
self.event_state = self.base_events
self.ioloop.update_handler(self.socket.fileno(), self.event_state)
def _wrap_socket(self, sock):
"""Wrap the socket for connecting over SSL.
:rtype: ssl.SSLSocket
"""
return ssl.wrap_socket(sock,
do_handshake_on_connect=self.DO_HANDSHAKE,
**self.params.ssl_options)
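# --- Adapter sketch (illustration only; the select-based mapping is an assumption) ---
# Concrete adapters subclass BaseConnection and supply an ioloop that understands
# the READ/WRITE/ERROR bitmask. A toy translation of that bitmask into
# select.poll() flags (select.poll is Unix-only):
def _example_to_poll_events(event_state):
    """Illustrative only: map a BaseConnection event bitmask to poll() flags."""
    import select
    mapping = {
        BaseConnection.READ: select.POLLIN,
        BaseConnection.WRITE: select.POLLOUT,
        BaseConnection.ERROR: select.POLLERR,
    }
    flags = 0
    for conn_flag, poll_flag in mapping.items():
        if event_state & conn_flag:
            flags |= poll_flag
    return flags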
|
|
import argparse
import multiprocessing
import multiprocessing.pool
import sys
import random
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
from api import State, util
from bots.ml.ml import features
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
colors = {
'SUCCESS': GREEN,
'INFO': BLUE,
'WARN': YELLOW,
'FAIL': RED
}
args = None
NOTIFY_AMOUNT = 50
def main():
pool = multiprocessing.Pool(processes=args.parallelism)
bots = []
    for botname in args.players:
bots.append(util.load_player(botname))
matches = len(bots) * args.matches * len(args.planets)
log("Training against {} Bots, {} Maps, {} Matches".format(len(bots), len(args.planets), matches))
data, target = [], []
try:
i = 0
for ret in pool.imap_unordered(execute, gen_rounds(bots)):
i += 1
(bid, mid), winner, state_vectors, (map_size, seed) = ret
if winner == 1:
result = 'won'
elif winner == 2:
result = 'lost'
else:
result = 'draw'
data += state_vectors
target += [result] * len(state_vectors)
log("({}:{} | {}:{}): {}".format(bid, mid, map_size, seed, result), lvl=1)
if i % NOTIFY_AMOUNT == 0:
                log("Finished {}/{} matches ({:.2f}%).".format(i, matches, (float(i) / matches * 100)))
except KeyboardInterrupt:
log("Tournament interrupted by user", type="FAIL")
pool.terminate()
pool.join()
sys.exit(1)
pool.close()
pool.join()
log("All games finished", type="SUCCESS")
generate_model(data, target)
# If you wish to use a different model, generate_model below is where to edit;
# a hedged sketch of an alternative learner follows it.
def generate_model(data, target):
log("Training logistic regression model", lvl=1)
learner = LogisticRegression()
model = learner.fit(data, target)
log("Checking class imbalance", lvl=1)
count = {}
    for label in target:
        if label not in count:
            count[label] = 0
        count[label] += 1
log("Instances per class: {}".format(count))
joblib.dump(model, args.model)
log("Done", type="SUCCESS")
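# A hedged sketch of swapping in a different scikit-learn estimator, as noted
# above; MLPClassifier and its parameters are illustrative choices, not part of
# the original training flow.
def generate_model_mlp(data, target):
    from sklearn.neural_network import MLPClassifier
    log("Training MLP model", lvl=1)
    learner = MLPClassifier(hidden_layer_sizes=(64,), max_iter=500)
    model = learner.fit(data, target)
    joblib.dump(model, args.model)
    log("Done", type="SUCCESS")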
def gen_rounds(bots):
for bid, bot in enumerate(bots):
for map_id, map_size in enumerate(args.planets):
for i in range(args.matches):
mid = map_id * args.matches + i
seed = random.randint(0, 100000)
yield ((bid, mid), bot, (map_size, seed, args.max_turns, args.asym))
def execute(params):
ids, bot, (map_size, seed, max_turns, asym) = params
state, _ = State.generate(map_size, seed, symmetric=not asym)
state_vectors = []
i = 0
while not state.finished() and i <= max_turns:
state_vectors.append(features(state))
move = bot.get_move(state)
state = state.next(move)
i += 1
winner = state.winner()
return ids, winner, state_vectors, (map_size, seed)
# following from Python cookbook, #475186
def has_colours(stream):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
def log(s, type='INFO', lvl=0):
color = WHITE
if type in colors:
color = colors[type]
if args.verbose >= lvl:
sys.stdout.write("[")
printout("%07s" % type, color)
sys.stdout.write("] %s\n" % s)
def printout(text, colour=WHITE):
if args.color:
seq = "\x1b[1;%dm" % (30 + colour) + text + "\x1b[0m"
sys.stdout.write(seq)
else:
sys.stdout.write(text)
def optparse():
global args
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--color', action='store_true', dest='color',
help="force color output")
parser.add_argument('-n', '--no-color', action='store_false', dest='color',
help="force disable color output")
parser.add_argument("-p", "--num-planets",
dest="planets",
help="List of map sizes to use",
type=int, nargs='*',
default=[6])
parser.add_argument("-m", "--num-matches",
dest="matches",
help="Amount of matches played per map size",
type=int, default=1000)
parser.add_argument("-t", "--max-time",
dest="max_time",
help="Maximum amount of time allowed per turn in seconds",
type=float, default=5)
parser.add_argument("-T", "--max-turns",
dest="max_turns",
help="Maximum amount of turns per game",
type=int, default=100)
parser.add_argument("model",
help="Output file for model",
type=str, default="./bots/ml/model.pkl")
parser.add_argument("players",
metavar="player",
help="Players for the game",
type=str, nargs='+')
parser.add_argument("-P", "--pool-size",
dest="parallelism",
help="Pool size for parallelism. Do not use unless you know what you are doing",
type=int, default=multiprocessing.cpu_count())
parser.add_argument("-v", "--verbose",
action="count", default=0,
help="Show more output")
parser.add_argument("-a", "--asym", dest="asym",
help="Whether to start with an asymmetric state.",
action="store_true")
parser.set_defaults(color=has_colours(sys.stdout))
args = parser.parse_args()
if __name__ == "__main__":
optparse()
main()
|
|
# coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects representing a file system and a file stream."""
import logging
import os
from core.platform import models
import feconf
import utils
(file_models,) = models.Registry.import_models([
models.NAMES.file
])
CHANGE_LIST_SAVE = [{'cmd': 'save'}]
class FileMetadata(object):
"""A class representing the metadata of a file."""
def __init__(self, metadata):
self._size = metadata.size if (metadata is not None) else None
@property
def size(self):
return self._size
class FileStreamWithMetadata(object):
"""A class that wraps a file stream, but adds extra attributes to it."""
def __init__(self, content, version, metadata):
"""The args are a file content blob and a metadata model object."""
self._content = content
self._version = version
self._metadata = FileMetadata(metadata)
def read(self):
"""Emulates stream.read(). Returns all bytes and emulates EOF."""
content = self._content
self._content = ''
return content
@property
def metadata(self):
return self._metadata
@property
def version(self):
return self._version
class ExplorationFileSystem(object):
"""A datastore-backed read-write file system for a single exploration.
The conceptual intention is for each exploration to have its own asset
folder. An asset has no meaning outside its exploration, so the assets in
these asset folders should therefore not be edited directly. They should
only be modified as side-effects of some other operation (such as adding an
image to an exploration).
The content of an exploration should include a reference to the asset
together with the version number of the asset. This allows the
exploration to refer to asset versions.
In general, assets should be retrieved only within the context of the
exploration that contains them, and should not be retrieved outside this
context.
"""
_DEFAULT_VERSION_NUMBER = 1
def __init__(self, exploration_id):
self._exploration_id = exploration_id
@property
def exploration_id(self):
return self._exploration_id
def _get_file_metadata(self, filepath, version):
"""Return the desired file metadata.
Returns None if the file does not exist.
"""
if version is None:
return file_models.FileMetadataModel.get_model(
self._exploration_id, 'assets/%s' % filepath)
else:
return file_models.FileMetadataModel.get_version(
self._exploration_id, 'assets/%s' % filepath, version)
def _get_file_data(self, filepath, version):
"""Return the desired file content.
Returns None if the file does not exist.
"""
if version is None:
return file_models.FileModel.get_model(
self._exploration_id, 'assets/%s' % filepath)
else:
return file_models.FileModel.get_version(
self._exploration_id, 'assets/%s' % filepath, version)
def _save_file(self, user_id, filepath, raw_bytes):
"""Create or update a file."""
if len(raw_bytes) > feconf.MAX_FILE_SIZE_BYTES:
raise Exception('The maximum allowed file size is 1 MB.')
metadata = self._get_file_metadata(filepath, None)
if not metadata:
metadata = file_models.FileMetadataModel.create(
self._exploration_id, 'assets/%s' % filepath)
metadata.size = len(raw_bytes)
data = self._get_file_data(filepath, None)
if not data:
data = file_models.FileModel.create(
self._exploration_id, 'assets/%s' % filepath)
data.content = raw_bytes
data.commit(user_id, CHANGE_LIST_SAVE)
metadata.commit(user_id, CHANGE_LIST_SAVE)
def get(self, filepath, version=None, mode=None): # pylint: disable=unused-argument
"""Gets a file as an unencoded stream of raw bytes.
If `version` is not supplied, the latest version is retrieved. If the
file does not exist, None is returned.
The 'mode' argument is unused. It is included so that this method
signature matches that of other file systems.
"""
metadata = self._get_file_metadata(filepath, version)
if metadata:
data = self._get_file_data(filepath, version)
if data:
if version is None:
version = data.version
return FileStreamWithMetadata(data.content, version, metadata)
else:
logging.error(
'Metadata and data for file %s (version %s) are out of '
'sync.' % (filepath, version))
return None
else:
return None
def commit(self, user_id, filepath, raw_bytes):
"""Saves a raw bytestring as a file in the database."""
self._save_file(user_id, filepath, raw_bytes)
def delete(self, user_id, filepath):
"""Marks the current version of a file as deleted."""
metadata = self._get_file_metadata(filepath, None)
if metadata:
metadata.delete(user_id, '')
data = self._get_file_data(filepath, None)
if data:
data.delete(user_id, '')
def isfile(self, filepath):
"""Checks the existence of a file."""
metadata = self._get_file_metadata(filepath, None)
return bool(metadata)
def listdir(self, dir_name):
"""Lists all files in a directory.
Args:
dir_name: The directory whose files should be listed. This should
not start with '/' or end with '/'.
Returns:
List of str. This is a lexicographically-sorted list of filenames,
each of which is prefixed with dir_name.
"""
# The trailing slash is necessary to prevent non-identical directory
# names with the same prefix from matching, e.g. /abcd/123.png should
# not match a query for files under /abc/.
prefix = '%s' % utils.vfs_construct_path(
'/', self._exploration_id, 'assets', dir_name)
if not prefix.endswith('/'):
prefix += '/'
result = set()
metadata_models = file_models.FileMetadataModel.get_undeleted()
for metadata_model in metadata_models:
filepath = metadata_model.id
if filepath.startswith(prefix):
result.add('/'.join(filepath.split('/')[3:]))
return sorted(list(result))
class DiskBackedFileSystem(object):
"""Implementation for a disk-backed file system.
This implementation ignores versioning and is used only by tests.
"""
def __init__(self, root):
"""Constructor for this class.
Args:
root: the path to append to the oppia/ directory.
"""
self._root = os.path.join(os.getcwd(), root)
self._exploration_id = 'test'
@property
def exploration_id(self):
return self._exploration_id
def isfile(self, filepath):
"""Checks if a file exists."""
return os.path.isfile(os.path.join(self._root, filepath))
def get(self, filepath, version=None, mode='r'): # pylint: disable=unused-argument
"""Returns a bytestring with the file content, but no metadata."""
content = utils.get_file_contents(
os.path.join(self._root, filepath), raw_bytes=True, mode=mode)
return FileStreamWithMetadata(content, None, None)
def commit(self, user_id, filepath, raw_bytes):
raise NotImplementedError
def delete(self, user_id, filepath):
raise NotImplementedError
def listdir(self, dir_name):
raise NotImplementedError
class AbstractFileSystem(object):
"""Interface for a file system."""
def __init__(self, impl):
self._impl = impl
@property
def impl(self):
return self._impl
def _check_filepath(self, filepath):
"""Raises an error if a filepath is invalid."""
base_dir = utils.vfs_construct_path(
'/', self.impl.exploration_id, 'assets')
absolute_path = utils.vfs_construct_path(base_dir, filepath)
normalized_path = utils.vfs_normpath(absolute_path)
# This check prevents directory traversal.
if not normalized_path.startswith(base_dir):
raise IOError('Invalid filepath: %s' % filepath)
def isfile(self, filepath):
"""Checks if a file exists. Similar to os.path.isfile(...)."""
self._check_filepath(filepath)
return self._impl.isfile(filepath)
def open(self, filepath, version=None, mode='r'):
"""Returns a stream with the file content. Similar to open(...)."""
self._check_filepath(filepath)
return self._impl.get(filepath, version=version, mode=mode)
def get(self, filepath, version=None, mode='r'):
"""Returns a bytestring with the file content, but no metadata."""
file_stream = self.open(filepath, version=version, mode=mode)
if file_stream is None:
raise IOError(
'File %s (version %s) not found.'
% (filepath, version if version else 'latest'))
return file_stream.read()
def commit(self, user_id, filepath, raw_bytes):
"""Replaces the contents of the file with the given bytestring."""
raw_bytes = str(raw_bytes)
self._check_filepath(filepath)
self._impl.commit(user_id, filepath, raw_bytes)
def delete(self, user_id, filepath):
"""Deletes a file and the metadata associated with it."""
self._check_filepath(filepath)
self._impl.delete(user_id, filepath)
def listdir(self, dir_name):
"""Lists all the files in a directory. Similar to os.listdir(...)."""
self._check_filepath(dir_name)
return self._impl.listdir(dir_name)
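# A minimal usage sketch of the classes above, assuming a datastore backend is
# available (as in a running Oppia instance); the user id, exploration id and
# file contents are illustrative placeholders.
def _example_file_system_usage(user_id, exploration_id):
    fs = AbstractFileSystem(ExplorationFileSystem(exploration_id))
    fs.commit(user_id, 'images/logo.png', 'raw image bytes')
    assert fs.isfile('images/logo.png')
    content = fs.get('images/logo.png')       # latest version of the file
    filenames = fs.listdir('images')          # e.g. ['images/logo.png']
    return content, filenames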
|
|
from __future__ import absolute_import
from sentry.grouping.api import get_default_grouping_config_dict, load_grouping_config
from sentry.stacktraces.processing import (
find_stacktraces_in_data,
normalize_stacktraces_for_grouping,
)
from sentry.testutils import TestCase
class FindStacktracesTest(TestCase):
def test_stacktraces_basics(self):
data = {
"message": "hello",
"platform": "javascript",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 4,
"colno": 0,
},
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 1,
"colno": 0,
"platform": "native",
},
]
},
}
infos = find_stacktraces_in_data(data)
assert len(infos) == 1
assert len(infos[0].stacktrace["frames"]) == 2
assert infos[0].platforms == set(["javascript", "native"])
def test_stacktraces_exception(self):
data = {
"message": "hello",
"platform": "javascript",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 4,
"colno": 0,
},
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 1,
"colno": 0,
},
]
},
}
]
},
}
infos = find_stacktraces_in_data(data)
assert len(infos) == 1
assert len(infos[0].stacktrace["frames"]) == 2
def test_stacktraces_threads(self):
data = {
"message": "hello",
"platform": "javascript",
"threads": {
"values": [
{
"id": "4711",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 4,
"colno": 0,
},
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 1,
"colno": 0,
},
]
},
}
]
},
}
infos = find_stacktraces_in_data(data)
assert len(infos) == 1
assert len(infos[0].stacktrace["frames"]) == 2
def test_find_stacktraces_skip_none(self):
# This tests:
# 1. exception is None
# 2. stacktrace is None
        # 3. frames is None
        # 4. frames contains only None
        # 5. frame is None
data = {
"message": "hello",
"platform": "javascript",
"exception": {
"values": [
None,
{"type": "Error", "stacktrace": None},
{"type": "Error", "stacktrace": {"frames": None}},
{"type": "Error", "stacktrace": {"frames": [None]}},
{
"type": "Error",
"stacktrace": {
"frames": [
None,
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 4,
"colno": 0,
},
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 1,
"colno": 0,
},
]
},
},
]
},
}
infos = find_stacktraces_in_data(data)
assert len(infos) == 1
# XXX: The null frame is still part of this stack trace!
assert len(infos[0].stacktrace["frames"]) == 3
class NormalizeInAppTest(TestCase):
def test_normalize_with_system_frames(self):
data = {
"stacktrace": {
"frames": [
None,
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 4,
"colno": 0,
"in_app": True,
},
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 1,
"colno": 0,
},
]
}
}
normalize_stacktraces_for_grouping(data)
assert data["stacktrace"]["frames"][1]["in_app"] is True
assert data["stacktrace"]["frames"][2]["in_app"] is False
def test_normalize_skips_none(self):
data = {
"stacktrace": {
"frames": [
None,
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 4,
"colno": 0,
},
{
"abs_path": "http://example.com/foo.js",
"filename": "foo.js",
"lineno": 1,
"colno": 0,
},
]
}
}
normalize_stacktraces_for_grouping(data)
assert data["stacktrace"]["frames"][1]["in_app"] is False
assert data["stacktrace"]["frames"][2]["in_app"] is False
def test_ios_package_in_app_detection(self):
data = {
"platform": "native",
"stacktrace": {
"frames": [
{
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": "0x1000",
},
{
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/Frameworks/foo.dylib",
"instruction_addr": "0x2000",
},
{
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/Frameworks/libswiftCore.dylib",
"instruction_addr": "0x3000",
},
{"package": "/usr/lib/whatever.dylib", "instruction_addr": "0x4000"},
]
},
}
config = load_grouping_config(get_default_grouping_config_dict())
normalize_stacktraces_for_grouping(data, grouping_config=config)
# App object should be in_app
assert data["stacktrace"]["frames"][0]["in_app"] is True
# Framework should be in app (but optional)
assert data["stacktrace"]["frames"][1]["in_app"] is True
        # libswift should be treated as a system frame, not in_app
assert data["stacktrace"]["frames"][2]["in_app"] is False
# Unknown object should default to not in_app
assert data["stacktrace"]["frames"][3]["in_app"] is False
    def test_macos_package_in_app_detection(self):
data = {
"platform": "cocoa",
"debug_meta": {"images": []}, # omitted
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "-[CRLCrashAsyncSafeThread crash]",
"package": "/Users/haza/Library/Developer/Xcode/Archives/2017-06-19/CrashProbe 19-06-2017, 08.53.xcarchive/Products/Applications/CrashProbe.app/Contents/Frameworks/CrashLib.framework/Versions/A/CrashLib",
"instruction_addr": 4295098388,
},
{
"function": "[KSCrash ]",
"package": "/usr/lib/system/libdyld.dylib",
"instruction_addr": 4295098388,
},
]
},
"type": "NSRangeException",
}
]
},
"contexts": {"os": {"version": "10.12.5", "type": "os", "name": "macOS"}},
}
config = load_grouping_config(get_default_grouping_config_dict())
normalize_stacktraces_for_grouping(data, grouping_config=config)
frames = data["exception"]["values"][0]["stacktrace"]["frames"]
assert frames[0]["in_app"] is True
assert frames[1]["in_app"] is False
def test_ios_function_name_in_app_detection(self):
data = {
"platform": "cocoa",
"debug_meta": {"images": []}, # omitted
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "+[RNSentry ]",
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": 4295098388,
},
{
"function": "+[SentryClient ]",
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": 4295098388,
},
{
"function": "kscrash_foobar",
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": 4295098388,
},
{
"function": "kscm_foobar",
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": 4295098388,
},
{
"function": "+[KSCrash ]",
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": 4295098388,
},
{
"function": "+[KSCrash]",
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": 4295098388,
},
{
"function": "+[KSCrashy]",
"package": "/var/containers/Bundle/Application/B33C37A8-F933-4B6B-9FFA-152282BFDF13/SentryTest.app/SentryTest",
"instruction_addr": 4295098388,
},
]
},
"type": "NSRangeException",
}
]
},
"contexts": {"os": {"version": "9.3.2", "type": "os", "name": "iOS"}},
}
config = load_grouping_config(get_default_grouping_config_dict())
normalize_stacktraces_for_grouping(data, grouping_config=config)
frames = data["exception"]["values"][0]["stacktrace"]["frames"]
assert frames[0]["in_app"] is False
assert frames[1]["in_app"] is False
assert frames[2]["in_app"] is False
assert frames[3]["in_app"] is False
assert frames[4]["in_app"] is False
assert frames[5]["in_app"] is True
assert frames[6]["in_app"] is True
|
|
import typing
import enum
import io
import collections
import click
import json
from dataclasses import dataclass
from typing import List, Dict, TypeVar, Union, Generic, Optional
from ontobio.validation import metadata
from ontobio.io import assocparser
from ontobio.io import gafparser
from ontobio.io import assocwriter
"""
This module is for centralizing logic related to validating the example data in
GO Rules. Optional data can be included in rules to show examples of failing,
passing, or repair-type behaviour for incoming data (either GAF, GPAD, or RDF).
In all likelihood this will only support GAF for the first pass.
Relevant schema:
```
"examples":
type: map
required: false
mapping:
"pass":
type: seq
required: false
sequence:
- type: map
mapping:
"comment":
type: str
required: true
"format":
type: str
required: true
enum: ["rdf", "gaf", "gpad"]
"input":
type: str
required: true
"fail":
type: seq
required: false
sequence:
- type: map
mapping:
"comment":
type: str
required: true
"format":
type: str
required: true
enum: ["rdf", "gaf", "gpad"]
"input":
type: str
required: true
"repair":
type: seq
required: false
sequence:
- type: map
mapping:
"comment":
type: str
required: true
"format":
type: str
required: true
enum: ["rdf", "gaf", "gpad"]
"input":
type: str
required: true
"output":
type: str
required: true
```
"""
FormatType = enum.Enum("FormatType", ["RDF", "GAF", "GPAD"])
ExampleType = enum.Enum("ExampleType", {"REPAIR": "repair", "FAIL": "fail", "PASS": "pass"})
def format_from_string(format: str) -> Optional[FormatType]:
if format == "rdf":
return FormatType.RDF
if format == "gaf":
return FormatType.GAF
if format == "gpad":
return FormatType.GPAD
return None
@dataclass
class RuleExample:
rule_id: str
example_type: ExampleType
input: str
format: FormatType
expected: Union[str, bool]
@classmethod
    def example_from_json(cls, rule_json: Dict) -> List:
"""
This constructs the set of examples to be run from a single GO rule.
"""
# Returns List of RuleExample
if "examples" not in rule_json:
# Bail if we don't have any examples
return []
fail_examples = rule_json["examples"].get("fail", [])
pass_examples = rule_json["examples"].get("pass", [])
repair_examples = rule_json["examples"].get("repair", [])
built_examples = [] # type: List[RuleExample]
ruleid = rule_json["id"].lower().replace(":", "-")
for ex in fail_examples:
f = format_from_string(ex["format"]) # type: Optional[FormatType]
# Expected is False since these are "fail" examples
built_examples.append(RuleExample(ruleid, ExampleType.FAIL, ex["input"], f, False))
for ex in pass_examples:
f = format_from_string(ex["format"]) # type: Optional[FormatType]
# Expected is True since these are "pass" examples
built_examples.append(RuleExample(ruleid, ExampleType.PASS, ex["input"], f, True))
for ex in repair_examples:
f = format_from_string(ex["format"]) # type: Optional[FormatType]
# Expected will come from the `output` field
built_examples.append(RuleExample(ruleid, ExampleType.REPAIR, ex["input"], f, ex["output"]))
return built_examples
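# A hedged sketch of the rule JSON shape consumed above; the rule id and GAF
# line are invented for illustration only.
def _example_rule_examples():
    rule_json = {
        "id": "GORULE:0000099",
        "examples": {
            "fail": [
                {"comment": "illustrative failing case", "format": "gaf", "input": "<one GAF line>"}
            ]
        }
    }
    # Produces a single RuleExample with expected=False (a "fail" example).
    return RuleExample.example_from_json(rule_json)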
@dataclass
class ValidationResult:
example: RuleExample
actual: Union[str, bool]
success: bool
reason: str
def to_json(self) -> Dict:
return {
"rule": self.example.rule_id,
"type": self.example.example_type.value,
"format": self.example.format.value,
"input": self.example.input,
"expected": self.example.expected,
"actual": self.actual,
"success": self.success,
"reason": self.reason
}
Parsed = collections.namedtuple("Parsed", ["report", "output", "expected"])
#==============================================================================
def normalize_tsv_row(size: int, tsv: str) -> str:
columns = tsv.split("\t")
if len(columns) < size:
columns += [""] * (size - len(columns))
elif len(columns) > size:
columns = columns[0:size]
return "\t".join(columns)
def validate_all_examples(examples: List[RuleExample], config=None) -> List[ValidationResult]:
results = []
for ex in examples:
r = validate_example(ex, config=config)
results.append(r)
return results
def validate_example(example: RuleExample, config=None) -> ValidationResult:
"""
1. Create parser based on `format`
2. Run input into parser/validator
3. In parsed rule results, find the results for the rule given in the example
4. Decide on what the `output` is.
5. Validate output against the example `expected` to decide on `success`
    6. Consolidate and return `ValidationResult`
"""
parser = create_base_parser(example.format)
parsed = validate_input(example, parser, config=config)
# click.echo(parsed)
success = example_success(example, parsed)
actual = len(parsed.report) == 0 if example.example_type in [ExampleType.FAIL, ExampleType.PASS] else parsed.output
reason = "Valid"
if not success:
        if example.example_type in [ExampleType.PASS, ExampleType.FAIL]:
reason = "Input was expected to {passfail} {ruleid}, but it did not: {message}".format(passfail=example.example_type.value, ruleid=example.rule_id,
message="; ".join([m["message"] for m in parsed.report]))
else:
reason = "Repair found `{}`, but expected `{}`".format(actual, example.expected)
result = ValidationResult(example, actual, success, reason)
return result
def create_base_parser(format: FormatType) -> Optional[assocparser.AssocParser]:
"""
Make an unconfigured parser based on the format. Only GAF is supported currently.
"""
parser = None
if format == FormatType.GAF:
parser = gafparser.GafParser(config=assocparser.AssocParserConfig())
else:
parser = None
return parser
def validate_input(example: RuleExample, parser: assocparser.AssocParser, config=None) -> Parsed:
if config:
parser.config = config
out = []
writer = assocwriter.GafWriter(file=io.StringIO())
assocs_gen = parser.association_generator(file=io.StringIO(example.input), skipheader=True)
for assoc in assocs_gen:
out.append(writer.tsv_as_string(writer.as_tsv(assoc)))
rule_messages = parser.report.reporter.messages.get(example.rule_id, [])
rule_messages.extend(parser.report.reporter.messages.get("gorule-0000001", []))
# We have to also parse the expected result if we are in a repair to normalize all the data
expected_out = []
if example.example_type == ExampleType.REPAIR:
expected_parsed_gen = create_base_parser(example.format).association_generator(file=io.StringIO(example.expected), skipheader=True)
expected_writer = assocwriter.GafWriter(file=io.StringIO())
for assoc in expected_parsed_gen:
expected_out.append(expected_writer.tsv_as_string(expected_writer.as_tsv(assoc)))
    # We only collect the messages from the rule under test (plus any gorule-0000001 messages)
return Parsed(report=rule_messages, output="\n".join(out), expected="\n".join(expected_out))
def example_success(example: RuleExample, parsed_input: Parsed) -> bool:
"""
Decide if the example was a success. Given the example and the result of parsing
    and validation, this returns True if the example was expected to fail (or be
    repaired by) the rule and it did so, or if it was expected to pass the rule
    and it did so.
    This returns False if validation produces something we did not expect.
    Additionally, an example will not succeed if it is not testing rule 1 but still fails rule 1.
"""
success = False
if example.rule_id != "gorule-0000001" and (1 in [message["rule"] for message in parsed_input.report]):
        # If we find a gorule-0000001 report in a non gorule-0000001 example, then we automatically fail the example
return False
if example.example_type == ExampleType.REPAIR:
success = parsed_input.output == parsed_input.expected
elif example.example_type in [ ExampleType.FAIL, ExampleType.PASS ]:
# The rule was passed if there were no messages from that rule
passed_rule = len(parsed_input.report) == 0
        # We have a successful example if the pass/fail outcome above matches what the example expected
success = passed_rule == example.expected
return success
def validation_report(all_results: List[ValidationResult]) -> Dict:
"""
{
"gorule-00000008": {
"results": [
{
"rule": "gorule-00000008"
"type": "fail|pass|repair",
"format": "gaf|gpad|rdf",
"input": "string",
"expected": "string|bool",
"actual": "string|bool",
"success": "bool",
"reason": "string"
}, "..."
]
}
}
"""
report_dict = dict()
for r in all_results:
if r.example.rule_id not in report_dict:
report_dict[r.example.rule_id] = []
r_out = r.to_json()
report_dict[r.example.rule_id].append(r_out)
return report_dict
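# A hedged sketch of the end-to-end flow described in validate_example() above;
# rules_json is assumed to be a list of already-loaded GO rule dictionaries.
def _example_validation_run(rules_json: List[Dict]) -> Dict:
    examples = []
    for rule in rules_json:
        examples.extend(RuleExample.example_from_json(rule))
    results = validate_all_examples(examples)
    return validation_report(results)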
|
|
"""Implementation of OpenGL errors/exceptions
Note that OpenGL-ctypes will also throw standard errors,
such as TypeError or ValueError when appropriate.
ErrorChecker is an _ErrorChecker instance that allows you
to register a new error-checking function for use
throughout the system.
"""
import OpenGL, logging
log = logging.getLogger( 'OpenGL.error' )
from OpenGL import platform, _configflags
__all__ = (
"Error",'GLError','GLUError','GLUTError','glCheckError',
'GLerror','GLUerror','GLUTerror',
)
class Error( Exception ):
"""Base class for all PyOpenGL-specific exception classes"""
class NoContext( Error ):
"""Raised to indicate that there is no currently active context
Technically almost *any* OpenGL call can segfault if there is
    no active context. The OpenGL.CHECK_CONTEXT flag, if enabled,
will cause this error to be raised whenever a GL or GLU call is
issued (via PyOpenGL) if there is no currently valid context.
"""
class CopyError( Error ):
"""Raised to indicate that operation requires data-copying
if you set:
OpenGL.ERROR_ON_COPY = True
before importing OpenGL.GL, this error will be raised when
a passed argument would require a copy to be made.
"""
class NullFunctionError( Error ):
"""Error raised when an undefined function is called"""
class GLError( Error ):
"""OpenGL core error implementation class
Primary purpose of this error class is to allow for
annotating an error with more details about the calling
environment so that it's easier to debug errors in the
wrapping process.
Attributes:
err -- the OpenGL error code for the error
result -- the OpenGL result code for the operation
baseOperation -- the "function" being called
pyArgs -- the translated set of Python arguments
cArgs -- the Python objects matching 1:1 the C arguments
cArguments -- ctypes-level arguments to the operation,
often raw integers for pointers and the like
description -- OpenGL description of the error (textual)
"""
def __init__(
self,
err=None,
result=None,
cArguments=None,
baseOperation=None,
pyArgs=None,
cArgs=None,
description=None,
):
"""Initialise the GLError, storing metadata for later display"""
(
self.err, self.result, self.cArguments,
self.baseOperation, self.pyArgs, self.cArgs,
self.description
) = (
err, result, cArguments,
baseOperation, pyArgs, cArgs,
description
)
DISPLAY_ORDER = (
'err',
'description',
'baseOperation',
'pyArgs',
'cArgs',
'cArguments',
'result',
)
def __str__( self ):
"""Create a fully formatted representation of the error"""
args = []
for property in self.DISPLAY_ORDER:
value = getattr( self, property, None )
if value is not None or property=='description':
formatFunction = 'format_%s'%(property)
if hasattr( self, formatFunction ):
args.append( getattr(self,formatFunction)( property, value ))
else:
args.append( '%s = %s'%(
property,
self.shortRepr( value ),
))
return '%s(\n\t%s\n)'%(self.__class__.__name__, ',\n\t'.join(
[x for x in args if x]
))
def __repr__( self ):
"""Produce a much shorter version of the error as a string"""
return '%s( %s )'%(
self.__class__.__name__,
", ".join([x for x in [
'err=%s'%(self.err),
self.format_description( 'description', self.description ) or '',
self.format_baseOperation( 'baseOperation', self.baseOperation ) or '',
] if x])
)
def format_description( self, property, value ):
"""Format description using GLU's gluErrorString"""
if value is None and self.err is not None:
try:
from OpenGL.GLU import gluErrorString
self.description = value = gluErrorString( self.err )
except Exception, err:
return None
if value is None:
return None
return '%s = %s'%(
property,
self.shortRepr( value ),
)
def shortRepr( self, value, firstLevel=True ):
"""Retrieve short representation of the given value"""
if isinstance( value, (list,tuple) ) and value and len(repr(value))>=40:
if isinstance( value, list ):
template = '[\n\t\t%s\n\t]'
else:
template = '(\n\t\t%s,\n\t)'
return template%( ",\n\t\t".join(
[
self.shortRepr(x,False) for x in value
]
))
r = repr( value )
if len(r) < 40:
return r
else:
return r[:37] + '...'
def format_baseOperation( self, property, value ):
"""Format a baseOperation reference for display"""
if hasattr( value, '__name__' ):
return '%s = %s'%( property, value.__name__ )
else:
return '%s = %r'%( property, value )
class GLUError( Error ):
"""GLU error implementation class"""
class GLUTError( Error ):
"""GLUT error implementation class"""
if _configflags.ERROR_CHECKING:
from OpenGL import acceleratesupport
_ErrorChecker = None
if acceleratesupport.ACCELERATE_AVAILABLE:
try:
from OpenGL_accelerate.errorchecker import _ErrorChecker
except ImportError, err:
log.warn( """OpenGL_accelerate seems to be installed, but unable to import error checking entry point!""" )
if _ErrorChecker is None:
class _ErrorChecker( object ):
"""Global error-checking object
Attributes:
_registeredChecker -- the checking function enabled when
not doing onBegin/onEnd processing
safeGetError -- platform safeGetError function as callable method
_currentChecker -- currently active checking function
"""
_currentChecker = _registeredChecker = safeGetError = staticmethod(
platform.safeGetError
)
def glCheckError(
self,
result,
baseOperation=None,
cArguments=None,
*args
):
"""Base GL Error checker compatible with new ctypes errcheck protocol
This function will raise a GLError with just the calling information
available at the C-calling level, i.e. the error code, cArguments,
baseOperation and result. Higher-level code is responsible for any
extra annotations.
Note:
glCheckError relies on glBegin/glEnd interactions to
prevent glGetError being called during a glBegin/glEnd
sequence. If you are calling glBegin/glEnd in C you
should call onBegin and onEnd appropriately.
"""
err = self._currentChecker()
if err: # GL_NO_ERROR's guaranteed value is 0
raise GLError(
err,
result,
cArguments = cArguments,
baseOperation = baseOperation,
)
return result
def nullGetError( self ):
"""Used as error-checker when inside begin/end set"""
return None
def onBegin( self ):
"""Called by glBegin to record the fact that glGetError won't work"""
self._currentChecker = self.nullGetError
def onEnd( self ):
"""Called by glEnd to record the fact that glGetError will work"""
self._currentChecker = self._registeredChecker
ErrorChecker = _ErrorChecker()
else:
ErrorChecker = _ErrorChecker( platform )
glCheckError = ErrorChecker.glCheckError
onBegin = ErrorChecker.onBegin
onEnd = ErrorChecker.onEnd
else:
glCheckError = platform.safeGetError
# Compatibility with PyOpenGL 2.x series
GLUerror = GLUError
GLerror = GLError
GLUTerror = GLUTError
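# A hedged usage sketch (assuming ERROR_CHECKING is enabled and a valid GL
# context exists); glClear and the bogus mask below are illustrative only:
#
#   from OpenGL.GL import glClear
#   from OpenGL.error import GLError
#   try:
#       glClear(0xFFFFFFFF)          # undefined mask bits -> GL_INVALID_VALUE
#   except GLError, err:
#       print err.err, err.baseOperation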
|
|
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
    def __init__(self, message='OAuth error occurred.'):
self.message = message
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.iteritems():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.iteritems()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in params.items()]
        # Sort lexicographically, first by key, then by value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urlparse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
"""A worker to check the validity of a request against a data store."""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, data_store):
self.data_store = data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except OAuthError:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except OAuthError:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
def fetch_access_token(self, oauth_request):
"""Processes an access_token request and returns the
access token on success.
"""
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except OAuthError:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token
def verify_request(self, oauth_request):
"""Verifies an api call and checks all the parameters."""
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# Get the access token.
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
def authorize_token(self, token, user):
"""Authorize a request token."""
return self.data_store.authorize_request_token(token, user)
def get_callback(self, oauth_request):
"""Get the callback URL."""
return oauth_request.get_parameter('oauth_callback')
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, oauth_request):
"""Verify the correct version request for this server."""
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, oauth_request):
"""Figure out the signature with some defaults."""
try:
signature_method = oauth_request.get_parameter(
'oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported; try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
def _get_token(self, oauth_request, token_type='access'):
"""Try to find the token for the provided request token key."""
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _get_verifier(self, oauth_request):
return oauth_request.get_parameter('oauth_verifier')
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# Validate the signature.
valid_sig = signature_method.check_signature(oauth_request, consumer,
token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(
oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = abs(now - timestamp)
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' %
(timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
"""Verify that the nonce is uniqueish."""
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
"""OAuthClient is a worker to attempt to execute a request."""
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def access_resource(self, oauth_request):
"""-> Some protected resource."""
raise NotImplementedError
class OAuthDataStore(object):
"""A database abstraction used to lookup consumers and tokens."""
def lookup_consumer(self, key):
"""-> OAuthConsumer."""
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
"""-> OAuthToken."""
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_request_token(self, oauth_consumer, oauth_callback):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
"""-> OAuthToken."""
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
"""-> OAuthToken."""
raise NotImplementedError
class OAuthSignatureMethod(object):
"""A strategy class that implements a signature method."""
def get_name(self):
"""-> str."""
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
"""-> str key, str raw."""
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
"""-> str."""
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
#print "OAuth base string:" + str(sig)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
"""Builds the base signature string."""
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
print raw
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
"""Concatenates the consumer key and secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key
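if __name__ == '__main__':
    # A minimal, hedged signing sketch; the consumer/token credentials and URL
    # below are placeholders, not real endpoints.
    consumer = OAuthConsumer('consumer-key', 'consumer-secret')
    token = OAuthToken('token-key', 'token-secret')
    request = OAuthRequest.from_consumer_and_token(
        consumer, token=token, http_method='GET',
        http_url='http://example.com/photos', parameters={'size': 'original'})
    request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
    print request.to_header()
    print request.to_url()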
|
|
# -*- coding: utf-8 -*-
'''
Module for handling openstack keystone calls.
:optdepends: - keystoneclient Python adapter
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file::
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
OR (for token based authentication)
keystone.token: 'ADMIN'
keystone.endpoint: 'http://127.0.0.1:35357/v2.0'
If configuration for multiple openstack accounts is required, they can be
set up as different configuration profiles:
For example::
openstack1:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
openstack2:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
With this configuration in place, any of the keystone functions can make use
of a configuration profile by declaring it explicitly.
For example::
salt '*' keystone.tenant_list profile=openstack1
'''
# Import third party libs
HAS_KEYSTONE = False
try:
from keystoneclient.v2_0 import client
import keystoneclient.exceptions
HAS_KEYSTONE = True
except ImportError:
pass
def __virtual__():
'''
Only load this module if keystone
is installed on this minion.
'''
if HAS_KEYSTONE:
return 'keystone'
return False
__opts__ = {}
def auth(profile=None, **connection_args):
'''
Set up keystone credentials
Only intended to be used within Keystone-enabled modules
'''
if profile:
user = __salt__['config.get']('{0}:keystone.user'.format(profile), 'admin')
password = __salt__['config.get']('{0}:keystone.password'.format(profile), 'ADMIN')
tenant = __salt__['config.get']('{0}:keystone.tenant'.format(profile), 'admin')
tenant_id = __salt__['config.get']('{0}:keystone.tenant_id'.format(profile))
auth_url = __salt__['config.get']('{0}:keystone.auth_url'.format(profile),
'http://127.0.0.1:35357/v2.0/')
insecure = __salt__['config.get']('{0}:keystone.insecure'.format(profile), False)
token = __salt__['config.get']('{0}:keystone.token'.format(profile))
endpoint = __salt__['config.get']('{0}:keystone.endpoint'.format(profile),
'http://127.0.0.1:35357/v2.0')
if connection_args:
user = connection_args.get('connection_user', 'admin')
password = connection_args.get('connection_pass', 'ADMIN')
tenant = connection_args.get('connection_tenant', 'admin')
tenant_id = connection_args.get('connection_tenant_id')
auth_url = connection_args.get('connection_auth_url',
'http://127.0.0.1:35357/v2.0/')
token = connection_args.get('connection_token')
insecure = connection_args.get('connection_insecure', False)
endpoint = connection_args.get('connection_endpoint',
'http://127.0.0.1:35357/v2.0/')
else:
user = __salt__['config.get']('keystone.user', 'admin')
password = __salt__['config.get']('keystone.password', 'ADMIN')
tenant = __salt__['config.get']('keystone.tenant', 'admin')
tenant_id = __salt__['config.get']('keystone.tenant_id')
auth_url = __salt__['config.get']('keystone.auth_url',
'http://127.0.0.1:35357/v2.0/')
insecure = __salt__['config.get']('keystone.insecure', False)
token = __salt__['config.get']('keystone.token')
endpoint = __salt__['config.get']('keystone.endpoint',
'http://127.0.0.1:35357/v2.0')
kwargs = {}
if token:
kwargs = {'token': token,
'endpoint': endpoint}
else:
kwargs = {'username': user,
'password': password,
'tenant_name': tenant,
'tenant_id': tenant_id,
'auth_url': auth_url}
# 'insecure' keyword not supported by all v2.0 keystone clients
# this ensures it's only passed in when defined
if insecure:
kwargs['insecure'] = True
return client.Client(**kwargs)
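# Usage note (hedged, values illustrative): any function in this module can
# override the minion/pillar configuration per call through the connection_*
# keyword arguments handled above, e.g.:
#
#   salt '*' keystone.tenant_list connection_user=admin \
#       connection_pass=verybadpass connection_tenant=admin \
#       connection_auth_url=http://127.0.0.1:5000/v2.0/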
def ec2_credentials_create(user_id=None, name=None,
tenant_id=None, tenant=None,
profile=None, **connection_args):
'''
Create EC2-compatible credentials for user per tenant
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_create name=admin tenant=admin
salt '*' keystone.ec2_credentials_create \
user_id=c965f79c4f864eaaa9c3b41904e67082 \
tenant_id=722787eb540849158668370dc627ec5f
'''
kstone = auth(profile, **connection_args)
if name:
user_id = user_get(name=name, profile=profile,
**connection_args)[name]['id']
if not user_id:
return {'Error': 'Could not resolve User ID'}
if tenant:
tenant_id = tenant_get(name=tenant, profile=profile,
**connection_args)[tenant]['id']
if not tenant_id:
return {'Error': 'Could not resolve Tenant ID'}
newec2 = kstone.ec2.create(user_id, tenant_id)
return {'access': newec2.access,
'secret': newec2.secret,
'tenant_id': newec2.tenant_id,
'user_id': newec2.user_id}
def ec2_credentials_delete(user_id=None, name=None, access_key=None,
profile=None, **connection_args):
'''
Delete EC2-compatible credentials
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_delete \
860f8c2c38ca4fab989f9bc56a061a64
access_key=5f66d2f24f604b8bb9cd28886106f442
salt '*' keystone.ec2_credentials_delete name=admin \
access_key=5f66d2f24f604b8bb9cd28886106f442
'''
kstone = auth(profile, **connection_args)
if name:
user_id = user_get(name=name, profile=profile, **connection_args)[name]['id']
if not user_id:
return {'Error': 'Could not resolve User ID'}
kstone.ec2.delete(user_id, access_key)
return 'ec2 key "{0}" deleted under user id "{1}"'.format(access_key,
user_id)
def ec2_credentials_get(user_id=None, name=None, access=None,
profile=None, **connection_args):
'''
Return ec2_credentials for a user (keystone ec2-credentials-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_get c965f79c4f864eaaa9c3b41904e67082 access=722787eb540849158668370dc627ec5f
salt '*' keystone.ec2_credentials_get user_id=c965f79c4f864eaaa9c3b41904e67082 access=722787eb540849158668370dc627ec5f
salt '*' keystone.ec2_credentials_get name=nova access=722787eb540849158668370dc627ec5f
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
if not access:
return {'Error': 'Access key is required'}
ec2_credentials = kstone.ec2.get(user_id=user_id, access=access)
ret[ec2_credentials.user_id] = {'user_id': ec2_credentials.user_id,
'tenant': ec2_credentials.tenant_id,
'access': ec2_credentials.access,
'secret': ec2_credentials.secret}
return ret
def ec2_credentials_list(user_id=None, name=None, profile=None,
**connection_args):
'''
Return a list of ec2_credentials for a specific user (keystone ec2-credentials-list)
CLI Examples:
.. code-block:: bash
salt '*' keystone.ec2_credentials_list 298ce377245c4ec9b70e1c639c89e654
salt '*' keystone.ec2_credentials_list user_id=298ce377245c4ec9b70e1c639c89e654
salt '*' keystone.ec2_credentials_list name=jack
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
for ec2_credential in kstone.ec2.list(user_id):
ret[ec2_credential.user_id] = {'user_id': ec2_credential.user_id,
'tenant_id': ec2_credential.tenant_id,
'access': ec2_credential.access,
'secret': ec2_credential.secret}
return ret
def endpoint_get(service, profile=None, **connection_args):
'''
Return a specific endpoint (keystone endpoint-get)
CLI Example:
.. code-block:: bash
salt '*' keystone.endpoint_get ec2
'''
kstone = auth(profile, **connection_args)
return kstone.service_catalog.url_for(service_type=service)
def endpoint_list(profile=None, **connection_args):
'''
Return a list of available endpoints (keystone endpoints-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.endpoint_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for endpoint in kstone.endpoints.list():
ret[endpoint.id] = {'id': endpoint.id,
'region': endpoint.region,
'adminurl': endpoint.adminurl,
'internalurl': endpoint.internalurl,
'publicurl': endpoint.publicurl,
'service_id': endpoint.service_id}
return ret
def role_create(name, profile=None, **connection_args):
'''
Create named role
.. code-block:: bash
salt '*' keystone.role_create admin
'''
kstone = auth(profile, **connection_args)
if 'Error' not in role_get(name=name, profile=profile, **connection_args):
return {'Error': 'Role "{0}" already exists'.format(name)}
role = kstone.roles.create(name)
return role_get(name=name, profile=profile, **connection_args)
def role_delete(role_id=None, name=None, profile=None,
**connection_args):
'''
Delete a role (keystone role-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.role_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_delete role_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_delete name=admin
'''
kstone = auth(profile, **connection_args)
if name:
for role in kstone.roles.list():
if role.name == name:
role_id = role.id
break
if not role_id:
return {'Error': 'Unable to resolve role id'}
role = role_get(role_id, profile=profile, **connection_args)
kstone.roles.delete(role)
ret = 'Role ID {0} deleted'.format(role_id)
if name:
ret += ' ({0})'.format(name)
return ret
def role_get(role_id=None, name=None, profile=None, **connection_args):
'''
Return a specific role (keystone role-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.role_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_get role_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.role_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for role in kstone.roles.list():
if role.name == name:
role_id = role.id
break
if not role_id:
return {'Error': 'Unable to resolve role id'}
role = kstone.roles.get(role_id)
ret[role.name] = {'id': role.id,
'name': role.name}
return ret
def role_list(profile=None, **connection_args):
'''
Return a list of available roles (keystone role-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.role_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for role in kstone.roles.list():
ret[role.name] = {'id': role.id,
'name': role.name}
return ret
def service_create(name, service_type, description=None, profile=None,
**connection_args):
'''
Add service to Keystone service catalog
CLI Examples:
.. code-block:: bash
salt '*' keystone.service_create nova compute \
'OpenStack Compute Service'
'''
kstone = auth(profile, **connection_args)
service = kstone.services.create(name, service_type, description)
return service_get(service.id, profile=profile, **connection_args)
def service_delete(service_id=None, name=None, profile=None, **connection_args):
'''
Delete a service from Keystone service catalog
CLI Examples:
.. code-block:: bash
salt '*' keystone.service_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.service_delete name=nova
'''
kstone = auth(profile, **connection_args)
if name:
service_id = service_get(name=name, profile=profile,
**connection_args)[name]['id']
service = kstone.services.delete(service_id)
return 'Keystone service ID "{0}" deleted'.format(service_id)
def service_get(service_id=None, name=None, profile=None, **connection_args):
'''
Return a specific service (keystone service-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.service_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.service_get service_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.service_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for service in kstone.services.list():
if service.name == name:
service_id = service.id
break
if not service_id:
return {'Error': 'Unable to resolve service id'}
service = kstone.services.get(service_id)
ret[service.name] = {'id': service.id,
'name': service.name,
'type': service.type,
'description': service.description}
return ret
def service_list(profile=None, **connection_args):
'''
Return a list of available services (keystone services-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.service_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for service in kstone.services.list():
ret[service.name] = {'id': service.id,
'name': service.name,
'description': service.description,
'type': service.type}
return ret
def tenant_create(name, description=None, enabled=True, profile=None,
**connection_args):
'''
Create a keystone tenant
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_create nova description='nova tenant'
salt '*' keystone.tenant_create test enabled=False
'''
kstone = auth(profile, **connection_args)
new = kstone.tenants.create(name, description, enabled)
return tenant_get(new.id, profile=profile, **connection_args)
def tenant_delete(tenant_id=None, name=None, profile=None, **connection_args):
'''
Delete a tenant (keystone tenant-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete name=demo
'''
kstone = auth(profile, **connection_args)
if name:
for tenant in kstone.tenants.list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
kstone.tenants.delete(tenant_id)
ret = 'Tenant ID {0} deleted'.format(tenant_id)
if name:
ret += ' ({0})'.format(name)
return ret
def tenant_get(tenant_id=None, name=None, profile=None,
**connection_args):
'''
Return a specific tenant (keystone tenant-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for tenant in kstone.tenants.list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
tenant = kstone.tenants.get(tenant_id)
ret[tenant.name] = {'id': tenant.id,
'name': tenant.name,
'description': tenant.description,
'enabled': tenant.enabled}
return ret
def tenant_list(profile=None, **connection_args):
'''
Return a list of available tenants (keystone tenants-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.tenant_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for tenant in kstone.tenants.list():
ret[tenant.name] = {'id': tenant.id,
'name': tenant.name,
'description': tenant.description,
'enabled': tenant.enabled}
return ret
def tenant_update(tenant_id=None, name=None, email=None,
enabled=None, profile=None, **connection_args):
'''
Update a tenant's information (keystone tenant-update)
The following fields may be updated: name, email, enabled.
The name can only be updated when the tenant is targeted by ID.
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_update name=admin enabled=True
salt '*' keystone.tenant_update c965f79c4f864eaaa9c3b41904e67082 name=admin [email protected]
'''
kstone = auth(profile, **connection_args)
if not tenant_id:
for tenant in kstone.tenants.list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
tenant = kstone.tenants.get(tenant_id)
if not name:
name = tenant.name
if not email:
email = tenant.email
if enabled is None:
enabled = tenant.enabled
kstone.tenants.update(tenant_id, name, email, enabled)
def token_get(profile=None, **connection_args):
'''
Return the configured token (keystone token-get)
CLI Example:
.. code-block:: bash
salt '*' keystone.token_get
'''
kstone = auth(profile, **connection_args)
token = kstone.service_catalog.get_token()
return {'id': token['id'],
'expires': token['expires'],
'user_id': token['user_id'],
'tenant_id': token['tenant_id']}
def user_list(profile=None, **connection_args):
'''
Return a list of available users (keystone user-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.user_list
'''
kstone = auth(profile, **connection_args)
ret = {}
for user in kstone.users.list():
ret[user.name] = {'id': user.id,
'name': user.name,
'email': user.email,
'enabled': user.enabled,
'tenant_id': user.tenantId}
return ret
def user_get(user_id=None, name=None, profile=None, **connection_args):
'''
Return a specific user (keystone user-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_get user_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
user = kstone.users.get(user_id)
ret[user.name] = {'id': user.id,
'name': user.name,
'email': user.email,
'enabled': user.enabled,
'tenant_id': user.tenantId}
return ret
def user_create(name, password, email, tenant_id=None,
enabled=True, profile=None, **connection_args):
'''
Create a user (keystone user-create)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_create name=jack password=zero [email protected] tenant_id=a28a7b5a999a455f84b1f5210264375e enabled=True
'''
kstone = auth(profile, **connection_args)
item = kstone.users.create(name=name,
password=password,
email=email,
tenant_id=tenant_id,
enabled=enabled)
return user_get(item.id, profile=profile, **connection_args)
def user_delete(user_id=None, name=None, profile=None, **connection_args):
'''
Delete a user (keystone user-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_delete user_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.user_delete name=nova
'''
kstone = auth(profile, **connection_args)
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
kstone.users.delete(user_id)
ret = 'User ID {0} deleted'.format(user_id)
if name:
ret += ' ({0})'.format(name)
return ret
def user_update(user_id=None, name=None, email=None, enabled=None,
tenant=None, profile=None, **connection_args):
'''
Update a user's information (keystone user-update)
The following fields may be updated: name, email, enabled, tenant.
Because the name is one of the fields, a valid user id is required.
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_update user_id=c965f79c4f864eaaa9c3b41904e67082 name=newname
salt '*' keystone.user_update c965f79c4f864eaaa9c3b41904e67082 name=newname [email protected]
'''
kstone = auth(profile, **connection_args)
if not user_id:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
user = kstone.users.get(user_id)
# Keep previous settings if not updating them
if not name:
name = user.name
if not email:
email = user.email
if enabled is None:
enabled = user.enabled
kstone.users.update(user=user_id, name=name, email=email, enabled=enabled)
if tenant:
for t in kstone.tenants.list():
if t.name == tenant:
tenant_id = t.id
break
kstone.users.update_tenant(user_id, tenant_id)
ret = 'Info updated for user ID {0}'.format(user_id)
return ret
def user_verify_password(user_id=None, name=None, password=None,
profile=None, **connection_args):
'''
Verify a user's password
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_verify_password name=test password=foobar
salt '*' keystone.user_verify_password user_id=c965f79c4f864eaaa9c3b41904e67082 password=foobar
'''
kstone = auth(profile, **connection_args)
if 'connection_endpoint' in connection_args:
auth_url = connection_args.get('connection_endpoint')
else:
auth_url = __salt__['config.option']('keystone.endpoint',
'http://127.0.0.1:35357/v2.0')
if user_id:
for user in kstone.users.list():
if user.id == user_id:
name = user.name
break
if not name:
return {'Error': 'Unable to resolve user name'}
kwargs = {'username': name,
'password': password,
'auth_url': auth_url}
try:
userauth = client.Client(**kwargs)
except keystoneclient.exceptions.Unauthorized:
return False
return True
def user_password_update(user_id=None, name=None, password=None,
profile=None, **connection_args):
'''
Update a user's password (keystone user-password-update)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_password_update c965f79c4f864eaaa9c3b41904e67082 password=12345
salt '*' keystone.user_password_update user_id=c965f79c4f864eaaa9c3b41904e67082 password=12345
salt '*' keystone.user_password_update name=nova password=12345
'''
kstone = auth(profile, **connection_args)
if name:
for user in kstone.users.list():
if user.name == name:
user_id = user.id
break
if not user_id:
return {'Error': 'Unable to resolve user id'}
kstone.users.update_password(user=user_id, password=password)
ret = 'Password updated for user ID {0}'.format(user_id)
if name:
ret += ' ({0})'.format(name)
return ret
def user_role_add(user_id=None, user=None, tenant_id=None,
tenant=None, role_id=None, role=None, profile=None,
**connection_args):
'''
Add role for user in tenant (keystone user-role-add)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_role_add \
user_id=298ce377245c4ec9b70e1c639c89e654 \
tenant_id=7167a092ece84bae8cead4bf9d15bb3b \
role_id=ce377245c4ec9b70e1c639c89e8cead4
salt '*' keystone.user_role_add user=admin tenant=admin role=admin
'''
kstone = auth(profile, **connection_args)
if user:
user_id = user_get(name=user, profile=profile,
**connection_args)[user]['id']
else:
user = next(iter(user_get(user_id, profile=profile, **connection_args)))
if not user_id:
return {'Error': 'Unable to resolve user id'}
if tenant:
tenant_id = tenant_get(name=tenant, profile=profile,
**connection_args)[tenant]['id']
else:
tenant = next(iter(tenant_get(tenant_id, profile=profile, **connection_args)))
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
if role:
role_id = role_get(name=role, profile=profile,
**connection_args)[role]['id']
else:
role = next(iter(role_get(role_id, profile=profile, **connection_args)))
if not role_id:
return {'Error': 'Unable to resolve role id'}
kstone.roles.add_user_role(user_id, role_id, tenant_id)
ret_msg = '"{0}" role added for user "{1}" for "{2}" tenant'
return ret_msg.format(role, user, tenant)
def user_role_remove(user_id=None, user=None, tenant_id=None,
tenant=None, role_id=None, role=None,
profile=None, **connection_args):
'''
Remove role for user in tenant (keystone user-role-remove)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_role_remove \
user_id=298ce377245c4ec9b70e1c639c89e654 \
tenant_id=7167a092ece84bae8cead4bf9d15bb3b \
role_id=ce377245c4ec9b70e1c639c89e8cead4
salt '*' keystone.user_role_remove user=admin tenant=admin role=admin
'''
kstone = auth(profile, **connection_args)
if user:
user_id = user_get(name=user, profile=profile,
**connection_args)[user]['id']
else:
user = next(iter(user_get(user_id, profile=profile, **connection_args)))
if not user_id:
return {'Error': 'Unable to resolve user id'}
if tenant:
tenant_id = tenant_get(name=tenant, profile=profile,
**connection_args)[tenant]['id']
else:
tenant = next(iter(tenant_get(tenant_id, profile=profile, **connection_args)))
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
if role:
role_id = role_get(name=role, profile=profile,
**connection_args)[role]['id']
else:
role = next(iter(role_get(role_id, profile=profile, **connection_args)))
if not role_id:
return {'Error': 'Unable to resolve role id'}
kstone.roles.remove_user_role(user_id, role_id, tenant_id)
ret_msg = '"{0}" role removed for user "{1}" under "{2}" tenant'
return ret_msg.format(role, user, tenant)
def user_role_list(user_id=None, tenant_id=None, user_name=None,
tenant_name=None, profile=None, **connection_args):
'''
Return a list of available user_roles (keystone user-roles-list)
CLI Examples:
.. code-block:: bash
salt '*' keystone.user_role_list \
user_id=298ce377245c4ec9b70e1c639c89e654 \
tenant_id=7167a092ece84bae8cead4bf9d15bb3b
salt '*' keystone.user_role_list user_name=admin tenant_name=admin
'''
kstone = auth(profile, **connection_args)
ret = {}
if user_name:
for user in kstone.users.list():
if user.name == user_name:
user_id = user.id
break
if tenant_name:
for tenant in kstone.tenants.list():
if tenant.name == tenant_name:
tenant_id = tenant.id
break
if not user_id or not tenant_id:
return {'Error': 'Unable to resolve user or tenant id'}
for role in kstone.roles.roles_for_user(user=user_id, tenant=tenant_id):
ret[role.name] = {'id': role.id,
'name': role.name,
'user_id': user_id,
'tenant_id': tenant_id}
return ret
def _item_list(profile=None, **connection_args):
'''
Template for writing list functions
Return a list of available items (keystone items-list)
CLI Example:
.. code-block:: bash
salt '*' keystone.item_list
'''
kstone = auth(profile, **connection_args)
ret = []
for item in kstone.items.list():
ret.append(item.__dict__)
#ret[item.name] = {
# 'id': item.id,
# 'name': item.name,
# }
return ret
#The following is a list of functions that need to be incorporated in the
#keystone module. This list should be updated as functions are added.
#
#endpoint-create Create a new endpoint associated with a service
#endpoint-delete Delete a service endpoint
#discover Discover Keystone servers and show authentication
# protocols and
#bootstrap Grants a new role to a new user on a new tenant, after
# creating each.
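# A hedged sketch of the first item on the list above (endpoint-create),
# written in the same style as the rest of this module. It assumes the
# keystoneclient v2.0 ``endpoints.create(region, service_id, publicurl,
# adminurl, internalurl)`` API; treat it as a starting point, not a finished
# implementation.
def _endpoint_create_sketch(service, publicurl=None, internalurl=None,
                            adminurl=None, region=None, profile=None,
                            **connection_args):
    '''
    Create an endpoint for a keystone service (keystone endpoint-create)
    CLI Example:
    .. code-block:: bash
        salt '*' keystone.endpoint_create nova publicurl=http://127.0.0.1:8774/v2 region=RegionOne
    '''
    kstone = auth(profile, **connection_args)
    service_info = service_get(name=service, profile=profile,
                               **connection_args)
    if 'Error' in service_info:
        return service_info
    service_id = service_info[service]['id']
    new = kstone.endpoints.create(region=region, service_id=service_id,
                                  publicurl=publicurl, adminurl=adminurl,
                                  internalurl=internalurl)
    return {'id': new.id,
            'region': new.region,
            'publicurl': new.publicurl,
            'adminurl': new.adminurl,
            'internalurl': new.internalurl,
            'service_id': new.service_id}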
|
|
from __future__ import print_function, division
import numpy as np
import pandas as pd
from os.path import join
import nilmtk
from nilmtk.disaggregate import CombinatorialOptimisation, fhmm_exact
from neuralnilm.metrics import run_metrics, across_all_appliances
import yaml
import matplotlib.pyplot as plt
ukdale = nilmtk.DataSet('/data/mine/vadeec/merged/ukdale.h5')
#ukdale.set_window("2013-04-12", "2014-12-10")
ukdale.set_window("2013-04-12", "2013-05-12")
elec = ukdale.buildings[1].elec
BASE_DIRECTORY = '/home/jack/experiments/neuralnilm/figures/'
PLOT = True
EXPERIMENT_DIRECTORIES = {
'fridge freezer': 'e544a',
'washer dryer': 'e545a',
'kettle': 'e545b',
'HTPC': 'e545c',
'dish washer': 'e545d'
}
APPLIANCES = [
'fridge freezer',
'washer dryer',
'kettle',
'HTPC',
'dish washer'
]
meters = []
for appliance in APPLIANCES:
meter = elec[appliance]
meters.append(meter)
meters = nilmtk.MeterGroup(meters)
scores = {}
MAINS_APPLIANCES = ['washer dryer', 'HTPC', 'dish washer']
mains = np.load(join(BASE_DIRECTORY, 'e545a/mains.npy'))
mains = pd.DataFrame(mains)
fridge_mains = np.load(join(BASE_DIRECTORY, 'e544a/mains.npy'))
fridge_mains = pd.DataFrame(fridge_mains)
kettle_mains = np.load(join(BASE_DIRECTORY, 'e545b/mains.npy'))
kettle_mains = pd.DataFrame(kettle_mains)
def co_disag():
# TRAIN
disag = CombinatorialOptimisation()
disag.train(meters)
# TEST
appliance_powers = disag.disaggregate_chunk(mains)
for i, df in appliance_powers.iteritems():
appliance = disag.model[i]['training_metadata'].dominant_appliance()
appliance_type = appliance.identifier.type
y_pred = df.values
if appliance_type in MAINS_APPLIANCES:
np.save(join(BASE_DIRECTORY, 'CO_' + appliance_type), y_pred)
appliance_powers = disag.disaggregate_chunk(fridge_mains)
for i, df in appliance_powers.iteritems():
appliance = disag.model[i]['training_metadata'].dominant_appliance()
appliance_type = appliance.identifier.type
y_pred = df.values
if appliance_type in ['fridge freezer']:
np.save(join(BASE_DIRECTORY, 'CO_' + appliance_type), y_pred)
appliance_powers = disag.disaggregate_chunk(kettle_mains)
for i, df in appliance_powers.iteritems():
appliance = disag.model[i]['training_metadata'].dominant_appliance()
appliance_type = appliance.identifier.type
y_pred = df.values
if appliance_type in ['kettle']:
np.save(join(BASE_DIRECTORY, 'CO_' + appliance_type), y_pred)
def co_metrics():
scores['CO'] = {}
aggregate_predictions = None
for appliance_type in APPLIANCES:
y_pred = np.load(join(BASE_DIRECTORY, 'CO_' + appliance_type + '.npy'))
y_true_fname = join(
BASE_DIRECTORY, EXPERIMENT_DIRECTORIES[appliance_type],
'targets.npy')
y_true = np.load(y_true_fname)
n = min(len(y_true), len(y_pred))
y_true = y_true[:n]
y_pred = y_pred[:n]
if appliance_type in ['fridge freezer']:
m = fridge_mains
elif appliance_type in ['kettle']:
m = kettle_mains
else:
m = mains
if PLOT:
fig, axes = plt.subplots(nrows=2, sharex=True)
axes[0].plot(y_true[:20000], label='y_true')
axes[0].plot(y_pred[:20000], label='y_pred')
axes[0].set_title(appliance_type)
axes[0].legend()
axes[1].plot(m.values[:20000], label='mains')
axes[1].set_title('Mains')
plt.show(block=True)
scores['CO'][appliance_type] = run_metrics(
y_true, y_pred, m.values)
if aggregate_predictions is None:
aggregate_predictions = y_pred
else:
n_agg = min(len(aggregate_predictions), len(y_pred))
aggregate_predictions = aggregate_predictions[:n_agg]
aggregate_predictions += y_pred[:n_agg]
scores['CO'] = across_all_appliances(
scores['CO'], mains, aggregate_predictions)
def fhmm_disag():
# TRAIN
disag = fhmm_exact.FHMM()
disag.train(meters)
# TEST
appliance_powers = disag.disaggregate_chunk(mains)
for meter, df in appliance_powers.iteritems():
appliance = meter.dominant_appliance()
appliance_type = appliance.identifier.type
if appliance_type in MAINS_APPLIANCES:
y_pred = df.values
np.save(join(BASE_DIRECTORY, 'FHMM_' + appliance_type), y_pred)
appliance_powers = disag.disaggregate_chunk(fridge_mains)
for meter, df in appliance_powers.iteritems():
appliance = meter.dominant_appliance()
appliance_type = appliance.identifier.type
if appliance_type in ['fridge freezer']:
y_pred = df.values
np.save(join(BASE_DIRECTORY, 'FHMM_' + appliance_type), y_pred)
appliance_powers = disag.disaggregate_chunk(kettle_mains)
for meter, df in appliance_powers.iteritems():
appliance = meter.dominant_appliance()
appliance_type = appliance.identifier.type
if appliance_type in ['kettle']:
y_pred = df.values
np.save(join(BASE_DIRECTORY, 'FHMM_' + appliance_type), y_pred)
def fhmm_metrics():
# METRICS
scores['FHMM'] = {}
aggregate_predictions = None
for appliance_type in APPLIANCES:
y_pred = np.load(
join(BASE_DIRECTORY, 'FHMM_' + appliance_type + '.npy'))
y_true_fname = join(
BASE_DIRECTORY, EXPERIMENT_DIRECTORIES[appliance_type],
'targets.npy')
y_true = np.load(y_true_fname)
n = min(len(y_true), len(y_pred))
y_true = y_true[:n]
y_pred = y_pred[:n]
if appliance_type in ['fridge freezer']:
m = fridge_mains
elif appliance_type in ['kettle']:
m = kettle_mains
else:
m = mains
if PLOT:
fig, axes = plt.subplots(nrows=2, sharex=True)
axes[0].plot(y_true[:20000], label='y_true')
axes[0].plot(y_pred[:20000], label='y_pred')
axes[0].set_title(appliance_type)
axes[0].legend()
axes[1].plot(m.values[:20000], label='mains')
axes[1].set_title('Mains')
plt.show(block=True)
scores['FHMM'][appliance_type] = run_metrics(
y_true, y_pred, m.values)
if aggregate_predictions is None:
aggregate_predictions = y_pred
else:
n_agg = min(len(aggregate_predictions), len(y_pred))
aggregate_predictions = aggregate_predictions[:n_agg]
aggregate_predictions += y_pred[:n_agg]
scores['FHMM'] = across_all_appliances(
scores['FHMM'], mains, aggregate_predictions)
def always_off():
# METRICS
scores['Always off'] = {}
aggregate_predictions = None
for appliance in APPLIANCES:
y_true_fname = join(
BASE_DIRECTORY, EXPERIMENT_DIRECTORIES[appliance],
'targets.npy')
y_true = np.load(y_true_fname)
n = len(y_true)
y_pred = np.zeros(n)
scores['Always off'][appliance] = run_metrics(
y_true, y_pred, mains.values)
if aggregate_predictions is None:
aggregate_predictions = y_pred
else:
n_agg = min(len(aggregate_predictions), len(y_pred))
aggregate_predictions = aggregate_predictions[:n_agg]
aggregate_predictions += y_pred[:n_agg]
scores['Always off'] = across_all_appliances(
scores['Always off'], mains, aggregate_predictions)
def mean():
# METRICS
ALGO = 'Mean'
scores[ALGO] = {}
aggregate_predictions = None
for appliance in APPLIANCES:
y_true_fname = join(
BASE_DIRECTORY, EXPERIMENT_DIRECTORIES[appliance],
'targets.npy')
y_true = np.load(y_true_fname)
n = len(y_true)
y_pred = np.zeros(n) + y_true.mean()
print(appliance, y_true.mean())
scores[ALGO][appliance] = run_metrics(
y_true, y_pred, mains.values)
if aggregate_predictions is None:
aggregate_predictions = y_pred
else:
n_agg = min(len(aggregate_predictions), len(y_pred))
aggregate_predictions = aggregate_predictions[:n_agg]
aggregate_predictions += y_pred[:n_agg]
scores[ALGO] = across_all_appliances(
scores[ALGO], mains, aggregate_predictions)
# LOAD YAML IF NECESSARY
with open(join(BASE_DIRECTORY, 'benchmark_scores.yaml'), 'r') as fh:
scores = yaml.load(fh)
# Run
# co_disag()
fhmm_disag()
# co_metrics()
fhmm_metrics()
always_off()
mean()
print()
print(yaml.dump(scores, default_flow_style=False))
metrics_filename = join(BASE_DIRECTORY, 'benchmark_scores.yaml')
print("Saving to", metrics_filename)
with open(metrics_filename, 'w') as fh:
yaml.dump(scores, stream=fh, default_flow_style=False)
|
|
import json
import re
from collections import defaultdict
from datetime import datetime
import yara
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_jsonfield_backport.models import JSONField
import olympia.core.logger
from olympia.amo.models import ModelBase
from olympia.constants.scanners import (
ABORTED,
ABORTING,
ACTIONS,
COMPLETED,
CUSTOMS,
DELAY_AUTO_APPROVAL,
DELAY_AUTO_APPROVAL_INDEFINITELY,
FLAG_FOR_HUMAN_REVIEW,
QUERY_RULE_STATES,
MAD,
NEW,
NO_ACTION,
RESULT_STATES,
RUNNING,
SCANNERS,
SCHEDULED,
UNKNOWN,
WAT,
YARA,
)
from olympia.files.models import FileUpload
from olympia.scanners.actions import (
_delay_auto_approval,
_delay_auto_approval_indefinitely,
_flag_for_human_review,
_flag_for_human_review_by_scanner,
_no_action,
)
log = olympia.core.logger.getLogger('z.scanners.models')
class AbstractScannerResult(ModelBase):
# Store the "raw" results of a scanner.
results = JSONField(default=list)
scanner = models.PositiveSmallIntegerField(choices=SCANNERS.items())
has_matches = models.NullBooleanField()
state = models.PositiveSmallIntegerField(
choices=RESULT_STATES.items(), null=True, blank=True, default=UNKNOWN
)
version = models.ForeignKey(
'versions.Version',
related_name="%(class)ss",
on_delete=models.CASCADE,
null=True,
)
class Meta(ModelBase.Meta):
abstract = True
indexes = [
models.Index(fields=('has_matches',)),
models.Index(fields=('state',)),
]
def add_yara_result(self, rule, tags=None, meta=None):
"""This method is used to store a Yara result."""
self.results.append(
{'rule': rule, 'tags': tags or [], 'meta': meta or {}}
)
def extract_rule_names(self):
"""This method parses the raw results and returns the (matched) rule
names. Not all scanners have rules that necessarily match."""
if self.scanner == YARA:
return sorted({result['rule'] for result in self.results})
if self.scanner == CUSTOMS and 'matchedRules' in self.results:
return self.results['matchedRules']
# We do not have support for the remaining scanners (yet).
return []
def save(self, *args, **kwargs):
rule_model = self._meta.get_field('matched_rules').related_model
matched_rules = rule_model.objects.filter(
scanner=self.scanner,
name__in=self.extract_rule_names(),
# See: https://github.com/mozilla/addons-server/issues/13143
is_active=True,
)
self.has_matches = bool(matched_rules)
# Save the instance first...
super().save(*args, **kwargs)
# ...then add the associated rules.
for scanner_rule in matched_rules:
self.matched_rules.add(scanner_rule)
def get_scanner_name(self):
return SCANNERS.get(self.scanner)
def get_pretty_results(self):
return json.dumps(self.results, indent=2)
def get_files_by_matched_rules(self):
res = defaultdict(list)
if self.scanner == YARA:
for item in self.results:
res[item['rule']].append(item['meta'].get('filename', '???'))
elif self.scanner == CUSTOMS:
scanMap = self.results.get('scanMap', {})
for filename, rules in scanMap.items():
for ruleId, data in rules.items():
if data.get('RULE_HAS_MATCHED', False):
res[ruleId].append(filename)
return res
def can_report_feedback(self):
return self.state == UNKNOWN and self.scanner not in [WAT, MAD]
def can_revert_feedback(self):
return self.state != UNKNOWN and self.scanner not in [WAT, MAD]
def get_git_repository(self):
return {
CUSTOMS: settings.CUSTOMS_GIT_REPOSITORY,
YARA: settings.YARA_GIT_REPOSITORY,
}.get(self.scanner)
@classmethod
def run_action(cls, version):
"""Try to find and execute an action for a given version, based on the
scanner results and associated rules.
If an action is found, it is run synchronously from this method, not in
a task.
"""
log.info('Checking rules and actions for version %s.', version.pk)
try:
mad_result = cls.objects.filter(version=version, scanner=MAD).get()
customs = mad_result.results.get('scanners', {}).get('customs', {})
customs_score = customs.get('score', 0.5)
customs_models_agree = customs.get('result_details', {}).get(
'models_agree', True
)
if (
customs_score <= 0.01 or
customs_score >= 0.99 or
not customs_models_agree
):
log.info('Flagging version %s for human review by MAD.',
version.pk)
_flag_for_human_review_by_scanner(version, MAD)
except cls.DoesNotExist:
log.info('No MAD scanner result for version %s.', version.pk)
rule_model = cls.matched_rules.rel.model
result_query_name = cls._meta.get_field(
'matched_rules'
).related_query_name()
rule = (
rule_model.objects.filter(
**{f'{result_query_name}__version': version, 'is_active': True}
)
.order_by(
# The `-` sign means descending order.
'-action'
)
.first()
)
if not rule:
log.info('No action to execute for version %s.', version.pk)
return
action_id = rule.action
action_name = ACTIONS.get(action_id, None)
if not action_name:
raise Exception("invalid action %s" % action_id)
ACTION_FUNCTIONS = {
NO_ACTION: _no_action,
FLAG_FOR_HUMAN_REVIEW: _flag_for_human_review,
DELAY_AUTO_APPROVAL: _delay_auto_approval,
DELAY_AUTO_APPROVAL_INDEFINITELY: (
_delay_auto_approval_indefinitely
),
}
action_function = ACTION_FUNCTIONS.get(action_id, None)
if not action_function:
raise Exception("no implementation for action %s" % action_id)
# We have a valid action to execute, so let's do it!
log.info(
'Starting action "%s" for version %s.', action_name, version.pk
)
action_function(version)
log.info('Ending action "%s" for version %s.', action_name, version.pk)
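# Hedged usage note: ``run_action`` is a classmethod, so callers typically
# invoke it on a concrete result class for a given version, e.g.
# (illustrative):
#
#   ScannerResult.run_action(version)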
class AbstractScannerRule(ModelBase):
name = models.CharField(
max_length=200,
help_text=_('This is the exact name of the rule used by a scanner.'),
)
scanner = models.PositiveSmallIntegerField(choices=SCANNERS.items())
action = models.PositiveSmallIntegerField(
choices=ACTIONS.items(), default=NO_ACTION
)
is_active = models.BooleanField(
default=True,
help_text=_(
'When unchecked, the scanner results will not be bound to this '
'rule and the action will not be executed.'
),
)
definition = models.TextField(null=True, blank=True)
class Meta(ModelBase.Meta):
abstract = True
unique_together = ('name', 'scanner')
@classmethod
def get_yara_externals(cls):
"""
Return a dict with the various external variables we inject in every
yara rule automatically and their default values.
"""
return {
'is_json_file': False,
'is_manifest_file': False,
'is_locale_file': False,
}
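    # Hedged illustration (not shipped with this code) of how a rule stored in
    # ``definition`` can reference these injected externals; the rule name and
    # condition are made up:
    #
    #   rule flag_large_manifest
    #   {
    #       condition:
    #           is_manifest_file and filesize > 100KB
    #   }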
def __str__(self):
return self.name
def clean(self):
if self.scanner == YARA:
self.clean_yara()
def clean_yara(self):
if not self.definition:
raise ValidationError(
{'definition': _('Yara rules should have a definition')}
)
if 'rule {}'.format(self.name) not in self.definition:
raise ValidationError(
{
'definition': _(
'The name of the rule in the definition should match '
'the name of the scanner rule'
)
}
)
if len(re.findall(r'rule\s+.+?\s+{', self.definition)) > 1:
raise ValidationError(
{
'definition': _(
'Only one Yara rule is allowed in the definition'
)
}
)
try:
yara.compile(
source=self.definition, externals=self.get_yara_externals()
)
except yara.SyntaxError as syntaxError:
raise ValidationError(
{
'definition': _('The definition is not valid: %(error)s')
% {'error': syntaxError}
}
)
except Exception:
raise ValidationError(
{
'definition': _(
'An error occurred when compiling the definition'
)
}
)
class ScannerRule(AbstractScannerRule):
class Meta(AbstractScannerRule.Meta):
db_table = 'scanners_rules'
class ScannerResult(AbstractScannerResult):
upload = models.ForeignKey(
FileUpload,
related_name="%(class)ss", # scannerresults
on_delete=models.SET_NULL,
null=True,
)
matched_rules = models.ManyToManyField(
'ScannerRule', through='ScannerMatch', related_name='results'
)
# The value is a decimal between 0 and 1. `-1` is a special value to
# indicate an error or no score available.
score = models.DecimalField(
null=True, blank=True, max_digits=6, decimal_places=5, default=-1
)
model_version = models.CharField(max_length=30, null=True)
class Meta(AbstractScannerResult.Meta):
db_table = 'scanners_results'
constraints = [
models.UniqueConstraint(
fields=('upload', 'scanner', 'version'),
name='scanners_results_upload_id_scanner_'
'version_id_ad9eb8a6_uniq',
)
]
class ScannerMatch(ModelBase):
result = models.ForeignKey(ScannerResult, on_delete=models.CASCADE)
rule = models.ForeignKey(ScannerRule, on_delete=models.CASCADE)
class ImproperScannerQueryRuleStateError(ValueError):
pass
class ScannerQueryRule(AbstractScannerRule):
scanner = models.PositiveSmallIntegerField(
choices=((YARA, 'yara'),), # For now code search only allows yara.
default=YARA,
)
state = models.PositiveSmallIntegerField(
choices=QUERY_RULE_STATES.items(), default=NEW
)
run_on_disabled_addons = models.BooleanField(
default=False,
help_text=_(
'Run this rule on add-ons that have been '
'force-disabled as well.'
),
)
celery_group_result_id = models.UUIDField(default=None, null=True)
task_count = models.PositiveIntegerField(default=0)
completed = models.DateTimeField(default=None, null=True, blank=True)
class Meta(AbstractScannerRule.Meta):
db_table = 'scanners_query_rules'
def change_state_to(self, target):
"""Immediately change state of the rule in database or raise
ImproperScannerQueryRuleStateError."""
prereqs = {
# New is the default state.
NEW: (),
# Scheduled should only happen through the admin. It's the
# prerequisite to running the task.
SCHEDULED: (NEW,),
# Running should only happen through the task, after we went
# through the admin to schedule the query.
RUNNING: (SCHEDULED,),
# Aborting can happen from various states.
ABORTING: (NEW, SCHEDULED, RUNNING),
# Aborted should only happen after aborting.
ABORTED: (ABORTING,),
# Completed should only happen through the task
COMPLETED: (RUNNING,),
}
if self.state in prereqs[target]:
props = {
'state': target,
}
if target == COMPLETED:
props['completed'] = datetime.now()
self.update(**props)
else:
raise ImproperScannerQueryRuleStateError()
def _get_completed_tasks_count(self):
if self.celery_group_result_id is not None:
from olympia.amo.celery import app as celery_app
result = celery_app.GroupResult.restore(
str(self.celery_group_result_id)
)
if result:
return result.completed_count()
return None
def completion_rate(self):
if self.state == RUNNING:
completed_tasks_count = self._get_completed_tasks_count()
if completed_tasks_count is not None and self.task_count:
rate = (completed_tasks_count / self.task_count) * 100
return '{:.2f}%'.format(rate)
return None
class ScannerQueryResult(AbstractScannerResult):
# Has to be overridden, because the parent refers to ScannerMatch.
matched_rules = models.ManyToManyField(
'ScannerQueryRule', through='ScannerQueryMatch', related_name='results'
)
was_blocked = models.NullBooleanField(default=None)
class Meta(AbstractScannerResult.Meta):
db_table = 'scanners_query_results'
# FIXME indexes, unique constraints ?
class ScannerQueryMatch(ModelBase):
result = models.ForeignKey(ScannerQueryResult, on_delete=models.CASCADE)
rule = models.ForeignKey(ScannerQueryRule, on_delete=models.CASCADE)
|
|
"""The Tasmota integration."""
from __future__ import annotations
import asyncio
import logging
from hatasmota.const import (
CONF_IP,
CONF_MAC,
CONF_MANUFACTURER,
CONF_MODEL,
CONF_NAME,
CONF_SW_VERSION,
)
from hatasmota.discovery import clear_discovery_topic
from hatasmota.models import TasmotaDeviceConfig
from hatasmota.mqtt import TasmotaMQTTClient
import voluptuous as vol
from homeassistant.components import mqtt, websocket_api
from homeassistant.components.mqtt.subscription import (
async_subscribe_topics,
async_unsubscribe_topics,
)
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.helpers.device_registry import (
CONNECTION_NETWORK_MAC,
EVENT_DEVICE_REGISTRY_UPDATED,
DeviceRegistry,
async_entries_for_config_entry,
)
from . import device_automation, discovery
from .const import (
CONF_DISCOVERY_PREFIX,
DATA_REMOVE_DISCOVER_COMPONENT,
DATA_UNSUB,
DOMAIN,
PLATFORMS,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Tasmota from a config entry."""
websocket_api.async_register_command(hass, websocket_remove_device)
hass.data[DATA_UNSUB] = []
def _publish(
topic: str,
payload: mqtt.PublishPayloadType,
qos: int | None = None,
retain: bool | None = None,
) -> None:
mqtt.async_publish(hass, topic, payload, qos, retain)
async def _subscribe_topics(sub_state: dict | None, topics: dict) -> dict:
# Optionally mark message handlers as callback
for topic in topics.values():
if "msg_callback" in topic and "event_loop_safe" in topic:
topic["msg_callback"] = callback(topic["msg_callback"])
return await async_subscribe_topics(hass, sub_state, topics)
async def _unsubscribe_topics(sub_state: dict | None) -> dict:
return await async_unsubscribe_topics(hass, sub_state)
tasmota_mqtt = TasmotaMQTTClient(_publish, _subscribe_topics, _unsubscribe_topics)
device_registry = await hass.helpers.device_registry.async_get_registry()
def async_discover_device(config: TasmotaDeviceConfig, mac: str) -> None:
"""Discover and add a Tasmota device."""
async_setup_device(hass, mac, config, entry, tasmota_mqtt, device_registry)
async def async_device_removed(event: Event) -> None:
"""Handle the removal of a device."""
device_registry = await hass.helpers.device_registry.async_get_registry()
if event.data["action"] != "remove":
return
device = device_registry.deleted_devices[event.data["device_id"]]
if entry.entry_id not in device.config_entries:
return
macs = [c[1] for c in device.connections if c[0] == CONNECTION_NETWORK_MAC]
for mac in macs:
clear_discovery_topic(mac, entry.data[CONF_DISCOVERY_PREFIX], tasmota_mqtt)
hass.data[DATA_UNSUB].append(
hass.bus.async_listen(EVENT_DEVICE_REGISTRY_UPDATED, async_device_removed)
)
async def start_platforms() -> None:
await device_automation.async_setup_entry(hass, entry)
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_setup(entry, platform)
for platform in PLATFORMS
)
)
discovery_prefix = entry.data[CONF_DISCOVERY_PREFIX]
await discovery.async_start(
hass, discovery_prefix, entry, tasmota_mqtt, async_discover_device
)
hass.async_create_task(start_platforms())
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
# cleanup platforms
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if not unload_ok:
return False
# disable discovery
await discovery.async_stop(hass)
# cleanup subscriptions
for unsub in hass.data[DATA_UNSUB]:
unsub()
hass.data.pop(DATA_REMOVE_DISCOVER_COMPONENT.format("device_automation"))()
for platform in PLATFORMS:
hass.data.pop(DATA_REMOVE_DISCOVER_COMPONENT.format(platform))()
# detach device triggers
device_registry = await hass.helpers.device_registry.async_get_registry()
devices = async_entries_for_config_entry(device_registry, entry.entry_id)
for device in devices:
await device_automation.async_remove_automations(hass, device.id)
return True
def _remove_device(
hass: HomeAssistant,
config_entry: ConfigEntry,
mac: str,
tasmota_mqtt: TasmotaMQTTClient,
device_registry: DeviceRegistry,
) -> None:
"""Remove device from device registry."""
device = device_registry.async_get_device(set(), {(CONNECTION_NETWORK_MAC, mac)})
if device is None:
return
_LOGGER.debug("Removing tasmota device %s", mac)
device_registry.async_remove_device(device.id)
clear_discovery_topic(mac, config_entry.data[CONF_DISCOVERY_PREFIX], tasmota_mqtt)
def _update_device(
hass: HomeAssistant,
config_entry: ConfigEntry,
config: TasmotaDeviceConfig,
device_registry: DeviceRegistry,
) -> None:
"""Add or update device registry."""
_LOGGER.debug("Adding or updating tasmota device %s", config[CONF_MAC])
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
configuration_url=f"http://{config[CONF_IP]}/",
connections={(CONNECTION_NETWORK_MAC, config[CONF_MAC])},
manufacturer=config[CONF_MANUFACTURER],
model=config[CONF_MODEL],
name=config[CONF_NAME],
sw_version=config[CONF_SW_VERSION],
)
def async_setup_device(
hass: HomeAssistant,
mac: str,
config: TasmotaDeviceConfig,
config_entry: ConfigEntry,
tasmota_mqtt: TasmotaMQTTClient,
device_registry: DeviceRegistry,
) -> None:
"""Set up the Tasmota device."""
if not config:
_remove_device(hass, config_entry, mac, tasmota_mqtt, device_registry)
else:
_update_device(hass, config_entry, config, device_registry)
@websocket_api.websocket_command(
{vol.Required("type"): "tasmota/device/remove", vol.Required("device_id"): str}
)
@websocket_api.async_response
async def websocket_remove_device(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Delete device."""
device_id = msg["device_id"]
dev_registry = await hass.helpers.device_registry.async_get_registry()
device = dev_registry.async_get(device_id)
if not device:
connection.send_error(
msg["id"], websocket_api.const.ERR_NOT_FOUND, "Device not found"
)
return
for config_entry in device.config_entries:
config_entry = hass.config_entries.async_get_entry(config_entry)
# Only delete the device if it belongs to a Tasmota device entry
if config_entry.domain == DOMAIN:
dev_registry.async_remove_device(device_id)
connection.send_message(websocket_api.result_message(msg["id"]))
return
connection.send_error(
msg["id"], websocket_api.const.ERR_NOT_FOUND, "Non Tasmota device"
)
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input and model functions for serving/inference."""
import six
import tensorflow.compat.v1 as tf
import box_utils
import heads
import mask_rcnn_model
import preprocess_ops
import spatial_transform_ops
def parse_tf_example(tf_example_string):
"""Parse the serialized tf.Example and decode it to the image tensor."""
decoded_tensors = tf.parse_single_example(
serialized=tf_example_string,
features={
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
})
image_bytes = decoded_tensors['image/encoded']
return image_bytes
def decode_image(image_bytes):
"""Decode the image bytes to the image tensor."""
image = tf.image.decode_jpeg(image_bytes)
return image
def convert_image(image):
"""Convert the uint8 image tensor to float32."""
return tf.image.convert_image_dtype(image, dtype=tf.float32)
def preprocess_image(image, desired_image_size, padding_stride):
"""Preprocess a decode image tensor."""
image = preprocess_ops.normalize_image(image)
image, image_info, _, _, _ = preprocess_ops.resize_crop_pad(
image, desired_image_size, padding_stride)
return image, image_info
def image_tensor_input(batch_size,
desired_image_size,
padding_stride):
"""Image tensor input."""
desired_image_height, desired_image_width = desired_image_size
placeholder = tf.placeholder(
dtype=tf.uint8,
shape=(batch_size, desired_image_height, desired_image_width, 3))
def _prepare(image):
return preprocess_image(
convert_image(image), desired_image_size, padding_stride)
if batch_size == 1:
image = tf.squeeze(placeholder, axis=0)
image, image_info = _prepare(image)
images = tf.expand_dims(image, axis=0)
images_info = tf.expand_dims(image_info, axis=0)
else:
images, images_info = tf.map_fn(
_prepare,
placeholder,
back_prop=False,
dtype=(tf.float32, tf.float32))
return placeholder, {'images': images, 'image_info': images_info}
def raw_image_tensor_input(batch_size,
image_size,
padding_stride):
"""Raw float32 image tensor input, no resize is preformed."""
image_height, image_width = image_size
if (image_height % padding_stride != 0 or
image_width % padding_stride != 0):
raise ValueError('Image size is not compatible with the stride.')
placeholder = tf.placeholder(
dtype=tf.float32,
shape=(batch_size, image_height, image_width, 3))
image_info_per_image = [
image_height, image_width, 1.0, image_height, image_width]
if batch_size == 1:
images_info = tf.constant([image_info_per_image], dtype=tf.float32)
else:
images_info = tf.constant(
[image_info_per_image for _ in range(batch_size)],
dtype=tf.float32)
images = placeholder
return placeholder, {'images': images, 'image_info': images_info}
def image_bytes_input(batch_size,
desired_image_size,
padding_stride):
"""Image bytes input."""
placeholder = tf.placeholder(dtype=tf.string, shape=(batch_size,))
def _prepare(image_bytes):
return preprocess_image(
convert_image(
decode_image(image_bytes)),
desired_image_size,
padding_stride)
if batch_size == 1:
image_bytes = tf.squeeze(placeholder, axis=0)
image, image_info = _prepare(image_bytes)
images = tf.expand_dims(image, axis=0)
images_info = tf.expand_dims(image_info, axis=0)
else:
images, images_info = tf.map_fn(
_prepare,
placeholder,
back_prop=False,
dtype=(tf.float32, tf.float32))
return placeholder, {'images': images, 'image_info': images_info}
def tf_example_input(batch_size,
desired_image_size,
padding_stride):
"""tf.Example input."""
placeholder = tf.placeholder(dtype=tf.string, shape=(batch_size,))
def _prepare(tf_example_string):
return preprocess_image(
convert_image(
decode_image(
parse_tf_example(tf_example_string))),
desired_image_size,
padding_stride)
if batch_size == 1:
tf_example_string = tf.squeeze(placeholder, axis=0)
image, image_info = _prepare(tf_example_string)
images = tf.expand_dims(image, axis=0)
images_info = tf.expand_dims(image_info, axis=0)
else:
images, images_info = tf.map_fn(
_prepare,
placeholder,
back_prop=False,
dtype=(tf.float32, tf.float32))
return placeholder, {'images': images, 'image_info': images_info}
def serving_input_fn(batch_size,
desired_image_size,
padding_stride,
input_type,
input_name='input'):
"""Input function for SavedModels and TF serving.
Args:
batch_size: The batch size.
desired_image_size: The tuple/list of two integers, specifying the desired
image size.
padding_stride: The integer used for padding. The image dimensions are
padded to the multiple of this number.
input_type: a string of 'image_tensor', 'image_bytes' or 'tf_example',
specifying which type of input will be used in serving.
input_name: a string to specify the name of the input signature.
Returns:
a `tf.estimator.export.ServingInputReceiver` for a SavedModel.
"""
if input_type == 'image_tensor':
placeholder, features = image_tensor_input(
batch_size, desired_image_size, padding_stride)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors={
input_name: placeholder,
})
elif input_type == 'raw_image_tensor':
placeholder, features = raw_image_tensor_input(
batch_size, desired_image_size, padding_stride)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors={
input_name: placeholder,
})
elif input_type == 'image_bytes':
placeholder, features = image_bytes_input(
batch_size, desired_image_size, padding_stride)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors={
input_name: placeholder,
})
elif input_type == 'tf_example':
placeholder, features = tf_example_input(
batch_size, desired_image_size, padding_stride)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors={
input_name: placeholder,
})
else:
raise NotImplementedError('Unknown input type!')
def serving_model_graph_builder(output_source_id,
output_image_info,
output_box_features,
output_normalized_coordinates,
cast_num_detections_to_float):
"""Serving model graph builder.
Args:
output_source_id: bool, whether output the source_id node.
output_image_info: bool, whether output the image_info node.
output_box_features: bool, whether output the box feature node.
output_normalized_coordinates: bool, whether box outputs are in the
normalized coordinates.
cast_num_detections_to_float: bool, whether to cast the number of
detections to float type.
Returns:
a function that builds the model graph for serving.
"""
def _serving_model_graph(features, params):
"""Build the model graph for serving."""
model_outputs = mask_rcnn_model.build_model_graph(
features, labels=None, is_training=False, params=params)
if cast_num_detections_to_float:
model_outputs['num_detections'] = tf.cast(
model_outputs['num_detections'], dtype=tf.float32)
if output_source_id:
model_outputs.update({
'source_id': features['source_id'],
})
if output_image_info:
model_outputs.update({
'image_info': features['image_info'],
})
final_boxes = model_outputs['detection_boxes']
if output_box_features:
final_box_rois = model_outputs['detection_boxes']
final_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
model_outputs['fpn_features'], final_box_rois, output_size=7)
class_outputs, _, final_box_features = heads.box_head(
final_roi_features, num_classes=params['num_classes'],
mlp_head_dim=params['fast_rcnn_mlp_head_dim'])
model_outputs.update({
'detection_logits': class_outputs,
'detection_features': final_box_features,
})
if output_normalized_coordinates:
model_outputs['detection_boxes'] = box_utils.to_normalized_coordinates(
final_boxes,
tf.expand_dims(features['image_info'][:, 0], axis=-1),
tf.expand_dims(features['image_info'][:, 1], axis=-1))
return model_outputs
def _serving_model_graph_wrapper(features, params):
"""Builds the model graph with outputs casted to bfloat16 if nessarary."""
if params['precision'] == 'bfloat16':
with tf.tpu.bfloat16_scope():
model_outputs = _serving_model_graph(features, params)
def _cast_outputs_to_float(d):
for k, v in sorted(six.iteritems(d)):
if isinstance(v, dict):
_cast_outputs_to_float(v)
else:
d[k] = tf.cast(v, tf.float32)
_cast_outputs_to_float(model_outputs)
else:
model_outputs = _serving_model_graph(features, params)
return model_outputs
return _serving_model_graph_wrapper
def serving_model_fn_builder(output_source_id,
output_image_info,
output_box_features,
output_normalized_coordinates,
cast_num_detections_to_float):
"""Serving model_fn builder.
Args:
    output_source_id: bool, whether to output the source_id node.
    output_image_info: bool, whether to output the image_info node.
    output_box_features: bool, whether to output the box feature node.
    output_normalized_coordinates: bool, whether box outputs are in
      normalized coordinates.
cast_num_detections_to_float: bool, whether to cast the number of
detections to float type.
Returns:
a function that returns (TPU)EstimatorSpec for PREDICT mode.
"""
def _serving_model_fn(features, labels, mode, params):
"""Builds the serving model_fn."""
del labels # unused.
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError('To build the serving model_fn, set '
'mode = `tf.estimator.ModeKeys.PREDICT`')
serving_model_graph = serving_model_graph_builder(
output_source_id,
output_image_info,
output_box_features,
output_normalized_coordinates,
cast_num_detections_to_float)
model_outputs = serving_model_graph(features, params)
predictions = {
'num_detections': tf.identity(
model_outputs['num_detections'], 'NumDetections'),
'detection_boxes': tf.identity(
model_outputs['detection_boxes'], 'DetectionBoxes'),
'detection_classes': tf.identity(
model_outputs['detection_classes'], 'DetectionClasses'),
'detection_scores': tf.identity(
model_outputs['detection_scores'], 'DetectionScores'),
}
if params['include_mask']:
predictions.update({
'detection_masks': tf.identity(
model_outputs['detection_masks'], 'DetectionMasks')
})
if output_source_id:
predictions['source_id'] = tf.identity(
model_outputs['source_id'], 'SourceId')
if output_image_info:
predictions['image_info'] = tf.identity(
model_outputs['image_info'], 'ImageInfo')
if output_box_features:
predictions['detection_logits'] = tf.identity(
model_outputs['detection_logits'], 'DetectionLogits')
predictions['detection_features'] = tf.identity(
model_outputs['detection_features'], 'DetectionFeatures')
if params['use_tpu']:
return tf.estimator.tpu.TPUEstimatorSpec(mode=mode,
predictions=predictions)
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
return _serving_model_fn
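# --- Illustrative export sketch (not part of the original module). ---
# A minimal, hedged example of how the model_fn builder above could be wired into a
# SavedModel export. `make_serving_input_receiver_fn` stands in for the serving-input
# builder defined earlier in this module (its exact name and signature are assumed),
# the output flags are illustrative only, and `params` is assumed to be the same dict
# used at training time (with 'use_tpu' typically False for a CPU/GPU export).
def example_export_saved_model(export_dir, model_dir, params,
                               make_serving_input_receiver_fn):
  """Hedged sketch: exports a SavedModel using the PREDICT-mode model_fn above."""
  serving_model_fn = serving_model_fn_builder(
      output_source_id=True,
      output_image_info=True,
      output_box_features=False,
      output_normalized_coordinates=False,
      cast_num_detections_to_float=True)
  estimator = tf.estimator.Estimator(
      model_fn=serving_model_fn, model_dir=model_dir, params=params)
  # export_saved_model restores the latest checkpoint in `model_dir` and writes a
  # timestamped SavedModel under `export_dir`.
  return estimator.export_saved_model(export_dir, make_serving_input_receiver_fn)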
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BotsOperations:
"""BotsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.healthbot.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
bot_name: str,
parameters: "_models.HealthBot",
**kwargs
) -> "_models.HealthBot":
cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthBot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-08"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'botName': self._serialize.url("bot_name", bot_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'HealthBot')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('HealthBot', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('HealthBot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthBot/healthBots/{botName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
bot_name: str,
parameters: "_models.HealthBot",
**kwargs
) -> AsyncLROPoller["_models.HealthBot"]:
"""Create a new HealthBot.
:param resource_group_name: The name of the Bot resource group in the user subscription.
:type resource_group_name: str
:param bot_name: The name of the Bot resource.
:type bot_name: str
:param parameters: The parameters to provide for the created bot.
:type parameters: ~azure.mgmt.healthbot.models.HealthBot
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either HealthBot or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.healthbot.models.HealthBot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthBot"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
bot_name=bot_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('HealthBot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'botName': self._serialize.url("bot_name", bot_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthBot/healthBots/{botName}'} # type: ignore
async def get(
self,
resource_group_name: str,
bot_name: str,
**kwargs
) -> "_models.HealthBot":
"""Get a HealthBot.
:param resource_group_name: The name of the Bot resource group in the user subscription.
:type resource_group_name: str
:param bot_name: The name of the Bot resource.
:type bot_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HealthBot, or the result of cls(response)
:rtype: ~azure.mgmt.healthbot.models.HealthBot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthBot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-08"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'botName': self._serialize.url("bot_name", bot_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HealthBot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthBot/healthBots/{botName}'} # type: ignore
async def update(
self,
resource_group_name: str,
bot_name: str,
parameters: "_models.HealthBotUpdateParameters",
**kwargs
) -> "_models.HealthBot":
"""Patch a HealthBot.
:param resource_group_name: The name of the Bot resource group in the user subscription.
:type resource_group_name: str
:param bot_name: The name of the Bot resource.
:type bot_name: str
:param parameters: The parameters to provide for the required bot.
:type parameters: ~azure.mgmt.healthbot.models.HealthBotUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HealthBot, or the result of cls(response)
:rtype: ~azure.mgmt.healthbot.models.HealthBot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthBot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-08"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'botName': self._serialize.url("bot_name", bot_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'HealthBotUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('HealthBot', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('HealthBot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthBot/healthBots/{botName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
bot_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-08"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'botName': self._serialize.url("bot_name", bot_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthBot/healthBots/{botName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
bot_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete a HealthBot.
:param resource_group_name: The name of the Bot resource group in the user subscription.
:type resource_group_name: str
:param bot_name: The name of the Bot resource.
:type bot_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
bot_name=bot_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'botName': self._serialize.url("bot_name", bot_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthBot/healthBots/{botName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.BotResponseList"]:
"""Returns all the resources of a particular type belonging to a resource group.
:param resource_group_name: The name of the Bot resource group in the user subscription.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either BotResponseList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.healthbot.models.BotResponseList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BotResponseList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-08"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('BotResponseList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthBot/healthBots'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.BotResponseList"]:
"""Returns all the resources of a particular type belonging to a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either BotResponseList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.healthbot.models.BotResponseList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BotResponseList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-08"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('BotResponseList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HealthBot/healthBots'} # type: ignore
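# --- Illustrative usage sketch (not part of the generated code). ---
# A hedged example of how the async operations above might be driven from a service
# client. The client class/module names (`azure.mgmt.healthbot.aio.HealthbotClient`),
# the `bots` attribute, and the HealthBot/Sku field values are assumptions for
# illustration only.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.healthbot import models
#     from azure.mgmt.healthbot.aio import HealthbotClient   # class name assumed
#
#     async def example(subscription_id: str) -> None:
#         credential = DefaultAzureCredential()
#         client = HealthbotClient(credential, subscription_id)
#         poller = await client.bots.begin_create(
#             resource_group_name="my-rg",
#             bot_name="my-bot",
#             parameters=models.HealthBot(location="eastus", sku=models.Sku(name="F0")),
#         )
#         bot = await poller.result()
#         async for item in client.bots.list_by_resource_group("my-rg"):
#             print(item.name)
#         await client.close()
#         await credential.close()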
|
|
"""Defines the unit tests for the :mod:`colour.plotting.diagrams` module."""
import unittest
from matplotlib.pyplot import Axes, Figure
from colour.colorimetry import (
MSDS_CMFS,
SDS_ILLUMINANTS,
SpectralShape,
reshape_msds,
)
from colour.plotting import (
plot_chromaticity_diagram_CIE1931,
plot_chromaticity_diagram_CIE1960UCS,
plot_chromaticity_diagram_CIE1976UCS,
plot_sds_in_chromaticity_diagram_CIE1931,
plot_sds_in_chromaticity_diagram_CIE1960UCS,
plot_sds_in_chromaticity_diagram_CIE1976UCS,
)
from colour.plotting.diagrams import (
plot_spectral_locus,
plot_chromaticity_diagram_colours,
plot_chromaticity_diagram,
plot_sds_in_chromaticity_diagram,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"TestPlotSpectralLocus",
"TestPlotChromaticityDiagramColours",
"TestPlotChromaticityDiagram",
"TestPlotChromaticityDiagramCIE1931",
"TestPlotChromaticityDiagramCIE1960UCS",
"TestPlotChromaticityDiagramCIE1976UCS",
"TestPlotSdsInChromaticityDiagram",
"TestPlotSdsInChromaticityDiagramCIE1931",
"TestPlotSdsInChromaticityDiagramCIE1960UCS",
"TestPlotSdsInChromaticityDiagramCIE1976UCS",
]
class TestPlotSpectralLocus(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.plot_spectral_locus` definition
unit tests methods.
"""
def test_plot_spectral_locus(self):
"""Test :func:`colour.plotting.diagrams.plot_spectral_locus` definition."""
figure, axes = plot_spectral_locus()
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_spectral_locus(spectral_locus_colours="RGB")
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_spectral_locus(
method="CIE 1960 UCS", spectral_locus_colours="RGB"
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_spectral_locus(
method="CIE 1976 UCS", spectral_locus_colours="RGB"
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
# pylint: disable=E1102
figure, axes = plot_spectral_locus(
reshape_msds(
MSDS_CMFS["CIE 1931 2 Degree Standard Observer"],
SpectralShape(400, 700, 10),
)
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
self.assertRaises(
ValueError, lambda: plot_spectral_locus(method="Undefined")
)
class TestPlotChromaticityDiagramColours(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.plot_chromaticity_diagram_colours`
definition unit tests methods.
"""
def test_plot_chromaticity_diagram_colours(self):
"""
Test :func:`colour.plotting.diagrams.plot_chromaticity_diagram_colours`
definition.
"""
figure, axes = plot_chromaticity_diagram_colours()
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
self.assertRaises(
ValueError,
lambda: plot_chromaticity_diagram_colours(method="Undefined"),
)
figure, axes = plot_chromaticity_diagram_colours(diagram_colours="RGB")
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotChromaticityDiagram(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.plot_chromaticity_diagram`
definition unit tests methods.
"""
def test_plot_chromaticity_diagram(self):
"""
Test :func:`colour.plotting.diagrams.plot_chromaticity_diagram`
definition.
"""
figure, axes = plot_chromaticity_diagram()
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_chromaticity_diagram(method="CIE 1960 UCS")
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_chromaticity_diagram(method="CIE 1976 UCS")
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
self.assertRaises(
ValueError,
lambda: plot_chromaticity_diagram(
method="Undefined",
show_diagram_colours=False,
show_spectral_locus=False,
),
)
class TestPlotChromaticityDiagramCIE1931(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.plot_chromaticity_diagram_CIE1931`
definition unit tests methods.
"""
def test_plot_chromaticity_diagram_CIE1931(self):
"""
Test :func:`colour.plotting.diagrams.\
plot_chromaticity_diagram_CIE1931` definition.
"""
figure, axes = plot_chromaticity_diagram_CIE1931()
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotChromaticityDiagramCIE1960UCS(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.\
plot_chromaticity_diagram_CIE1960UCS` definition unit tests methods.
"""
def test_plot_chromaticity_diagram_CIE1960UCS(self):
"""
Test :func:`colour.plotting.diagrams.\
plot_chromaticity_diagram_CIE1960UCS` definition.
"""
figure, axes = plot_chromaticity_diagram_CIE1960UCS()
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotChromaticityDiagramCIE1976UCS(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.\
plot_chromaticity_diagram_CIE1976UCS` definition unit tests methods.
"""
def test_plot_chromaticity_diagram_CIE1976UCS(self):
"""
Test :func:`colour.plotting.diagrams.\
plot_chromaticity_diagram_CIE1976UCS` definition.
"""
figure, axes = plot_chromaticity_diagram_CIE1976UCS()
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSdsInChromaticityDiagram(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.\
plot_sds_in_chromaticity_diagram` definition unit tests methods.
"""
def test_plot_sds_in_chromaticity_diagram(self):
"""
Test :func:`colour.plotting.diagrams.plot_sds_in_chromaticity_diagram`
definition.
"""
figure, axes = plot_sds_in_chromaticity_diagram(
[SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]],
annotate_kwargs={"arrowprops": {"width": 10}},
plot_kwargs={"normalise_sd_colours": True, "use_sd_colours": True},
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_sds_in_chromaticity_diagram(
[SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]],
annotate_kwargs=[{"arrowprops": {"width": 10}}] * 2,
plot_kwargs=[
{"normalise_sd_colours": True, "use_sd_colours": True}
]
* 2,
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
self.assertRaises(
ValueError,
lambda: plot_sds_in_chromaticity_diagram(
[SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]],
chromaticity_diagram_callable=lambda **x: x,
method="Undefined",
),
)
class TestPlotSdsInChromaticityDiagramCIE1931(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.\
plot_sds_in_chromaticity_diagram_CIE1931` definition unit tests methods.
"""
def test_plot_sds_in_chromaticity_diagram_CIE1931(self):
"""
Test :func:`colour.plotting.diagrams.\
plot_sds_in_chromaticity_diagram_CIE1931` definition.
"""
figure, axes = plot_sds_in_chromaticity_diagram_CIE1931(
[SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]]
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSdsInChromaticityDiagramCIE1960UCS(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.\
plot_sds_in_chromaticity_diagram_CIE1960UCS` definition unit tests methods.
"""
def test_plot_sds_in_chromaticity_diagram_CIE1960UCS(self):
"""
Test :func:`colour.plotting.diagrams.\
plot_sds_in_chromaticity_diagram_CIE1960UCS` definition.
"""
figure, axes = plot_sds_in_chromaticity_diagram_CIE1960UCS(
[SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]]
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSdsInChromaticityDiagramCIE1976UCS(unittest.TestCase):
"""
Define :func:`colour.plotting.diagrams.\
plot_sds_in_chromaticity_diagram_CIE1976UCS` definition unit tests methods.
"""
def test_plot_sds_in_chromaticity_diagram_CIE1976UCS(self):
"""
Test :func:`colour.plotting.diagrams.\
plot_sds_in_chromaticity_diagram_CIE1976UCS` definition.
"""
figure, axes = plot_sds_in_chromaticity_diagram_CIE1976UCS(
[SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]]
)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import PY3, range, u
import pandas as pd
from pandas import Float64Index, Index, Int64Index, RangeIndex, Series
import pandas.util.testing as tm
from .test_numeric import Numeric
class TestRangeIndex(Numeric):
_holder = RangeIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
self.indices = dict(index=RangeIndex(0, 20, 2, name='foo'),
index_dec=RangeIndex(18, -1, -2, name='bar'))
self.setup_indices()
def create_index(self):
return RangeIndex(5)
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self):
with pytest.raises(ValueError, match="^Length"):
self.index.names = ["roger", "harold"]
def test_constructor(self):
index = RangeIndex(5)
expected = np.arange(5, dtype=np.int64)
assert isinstance(index, RangeIndex)
assert index._start == 0
assert index._stop == 5
assert index._step == 1
assert index.name is None
tm.assert_index_equal(Index(expected), index)
index = RangeIndex(1, 5)
expected = np.arange(1, 5, dtype=np.int64)
assert isinstance(index, RangeIndex)
assert index._start == 1
tm.assert_index_equal(Index(expected), index)
index = RangeIndex(1, 5, 2)
expected = np.arange(1, 5, 2, dtype=np.int64)
assert isinstance(index, RangeIndex)
assert index._step == 2
tm.assert_index_equal(Index(expected), index)
for index in [RangeIndex(0), RangeIndex(start=0), RangeIndex(stop=0),
RangeIndex(0, 0)]:
expected = np.empty(0, dtype=np.int64)
assert isinstance(index, RangeIndex)
assert index._start == 0
assert index._stop == 0
assert index._step == 1
tm.assert_index_equal(Index(expected), index)
for index in [RangeIndex(0, name='Foo'),
RangeIndex(start=0, name='Foo'),
RangeIndex(stop=0, name='Foo'),
RangeIndex(0, 0, name='Foo')]:
assert isinstance(index, RangeIndex)
assert index.name == 'Foo'
        # we don't allow this on a bare Index
with pytest.raises(TypeError):
Index(0, 1000)
def test_constructor_invalid_args(self):
msg = "RangeIndex\\(\\.\\.\\.\\) must be called with integers"
with pytest.raises(TypeError, match=msg):
RangeIndex()
with pytest.raises(TypeError, match=msg):
RangeIndex(name='Foo')
# invalid args
for i in [Index(['a', 'b']), Series(['a', 'b']), np.array(['a', 'b']),
[], 'foo', datetime(2000, 1, 1, 0, 0), np.arange(0, 10),
np.array([1]), [1]]:
with pytest.raises(TypeError):
RangeIndex(i)
def test_constructor_same(self):
        # pass through, with and without copy
index = RangeIndex(1, 5, 2)
result = RangeIndex(index, copy=False)
assert result.identical(index)
result = RangeIndex(index, copy=True)
tm.assert_index_equal(result, index, exact=True)
result = RangeIndex(index)
tm.assert_index_equal(result, index, exact=True)
with pytest.raises(TypeError):
RangeIndex(index, dtype='float64')
def test_constructor_range(self):
with pytest.raises(TypeError):
RangeIndex(range(1, 5, 2))
result = RangeIndex.from_range(range(1, 5, 2))
expected = RangeIndex(1, 5, 2)
tm.assert_index_equal(result, expected, exact=True)
result = RangeIndex.from_range(range(5, 6))
expected = RangeIndex(5, 6, 1)
tm.assert_index_equal(result, expected, exact=True)
# an invalid range
result = RangeIndex.from_range(range(5, 1))
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected, exact=True)
result = RangeIndex.from_range(range(5))
expected = RangeIndex(0, 5, 1)
tm.assert_index_equal(result, expected, exact=True)
result = Index(range(1, 5, 2))
expected = RangeIndex(1, 5, 2)
tm.assert_index_equal(result, expected, exact=True)
with pytest.raises(TypeError):
Index(range(1, 5, 2), dtype='float64')
def test_constructor_name(self):
# GH12288
orig = RangeIndex(10)
orig.name = 'original'
copy = RangeIndex(orig)
copy.name = 'copy'
assert orig.name == 'original'
assert copy.name == 'copy'
new = Index(copy)
assert new.name == 'copy'
new.name = 'new'
assert orig.name == 'original'
assert copy.name == 'copy'
assert new.name == 'new'
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = RangeIndex(1, 5)
assert index.values.dtype == np.int64
tm.assert_index_equal(index, Index(arr))
# non-int raise Exception
with pytest.raises(TypeError):
RangeIndex('1', '10', '1')
with pytest.raises(TypeError):
RangeIndex(1.1, 10.2, 1.3)
# invalid passed type
with pytest.raises(TypeError):
RangeIndex(1, 5, dtype='float64')
def test_copy(self):
i = RangeIndex(5, name='Foo')
i_copy = i.copy()
assert i_copy is not i
assert i_copy.identical(i)
assert i_copy._start == 0
assert i_copy._stop == 5
assert i_copy._step == 1
assert i_copy.name == 'Foo'
def test_repr(self):
i = RangeIndex(5, name='Foo')
result = repr(i)
if PY3:
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
else:
expected = "RangeIndex(start=0, stop=5, step=1, name=u'Foo')"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
i = RangeIndex(5, 0, -1)
result = repr(i)
expected = "RangeIndex(start=5, stop=0, step=-1)"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
def test_insert(self):
idx = RangeIndex(5, name='Foo')
result = idx[1:4]
# test 0th element
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))
        # GH 18295 (test inserting missing values)
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
for na in (np.nan, pd.NaT, None):
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = RangeIndex(5, name='Foo')
expected = idx[1:].astype(int)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = idx[:-1].astype(int)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
result = idx.delete(len(idx))
def test_view(self):
i = RangeIndex(0, name='Foo')
i_view = i.view()
assert i_view.name == 'Foo'
i_view = i.view('i8')
tm.assert_numpy_array_equal(i.values, i_view)
i_view = i.view(RangeIndex)
tm.assert_index_equal(i, i_view)
def test_dtype(self):
assert self.index.dtype == np.int64
def test_is_monotonic(self):
assert self.index.is_monotonic is True
assert self.index.is_monotonic_increasing is True
assert self.index.is_monotonic_decreasing is False
assert self.index._is_strictly_monotonic_increasing is True
assert self.index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(2, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_equals_range(self):
equiv_pairs = [(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
(RangeIndex(0), RangeIndex(1, -1, 3)),
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2))]
for left, right in equiv_pairs:
assert left.equals(right)
assert right.equals(left)
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self):
i = Index(self.index.copy())
assert i.identical(self.index)
# we don't allow object dtype for RangeIndex
if isinstance(self.index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
assert same_values.identical(self.index.copy(dtype=object))
assert not i.identical(self.index)
assert Index(same_values, name='foo', dtype=object).identical(i)
assert not self.index.copy(dtype=object).identical(
self.index.copy(dtype='int64'))
def test_get_indexer(self):
target = RangeIndex(10)
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = RangeIndex(10)
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = RangeIndex(10)
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
# join with Int64Index
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
tm.assert_index_equal(res, noidx_res)
eres = Int64Index([0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25])
elidx = np.array([0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9,
-1, -1, -1, -1, -1, -1, -1], dtype=np.intp)
eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1, 0], dtype=np.intp)
assert isinstance(res, Int64Index)
assert not isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# join with RangeIndex
other = RangeIndex(25, 14, -1)
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
tm.assert_index_equal(res, noidx_res)
assert isinstance(res, Int64Index)
assert not isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
# Join with non-RangeIndex
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([16, 18])
elidx = np.array([8, 9], dtype=np.intp)
eridx = np.array([9, 7], dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# Join two RangeIndex
other = RangeIndex(25, 14, -1)
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
# Join with Int64Index
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
        # Join with RangeIndex
        other = RangeIndex(25, 14, -1)
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
assert isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self):
# Join with Int64Index
other = Int64Index(np.arange(25, 14, -1))
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1],
dtype=np.intp)
assert isinstance(other, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
        # Join with RangeIndex
other = RangeIndex(25, 14, -1)
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
assert isinstance(other, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10])
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
left = self.index.join(other, how='left')
tm.assert_index_equal(left, self.index.astype(object))
left2 = other.join(self.index, how='left')
tm.assert_index_equal(left2, other)
right = self.index.join(other, how='right')
tm.assert_index_equal(right, other)
right2 = other.join(self.index, how='right')
tm.assert_index_equal(right2, self.index.astype(object))
def test_join_non_unique(self):
other = Index([4, 4, 3, 3])
res, lidx, ridx = self.index.join(other, return_indexers=True)
eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp)
eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1],
dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
assert self.index is joined
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, sort):
# intersect with Int64Index
other = Index(np.arange(1, 6))
result = self.index.intersection(other, sort=sort)
expected = Index(np.sort(np.intersect1d(self.index.values,
other.values)))
tm.assert_index_equal(result, expected)
result = other.intersection(self.index, sort=sort)
expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,
other.values))))
tm.assert_index_equal(result, expected)
# intersect with increasing RangeIndex
other = RangeIndex(1, 6)
result = self.index.intersection(other, sort=sort)
expected = Index(np.sort(np.intersect1d(self.index.values,
other.values)))
tm.assert_index_equal(result, expected)
# intersect with decreasing RangeIndex
other = RangeIndex(5, 0, -1)
result = self.index.intersection(other, sort=sort)
expected = Index(np.sort(np.intersect1d(self.index.values,
other.values)))
tm.assert_index_equal(result, expected)
# reversed (GH 17296)
result = other.intersection(self.index, sort=sort)
tm.assert_index_equal(result, expected)
# GH 17296: intersect two decreasing RangeIndexes
first = RangeIndex(10, -2, -2)
other = RangeIndex(5, -4, -1)
expected = first.astype(int).intersection(other.astype(int), sort=sort)
result = first.intersection(other, sort=sort).astype(int)
tm.assert_index_equal(result, expected)
# reversed
result = other.intersection(first, sort=sort).astype(int)
tm.assert_index_equal(result, expected)
index = RangeIndex(5)
# intersect of non-overlapping indices
other = RangeIndex(5, 10, 1)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
other = RangeIndex(-1, -5, -1)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
# intersection of empty indices
other = RangeIndex(0, 0, 1)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
result = other.intersection(index, sort=sort)
tm.assert_index_equal(result, expected)
# intersection of non-overlapping values based on start value and gcd
index = RangeIndex(1, 10, 2)
other = RangeIndex(0, 10, 4)
result = index.intersection(other, sort=sort)
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = Index(np.concatenate((self.index, other)))
tm.assert_index_equal(result, expected)
result = other.union(self.index)
expected = Index(np.concatenate((other, self.index)))
tm.assert_index_equal(result, expected)
def test_union(self):
RI = RangeIndex
I64 = Int64Index
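        # Each case is (left, right, expected): a union of two RangeIndexes stays a
        # RangeIndex when the combined values form a single arithmetic progression,
        # otherwise the result falls back to Int64Index.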
cases = [(RI(0, 10, 1), RI(0, 10, 1), RI(0, 10, 1)),
(RI(0, 10, 1), RI(5, 20, 1), RI(0, 20, 1)),
(RI(0, 10, 1), RI(10, 20, 1), RI(0, 20, 1)),
(RI(0, -10, -1), RI(0, -10, -1), RI(0, -10, -1)),
(RI(0, -10, -1), RI(-10, -20, -1), RI(-19, 1, 1)),
(RI(0, 10, 2), RI(1, 10, 2), RI(0, 10, 1)),
(RI(0, 11, 2), RI(1, 12, 2), RI(0, 12, 1)),
(RI(0, 21, 4), RI(-2, 24, 4), RI(-2, 24, 2)),
(RI(0, -20, -2), RI(-1, -21, -2), RI(-19, 1, 1)),
(RI(0, 100, 5), RI(0, 100, 20), RI(0, 100, 5)),
(RI(0, -100, -5), RI(5, -100, -20), RI(-95, 10, 5)),
(RI(0, -11, -1), RI(1, -12, -4), RI(-11, 2, 1)),
(RI(0), RI(0), RI(0)),
(RI(0, -10, -2), RI(0), RI(0, -10, -2)),
(RI(0, 100, 2), RI(100, 150, 200), RI(0, 102, 2)),
(RI(0, -100, -2), RI(-100, 50, 102), RI(-100, 4, 2)),
(RI(0, -100, -1), RI(0, -50, -3), RI(-99, 1, 1)),
(RI(0, 1, 1), RI(5, 6, 10), RI(0, 6, 5)),
(RI(0, 10, 5), RI(-5, -6, -20), RI(-5, 10, 5)),
(RI(0, 3, 1), RI(4, 5, 1), I64([0, 1, 2, 4])),
(RI(0, 10, 1), I64([]), RI(0, 10, 1)),
(RI(0), I64([1, 5, 6]), I64([1, 5, 6]))]
for idx1, idx2, expected in cases:
res1 = idx1.union(idx2)
res2 = idx2.union(idx1)
res3 = idx1._int64index.union(idx2)
tm.assert_index_equal(res1, expected, exact=True)
tm.assert_index_equal(res2, expected, exact=True)
tm.assert_index_equal(res3, expected)
def test_nbytes(self):
# memory savings vs int index
i = RangeIndex(0, 1000)
assert i.nbytes < i._int64index.nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
assert i.nbytes == i2.nbytes
def test_cant_or_shouldnt_cast(self):
# can't
with pytest.raises(TypeError):
RangeIndex('foo', 'bar', 'baz')
# shouldn't
with pytest.raises(TypeError):
RangeIndex('0', '1', '2')
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
assert result.dtype == np.object_
def test_take_preserve_name(self):
index = RangeIndex(1, 5, name='foo')
taken = index.take([3, 0, 1])
assert index.name == taken.name
def test_take_fill_value(self):
# GH 12631
idx = pd.RangeIndex(1, 4, name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.Int64Index([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
msg = "Unable to fill values because RangeIndex cannot contain NA"
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -1]), fill_value=True)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Int64Index([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
msg = "Unable to fill values because RangeIndex cannot contain NA"
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_print_unicode_columns(self):
df = pd.DataFrame({u("\u05d0"): [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name='asdf')
assert idx.name == idx[1:].name
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = RangeIndex(5)
# float conversions
arr = np.arange(5, dtype='int64') * 3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5, dtype='float64')
result = fidx - a
tm.assert_index_equal(result, expected)
expected = Float64Index(-arr)
a = np.zeros(5, dtype='float64')
result = a - fidx
tm.assert_index_equal(result, expected)
def test_has_duplicates(self):
for ind in self.indices:
if not len(ind):
continue
idx = self.indices[ind]
assert idx.is_unique
assert not idx.has_duplicates
def test_extended_gcd(self):
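        # _extended_gcd(a, b) returns (gcd, s, t) satisfying Bezout's identity
        # s * a + t * b == gcd, which is what the assertions below verify.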
result = self.index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
result = self.index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
def test_min_fitting_element(self):
result = RangeIndex(0, 20, 2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(1, 6)._min_fitting_element(1)
assert 1 == result
result = RangeIndex(18, -2, -2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(5, 0, -1)._min_fitting_element(1)
assert 1 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num)
assert big_num == result
def test_max_fitting_element(self):
result = RangeIndex(0, 20, 2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(1, 6)._max_fitting_element(4)
assert 4 == result
result = RangeIndex(18, -2, -2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(5, 0, -1)._max_fitting_element(4)
assert 4 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._max_fitting_element(big_num)
assert big_num == result
def test_pickle_compat_construction(self):
# RangeIndex() is a valid constructor
pass
def test_slice_specialised(self):
# scalar indexing
res = self.index[1]
expected = 2
assert res == expected
res = self.index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
index = self.index[:]
expected = self.index
tm.assert_index_equal(index, expected)
# positive slice values
index = self.index[7:10:2]
expected = Index(np.array([14, 18]), name='foo')
tm.assert_index_equal(index, expected)
# negative slice values
index = self.index[-1:-5:-2]
expected = Index(np.array([18, 14]), name='foo')
tm.assert_index_equal(index, expected)
# stop overshoot
index = self.index[2:100:4]
expected = Index(np.array([4, 12]), name='foo')
tm.assert_index_equal(index, expected)
# reverse
index = self.index[::-1]
expected = Index(self.index.values[::-1], name='foo')
tm.assert_index_equal(index, expected)
index = self.index[-8::-1]
expected = Index(np.array([4, 2, 0]), name='foo')
tm.assert_index_equal(index, expected)
index = self.index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name='foo')
tm.assert_index_equal(index, expected)
index = self.index[40::-1]
expected = Index(self.index.values[40::-1], name='foo')
tm.assert_index_equal(index, expected)
index = self.index[10::-1]
expected = Index(self.index.values[::-1], name='foo')
tm.assert_index_equal(index, expected)
def test_len_specialised(self):
        # make sure that our len matches the equivalent np.arange calculation
for step in np.arange(1, 6, 1):
arr = np.arange(0, 5, step)
i = RangeIndex(0, 5, step)
assert len(i) == len(arr)
i = RangeIndex(5, 0, step)
assert len(i) == 0
for step in np.arange(-6, -1, 1):
arr = np.arange(5, 0, step)
i = RangeIndex(5, 0, step)
assert len(i) == len(arr)
i = RangeIndex(0, 5, step)
assert len(i) == 0
def test_append(self):
# GH16212
RI = RangeIndex
I64 = Int64Index
F64 = Float64Index
OI = Index
cases = [([RI(1, 12, 5)], RI(1, 12, 5)),
([RI(0, 6, 4)], RI(0, 6, 4)),
([RI(1, 3), RI(3, 7)], RI(1, 7)),
([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)),
([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)),
([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)),
([RI(-4, -8), RI(-8, -12)], RI(0, 0)),
([RI(-4, -8), RI(3, -4)], RI(0, 0)),
([RI(-4, -8), RI(3, 5)], RI(3, 5)),
([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),
([RI(-2,), RI(3, 5)], RI(3, 5)),
([RI(2,), RI(2)], I64([0, 1, 0, 1])),
([RI(2,), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),
([RI(2,), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),
([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),
([RI(3,), I64([-1, 3, 15])], I64([0, 1, 2, -1, 3, 15])),
([RI(3,), F64([-1, 3.1, 15.])], F64([0, 1, 2, -1, 3.1, 15.])),
([RI(3,), OI(['a', None, 14])], OI([0, 1, 2, 'a', None, 14])),
([RI(3, 1), OI(['a', None, 14])], OI(['a', None, 14]))
]
for indices, expected in cases:
result = indices[0].append(indices[1:])
tm.assert_index_equal(result, expected, exact=True)
if len(indices) == 2:
# Append single item rather than list
result2 = indices[0].append(indices[1])
tm.assert_index_equal(result2, expected, exact=True)
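# Illustrative sketch, not part of the original test suite: the append cases
# above assume that contiguous RangeIndexes with a compatible step collapse
# back into a RangeIndex, while incompatible pieces fall back to Int64Index.
# A minimal standalone check of that assumption, run only when executed directly:
if __name__ == '__main__':
    import pandas as pd
    contiguous = pd.RangeIndex(1, 3).append(pd.RangeIndex(3, 7))
    print(type(contiguous).__name__, list(contiguous))  # expected: RangeIndex [1, 2, 3, 4, 5, 6]
    gapped = pd.RangeIndex(-4, -2).append(pd.RangeIndex(3, 5))
    print(type(gapped).__name__, list(gapped))          # expected: Int64Index [-4, -3, 3, 4]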
|
|
# -*- coding: utf-8 -*-
"""
.. module:: djstripe.fields.
:synopsis: dj-stripe Custom Field Definitions
.. moduleauthor:: Bill Huneke (@wahuneke)
"""
import decimal
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .settings import USE_NATIVE_JSONFIELD
from .utils import convert_tstamp, dict_nested_accessor
if USE_NATIVE_JSONFIELD:
from django.contrib.postgres.fields import JSONField
else:
from jsonfield import JSONField
class StripeFieldMixin(object):
"""
Custom fields for all Stripe data.
This allows keeping track of which database fields are suitable for
sending to or receiving from Stripe. Also, allows a few handy extra parameters.
"""
# Used if the name at stripe is different from the name in our database
# Include a . in name if value is nested in dict in Stripe's object
# (e.g. stripe_name = "data.id" --> obj["data"]["id"])
stripe_name = None
# If stripe_name is None, this can also be used to specify a nested value, but
# the final value is assumed to be the database field name
# (e.g. nested_name = "data" --> obj["data"][db_field_name])
nested_name = None
# This indicates that this field will always appear in a stripe object. An
# exception will be raised if we try to parse a stripe object that does not
# include this field in the data. If set to False then the null=True attribute
# will be automatically set
stripe_required = True
# If a field was populated in previous API versions but we don't want to drop the old
# data for some reason, mark it as deprecated. This will make sure we never try to send
# it to Stripe or expect in Stripe data received
# This setting automatically implies null=True
deprecated = False
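# For example, a hypothetical declaration (illustrative only, not an actual
# dj-stripe model field):
#   charge_id = StripeIdField(stripe_name="data.object.id", stripe_required=False)
# would be populated from obj["data"]["object"]["id"] and, because
# stripe_required is False, automatically get null=True in the database.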
def __init__(self, *args, **kwargs):
"""
Assign class instance variables based on kwargs.
Assign extra class instance variables if stripe_required is defined or
if deprecated is defined.
"""
self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)
self.nested_name = kwargs.pop('nested_name', self.nested_name)
self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)
self.deprecated = kwargs.pop('deprecated', self.deprecated)
if not self.stripe_required:
kwargs["null"] = True
if self.deprecated:
kwargs["null"] = True
kwargs["default"] = None
super(StripeFieldMixin, self).__init__(*args, **kwargs)
def stripe_to_db(self, data):
"""Try converting stripe fields to defined database fields."""
if not self.deprecated:
try:
if self.stripe_name:
result = dict_nested_accessor(data, self.stripe_name)
elif self.nested_name:
result = dict_nested_accessor(data, self.nested_name + "." + self.name)
else:
result = data[self.name]
except (KeyError, TypeError):
if self.stripe_required:
model_name = self.model._meta.object_name if hasattr(self, "model") else ""
raise FieldError("Required stripe field '{field_name}' was not"
" provided in {model_name} data object.".format(field_name=self.name,
model_name=model_name))
else:
result = None
return result
class StripePercentField(StripeFieldMixin, models.DecimalField):
"""A field used to define a percent according to djstripe logic."""
def __init__(self, *args, **kwargs):
"""Assign default args to this field."""
defaults = {
'decimal_places': 2,
'max_digits': 5,
'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]
}
defaults.update(kwargs)
super(StripePercentField, self).__init__(*args, **defaults)
class StripeCurrencyField(StripeFieldMixin, models.DecimalField):
"""
A field used to define currency according to djstripe logic.
Stripe is always in cents. djstripe stores everything in dollars.
"""
def __init__(self, *args, **kwargs):
"""Assign default args to this field."""
defaults = {
'decimal_places': 2,
'max_digits': 8,
}
defaults.update(kwargs)
super(StripeCurrencyField, self).__init__(*args, **defaults)
def stripe_to_db(self, data):
"""Convert the raw value to decimal representation."""
val = super(StripeCurrencyField, self).stripe_to_db(data)
# Note: 0 is a possible return value, which is 'falseish'
if val is not None:
return val / decimal.Decimal("100")
class StripeBooleanField(StripeFieldMixin, models.BooleanField):
"""A field used to define a boolean value according to djstripe logic."""
def __init__(self, *args, **kwargs):
"""Throw an error when a user tries to deprecate."""
if kwargs.get("deprecated", False):
raise ImproperlyConfigured("Boolean field cannot be deprecated. Change field type to "
"StripeNullBooleanField")
super(StripeBooleanField, self).__init__(*args, **kwargs)
class StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):
"""A field used to define a NullBooleanField value according to djstripe logic."""
pass
class StripeCharField(StripeFieldMixin, models.CharField):
"""A field used to define a CharField value according to djstripe logic."""
pass
class StripeIdField(StripeCharField):
"""A field with enough space to hold any stripe ID."""
def __init__(self, *args, **kwargs):
"""
Assign default args to this field.
As per: https://stripe.com/docs/upgrades
You can safely assume object IDs we generate will never exceed 255
characters, but you should be able to handle IDs of up to that
length.
"""
defaults = {
'max_length': 255,
'blank': False,
'null': False,
}
defaults.update(kwargs)
super(StripeIdField, self).__init__(*args, **defaults)
class StripeTextField(StripeFieldMixin, models.TextField):
"""A field used to define a TextField value according to djstripe logic."""
pass
class StripeDateTimeField(StripeFieldMixin, models.DateTimeField):
"""A field used to define a DateTimeField value according to djstripe logic."""
def stripe_to_db(self, data):
"""Convert the raw timestamp value to a DateTime representation."""
val = super(StripeDateTimeField, self).stripe_to_db(data)
# Note: 0 is a possible return value, which is 'falseish'
if val is not None:
return convert_tstamp(val)
class StripeIntegerField(StripeFieldMixin, models.IntegerField):
"""A field used to define a IntegerField value according to djstripe logic."""
pass
class StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):
"""A field used to define a PositiveIntegerField value according to djstripe logic."""
pass
class StripeJSONField(StripeFieldMixin, JSONField):
"""A field used to define a JSONField value according to djstripe logic."""
pass
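# Illustrative sketch, not part of dj-stripe: stripe_to_db above resolves a
# dotted stripe_name such as "data.object.id" via dict_nested_accessor. The
# helper below is a hypothetical stand-in showing the assumed lookup semantics;
# the real implementation lives in .utils.dict_nested_accessor.
def _example_nested_lookup(data, dotted_name):
    """Walk a dict along a dotted path, e.g. 'data.object.id'."""
    value = data
    for key in dotted_name.split("."):
        value = value[key]
    return value
if __name__ == "__main__":
    payload = {"data": {"object": {"id": "evt_123"}}}
    # A field declared with stripe_name="data.object.id" would receive "evt_123".
    print(_example_nested_lookup(payload, "data.object.id"))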
|
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
calicoctl diags [--log-dir=<LOG_DIR>]
Description:
Save diagnostic information
Options:
--log-dir=<LOG_DIR> The directory for logs [default: /var/log/calico]
"""
import sys
import sh
import os
from datetime import datetime
import tarfile
import socket
import tempfile
import traceback
import subprocess
from etcd import EtcdException
from pycalico.datastore import DatastoreClient
from shutil import copytree, ignore_patterns
from utils import hostname
from utils import print_paragraph
def diags(arguments):
"""
Main dispatcher for diags commands. Calls the corresponding helper function.
:param arguments: A dictionary of arguments already processed through
this file's docstring with docopt
:return: None
"""
print("Collecting diags")
save_diags(arguments["--log-dir"])
sys.exit(0)
def save_diags(log_dir):
# Create temp directory
temp_dir = tempfile.mkdtemp()
temp_diags_dir = os.path.join(temp_dir, 'diagnostics')
os.mkdir(temp_diags_dir)
print("Using temp dir: %s" % temp_dir)
# Write date to file
with DiagsErrorWriter(temp_diags_dir, 'date') as f:
f.write("DATE=%s" % datetime.strftime(datetime.today(),
"%Y-%m-%d_%H-%M-%S"))
# Write hostname to file
with DiagsErrorWriter(temp_diags_dir, 'hostname') as f:
f.write(str(hostname))
# Write netstat output to file
with DiagsErrorWriter(temp_diags_dir, 'netstat') as f:
try:
print("Dumping netstat output")
netstat = sh.Command._create("netstat")
f.writelines(netstat(
# Display all sockets (default: connected)
all=True,
# Don't resolve names
numeric=True))
except sh.CommandNotFound as e:
print " - Missing command: %s" % e.message
f.writelines("Missing command: %s\n" % e.message)
# Write routes
print("Dumping routes")
with DiagsErrorWriter(temp_diags_dir, 'route') as f:
try:
route = sh.Command._create("route")
f.write("route --numeric\n")
f.writelines(route(numeric=True))
f.write('\n')
except sh.CommandNotFound as e:
print " - Missing command: %s" % e.message
f.writelines("Missing command: %s\n" % e.message)
try:
ip = sh.Command._create("ip")
f.write("ip route\n")
f.writelines(ip("route"))
f.write('\n')
f.write("ip -6 route\n")
f.writelines(ip("-6", "route"))
f.write('\n')
except sh.CommandNotFound as e:
print " - Missing command: %s" % e.message
f.writelines("Missing command: %s\n" % e.message)
# Dump iptables
with DiagsErrorWriter(temp_diags_dir, 'iptables') as f:
try:
iptables_save = sh.Command._create("iptables-save")
print("Dumping iptables")
f.writelines(iptables_save())
except sh.CommandNotFound as e:
print " - Missing command: %s" % e.message
f.writelines("Missing command: %s\n" % e.message)
# Dump ipset list
# TODO: ipset might not be installed on the host. But we don't want to
# gather the diags in the container because it might not be running...
with DiagsErrorWriter(temp_diags_dir, 'ipset') as f:
try:
ipset = sh.Command._create("ipset")
print("Dumping ipset")
f.writelines(ipset("list"))
except sh.CommandNotFound as e:
print " - Missing command: %s" % e.message
f.writelines("Missing command: %s\n" % e.message)
except sh.ErrorReturnCode_1 as e:
print " - Error running ipset. Maybe you need to run as root."
f.writelines("Error running ipset: %s\n" % e)
# Ask Felix to dump stats to its log file - ignore errors as the
# calico-node might not be running
subprocess.call(["docker", "exec", "calico-node",
"pkill", "-SIGUSR1", "felix"])
if os.path.isdir(log_dir):
print("Copying Calico logs")
# Skip the lock files as they can only be copied by root.
copytree(log_dir, os.path.join(temp_diags_dir, "logs"),
ignore=ignore_patterns('lock'))
else:
print('No logs found in %s; skipping log copying' % log_dir)
print("Dumping datastore")
# TODO: May want to move this into datastore.py as a dump-calico function
with DiagsErrorWriter(temp_diags_dir, 'etcd_calico') as f:
try:
datastore_client = DatastoreClient()
datastore_data = datastore_client.etcd_client.read("/calico",
recursive=True)
f.write("dir?, key, value\n")
# TODO: python-etcd bug: Leaves show up twice in get_subtree().
for child in datastore_data.get_subtree():
if child.dir:
f.write("DIR, %s,\n" % child.key)
else:
f.write("FILE, %s, %s\n" % (child.key, child.value))
except EtcdException, e:
print "Unable to dump etcd datastore"
f.write("Unable to dump etcd datastore: %s" % e)
# Create tar.
tar_filename = datetime.strftime(datetime.today(),
"diags-%d%m%y_%H%M%S.tar.gz")
full_tar_path = os.path.join(temp_dir, tar_filename)
with tarfile.open(full_tar_path, "w:gz") as tar:
# pass in arcname, otherwise zip contains layers of subfolders
tar.add(temp_dir, arcname="")
print("\nDiags saved to %s\n" % (full_tar_path))
print_paragraph("If required, you can upload the diagnostics bundle to a "
"file sharing service such as transfer.sh using curl or "
"similar. For example:")
print(" curl --upload-file %s https://transfer.sh/%s" %
(full_tar_path, os.path.basename(full_tar_path)))
class DiagsErrorWriter(object):
"""
Context manager used to handle error handling when writing diagnostics.
In the event of an exception being thrown within the context manager, the
details of the exception are written to file and the exception is
swallowed. This allows the diagnostics to retrieve as much information as
possible.
"""
def __init__(self, temp_dir, filename):
self.temp_dir = temp_dir
self.filename = filename
self.file = None
def __enter__(self):
"""
Open the diags file for writing, and return the file object.
:return: The file object.
"""
self.file = open(os.path.join(self.temp_dir, self.filename), "w")
return self.file
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Close the diagnostics file and if an error occurred, write that into
the file.
:param exc_type: The exception type, or None.
:param exc_val: The exception instance, or None.
:param exc_tb: The exception traceback, or None.
:return: False for KeyboardInterrupt exceptions, or no exceptions,
True for all other exceptions (exception is traced in file).
"""
if exc_type is KeyboardInterrupt:
rc = False
elif exc_type is None:
rc = False
else:
print " - Error gathering diagnostics"
self.file.write("\nError gathering diagnostics\n")
self.file.write("Exception: %s(%s)\n" % (exc_type, exc_val))
traceback.print_tb(exc_tb, None, self.file)
rc = True
self.file.close()
return rc
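# Illustrative usage sketch, not part of calicoctl: DiagsErrorWriter swallows
# any error raised while gathering a single diagnostic so the rest of the
# bundle can still be collected. Runs only when this module is executed
# directly; the temp directory is created purely for the demonstration.
if __name__ == '__main__':
    example_dir = tempfile.mkdtemp()
    with DiagsErrorWriter(example_dir, 'example') as f:
        f.write("this write succeeds\n")
        raise RuntimeError("simulated failure while gathering diagnostics")
    # The exception above was written into the 'example' file and swallowed,
    # so execution carries on here.
    print("example diags written to %s" % example_dir)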
|
|
#!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import itertools
import sys
import re
import pygraphviz as gv
import ply.lex as lex
import ply.yacc as yacc
from functools import lru_cache as memoize
diagnostic_header_pattern = re.compile(r'[^ ]+\.[^ ]+:[0-9]+:[0-9]+: ([^ ]*): (.*)')
in_file_included_from_pattern = re.compile('In file included from .*:')
in_instantiation_of_template_pattern = re.compile('in instantiation of (.*) (?:requested|required) here')
static_warning_marked_deprecated_here_pattern = re.compile('\'static_warning\' has been explicitly marked deprecated here')
class Diagnostic:
def __init__(self, kind, message):
self.kind = kind
self.message = message
self.template_instantiation_trace = []
tokens = (
'LPAREN',
'RPAREN',
'LBRACKET',
'RBRACKET',
'LBRACE',
'RBRACE',
'LESS_THAN',
'GREATER_THAN',
'DOUBLE_COLON',
'COMMA',
'IDENTIFIER',
'ASTERISK',
'AMPERSAND',
)
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'{'
t_RBRACE = r'}'
t_LESS_THAN = r'<'
t_GREATER_THAN = r'>'
t_DOUBLE_COLON = r'::'
t_COMMA = r','
t_ASTERISK = r'\*'
t_AMPERSAND = r'&'
# We conflate numbers with identifiers too; we don't care about the difference.
t_IDENTIFIER = r'[a-zA-Z0-9_]+'
t_ignore = ' \t'
def t_error(t):
raise Exception("Illegal character '%s' followed by %s" % (t.value[0], t.value[1:]))
class LayoutNeedsMultipleLinesException(Exception):
pass
class AstNode:
def __str__(self):
return ''.join(self)
class TerminalAstNode(AstNode):
def __init__(self, s):
self.s = s
self.is_multiline = (s == '\n')
# last_line_length is the string length if s is not a multiline string.
# For multiline strings ending in a newline, this is 0.
if self.is_multiline:
self.first_line_length = 0
self.last_line_length = 0
self.max_line_length = 0
else:
# This never happens ATM, so we don't handle it.
assert '\n' not in s
self.first_line_length = len(s)
self.last_line_length = len(s)
self.max_line_length = len(s)
def __iter__(self):
return iter((self.s,))
class NonTerminalAstNode(AstNode):
def __init__(self, children_ast_nodes):
self.children_ast_nodes = children_ast_nodes
first_line_length = 0
last_line_length = 0
is_multiline = False
max_line_length = 0
for node in children_ast_nodes:
if node.is_multiline:
last_line_length = node.last_line_length
max_line_length = max(max_line_length, last_line_length + node.first_line_length, node.max_line_length)
is_multiline = True
else:
last_line_length += node.last_line_length
max_line_length = max(max_line_length, last_line_length)
self.first_line_length = first_line_length
self.last_line_length = last_line_length
self.is_multiline = is_multiline
self.max_line_length = max_line_length
def __iter__(self):
return itertools.chain(*self.children_ast_nodes)
max_line_length = 80
# Size of an indent in spaces.
single_indent_length = 4
class TerminalNodeFactory():
def __init__(self, s):
self.s = s
def __call__(self, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
return TerminalAstNode(self.s)
# 'balanced_string' nodes evaluate to a function (or a callable object) taking these parameters:
# current_indent (integer): the indentation in the current line (spaces only)
# current_line_length (integer): the number of preceding characters in the current line (>=current_indent)
# inside_meta_type (boolean): whether we're inside a Type<...>
# last_token_was_type_wrapper (boolean): whether the immediately-preceding token was the identifier 'Type'
# and returning an AstNode
# 'comma_separated_balanced_string' nodes evaluate to a tuple of such functions
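# For instance (illustrative only, not used by the grammar below), the
# TerminalNodeFactory defined above already follows that protocol:
#   make_foo = TerminalNodeFactory('Foo')
#   node = make_foo(0, 0, False, False, True)  # indent, line length, flags
#   str(node)                                  # -> 'Foo'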
def p_comma_separated_balanced_string_empty(p):
'comma_separated_balanced_string : '
p[0] = tuple()
def p_comma_separated_balanced_string_not_empty(p):
'comma_separated_balanced_string : COMMA balanced_string comma_separated_balanced_string'
p[0] = (
p[2],
*(p[3])
)
def p_optional_balanced_string_empty(p):
'optional_balanced_string : '
p[0] = TerminalNodeFactory('')
def p_optional_balanced_string_not_empty(p):
'optional_balanced_string : balanced_string'
p[0] = p[1]
class BalancedStringTerminalNodeFactory():
def __init__(self, first_token, node_factory):
self.first_token = first_token
self.node_factory = node_factory
def __call__(self, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
terminal_node = TerminalAstNode(self.first_token)
non_terminal_node = self.node_factory(
current_indent,
current_line_length + len(self.first_token),
inside_meta_type,
self.first_token == 'Type',
accept_single_line_only)
if non_terminal_node is None:
return None
return NonTerminalAstNode((terminal_node, non_terminal_node))
def p_balanced_string_terminal(p):
'''balanced_string : DOUBLE_COLON balanced_string
| IDENTIFIER optional_balanced_string
| ASTERISK optional_balanced_string
| AMPERSAND optional_balanced_string
'''
first_token = p[1]
node_factory = p[2]
p[0] = BalancedStringTerminalNodeFactory(first_token, node_factory)
def create_composite_node_from_factories(node_factory_inside_meta_type_pairs, current_line_length, accept_single_line_only):
nodes = []
for node_factory, current_indent, inside_meta_type in node_factory_inside_meta_type_pairs:
node = node_factory(current_indent, current_line_length, inside_meta_type, False, accept_single_line_only)
if node is None:
return None
nodes.append(node)
if node.is_multiline:
if accept_single_line_only:
raise Exception('Unexpected multiline, due to factory: ' + str(node_factory))
# Note that due to the way we break lines, the last line will have the same indent as the first.
# So we don't need to update current_indent here.
current_line_length = node.last_line_length
else:
current_line_length += node.last_line_length
return NonTerminalAstNode(nodes)
def compute_layout(left_token, intermediate_node_factories, right_token, rhs_node_factory, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
# We lay out the result in one of two ways:
#
# $previousIndent $previousContent LPAREN x1, x2, x3 RPAREN balanced_string
#
# Or:
#
# $previousIndent $previousContent LPAREN
# $previousIndent $indent x1 ,
# $previousIndent $indent x2 ,
# $previousIndent $indent x3 RPAREN balanced_string
entering_meta_type = last_token_was_type_wrapper
# First, we try to use the first format if possible
node_factory_inside_meta_type_pairs = [
(TerminalNodeFactory(left_token), current_indent, inside_meta_type),
*((intermediate_node_factory, current_indent, (inside_meta_type or entering_meta_type))
for intermediate_node_factory in intermediate_node_factories),
(TerminalNodeFactory(right_token), current_indent, inside_meta_type),
(rhs_node_factory, current_indent, inside_meta_type),
]
node_with_single_line_layout = create_composite_node_from_factories(node_factory_inside_meta_type_pairs, current_line_length, True)
if node_with_single_line_layout is not None and node_with_single_line_layout.max_line_length <= max_line_length:
assert not node_with_single_line_layout.is_multiline
return node_with_single_line_layout
if accept_single_line_only:
return None
# The result exceeds the line length, let's switch to the second one.
node_factory_inside_meta_type_pairs = [
(TerminalNodeFactory(left_token),
current_indent,
inside_meta_type)
]
new_indent_length = current_indent + single_indent_length
comma_node_factory_inside_meta_type_pair = (TerminalNodeFactory(','), current_indent, inside_meta_type or entering_meta_type)
newline_node_factory_inside_meta_type_pair = (TerminalNodeFactory('\n'), current_indent, inside_meta_type or entering_meta_type)
indent_node_factory_inside_meta_type_pair = (TerminalNodeFactory(' ' * new_indent_length), current_indent, inside_meta_type or entering_meta_type)
for inner_node_factory in intermediate_node_factories:
node_factory_inside_meta_type_pairs.append(newline_node_factory_inside_meta_type_pair)
node_factory_inside_meta_type_pairs.append(indent_node_factory_inside_meta_type_pair)
node_factory_inside_meta_type_pairs.append((inner_node_factory, new_indent_length, inside_meta_type or entering_meta_type))
node_factory_inside_meta_type_pairs.append(comma_node_factory_inside_meta_type_pair)
node_factory_inside_meta_type_pairs.pop()
node_factory_inside_meta_type_pairs.append((TerminalNodeFactory(right_token), current_indent, inside_meta_type))
node_factory_inside_meta_type_pairs.append((rhs_node_factory, current_indent, inside_meta_type))
return create_composite_node_from_factories(node_factory_inside_meta_type_pairs, current_line_length, accept_single_line_only)
def p_balanced_string_with_balanced_token_no_comma_separated_elems(p):
'''balanced_string : LPAREN RPAREN optional_balanced_string
| LBRACKET RBRACKET optional_balanced_string
| LBRACE RBRACE optional_balanced_string
| LESS_THAN GREATER_THAN optional_balanced_string
'''
p_1 = p[1]
p_2 = p[2]
p_3 = p[3]
def result(current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
return compute_layout(p_1, [], p_2, p_3, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only)
p[0] = result
def p_balanced_string_with_balanced_token_some_comma_separated_elems(p):
'''balanced_string : LPAREN balanced_string comma_separated_balanced_string RPAREN optional_balanced_string
| LBRACKET balanced_string comma_separated_balanced_string RBRACKET optional_balanced_string
| LBRACE balanced_string comma_separated_balanced_string RBRACE optional_balanced_string
| LESS_THAN balanced_string comma_separated_balanced_string GREATER_THAN optional_balanced_string
'''
p_1 = p[1]
p_2 = p[2]
p_3 = p[3]
p_4 = p[4]
p_5 = p[5]
def result(current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only):
if not inside_meta_type:
if p_1 == '(' and p_4 == ')':
if len(p_3) == 0:
if isinstance(p_2, BalancedStringTerminalNodeFactory) and p_2.first_token == '*':
if isinstance(p_2.node_factory, TerminalNodeFactory) and p_2.node_factory.s == '':
# Special case: we're not inside a Type<...> and we've encountered a '(*)'.
# Discard it and just print the rhs.
return p_5(current_indent, current_line_length, inside_meta_type, False, accept_single_line_only)
return compute_layout(p_1, (p_2, *(p_3)), p_4, p_5, current_indent, current_line_length, inside_meta_type, last_token_was_type_wrapper, accept_single_line_only)
p[0] = result
def p_error(p):
raise Exception("Syntax error when parsing meta type: ", p[:])
lexer = lex.lex()
parser = yacc.yacc(start='balanced_string')
strings_to_remove = re.compile(r'template class |template type alias |function template specialization |member class |member function |default argument for |fruit::impl::meta::|fruit::impl::|fruit::')
def do_simplify_template_trace_element(element):
element, _ = re.subn(strings_to_remove, '', element)
element = element.strip()
if element[0] != '\'' or element[-1] != '\'':
raise Exception('Expected single quotes in: ' + element)
element = element[1:-1]
if element.startswith('DoEval<') and element[-1] == '>':
element = element[7:-1]
result = ''.join(parser.parse(element, lexer)(0, 0, False, False, False))
return result
@memoize(maxsize=1000)
def simplify_template_trace_element(element, executor):
return executor.submit(do_simplify_template_trace_element, element)
def to_dot_left_justified_string(s):
return '\\l'.join(s.splitlines() + [''])
def main():
diagnostics = []
with futures.ProcessPoolExecutor() as executor:
lines = sys.stdin.readlines()
for line_number, line in enumerate(lines):
# Remove the newline
line = line[:-1]
matches = in_file_included_from_pattern.search(line)
if matches:
continue
matches = diagnostic_header_pattern.search(line)
if matches:
diagnostic_kind, diagnostic_message = matches.groups()
if diagnostic_kind == 'error':
diagnostics.append(Diagnostic(diagnostic_kind, diagnostic_message))
print('Processing diagnostic. (%s / %s) ' % (line_number, len(lines)), file=sys.stderr)
elif diagnostic_kind == 'note':
matches = in_instantiation_of_template_pattern.search(diagnostic_message)
if matches:
if not diagnostics:
raise Exception('Found template instantiation note before any error diagnostic: %s' % diagnostic_message)
if 'in instantiation of template type alias' in line:
pass
else:
group = matches.groups()[0]
trace_element_future = simplify_template_trace_element(group, executor)
diagnostics[-1].template_instantiation_trace.append(trace_element_future)
continue
matches = static_warning_marked_deprecated_here_pattern.search(diagnostic_message)
if matches:
continue
raise Exception('Found unknown note: %s' % diagnostic_message)
call_graph = {}
graph = gv.AGraph(directed=True)
for diagnostic_index, diagnostic in enumerate(diagnostics):
if diagnostic_index % 10 == 0:
print('Constructing dep graph: iteration %s/%s' % (diagnostic_index, len(diagnostics)), file=sys.stderr)
template_instantiation_trace = [trace_element_future.result() for trace_element_future in diagnostic.template_instantiation_trace]
for called, caller in zip(template_instantiation_trace[1:], template_instantiation_trace[2:]):
if called in call_graph and call_graph[called] != caller:
# Avoid this edge, so that the resulting graph is a tree
continue
graph.add_edge(to_dot_left_justified_string(caller), to_dot_left_justified_string(called))
call_graph[called] = caller
print(graph)
if __name__ == '__main__':
main()
|
|
from __future__ import division
import os, sys
import logging
import logging.handlers
import httplib, httplib2
import urllib
import re
import base64
from xml.dom import minidom
import time
from collections import deque
import shutil
import pprint
import json
import threading
import copy
# The max number of threads set up for HTTP type outputs
MAX_WORKERS = 5
# This is used only for the HTTP output outputModes
# This allows us to iowait while we keep on generating events
# in the background
class Worker(threading.Thread):
func = None
queue = None
running = None
def __init__(self, func, queue):
self.func = func
self.queue = queue
self.running = False
threading.Thread.__init__(self)
def run(self):
self.running = True
try:
self.func(self.queue)
except:
self.running = False
self.running = False
sys.exit(0)
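# Illustrative sketch with hypothetical names, not part of eventgen: flush()
# below hands each per-source/sourcetype queue to one of these workers so event
# generation can continue while the HTTP output is in flight:
#   q = deque([{'_raw': 'event', 'index': 'main', 'source': 'src',
#               'sourcetype': 'st', 'host': 'h', 'hostRegex': None}])
#   w = Worker(some_output._flush, q)
#   w.start()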
class Output:
"""Output events, abstracting output method"""
_app = None
_sample = None
_c = None
_outputMode = None
_spoolDir = None
_spoolFile = None
_workingFilePath = None
_workingFH = None
_fileName = None
_fileMaxBytes = None
_fileBackupFiles = None
_fileLogger = None
_sessionKey = None
_splunkHost = None
_splunkPort = None
_splunkMethod = None
_splunkUser = None
_splunkPass = None
_splunkUrl = None
_splunkhttp = None
_index = None
_source = None
_sourcetype = None
_host = None
_hostRegex = None
_projectID = None
_accessToken = None
_workers = None
validOutputModes = ['spool', 'file', 'splunkstream']
validSplunkMethods = ['http', 'https']
# Queue of outputs. Will be sent to host when flush() is called
_queue = None
def __init__(self, sample):
from eventgenconfig import Config
self._c = Config()
self._app = sample.app
self._sample = sample.name
self._outputMode = sample.outputMode
self._queue = deque([])
self._workers = [ ]
# Logger already setup by config, just get an instance
logger = logging.getLogger('eventgen')
globals()['logger'] = logger
if self._outputMode in ('splunkstream', 'stormstream'):
self._index = sample.index
self._source = sample.source
self._sourcetype = sample.sourcetype
self._host = sample.host
self._hostRegex = sample.hostRegex
if self._outputMode == 'spool':
self._spoolDir = sample.pathParser(sample.spoolDir)
self._spoolFile = sample.spoolFile
elif self._outputMode == 'file':
if sample.fileName == None:
logger.error('outputMode file but file not specified for sample %s' % self._sample)
raise ValueError('outputMode file but file not specified for sample %s' % self._sample)
self._file = sample.fileName
self._fileMaxBytes = sample.fileMaxBytes
self._fileBackupFiles = sample.fileBackupFiles
# 9/7/12 Replacing python logging with our own logging handler code
# self._fileLogger = logging.getLogger('eventgen_realoutput_'+self._file)
# formatter = logging.Formatter('%(message)s')
# handler = logging.handlers.RotatingFileHandler(filename=self._file, maxBytes=self._fileMaxBytes,
# backupCount=self._fileBackupFiles)
# handler.setFormatter(formatter)
# self._fileLogger.addHandler(handler)
# self._fileLogger.setLevel(logging.DEBUG)
self._fileHandle = open(self._file, 'a')
self._fileLength = os.stat(self._file).st_size
logger.debug("Configured to log to '%s' with maxBytes '%s' with backupCount '%s'" % \
(self._file, self._fileMaxBytes, self._fileBackupFiles))
elif self._outputMode == 'splunkstream':
if self._c.splunkEmbedded:
try:
import splunk.auth
self._splunkUrl = splunk.auth.splunk.getLocalServerInfo()
results = re.match('(http|https)://([^:/]+):(\d+).*', self._splunkUrl)
self._splunkMethod = results.groups()[0]
self._splunkHost = results.groups()[1]
self._splunkPort = results.groups()[2]
except:
import traceback
trace = traceback.format_exc()
logger.error('Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s. Stacktrace: %s' % (self._sample, trace))
raise ValueError('Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s' % self._sample)
else:
if sample.splunkHost == None:
logger.error('outputMode splunkstream but splunkHost not specified for sample %s' % self._sample)
raise ValueError('outputMode splunkstream but splunkHost not specified for sample %s' % self._sample)
elif sample.splunkHost.startswith('['):
try:
sample.splunkHost = json.loads(sample.splunkHost)
except:
logger.error('splunkHost configured as JSON, but unparseable for sample %s' % self._sample)
raise ValueError('splunkHost configured as JSON, but unparseable for sample %s' % self._sample)
if sample.splunkUser == None:
logger.error('outputMode splunkstream but splunkUser not specified for sample %s' % self._sample)
raise ValueError('outputMode splunkstream but splunkUser not specified for sample %s' % self._sample)
if sample.splunkPass == None:
logger.error('outputMode splunkstream but splunkPass not specified for sample %s' % self._sample)
raise ValueError('outputMode splunkstream but splunkPass not specified for sample %s' % self._sample)
self._splunkHost = sample.splunkHost
self._splunkPort = sample.splunkPort
self._splunkMethod = sample.splunkMethod
self._splunkUser = sample.splunkUser
self._splunkPass = sample.splunkPass
self._splunkUrl = '%s://%s:%s' % (self._splunkMethod, self._splunkHost, self._splunkPort)
try:
myhttp = httplib2.Http(disable_ssl_certificate_validation=True)
response = myhttp.request(self._splunkUrl + '/services/auth/login', 'POST',
headers = {}, body=urllib.urlencode({'username': self._splunkUser,
'password': self._splunkPass}))[1]
self._c.sessionKey = minidom.parseString(response).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
except:
logger.error('Error getting session key for non-SPLUNK_EMBEDDED for sample %s' % self._sample)
raise IOError('Error getting session key for non-SPLUNK_EMBEDDED for sample %s' % self._sample)
logging.debug("Retrieved session key '%s' for Splunk session for sample %s'" % (self._c.sessionKey, self._sample))
elif self._outputMode == 'stormstream':
self._accessToken = sample.accessToken
self._projectID = sample.projectID
logger.debug("Output init completed. Output: %s" % self)
def __str__(self):
"""Only used for debugging, outputs a pretty printed representation of this output"""
# Eliminate recursive going back to parent
temp = dict([ (key, value) for (key, value) in self.__dict__.items() if key != '_c'])
# return pprint.pformat(temp)
return ""
def __repr__(self):
return self.__str__()
def send(self, msg):
"""Queues a message for output to configured outputs"""
if self._outputMode in ('splunkstream', 'stormstream'):
self._queue.append({'_raw': msg, 'index': self._index,
'source': self._source, 'sourcetype': self._sourcetype,
'host': self._host, 'hostRegex': self._hostRegex})
else:
self._queue.append({'_raw': msg})
if self._outputMode in ('splunkstream', 'stormstream') and len(self._queue) > 1000:
self.flush()
elif len(self._queue) > 10:
self.flush()
def refreshconfig(self, sample):
"""Refreshes output config based on sample"""
if self._outputMode in ('splunkstream', 'stormstream'):
self._index = sample.index
self._source = sample.source
self._sourcetype = sample.sourcetype
self._host = sample.host
self._hostRegex = sample.hostRegex
logger.debug("Refreshed config. Set Index '%s': Source '%s': Sourcetype: '%s' Host: '%s' HostRegex: '%s'" % \
(self._index, self._source, self._sourcetype, self._host, self._hostRegex))
def flush(self, force=False):
"""Flushes output from the queue out to the specified output"""
# Flush when the queue has reached 1000 entries, or when forced and the queue is non-empty
if (len(self._queue) >= 1000 or (force and len(self._queue) > 0)) \
and self._outputMode in ('splunkstream', 'stormstream'):
# For faster processing, we need to break these up by source combos
# so they'll each get their own thread.
# Fixes a bug where we're losing source and sourcetype with bundlelines type transactions
queues = { }
for row in self._queue:
if row['source'] is None:
row['source'] = ''
if row['sourcetype'] is None:
row['sourcetype'] = ''
if not row['source']+'_'+row['sourcetype'] in queues:
queues[row['source']+'_'+row['sourcetype']] = deque([])
# logger.debug("Queues setup: %s" % pprint.pformat(queues))
m = self._queue.popleft()
while m:
queues[m['source']+'_'+m['sourcetype']].append(m)
try:
m = self._queue.popleft()
except IndexError:
m = False
logger.debug("Creating workers, limited to %s" % MAX_WORKERS)
for k, v in queues.items():
# Trying to limit to MAX_WORKERS
w = Worker(self._flush, v)
for i in xrange(0, len(self._workers)):
if not self._workers[i].running:
del self._workers[i]
break
while len(self._workers) > MAX_WORKERS:
logger.info("Waiting for workers, limited to %s" % MAX_WORKERS)
for i in xrange(0, len(self._workers)):
if not self._workers[i].running:
del self._workers[i]
break
time.sleep(0.5)
self._workers.append(w)
w.start()
elif (len(self._queue) >= 1000 or (force and len(self._queue) > 0)) \
and self._outputMode in ('spool'):
q = copy.deepcopy(self._queue)
self._queue.clear()
self._flush(q)
elif self._outputMode in ('file'):
# q = copy.deepcopy(self._queue)
# self._queue.clear()
# self._flush(q)
self._flush(self._queue)
# w = Worker(self._flush, q)
# w.start()
# 9/15/12 CS Renaming to internal function and wrapping with a future
def _flush(self, queue):
"""Internal function which does the real flush work"""
splunkhttp = None
if len(queue) > 0:
streamout = ""
# Should now be getting a different output thread for each source
# So therefore, look at the first message in the queue, set based on that
# and move on
metamsg = queue.popleft()
msg = metamsg['_raw']
try:
index = metamsg['index']
source = metamsg['source']
sourcetype = metamsg['sourcetype']
host = metamsg['host']
hostRegex = metamsg['hostRegex']
except KeyError:
pass
logger.debug("Flushing output for sample '%s' in app '%s' for queue '%s'" % (self._sample, self._app, self._source))
if self._outputMode == 'spool':
nowtime = int(time.mktime(time.gmtime()))
workingfile = str(nowtime) + '-' + self._sample + '.part'
self._workingFilePath = os.path.join(self._c.greatgrandparentdir, self._app, 'samples', workingfile)
logger.debug("Creating working file '%s' for sample '%s' in app '%s'" % (workingfile, self._sample, self._app))
self._workingFH = open(self._workingFilePath, 'w')
#elif self._outputMode == 'splunkstream':
# try:
# if self._splunkMethod == 'https':
# connmethod = httplib.HTTPSConnection
# else:
# connmethod = httplib.HTTPConnection
# splunkhttp = connmethod(self._splunkHost, self._splunkPort)
# splunkhttp.connect()
# urlparms = [ ]
# if index != None:
# urlparms.append(('index', index))
# if source != None:
# urlparms.append(('source', source))
# if sourcetype != None:
# urlparms.append(('sourcetype', sourcetype))
# if hostRegex != None:
# urlparms.append(('host_regex', hostRegex))
# elif host != None:
# urlparms.append(('host', host))
# url = '/services/receivers/simple?%s' % (urllib.urlencode(urlparms))
# splunkhttp.putrequest("POST", url)
# splunkhttp.putheader("Authorization", "Splunk %s" % self._c.sessionKey)
#splunkhttp.putheader("x-splunk-input-mode", "streaming")
# splunkhttp.endheaders()
# except httplib.HTTPException, e:
# logger.error('Error connecting to Splunk for logging for sample %s. Exception "%s" Config: %s' % (self._sample, e.args, self))
# raise IOError('Error connecting to Splunk for logging for sample %s' % self._sample)
try:
while msg:
if self._outputMode == 'spool':
self._workingFH.write(msg)
elif self._outputMode == 'file':
# # 5/9/12 CS We log as error so that even the most restrictive
# # filter will push to file
# if msg[-1] == '\n':
# msg = msg[:-1]
# self._fileLogger.error(msg)
if msg[-1] != '\n':
msg += '\n'
self._fileHandle.write(msg)
self._fileLength += len(msg)
# If we're at the end of the max allowable size, shift all files
# up a number and create a new one
if self._fileLength > self._fileMaxBytes:
self._fileHandle.flush()
self._fileHandle.close()
if os.path.exists(self._file+'.'+str(self._fileBackupFiles)):
logger.debug('File Output: Removing file: %s' % self._file+'.'+str(self._fileBackupFiles))
os.unlink(self._file+'.'+str(self._fileBackupFiles))
for x in range(1, self._fileBackupFiles)[::-1]:
logger.debug('File Output: Checking for file: %s' % self._file+'.'+str(x))
if os.path.exists(self._file+'.'+str(x)):
logger.debug('File Output: Renaming file %s to %s' % (self._file+'.'+str(x), self._file+'.'+str(x+1)))
os.rename(self._file+'.'+str(x), self._file+'.'+str(x+1))
os.rename(self._file, self._file+'.1')
self._fileHandle = open(self._file, 'w')
self._fileLength = 0
#elif self._outputMode == 'splunkstream':
# if msg[-1] != '\n':
# msg += '\n'
# logger.debug("Sending %s to self._splunkhttp" % msg)
# splunkhttp.send(msg)
elif self._outputMode in ('splunkstream', 'stormstream'):
streamout += msg
msg = queue.popleft()['_raw']
logger.debug("Queue for app '%s' sample '%s' written" % (self._app, self._sample))
except IndexError:
logger.debug("Queue for app '%s' sample '%s' written" % (self._app, self._sample))
else:
streamout = ""
# Cleanup after writing queue
if self._outputMode == 'spool':
## Move file to spool
self._workingFH.close()
spoolPath = self._spoolDir + os.sep + self._spoolFile
logger.debug("Moving '%s' to '%s' for sample '%s' in app '%s'" % (self._workingFilePath, spoolPath, self._sample, self._app))
if os.path.exists(self._workingFilePath):
if os.path.exists(spoolPath):
os.system("cat %s >> %s" % (self._workingFilePath, spoolPath))
os.remove(self._workingFilePath)
else:
shutil.move(self._workingFilePath, spoolPath)
else:
logger.error("File '%s' missing" % self._workingFilePath)
elif self._outputMode == 'file':
if not self._fileHandle.closed:
self._fileHandle.flush()
#elif self._outputMode == 'splunkstream':
# #logger.debug("Closing self._splunkhttp connection")
# logger.debug("POSTing to url %s on %s://%s:%s with sessionKey %s" \
# % (url, self._splunkMethod, self._splunkHost, self._splunkPort, self._c.sessionKey))
# splunkhttp.request("POST", url, streamout, headers)
# response = splunkhttp.getresponse()
# data = response.read()
# logger.debug("Response: %s Data returned %s" % (response, data))
# splunkhttp.close()
# splunkhttp = None
elif self._outputMode in ('splunkstream', 'stormstream'):
if len(streamout) > 0:
try:
if self._outputMode == 'splunkstream':
if self._splunkMethod == 'https':
connmethod = httplib.HTTPSConnection
else:
connmethod = httplib.HTTPConnection
self._splunkhttp = connmethod(self._splunkHost, self._splunkPort)
else:
self._splunkhttp = httplib.HTTPSConnection('api.splunkstorm.com', 443)
urlparms = [ ]
if self._source != None:
urlparms.append(('source', self._source))
if self._sourcetype != None:
urlparms.append(('sourcetype', self._sourcetype))
if self._host != None:
urlparms.append(('host', self._host))
if self._outputMode == 'splunkstream':
if index != None:
urlparms.append(('index', index))
url = '/services/receivers/simple?%s' % (urllib.urlencode(urlparms))
headers = {'Authorization': "Splunk %s" % self._c.sessionKey }
else:
if self._projectID != None:
urlparms.append(('project', self._projectID))
url = '/1/inputs/http?%s' % (urllib.urlencode(urlparms))
headers = {'Authorization': "Basic %s" % base64.b64encode(':'+self._accessToken)}
self._splunkhttp.request("POST", url, streamout, headers)
logger.debug("POSTing to url %s on %s://%s:%s with sessionKey %s" \
% (url, self._splunkMethod, self._splunkHost, self._splunkPort, self._c.sessionKey))
#logger.debug("POSTing to url %s on https://api.splunkstorm.com with accessToken %s" \
# % (url, base64.b64encode(self._accessToken+':')))
except httplib.HTTPException:
logger.error('Error connecting to Splunk for logging for sample %s' % self._sample)
raise IOError('Error connecting to Splunk for logging for sample %s' % self._sample)
try:
response = self._splunkhttp.getresponse()
data = response.read()
if response.status != 200:
logger.error("Data not written to Splunk. Splunk returned %s" % data)
self._splunkhttp.close()
self._splunkhttp = None
except httplib.BadStatusLine:
logger.error("Received bad status from Storm for sample '%s'" % self._sample)
|
|
#!/usr/bin/python3
# Copyright (c) 2017 Johannes Leupolz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio, gbulb
from pydbus import SessionBus
from pydbus import SystemBus
import unittest
from pydbus.generic import signal
from gi.repository import GLib, Gio
from core import BluetoothAudioBridge
class TestFakeObjectManager():
# org.freedesktop.DBus.ObjectManager.GetManagedObjects (out DICT<OBJPATH,DICT<STRING,DICT<STRING,VARIANT>>> objpath_interfaces_and_properties);
# org.freedesktop.DBus.ObjectManager.InterfacesAdded (OBJPATH object_path, DICT<STRING,DICT<STRING,VARIANT>> interfaces_and_properties);
# org.freedesktop.DBus.ObjectManager.InterfacesRemoved (OBJPATH object_path, ARRAY<STRING> interfaces);
dbus="""
<node>
<interface name='org.freedesktop.DBus.ObjectManager'>
<method name='GetManagedObjects'>
<arg type='a{oa{sa{sv}}}' name='objpath_interfaces_and_properties' direction='out'/>
</method>
<signal name="InterfacesAdded">
<arg direction="out" type="o" name="object_path"/>
<arg direction="out" type="a{sa{sv}}" name="interfaces_and_properties"/>
</signal>
<signal name="InterfacesRemoved">
<arg direction="out" type="o" name="object_path"/>
<arg direction="out" type="as" name="interfaces"/>
</signal>
</interface>
</node>"""
def GetManagedObjects(self):
"""get managed objects"""
print("get managed objects")
result = {}
for path,obj in self.objects.items():
resObj = {}
node_info = type(obj).dbus
node_info = Gio.DBusNodeInfo.new_for_xml(node_info)
interfaces = node_info.interfaces
for interface in interfaces:
resInterface={}
for p in interface.properties:
pvalue = getattr(obj,p.name)
pvalueVariant=GLib.Variant(p.signature,pvalue)
if pvalueVariant is None:
print("could not convert value "+str(pvalue)+" of "+p.name+" ("+str(type(pvalue))+") to a variant")
else:
resInterface[p.name]=pvalueVariant
resObj[interface.name]=resInterface
result[path]=resObj
return result
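# With the fake adapter below registered under .../hci0, the returned mapping
# is expected to look roughly like (illustrative values):
#   {'/BluetoothAudioBridge/FakeDbusObject/hci0':
#       {'BluetoothAudioBridge.FakeDbusObject.Adapter1':
#           {'Address': GLib.Variant('s', 'initial value')}}}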
def __init__(self,bus):
self.bus=bus
self.objects = {}
self.registrations = {}
def export(self,path,obj):
newRegistration=self.bus.register_object(path,obj,None)
self.objects[path]=obj
self.registrations[path]=newRegistration
return newRegistration
def unexport(self,path):
self.registrations[path].unregister()
self.registrations.pop(path)
self.objects.pop(path)
PropertiesChanged = signal()
InterfacesAdded = signal()
InterfacesRemoved = signal()
class TestFakeDbusBluezAdapter():
dbus="""
<node>
<interface name='BluetoothAudioBridge.FakeDbusObject.Adapter1'>
<method name='StartDiscovery'>
</method>
<method name='StopDiscovery'>
</method>
<property name="Address" type="s" access="read">
</property>
</interface>
</node>"""
def StartDiscovery(self):
"""start discovery"""
print("startDiscovery")
self.fakes.TestResult=self.fakes.TestResult+1
def StopDiscovery(self):
"""stop discovery"""
print("stopDiscovery")
self.fakes.TestResult=self.fakes.TestResult+2
def __init__(self,fakes):
self._address = "initial value"
self.fakes=fakes
@property
def Address(self):
return self._address
@Address.setter
def Address(self, value):
self._address = value
self.PropertiesChanged("BluetoothAudioBridge.FakeDbusObject.Adapter1", {"Address": self._address}, [])
PropertiesChanged = signal()
class TestFakeDbusBluezDevice():
dbus="""
<node>
<interface name='BluetoothAudioBridge.FakeDbusObject.Device1'>
<method name='Connect'>
</method>
<method name='Disconnect'>
</method>
<method name='Pair'>
</method>
<property name="Connected" type="b" access="read">
</property>
<property name="Trusted" type="b" access="readwrite">
</property>
<property name="UUIDs" type="as" access="read">
</property>
</interface>
</node>"""
def Connect(self):
"""connect"""
print("connect")
self.fakes.TestResult=self.fakes.TestResult+1
def Disconnect(self):
"""disconnect"""
print("disconnect")
self.fakes.TestResult=self.fakes.TestResult+2
def Pair(self):
"""pair"""
print("pair")
self.fakes.TestResult=self.fakes.TestResult+4
def __init__(self,fakes):
self._address = "initial value"
self._connected = False
self._trusted = False
self._uuids = ['0000110b-0000-1000-8000-00805f9b34fb']
self.fakes=fakes
@property
def Address(self):
return self._address
@Address.setter
def Address(self, value):
self._address = value
self.PropertiesChanged("BluetoothAudioBridge.FakeDbusObject.Device1", {"Address": self._address}, [])
@property
def Connected(self):
return self._connected
@Connected.setter
def Connected(self, value):
print("dummy-connection: "+str(value))
self._connected = value
self.PropertiesChanged("BluetoothAudioBridge.FakeDbusObject.Device1", {"Connected": self._connected}, [])
@property
def Trusted(self):
#self.fakes.TestResult=self.fakes.TestResult+8
return self._trusted
@Trusted.setter
def Trusted(self, value):
self._trusted = value
self.PropertiesChanged("BluetoothAudioBridge.FakeDbusObject.Device1", {"Trusted": self._trusted}, [])
@property
def UUIDs(self):
return self._uuids
@UUIDs.setter
def UUIDs(self, value):
self._uuids = value
self.PropertiesChanged("BluetoothAudioBridge.FakeDbusObject.Device1", {"UUIDs": self._uuids}, [])
PropertiesChanged = signal()
class TestFakeMethods():
def __init__(self,bluetoothAudioBridge):
self.TestResult = 0
self.bluetoothAudioBridge=bluetoothAudioBridge
self.bluetoothAudioBridge.DbusBluezBusName = "BluetoothAudioBridge.FakeDbusObject"
self.bluetoothAudioBridge.DbusBluezObjectPath = "/BluetoothAudioBridge/FakeDbusObject/hci0"
self.bluetoothAudioBridge.PollingCycle = 1
self.fakeDbusDevices = []
self.fakeDbusAdapter = None
self.fakeDbusAdapterRegistration = None
self.fakeDbusObjectManager = None
self.fakeDbusObjectManagerRegistration = None
self.bus = None
self.bluetoothAudioBridge.DbusBluezOnSystemBus=False
self.busName = None
def callerWithOneParameterWasCalled(self):
def methodCall(parameter):
print("parameter "+parameter)
#print(self.TestResult)
self.TestResult=self.TestResult+1
return methodCall
def callerWithOneParameterWasCalledAsync(self):
async def methodCall(parameter):
print("parameter "+parameter)
self.TestResult=self.TestResult+1
return methodCall
async def unexportAllDevices(self):
if self.fakeDbusObjectManager:
for name,obj in self.fakeDbusDevices:
self.fakeDbusObjectManager.unexport(name)
self.fakeDbusDevices=[]
#if self.fakeDbusDevice:
# self.fakeDbusDevice.unregister()
#if self.fakeDbusObjectManager:
# self.fakeDbusObjectManager.unregister()
async def unexportDevice(self,path):
self.fakeDbusObjectManager.unexport(path)
self.fakeDbusDevices.remove(path)
async def startTestFakeDbusBluez(self):
if not self.bus:
self.bus = SessionBus()
await self.unexportAllDevices()
if self.busName:
self.busName.unown()
if self.fakeDbusAdapterRegistration:
self.fakeDbusAdapterRegistration.unregister()
self.fakeDbusAdapterRegistration=None
if self.fakeDbusObjectManagerRegistration:
self.fakeDbusObjectManagerRegistration.unregister()
self.fakeDbusObjectManagerRegistration=None
await asyncio.sleep(0.5)
prefix = "/"+ self.bluetoothAudioBridge.DbusBluezBusName.replace(".","/")
self.fakeDbusObjectManager = TestFakeObjectManager(self.bus)
self.fakeDbusAdapter = TestFakeDbusBluezAdapter(self)
self.fakeDbusObjectManagerRegistration=self.bus.register_object("/",self.fakeDbusObjectManager,None)
self.fakeDbusAdapterRegistration=self.fakeDbusObjectManager.export(prefix+"/hci0",self.fakeDbusAdapter)
self.busName=self.bus.request_name(self.bluetoothAudioBridge.DbusBluezBusName)
async def exportNewDevice(self,name):
prefix = "/"+ self.bluetoothAudioBridge.DbusBluezBusName.replace(".","/")
self.fakeDbusDevice = TestFakeDbusBluezDevice(self)
result = (prefix+"/hci0/dev_"+name,self.fakeDbusDevice)
self.fakeDbusObjectManager.export(result[0],result[1])
self.fakeDbusDevices.append(result)
return result
async def stopTestFakeDbusBluez(self):
await self.unexportAllDevices()
if (self.fakeDbusObjectManagerRegistration):
self.fakeDbusObjectManagerRegistration.unregister()
if (self.fakeDbusAdapterRegistration):
self.fakeDbusAdapterRegistration.unregister()
if self.busName:
self.busName.unown()
self.busName = None
self.fakeDbusDevices = []
self.fakeDbusObject = None
self.fakeDbusObjectManager = None
self.fakeDbusObjectManagerRegistration=None
self.fakeDbusAdapter = None
self.fakeDbusAdapterRegistration=None
self.bus = None
await asyncio.sleep(0.5)
async def cancelIn2Seconds(self):
await asyncio.sleep(2)
self.bluetoothAudioBridge.CancellationToken.set_result(True)
async def setResultInXSecondsCancelable(self,time):
(finished,result) = await self.bluetoothAudioBridge.awaitOrStop(asyncio.sleep(time))
if finished:
print("set Result to true")
self.TestResult=1
class TestBridge(unittest.TestCase):
def setUp(self):
gbulb.install()
self.loop=asyncio.get_event_loop()
self.bluetoothAudioBridge=BluetoothAudioBridge(self.loop)
self.fakes=TestFakeMethods(self.bluetoothAudioBridge)
def atest_startFakeObjectManager(self):
self.loop.run_until_complete(self.fakes.startTestFakeDbusBluez())
self.loop.run_until_complete(self.fakes.exportNewDevice("dev_aa_12_00_41_aa_01"))
self.loop.run_until_complete(asyncio.sleep(30))
self.loop.run_until_complete(self.fakes.stopTestFakeDbusBluez())
def test_detectMockedBluetoothDevice(self):
self.bluetoothAudioBridge.dbusBtDeviceDetected=self.fakes.callerWithOneParameterWasCalledAsync()
self.loop.run_until_complete(self.fakes.startTestFakeDbusBluez())
self.loop.run_until_complete(self.bluetoothAudioBridge.registerDbus())
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.fakes.exportNewDevice("aa_12_00_41_aa_01"))
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.bluetoothAudioBridge.unregister())
self.loop.run_until_complete(self.fakes.stopTestFakeDbusBluez())
self.assertEqual(self.fakes.TestResult,1)
def test_removeMockedBluetoothDevice(self):
self.bluetoothAudioBridge.dbusBtDeviceRemoved=self.fakes.callerWithOneParameterWasCalledAsync()
self.loop.run_until_complete(self.fakes.startTestFakeDbusBluez())
self.loop.run_until_complete(self.bluetoothAudioBridge.registerDbus())
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.fakes.exportNewDevice("aa_12_00_41_aa_01"))
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.fakes.unexportAllDevices())
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.bluetoothAudioBridge.unregister())
self.loop.run_until_complete(self.fakes.stopTestFakeDbusBluez())
self.assertEqual(self.fakes.TestResult,1)
def test_detectMockedBluetoothDeviceConnection(self):
self.bluetoothAudioBridge.dbusBtDeviceConnected=self.fakes.callerWithOneParameterWasCalledAsync()
self.loop.run_until_complete(self.fakes.startTestFakeDbusBluez())
self.loop.run_until_complete(self.bluetoothAudioBridge.registerDbus())
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.fakes.exportNewDevice("aa_12_00_41_aa_01"))
devicepath,deviceobj=self.fakes.fakeDbusDevices[0]
deviceobj.Connected=True
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.bluetoothAudioBridge.unregister())
self.loop.run_until_complete(self.fakes.stopTestFakeDbusBluez())
self.assertEqual(self.fakes.TestResult,1)
def test_detectMockedBluetoothDeviceDisconnection(self):
self.bluetoothAudioBridge.dbusBtDeviceDisconnected=self.fakes.callerWithOneParameterWasCalledAsync()
self.loop.run_until_complete(self.fakes.startTestFakeDbusBluez())
self.loop.run_until_complete(self.bluetoothAudioBridge.registerDbus())
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.fakes.exportNewDevice("aa_12_00_41_aa_01"))
devicepath,deviceobj=self.fakes.fakeDbusDevices[0]
deviceobj.Connected=True
self.loop.run_until_complete(asyncio.sleep(2))
deviceobj.Connected=False
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.bluetoothAudioBridge.unregister())
self.loop.run_until_complete(self.fakes.stopTestFakeDbusBluez())
self.assertEqual(self.fakes.TestResult,1)
def atest_listMockedDbusEntriesOnScanMessage(self):
self.loop.run_until_complete(self.fakes.startFakeBroker())
self.loop.run_until_complete(self.bluetoothAudioBridge.registerMqtt())
self.loop.run_until_complete(asyncio.sleep(2))
self.loop.run_until_complete(self.bluetoothAudioBridge.unregister())
self.loop.run_until_complete(self.fakes.stopFakeBroker())
#self.assertTrue(self.fakes.BridgeWorks)
def atest_awaitOrStop1(self):
asyncio.ensure_future(self.fakes.cancelIn2Seconds())
asyncio.ensure_future(self.fakes.setResultInXSecondsCancelable(3))
self.loop.run_until_complete(asyncio.sleep(4))
self.assertEqual(self.fakes.TestResult,0)
def atest_awaitOrStop2(self):
asyncio.ensure_future(self.fakes.cancelIn2Seconds())
asyncio.ensure_future(self.fakes.setResultInXSecondsCancelable(1))
self.loop.run_until_complete(asyncio.sleep(4))
self.assertEqual(self.fakes.TestResult,1)
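
# Illustrative sketch (added for clarity, not part of the original tests): the
# cancellation pattern exercised by cancelIn2Seconds/setResultInXSecondsCancelable
# above. The bridge's awaitOrStop and CancellationToken are assumed to behave
# roughly like asyncio.wait with FIRST_COMPLETED: either the awaited work
# finishes, or the token fires first and the work is abandoned.
async def _await_or_stop_sketch(work_coro, cancellation_future):
    work = asyncio.ensure_future(work_coro)
    done, _ = await asyncio.wait(
        {work, cancellation_future}, return_when=asyncio.FIRST_COMPLETED)
    if work in done:
        # The work completed before the cancellation token fired.
        return (True, work.result())
    # The token fired first; abandon the work, mirroring the (False, ...) result.
    work.cancel()
    return (False, None)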
if __name__ == '__main__':
unittest.main()
|
|
import asyncio
import logging
import threading
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog, messagebox
import webbrowser
import json
from modis.tools import data, config
logger = logging.getLogger(__name__)
class Frame(ttk.Frame):
"""A tab containing the core controls of the bot"""
def __init__(self, parent):
"""Create the frame.
Args:
parent: A tk or ttk object.
"""
super(Frame, self).__init__(parent, padding=8)
# Add elements
info = self.Info(self)
control = self.Control(self)
log = self.Log(self)
# Grid elements
info.grid(column=0, row=0, padx=8, pady=8, sticky="W E N S")
control.grid(column=1, row=0, padx=8, pady=8, sticky="W E N S")
log.grid(column=0, columnspan=2, row=1, padx=8, pady=8, sticky="W E N S")
# Configure stretch ratios
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=0)
self.rowconfigure(1, weight=1)
class Info(ttk.LabelFrame):
"""The control panel for the Modis bot."""
def __init__(self, parent):
"""Create the frame.
Args:
parent: A tk or ttk object.
"""
super(Frame.Info, self).__init__(parent, padding=8, text="Info")
# Variables
self.invite_text = tk.StringVar(value="Paste Client ID here for invite link")
# Add elements
def hyperlink_website(event):
webbrowser.open_new("https://modisworks.github.io/")
def hyperlink_discord(event):
webbrowser.open_new("https://modisworks.github.io/#getting-started")
def hyperlink_invite(event):
client_id = self.invite_text.get()
if len(client_id) != 18:
messagebox.showerror(title="Invalid Client ID", message="Client ID should be an 18 digit number.")
return
try:
int(client_id)
except ValueError:
messagebox.showerror(title="Invalid Client ID", message="Client ID should be an 18 digit number.")
return
webbrowser.open_new("https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=0".format(client_id))
image = tk.PhotoImage(file=__file__[:-16] + "assets/64t.png")
logo = tk.Label(self, image=image)
logo.image = image
name = tk.Label(self, text="Welcome to Modis c:", justify="left")
website = tk.Label(self, text="Website", fg="blue", cursor="hand2")
website.bind("<Button-1>", hyperlink_website)
discord = tk.Label(self, text="Discord server", fg="blue", cursor="hand2")
discord.bind("<Button-1>", hyperlink_discord)
clientid_entry = ttk.Entry(self, textvariable=self.invite_text)
invite_link = tk.Label(self, text="Invite bot to server", fg="blue", cursor="hand2")
invite_link.bind("<Button-1>", hyperlink_invite)
# Grid elements
logo.grid(column=0, row=0, rowspan=3, padx=4, pady=4, sticky="W")
name.grid(column=1, row=0, padx=4, pady=4, sticky="W")
website.grid(column=1, row=1, padx=4, pady=0, sticky="W")
discord.grid(column=1, row=2, padx=4, pady=0, sticky="W")
clientid_entry.grid(column=0, columnspan=2, row=3, padx=4, pady=4, sticky="W E")
invite_link.grid(column=0, columnspan=2, row=4, padx=4, pady=0, sticky="W")
# Configure stretch ratios
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=0)
self.rowconfigure(1, weight=0)
self.rowconfigure(2, weight=0)
self.rowconfigure(3, weight=0)
self.rowconfigure(4, weight=0)
class Control(ttk.Labelframe):
"""The control panel for the Modis bot."""
def __init__(self, parent):
"""Create the frame.
Args:
parent: A tk or ttk object.
"""
super(Frame.Control, self).__init__(parent, padding=8, text="Control")
# Variables
self.thread = None
self.datapath = tk.StringVar(value=config.DATAFILE)
self.token = tk.StringVar(value=data.cache["keys"]["discord_token"])
self.state = "off"
self.button_text = tk.StringVar(value="Start Modis")
# Add elements
datapath_label = ttk.Label(self, text="Data file path:")
datapath_entry = ttk.Entry(self, textvariable=self.datapath, state="readonly")
datapath_button = ttk.Button(self, command=self.set_data_location, text="Change")
token_label = ttk.Label(self, text="Discord bot token:")
token_entry = ttk.Entry(self, textvariable=self.token, show="\u25cf")
start_button = ttk.Button(self, command=self.toggle, textvariable=self.button_text)
# Grid elements
datapath_label.grid(column=0, row=0, padx=4, pady=4, sticky="E")
datapath_entry.grid(column=1, row=0, padx=4, pady=4, sticky="W E")
datapath_button.grid(column=2, row=0, padx=4, pady=4, sticky="E")
token_label.grid(column=0, row=1, padx=4, pady=4, sticky="E")
token_entry.grid(column=1, columnspan=2, row=1, padx=4, pady=4, sticky="W E")
start_button.grid(column=2, columnspan=3, row=3, padx=4, pady=4, sticky="E")
# Configure stretch ratios
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=0)
self.rowconfigure(0, weight=0)
self.rowconfigure(1, weight=0)
self.rowconfigure(2, weight=1)
self.rowconfigure(3, weight=0)
def set_data_location(self):
newpath = filedialog.askopenfile()
oldpath = config.DATAFILE
try:
newpath = newpath.name
except AttributeError:
# Window was closed
logger.warning("Data file not changed")
return
if not messagebox.askokcancel(title="Change data file path", message="Change data file to:\n{}".format(newpath)):
# User cancelled path change
messagebox.showinfo(title="Change data file path", message="Data file not changed.")
return
# Change the path
config.DATAFILE = newpath
try:
data.pull()
except json.decoder.JSONDecodeError:
# Chosen file invalid
logger.error("Chosen file is not a valid json; reverting changes")
messagebox.showerror(title="Change data file path", message="Chosen file is not a valid json.")
# Try again
config.DATAFILE = oldpath
data.pull()
self.set_data_location()
return
# Successful change
self.datapath.set(newpath)
logger.warning("data file changed to " + config.DATAFILE)
messagebox.showinfo(title="Change data file path", message="Data file change successful.")
def toggle(self):
"""Toggle Modis on or off."""
if self.state == 'off':
self.start()
elif self.state == 'on':
self.stop()
def start(self):
"""Start Modis and log it into Discord."""
self.button_text.set("Stop Modis")
self.state = "on"
logger.warning("Starting Modis")
statuslog = logging.getLogger("globalstatus")
statuslog.info("1")
data.cache["keys"]["discord_token"] = self.token.get()
data.push()
from modis import main
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.thread = threading.Thread(target=main.start, args=[loop])
self.thread.start()
def stop(self):
"""Stop Modis and log out of Discord."""
self.button_text.set("Start Modis")
self.state = "off"
logger.warning("Stopping Modis")
statuslog = logging.getLogger("globalstatus")
statuslog.info("0")
from modis.main import client
# Logout
try:
asyncio.run_coroutine_threadsafe(client.logout(), client.loop)
except AttributeError:
# Client object no longer exists
pass
try:
self.thread.stop()
except AttributeError:
# Thread no longer exists
return
# Cancel all pending tasks
# TODO Fix this
# try:
# pending = asyncio.Task.all_tasks(loop=client.loop)
# gathered = asyncio.gather(*pending, loop=client.loop)
# gathered.cancel()
# client.loop.run_until_complete(gathered)
# gathered.exception()
# except Exception as e:
# logger.exception(e)
class Log(ttk.Labelframe):
"""The text box showing the logging output"""
def __init__(self, parent):
"""Create the frame.
Args:
parent: A tk or ttk object.
"""
super(Frame.Log, self).__init__(parent, padding=8, text="Log")
# Add elements
log_panel = tk.Text(self, wrap="none")
formatter = logging.Formatter("{levelname:8} {name} - {message}", style="{")
handler = self.PanelHandler(log_panel)
handler.setFormatter(formatter)
root_logger = logging.getLogger("modis")
root_logger.addHandler(handler)
log_panel.configure(background="#202020")
log_panel.tag_config('CRITICAL', foreground="#FF00AA")
log_panel.tag_config('ERROR', foreground="#FFAA00")
log_panel.tag_config('WARNING', foreground="#00AAFF")
log_panel.tag_config('INFO', foreground="#AAAAAA")
log_panel.tag_config('DEBUG', foreground="#444444")
yscrollbar = ttk.Scrollbar(self, orient="vertical", command=log_panel.yview)
xscrollbar = ttk.Scrollbar(self, orient="horizontal", command=log_panel.xview)
log_panel['yscrollcommand'] = yscrollbar.set
log_panel['xscrollcommand'] = xscrollbar.set
# Grid elements
log_panel.grid(column=0, row=0, sticky="W E N S")
yscrollbar.grid(column=1, row=0, sticky="N S")
xscrollbar.grid(column=0, row=1, sticky="W E")
# Configure stretch ratios
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
class PanelHandler(logging.Handler):
def __init__(self, text_widget):
logging.Handler.__init__(self)
self.text_widget = text_widget
self.text_widget.config(state=tk.DISABLED)
def emit(self, record):
msg = self.format(record)
msg_level = logging.Formatter("{levelname}", style="{").format(record)
# Remove '.modis' from start of logs
msg = msg[:9] + msg[15:]
# Exceptions
if msg_level.startswith("ERROR"):
msg_level = "ERROR"
self.text_widget.config(state=tk.NORMAL)
self.text_widget.insert("end", msg + "\n", msg_level)
self.text_widget.config(state=tk.DISABLED)
self.text_widget.see("end")
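
# Illustrative sketch (added for clarity, not part of the original module): how this
# Frame is typically embedded in a window. The notebook layout and window title are
# hypothetical, and the sketch assumes the Modis data file and GUI assets read by
# Frame.Info and Frame.Control are already in place.
def _example_embed_frame():
    root = tk.Tk()
    root.title("Modis")
    notebook = ttk.Notebook(root)
    notebook.add(Frame(notebook), text="Core")
    notebook.pack(fill="both", expand=True)
    root.mainloop()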
|
|
import os
import sys
import subprocess
import six
import plistlib
from bento.compat.api \
import \
check_call
BENTO_INFO = "0.0.5"
MKBOM = "/usr/bin/mkbom"
def _unicode(*args, **kw):
if six.PY3:
return str(*args, **kw)
else:
return unicode(*args, **kw)
def path_requirement(SpecArgument, Level=six.u('requires'), **kw):
return dict(
Level=Level,
SpecType=six.u('file'),
SpecArgument=unicode_path(SpecArgument),
SpecProperty=six.u('NSFileType'),
TestOperator=six.u('eq'),
TestObject=six.u('NSFileTypeDirectory'),
**kw
)
def common_info(pkg_info):
# Keys that can appear in any package
name = _unicode(pkg_info.name)
major, minor = pkg_info.version_info[0], pkg_info.version_info[1]
version = pkg_info.version
defaults = dict(
CFBundleGetInfoString='%s %s' % (name, version),
CFBundleIdentifier='org.pythonmac.%s' % (name,),
CFBundleName=name,
CFBundleShortVersionString=_unicode(version),
IFMajorVersion=major,
IFMinorRevision=minor,
IFPkgFormatVersion=0.10000000149011612,
IFRequirementDicts=[path_requirement(six.u('/'))],
PythonInfoDict=dict(
PythonLongVersion=_unicode(sys.version),
PythonShortVersion=_unicode(sys.version[:3]),
PythonExecutable=_unicode(sys.executable),
bento_version=dict(
version=BENTO_INFO
),
),
)
return defaults
def common_description(pkg_info):
return dict(
IFPkgDescriptionTitle=_unicode(pkg_info.name),
IFPkgDescriptionVersion=_unicode(pkg_info.version),
)
def unicode_path(path, encoding=sys.getfilesystemencoding()):
if isinstance(path, six.text_type):
return path
return _unicode(path, encoding)
def write(dct, path):
p = plistlib.Plist()
p.update(dct)
p.write(path)
def ensure_directories(pkg_info):
for d in [pkg_info.contents, pkg_info.resources, pkg_info.en_lproj]:
if not os.path.exists(d):
os.makedirs(d)
def build_bom(pkg_info):
check_call([MKBOM, pkg_info.source_root, pkg_info.bom])
def build_archive(pkg_info):
check_call(["pax", "-w", "-f", pkg_info.archive, "-x", "cpio", "-z", "."],
cwd=pkg_info.source_root)
def build_info_plist(pkg_info):
d = common_info(pkg_info)
# Keys that can only appear in single packages
d.update(dict(
IFPkgFlagAllowBackRev=False,
IFPkgFlagAuthorizationAction=six.u('AdminAuthorization'),
IFPkgFlagFollowLinks=True,
IFPkgFlagInstallFat=False,
IFPkgFlagIsRequired=False,
IFPkgFlagOverwritePermissions=False,
IFPkgFlagRelocatable=False,
IFPkgFlagRestartAction=six.u('NoRestart'),
IFPkgFlagRootVolumeOnly=True,
IFPkgFlagUpdateInstalledLangauges=False,
))
d.update(dict(
IFPkgFlagAuthorizationAction=pkg_info.auth,
IFPkgFlagDefaultLocation=unicode_path(pkg_info.prefix),
))
write(d, pkg_info.info_plist)
def build_pkg_info(pkg_info):
fid = open(pkg_info.pkg_info, "w")
try:
fid.write("pmkrpkg1")
finally:
fid.close()
def build_description_plist(pkg_info):
desc = common_description(pkg_info)
desc['IFPkgDescriptionDescription'] = pkg_info.description
write(desc, pkg_info.description_plist)
def build_pkg(pkg_info):
ensure_directories(pkg_info)
build_bom(pkg_info)
build_archive(pkg_info)
build_info_plist(pkg_info)
build_pkg_info(pkg_info)
build_description_plist(pkg_info)
class PackageInfo(object):
def __init__(self, pkg_name, prefix, source_root, pkg_root, admin=True, description=None, version=None):
if admin:
self.auth = six.u("AdminAuthorization")
else:
self.auth = six.u("RootAuthorization")
# Where things will be installed by Mac OS X installer
self.prefix = prefix
# Root directory for files to be packaged
self.source_root = source_root
# Root directory for produced .pkg directory/file
self.pkg_root = pkg_root
self.name = pkg_name
# FIXME: version handling -> use distutils2 version module
self.version_info = (0, 0, 5, None)
if version is None:
self.version = ""
else:
self.version = version
if description:
self.description = description
else:
self.description = ""
self.contents = os.path.join(self.pkg_root, "Contents")
self.resources = os.path.join(self.contents, "Resources")
self.en_lproj = os.path.join(self.resources, "en.lproj")
self.bom = os.path.join(self.contents, "Archive.bom")
self.archive = os.path.join(self.contents, "Archive.pax.gz")
self.info_plist = os.path.join(self.contents, "Info.plist")
self.pkg_info = os.path.join(self.contents, "PkgInfo")
self.description_plist = os.path.join(self.en_lproj, "Description.plist")
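
# Illustrative sketch (not part of the original module): wiring PackageInfo into
# build_pkg(). All paths are hypothetical placeholders, and building requires the
# macOS mkbom and pax command line tools used above.
def _example_build_pkg():
    pkg_info = PackageInfo(
        pkg_name="example",
        prefix="/usr/local",
        source_root="build/pkg-root",   # files already laid out for installation
        pkg_root="dist/example.pkg",    # .pkg bundle directory to produce
        admin=True,
        description="Example package",
        version="0.0.5")
    build_pkg(pkg_info)  # writes the bom, archive, Info.plist, PkgInfo and Description.plist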
class MetaPackageInfo(object):
@classmethod
def from_build_manifest(cls, build_manifest):
m = build_manifest.meta
info_string = "%s %s" % (m["name"], m["version"])
identifier = "com.github.cournape.bento"
version_info = (0, 0, 5)
return cls(m["name"], info_string, version_info, identifier, m["summary"])
def __init__(self, name, info_string, version_info, identifier, summary):
self.major, self.minor, self.micro = version_info[0], version_info[1], version_info[2]
self.info_string = info_string
self.name = name
self.identifier = identifier
self.description = summary
self.short_version = ".".join([str(i) for i in [self.major, self.minor, self.micro]])
def make_mpkg_plist(mpkg_info, path):
pl = dict(
CFBundleGetInfoString=mpkg_info.info_string,
CFBundleIdentifier=mpkg_info.identifier,
CFBundleName=mpkg_info.name,
CFBundleShortVersionString=mpkg_info.short_version,
IFMajorVersion=mpkg_info.major,
IFMinorVersion=mpkg_info.minor,
IFPkgFlagComponentDirectory="Contents/Packages",
IFPkgFlagPackageList=[
dict(
IFPkgFlagPackageLocation=pkg,
IFPkgFlagPackageSelection='selected'
)
for pkg in mpkg_info.packages
],
IFPkgFormatVersion=0.10000000149011612,
IFPkgFlagBackgroundScaling="proportional",
IFPkgFlagBackgroundAlignment="left",
IFPkgFlagAuthorizationAction="RootAuthorization",
)
write(pl, path)
return pl
def make_mpkg_description(mpkg_info, path):
d = dict(IFPkgDescriptionTitle=mpkg_info.name,
IFPkgDescriptionDescription=mpkg_info.description,
IFPkgDescriptionVersion=mpkg_info.short_version)
write(d, path)
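
# Illustrative sketch (not part of the original module): rendering the metapackage
# plists. MetaPackageInfo does not set `packages` itself, so the caller is assumed
# to attach the list of sub-package locations (relative to Contents/Packages)
# before calling make_mpkg_plist. The names and paths below are hypothetical.
def _example_build_mpkg_plists(outdir):
    mpkg_info = MetaPackageInfo(
        name="example",
        info_string="example 0.0.5",
        version_info=(0, 0, 5),
        identifier="com.github.cournape.bento",
        summary="Example metapackage")
    mpkg_info.packages = ["example.pkg"]
    make_mpkg_plist(mpkg_info, os.path.join(outdir, "Info.plist"))
    make_mpkg_description(mpkg_info, os.path.join(outdir, "Description.plist"))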
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import collections.abc
import copy
import re
from typing import List, Type
from google.protobuf import descriptor_pb2
from google.protobuf import message
from google.protobuf.json_format import MessageToDict, MessageToJson, Parse
from proto import _file_info
from proto import _package_info
from proto.fields import Field
from proto.fields import MapField
from proto.fields import RepeatedField
from proto.marshal import Marshal
from proto.primitives import ProtoType
class MessageMeta(type):
"""A metaclass for building and registering Message subclasses."""
def __new__(mcls, name, bases, attrs):
# Do not do any special behavior for Message itself.
if not bases:
return super().__new__(mcls, name, bases, attrs)
# Get the essential information about the proto package, and where
# this component belongs within the file.
package, marshal = _package_info.compile(name, attrs)
# Determine the local path of this proto component within the file.
local_path = tuple(attrs.get("__qualname__", name).split("."))
# Sanity check: We get the wrong full name if a class is declared
# inside a function local scope; correct this.
if "<locals>" in local_path:
ix = local_path.index("<locals>")
local_path = local_path[: ix - 1] + local_path[ix + 1 :]
# Determine the full name in protocol buffers.
full_name = ".".join((package,) + local_path).lstrip(".")
# Special case: Maps. Map fields are special; they are essentially
# shorthand for a nested message and a repeated field of that message.
# Decompose each map into its constituent form.
# https://developers.google.com/protocol-buffers/docs/proto3#maps
map_fields = {}
for key, field in attrs.items():
if not isinstance(field, MapField):
continue
# Determine the name of the entry message.
msg_name = "{pascal_key}Entry".format(
pascal_key=re.sub(
r"_\w", lambda m: m.group()[1:].upper(), key,
).replace(key[0], key[0].upper(), 1),
)
# Create the "entry" message (with the key and value fields).
#
# Note: We instantiate an ordered dictionary here and then
# attach key and value in order to ensure that the fields are
# iterated in the correct order when the class is created.
# This is only an issue in Python 3.5, where the order is
# random (and the wrong order causes the pool to refuse to add
# the descriptor because reasons).
entry_attrs = collections.OrderedDict(
{
"__module__": attrs.get("__module__", None),
"__qualname__": "{prefix}.{name}".format(
prefix=attrs.get("__qualname__", name), name=msg_name,
),
"_pb_options": {"map_entry": True},
}
)
entry_attrs["key"] = Field(field.map_key_type, number=1)
entry_attrs["value"] = Field(
field.proto_type, number=2, enum=field.enum, message=field.message,
)
map_fields[msg_name] = MessageMeta(msg_name, (Message,), entry_attrs)
# Create the repeated field for the entry message.
map_fields[key] = RepeatedField(
ProtoType.MESSAGE, number=field.number, message=map_fields[msg_name],
)
# Add the new entries to the attrs
attrs.update(map_fields)
# Okay, now we deal with all the rest of the fields.
# Iterate over all the attributes and separate the fields into
# their own sequence.
fields = []
new_attrs = {}
oneofs = collections.OrderedDict()
proto_imports = set()
index = 0
for key, field in attrs.items():
# Sanity check: If this is not a field, do nothing.
if not isinstance(field, Field):
# The field objects themselves should not be direct attributes.
new_attrs[key] = field
continue
# Add data that the field requires that we do not take in the
# constructor because we can derive it from the metaclass.
# (The goal is to make the declaration syntax as nice as possible.)
field.mcls_data = {
"name": key,
"parent_name": full_name,
"index": index,
"package": package,
}
# Add the field to the list of fields.
fields.append(field)
# If this field is part of a "oneof", ensure the oneof itself
# is represented.
if field.oneof:
# Keep a running tally of the index of each oneof, and assign
# that index to the field's descriptor.
oneofs.setdefault(field.oneof, len(oneofs))
field.descriptor.oneof_index = oneofs[field.oneof]
# If this field references a message, it may be from another
# proto file; ensure we know about the import (to faithfully
# construct our file descriptor proto).
if field.message and not isinstance(field.message, str):
field_msg = field.message
if hasattr(field_msg, "pb") and callable(field_msg.pb):
field_msg = field_msg.pb()
# Sanity check: The field's message may not yet be defined if
# it was a Message defined in the same file, and the file
# descriptor proto has not yet been generated.
#
# We do nothing in this situation; everything will be handled
# correctly when the file descriptor is created later.
if field_msg:
proto_imports.add(field_msg.DESCRIPTOR.file.name)
# Same thing, but for enums.
elif field.enum and not isinstance(field.enum, str):
field_enum = (
field.enum._meta.pb
if hasattr(field.enum, "_meta")
else field.enum.DESCRIPTOR
)
if field_enum:
proto_imports.add(field_enum.file.name)
# Increment the field index counter.
index += 1
# As per descriptor.proto, all synthetic oneofs must be ordered after
# 'real' oneofs.
opt_attrs = {}
for field in fields:
if field.optional:
field.oneof = "_{}".format(field.name)
field.descriptor.oneof_index = oneofs[field.oneof] = len(oneofs)
opt_attrs[field.name] = field.name
# Generating a metaclass dynamically provides class attributes that
# instances can't see. This provides idiomatically named constants
# that enable the following pattern to check for field presence:
#
# class MyMessage(proto.Message):
# field = proto.Field(proto.INT32, number=1, optional=True)
#
# m = MyMessage()
# MyMessage.field in m
if opt_attrs:
mcls = type("AttrsMeta", (mcls,), opt_attrs)
# Determine the filename.
# We determine an appropriate proto filename based on the
# Python module.
filename = _file_info._FileInfo.proto_file_name(
new_attrs.get("__module__", name.lower())
)
# Get or create the information about the file, including the
# descriptor to which the new message descriptor shall be added.
file_info = _file_info._FileInfo.maybe_add_descriptor(filename, package)
# Ensure any imports that would be necessary are assigned to the file
# descriptor proto being created.
for proto_import in proto_imports:
if proto_import not in file_info.descriptor.dependency:
file_info.descriptor.dependency.append(proto_import)
# Retrieve any message options.
opts = descriptor_pb2.MessageOptions(**new_attrs.pop("_pb_options", {}))
# Create the underlying proto descriptor.
desc = descriptor_pb2.DescriptorProto(
name=name,
field=[i.descriptor for i in fields],
oneof_decl=[
descriptor_pb2.OneofDescriptorProto(name=i) for i in oneofs.keys()
],
options=opts,
)
# If any descriptors were nested under this one, they need to be
# attached as nested types here.
child_paths = [p for p in file_info.nested.keys() if local_path == p[:-1]]
for child_path in child_paths:
desc.nested_type.add().MergeFrom(file_info.nested.pop(child_path))
# Same thing, but for enums
child_paths = [p for p in file_info.nested_enum.keys() if local_path == p[:-1]]
for child_path in child_paths:
desc.enum_type.add().MergeFrom(file_info.nested_enum.pop(child_path))
# Add the descriptor to the file if it is a top-level descriptor,
# or to a "holding area" for nested messages otherwise.
if len(local_path) == 1:
file_info.descriptor.message_type.add().MergeFrom(desc)
else:
file_info.nested[local_path] = desc
# Create the MessageInfo instance to be attached to this message.
new_attrs["_meta"] = _MessageInfo(
fields=fields,
full_name=full_name,
marshal=marshal,
options=opts,
package=package,
)
# Run the superclass constructor.
cls = super().__new__(mcls, name, bases, new_attrs)
# The info class and fields need a reference to the class just created.
cls._meta.parent = cls
for field in cls._meta.fields.values():
field.parent = cls
# Add this message to the _FileInfo instance; this allows us to
# associate the descriptor with the message once the descriptor
# is generated.
file_info.messages[full_name] = cls
# Generate the descriptor for the file if it is ready.
if file_info.ready(new_class=cls):
file_info.generate_file_pb(new_class=cls, fallback_salt=full_name)
# Done; return the class.
return cls
@classmethod
def __prepare__(mcls, name, bases, **kwargs):
return collections.OrderedDict()
@property
def meta(cls):
return cls._meta
def __dir__(self):
try:
names = set(dir(type))
names.update(
(
"meta",
"pb",
"wrap",
"serialize",
"deserialize",
"to_json",
"from_json",
"to_dict",
"copy_from",
)
)
desc = self.pb().DESCRIPTOR
names.update(t.name for t in desc.nested_types)
names.update(e.name for e in desc.enum_types)
return names
except AttributeError:
return dir(type)
def pb(cls, obj=None, *, coerce: bool = False):
"""Return the underlying protobuf Message class or instance.
Args:
obj: If provided, and an instance of ``cls``, return the
underlying protobuf instance.
coerce (bool): If provided, will attempt to coerce ``obj`` to
``cls`` if it is not already an instance.
"""
if obj is None:
return cls.meta.pb
if not isinstance(obj, cls):
if coerce:
obj = cls(obj)
else:
raise TypeError("%r is not an instance of %s" % (obj, cls.__name__,))
return obj._pb
def wrap(cls, pb):
"""Return a Message object that shallowly wraps the descriptor.
Args:
pb: A protocol buffer object, such as would be returned by
:meth:`pb`.
"""
# Optimized fast path.
instance = cls.__new__(cls)
super(cls, instance).__setattr__("_pb", pb)
return instance
def serialize(cls, instance) -> bytes:
"""Return the serialized proto.
Args:
instance: An instance of this message type, or something
compatible (accepted by the type's constructor).
Returns:
bytes: The serialized representation of the protocol buffer.
"""
return cls.pb(instance, coerce=True).SerializeToString()
def deserialize(cls, payload: bytes) -> "Message":
"""Given a serialized proto, deserialize it into a Message instance.
Args:
payload (bytes): The serialized proto.
Returns:
~.Message: An instance of the message class against which this
method was called.
"""
return cls.wrap(cls.pb().FromString(payload))
def to_json(
cls,
instance,
*,
use_integers_for_enums=True,
including_default_value_fields=True,
preserving_proto_field_name=False,
) -> str:
"""Given a message instance, serialize it to json
Args:
instance: An instance of this message type, or something
compatible (accepted by the type's constructor).
use_integers_for_enums (Optional(bool)): An option that determines whether enum
values should be represented by strings (False) or integers (True).
Default is True.
including_default_value_fields (Optional(bool)): An option that
determines whether the default field values should be included in the results.
Default is True.
preserving_proto_field_name (Optional(bool)): An option that
determines whether field name representations preserve
proto case (snake_case) or use lowerCamelCase. Default is False.
Returns:
str: The json string representation of the protocol buffer.
"""
return MessageToJson(
cls.pb(instance),
use_integers_for_enums=use_integers_for_enums,
including_default_value_fields=including_default_value_fields,
preserving_proto_field_name=preserving_proto_field_name,
)
def from_json(cls, payload, *, ignore_unknown_fields=False) -> "Message":
"""Given a json string representing an instance,
parse it into a message.
Args:
payload: A json string representing a message.
ignore_unknown_fields (Optional(bool)): If True, do not raise errors
for unknown fields.
Returns:
~.Message: An instance of the message class against which this
method was called.
"""
instance = cls()
Parse(payload, instance._pb, ignore_unknown_fields=ignore_unknown_fields)
return instance
def to_dict(
cls,
instance,
*,
use_integers_for_enums=True,
preserving_proto_field_name=True,
including_default_value_fields=True,
) -> "Message":
"""Given a message instance, return its representation as a python dict.
Args:
instance: An instance of this message type, or something
compatible (accepted by the type's constructor).
use_integers_for_enums (Optional(bool)): An option that determines whether enum
values should be represented by strings (False) or integers (True).
Default is True.
preserving_proto_field_name (Optional(bool)): An option that
determines whether field name representations preserve
proto case (snake_case) or use lowerCamelCase. Default is True.
including_default_value_fields (Optional(bool)): An option that
determines whether the default field values should be included in the results.
Default is True.
Returns:
dict: A representation of the protocol buffer using pythonic data structures.
Messages and map fields are represented as dicts,
repeated fields are represented as lists.
"""
return MessageToDict(
cls.pb(instance),
including_default_value_fields=including_default_value_fields,
preserving_proto_field_name=preserving_proto_field_name,
use_integers_for_enums=use_integers_for_enums,
)
def copy_from(cls, instance, other):
"""Equivalent for protobuf.Message.CopyFrom
Args:
instance: An instance of this message type
other (Union[dict, ~.Message]):
A dictionary or message to reinitialize the values for this message.
"""
if isinstance(other, cls):
# Just want the underlying proto.
other = Message.pb(other)
elif isinstance(other, cls.pb()):
# Don't need to do anything.
pass
elif isinstance(other, collections.abc.Mapping):
# Coerce into a proto
other = cls._meta.pb(**other)
else:
raise TypeError(
"invalid argument type to copy to {}: {}".format(
cls.__name__, other.__class__.__name__
)
)
# Note: we can't just run self.__init__ because this may be a message field
# for a higher order proto; the memory layout for protos is NOT LIKE the
# python memory model. We cannot rely on just setting things by reference.
# Non-trivial complexity is (partially) hidden by the protobuf runtime.
cls.pb(instance).CopyFrom(other)
class Message(metaclass=MessageMeta):
"""The abstract base class for a message.
Args:
mapping (Union[dict, ~.Message]): A dictionary or message to be
used to determine the values for this message.
ignore_unknown_fields (Optional(bool)): If True, do not raise errors for
unknown fields. Only applied if `mapping` is a mapping type or there
are keyword parameters.
kwargs (dict): Keys and values corresponding to the fields of the
message.
"""
def __init__(
self, mapping=None, *, ignore_unknown_fields=False, **kwargs,
):
# We accept several things for `mapping`:
# * An instance of this class.
# * An instance of the underlying protobuf descriptor class.
# * A dict
# * Nothing (keyword arguments only).
if mapping is None:
if not kwargs:
# Special fast path for empty construction.
super().__setattr__("_pb", self._meta.pb())
return
mapping = kwargs
elif isinstance(mapping, self._meta.pb):
# Make a copy of the mapping.
# This is a constructor for a new object, so users will assume
# that it will not have side effects on the arguments being
# passed in.
#
# The `wrap` method on the metaclass is the public API for taking
# ownership of the passed in protobuf object.
mapping = copy.deepcopy(mapping)
if kwargs:
mapping.MergeFrom(self._meta.pb(**kwargs))
super().__setattr__("_pb", mapping)
return
elif isinstance(mapping, type(self)):
# Just use the above logic on mapping's underlying pb.
self.__init__(mapping=mapping._pb, **kwargs)
return
elif isinstance(mapping, collections.abc.Mapping):
# Can't have side effects on mapping.
mapping = copy.copy(mapping)
# kwargs entries take priority for duplicate keys.
mapping.update(kwargs)
else:
# Sanity check: Did we get something not a map? Error if so.
raise TypeError(
"Invalid constructor input for %s: %r"
% (self.__class__.__name__, mapping,)
)
params = {}
# Update the mapping to address any values that need to be
# coerced.
marshal = self._meta.marshal
for key, value in mapping.items():
(key, pb_type) = self._get_pb_type_from_key(key)
if pb_type is None:
if ignore_unknown_fields:
continue
raise ValueError(
"Unknown field for {}: {}".format(self.__class__.__name__, key)
)
try:
pb_value = marshal.to_proto(pb_type, value)
except ValueError:
# Underscores may be appended to field names
# that collide with python or proto-plus keywords.
# In case a key only exists with a `_` suffix, coerce the key
# to include the `_` suffix. It's not possible to
# natively define the same field with a trailing underscore in protobuf.
# See related issue
# https://github.com/googleapis/python-api-core/issues/227
if isinstance(value, dict):
keys_to_update = [
item
for item in value
if not hasattr(pb_type, item) and hasattr(pb_type, f"{item}_")
]
for item in keys_to_update:
value[f"{item}_"] = value.pop(item)
pb_value = marshal.to_proto(pb_type, value)
if pb_value is not None:
params[key] = pb_value
# Create the internal protocol buffer.
super().__setattr__("_pb", self._meta.pb(**params))
def _get_pb_type_from_key(self, key):
"""Given a key, return the corresponding pb_type.
Args:
key(str): The name of the field.
Returns:
A tuple containing a key and pb_type. The pb_type will be
the composite type of the field, or the primitive type if a primitive.
If no corresponding field exists, return None.
"""
pb_type = None
try:
pb_type = self._meta.fields[key].pb_type
except KeyError:
# Underscores may be appended to field names
# that collide with python or proto-plus keywords.
# In case a key only exists with a `_` suffix, coerce the key
# to include the `_` suffix. It's not possible to
# natively define the same field with a trailing underscore in protobuf.
# See related issue
# https://github.com/googleapis/python-api-core/issues/227
if f"{key}_" in self._meta.fields:
key = f"{key}_"
pb_type = self._meta.fields[key].pb_type
return (key, pb_type)
def __dir__(self):
desc = type(self).pb().DESCRIPTOR
names = {f_name for f_name in self._meta.fields.keys()}
names.update(m.name for m in desc.nested_types)
names.update(e.name for e in desc.enum_types)
names.update(dir(object()))
# Can't think of a better way of determining
# the special methods than manually listing them.
names.update(
(
"__bool__",
"__contains__",
"__dict__",
"__getattr__",
"__getstate__",
"__module__",
"__setstate__",
"__weakref__",
)
)
return names
def __bool__(self):
"""Return True if any field is truthy, False otherwise."""
return any(k in self and getattr(self, k) for k in self._meta.fields.keys())
def __contains__(self, key):
"""Return True if this field was set to something non-zero on the wire.
In most cases, this method will return True when ``__getattr__``
would return a truthy value and False when it would return a falsy
value, so explicitly calling this is not useful.
The exception case is empty messages explicitly set on the wire,
which are falsy from ``__getattr__``. This method makes it possible to
distinguish between an explicitly provided empty message and the
absence of that message, which is useful in some edge cases.
The most common edge case is the use of ``google.protobuf.BoolValue``
to get a boolean that distinguishes between ``False`` and ``None``
(or the same for a string, int, etc.). This library transparently
handles that case for you, but this method remains available to
accommodate cases not automatically covered.
Args:
key (str): The name of the field.
Returns:
bool: Whether the field's value corresponds to a non-empty
wire serialization.
"""
pb_value = getattr(self._pb, key)
try:
# Protocol buffers "HasField" is unfriendly; it only works
# against composite, non-repeated fields, and raises ValueError
# against any repeated field or primitive.
#
# There is no good way to test whether it is valid to provide
# a field to this method, so sadly we are stuck with a
# somewhat inefficient try/except.
return self._pb.HasField(key)
except ValueError:
return bool(pb_value)
def __delattr__(self, key):
"""Delete the value on the given field.
This is generally equivalent to setting a falsy value.
"""
self._pb.ClearField(key)
def __eq__(self, other):
"""Return True if the messages are equal, False otherwise."""
# If these are the same type, use internal protobuf's equality check.
if isinstance(other, type(self)):
return self._pb == other._pb
# If the other type is the target protobuf object, honor that also.
if isinstance(other, self._meta.pb):
return self._pb == other
# Ask the other object.
return NotImplemented
def __getattr__(self, key):
"""Retrieve the given field's value.
In protocol buffers, the presence of a field on a message is
sufficient for it to always be "present".
For primitives, a value of the correct type will always be returned
(the "falsy" values in protocol buffers consistently match those
in Python). For repeated fields, the falsy value is always an empty
sequence.
For messages, protocol buffers does distinguish between an empty
message and absence, but this distinction is subtle and rarely
relevant. Therefore, this method always returns an empty message
(following the official implementation). To check for message
presence, use ``key in self`` (in other words, ``__contains__``).
.. note::
Some well-known protocol buffer types
(e.g. ``google.protobuf.Timestamp``) will be converted to
their Python equivalents. See the ``marshal`` module for
more details.
"""
(key, pb_type) = self._get_pb_type_from_key(key)
if pb_type is None:
raise AttributeError(
"Unknown field for {}: {}".format(self.__class__.__name__, key)
)
pb_value = getattr(self._pb, key)
marshal = self._meta.marshal
return marshal.to_python(pb_type, pb_value, absent=key not in self)
def __ne__(self, other):
"""Return True if the messages are unequal, False otherwise."""
return not self == other
def __repr__(self):
return repr(self._pb)
def __setattr__(self, key, value):
"""Set the value on the given field.
For well-known protocol buffer types which are marshalled, either
the protocol buffer object or the Python equivalent is accepted.
"""
if key[0] == "_":
return super().__setattr__(key, value)
marshal = self._meta.marshal
(key, pb_type) = self._get_pb_type_from_key(key)
if pb_type is None:
raise AttributeError(
"Unknown field for {}: {}".format(self.__class__.__name__, key)
)
pb_value = marshal.to_proto(pb_type, value)
# Clear the existing field.
# This is the only way to successfully write nested falsy values,
# because otherwise MergeFrom will no-op on them.
self._pb.ClearField(key)
# Merge in the value being set.
if pb_value is not None:
self._pb.MergeFrom(self._meta.pb(**{key: pb_value}))
def __getstate__(self):
"""Serialize for pickling."""
return self._pb.SerializeToString()
def __setstate__(self, value):
"""Deserialization for pickling."""
new_pb = self._meta.pb().FromString(value)
super().__setattr__("_pb", new_pb)
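
# Illustrative sketch (added for clarity, not part of the original module): how a
# message built through MessageMeta behaves. The `Song` message and its fields are
# hypothetical; defining it inside a function exercises the "<locals>" handling in
# MessageMeta.__new__, and user code would normally declare it at module level.
def _example_message_usage():
    class Song(Message):
        title = Field(ProtoType.STRING, number=1)
        length = Field(ProtoType.INT32, number=2)

    song = Song(title="Example", length=180)
    payload = Song.serialize(song)        # wire-format bytes
    restored = Song.deserialize(payload)  # a wrapped Message instance
    assert restored.title == "Example"
    assert "length" in restored           # field presence via __contains__
    return Song.to_dict(restored)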
class _MessageInfo:
"""Metadata about a message.
Args:
fields (Tuple[~.fields.Field]): The fields declared on the message.
package (str): The proto package.
full_name (str): The full name of the message.
file_info (~._FileInfo): The file descriptor and messages for the
file containing this message.
marshal (~.Marshal): The marshal instance to which this message was
automatically registered.
options (~.descriptor_pb2.MessageOptions): Any options that were
set on the message.
"""
def __init__(
self,
*,
fields: List[Field],
package: str,
full_name: str,
marshal: Marshal,
options: descriptor_pb2.MessageOptions,
) -> None:
self.package = package
self.full_name = full_name
self.options = options
self.fields = collections.OrderedDict((i.name, i) for i in fields)
self.fields_by_number = collections.OrderedDict((i.number, i) for i in fields)
self.marshal = marshal
self._pb = None
@property
def pb(self) -> Type[message.Message]:
"""Return the protobuf message type for this descriptor.
If a field on the message references another message which has not
loaded, then this method returns None.
"""
return self._pb
__all__ = ("Message",)
|
|
"""
Amazon mobi format output
Publishing Guidelines:
http://kindlegen.s3.amazonaws.com/AmazonKindlePublishingGuidelines.pdf
Tools:
http://www.amazon.com/gp/feature.html?ie=UTF8&docId=1000234621
(KindleGen and Kindle Previewer)
This still needs a lot of work.
"""
from .output import OutputFile
from .. import text
import os
import shutil
OPF_FILENAME = 'book.opf'
NCX_FILENAME = 'book.ncx'
STYLE_FILENAME = 'style.css'
HTML_COVER_FILENAME = 'cover.html'
HTML_END_FILENAME = 'end.html'
SEARCH_ORDER_PRE = 1
SEARCH_ORDER_POST = 2
class MobiOutput(OutputFile):
def __init__(self, outfile, outdir):
OutputFile.__init__(self)
self.sections = {}
self.metadata = None
self.toc = None
self.outfile = outfile
self.outdir = outdir
self.section_filenames = {}
self.css = {}
def add_section(self, section):
assert section is not None
self.section_filenames[section.index] = 'chap{0:03d}.html'.format(section.index)
if section.is_toc:
assert self.toc is None
self.toc = section
self.sections[section.index] = section
def set_metadata(self, metadata):
self.metadata = metadata
def write(self):
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
opffile = os.path.join(self.outdir, OPF_FILENAME)
self.write_opf(opffile)
ncxfile = os.path.join(self.outdir, NCX_FILENAME)
self.write_ncx(ncxfile)
coverhfile = os.path.join(self.outdir, HTML_COVER_FILENAME)
self.write_html_cover(coverhfile)
coverifile = os.path.join(self.outdir,
os.path.basename(self.metadata.cover.filename))
if not os.path.samefile(self.metadata.cover.filename, coverifile):
shutil.copyfile(self.metadata.cover.filename, coverifile)
if self.toc is not None:
tocfile = os.path.join(self.outdir, self.section_filenames[self.toc.index])
self.write_toc(tocfile, False, self.toc)
endfile = os.path.join(self.outdir, HTML_END_FILENAME)
self.write_html_end(endfile)
# All the sections before the TOC are considered part of the title.
# So collect them into a list, and call write_toc when the TOC is
# finally found.
before_toc = []
keys = list(self.sections.keys())
keys.sort()
for key in keys:
ch = self.sections[key]
chfile = os.path.join(self.outdir,
self.section_filenames[ch.index])
if before_toc is None:
self.write_section(chfile, ch)
elif ch.is_toc:
self.write_toc(chfile, before_toc, ch)
before_toc = None
else:
before_toc.append(ch)
if hasattr(ch, 'divs'):
for media in ch.divs:
if isinstance(media, text.Media):
# FIXME
mediafile = os.path.join(self.outdir,
os.path.basename(media.filename))
if not os.path.samefile(mediafile, media.filename):
shutil.copyfile(media.filename, mediafile)
cssfile = os.path.join(self.outdir, STYLE_FILENAME)
self.write_css(cssfile)
# FIXME run mobi converter
def write_opf(self, outfile):
manifest = self.create_manifest()
spine = self.create_spine()
guide = self.create_guide()
with open(outfile, "w") as f:
f.write(OPF_TEMPLATE.format(
manifest=manifest,
spine=spine,
guide=guide,
title=self.metadata.title,
language=self.metadata.language,
isbn_10=self.metadata.isbn_10,
isbn_13=self.metadata.isbn_13,
author_first=self.metadata.author_first,
author_last=self.metadata.author_last,
description=self.metadata.description
))
def write_ncx(self, outfile):
navpoints, last_play_order, maxdepth = self.create_navpoints()
toc_loc = None
keys = list(self.sections.keys())
keys.sort()
for key in keys:
ch = self.sections[key]
if ch.is_toc:
toc_loc = self.section_filenames[ch.index]
break
with open(outfile, "w") as f:
f.write(NCX_TEMPLATE.format(
navpoints=navpoints,
title=self.metadata.title,
author_first=self.metadata.author_first,
author_last=self.metadata.author_last,
description=self.metadata.description,
depth=maxdepth,
toc_loc=toc_loc,
lastPlayOrder=last_play_order
))
def write_toc(self, outfile, before_toc, toc):
title_sections = ""
for ch in before_toc:
title_sections += self.create_section_text(ch)
toc_title = self.create_div_text(toc.title_div)
toc_divs = ""
def visit(ch, order, depth):
raise NotImplementedError()
# FIXME use search_sections
keys = list(self.sections.keys())
keys.sort()
for key in keys:
ch = self.sections[key]
chfile = self.section_filenames[ch.index]
if ch not in before_toc:
pass
with open(outfile, "w") as f:
f.write(TOC_TEMPLATE.format(
title=self.metadata.title,
cover_loc=os.path.basename(self.metadata.cover.filename),
title_sections=title_sections,
toc_title=toc_title,
toc_divs=toc_divs
))
raise NotImplementedError()
def write_html_cover(self, outfile):
with open(outfile, "w") as f:
f.write(COVER_TEMPLATE.format(
loc=os.path.basename(self.metadata.cover.filename),
title=self.metadata.title
))
def write_section(self, outfile, section):
raise NotImplementedError()
def write_html_end(self, outfile):
raise NotImplementedError()
def write_css(self, outfile):
raise NotImplementedError()
def create_section_text(self, section):
raise NotImplementedError()
def create_div_text(self, div):
raise NotImplementedError()
def create_manifest(self):
found = ['cover-image', 'toc', 'cover-page']
ret = (
MANIFEST_ENTRY_TEMPLATE.format(
name='cover-image',
loc=os.path.basename(self.metadata.cover.filename),
mimetype=self.metadata.cover.get_mimetype())
)
# FIXME use search_sections
keys = list(self.sections.keys())
keys.sort()
for key in keys:
ch = self.sections[key]
ret += MANIFEST_ENTRY_TEMPLATE.format(
name=self.section_filenames[ch.index],
loc=self.section_filenames[ch.index],
mimetype='application/xhtml+xml'
)
if hasattr(ch, 'divs'):
for media in ch.divs:
if isinstance(media, text.Media):
ret += MANIFEST_ENTRY_TEMPLATE.format(
name=os.path.basename(media.filename),
loc=os.path.basename(media.filename),
mimetype=media.get_mimetype()
)
return ret
def create_spine(self):
# NOTE: we hard-code the location of the cover, because that's
# what's required of us.
ret = ""
keys = list(self.sections.keys())
keys.sort()
for key in keys:
ch = self.sections[key]
name = self.section_filenames[ch.index]
ret += SPINE_ENTRY_TEMPLATE.format(name=name)
return ret
def create_guide(self):
# Find the actual page name of the TOC and first non-toc section
first = None
toc = None
keys = list(self.sections.keys())
keys.sort()
for key in keys:
ch = self.sections[key]
if toc is None and ch.is_toc:
toc = self.section_filenames[ch.index]
elif first is None:
first = self.section_filenames[ch.index]
if toc is not None and first is not None:
break
return GUIDE_TEMPLATE.format(first_loc=first, toc_loc=toc)
def create_navpoints(self):
global text_stack
text_stack = []
def visit(ch, order, depth):
global text_stack
clazz = ""
if ch.is_book:
clazz = 'class="book"'
while depth > len(text_stack):
text_stack.append("")
if depth + 1 == len(text_stack):
# Bottom of tree
text_stack[depth] += NAVPOINT_TEMPLATE.format(
name=str(order),
order=str(order),
title=ch.name,
loc=self.section_filenames[ch.index],
index=ch.index,
navpoints="",
clazz=clazz)
elif depth + 2 == len(text_stack):
# parent
children = text_stack[-1]
text_stack = text_stack[:-1]
text_stack[depth] += NAVPOINT_TEMPLATE.format(
name=str(order),
order=str(order),
title=ch.name,
loc=self.section_filenames[ch.index],
index=ch.index,
navpoints=children,
clazz=clazz)
else:
raise Exception("invalid walking: depth {0}, text_stack: {1}".format(depth, text_stack))
play_order, max_depth = self.search_sections(3, SEARCH_ORDER_POST, False, visit)
return text_stack[0], play_order, max_depth
def search_sections(self, init_index, search_order, visit_toc, visitor):
max_depth = 0
play_order = init_index
keys = list(self.sections.keys())
keys.sort()
for key in keys:
ch = self.sections[key]
if not ch.is_toc or visit_toc:
vals = self.__search_sections(
ch, play_order, 0, search_order, visitor)
play_order = vals[0]
if vals[1] > max_depth:
max_depth = vals[1]
return play_order, max_depth
def __search_sections(self, ch, index, depth, search_order, visitor):
ret_play_order = index + 1
ret_depth = depth
if search_order == SEARCH_ORDER_PRE:
visitor(ch, index, depth)
if hasattr(ch, 'divs'):
for sub in ch.divs:
if isinstance(sub, text.Chapter):
vals = self.__search_sections(sub, ret_play_order, depth + 1, search_order, visitor)
ret_play_order = vals[0]
if vals[1] > ret_depth:
ret_depth = vals[1]
if search_order == SEARCH_ORDER_POST:
visitor(ch, index, depth)
return ret_play_order, ret_depth
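
# Illustrative sketch (not part of the original module): how a visitor plugs into
# search_sections(). The `output` argument is assumed to be a MobiOutput whose
# sections have already been added.
def _example_collect_navpoints(output):
    visited = []

    def visit(ch, order, depth):
        # Called once per non-TOC section with a running play order and its depth.
        visited.append((order, depth, ch.name))

    last_play_order, max_depth = output.search_sections(
        3, SEARCH_ORDER_POST, False, visit)
    return visited, last_play_order, max_depth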
OPF_TEMPLATE = """<?xml version="1.0"?>
<package version="2.0" xmlns="http://www.idpf.org/2007/opf" unique-identifier="BookId">
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
<dc:title>{title}</dc:title>
<dc:language>{language}</dc:language>
<dc:identifier id="BookId" opf:scheme="ISBN">{isbn_10}</dc:identifier>
<dc:creator opf:file-as="{author_last}, {author_first}" opf:role="aut">{author_first} {author_last}</dc:creator>
<dc:description>
{description}
</dc:description>
<meta name="cover" content="cover-image" />
</metadata>
<manifest>
<item id="toc" href="book.ncx" media-type="application/x-dtbncx+xml"/>
<item id="cover-page" href="cover.html" media-type="application/xhtml+xml"/>
{manifest}
</manifest>
<spine toc="toc">
<itemref idref="cover-page" linear="no" />
<itemref idref="toc" />
{spine}
</spine>
<guide>
<reference type="cover" title="Cover Image" href="cover.html" />
{guide}
</guide>
</package>
"""
MANIFEST_ENTRY_TEMPLATE = """ <item id="{name}" href="{loc}" media-type="{mimetype}"/>
"""
SPINE_ENTRY_TEMPLATE = """ <itemref idref="{name}" />
"""
GUIDE_TEMPLATE = """ <reference type="start" title="Start" href="{first_loc}%23start" />
<reference type="toc" title="Table of Contents" href="{toc_loc}%23toc" />
"""
NCX_TEMPLATE = """<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN"
"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/"
version="2005-1" xml:lang="en-US">
<head>
<meta name="dtb:uid" content="uid"/>
<meta name="dtb:depth" content="{depth}"/>
<meta name="dtb:totalPageCount" content="0"/>
<meta name="dtb:maxPageNumber" content="0"/>
</head>
<docTitle><text>{title}</text></docTitle>
<docAuthor><text>{author_first} {author_last}</text></docAuthor>
<navMap>
<navPoint class="titlepage" id="TitlePage" playOrder="1">
<navLabel><text>Title Page</text></navLabel>
<content src="{toc_loc}#Title Page" />
</navPoint>
<navPoint id="toc" playOrder="2">
<navLabel><text>Table of Contents</text></navLabel>
<content src="{toc_loc}#toc" />
</navPoint>
{navpoints}
<navPoint id="Copyright" playOrder="{lastPlayOrder}">
<navLabel><text>Copyright Notice</text></navLabel>
<content src="{toc_loc}#copyright" />
</navPoint>
</navMap>
</ncx>
"""
NAVPOINT_TEMPLATE = """ <navPoint {clazz} id="{name}" playOrder="{order}">
<navLabel><text>{title}</text></navLabel>
<content src="{loc}#{index}" />
{navpoints}
</navPoint>
"""
COVER_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/>
<title>{title}</title>
<meta name="cover" content="{loc}" />
<link rel="StyleSheet" href="style.css" type="text/css" media="screen, print" />
</head>
<body>
<a name="Cover"></a>
<mbp:section>
<img src="{loc}" name="{title}" />
</mbp:section>
</body>
</html>"""
TOC_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/>
<title>{title}</title>
<meta name="cover" content="{cover_loc}" />
<link rel="StyleSheet" href="style.css" type="text/css" media="screen, print" />
</head>
<body>
<!-- ********************* TITLE PAGE ******************************** -->
<a name="Title Page"></a>
{title_sections}
<!-- ********************* TABLE OF CONTENTS ******************************** -->
<a name="toc"></a>
<mbp:section>
<h1 class="chapter">Table of Contents</h1>
<div><a href="#Title Page">Title Page</a></div>
{toc_divs}
<div><a href="end.html#copyright">Copyright Notice</a></div>
</mbp:section>
</body>
</html>"""
|
|
# -*- coding: utf-8 -*-
"""
:mod:`pambox.inner` regroups processes of the inner ear.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy import exp, sin, cos, sqrt, abs, ones, pi
import scipy as sp
import scipy.signal as ss
from .utils import next_pow_2, hilbert
try:
_ = np.use_fastnumpy # Use Enthought MKL optimizations
from numpy.fft import fft, ifft, rfft, irfft
except AttributeError:
try:
import mklfft # MKL FFT optimizations from Continuum Analytics
from numpy.fft import fft, ifft, rfft, irfft
except ImportError:
# Finally, just use Numpy's and Scipy's
from scipy.fftpack import fft, ifft
from numpy.fft import rfft, irfft
CENTER_F = np.asarray([63, 80, 100, 125, 160, 200, 250, 315, 400, 500,
630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000,
5000, 6300, 8000])
FS = np.asarray([22050.])
def erb_bandwidth(fc):
"""Bandwitdh of an Equivalent Rectangular Bandwidth (ERB).
Parameters
----------
fc : ndarray
Center frequency, or center frequencies, of the filter.
Returns
-------
ndarray or float
Equivalent rectangular bandwidth of the filter(s).
"""
# In Hz, according to Glasberg and Moore (1990)
return 24.7 + fc / 9.265
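# Worked example (illustrative addition, not in the original module): for a 1 kHz
# channel the approximation above gives erb_bandwidth(1000.0) == 24.7 + 1000 / 9.265,
# i.e. roughly 132.6 Hz.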
def lowpass_env_filtering(x, cutoff=150., n=1, fs=22050):
"""Low-pass filters a signal using a Butterworth filter.
Parameters
----------
x : ndarray
cutoff : float, optional
Cut-off frequency of the low-pass filter, in Hz. The default is 150 Hz.
n : int, optional
Order of the low-pass filter. The default is 1.
fs : float, optional
Sampling frequency of the signal to filter. The default is 22050 Hz.
Returns
-------
ndarray
Low-pass filtered signal.
"""
b, a = sp.signal.butter(N=n, Wn=cutoff * 2. / fs, btype='lowpass')
return sp.signal.lfilter(b, a, x)
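
# Illustrative sketch (not part of the original module): a typical use of the helper
# above, smoothing the Hilbert envelope computed by hilbert_envelope() further down
# in this module.
def _example_lowpass_envelope(x, fs=22050):
    env = hilbert_envelope(x)  # broadband temporal envelope
    return lowpass_env_filtering(env, cutoff=150., n=1, fs=fs)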
class GammatoneFilterbank(object):
def __init__(self, fs, cf, b=1.019, order=1, q=9.26449, min_bw=24.7):
"""Gammatone Filterbank
Parameters
----------
fs : float
Sampling frequency of the signals to filter.
cf : array_like
Center frequencies of the filterbank.
b : float
beta of the gammatone filters. The default is `b` = 1.019.
order : int
Order. The default value is 1.
q : float
Q-value of the ERB. The default value is 9.26449.
min_bw : float
Minimum bandwidth of an ERB.
References
----------
"""
self.fs = fs
try:
len(cf)
except TypeError:
cf = [cf]
self.cf = np.asarray(cf)
self.b = b
self.erb_order = order
self.q = q
self.min_bw = min_bw
def _calculate_coefficients(self):
cf = self.cf
b = self.b
order = self.erb_order
q = self.q
min_bw = self.min_bw
erb = ((cf / q) ** order + min_bw ** order) ** (1 / order)
t = 1 / self.fs
b = b * 2 * pi * erb
a0 = t
a2 = 0
b0 = 1
b1 = -2 * cos(2 * cf * pi * t) / exp(b * t)
b2 = exp(-2 * b * t)
a11 = -(2 * t * cos(2 * cf * pi * t) / exp(b * t) + 2 * sqrt(
3 + 2 ** 1.5) * t * sin(2 * cf * pi * t) / exp(b * t)) / 2
a12 = -(2 * t * cos(2 * cf * pi * t) / exp(b * t) - 2 * sqrt(
3 + 2 ** 1.5) * t * sin(2 * cf * pi * t) / exp(b * t)) / 2
a13 = -(2 * t * cos(2 * cf * pi * t) / exp(b * t) + 2 * sqrt(
3 - 2 ** 1.5) * t * sin(2 * cf * pi * t) / exp(b * t)) / 2
a14 = -(2 * t * cos(2 * cf * pi * t) / exp(b * t) - 2 * sqrt(
3 - 2 ** 1.5) * t * sin(2 * cf * pi * t) / exp(b * t)) / 2
i = 1j
gain = abs((-2 * exp(4 * i * cf * pi * t) * t +
2 * exp(-(b * t) + 2 * i * cf * pi * t) * t *
(cos(2 * cf * pi * t) - sqrt(3 - 2 ** (3. / 2)) *
sin(2 * cf * pi * t))) *
(-2 * exp(4 * i * cf * pi * t) * t +
2 * exp(-(b * t) + 2 * i * cf * pi * t) * t *
(cos(2 * cf * pi * t) + sqrt(3 - 2 ** (3. / 2)) *
sin(2 * cf * pi * t))) *
(-2 * exp(4 * i * cf * pi * t) * t +
2 * exp(-(b * t) + 2 * i * cf * pi * t) * t *
(cos(2 * cf * pi * t) -
sqrt(3 + 2 ** (3. / 2)) * sin(2 * cf * pi * t))) *
(-2 * exp(4 * i * cf * pi * t) * t + 2 * exp(
-(b * t) + 2 * i * cf * pi * t) * t *
(cos(2 * cf * pi * t) + sqrt(3 + 2 ** (3. / 2)) * sin(
2 * cf * pi * t))) /
(-2 / exp(2 * b * t) - 2 * exp(4 * i * cf * pi * t) +
2 * (1 + exp(4 * i * cf * pi * t)) / exp(b * t)) ** 4)
allfilts = ones(len(cf))
return a0 * allfilts, a11, a12, a13, a14, a2 * allfilts, \
b0 * allfilts, b1, b2, gain
def filter(self, x):
"""Filters a signal along its last dimension.
Parameters
----------
x : ndarray
Signal to filter.
Returns
-------
ndarray
Filtered signals with shape ``(M, N)``, where ``M`` is the number of
channels, and ``N`` is the input signal's number of samples.
"""
a0, a11, a12, a13, a14, a2, b0, b1, b2, gain = self._calculate_coefficients()
output = np.zeros((gain.shape[0], x.shape[-1]))
for chan in range(gain.shape[0]):
y1 = ss.lfilter([a0[chan] / gain[chan], a11[chan] / gain[chan],
a2[chan] / gain[chan]],
[b0[chan], b1[chan], b2[chan]], x)
y2 = ss.lfilter([a0[chan], a12[chan], a2[chan]],
[b0[chan], b1[chan], b2[chan]], y1)
y3 = ss.lfilter([a0[chan], a13[chan], a2[chan]],
[b0[chan], b1[chan], b2[chan]], y2)
y4 = ss.lfilter([a0[chan], a14[chan], a2[chan]],
[b0[chan], b1[chan], b2[chan]], y3)
output[chan, :] = y4
return output
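# Hedged usage sketch for `GammatoneFilterbank` (centre frequencies and the
# noise input are made up for illustration only).
def _demo_gammatone_filterbank(fs=22050):
    import numpy as np
    cf = [250., 500., 1000., 2000.]   # example centre frequencies, in Hz
    fb = GammatoneFilterbank(fs=fs, cf=cf)
    x = np.random.randn(fs)           # one second of white noise
    y = fb.filter(x)                  # shape: (len(cf), len(x))
    return y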
class RectangularFilterbank(object):
def __init__(self, fs, center_f, width=3, output_time=False):
"""Rectangular filterbank with Nth-octave wide filters.
Parameters
----------
fs : int
Sampling frequency of the input signal.
center_f : array_like
List of the center frequencies of the filterbank.
width : float
Width of the filters, in fraction of octave. The default value is 3,
therefore 1/3-octave.
output_time : bool, optional
If `True`, also outputs the time output of the filtering. The default
is to output the RMS value of each band only. Doing the inverse FFT
is very costly; setting the argument to `False` avoids that computation.
Returns
-------
out_rms : ndarray
RMS power at the output of each filter.
out_time : ndarray
Time signals at the output of the filterbank. The shape is
``(len(center_f), len(x))``.
"""
self.fs = fs
self.center_f = center_f
self.width = width
self.output_time = output_time
def filter(self, x):
"""
Parameters
----------
x : array_like
Input signal
Returns
-------
Notes
-----
This method uses NumPy's `rfft` because it returns one complex value per
frequency bin, unlike SciPy's `fftpack.rfft`, which returns two real
values per frequency bin.
"""
center_f = np.asarray(self.center_f, dtype='float')
n = len(x)
# TODO Use powers of 2 to calculate the power spectrum, and also, possibly
# use RFFT instead of the complete fft.
X = rfft(x)
X_pow = np.abs(X) ** 2 / n # Power spectrum
X_pow[1:] = X_pow[1:] * 2.
bound_f = np.zeros(len(center_f) + 1)
bound_f[0] = center_f[0] * 2. ** (- 1. / (2. * self.width))
bound_f[1:] = center_f * 2. ** (1. / (2. * self.width))
bound_f = bound_f[bound_f < self.fs / 2]
# Convert from frequencies to vector indexes. Factor of two is because
# we consider positive frequencies only.
bound_idx = np.floor(bound_f / (self.fs / 2.) * len(X_pow)).astype('int')
# Initialize arrays
out_rms = np.zeros(len(center_f))
out_time = np.zeros((len(center_f), x.shape[-1]), dtype='complex')
for idx, (l, f) in enumerate(zip(bound_idx[0:], bound_idx[1:])):
out_time[idx, l:f] = X[l:f]
out_rms[idx] = np.sqrt(np.sum(X_pow[l:f]) / n)
if self.output_time:
out_time = np.real(irfft(out_time, n=n, axis=-1))
return out_rms, out_time
else:
return out_rms
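# Hedged usage sketch for `RectangularFilterbank` (band centres chosen purely
# for illustration): per-band RMS levels of a noise signal in 1/3-octave
# bands, optionally keeping the band-limited time signals as well.
def _demo_rectangular_filterbank(fs=22050):
    import numpy as np
    center_f = [125., 250., 500., 1000., 2000.]
    fb = RectangularFilterbank(fs=fs, center_f=center_f, width=3,
                               output_time=True)
    x = np.random.randn(fs)
    out_rms, out_time = fb.filter(x)  # RMS per band, band-limited signals
    return out_rms, out_time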
def hilbert_envelope(signal):
"""Calculates the Hilbert envelope of a signal.
Parameters
----------
signal : array_like
Signal on which to calculate the hilbert envelope. The calculation
is done along the last axis (i.e. ``axis=-1``).
Returns
-------
ndarray
"""
signal = np.asarray(signal)
N_orig = signal.shape[-1]
# Next power of 2.
N = next_pow_2(N_orig)
y_h = hilbert(signal, N)
# Return signal with same shape as original
return np.abs(y_h[..., :N_orig])
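# Hedged usage sketch for `hilbert_envelope` (the AM tone is invented for
# illustration): the envelope of an amplitude-modulated carrier should follow
# the slow modulator.
def _demo_hilbert_envelope(fs=22050):
    import numpy as np
    t = np.arange(fs) / float(fs)
    modulator = 1.0 + 0.5 * np.sin(2 * np.pi * 4 * t)    # 4 Hz modulation
    carrier = np.sin(2 * np.pi * 1000 * t)               # 1 kHz carrier
    return hilbert_envelope(modulator * carrier)         # ~ |modulator|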
|
|
import random, csv
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from itertools import chain,cycle,islice
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = cycle(iter(it).next for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
def steppify(x,y):
sx = roundrobin(chain([0],x),x)
sy = roundrobin(y,chain(y,[y[-1]]))
return list(sx), list(sy)
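# Worked example for steppify (values invented for illustration): breakpoints
# are duplicated so the bid stack plots as a step curve.
#
#     steppify([10, 20, 30], [5., 7., 9.])
#     -> x: [0, 10, 10, 20, 20, 30, 30]
#        y: [5., 5., 7., 7., 9., 9., 9.]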
class Market:
def __init__(self,bidfile = '../php/bids.txt'):
self.players = {}
self._playerlist = set()
self.bidfile = bidfile
def update(self):
self.load_latest_bids()
self.plot()
def load_latest_bids(self):
for ID,name,bid in self.readfile():
if ID in self._playerlist:
self.players[ID].setbid(float(bid))
self.schedule_production()
price = self.get_current_pay_as_bid_price()
for p in self.players.itervalues():
p.push_bid_and_profit(price)
self.papricelist.append(price)
self.write_stats_file()
def load_first_bids(self):
for ID,name,bid in self.readfile():
self.players[ID] = Player(ID,name)
self.players[ID].setbid(float(bid))
self._playerlist.add(ID)
self.nplayers = len(self._playerlist)
# Set demand so there is a 10% chance of using the large power plant
self.demand = 10*self.nplayers - 5*1.28*0.8165*np.sqrt(self.nplayers)
self.schedule_production()
curprice = self.get_current_pay_as_bid_price()
for p in self.players.itervalues():
p.push_bid_and_profit(curprice)
self.papricelist = [curprice]
self.write_stats_file()
def readfile(self):
return csv.reader(open(self.bidfile))
def schedule_production(self):
x = 0.0
pids = {pid:self.players[pid].curbid for pid in self._playerlist}
pids = sorted(pids.keys(), key=pids.get)
for pid in pids:
x+= self.players[pid].curprod
if x < self.demand:
self.players[pid].schedprod = self.players[pid].curprod
else:
self.players[pid].schedprod = max(0.0,self.demand + self.players[pid].curprod - x)
def get_current_pay_as_bid_price(self):
x = self.demand
pids = {pid:self.players[pid].curbid for pid in self._playerlist}
pids = sorted(pids.keys(), key=pids.get)
for pid in pids:
x -= self.players[pid].curprod
if x < 0:
return self.players[pid].curbid
return 100.00
def get_current_mc_price(self):
x = self.demand
pids = {pid:self.players[pid].curbid for pid in self._playerlist}
pids = sorted(pids.keys(), key=pids.get)
for pid in pids:
x-= self.players[pid].curprod
if x < 0:
return self.players[pid].mc
return 100.00
def plot(self):
plt.ion()
plt.figure(1, figsize=(8,5), dpi=100)
plt.subplot(121)
plt.cla()
self.plot_bid_curve()
plt.subplot(122)
plt.cla()
self.plot_profits()
plt.tight_layout()
plt.savefig('../pic/out.png')
plt.figure(2, figsize=(8,5), dpi=100)
plt.subplot(121)
plt.cla()
self.plot_bid_curve()
plt.subplot(122)
plt.cla()
self.plot_profits()
plt.tight_layout()
def plot_bid_curve(self):
pids = {pid:self.players[pid].curbid for pid in self._playerlist}
pids = sorted(pids.keys(), key=pids.get)
ymc = [self.players[pid].mc for pid in pids]+[100]
ybid = [self.players[pid].curbid for pid in pids]+[100]
x = np.cumsum([self.players[pid].curprod for pid in pids]+[self.demand])
sx,symc = steppify(x,ymc)
sx,sybid = steppify(x,ybid)
tmp = [(xx,yy,zz) for xx,yy,zz in zip(sx,sybid,symc) if xx < self.demand]
tmp.append((self.demand,tmp[-1][1],tmp[-1][2]))
sxless,sybidless,symcless = zip(*tmp)
plt.fill_between(sxless,symcless,sybidless,color = 'g',alpha=0.3)
plt.plot(sx,symc,lw=3,c='k')
plt.plot(sx,sybid,lw=3,c='k')
plt.axvline(self.demand,lw=3,ls='--',c='k')
plt.axhline(sybidless[-1],lw=3,ls=':',c='k')
plt.title('Final price: {:.02f}'.format(sybidless[-1]))
plt.xlabel('Amount [MWh]')
plt.ylabel('Price [$/MWh]')
def plot_mc_curve(self):
pids = {pid:self.players[pid].mc for pid in self._playerlist}
pids = sorted(pids.keys(), key=pids.get)
ymc = [self.players[pid].mc for pid in pids]+[100]
ybid = [self.players[pid].curbid for pid in pids]+[100]
x = np.cumsum([self.players[pid].curprod for pid in pids]+[self.demand])
sx,symc = steppify(x,ymc)
sx,sybid = steppify(x,ybid)
tmp = [(xx,yy,zz) for xx,yy,zz in zip(sx,sybid,symc) if xx < self.demand]
tmp.append((self.demand,tmp[-1][1],tmp[-1][2]))
sxless,sybidless,symcless = zip(*tmp)
plt.fill_between(sxless,symcless,symcless[-1],color = 'g',alpha=0.3)
plt.plot(sx,symc,lw=3,c='k')
plt.plot(sx,sybid,lw=3,c='k')
plt.axvline(self.demand,lw=3,ls='--',c='k')
plt.axhline(sybidless[-1],lw=3,ls=':',c='k')
plt.title('Final price: {:.02f}'.format(symcless[-1]))
def plot_profits(self):
bestprofit = -100.0
for p in self.players.itervalues():
if sum(p.pabprofitlist) > bestprofit:
bestprofit = sum(p.pabprofitlist)
bestname = p.name
plt.plot(np.cumsum(p.pabprofitlist),c='k',marker='.')
# plt.plot(np.cumsum(p.mcprofitlist),c='r',marker='.')
plt.title('Current leader: {0} \n with a profit of {1:.01f}'.format(bestname, bestprofit))
plt.xlabel('Round number')
plt.ylabel('Profit [$]')
def write_stats_file(self):
outArr = []
for pid,p in self.players.iteritems():
outArr.append(map(float,[p.ID,p.curbid,p.curprod,p.schedprod,sum(p.pabprofitlist)]))
np.savetxt('../php/stats.txt',outArr,fmt='%d,%.02f,%.02f,%.02f,%.02f')
def get_pandas_dataframe(self):
df = pd.DataFrame()
for pid, p in self.players.iteritems():
df = df.append(pd.DataFrame({
"player_ID": [pid for _ in p.bidlist],
"round": [i for i,_ in enumerate(p.bidlist)],
"pab_profit": [v for v in p.pabprofitlist],
"up_profit": [v for v in p.mcprofitlist],
"scheduled": [v for v in p.prodlist],
"potential": [v for v in p.potprodlist],
"price": [v for v in p.pricelist]
}), ignore_index=True)
df['cumulative_profit'] = (df.pab_profit - df.up_profit)
df['cumulative_profit'] = df.groupby('player_ID')['cumulative_profit'].cumsum()
self.df = df
return df
def plot_pandas(self):
try:
df = self.df
except AttributeError:
df = self.get_pandas_dataframe()
plt.figure(3, figsize=(8,5), dpi=100)
ax3 = plt.axes()
df.groupby('player_ID').sum().plot(kind='scatter', x='potential', y='pab_profit', ax=ax3)
plt.ylabel('Pay-as-bid profit')
plt.figure(4, figsize=(8,5), dpi=100)
ax4 = plt.axes()
gb = df.groupby('player_ID')
for id, g in gb:
g.plot(x='round', y='cumulative_profit', marker='.', ax=ax4)
plt.xlabel('Round')
plt.ylabel('PAB Profit - UP Profit')
class Player:
def __init__(self, ID = -1,name=''):
self.ID = ID
self.name = name
# self.mc = round((int(ID) * 10.0)/30000 + 5,2)
self.mc = 0
self.bidlist = []
self.pabprofitlist = []
self.mcprofitlist = []
self.prodlist = []
self.potprodlist = []
self.pricelist = []
self.totalprod = 0
def setbid(self, bid):
self.curbid = bid
self.curprod = random.randint(1,3)*5
self.schedprod = 0.0
def push_bid_and_profit(self,price = 0.0):
self.bidlist.append(self.curbid)
self.pabprofitlist.append((self.curbid-self.mc)*self.schedprod)
self.mcprofitlist.append((price-self.mc)*self.schedprod)
self.totalprod += self.schedprod
self.prodlist.append(self.schedprod)
self.potprodlist.append(self.curprod)
self.pricelist.append(price)
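# Hedged end-to-end sketch (not part of the original game loop). The bid file
# path and its contents are invented here to show the "ID,name,bid" CSV rows
# consumed by Market.readfile(); note that Market also writes hard-coded
# relative paths such as '../php/stats.txt' and '../pic/out.png', so the
# expected directory layout must exist before running a full update().
def _demo_market_round(bidfile='/tmp/bids_demo.txt'):
    with open(bidfile, 'w') as f:
        f.write('1,alice,12.5\n2,bob,11.0\n3,carol,14.2\n')
    market = Market(bidfile=bidfile)
    market.load_first_bids()                      # registers players, sets demand
    return market.get_current_pay_as_bid_price()  # marginal accepted bid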
|
|
"""
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert an
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid). One can use many such
hidden layers, making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
__docformat__ = 'restructuredtext en'
import cPickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression, load_data
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# `W` is initialized with `W_values` which is uniformly sampled
# from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out))
# for the tanh activation function
# the output of uniform is converted using asarray to dtype
# theano.config.floatX so that the code is runnable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other functions, so we use the same as
# for tanh.
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
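# Hedged NumPy sketch (illustrative only) of the computation HiddenLayer sets
# up symbolically: output = activation(dot(input, W) + b), i.e. the inner
# s(b^{(1)} + W^{(1)} x) term of the f(x) given in the module docstring.
def _hidden_layer_forward_sketch(x, W, b):
    import numpy as np
    return np.tanh(np.dot(x, W) + b)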
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``SigmoidalLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will
# translate into a TanhLayer connected to the LogisticRegression
# layer; this can be replaced by a SigmoidalLayer, or a layer
# implementing any other nonlinearity
self.hiddenLayer = HiddenLayer(rng=rng, input=input,
n_in=n_in, n_out=n_hidden,
activation=T.tanh)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out)
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = abs(self.hiddenLayer.W).sum() \
+ abs(self.logRegressionLayer.W).sum()
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \
+ (self.logRegressionLayer.W ** 2).sum()
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='../data/mnist.pkl.gz', batch_size=20, n_hidden=500):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(rng=rng, input=x, n_in=28 * 28,
n_hidden=n_hidden, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = classifier.negative_log_likelihood(y) \
+ L1_reg * classifier.L1 \
+ L2_reg * classifier.L2_sqr
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]})
validate_model = theano.function(inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]})
# compute the gradient of cost with respect to theta (stored in params)
# the resulting gradients will be stored in a list gparams
gparams = []
for param in classifier.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
updates = []
# given two lists A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of the
# same length, zip generates a list C of the same size, where each element
# is a pair formed from the two lists:
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
for param, gparam in zip(classifier.params, gparams):
updates.append((param, param - learning_rate * gparam))
# compiling a Theano function `train_model` that returns the cost and,
# at the same time, updates the parameters of the model based on the rules
# defined in `updates`
train_model = theano.function(inputs=[index], outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size:(index + 1) * batch_size]})
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
patience = 10000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_params = None
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
test_mlp()
|
|
import collections
import multiprocessing
import pyes
from pyes.exceptions import NotFoundException
import pymongo
import logging
import time
from bson.objectid import ObjectId
from .spec import QuerySpecification
from .oplog_watcher import OplogWatcher
import base64
_indexes = []
_connections = {}
class IndexMeta(type):
def __new__(mcs, name, bases, attrs):
metaclass = attrs.get('__metaclass__')
super_new = super(IndexMeta, mcs).__new__
if metaclass and issubclass(metaclass, IndexMeta):
return super_new(mcs, name, bases, attrs)
terms = {}
for attr_name, attr_value in attrs.items():
if isinstance(attr_value, Term):
term = attr_value
term.name = attr_name
if term.index_name is None:
term.index_name = term.name
terms[attr_name] = attr_value
del attrs[attr_name]
attrs['terms'] = terms
meta = attrs.pop('Meta', None)
attrs['_meta'] = {
'host': getattr(meta, 'host'),
'model': getattr(meta, 'model'),
'spec': getattr(meta, 'spec', QuerySpecification()),
}
new_cls = super_new(mcs, name, bases, attrs)
index = new_cls.instance()
_indexes.append(index)
index.model._search_index = index
return new_cls
class Index(object):
__metaclass__ = IndexMeta
def __init__(self):
self._meta = self.__class__._meta
self.model = self._meta['model']
self.spec = self._meta['spec']
self.uri, _, db = self.model._meta['db'].rpartition('/')
self.namespace = '%s-%s' % (db, self.model._meta['collection'])
self.doc_type = self.model._name
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
@property
def connection(self):
if not hasattr(self, '_connection'):
host = self._meta['host']
if host not in _connections:
_connections[host] = pyes.ES(host)
self._connection = _connections[host]
return self._connection
def search(self, query, page=1, limit=5, filters=None):
return search(self, query, page, limit, filters)
def indexer(self):
return Indexer(self)
class Term(object):
def __init__(self, index_name=None, index=True, boost=1.0, null_value=None, coerce=None):
self.name = None
self.index_name = index_name
self.index = index
self.boost = boost
self.null_value = null_value
self.coerce = coerce
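# Hedged declaration sketch: `_DemoModel` is a stand-in invented for this
# example so the metaclass registration can be shown end to end; real usage
# would pass an actual MongoDB-backed model class instead. Note that merely
# defining the subclass registers it in the module-level `_indexes` list.
class _DemoModel(object):
    _meta = {'db': 'mongodb://localhost/demo', 'collection': 'posts'}
    _name = 'post'
class _DemoPostIndex(Index):
    title = Term(boost=2.0)
    author_id = Term(index_name='author', coerce=str)
    class Meta:
        host = 'localhost:9200'
        model = _DemoModel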
class Indexer(object):
def __init__(self, index):
self.index = index
def index_document(self, obj, bulk=False):
doc = {}
for term in self.index.terms.values():
if not term.index:
continue
value = getattr(obj, term.name)
if value is not None:
if isinstance(value, ObjectId):
value = str(value)
if term.coerce is not None:
value = term.coerce(value)
doc[term.index_name] = value
self._execute(self.index.connection.index, doc, self.index.namespace,
self.index.doc_type, id=base64.b64encode(str(obj.id)), bulk=bulk)
def delete_document(self, doc_id):
self._execute(self.index.connection.delete, self.index.namespace,
self.index.doc_type, base64.b64encode(str(doc_id)))
def insert(self, obj):
obj = self.index.model.to_python(obj)
logging.info('Indexing %s (%s)' % (self.index.model._name, obj.id))
self.index_document(obj)
def update(self, obj_id, raw):
o = raw['o']
fields = self.index.terms.keys()
if o.has_key('$set') and len(set(fields) - set(o['$set'].keys())) < len(fields):
obj = self.index.model.objects.only(*fields).filter(self.index.spec).with_id(obj_id)
if obj is not None:
logging.info('Updating %s (%s)' % (self.index.model._name, obj.id))
self.index_document(obj)
else:
self.delete(obj_id)
def delete(self, obj_id):
logging.info('Deleting %s (%s)' % (self.index.model._name, obj_id))
self.delete_document(obj_id)
def _execute(self, func, *args, **kwargs):
attempts = 0
while attempts < 5:
try:
func(*args, **kwargs)
break
except NotFoundException:
break
except Exception:
attempts += 1
logging.warning('Retrying... (%d)' % attempts, exc_info=True)
time.sleep(1)
class ResultSet(object):
def __init__(self, objects=None, total=0, elapsed_time=0, max_score=0):
self.objects = objects or []
self.meta = {}
self.total = total
self.elapsed_time = elapsed_time
self.max_score = max_score
def append(self, value, meta):
if value is not None:
self.objects.append(value)
self.meta[value] = meta
def has_more(self):
return len(self.objects) < self.total
def __len__(self):
return self.objects.__len__()
def __iter__(self):
for obj in self.objects:
yield obj, self.meta[obj]
def search(indexes, query, page=1, limit=5, filters=None):
if not isinstance(indexes, list):
indexes = [indexes]
namespaces = []
models = {}
for i, index in enumerate(indexes):
if not isinstance(index, Index):
model = index
for index in _indexes:
if index.model == model:
indexes[i] = index
break
namespaces.append(index.namespace)
models[index.namespace] = index.model
result_set = ResultSet()
result_set.query = query
if isinstance(query, (str, unicode)):
if query.endswith(':'):
query = query[:-1]
if any([op in query for op in ['?', '*', '~', 'OR', 'AND', '+', 'NOT', '-', ':']]):
query = pyes.StringQuery(query)
else:
query = pyes.StringQuery(query + '*')
if not isinstance(query, pyes.FilteredQuery) and filters:
term_filter = pyes.TermFilter()
for field, value in filters.iteritems():
term_filter.add(field, value)
query = pyes.FilteredQuery(query, term_filter)
page = int(page)
limit = int(limit)
skip = (page - 1) * limit
try:
response = _indexes[0].connection.search(query, indices=namespaces, **{
'from': str(skip),
'size': str(limit)
})
result_set.total = response.total
result_set.elapsed_time = response._results['took'] / 1000.0
result_set.max_score = response.max_score
for i, hit in enumerate(response.hits):
result_set.append(models[hit['_index']].objects.with_id(base64.b64decode(hit['_id'])), {
'rank': skip + i + 1,
'score': hit['_score'],
'relevance': int(hit['_score'] / result_set.max_score * 100)
})
except pyes.exceptions.SearchPhaseExecutionException:
pass
return result_set
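# Hedged usage sketch (requires a reachable Elasticsearch host and a populated
# index, so it is left as a comment): querying a registered index and walking
# the scored results, e.g. with the `_DemoPostIndex` sketched above.
#
#     results = search(_DemoPostIndex.instance(), 'mongodb AND search', limit=10)
#     for obj, meta in results:
#         print obj, meta['rank'], meta['score'], meta['relevance']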
def reindex(only=None):
logging.info('Reindexing...')
for index in _indexes:
if only and index.namespace not in only:
continue
try:
index.connection.delete_index(index.namespace)
except pyes.exceptions.IndexMissingException:
pass
index.connection.create_index(index.namespace)
objects = index.model.objects.only(*index.terms.keys()).filter(index.spec)
count = objects.count()
logging.info('%d object(s) from %s' % (count, index.namespace))
indexer = Indexer(index)
for i, obj in enumerate(objects):
i += 1
if not i % 10000:
logging.info('%d/%d', i, count)
indexer.index_document(obj, bulk=True)
indexer.index.connection.force_bulk()
logging.info('Done!')
def watch():
hosts = collections.defaultdict(list)
global _indexes
for index in _indexes:
hosts[index.uri].append(index)
def target(uri, indexes):
namespaces = [index.namespace.replace('-', '.') for index in indexes]
logging.info('Watching %s' % namespaces)
oplog_watcher = OplogWatcher(pymongo.Connection(uri), namespaces=namespaces)
for index in indexes:
indexer = index.indexer()
for op in ('insert', 'update', 'delete',):
oplog_watcher.add_handler(index.namespace.replace('-', '.'), op, getattr(indexer, op))
oplog_watcher.start()
if len(hosts) > 1:
for uri, host_indexes in hosts.items():
multiprocessing.Process(target=target, args=(uri, host_indexes)).start()
else:
target(*hosts.items()[0])
while True:
time.sleep(1)
|
|
import json
import logging
from unittest import mock
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.test import TestCase, override_settings
from django.urls import reverse
from freezegun import freeze_time
from wagtail.core.models import (
GroupApprovalTask, Page, Task, TaskState, Workflow, WorkflowPage, WorkflowState, WorkflowTask)
from wagtail.core.signals import page_published
from wagtail.tests.testapp.models import SimplePage, SimpleTask
from wagtail.tests.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
def delete_existing_workflows():
WorkflowPage.objects.all().delete()
Workflow.objects.all().delete()
Task.objects.all().delete()
WorkflowTask.objects.all().delete()
class TestWorkflowsIndexView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
self.editor = self.create_user(
username='editor',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.editor)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.permissions.add(Permission.objects.get(codename="add_workflow"))
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_workflows:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/index.html')
# Initially there should be no workflows listed
self.assertContains(response, "There are no enabled workflows.")
Workflow.objects.create(name="test_workflow", active=True)
# Now the listing should contain our workflow
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/index.html')
self.assertNotContains(response, "There are no enabled workflows.")
self.assertContains(response, "test_workflow")
def test_deactivated(self):
Workflow.objects.create(name="test_workflow", active=False)
# The listing should contain our workflow, as well as marking it as disabled
response = self.get(params={'show_disabled': 'true'})
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "No workflows have been created.")
self.assertContains(response, "test_workflow")
self.assertContains(response, '<span class="status-tag">Disabled</span>', html=True)
# If we set 'show_disabled' to 'False', the workflow should not be displayed
response = self.get(params={})
self.assertEqual(response.status_code, 200)
self.assertContains(response, "There are no enabled workflows.")
def test_permissions(self):
self.login(user=self.editor)
response = self.get()
self.assertEqual(response.status_code, 302)
full_context = {key: value for context in response.context for key, value in context.items()}
self.assertEqual(full_context['message'], 'Sorry, you do not have permission to access this area.')
self.login(user=self.moderator)
response = self.get()
self.assertEqual(response.status_code, 200)
class TestWorkflowsCreateView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
self.task_1 = SimpleTask.objects.create(name="first_task")
self.task_2 = SimpleTask.objects.create(name="second_task")
self.editor = self.create_user(
username='editor',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.editor)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.permissions.add(Permission.objects.get(codename="add_workflow"))
self.root_page = Page.objects.get(depth=1)
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_workflows:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailadmin_workflows:add'), post_data)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/create.html')
def test_post(self):
response = self.post({
'name': ['test_workflow'],
'active': ['on'],
'workflow_tasks-TOTAL_FORMS': ['2'],
'workflow_tasks-INITIAL_FORMS': ['0'],
'workflow_tasks-MIN_NUM_FORMS': ['0'],
'workflow_tasks-MAX_NUM_FORMS': ['1000'],
'workflow_tasks-0-task': [str(self.task_1.id)],
'workflow_tasks-0-id': [''],
'workflow_tasks-0-ORDER': ['1'],
'workflow_tasks-0-DELETE': [''],
'workflow_tasks-1-task': [str(self.task_2.id)],
'workflow_tasks-1-id': [''],
'workflow_tasks-1-ORDER': ['2'],
'workflow_tasks-1-DELETE': [''],
'pages-TOTAL_FORMS': ['2'],
'pages-INITIAL_FORMS': ['1'],
'pages-MIN_NUM_FORMS': ['0'],
'pages-MAX_NUM_FORMS': ['1000'],
'pages-0-page': [str(self.root_page.id)],
'pages-0-DELETE': [''],
'pages-1-page': [''],
'pages-1-DELETE': [''],
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailadmin_workflows:index'))
# Check that the workflow was created
workflows = Workflow.objects.filter(name="test_workflow", active=True)
self.assertEqual(workflows.count(), 1)
workflow = workflows.first()
# Check that the tasks are associated with the workflow
self.assertEqual([self.task_1.task_ptr, self.task_2.task_ptr], list(workflow.tasks))
# Check that the tasks have sort_order set on WorkflowTask correctly
self.assertEqual(WorkflowTask.objects.get(workflow=workflow, task=self.task_1.task_ptr).sort_order, 0)
self.assertEqual(WorkflowTask.objects.get(workflow=workflow, task=self.task_2.task_ptr).sort_order, 1)
def test_permissions(self):
self.login(user=self.editor)
response = self.get()
self.assertEqual(response.status_code, 302)
full_context = {key: value for context in response.context for key, value in context.items()}
self.assertEqual(full_context['message'], 'Sorry, you do not have permission to access this area.')
self.login(user=self.moderator)
response = self.get()
self.assertEqual(response.status_code, 200)
def test_page_already_has_workflow_check(self):
workflow = Workflow.objects.create(name="existing_workflow")
WorkflowPage.objects.create(workflow=workflow, page=self.root_page)
response = self.post({
'name': ['test_workflow'],
'active': ['on'],
'workflow_tasks-TOTAL_FORMS': ['2'],
'workflow_tasks-INITIAL_FORMS': ['0'],
'workflow_tasks-MIN_NUM_FORMS': ['0'],
'workflow_tasks-MAX_NUM_FORMS': ['1000'],
'workflow_tasks-0-task': [str(self.task_1.id)],
'workflow_tasks-0-id': [''],
'workflow_tasks-0-ORDER': ['1'],
'workflow_tasks-0-DELETE': [''],
'workflow_tasks-1-task': [str(self.task_2.id)],
'workflow_tasks-1-id': [''],
'workflow_tasks-1-ORDER': ['2'],
'workflow_tasks-1-DELETE': [''],
'pages-TOTAL_FORMS': ['2'],
'pages-INITIAL_FORMS': ['1'],
'pages-MIN_NUM_FORMS': ['0'],
'pages-MAX_NUM_FORMS': ['1000'],
'pages-0-page': [str(self.root_page.id)],
'pages-0-DELETE': [''],
'pages-1-page': [''],
'pages-1-DELETE': [''],
})
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'pages_formset', 0, 'page', ["This page already has workflow 'existing_workflow' assigned."])
class TestWorkflowsEditView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
self.workflow = Workflow.objects.create(name="workflow_to_edit")
self.task_1 = SimpleTask.objects.create(name="first_task")
self.task_2 = SimpleTask.objects.create(name="second_task")
self.inactive_task = SimpleTask.objects.create(name="inactive_task", active=False)
self.workflow_task = WorkflowTask.objects.create(workflow=self.workflow, task=self.task_1.task_ptr, sort_order=0)
self.page = Page.objects.first()
WorkflowPage.objects.create(workflow=self.workflow, page=self.page)
self.editor = self.create_user(
username='editor',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.editor)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.permissions.add(Permission.objects.get(codename="change_workflow"))
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_workflows:edit', args=[self.workflow.id]), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailadmin_workflows:edit', args=[self.workflow.id]), post_data)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/edit.html')
# Check that the list of pages has the page to which this workflow is assigned
self.assertContains(response, self.page.title)
def test_post(self):
response = self.post({
'name': [str(self.workflow.name)],
'active': ['on'],
'workflow_tasks-TOTAL_FORMS': ['2'],
'workflow_tasks-INITIAL_FORMS': ['1'],
'workflow_tasks-MIN_NUM_FORMS': ['0'],
'workflow_tasks-MAX_NUM_FORMS': ['1000'],
'workflow_tasks-0-task': [str(self.task_1.id)],
'workflow_tasks-0-id': [str(self.workflow_task.id)],
'workflow_tasks-0-ORDER': ['1'],
'workflow_tasks-0-DELETE': [''],
'workflow_tasks-1-task': [str(self.task_2.id)],
'workflow_tasks-1-id': [''],
'workflow_tasks-1-ORDER': ['2'],
'workflow_tasks-1-DELETE': [''],
'pages-TOTAL_FORMS': ['2'],
'pages-INITIAL_FORMS': ['1'],
'pages-MIN_NUM_FORMS': ['0'],
'pages-MAX_NUM_FORMS': ['1000'],
'pages-0-page': [str(self.page.id)],
'pages-0-DELETE': [''],
'pages-1-page': [''],
'pages-1-DELETE': [''],
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailadmin_workflows:index'))
# Check that the workflow was created
workflows = Workflow.objects.filter(name="workflow_to_edit", active=True)
self.assertEqual(workflows.count(), 1)
workflow = workflows.first()
# Check that the tasks are associated with the workflow
self.assertEqual([self.task_1.task_ptr, self.task_2.task_ptr], list(workflow.tasks))
# Check that the tasks have sort_order set on WorkflowTask correctly
self.assertEqual(WorkflowTask.objects.get(workflow=workflow, task=self.task_1.task_ptr).sort_order, 0)
self.assertEqual(WorkflowTask.objects.get(workflow=workflow, task=self.task_2.task_ptr).sort_order, 1)
def test_permissions(self):
self.login(user=self.editor)
response = self.get()
self.assertEqual(response.status_code, 302)
full_context = {key: value for context in response.context for key, value in context.items()}
self.assertEqual(full_context['message'], 'Sorry, you do not have permission to access this area.')
self.login(user=self.moderator)
response = self.get()
self.assertEqual(response.status_code, 200)
def test_duplicate_page_check(self):
response = self.post({
'name': [str(self.workflow.name)],
'active': ['on'],
'workflow_tasks-TOTAL_FORMS': ['2'],
'workflow_tasks-INITIAL_FORMS': ['1'],
'workflow_tasks-MIN_NUM_FORMS': ['0'],
'workflow_tasks-MAX_NUM_FORMS': ['1000'],
'workflow_tasks-0-task': [str(self.task_1.id)],
'workflow_tasks-0-id': [str(self.workflow_task.id)],
'workflow_tasks-0-ORDER': ['1'],
'workflow_tasks-0-DELETE': [''],
'workflow_tasks-1-task': [str(self.task_2.id)],
'workflow_tasks-1-id': [''],
'workflow_tasks-1-ORDER': ['2'],
'workflow_tasks-1-DELETE': [''],
'pages-TOTAL_FORMS': ['2'],
'pages-INITIAL_FORMS': ['1'],
'pages-MIN_NUM_FORMS': ['0'],
'pages-MAX_NUM_FORMS': ['1000'],
'pages-0-page': [str(self.page.id)],
'pages-0-DELETE': [''],
'pages-1-page': [str(self.page.id)],
'pages-1-DELETE': [''],
})
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'pages_formset', None, None, ['You cannot assign this workflow to the same page multiple times.'])
def test_pages_ignored_if_workflow_disabled(self):
self.workflow.active = False
self.workflow.save()
self.workflow.workflow_pages.all().delete()
response = self.post({
'name': [str(self.workflow.name)],
'active': ['on'],
'workflow_tasks-TOTAL_FORMS': ['2'],
'workflow_tasks-INITIAL_FORMS': ['1'],
'workflow_tasks-MIN_NUM_FORMS': ['0'],
'workflow_tasks-MAX_NUM_FORMS': ['1000'],
'workflow_tasks-0-task': [str(self.task_1.id)],
'workflow_tasks-0-id': [str(self.workflow_task.id)],
'workflow_tasks-0-ORDER': ['1'],
'workflow_tasks-0-DELETE': [''],
'workflow_tasks-1-task': [str(self.task_2.id)],
'workflow_tasks-1-id': [''],
'workflow_tasks-1-ORDER': ['2'],
'workflow_tasks-1-DELETE': [''],
'pages-TOTAL_FORMS': ['2'],
'pages-INITIAL_FORMS': ['1'],
'pages-MIN_NUM_FORMS': ['0'],
'pages-MAX_NUM_FORMS': ['1000'],
'pages-0-page': [str(self.page.id)],
'pages-0-DELETE': [''],
'pages-1-page': [''],
'pages-1-DELETE': [''],
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailadmin_workflows:index'))
# Check that the pages weren't added to the workflow
self.workflow.refresh_from_db()
self.assertFalse(self.workflow.workflow_pages.exists())
class TestRemoveWorkflow(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
delete_existing_workflows()
self.login()
self.workflow = Workflow.objects.create(name="workflow")
self.page = Page.objects.first()
WorkflowPage.objects.create(workflow=self.workflow, page=self.page)
self.editor = self.create_user(
username='editor',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.editor)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.permissions.add(Permission.objects.get(codename="change_workflow"))
def post(self, post_data={}):
return self.client.post(reverse('wagtailadmin_workflows:remove', args=[self.page.id, self.workflow.id]), post_data)
def test_post(self):
# Check that a WorkflowPage instance is removed correctly
self.post()
self.assertEqual(WorkflowPage.objects.filter(workflow=self.workflow, page=self.page).count(), 0)
def test_no_permissions(self):
self.login(user=self.editor)
response = self.post()
self.assertEqual(response.status_code, 302)
def test_post_with_permission(self):
self.login(user=self.moderator)
response = self.post()
self.assertEqual(response.status_code, 302)
class TestTaskIndexView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
self.editor = self.create_user(
username='editor',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.editor)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.permissions.add(Permission.objects.get(codename="change_task"))
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_workflows:task_index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/task_index.html')
# Initially there should be no tasks listed
self.assertContains(response, "There are no enabled tasks")
SimpleTask.objects.create(name="test_task", active=True)
# Now the listing should contain our task
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/task_index.html')
self.assertNotContains(response, "There are no enabled tasks")
self.assertContains(response, "test_task")
def test_deactivated(self):
Task.objects.create(name="test_task", active=False)
# The listing should contain our task, as well as marking it as disabled
response = self.get(params={'show_disabled': 'true'})
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "No tasks have been created.")
self.assertContains(response, "test_task")
self.assertContains(response, '<span class="status-tag">Disabled</span>', html=True)
# The listing should not contain the task if the 'show_disabled' query parameter is not set
response = self.get(params={})
self.assertEqual(response.status_code, 200)
self.assertContains(response, "There are no enabled tasks")
self.assertNotContains(response, "test_task")
def test_permissions(self):
self.login(user=self.editor)
response = self.get()
self.assertEqual(response.status_code, 302)
full_context = {key: value for context in response.context for key, value in context.items()}
self.assertEqual(full_context['message'], 'Sorry, you do not have permission to access this area.')
self.login(user=self.moderator)
response = self.get()
self.assertEqual(response.status_code, 200)
class TestCreateTaskView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
self.editor = self.create_user(
username='editor',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.editor)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.permissions.add(Permission.objects.get(codename="add_task"))
def get(self, url_kwargs=None, params={}):
url_kwargs = url_kwargs or {}
url_kwargs.setdefault('app_label', SimpleTask._meta.app_label)
url_kwargs.setdefault('model_name', SimpleTask._meta.model_name)
return self.client.get(reverse('wagtailadmin_workflows:add_task', kwargs=url_kwargs), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailadmin_workflows:add_task', kwargs={'app_label': SimpleTask._meta.app_label, 'model_name': SimpleTask._meta.model_name}), post_data)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/create_task.html')
def test_get_with_non_task_model(self):
response = self.get(url_kwargs={'app_label': 'wagtailcore', 'model_name': 'Site'})
self.assertEqual(response.status_code, 404)
def test_get_with_base_task_model(self):
response = self.get(url_kwargs={'app_label': 'wagtailcore', 'model_name': 'Task'})
self.assertEqual(response.status_code, 404)
def test_post(self):
response = self.post({'name': 'test_task', 'active': 'on'})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailadmin_workflows:task_index'))
# Check that the task was created
tasks = Task.objects.filter(name="test_task", active=True)
self.assertEqual(tasks.count(), 1)
def test_permissions(self):
self.login(user=self.editor)
response = self.get()
self.assertEqual(response.status_code, 302)
full_context = {key: value for context in response.context for key, value in context.items()}
self.assertEqual(full_context['message'], 'Sorry, you do not have permission to access this area.')
self.login(user=self.moderator)
response = self.get()
self.assertEqual(response.status_code, 200)
class TestSelectTaskTypeView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
def get(self):
return self.client.get(reverse('wagtailadmin_workflows:select_task_type'))
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/select_task_type.html')
# Check that the list of available task types includes SimpleTask and GroupApprovalTask
self.assertContains(response, SimpleTask.get_verbose_name())
self.assertContains(response, GroupApprovalTask.get_verbose_name())
self.assertContains(response, GroupApprovalTask.get_description())
class TestEditTaskView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
self.task = GroupApprovalTask.objects.create(name="test_task")
self.editor = self.create_user(
username='editor',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.editor)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.permissions.add(Permission.objects.get(codename="change_task"))
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_workflows:edit_task', args=[self.task.id]), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailadmin_workflows:edit_task', args=[self.task.id]), post_data)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/edit_task.html')
def test_post(self):
self.assertEqual(self.task.groups.count(), 0)
editors = Group.objects.get(name='Editors')
response = self.post({'name': 'test_task_modified', 'active': 'on', 'groups': [str(editors.id)]})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailadmin_workflows:task_index'))
# Check that the task was updated
task = GroupApprovalTask.objects.get(id=self.task.id)
# The task name cannot be changed
self.assertEqual(task.name, "test_task")
# This request should've added a group to the task
self.assertEqual(task.groups.count(), 1)
self.assertTrue(task.groups.filter(id=editors.id).exists())
def test_permissions(self):
self.login(user=self.editor)
response = self.get()
self.assertEqual(response.status_code, 302)
full_context = {key: value for context in response.context for key, value in context.items()}
self.assertEqual(full_context['message'], 'Sorry, you do not have permission to access this area.')
self.login(user=self.moderator)
response = self.get()
self.assertEqual(response.status_code, 200)
class TestSubmitToWorkflow(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.submitter = self.create_user(
username='submitter',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.submitter)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
self.superuser = self.create_superuser(
username='superuser',
email='[email protected]',
password='password',
)
self.login(user=self.submitter)
# Create a page
root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
content="hello",
live=False,
has_unpublished_changes=True,
)
root_page.add_child(instance=self.page)
self.page.save_revision()
self.workflow, self.task_1, self.task_2 = self.create_workflow_and_tasks()
WorkflowPage.objects.create(workflow=self.workflow, page=self.page)
def create_workflow_and_tasks(self):
workflow = Workflow.objects.create(name='test_workflow')
task_1 = GroupApprovalTask.objects.create(name='test_task_1')
task_2 = GroupApprovalTask.objects.create(name='test_task_2')
task_1.groups.set(Group.objects.filter(name='Moderators'))
task_2.groups.set(Group.objects.filter(name='Moderators'))
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
WorkflowTask.objects.create(workflow=workflow, task=task_2, sort_order=2)
return workflow, task_1, task_2
def submit(self):
post_data = {
'title': str(self.page.title),
'slug': str(self.page.slug),
'content': str(self.page.content),
'action-submit': "True",
}
return self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), post_data)
def test_submit_for_approval_creates_states(self):
"""Test that WorkflowState and TaskState objects are correctly created when a Page is submitted for approval"""
self.submit()
workflow_state = self.page.current_workflow_state
self.assertEqual(type(workflow_state), WorkflowState)
self.assertEqual(workflow_state.workflow, self.workflow)
self.assertEqual(workflow_state.status, workflow_state.STATUS_IN_PROGRESS)
self.assertEqual(workflow_state.requested_by, self.submitter)
task_state = workflow_state.current_task_state
self.assertEqual(type(task_state), TaskState)
self.assertEqual(task_state.task.specific, self.task_1)
self.assertEqual(task_state.status, task_state.STATUS_IN_PROGRESS)
def test_submit_for_approval_changes_status_in_header_meta(self):
edit_url = reverse('wagtailadmin_pages:edit', args=(self.page.id, ))
response = self.client.get(edit_url)
self.assertContains(response, 'Draft', count=1)
# submit for approval
self.submit()
response = self.client.get(edit_url)
workflow_status_url = reverse('wagtailadmin_pages:workflow_status', args=(self.page.id, ))
self.assertContains(response, workflow_status_url)
self.assertRegex(response.content.decode('utf-8'), r'Awaiting[\s|\n]+{}'.format(self.page.current_workflow_task.name))
self.assertNotContains(response, 'Draft')
@mock.patch.object(EmailMultiAlternatives, 'send', side_effect=IOError('Server down'))
def test_email_send_error(self, mock_fn):
logging.disable(logging.CRITICAL)
response = self.submit()
logging.disable(logging.NOTSET)
# An email that fails to send should return a message rather than crash the page
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('wagtailadmin_home'))
def test_resume_rejected_workflow(self):
# test that an existing workflow can be resumed by submitting when rejected
self.workflow.start(self.page, user=self.submitter)
workflow_state = self.page.current_workflow_state
workflow_state.current_task_state.approve(user=self.superuser)
workflow_state.refresh_from_db()
workflow_state.current_task_state.reject(user=self.superuser)
workflow_state.refresh_from_db()
self.assertEqual(workflow_state.current_task_state.task.specific, self.task_2)
self.assertEqual(workflow_state.status, WorkflowState.STATUS_NEEDS_CHANGES)
self.submit()
workflow_state.refresh_from_db()
# check that the same workflow state's status is now in progress
self.assertEqual(workflow_state.status, WorkflowState.STATUS_IN_PROGRESS)
# check that the workflow remains on the rejecting task, rather than resetting
self.assertEqual(workflow_state.current_task_state.task.specific, self.task_2)
def test_restart_rejected_workflow(self):
# test that an existing workflow can be restarted when rejected
self.workflow.start(self.page, user=self.submitter)
workflow_state = self.page.current_workflow_state
workflow_state.current_task_state.approve(user=self.superuser)
workflow_state.refresh_from_db()
workflow_state.current_task_state.reject(user=self.superuser)
workflow_state.refresh_from_db()
self.assertEqual(workflow_state.current_task_state.task.specific, self.task_2)
self.assertEqual(workflow_state.status, WorkflowState.STATUS_NEEDS_CHANGES)
post_data = {
'title': str(self.page.title),
'slug': str(self.page.slug),
'content': str(self.page.content),
'action-restart-workflow': "True",
}
self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), post_data)
workflow_state.refresh_from_db()
# check that the same workflow state's status is now cancelled
self.assertEqual(workflow_state.status, WorkflowState.STATUS_CANCELLED)
# check that the new workflow has started on the first task
new_workflow_state = self.page.current_workflow_state
self.assertEqual(new_workflow_state.status, WorkflowState.STATUS_IN_PROGRESS)
self.assertEqual(new_workflow_state.current_task_state.task.specific, self.task_1)
def test_cancel_workflow(self):
# test that an existing workflow can be cancelled after submission by the submitter
self.workflow.start(self.page, user=self.submitter)
workflow_state = self.page.current_workflow_state
self.assertEqual(workflow_state.current_task_state.task.specific, self.task_1)
self.assertEqual(workflow_state.status, WorkflowState.STATUS_IN_PROGRESS)
post_data = {
'title': str(self.page.title),
'slug': str(self.page.slug),
'content': str(self.page.content),
'action-cancel-workflow': "True",
}
self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), post_data)
workflow_state.refresh_from_db()
# check that the workflow state's status is now cancelled
self.assertEqual(workflow_state.status, WorkflowState.STATUS_CANCELLED)
self.assertEqual(workflow_state.current_task_state.status, TaskState.STATUS_CANCELLED)
def test_email_headers(self):
# Submit
self.submit()
msg_headers = set(mail.outbox[0].message().items())
headers = {('Auto-Submitted', 'auto-generated')}
self.assertTrue(headers.issubset(msg_headers), msg='Message is missing the Auto-Submitted header.',)
@freeze_time("2020-03-31 12:00:00")
class TestApproveRejectWorkflow(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.submitter = self.create_user(
username='submitter',
first_name='Sebastian',
last_name='Mitter',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.submitter)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
self.superuser = self.create_superuser(
username='superuser',
email='[email protected]',
password='password',
)
self.login(user=self.submitter)
# Create a page
root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
content="hello",
live=False,
has_unpublished_changes=True,
)
root_page.add_child(instance=self.page)
self.workflow, self.task_1 = self.create_workflow_and_tasks()
WorkflowPage.objects.create(workflow=self.workflow, page=self.page)
self.submit()
self.login(user=self.moderator)
def create_workflow_and_tasks(self):
workflow = Workflow.objects.create(name='test_workflow')
task_1 = GroupApprovalTask.objects.create(name='test_task_1')
task_1.groups.set(Group.objects.filter(name='Moderators'))
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
return workflow, task_1
def submit(self):
post_data = {
'title': str(self.page.title),
'slug': str(self.page.slug),
'content': str(self.page.content),
'action-submit': "True",
}
return self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), post_data)
@override_settings(WAGTAIL_FINISH_WORKFLOW_ACTION='')
def test_approve_task_and_workflow(self):
"""
This posts to the approve task view and checks that the page was approved and published
"""
# Unset WAGTAIL_FINISH_WORKFLOW_ACTION - default action should be to publish
del settings.WAGTAIL_FINISH_WORKFLOW_ACTION
# Connect a mock signal handler to page_published signal
mock_handler = mock.MagicMock()
page_published.connect(mock_handler)
# Post
self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)), {'comment': 'my comment'})
# Check that the workflow was approved
workflow_state = WorkflowState.objects.get(page=self.page, requested_by=self.submitter)
self.assertEqual(workflow_state.status, workflow_state.STATUS_APPROVED)
# Check that the task was approved
task_state = workflow_state.current_task_state
self.assertEqual(task_state.status, task_state.STATUS_APPROVED)
# Check that the comment was added to the task state correctly
self.assertEqual(task_state.comment, 'my comment')
page = Page.objects.get(id=self.page.id)
# Page must be live
self.assertTrue(page.live, "Approving moderation failed to set live=True")
# Page should now have no unpublished changes
self.assertFalse(
page.has_unpublished_changes,
"Approving moderation failed to set has_unpublished_changes=False"
)
# Check that the page_published signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], self.page.specific_class)
self.assertEqual(mock_call['instance'], self.page)
self.assertIsInstance(mock_call['instance'], self.page.specific_class)
def test_workflow_action_get(self):
"""
This tests that a GET request to the workflow action view (for the approve action) returns a modal with a form for extra data entry:
adding a comment
"""
response = self.client.get(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/workflow_action_modal.html')
html = json.loads(response.content)['html']
self.assertTagInHTML('<form action="' + reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)) + '" method="POST" novalidate>', html)
self.assertIn('Comment', html)
def test_workflow_action_view_bad_page_id(self):
"""
This tests that the workflow action view handles invalid page ids correctly
"""
# Post
response = self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(127777777777, 'approve', self.page.current_workflow_task_state.id)))
# Check that the user received a 404 response
self.assertEqual(response.status_code, 404)
def test_workflow_action_view_not_in_group(self):
"""
This tests that the workflow action view for a GroupApprovalTask won't allow approval from a user not in the
specified group/a superuser
"""
# Remove privileges from user
self.login(user=self.submitter)
# Post
response = self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)))
# Check that the user received a permission denied response
self.assertRedirects(response, '/admin/')
def test_edit_view_workflow_cancellation_not_in_group(self):
"""
This tests that the page edit view for a GroupApprovalTask, locked to a user not in the
specified group/a superuser, still allows the submitter to cancel workflows
"""
self.login(user=self.submitter)
# Post
response = self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id, )), {'action-cancel-workflow': 'True'})
# Check that the user received a 200 response
self.assertEqual(response.status_code, 200)
# Check that the workflow state was marked as cancelled
workflow_state = WorkflowState.objects.get(page=self.page, requested_by=self.submitter)
self.assertEqual(workflow_state.status, WorkflowState.STATUS_CANCELLED)
def test_reject_task_and_workflow(self):
"""
This posts to the reject task view and checks that the page was rejected and not published
"""
# Post
self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'reject', self.page.current_workflow_task_state.id)))
# Check that the workflow was marked as needing changes
workflow_state = WorkflowState.objects.get(page=self.page, requested_by=self.submitter)
self.assertEqual(workflow_state.status, workflow_state.STATUS_NEEDS_CHANGES)
# Check that the task was rejected
task_state = workflow_state.current_task_state
self.assertEqual(task_state.status, task_state.STATUS_REJECTED)
page = Page.objects.get(id=self.page.id)
# Page must not be live
self.assertFalse(page.live)
def test_workflow_action_view_rejection_not_in_group(self):
"""
This tests that the workflow action view for a GroupApprovalTask won't allow rejection from a user not in the
specified group/a superuser
"""
# Remove privileges from user
self.login(user=self.submitter)
# Post
response = self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'reject', self.page.current_workflow_task_state.id)))
# Check that the user received a permission denied response
self.assertRedirects(response, '/admin/')
def test_collect_workflow_action_data_get(self):
"""
This tests that a GET request to the collect_workflow_action_data view (for the approve action) returns a modal with a form for extra data entry:
adding a comment
"""
response = self.client.get(reverse('wagtailadmin_pages:collect_workflow_action_data', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/workflow_action_modal.html')
html = json.loads(response.content)['html']
self.assertTagInHTML('<form action="' + reverse('wagtailadmin_pages:collect_workflow_action_data', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)) + '" method="POST" novalidate>', html)
self.assertIn('Comment', html)
def test_collect_workflow_action_data_post(self):
"""
This tests that a POST request to the collect_workflow_action_data view (for the approve action) returns a modal response with the validated data
"""
response = self.client.post(
reverse('wagtailadmin_pages:collect_workflow_action_data', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)),
{'comment': "This is my comment"}
)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['step'], 'success')
self.assertEqual(response_json['cleaned_data'], {'comment': "This is my comment"})
def test_workflow_action_via_edit_view(self):
"""
Posting to the 'edit' view with 'action-workflow-action' set should perform the given workflow action in addition to updating page content
"""
# Post
self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), {
'title': "This title was edited while approving",
'slug': str(self.page.slug),
'content': str(self.page.content),
'action-workflow-action': "True",
'workflow-action-name': 'approve',
'workflow-action-extra-data': '{"comment": "my comment"}'
})
# Check that the workflow was approved
workflow_state = WorkflowState.objects.get(page=self.page, requested_by=self.submitter)
self.assertEqual(workflow_state.status, workflow_state.STATUS_APPROVED)
# Check that the task was approved
task_state = workflow_state.current_task_state
self.assertEqual(task_state.status, task_state.STATUS_APPROVED)
# Check that the comment was added to the task state correctly
self.assertEqual(task_state.comment, 'my comment')
# Check that page edits made at the same time as the action have been saved
page = Page.objects.get(id=self.page.id)
self.assertEqual(page.get_latest_revision_as_page().title, "This title was edited while approving")
def test_workflow_report(self):
response = self.client.get(reverse('wagtailadmin_reports:workflow'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Hello world!")
self.assertContains(response, "test_workflow")
self.assertContains(response, "Sebastian Mitter")
self.assertContains(response, "March 31, 2020")
response = self.client.get(reverse('wagtailadmin_reports:workflow_tasks'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Hello world!")
def test_workflow_report_filtered(self):
# the moderator can review the task, so the workflow state should show up even when reports are filtered by reviewable
response = self.client.get(reverse('wagtailadmin_reports:workflow'), {'reviewable': 'true'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Hello world!")
self.assertContains(response, "test_workflow")
self.assertContains(response, "Sebastian Mitter")
self.assertContains(response, "March 31, 2020")
response = self.client.get(reverse('wagtailadmin_reports:workflow_tasks'), {'reviewable': 'true'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Hello world!")
# the submitter cannot review the task, so the workflow state shouldn't show up when reports are filtered by reviewable
self.login(self.submitter)
response = self.client.get(reverse('wagtailadmin_reports:workflow'), {'reviewable': 'true'})
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Hello world!")
self.assertNotContains(response, "Sebastian Mitter")
self.assertNotContains(response, "March 31, 2020")
response = self.client.get(reverse('wagtailadmin_reports:workflow_tasks'), {'reviewable': 'true'})
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Hello world!")
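# Tests that workflow submission/approval/rejection emails respect per-user notification
# preferences and the WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS setting.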
class TestNotificationPreferences(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.submitter = self.create_user(
username='submitter',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.submitter)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
self.moderator2 = self.create_user(
username='moderator2',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.user_set.add(self.moderator2)
self.superuser = self.create_superuser(
username='superuser',
email='[email protected]',
password='password',
)
self.superuser_profile = UserProfile.get_for_user(self.superuser)
self.moderator2_profile = UserProfile.get_for_user(self.moderator2)
self.submitter_profile = UserProfile.get_for_user(self.submitter)
# Create a page
root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
content="hello",
live=False,
has_unpublished_changes=True,
)
root_page.add_child(instance=self.page)
self.workflow, self.task_1 = self.create_workflow_and_tasks()
WorkflowPage.objects.create(workflow=self.workflow, page=self.page)
def create_workflow_and_tasks(self):
workflow = Workflow.objects.create(name='test_workflow')
task_1 = GroupApprovalTask.objects.create(name='test_task_1')
task_1.groups.set(Group.objects.filter(name='Moderators'))
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
return workflow, task_1
def submit(self):
post_data = {
'title': str(self.page.title),
'slug': str(self.page.slug),
'content': str(self.page.content),
'action-submit': "True",
}
return self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), post_data)
def approve(self):
return self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)))
def reject(self):
return self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'reject', self.page.current_workflow_task_state.id)))
def test_vanilla_profile(self):
# Check that the vanilla profile has rejected notifications on
self.assertEqual(self.submitter_profile.rejected_notifications, True)
# Check that the vanilla profile has approved notifications on
self.assertEqual(self.submitter_profile.approved_notifications, True)
@override_settings(WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS=True)
def test_submitted_email_notifications_sent(self):
"""Test that 'submitted' notifications for WorkflowState and TaskState are both sent correctly"""
self.login(self.submitter)
self.submit()
self.assertEqual(len(mail.outbox), 4)
task_submission_emails = [email for email in mail.outbox if "task" in email.subject]
task_submission_emailed_addresses = [address for email in task_submission_emails for address in email.to]
workflow_submission_emails = [email for email in mail.outbox if "workflow" in email.subject]
workflow_submission_emailed_addresses = [address for email in workflow_submission_emails for address in email.to]
self.assertEqual(len(task_submission_emails), 3)
# the moderator is in the Group assigned to the GroupApproval task, so should get an email
self.assertIn(self.moderator.email, task_submission_emailed_addresses)
self.assertIn(self.moderator2.email, task_submission_emailed_addresses)
# with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a task email
self.assertIn(self.superuser.email, task_submission_emailed_addresses)
# the submitter triggered this workflow update, so should not get an email
self.assertNotIn(self.submitter.email, task_submission_emailed_addresses)
self.assertEqual(len(workflow_submission_emails), 1)
# the moderator should not get a workflow email
self.assertNotIn(self.moderator.email, workflow_submission_emailed_addresses)
self.assertNotIn(self.moderator2.email, workflow_submission_emailed_addresses)
# with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a workflow email
self.assertIn(self.superuser.email, workflow_submission_emailed_addresses)
# as the submitter was the triggering user, the submitter should not get an email notification
self.assertNotIn(self.submitter.email, workflow_submission_emailed_addresses)
@override_settings(WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS=False)
def test_submitted_email_notifications_superuser_settings(self):
"""Test that 'submitted' notifications for WorkflowState and TaskState are not sent to superusers if
`WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS=False`"""
self.login(self.submitter)
self.submit()
task_submission_emails = [email for email in mail.outbox if "task" in email.subject]
task_submission_emailed_addresses = [address for email in task_submission_emails for address in email.to]
workflow_submission_emails = [email for email in mail.outbox if "workflow" in email.subject]
workflow_submission_emailed_addresses = [address for email in workflow_submission_emails for address in email.to]
# with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS` off, the superuser should not get a task email
self.assertNotIn(self.superuser.email, task_submission_emailed_addresses)
# with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS` off, the superuser should not get a workflow email
self.assertNotIn(self.superuser.email, workflow_submission_emailed_addresses)
@override_settings(WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS=True)
def test_submit_notification_preferences_respected(self):
# moderator2 doesn't want emails
self.moderator2_profile.submitted_notifications = False
self.moderator2_profile.save()
# superuser doesn't want emails
self.superuser_profile.submitted_notifications = False
self.superuser_profile.save()
# Submit
self.login(self.submitter)
self.submit()
# Check that moderator2, who opted out of submitted notifications, did not get a task submitted email
workflow_submission_emails = [email for email in mail.outbox if "workflow" in email.subject]
workflow_submission_emailed_addresses = [address for email in workflow_submission_emails for address in email.to]
task_submission_emails = [email for email in mail.outbox if "task" in email.subject]
task_submission_emailed_addresses = [address for email in task_submission_emails for address in email.to]
self.assertNotIn(self.moderator2.email, task_submission_emailed_addresses)
# Check that the superuser didn't receive a workflow or task email
self.assertNotIn(self.superuser.email, task_submission_emailed_addresses)
self.assertNotIn(self.superuser.email, workflow_submission_emailed_addresses)
def test_approved_notifications(self):
self.login(self.submitter)
self.submit()
# Approve
self.login(self.moderator)
self.approve()
# Submitter must receive a workflow approved email
workflow_approved_emails = [email for email in mail.outbox if ("workflow" in email.subject and "approved" in email.subject)]
self.assertEqual(len(workflow_approved_emails), 1)
self.assertIn(self.submitter.email, workflow_approved_emails[0].to)
def test_approved_notifications_preferences_respected(self):
# Submitter doesn't want 'approved' emails
self.submitter_profile.approved_notifications = False
self.submitter_profile.save()
self.login(self.submitter)
self.submit()
# Approve
self.login(self.moderator)
self.approve()
# Submitter must not receive a workflow approved email, so there should be no emails in workflow_approved_emails
workflow_approved_emails = [email for email in mail.outbox if ("workflow" in email.subject and "approved" in email.subject)]
self.assertEqual(len(workflow_approved_emails), 0)
def test_rejected_notifications(self):
self.login(self.submitter)
self.submit()
# Reject
self.login(self.moderator)
self.reject()
# Submitter must receive a workflow rejected email
workflow_rejected_emails = [email for email in mail.outbox if ("workflow" in email.subject and "rejected" in email.subject)]
self.assertEqual(len(workflow_rejected_emails), 1)
self.assertIn(self.submitter.email, workflow_rejected_emails[0].to)
def test_rejected_notification_preferences_respected(self):
# Submitter doesn't want 'rejected' emails
self.submitter_profile.rejected_notifications = False
self.submitter_profile.save()
self.login(self.submitter)
self.submit()
# Reject
self.login(self.moderator)
self.reject()
# Submitter must not receive a workflow rejected email
workflow_rejected_emails = [email for email in mail.outbox if ("workflow" in email.subject and "rejected" in email.subject)]
self.assertEqual(len(workflow_rejected_emails), 0)
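# Tests for the workflow/task disable and enable views, including cancellation of any
# in-progress workflow and task states when a workflow or task is disabled.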
class TestDisableViews(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.submitter = self.create_user(
username='submitter',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.submitter)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
self.moderator2 = self.create_user(
username='moderator2',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
moderators.user_set.add(self.moderator2)
self.superuser = self.create_superuser(
username='superuser',
email='[email protected]',
password='password',
)
# Create a page
root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
content="hello",
live=False,
has_unpublished_changes=True,
)
root_page.add_child(instance=self.page)
self.workflow, self.task_1, self.task_2 = self.create_workflow_and_tasks()
WorkflowPage.objects.create(workflow=self.workflow, page=self.page)
def create_workflow_and_tasks(self):
workflow = Workflow.objects.create(name='test_workflow')
task_1 = GroupApprovalTask.objects.create(name='test_task_1')
task_1.groups.set(Group.objects.filter(name='Moderators'))
task_2 = GroupApprovalTask.objects.create(name='test_task_2')
task_2.groups.set(Group.objects.filter(name='Moderators'))
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
WorkflowTask.objects.create(workflow=workflow, task=task_2, sort_order=2)
return workflow, task_1, task_2
def submit(self):
post_data = {
'title': str(self.page.title),
'slug': str(self.page.slug),
'content': str(self.page.content),
'action-submit': "True",
}
return self.client.post(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), post_data)
def approve(self):
return self.client.post(reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, 'approve', self.page.current_workflow_task_state.id)))
def test_disable_workflow(self):
"""Test that deactivating a workflow sets it to inactive and cancels in progress states"""
self.login(self.submitter)
self.submit()
self.login(self.superuser)
self.approve()
response = self.client.post(reverse('wagtailadmin_workflows:disable', args=(self.workflow.pk,)))
self.assertEqual(response.status_code, 302)
self.workflow.refresh_from_db()
self.assertEqual(self.workflow.active, False)
states = WorkflowState.objects.filter(page=self.page, workflow=self.workflow)
self.assertEqual(states.filter(status=WorkflowState.STATUS_IN_PROGRESS).count(), 0)
self.assertEqual(states.filter(status=WorkflowState.STATUS_CANCELLED).count(), 1)
self.assertEqual(TaskState.objects.filter(workflow_state__workflow=self.workflow, status=TaskState.STATUS_IN_PROGRESS).count(), 0)
def test_disable_task(self):
"""Test that deactivating a task sets it to inactive and cancels in progress states"""
self.login(self.submitter)
self.submit()
self.login(self.superuser)
response = self.client.post(reverse('wagtailadmin_workflows:disable_task', args=(self.task_1.pk,)))
self.assertEqual(response.status_code, 302)
self.task_1.refresh_from_db()
self.assertEqual(self.task_1.active, False)
states = TaskState.objects.filter(workflow_state__page=self.page, task=self.task_1.task_ptr)
self.assertEqual(states.filter(status=TaskState.STATUS_IN_PROGRESS).count(), 0)
self.assertEqual(states.filter(status=TaskState.STATUS_CANCELLED).count(), 1)
# Check that the page's WorkflowState has moved on to the next active task
self.assertEqual(self.page.current_workflow_state.current_task_state.task.specific, self.task_2)
def test_enable_workflow(self):
self.login(self.superuser)
self.workflow.active = False
self.workflow.save()
response = self.client.post(reverse('wagtailadmin_workflows:enable', args=(self.workflow.pk,)))
self.assertEqual(response.status_code, 302)
self.workflow.refresh_from_db()
self.assertEqual(self.workflow.active, True)
def test_enable_task(self):
self.login(self.superuser)
self.task_1.active = False
self.task_1.save()
response = self.client.post(reverse('wagtailadmin_workflows:enable_task', args=(self.task_1.pk,)))
self.assertEqual(response.status_code, 302)
self.task_1.refresh_from_db()
self.assertEqual(self.task_1.active, True)
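# Tests for the task chooser modal: listing, searching, pagination, and creating new tasks.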
class TestTaskChooserView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def test_get(self):
response = self.client.get(reverse('wagtailadmin_workflows:task_chooser'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/chooser.html")
# Check that the "select task type" view was shown in the "new" tab
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/includes/select_task_type.html")
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/includes/results.html")
self.assertTemplateNotUsed(response, "wagtailadmin/workflows/task_chooser/includes/create_form.html")
self.assertFalse(response.context['searchform'].is_searching())
def test_search(self):
response = self.client.get(reverse('wagtailadmin_workflows:task_chooser') + '?q=foo')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/includes/results.html")
self.assertTemplateNotUsed(response, "wagtailadmin/workflows/task_chooser/chooser.html")
self.assertTrue(response.context['searchform'].is_searching())
def test_pagination(self):
response = self.client.get(reverse('wagtailadmin_workflows:task_chooser') + '?p=2')
self.assertEqual(response.status_code, 200)
# When pagination is used, only the results template should be rendered
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/includes/results.html")
self.assertTemplateNotUsed(response, "wagtailadmin/workflows/task_chooser/chooser.html")
self.assertFalse(response.context['searchform'].is_searching())
def test_get_with_create_model_selected(self):
response = self.client.get(reverse('wagtailadmin_workflows:task_chooser') + '?create_model=wagtailcore.GroupApprovalTask')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/chooser.html")
# Check that the "create" view was shown in the "new" tab
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/includes/create_form.html")
self.assertTemplateNotUsed(response, "wagtailadmin/workflows/task_chooser/includes/select_task_type.html")
def test_get_with_non_task_create_model_selected(self):
response = self.client.get(reverse('wagtailadmin_workflows:task_chooser') + '?create_model=wagtailcore.Page')
self.assertEqual(response.status_code, 404)
def test_get_with_base_task_create_model_selected(self):
# Task is technically a subclass of itself so we need an extra test for it
response = self.client.get(reverse('wagtailadmin_workflows:task_chooser') + '?create_model=wagtailcore.Task')
self.assertEqual(response.status_code, 404)
@mock.patch('wagtail.admin.views.workflows.get_task_types')
def test_get_with_single_task_model(self, get_task_types):
# When a single task type exists there's no need to specify create_model
get_task_types.return_value = [GroupApprovalTask]
response = self.client.get(reverse('wagtailadmin_workflows:task_chooser'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/chooser.html")
# Check that the "create" view was shown in the "new" tab
self.assertTemplateUsed(response, "wagtailadmin/workflows/task_chooser/includes/create_form.html")
self.assertTemplateNotUsed(response, "wagtailadmin/workflows/task_chooser/includes/select_task_type.html")
# POST requests are for creating new tasks
def get_post_data(self):
return {
'create-task-name': 'Editor approval task',
'create-task-groups': [str(Group.objects.get(name="Editors").id)],
}
def test_post_with_create_model_selected(self):
response = self.client.post(reverse('wagtailadmin_workflows:task_chooser') + '?create_model=wagtailcore.GroupApprovalTask', self.get_post_data())
self.assertEqual(response.status_code, 201)
# Check that the task was created
task = Task.objects.get(name="Editor approval task", active=True)
# Check the response JSON
self.assertEqual(response.json(), {
"step": "task_chosen",
"result": {
"id": task.id,
"name": "Editor approval task",
"edit_url": reverse('wagtailadmin_workflows:edit_task', args=[task.id])
}
})
@mock.patch('wagtail.admin.views.workflows.get_task_types')
def test_post_with_single_task_model(self, get_task_types):
# When a single task type exists there's no need to specify create_model
get_task_types.return_value = [GroupApprovalTask]
response = self.client.post(reverse('wagtailadmin_workflows:task_chooser'), self.get_post_data())
self.assertEqual(response.status_code, 201)
# Check that the task was created
task = Task.objects.get(name="Editor approval task", active=True)
# Check the response JSON
self.assertEqual(response.json(), {
"step": "task_chosen",
"result": {
"id": task.id,
"name": "Editor approval task",
"edit_url": reverse('wagtailadmin_workflows:edit_task', args=[task.id])
}
})
def test_post_without_create_model_selected(self):
response = self.client.post(reverse('wagtailadmin_workflows:task_chooser'), self.get_post_data())
self.assertEqual(response.status_code, 400)
# Check that the task wasn't created
self.assertFalse(Task.objects.filter(name="Editor approval task", active=True).exists())
def test_post_with_non_task_create_model_selected(self):
response = self.client.post(reverse('wagtailadmin_workflows:task_chooser') + '?create_model=wagtailcore.Page', self.get_post_data())
self.assertEqual(response.status_code, 404)
# Check that the task wasn't created
self.assertFalse(Task.objects.filter(name="Editor approval task", active=True).exists())
def test_post_with_base_task_create_model_selected(self):
# Task is technically a subclass of itself so we need an extra test for it
response = self.client.post(reverse('wagtailadmin_workflows:task_chooser') + '?create_model=wagtailcore.Task', self.get_post_data())
self.assertEqual(response.status_code, 404)
# Check that the task wasn't created
self.assertFalse(Task.objects.filter(name="Editor approval task", active=True).exists())
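# Tests the JSON response returned once a task has been chosen in the task chooser.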
class TestTaskChooserChosenView(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.login()
self.task = SimpleTask.objects.create(name="test_task")
def test_get(self):
response = self.client.get(reverse('wagtailadmin_workflows:task_chosen', args=[self.task.id]))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertEqual(response.json(), {
'result': {
'edit_url': reverse('wagtailadmin_workflows:edit_task', args=[self.task.id]),
'id': self.task.id,
'name': 'test_task'
},
'step': 'task_chosen'
})
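# Tests the workflow usage view, which lists the pages a workflow is assigned to.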
class TestWorkflowUsageView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.workflow = Workflow.objects.get()
self.root_page = Page.objects.get(depth=1)
self.home_page = Page.objects.get(depth=2)
self.child_page_with_another_workflow = self.home_page.add_child(instance=SimplePage(title="Another page", content="I'm another page"))
self.another_workflow = Workflow.objects.create(name="Another workflow")
self.another_workflow.workflow_pages.create(page=self.child_page_with_another_workflow)
def test_get(self):
response = self.client.get(reverse('wagtailadmin_workflows:usage', args=[self.workflow.id]))
self.assertEqual(response.status_code, 200)
object_set = set(page.id for page in response.context['used_by'].object_list)
self.assertIn(self.root_page.id, object_set)
self.assertIn(self.home_page.id, object_set)
self.assertNotIn(self.child_page_with_another_workflow.id, object_set)
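# Tests for the workflow status modal and the status messages shown on the page edit view
# as a page moves through submission, approval, rejection, cancellation and restart.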
@freeze_time("2020-06-01 12:00:00")
class TestWorkflowStatus(TestCase, WagtailTestUtils):
def setUp(self):
delete_existing_workflows()
self.submitter = self.create_user(
username='submitter',
email='[email protected]',
password='password',
)
editors = Group.objects.get(name='Editors')
editors.user_set.add(self.submitter)
self.moderator = self.create_user(
username='moderator',
email='[email protected]',
password='password',
)
moderators = Group.objects.get(name='Moderators')
moderators.user_set.add(self.moderator)
self.superuser = self.create_superuser(
username='superuser',
email='[email protected]',
password='password',
)
self.login(self.superuser)
# Create a page
root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
content="hello",
live=False,
has_unpublished_changes=True,
)
root_page.add_child(instance=self.page)
self.workflow, self.task_1, self.task_2 = self.create_workflow_and_tasks()
WorkflowPage.objects.create(workflow=self.workflow, page=self.page)
self.edit_url = reverse('wagtailadmin_pages:edit', args=(self.page.id, ))
def create_workflow_and_tasks(self):
workflow = Workflow.objects.create(name='test_workflow')
task_1 = GroupApprovalTask.objects.create(name='test_task_1')
task_1.groups.set(Group.objects.filter(name='Moderators'))
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
task_2 = GroupApprovalTask.objects.create(name='test_task_2')
task_2.groups.set(Group.objects.filter(name='Editors'))
WorkflowTask.objects.create(workflow=workflow, task=task_2, sort_order=2)
return workflow, task_1, task_2
def submit(self, action='action-submit'):
post_data = {
'title': str(self.page.title),
'slug': str(self.page.slug),
'content': str(self.page.content),
action: "True",
}
return self.client.post(self.edit_url, post_data)
def workflow_action(self, action):
post_data = {
'action': action,
'comment': 'good work' if action == 'approve' else 'needs some changes',
'next': self.edit_url
}
return self.client.post(
reverse('wagtailadmin_pages:workflow_action', args=(self.page.id, action, self.page.current_workflow_task_state.id)),
post_data,
follow=True
)
def test_workflow_status_modal(self):
workflow_status_url = reverse('wagtailadmin_pages:workflow_status', args=(self.page.id, ))
# The workflow status view should deny access while the page is only a draft (no workflow in progress)
response = self.client.get(workflow_status_url)
self.assertRedirects(response, '/admin/')
# Submit for moderation
self.submit()
response = self.client.get(workflow_status_url)
self.assertEqual(response.status_code, 200)
html = response.json().get('html')
self.assertIn(self.task_1.name, html)
self.assertIn('{}: In progress'.format(self.task_1.name), html)
self.assertIn(self.task_2.name, html)
self.assertIn('{}: Not started'.format(self.task_2.name), html)
self.assertIn(reverse('wagtailadmin_pages:history', args=(self.page.id, )), html)
self.assertTemplateUsed(response, 'wagtailadmin/workflows/workflow_status.html')
def test_status_through_workflow_cycle(self):
self.login(self.superuser)
response = self.client.get(self.edit_url)
self.assertContains(response, 'Draft', 1)
self.page.save_revision()
response = self.client.get(self.edit_url)
self.assertContains(response, 'Draft saved', 1)
self.submit()
response = self.client.get(self.edit_url)
self.assertRegex(response.content.decode('utf-8'), r'Awaiting\s+{}'.format(self.task_1.name))
response = self.workflow_action('approve')
self.assertRegex(response.content.decode('utf-8'), r'Awaiting\s+{}'.format(self.task_2.name))
response = self.workflow_action('reject')
self.assertContains(response, 'Changes requested')
# resubmit
self.submit()
response = self.client.get(self.edit_url)
self.assertRegex(response.content.decode('utf-8'), r'Awaiting\s+{}'.format(self.task_2.name))
response = self.workflow_action('approve')
self.assertContains(response, 'Published')
def test_status_after_cancel(self):
# start workflow, then cancel
self.submit()
self.submit('action-cancel-workflow')
response = self.client.get(self.edit_url)
self.assertContains(response, 'Draft saved')
def test_status_after_restart(self):
self.submit()
response = self.workflow_action('approve')
self.assertRegex(response.content.decode('utf-8'), r'Awaiting\s+{}'.format(self.task_2.name))
self.workflow_action('reject')
self.submit('action-restart-workflow')
response = self.client.get(self.edit_url)
self.assertRegex(response.content.decode('utf-8'), r'Awaiting\s+{}'.format(self.task_1.name))
def test_workflow_status_modal_task_comments(self):
workflow_status_url = reverse('wagtailadmin_pages:workflow_status', args=(self.page.id,))
self.submit()
self.workflow_action('reject')
response = self.client.get(workflow_status_url)
self.assertIn('needs some changes', response.json().get('html'))
self.submit()
self.workflow_action('approve')
response = self.client.get(workflow_status_url)
self.assertIn('good work', response.json().get('html'))
def test_workflow_edit_locked_message(self):
self.submit()
self.login(self.submitter)
response = self.client.get(self.edit_url)
needle = "This page is awaiting <b>'test_task_1'</b> in the <b>'test_workflow'</b> workflow. Only reviewers for this task can edit the page."
self.assertTagInHTML(needle, str(response.content), count=1)
self.login(self.moderator)
response = self.client.get(self.edit_url)
self.assertNotInHTML(needle, str(response.content))
from mock import call, ANY
from nose.tools import istest
from provy.more.debian import UserRole
from tests.unit.tools.helpers import ProvyTestCase
example_groups = """
root
daemon
bin
sys
adm
tty
disk
lp
mail
"""
example_users = """
root
daemon
bin
sys
sync
games
man
lp
mail
"""
example_groups_for_user = """
foo : foo adm cdrom sudo dip plugdev lpadmin sambashare
"""
class UserRoleTest(ProvyTestCase):
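# Unit tests for provy's Debian UserRole: group/user existence checks, group membership,
# and user creation/password management.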
def setUp(self):
super(UserRoleTest, self).setUp()
self.role = UserRole(None, {})
@istest
def checks_that_a_group_exists(self):
with self.execute_mock() as execute:
execute.return_value = example_groups
self.assertTrue(self.role.group_exists('daemon'))
execute.assert_called_with("cat /etc/group | cut -d ':' -f 1", stdout=False, sudo=True)
@istest
def checks_that_a_group_doesnt_exist(self):
with self.execute_mock() as execute:
execute.return_value = example_groups
self.assertFalse(self.role.group_exists('iis'))
@istest
def checks_group_by_exact_name(self):
with self.execute_mock() as execute:
execute.return_value = example_groups
self.assertFalse(self.role.group_exists('roo'))
self.assertFalse(self.role.group_exists('roots'))
@istest
def checks_that_a_user_exists(self):
with self.execute_mock() as execute:
execute.return_value = example_users
self.assertTrue(self.role.user_exists('daemon'))
execute.assert_called_with("cat /etc/passwd | cut -d ':' -f 1", stdout=False, sudo=True)
@istest
def checks_that_a_user_doesnt_exist(self):
with self.execute_mock() as execute:
execute.return_value = example_users
self.assertFalse(self.role.user_exists('iis'))
@istest
def checks_user_by_exact_name(self):
with self.execute_mock() as execute:
execute.return_value = example_users
self.assertFalse(self.role.user_exists('roo'))
self.assertFalse(self.role.user_exists('roots'))
@istest
def checks_that_a_user_is_in_a_certain_group(self):
with self.execute_mock() as execute:
execute.return_value = example_groups_for_user
self.assertTrue(self.role.user_in_group('foo', 'sudo'))
execute.assert_called_with("groups foo", stdout=False, sudo=True)
@istest
def checks_that_a_user_is_not_in_a_certain_group(self):
with self.execute_mock() as execute:
execute.return_value = example_groups_for_user
self.assertFalse(self.role.user_in_group('foo', 'root'))
@istest
def checks_that_a_user_is_in_a_certain_group_by_exact_name(self):
with self.execute_mock() as execute:
execute.return_value = example_groups_for_user
self.assertFalse(self.role.user_in_group('foo', 'sud'))
self.assertFalse(self.role.user_in_group('foo', 'sudoer'))
@istest
def cannot_check_user_in_groups_if_username_doesnt_exist(self):
with self.execute_mock() as execute:
execute.return_value = 'groups: foo: User unexistant'
self.assertRaises(ValueError, self.role.user_in_group, 'foo', 'bar')
@istest
def ensures_a_group_is_created(self):
with self.mock_role_methods('group_exists', 'execute'):
self.role.group_exists.return_value = False
self.role.ensure_group('foo')
self.role.group_exists.assert_called_once_with('foo')
self.role.execute.assert_called_once_with('groupadd foo', sudo=True, stdout=False)
@istest
def ensures_a_group_is_created_with_group_id(self):
with self.mock_role_methods('group_exists', 'execute'):
self.role.group_exists.return_value = False
self.role.ensure_group('foo', group_id=123)
self.role.group_exists.assert_called_once_with('foo')
self.role.execute.assert_called_once_with('groupadd --gid 123 foo', sudo=True, stdout=False)
@istest
def doesnt_create_group_if_it_already_exists(self):
with self.mock_role_methods('group_exists', 'execute'):
self.role.group_exists.return_value = True
self.role.ensure_group('foo')
self.assertFalse(self.role.execute.called)
@istest
def ensures_the_user_enters_the_provided_groups_when_not_there_already(self):
with self.mock_role_methods('user_in_group', 'execute'):
self.role.user_in_group.side_effect = [True, False]
self.role.ensure_user_groups('foo', ['bar', 'baz'])
self.assertEqual(self.role.user_in_group.mock_calls, [
call('foo', 'bar'),
call('foo', 'baz'),
])
self.role.execute.assert_called_once_with('usermod -G baz foo', sudo=True, stdout=False)
@istest
def ensures_user_is_created_when_not_created_yet(self):
with self.mock_role_methods('ensure_group', 'ensure_user_groups', 'user_exists', 'execute', 'set_user_password'):
with self.provisioning_to('debian'):
self.role.user_exists.return_value = False
self.role.ensure_user(username='foo-user', identified_by='foo-pass', groups=['foo-group', 'bar-group'])
self.assertEqual(self.role.ensure_group.mock_calls, [
call('foo-group'),
call('bar-group'),
])
self.assertEqual(self.role.execute.mock_calls, [
call('useradd -g foo-group -s /bin/bash -d /home/foo-user -m foo-user', stdout=False, sudo=True)
])
self.role.set_user_password.assert_called_once_with(
'foo-user', 'foo-pass', False
)
self.role.ensure_user_groups.assert_called_once_with('foo-user', ['foo-group', 'bar-group'])
@istest
def ensures_user_is_created_with_only_group_as_username(self):
with self.mock_role_methods('ensure_group', 'ensure_user_groups', 'user_exists', 'execute', 'set_user_password'):
with self.provisioning_to('debian'):
self.role.user_exists.return_value = False
self.role.ensure_user(username='foo-user')
self.assertEqual(self.role.execute.mock_calls, [
call('useradd -g foo-user -s /bin/bash -d /home/foo-user -m foo-user', stdout=False, sudo=True),
])
@istest
def ensures_user_is_created_with_different_home(self):
with self.mock_role_methods('ensure_group', 'ensure_user_groups', 'user_exists', 'execute', 'set_user_password'):
with self.provisioning_to('debian'):
self.role.user_exists.return_value = False
self.role.ensure_user(username='foo-user', home_folder='/srv/bar')
self.assertEqual(self.role.execute.mock_calls, [
call('useradd -g foo-user -s /bin/bash -d /srv/bar -m foo-user', stdout=False, sudo=True),
])
@istest
def doesnt_add_but_set_user_as_admin_for_debian_when_it_already_exists_but_is_not_admin_yet(self):
with self.mock_role_methods('ensure_group', 'ensure_user_groups', 'user_exists', 'user_in_group', 'execute', "set_user_password"):
with self.provisioning_to('debian'):
self.role.user_exists.return_value = True
self.role.user_in_group.return_value = False
self.role.ensure_user(username='foo-user', is_admin=True)
self.role.user_in_group.assert_called_once_with('foo-user', 'admin')
self.assertEqual(self.role.execute.mock_calls, [
call('usermod -G admin foo-user', sudo=True, stdout=False),
])
@istest
def doesnt_add_but_set_user_as_admin_for_ubuntu_when_it_already_exists_but_is_not_admin_yet(self):
with self.mock_role_methods('ensure_group', 'ensure_user_groups', 'user_exists', 'user_in_group', 'execute', 'set_user_password'):
with self.provisioning_to('ubuntu'):
self.role.user_exists.return_value = True
self.role.user_in_group.return_value = False
self.role.ensure_user(username='foo-user', is_admin=True)
self.role.user_in_group.assert_called_once_with('foo-user', 'sudo')
self.assertEqual(self.role.execute.mock_calls, [
call('usermod -G sudo foo-user', sudo=True, stdout=False),
])
@istest
def just_add_user_to_groups_if_its_already_admin(self):
with self.mock_role_methods('ensure_group', 'ensure_user_groups', 'user_exists', 'user_in_group', 'execute', 'set_user_password'):
with self.provisioning_to('ubuntu'):
self.role.user_exists.return_value = True
self.role.user_in_group.return_value = True
self.role.ensure_user(username='foo-user', is_admin=True, groups=['foo-group', 'bar-group'])
self.assertFalse(self.role.execute.called)
self.role.ensure_user_groups.assert_called_once_with('foo-user', ['foo-group', 'bar-group'])
@istest
def check_set_user_password_when_password_is_encrypted(self):
encrypted_password = "$6$SqAoXRvk$spgLlL/WL/vcb16ZZ4cMdF5uN90IjH0PpYKdMhqyW.BxXJEVc5RyvnpWcT.OKKJO2vsp32.CWDEd45K6r05bL0"
with self.mock_role_methods("create_remote_temp_file", 'put_file', 'execute', "remove_file"):
self.role.create_remote_temp_file.return_value = "/tmp/random"
self.role.set_user_password("foo", encrypted_password, encrypted=True)
self.role.put_file.assert_called_once_with(
ANY,
"/tmp/random",
sudo=True,
stdout=False
)
self.assertIn(
call('cat "/tmp/random" | chpasswd -e', sudo=True, stdout=False),
self.role.execute.mock_calls
)
@istest
def check_set_user_password_when_password_is_not_encrypted(self):
with self.mock_role_methods("create_remote_temp_file", 'put_file', 'execute', "remove_file"):
self.role.create_remote_temp_file.return_value = "/tmp/random"
self.role.set_user_password("foo", "foo-pass")
self.role.put_file.assert_called_once_with(
ANY,
"/tmp/random",
sudo=True,
stdout=False
)
self.assertIn(
call('cat "/tmp/random" | chpasswd ', sudo=True, stdout=False),
self.role.execute.mock_calls
)
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import date, datetime, time, timedelta
import pytest
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.core.errors import IndicoError
from indico.modules.rb import rb_settings
from indico.modules.rb.models.blocked_rooms import BlockedRoom
from indico.modules.rb.models.photos import Photo
from indico.modules.rb.models.reservations import RepeatFrequency, ReservationState
from indico.modules.rb.models.room_bookable_hours import BookableHours
from indico.modules.rb.models.rooms import Room
from indico.modules.users import User
from indico.testing.util import bool_matrix
from indico.util.date_time import get_day_end, get_day_start
pytest_plugins = 'indico.modules.rb.testing.fixtures'
_notset = object()
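# Tests for the room booking Room model: naming, attributes, availability filtering,
# booking constraints and permission checks.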
@pytest.mark.parametrize('need_confirmation', (True, False))
def test_is_auto_confirm(create_room, need_confirmation):
room = create_room(reservations_need_confirmation=need_confirmation)
assert room.is_auto_confirm != need_confirmation
assert Room.query.filter_by(is_auto_confirm=need_confirmation).first() is None
assert Room.query.filter_by(is_auto_confirm=not need_confirmation).first() == room
def test_has_photo(db, dummy_room):
assert not dummy_room.has_photo
dummy_room.photo = Photo()
db.session.flush()
assert dummy_room.has_photo
@pytest.mark.parametrize(('building', 'floor', 'number', 'verbose_name', 'expected_name'), (
('1', '2', '3', None, '1/2-3'),
('1', '2', 'X', None, '1/2-X'),
('1', 'X', '3', None, '1/X-3'),
('X', '2', '3', None, 'X/2-3'),
('1', '2', '3', 'Test', '1/2-3 - Test'),
('1', '2', '3', 'm\xf6p', '1/2-3 - m\xf6p')
))
def test_full_name(create_room, building, floor, number, verbose_name, expected_name):
room = create_room(building=building, floor=floor, number=number, verbose_name=verbose_name)
assert room.full_name == expected_name
@pytest.mark.parametrize(('name',), (
(None,),
('1/2-3',),
('Test',)
))
def test_name_stays_same(create_room, name):
room = create_room(verbose_name=name)
assert room.name == '1/2-3'
@pytest.mark.parametrize(('protection_mode', 'expected'), (
(ProtectionMode.protected, False),
(ProtectionMode.public, True),
))
def test_is_public(dummy_room, protection_mode, expected):
dummy_room.protection_mode = protection_mode
assert dummy_room.is_public == expected
def test_location_name(dummy_room, dummy_location):
assert dummy_room.location_name == dummy_location.name
def test_owner(dummy_room, dummy_user):
assert dummy_room.owner == dummy_user
def test_owner_after_change(dummy_room, dummy_user):
dummy_room.owner = dummy_user
assert dummy_room.owner == dummy_user
@pytest.mark.parametrize(('name', 'expected'), (
('foo', True),
('bar', True),
('xxx', False), # exists but is not assigned to the room
('yyy', False), # does not exist
))
def test_has_equipment(create_equipment_type, dummy_room, name, expected):
dummy_room.available_equipment.append(create_equipment_type('foo'))
dummy_room.available_equipment.append(create_equipment_type('bar'))
create_equipment_type('xxx')
assert dummy_room.has_equipment(name) == expected
def test_get_attribute_by_name(create_room_attribute, dummy_room):
attr = create_room_attribute('foo')
assert dummy_room.get_attribute_by_name('foo') is None
dummy_room.set_attribute_value('foo', 'bar')
assert dummy_room.get_attribute_by_name('foo').attribute == attr
def test_has_attribute(create_room_attribute, dummy_room):
create_room_attribute('foo')
assert not dummy_room.has_attribute('foo')
dummy_room.set_attribute_value('foo', 'bar')
assert dummy_room.has_attribute('foo')
@pytest.mark.parametrize(('value', 'expected'), (
('', _notset),
(None, _notset),
(0, _notset),
([], _notset),
('foo', 'foo'),
(123, 123),
(True, True),
(['a', 'b'], ['a', 'b']),
))
def test_get_attribute_value(create_room_attribute, dummy_room, value, expected):
assert dummy_room.get_attribute_value('foo', _notset) is _notset
create_room_attribute('foo')
assert dummy_room.get_attribute_value('foo', _notset) is _notset
dummy_room.set_attribute_value('foo', value)
assert dummy_room.get_attribute_value('foo', _notset) == expected
def test_set_attribute_value(create_room_attribute, dummy_room):
# setting an attribute that doesn't exist fails
with pytest.raises(ValueError):
dummy_room.set_attribute_value('foo', 'something')
create_room_attribute('foo')
# the value can be cleared even if it is not set
dummy_room.set_attribute_value('foo', None)
assert dummy_room.get_attribute_value('foo', _notset) is _notset
# set it to some value
dummy_room.set_attribute_value('foo', 'test')
assert dummy_room.get_attribute_value('foo') == 'test'
# set to some other value while we have an existing association entry
dummy_room.set_attribute_value('foo', 'something')
assert dummy_room.get_attribute_value('foo') == 'something'
# clear it
dummy_room.set_attribute_value('foo', None)
assert dummy_room.get_attribute_value('foo', _notset) is _notset
def test_find_with_attribute(dummy_room, create_room, create_room_attribute):
assert Room.query.all() == [dummy_room] # one room without the attribute
assert not Room.find_with_attribute('foo')
create_room_attribute('foo')
assert not Room.find_with_attribute('foo')
expected = set()
for room in [create_room(), create_room()]:
value = f'bar-{room.id}'
room.set_attribute_value('foo', value)
expected.add((room, value))
assert set(Room.find_with_attribute('foo')) == expected
def test_get_with_data_errors():
with pytest.raises(ValueError):
Room.get_with_data(foo='bar')
@pytest.mark.parametrize('only_active', (True, False))
def test_get_with_data(db, create_room, create_equipment_type, only_active):
eq = create_equipment_type('eq')
rooms = {
'inactive': {'room': create_room(is_deleted=True), 'equipment': []},
'no_eq': {'room': create_room(), 'equipment': []},
'all_eq': {'room': create_room(), 'equipment': [eq]}
}
room_types = {room_data['room']: type_ for type_, room_data in rooms.items()}
for room in rooms.values():
room['room'].available_equipment = room['equipment']
db.session.flush()
results = list(Room.get_with_data(only_active=only_active))
assert len(results) == len(rooms) - only_active
for row in results:
room = row.pop('room')
room_type = room_types[room]
if room_type == 'inactive':
assert not only_active
@pytest.mark.parametrize(
('has_booking', 'has_blocking',
'has_pre_booking', 'include_pre_bookings',
'has_pending_blocking', 'include_pending_blockings',
'filtered'),
set(bool_matrix('00.0.0', expect=False) + # nothing confirmed/pending
bool_matrix('000.0.', expect=False) + # nothing pending included
bool_matrix('1.....', expect=True) + # confirmed booking
bool_matrix('.1....', expect=True) + # active blocking
bool_matrix('00....', expect=lambda x: all(x[2:4]) or all(x[4:6]))) # included pending booking/blocking
)
def test_filter_available(dummy_room, create_reservation, create_blocking,
has_booking, has_blocking,
has_pre_booking, include_pre_bookings,
has_pending_blocking, include_pending_blockings, filtered):
if has_booking:
create_reservation(start_dt=datetime.combine(date.today(), time(8)),
end_dt=datetime.combine(date.today(), time(10)))
if has_pre_booking:
create_reservation(start_dt=datetime.combine(date.today(), time(10)),
end_dt=datetime.combine(date.today(), time(12)),
state=ReservationState.pending)
if has_blocking:
create_blocking(state=BlockedRoom.State.accepted)
if has_pending_blocking:
create_blocking(state=BlockedRoom.State.pending)
availability_filter = Room.filter_available(get_day_start(date.today()), get_day_end(date.today()),
(RepeatFrequency.NEVER, 0), include_blockings=True,
include_pre_bookings=include_pre_bookings,
include_pending_blockings=include_pending_blockings)
assert set(Room.query.filter(availability_filter)) == (set() if filtered else {dummy_room})
@pytest.mark.parametrize(('is_admin', 'is_owner', 'max_advance_days', 'days_delta', 'success'), (
(True, False, 10, 15, True),
(False, True, 10, 15, True),
(False, False, None, 15, True),
(False, False, 0, 15, True),
(False, False, 10, -5, True),
(False, False, 10, 10, False),
(False, False, 10, 15, False)
))
def test_check_advance_days(create_user, dummy_room, is_admin, is_owner, max_advance_days, days_delta, success):
user = create_user(123, rb_admin=is_admin)
dummy_room.max_advance_days = max_advance_days
end_date = date.today() + timedelta(days=days_delta)
if is_owner:
dummy_room.update_principal(user, full_access=True)
if success:
assert dummy_room.check_advance_days(end_date, user, quiet=True)
assert dummy_room.check_advance_days(end_date, user)
else:
assert not dummy_room.check_advance_days(end_date, user, quiet=True)
with pytest.raises(IndicoError):
dummy_room.check_advance_days(end_date, user)
def test_check_advance_days_no_user(dummy_room):
dummy_room.max_advance_days = 10
end_date = date.today() + timedelta(days=15)
assert not dummy_room.check_advance_days(end_date, quiet=True)
@pytest.mark.parametrize(('is_admin', 'is_owner', 'fits', 'success'), bool_matrix('...', expect=any))
def test_check_bookable_hours(db, dummy_room, create_user, is_admin, is_owner, fits, success):
user = create_user(123, rb_admin=is_admin)
if is_owner:
dummy_room.update_principal(user, full_access=True)
dummy_room.bookable_hours = [BookableHours(start_time=time(12), end_time=time(14))]
db.session.flush()
booking_hours = (time(12), time(13)) if fits else (time(8), time(9))
if success:
assert dummy_room.check_bookable_hours(booking_hours[0], booking_hours[1], user, quiet=True)
assert dummy_room.check_bookable_hours(booking_hours[0], booking_hours[1], user)
else:
assert not dummy_room.check_bookable_hours(booking_hours[0], booking_hours[1], user, quiet=True)
with pytest.raises(IndicoError):
dummy_room.check_bookable_hours(booking_hours[0], booking_hours[1], user)
def test_check_bookable_hours_hours(dummy_room):
assert dummy_room.check_bookable_hours(time(8), time(9), quiet=True)
def test_check_bookable_hours_no_user(db, dummy_room):
dummy_room.bookable_hours = [BookableHours(start_time=time(12), end_time=time(14))]
db.session.flush()
assert not dummy_room.check_bookable_hours(time(8), time(9), quiet=True)
@pytest.mark.parametrize('reservations_need_confirmation', (True, False))
@pytest.mark.parametrize('is_owner', (True, False))
@pytest.mark.parametrize('is_reservable', (True, False))
def test_permissions_manager_owner(dummy_room, create_user, reservations_need_confirmation, is_owner, is_reservable):
user = create_user(123)
dummy_room.protection_mode = ProtectionMode.public
dummy_room.reservations_need_confirmation = reservations_need_confirmation
dummy_room.is_reservable = is_reservable
if is_owner:
dummy_room.owner = user
else:
dummy_room.update_principal(user, full_access=True)
assert dummy_room.can_book(user) == is_reservable
assert dummy_room.can_prebook(user) == (reservations_need_confirmation and is_reservable)
assert dummy_room.can_override(user)
assert dummy_room.can_moderate(user)
def test_permissions_manager_explicit_prebook(dummy_room, create_user):
user = create_user(123)
dummy_room.protection_mode = ProtectionMode.public
dummy_room.update_principal(user, full_access=True, permissions={'prebook'})
assert dummy_room.can_prebook(user)
@pytest.mark.parametrize('reservations_need_confirmation', (True, False))
def test_permissions_public_room(dummy_room, create_user, reservations_need_confirmation):
user = create_user(123)
dummy_room.protection_mode = ProtectionMode.public
dummy_room.reservations_need_confirmation = reservations_need_confirmation
assert dummy_room.can_book(user) == (not reservations_need_confirmation)
assert dummy_room.can_prebook(user) == reservations_need_confirmation
assert not dummy_room.can_override(user)
assert not dummy_room.can_moderate(user)
def test_permissions_protected_room(dummy_room, create_user):
user = create_user(123)
dummy_room.protection_mode = ProtectionMode.protected
assert not dummy_room.can_book(user)
assert not dummy_room.can_prebook(user)
assert not dummy_room.can_override(user)
assert not dummy_room.can_moderate(user)
@pytest.mark.parametrize('reservations_need_confirmation', (True, False))
def test_permissions_protected_room_admin(dummy_room, create_user, reservations_need_confirmation):
user = create_user(123)
rb_settings.acls.add_principal('admin_principals', user)
dummy_room.protection_mode = ProtectionMode.protected
dummy_room.reservations_need_confirmation = reservations_need_confirmation
assert dummy_room.can_book(user)
assert dummy_room.can_prebook(user) == reservations_need_confirmation
assert dummy_room.can_override(user)
assert dummy_room.can_moderate(user)
@pytest.mark.parametrize('permission', ('book', 'prebook', 'override', 'moderate'))
def test_permissions_protected_room_acl(dummy_room, create_user, permission):
user = create_user(123)
dummy_room.protection_mode = ProtectionMode.protected
dummy_room.update_principal(user, permissions={permission})
for p in ('book', 'prebook', 'override', 'moderate'):
granted = p == permission
assert getattr(dummy_room, 'can_' + p)(user) == granted
def test_permissions_no_user(dummy_room):
assert not dummy_room.can_book(None)
assert not dummy_room.can_prebook(None)
assert not dummy_room.can_override(None)
assert not dummy_room.can_moderate(None)
assert not dummy_room.can_edit(None)
assert not dummy_room.can_delete(None)
@pytest.mark.parametrize('is_admin', (True, False))
def test_admin_permissions(dummy_room, create_user, is_admin):
user = create_user(123)
if is_admin:
rb_settings.acls.add_principal('admin_principals', user)
assert dummy_room.can_edit(user) == is_admin
assert dummy_room.can_delete(user) == is_admin
@pytest.mark.parametrize('acl_perm', (None, 'book', 'prebook', 'override', 'moderate', '*'))
@pytest.mark.parametrize('protection_mode', (ProtectionMode.public, ProtectionMode.protected))
@pytest.mark.parametrize('reservations_need_confirmation', (True, False))
@pytest.mark.parametrize('is_reservable', (True, False))
@pytest.mark.parametrize('is_owner', (True, False))
@pytest.mark.parametrize('is_admin', (True, False))
@pytest.mark.parametrize('allow_admin', (True, False))
@pytest.mark.parametrize('bulk_possible', (True, False))
def test_get_permissions_for_user(dummy_room, create_user, monkeypatch, bulk_possible, allow_admin, is_admin, is_owner,
is_reservable, reservations_need_confirmation, protection_mode, acl_perm):
monkeypatch.setattr(User, 'can_get_all_multipass_groups', bulk_possible)
user = create_user(123)
if is_owner:
dummy_room.owner = user
if is_admin:
rb_settings.acls.add_principal('admin_principals', user)
dummy_room.protection_mode = protection_mode
dummy_room.is_reservable = is_reservable
dummy_room.reservations_need_confirmation = reservations_need_confirmation
if acl_perm == '*':
dummy_room.update_principal(user, full_access=True)
elif acl_perm:
dummy_room.update_principal(user, permissions={acl_perm})
perms = Room.get_permissions_for_user(user, allow_admin=allow_admin)
assert perms[dummy_room.id] == {
'book': dummy_room.can_book(user, allow_admin=allow_admin),
'prebook': dummy_room.can_prebook(user, allow_admin=allow_admin),
'override': dummy_room.can_override(user, allow_admin=allow_admin),
'moderate': dummy_room.can_moderate(user, allow_admin=allow_admin),
'manage': dummy_room.can_manage(user, allow_admin=allow_admin),
}
|
|
#!/usr/bin/env python
'''
This module implements a simple and naive Gramps XML file (.gramps) parser.
Author: Chris Laws
'''
from __future__ import unicode_literals
from future.builtins import str
import datetime
import dateutil.parser
import gzip
import logging
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
logger = logging.getLogger(__name__)
indent = " "
def default_date_parser(datestring):
''' Convert a date string into a datetime object '''
# some dates are missing the day so use a default such that
# a valid datetime object can be created.
if len(datestring.split("-")) == 2:
logger.debug(
"{0} missing item from date string, using day 01 for"
" compatibility".format(datestring))
datestring = "{0}-01".format(datestring)
# Dates are used in many different formats, use the
# dateutil parser in an effort to successfully
# parse a useful date.
return dateutil.parser.parse(datestring)
class DateParser(object):
def __init__(self):
self.handlers = {}
# register a default handler to use as a fallback.
self.register('default', default_date_parser)
def register(self, cal_format, handler):
'''
Register a handler function for a specific date_type. For example,
        if your dates are in `French Republican` format you can use this
method to register a handler function that will convert one of
these dates into a valid datetime.
:param cal_format: a string identifying the calendar format type.
For example, `French Republican`.
:param handler: a callable that can convert a date string into a
valid datetime object.
'''
logger.debug(
'Registering a date handler for format: %s', cal_format)
if cal_format in self.handlers:
            raise Exception(
                'Duplicate date handlers detected for: {0}'.format(cal_format))
self.handlers[cal_format] = handler
def parse(self, datestring, cal_format=None):
        ''' Parse a date string and return a datetime object.
        :param datestring: the date string to parse.
        :param cal_format: the calendar format of the date string. For
          example, Islamic, French Republican, etc.
        '''
cformat = cal_format or 'default'
if cformat not in self.handlers:
logger.warning(
'No date parser registered for %s, falling back to default',
cformat)
cformat = 'default'
handler = self.handlers.get(cformat)
return handler(datestring)
date_processor = DateParser()
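# Hedged usage sketch (not part of the original module): shows how a custom
# calendar handler could be registered with a DateParser. The 'Julian'
# handler below is a hypothetical placeholder that simply defers to the
# default Gregorian parser.
def _example_register_julian_handler():
    def julian_handler(datestring):
        # A real converter would translate the Julian calendar date;
        # this placeholder just reuses the default parsing.
        return default_date_parser(datestring)
    dp = DateParser()
    dp.register('Julian', julian_handler)
    return dp.parse('1700-02-11', cal_format='Julian')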
def generate_timestring(dt):
'''
Required because datetime.strftime barfs on years prior to 1900
'''
format = "%Y-%m-%d"
if dt.year > 1900:
return dt.strftime(format)
else:
format = format.replace('%Y', str(dt.year))
dt = datetime.datetime(1900, dt.month, dt.day, dt.hour,
dt.minute, dt.second)
return dt.strftime(format)
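# Hedged usage sketch (not part of the original module): pre-1900 years are
# substituted directly into the format string because strftime on older
# Python versions rejects them, so an 1850 date still renders correctly.
def _example_generate_timestring():
    return generate_timestring(datetime.datetime(1850, 3, 4))  # '1850-03-04'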
class Place(object):
'''
A Gramps place object.
Example of a Gramps place structure:
<places>
<placeobj handle="_bcd2a83849845c12c13" change="1297580946" id="P0806">
<ptitle>Morwell, Victoria, Australia</ptitle>
<coord long="146.3947107" lat="-38.2345742"/>
</placeobj>
'''
def __init__(self, store):
self.store = store
self.handle = None
self.id = None
self.type = None
self.title = None
self.lat = None
self.lon = None
@property
def coordinates(self):
'''
Return a tuple of lat, lon for the location
'''
if self.lat and self.lon:
return (self.lat, self.lon)
return None
def __str__(self):
o = []
o.append("Place")
title = ""
if self.title:
title = self.title
lat_lon = ""
if self.lat and self.lon:
lat_lon = " (lat={0}, lon={1})".format(self.lat, self.lon)
o.append("{0}{1}{2}".format(indent, title, lat_lon))
return "\n".join(o)
class Event(object):
'''
A Gramps event object.
Example of a Gramps event structure:
<event handle="_bb2a73da89376f2e069" change="1287656448" id="E1000">
<type>Death</type>
<dateval val="1955-06-04"/>
<place hlink="_bb2a73da908569b4132"/>
<noteref hlink="_bb2a73da9362223d031"/>
<sourceref hlink="_bb60df55dd862a3e6b1" conf="4">
<spage>1955/012559</spage>
<noteref hlink="_bb60eb134ff61992598"/>
<dateval val="1955-06-04"/>
</sourceref>
</event>
'''
def __init__(self, store):
self.store = store
self.handle = None
self.id = None
self.type = None
self.description = None
self.date = None
self.date_type = None
self.date_cformat = None
# handles
self.place_handle = None
self.note_handles = []
self.source_handles = []
@property
def datetime(self):
'''
Return a datetime object for this event date
'''
if self.date:
try:
return date_processor.parse(
self.date, cal_format=self.date_cformat)
except Exception:
logger.exception(
"Problem parsing date: {0}, cal_format={1}".format(
                        self.date, self.date_cformat))
raise
else:
return None
def datetime_as_string(self):
return generate_timestring(self.datetime)
@property
def place(self):
if self.place_handle:
return self.store.get_place(self.place_handle)
return None
def __str__(self):
o = []
o.append("Event")
dateStr = "unknown"
if self.date:
if self.date_type:
dateStr = "{0} {1}".format(self.date_type, self.date)
else:
dateStr = self.date
o.append("{0}{1}, {2}".format(indent, self.type, dateStr))
placeStr = "unknown"
if self.place:
            # self.place already resolves place_handle to a Place object
            thePlace = self.place
if thePlace:
p = []
for line in str(thePlace).split("\n"):
p.append("{0}{1}".format(indent, line))
placeStr = "\n".join(p)
o.append(placeStr)
else:
o.append("{0}Place".format(indent * 2))
o.append("{0}None".format(indent * 3))
if self.description:
o.append("{0}description={1}".format(indent, self.description))
return "\n".join(o)
class Person(object):
'''
A person object
'''
def __init__(self, store):
self.store = store
self.handle = None
self.id = None
self.gender = None
self.firstnames = []
self.prefix = None
self.surname = None
self._birth = None
self._death = None
# handles
self.event_handles = []
self.child_of_handle = None
self.parent_in_handles = []
self.notes = []
self._events = None
@property
def name(self):
'''
Return a string containing the full name of this person
i.e. firstname middlenames surname
'''
if len(self.firstnames) > 1:
firstnames = " ".join(self.firstnames)
else:
firstnames = "".join(self.firstnames)
return "{0} {1}".format(firstnames, self.surname)
@property
def name_with_dates(self):
'''
        Return a string containing this person's name and their
        birth and death dates,
        i.e. firstname surname (b. date, d. date)
'''
if self.death is None:
return "{0} (b. {1})".format(self.name, self.birth)
else:
return "{0} (b. {1}, d. {2})".format(self.name,
self.birth,
self.death)
@property
def birth(self):
'''
Return a birth date string for this person (if available).
Include any prefixes such as bef, aft, abt, etc.
'''
if self._birth is None:
# search through events
for event in self.events:
if event.type == 'Birth':
if event.date:
if event.date_type:
self._birth = "{0} {1}".format(event.date_type,
event.date)
else:
self._birth = event.date
else:
self._birth = "unknown"
return self._birth
@property
def birth_datetime(self):
'''
        Return a datetime object for this person's birth event
        (if available), otherwise None.
'''
# search through events
for event in self.events:
if event.type == 'Birth':
return event.datetime
return None
@property
def death(self):
'''
Return a death date string for this person (if available).
Include any prefixes such as bef, aft, abt, etc.
'''
if self._death is None:
# search through events
for event in self.events:
if event.type == 'Death':
if event.date:
if event.date_type:
self._death = "{0} {1}".format(event.date_type,
event.date)
else:
self._death = event.date
else:
self._death = "unknown"
return self._death
@property
def death_datetime(self):
'''
        Return a datetime object for this person's death event
        (if available), otherwise None.
'''
# search through events
for event in self.events:
if event.type == 'Death':
return event.datetime
return None
@property
def events(self):
if self._events is None:
self._events = []
if self.event_handles:
for event_handle in self.event_handles:
event = self.store.get_event(event_handle)
self._events.append(event)
return self._events
def associated_events(self, includeEventsWithNoDate=False):
'''
Return a time ordered list of tuples for each event that this person
was involved with. This set includes direct event involvement
(eg. birth) and indirect involvement (eg. birth of younger sibling).
        Each item in the list is a tuple containing a Person or Family object,
        an Event object and a flag indicating whether the event directly
        involves this person.
'''
dated_events = []
undated_events = []
SiblingCutoffDatetime = None
directPersonEvent = True
for event in self.events:
if event.datetime:
                if event.type in ['Immigration', 'Emigration']:
# This flag is used later to ensure we don't associate
# siblings with this person's events after an immigration
# event as it is assumed that the person would not be
# involved/around these events.
SiblingCutoffDatetime = event.datetime
dated_events.append((self, event, directPersonEvent))
else:
if includeEventsWithNoDate:
undated_events.append((self, event, directPersonEvent))
else:
logger.debug(
"Discarding direct person event {0} for {1} as it "
"has no date".format(event.type, self.name))
pass
# now retrieve associated events that this person was involved with
directPersonEvent = False
if self.parent_in_handles:
logger.debug(
"{0} is a parent in {1} families".format(
self.name, len(self.parent_in_handles)))
for parent_handle in self.parent_in_handles:
family = self.store.get_family(parent_handle)
# Add any family events such as marriage, divorce
logger.debug(
"Family {0} has {1} family events".format(
family.name, len(family.events)))
for event in family.events:
if event.datetime:
dated_events.append(
(family, event, directPersonEvent))
else:
if includeEventsWithNoDate:
undated_events.append(
(family, event, directPersonEvent))
else:
logger.debug(
"Discarding associated family event {0} for "
"{1} as it has no date".format(
event.type, family.name))
pass
logger.debug(
"Family {0} has {1} children".format(
family.name, len(family.children)))
# add birth of children
if family.children:
for child in family.children:
for event in child.events:
if event.type == 'Birth':
if event.datetime:
dated_events.append(
(child, event, directPersonEvent))
else:
if includeEventsWithNoDate:
undated_events.append(
(child, event, directPersonEvent))
else:
logger.debug(
"Discarding associated family "
"event {0} for {1} as it has no "
"date".format(
event.type, child.name))
pass
if self.child_of_handle:
# potentially associate younger sibling location events too
# as this person was likely around those locations too.
family = self.store.get_family(self.child_of_handle)
logger.debug(
"Family {0} had {1} children".format(
family.name, len(family.children)))
for sibling in family.children:
if sibling.handle != self.handle:
for event in sibling.events:
if event.type == 'Birth':
if event.datetime:
if event.datetime > self.birth_datetime:
# don't associate sibling birth events if they
                                    # occur after the person has immigrated/emigrated.
if SiblingCutoffDatetime is None:
dated_events.append(
(sibling, event, directPersonEvent))
else:
if event.datetime < SiblingCutoffDatetime:
dated_events.append(
(sibling, event, directPersonEvent))
else:
if includeEventsWithNoDate:
undated_events.append(
(sibling, event, directPersonEvent))
else:
                                    logger.debug(
                                        "Discarding associated family event "
                                        "{0} for {1} as it has no date".format(
                                            event.type, sibling.name))
pass
# sort events in time order. This can only be done after
# making sure that we only have events with dates.
def get_datetime(dated_event_tuple):
person_or_family_object, event, directEvent = dated_event_tuple
return event.datetime
dated_events.sort(key=get_datetime)
events = dated_events
# tack undated events onto end of time ordered list if requested
if includeEventsWithNoDate:
events.extend(undated_events)
return events
def ancestors(self, ancestors=None):
"""
Return an unordered list of this person's handle and those of their
ancestors.
"""
logger.debug("Collecting ancestors for {0}".format(self.name))
if ancestors is None:
ancestors = []
ancestors.append(self.handle)
if self.child_of_handle:
family = self.store.get_family(self.child_of_handle)
# walk up the father's tree
if family.father:
family.father.ancestors(ancestors)
# walk up the mother's tree
if family.mother:
family.mother.ancestors(ancestors)
return ancestors
def descendents(self):
'''
Return an unordered list of this person's handle and those of their
descendents.
'''
raise NotImplementedError
def __str__(self):
o = []
o.append("Person")
o.append("{0}{1}".format(indent, self.name_with_dates))
if self.child_of_handle:
theFamily = self.store.get_family(self.child_of_handle)
o.append("{0}Child of {1}".format(indent, theFamily.name))
else:
o.append("{0}Child of unknown".format(indent))
if self.parent_in_handles:
for p in self.parent_in_handles:
theFamily = self.store.get_family(p)
o.append("{0}Parent in {1}".format(indent, theFamily.name))
if self.events:
o.append("{0}Events:".format(indent))
indent2 = indent * 2
lines = []
for event in self.events:
for line in str(event).split("\n"):
lines.append("{0}{1}".format(indent2, line))
eventStr = "\n".join(lines)
o.append(eventStr)
return "\n".join(o)
class Family(object):
'''
A Gramps family object
Example of a Gramps family structure:
<family handle="_bbd9a6fc3005c442174" change="1296473477" id="F0414">
<rel type="Unknown"/>
<father hlink="_bbd9a89f2d86cb5d966"/>
<mother hlink="_bbd9aa0bf5828e2063d"/>
<eventref hlink="_bbd9aac4f234de2e484" role="Family"/>
<childref hlink="_bbd99985f4654c844c2"/>
<childref hlink="_bbd9b4d182d06ba9642"/>
<childref hlink="_bbd9b59cb0709454032"/>
<childref hlink="_bbd9b32db1501cb7968"/>
<childref hlink="_bbd9fd3f1404b1ac595"/>
</family>
'''
def __init__(self, store):
self.store = store
self.handle = None
self.id = None
self.father_handle = None
self.mother_handle = None
self.relationship = None
self.event_handles = []
self.children_handles = []
self.step_children_handles = []
self.source_handles = []
self._mother = None
self._father = None
self._children = None
self._events = None
@property
def name(self):
'''
Return a string containing the father and mother name for this family
'''
if self.mother:
m = self.mother.name
else:
m = "unknown"
if self.father:
f = self.father.name
else:
f = "unknown"
family_name = "{0} & {1}".format(f, m)
return family_name
@property
def name_with_dates(self):
'''
Return a string containing the father and mother name of this family
which include the birth and death dates.
'''
if self.mother:
m = self.mother.name_with_dates
else:
m = "unknown"
if self.father:
f = self.father.name_with_dates
else:
f = "unknown"
family_name = "{0} & {1}".format(f, m)
return family_name
@property
def mother(self):
if self._mother is None:
# search for mother person
if self.mother_handle:
self._mother = self.store.get_person(self.mother_handle)
return self._mother
@property
def father(self):
if self._father is None:
# search for father person
if self.father_handle:
self._father = self.store.get_person(self.father_handle)
return self._father
@property
def children(self):
if self._children is None:
self._children = []
if self.children_handles:
# search for children persons
for child_handle in self.children_handles:
child = self.store.get_person(child_handle)
if child:
self._children.append(child)
return self._children
@property
def events(self):
if self._events is None:
self._events = []
if self.event_handles:
for event_handle in self.event_handles:
event = self.store.get_event(event_handle)
self._events.append(event)
return self._events
def __str__(self):
o = []
o.append("Family")
o.append("{0}{1}".format(indent, self.name_with_dates))
o.append("{0}relationship={1}".format(indent, self.relationship))
# TODO: display eventref here
if self.children:
o.append("{0}Children:".format(indent))
indent2 = indent * 2
for child in self.children:
indented_child_lines = []
for line in str(child).split("\n"):
indented_child_lines.append("{0}{1}".format(indent2, line))
childStr = "\n".join(indented_child_lines)
o.append(childStr)
else:
o.append("{0}Children: None".format(indent))
return "\n".join(o)
class Store(object):
'''
Stores information extracted by the Gramps database parser
'''
def __init__(self):
self.persons = {}
self.families = {}
self.events = {}
self.places = {}
self.notes = {}
self.sources = {}
def get_person(self, handle):
'''
Return the person with the specified handle
'''
return self.persons.get(handle, None)
def get_family(self, handle):
'''
Return the family with the specified handle
'''
return self.families.get(handle, None)
def get_event(self, handle):
'''
Return the event with the specified handle
'''
return self.events.get(handle, None)
def get_place(self, handle):
'''
Return the place with the specified handle
'''
return self.places.get(handle, None)
def get_source(self, handle):
'''
Return the source with the specified handle
'''
return self.sources.get(handle, None)
def get_note(self, handle):
'''
Return the note with the specified handle
'''
return self.notes.get(handle, None)
def find_person(self, search_name):
'''
Return the handle for the first person found with a
matching name.
Return None if no match is found.
'''
logger.debug("Searching for {0}".format(search_name))
search_person_handle = None
for person_handle in self.persons:
person = self.get_person(person_handle)
if person.name == search_name:
search_person_handle = person.handle
logger.debug("Found {0} with handle {1}".format(search_name,
person.handle))
break
return search_person_handle
class NS:
'''
Namespace helper to append the gramps namespace onto tags.
This makes writing the search paths easier.
'''
def __init__(self, uri):
self.uri = uri
def __getattr__(self, tag):
return self.uri + tag
def __call__(self, path):
prefix = None
if path.startswith(".//"):
items = path[3:].split("/")
prefix = './/'
else:
items = path.split("/")
ns_tags = []
for tag in items:
ns_tag = getattr(self, tag)
ns_tags.append(ns_tag)
ns_path = "/".join(ns_tags)
if prefix:
ns_path = './/' + ns_path
return ns_path
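# Hedged usage sketch (not part of the original module): illustrates how the
# NS helper expands a plain tag path into a namespace-qualified ElementTree
# search path. The namespace URI below is only an illustrative placeholder.
def _example_ns_expansion():
    ns = NS('{http://gramps-project.org/xml/1.5.0/}')
    # Returns './/{http://gramps-project.org/xml/1.5.0/}people/'
    #         '{http://gramps-project.org/xml/1.5.0/}person'
    return ns('.//people/person')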
def to_pretty_xml(elem):
"""
Return a pretty-printed XML string for the Element.
"""
from xml.dom import minidom
rough_string = etree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
class Parser(object):
def parse(self, gramps_file):
"""
@return: a store object populated with content extracted from the database.
"""
logger.info("Loading Gramps database from {0}".format(gramps_file))
store = Store()
with gzip.GzipFile(filename=gramps_file, mode="rb", compresslevel=9) as fd:
data = fd.read()
root = etree.fromstring(data)
# Detect the namespace so we know what to place in front
# of the known tag names.
detected_namespace = ""
items = root.tag.split("}")
if len(items) == 2:
namespace_candidate, tag = items
if "{" in namespace_candidate:
# There is a namespace prefix
detected_namespace = '{%s}' % namespace_candidate[1:]
GrampsNS = NS(detected_namespace)
# Extract person entries into Person objects and store them
# in the persons dict keyed by the person's handle.
#
personNodes = root.findall(GrampsNS('.//people/person'))
for personNode in personNodes:
p = Person(store)
p.id = personNode.attrib.get('id')
genderNode = personNode.find(GrampsNS('gender'))
p.gender = genderNode.text
handle = personNode.attrib.get('handle')
p.handle = handle
store.persons[handle] = p
nameNode = personNode.find(GrampsNS('name'))
            if nameNode is not None:
firstnameNode = nameNode.find(GrampsNS('first'))
if firstnameNode is not None:
p.firstnames = firstnameNode.text.split(" ")
else:
pass # No first name node found
surnameNode = nameNode.find(GrampsNS('surname'))
if surnameNode is not None:
p.surname = surnameNode.text
p.prefix = surnameNode.attrib.get('prefix')
else:
pass # No surname node found
else:
pass # No name node found
for eventNode in personNode.findall(GrampsNS('eventref')):
event_handle = eventNode.attrib.get('hlink')
p.event_handles.append(event_handle)
for parentinNode in personNode.findall(GrampsNS('parentin')):
parentin_handle = parentinNode.attrib.get('hlink')
p.parent_in_handles.append(parentin_handle)
childofNode = personNode.find(GrampsNS('childof'))
if childofNode is not None:
p.child_of_handle = childofNode.attrib.get('hlink')
for noteNode in personNode.findall(GrampsNS('noteref')):
note_handle = noteNode.attrib.get('hlink')
p.notes.append(note_handle)
familyNodes = root.findall(GrampsNS('.//families/family'))
for familyNode in familyNodes:
f = Family(store)
f.id = familyNode.attrib.get('id')
motherNode = familyNode.find(GrampsNS('mother'))
if motherNode is not None:
f.mother_handle = motherNode.attrib.get('hlink')
fatherNode = familyNode.find(GrampsNS('father'))
if fatherNode is not None:
f.father_handle = fatherNode.attrib.get('hlink')
relationshipNode = familyNode.find(GrampsNS('rel'))
if relationshipNode is not None:
f.relationship = relationshipNode.attrib.get('type')
for eventNode in familyNode.findall(GrampsNS('eventref')):
f.event_handles.append(eventNode.attrib.get('hlink'))
handle = familyNode.attrib.get('handle')
f.handle = handle
store.families[handle] = f
for childNode in familyNode.findall(GrampsNS('childref')):
child_handle = childNode.attrib.get('hlink')
if childNode.attrib.get('frel') == 'Stepchild':
f.step_children_handles.append(child_handle)
else:
f.children_handles.append(child_handle)
for sourceNode in familyNode.findall(GrampsNS('sourceref')):
source_handle = sourceNode.attrib.get('hlink')
f.source_handles.append(source_handle)
eventNodes = root.findall(GrampsNS('.//events/event'))
for eventNode in eventNodes:
e = Event(store)
            e.id = eventNode.attrib.get('id')
handle = eventNode.attrib.get('handle')
e.handle = handle
store.events[handle] = e
typeNode = eventNode.find(GrampsNS('type'))
if typeNode is not None:
e.type = typeNode.text
datevalNode = eventNode.find(GrampsNS('dateval'))
if datevalNode is not None:
e.date = datevalNode.attrib.get('val')
e.date_type = datevalNode.attrib.get('type')
                e.date_cformat = datevalNode.attrib.get('cformat')
descriptionNode = eventNode.find(GrampsNS('description'))
if descriptionNode is not None:
e.description = descriptionNode.text
placeNode = eventNode.find(GrampsNS('place'))
if placeNode is not None:
e.place_handle = placeNode.attrib.get('hlink')
for noteNode in eventNode.findall(GrampsNS('noteref')):
note_handle = noteNode.attrib.get('hlink')
e.note_handles.append(note_handle)
for sourceNode in eventNode.findall(GrampsNS('sourceref')):
source_handle = sourceNode.attrib.get('hlink')
e.source_handles.append(source_handle)
placeNodes = root.findall(GrampsNS('.//places/placeobj'))
for placeNode in placeNodes:
p = Place(store)
p.id = placeNode.attrib.get('id')
handle = placeNode.attrib.get('handle')
p.handle = handle
store.places[handle] = p
titleNode = placeNode.find(GrampsNS('ptitle'))
if titleNode is not None:
p.title = titleNode.text
coordNode = placeNode.find(GrampsNS('coord'))
if coordNode is not None:
p.lat = coordNode.attrib.get('lat')
p.lon = coordNode.attrib.get('long')
# TODO:
# extract sources
# extract notes
# etc
return store
parser = Parser()
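# Hedged usage sketch (not part of the original module): shows how the parser
# and store might be used together. The file name and person name below are
# hypothetical placeholders.
def _example_parse_and_query(gramps_file='family_tree.gramps'):
    store = parser.parse(gramps_file)
    handle = store.find_person('Ada Lovelace')
    if handle is None:
        return None
    person = store.get_person(handle)
    logger.info("Found %s with %d events", person.name, len(person.events))
    return person.associated_events(includeEventsWithNoDate=True)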
|
|
import struct
import hashlib
import subprocess
import re
from binascii import hexlify, unhexlify
from seth.args import args, hexdump
from seth.consts import TERM_PRIV_KEY
class RC4(object):
def __init__(self, key):
x = 0
self.sbox = list(range(256))
for i in range(256):
x = (x + self.sbox[i] + key[i % len(key)]) % 256
self.sbox[i], self.sbox[x] = self.sbox[x], self.sbox[i]
self.i = self.j = 0
self.encrypted_packets = 0
def decrypt(self, data):
if self.encrypted_packets >= 4096:
self.update_key()
out = []
for char in data:
self.i = (self.i + 1) % 256
self.j = (self.j + self.sbox[self.i]) % 256
self.sbox[self.i], self.sbox[self.j] = self.sbox[self.j], self.sbox[self.i]
out.append(char ^ self.sbox[(self.sbox[self.i] + self.sbox[self.j]) % 256])
self.encrypted_packets += 1
return bytes(bytearray(out))
def update_key(self):
print("Updating session keys")
pad1 = b"\x36"*40
pad2 = b"\x5c"*48
# TODO finish this
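# Hedged sketch (not part of the original module): RC4 is a symmetric stream
# cipher, so two instances keyed identically generate the same keystream and
# decrypt() doubles as encrypt(). The key and payload below are placeholders.
def _example_rc4_symmetry():
    key = b"\x01\x02\x03\x04\x05\x06\x07\x08" * 2
    sender, receiver = RC4(key), RC4(key)
    ciphertext = sender.decrypt(b"fast-path payload")
    assert receiver.decrypt(ciphertext) == b"fast-path payload"
    return ciphertext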
def reencrypt_client_random(crypto, bytes):
"""Replace the original encrypted client random (encrypted with OUR
public key) with the client random encrypted with the original public
key"""
reenc_client_rand = rsa_encrypt(crypto["client_rand"],
crypto["pubkey"]) + b"\x00"*8
result = bytes.replace(crypto["enc_client_rand"],
reenc_client_rand)
return result
def generate_rsa_key(keysize):
p = subprocess.Popen(
["openssl", "genrsa", str(keysize)],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL
)
key_pipe = subprocess.Popen(
["openssl", "rsa", "-noout", "-text"],
stdin=p.stdout,
stdout=subprocess.PIPE
)
p.stdout.close()
output = key_pipe.communicate()[0]
# parse the text output
key = None
result = {}
for line in output.split(b'\n'):
field = line.split(b':')[:2]
if len(field) == 2 and field[0] in [
b'modulus',
b'privateExponent',
b'publicExponent'
]:
key = field[0].decode()
result[key] = field[1]
elif not line[:1] == b" ":
key = None
if line[:4] == b" "*4 and key in result:
result[key] += line[4:]
for f in ["modulus", "privateExponent"]:
b = result[f].replace(b':', b'')
b = unhexlify(b)
result[f] = int.from_bytes(b, "big")
m = re.match(b'.* ([0-9]+) ', result['publicExponent'])
result['publicExponent'] = int(m.groups(1)[0])
return result
def rsa_encrypt(bytes, key):
r = int.from_bytes(bytes, "little")
e = key["publicExponent"]
n = key["modulus"]
c = pow(r, e, n)
return c.to_bytes(2048, "little").rstrip(b"\x00")
def rsa_decrypt(bytes, key):
s = int.from_bytes(bytes, "little")
d = key["privateExponent"]
n = key["modulus"]
m = pow(s, d, n)
return m.to_bytes(2048, "little").rstrip(b"\x00")
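# Hedged sketch (not part of the original module): a round-trip check of the
# RSA helpers above. It assumes the `openssl` binary is on PATH and that its
# text output matches what generate_rsa_key() expects to parse.
def _example_rsa_roundtrip():
    key = generate_rsa_key(512)
    ciphertext = rsa_encrypt(b"client random", key)
    assert rsa_decrypt(ciphertext, key) == b"client random"
    return ciphertext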
def is_fast_path(bytes):
    # Heuristic for RDP fast-path PDUs: the two low bits of the first byte
    # (the action field) are zero and the second byte carries the PDU length
    # (or 0x80, signalling a two-byte length field).
    if len(bytes) <= 1: return False
    return bytes[0] % 4 == 0 and bytes[1] in [len(bytes), 0x80]
def decrypt(bytes, From="Client"):
cleartext = b""
if is_fast_path(bytes):
is_encrypted = (bytes[0] >> 7 == 1)
has_opt_length = (bytes[1] >= 0x80)
offset = 2
if has_opt_length:
offset += 1
if is_encrypted:
offset += 8
cleartext = rc4_decrypt(bytes[offset:], From=From)
else: # slow path
offset = 13
if len(bytes) <= 15: return bytes
if bytes[offset] >= 0x80: offset += 1
offset += 1
security_flags = struct.unpack('<H', bytes[offset:offset+2])[0]
is_encrypted = (security_flags & 0x0008)
if is_encrypted:
offset += 12
cleartext = rc4_decrypt(bytes[offset:], From=From)
if not cleartext == b"":
if args.debug:
print("Cleartext: ")
hexdump(cleartext)
return bytes[:offset] + cleartext
else:
return bytes
def sym_encryption_enabled(crypto):
if "client_rand" in crypto:
return (not crypto["client_rand"] == b"")
else:
return False
def generate_session_keys(crypto):
    # Session key derivation, cf. MS-RDPBCGR ch. 5.3.5.1 (non-FIPS)
def salted_hash(s, i):
sha1 = hashlib.sha1()
sha1.update(i + s + crypto["client_rand"] +
crypto["server_rand"])
md5 = hashlib.md5()
md5.update(s + sha1.digest())
return md5.digest()
def final_hash(k):
md5 = hashlib.md5()
md5.update(k + crypto["client_rand"] +
crypto["server_rand"])
return md5.digest()
# Non-Fips, 128bit key
pre_master_secret = (crypto["client_rand"][:24] +
crypto["server_rand"][:24])
master_secret = (salted_hash(pre_master_secret, b"A") +
salted_hash(pre_master_secret, b"BB") +
salted_hash(pre_master_secret, b"CCC"))
session_key_blob = (salted_hash(master_secret, b"X") +
salted_hash(master_secret, b"YY") +
salted_hash(master_secret, b"ZZZ"))
mac_key, server_encrypt_key, server_decrypt_key = [
session_key_blob[i*16:(i+1)*16] for i in range(3)
]
server_encrypt_key = final_hash(server_encrypt_key)
server_decrypt_key = final_hash(server_decrypt_key)
client_encrypt_key = server_decrypt_key
client_decrypt_key = server_encrypt_key
crypto["mac_key"] = mac_key
crypto["server_encrypt_key"] = server_encrypt_key
crypto["server_decrypt_key"] = server_decrypt_key
crypto["client_encrypt_key"] = client_encrypt_key
crypto["client_decrypt_key"] = client_decrypt_key
# TODO handle shorter keys than 128 bit
print("Session keys generated")
init_rc4_sbox(crypto)
def init_rc4_sbox(crypto):
print("Initializing RC4 s-box")
# TODO: get rid of global variables
global RC4_CLIENT
global RC4_SERVER
RC4_CLIENT = RC4(crypto["server_decrypt_key"])
RC4_SERVER = RC4(crypto["client_decrypt_key"])
def rc4_decrypt(data, From="Client"):
if From == "Client":
return RC4_CLIENT.decrypt(data)
else:
return RC4_SERVER.decrypt(data)
def sign_certificate(cert, sign_len):
"""Signs the certificate with the private key"""
m = hashlib.md5()
m.update(cert)
m = m.digest() + b"\x00" + b"\xff"*45 + b"\x01"
m = int.from_bytes(m, "little")
d = int.from_bytes(TERM_PRIV_KEY["d"], "little")
n = int.from_bytes(TERM_PRIV_KEY["n"], "little")
s = pow(m, d, n)
return s.to_bytes(sign_len, "little")
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
# Modified for wxPython Phoenix 3.0.0.0
# Keith Smith 15 Oct 2013
""" The WX backend.
"""
# NOTICE: wx has the same general problem with OpenGl being somewhat
# unmanaged and frames not being drawn on Gnome. However, wx works
# reasonably well if a Refresh command is applied on each Activate
# event of the main window.
import os
import visvis
from visvis import BaseFigure, events, constants
from visvis.core.misc import getResourceDir
import wx
from wx.glcanvas import GLCanvas, GLContext
KEYMAP = { wx.WXK_SHIFT: constants.KEY_SHIFT,
wx.WXK_ALT: constants.KEY_ALT,
wx.WXK_CONTROL: constants.KEY_CONTROL,
wx.WXK_LEFT: constants.KEY_LEFT,
wx.WXK_UP: constants.KEY_UP,
wx.WXK_RIGHT: constants.KEY_RIGHT,
wx.WXK_DOWN: constants.KEY_DOWN,
wx.WXK_PAGEUP: constants.KEY_PAGEUP,
wx.WXK_PAGEDOWN: constants.KEY_PAGEDOWN,
wx.WXK_RETURN: constants.KEY_ENTER,
wx.WXK_ESCAPE: constants.KEY_ESCAPE,
wx.WXK_DELETE: constants.KEY_DELETE
}
# Map uppercase letters to their lowercase equivalents ('Z' included)
for i in range(ord('A'), ord('Z') + 1):
    KEYMAP[i] = i + 32
def modifiers(event):
"""Convert the WX modifier state into a tuple of active modifier keys."""
mod = ()
if event.ShiftDown():
mod += constants.KEY_SHIFT,
if event.CmdDown():
mod += constants.KEY_CONTROL,
if event.AltDown():
mod += constants.KEY_ALT,
return mod
#===============================================================================
# This is the Widget class prior to wxPython Phoenix
#===============================================================================
class GLWidget(GLCanvas):
""" Implementation of the WX GLCanvas, which passes a number of
events to the Figure object that wraps it.
This is the original version with automatic glContext switching
"""
def __init__(self, figure, parent, *args, **kwargs):
# make sure the window is double buffered (Thanks David!)
kwargs.update({'attribList' : [wx.glcanvas.WX_GL_RGBA,
wx.glcanvas.WX_GL_DOUBLEBUFFER]})
# call GLCanvas' init method
GLCanvas.__init__(self, parent, *args, **kwargs)
self._glContext = GLContext(self )
self._glContext.SetCurrent(self)
self.figure = figure
# find root window
root = self.GetParent()
while root.GetParent():
root = root.GetParent()
# make bindings for events
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.Bind(wx.EVT_RIGHT_DCLICK, self.OnDoubleClick)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
#
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_SIZE, self.OnResize)
root.Bind(wx.EVT_CLOSE, self.OnClose) # Note root
#
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SET_FOCUS, self.OnFocus)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
root.Bind(wx.EVT_ACTIVATE, self.OnActivate) # Note root
# Needs to focus to catch key events
self.SetFocus()
# if lost, tough luck (thus the comment)
#self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.OnMouseUp)
# onpaint is called when shown is called by figure() function.
def GetContext(self):
return self._glContext
def OnLeftDown(self, event):
x,y = event.GetPosition()
self.CaptureMouse() # make sure to capture release outside
self.figure._GenerateMouseEvent('down', x, y, 1, modifiers(event))
def OnLeftUp(self, event):
x,y = event.GetPosition()
try:
self.ReleaseMouse()
except Exception:
pass
self.figure._GenerateMouseEvent('up', x, y, 1, modifiers(event))
def OnRightDown(self, event):
x,y = event.GetPosition()
self.CaptureMouse() # make sure to capture release outside
self.figure._GenerateMouseEvent('down', x, y, 2, modifiers(event))
def OnRightUp(self, event):
x,y = event.GetPosition()
try:
self.ReleaseMouse()
except Exception:
pass
self.figure._GenerateMouseEvent('up', x, y, 2, modifiers(event))
def OnDoubleClick(self, event):
but = 0
x,y = event.GetPosition()
if event.LeftDClick():
but = 1
elif event.RightDClick():
but = 2
self.figure._GenerateMouseEvent('double', x, y, but, modifiers(event))
def OnMotion(self, event):
if self.figure:
            # produce event
x,y = event.GetPosition()
self.figure._GenerateMouseEvent('motion', x, y, 0, modifiers(event))
def OnMouseWheel(self, event):
numDegrees = event.GetWheelRotation() / 8.0
numSteps = numDegrees / 15.0
        # There is event.GetWheelAxis(), but only in newer versions of wx;
        # mine does not have it, so it cannot be tested here.
horizontal, vertical = 0, numSteps
if self.figure:
x,y = event.GetPosition()
self.figure._GenerateMouseEvent('scroll', x, y, horizontal, vertical, modifiers(event))
def OnKeyDown(self, event):
key, text = self._ProcessKey(event)
self.figure._GenerateKeyEvent('keydown', key, text, modifiers(event))
def OnKeyUp(self, event):
key, text = self._ProcessKey(event)
self.figure._GenerateKeyEvent('keyup', key, text, modifiers(event))
    def _ProcessKey(self, event):
        """ Evaluate the wx keycode and transform it into a visvis key.
        Also produce a text version.
        Return (key, text). """
key = event.GetKeyCode()
# special cases for shift control and alt -> map to 17 18 19
if key in KEYMAP:
return KEYMAP[key], ''
else:
# other key, try producing text
if (65 <= key <= 90) and not event.ShiftDown():
key += 32
try:
return key, chr(key)
except ValueError:
return key, ''
def OnEnter(self, event):
if self.figure:
ev = self.figure.eventEnter
ev.Set(0,0,0)
ev.Fire()
def OnLeave(self, event):
if self.figure:
ev = self.figure.eventLeave
ev.Set(0,0,0)
ev.Fire()
def OnResize(self, event):
if self.figure:
self.figure._OnResize()
event.Skip()
def OnClose(self, event):
if self.figure:
self.figure.Destroy()
parent = self.GetParent()
self.Destroy() # Hide and delete window
# Prevent frame from sticking when there is no wx event loop
if isinstance(parent, FigureFrame):
parent.Hide()
event.Skip()
def OnFocus(self, event):
if self.figure:
BaseFigure._currentNr = self.figure.nr
event.Skip()
def OnPaint(self, event):
# I read that you should always create a PaintDC when implementing
# an OnPaint event handler.
a = wx.PaintDC(self)
if self.GetContext():
self.figure.OnDraw()
event.Skip()
def OnActivate(self, event):
# When the title bar is dragged in ubuntu
if event.GetActive():
self.Refresh()
event.Skip()
def OnEraseBackground(self, event):
pass # This prevents flicker on Windows
#===============================================================================
# This is the new widget class for wxPython Phoenix
#===============================================================================
class GLWidgetPhoenix(GLWidget):
""" Implementation of the WX GLCanvas, which passes a number of
events to the Figure object that wraps it.
This new Phoenix version must explicitly set the glContext
in OnFocus and OnPaint
"""
def OnFocus(self, event):
if self.figure and self.IsShownOnScreen():
# Must set glContext for Phoenix
self.SetCurrent(self._glContext)
BaseFigure._currentNr = self.figure.nr
event.Skip()
def OnPaint(self, event):
# I read that you should always create a PaintDC when implementing
# an OnPaint event handler.
# a = wx.PaintDC(self)
# Must set glContext for Phoenix, but only if shown on screen
if self.GetContext() and self.IsShownOnScreen():
self.SetCurrent(self._glContext)
self.figure.OnDraw()
event.Skip()
class Figure(BaseFigure):
""" This is the wxPython implementation of the figure class.
A Figure represents the OpenGl context and is the root
of the visualization tree; a Figure Wibject does not have a parent.
A Figure can be created with the function vv.figure() or vv.gcf().
"""
def __init__(self, parent, *args, **kwargs):
self._widget = None
self._widget_args = (parent, args, kwargs)
if kwargs.get('create_widget', True):
self.CreateWidget()
# call original init AFTER we created the widget
BaseFigure.__init__(self)
def CreateWidget(self):
""" Create the Figure's widget if necessary, and return the
widget. """
if self._widget is None:
# Make sure there is a native app and the timer is started
# (also when embedded)
app.Create()
# create widget
updatePosition = False
parent, args, kwargs = self._widget_args
if 'create_widget' in kwargs:
updatePosition = True
del(kwargs['create_widget'])
# Based on switch set in App the correct widget class is called
if app._phoenix:
self._widget = GLWidgetPhoenix(self, parent, *args, **kwargs)
else:
self._widget = GLWidget(self, parent, *args, **kwargs)
if updatePosition:
self.position._Changed()
return self._widget
def _SetCurrent(self):
""" make this scene the current context """
if not self._destroyed and self._widget is not None:
try:
self._widget.SetCurrent()
except Exception:
# can happen when trying to call this method after
# the window was destroyed.
pass
def _SwapBuffers(self):
""" Swap the memory and screen buffer such that
what we rendered appears on the screen """
if self._widget and not self._destroyed:
self._widget.SwapBuffers()
def _SetTitle(self, title):
""" Set the title of the figure... """
if self._widget and not self._destroyed:
window = self._widget.GetParent()
if hasattr(window,'SetTitle'):
window.SetTitle(title)
def _SetPosition(self, x, y, w, h):
""" Set the position of the widget. """
        # select widget to resize; if it is embedded in a FigureFrame,
        # act on the frame instead.
if self._widget and not self._destroyed:
widget = self._widget
if isinstance(widget.GetParent(), FigureFrame):
widget = widget.GetParent()
# apply
#widget.SetDimensions(x, y, w, h)
widget.Move(x,y)
widget.SetClientSize(w,h)
def _GetPosition(self):
""" Get the position of the widget. """
        # select widget to query; if it is embedded in a FigureFrame,
        # use the frame's geometry instead.
if self._widget and not self._destroyed:
widget = self._widget
if isinstance(widget.GetParent(), FigureFrame):
widget = widget.GetParent()
# get and return
#tmp = widget.GetRect()
#return tmp.left, tmp.top, tmp.width, tmp.height
size = widget.GetClientSize()
pos = widget.GetPosition()
return pos[0], pos[1], size[0], size[1]
return 0, 0, 0, 0
def _RedrawGui(self):
if self._widget:
self._widget.Refresh()
def _ProcessGuiEvents(self):
app.ProcessEvents()
def _Close(self, widget):
if widget is None:
widget = self._widget
if widget and widget.GetParent():
try:
widget.GetParent().Hide()
widget.GetParent().Close()
except wx.PyAssertionError:
# Prevent "wxEVT_MOUSE_CAPTURE_LOST not being processed" error.
pass
class FigureFrame(wx.Frame):
""" Define a Frame. This is only to be able to tell whether
the Figure object is used as a widget or as a Figure on its
own. """
pass
def newFigure():
""" Create a window with a figure widget.
"""
# Make sure there is a native app. Need here too, because we need to
# create the figure frame first
app.Create()
# Create frame
refSize = tuple( visvis.settings.figureSize )
frame = FigureFrame(None, -1, "Figure", size=refSize)
# Correct size. The given size includes the window manager's frame
size = frame.GetClientSize()
w = refSize[0] + (refSize[0] - size[0])
h = refSize[1] + (refSize[1] - size[1])
frame.SetSize((w,h))
    # Insert figure
figure = Figure(frame)
# Set icon
try:
iconFile = os.path.join(getResourceDir(), 'visvis_icon_wx.png')
frame.SetIcon(wx.Icon(iconFile, wx.BITMAP_TYPE_PNG))
except Exception:
pass
# Show AFTER canvas is added
frame.Show()
    # Apply a draw, so that OpenGl can initialize before we really
    # do some drawing. Otherwise textures end up showing in black.
figure.DrawNow()
app.ProcessEvents() # Fixes issue 43
return figure
class VisvisEventsTimer(wx.Timer):
def Notify(self):
events.processVisvisEvents()
class App(events.App):
""" App()
Application class to wrap the GUI applications in a class
with a simple interface that is the same for all backends.
This is the wxPython implementation.
Modifications added to check for wxPython Phoenix
"""
def __init__(self):
# Timer to enable timers in visvis. Should be created AFTER the app
self._timer = None
# check which version of wx is installed, set _phoenix switch
# and select the correct _ProcessEvents
if "phoenix" in wx.PlatformInfo:
self._phoenix = True
self._ProcessEvents = self._ProcessEventsPhoenix
else:
self._phoenix = False
self._ProcessEvents = self._ProcessEventsOriginal
def _GetNativeApp(self):
        # Get the native app in a safe way. Taken from guisupport.py,
        # but use the wx.App() class because PySimpleApp is deprecated
app = wx.GetApp()
if app is None:
app = wx.App(False)
        # Store it so it won't be deleted, but not on a visvis object,
        # or an application may produce an error when closed
wx.app_instance = app
# Start timer
if self._timer is None:
self._timer = VisvisEventsTimer()
self._timer.Start(10, False)
# Return
return app
def _ProcessEventsOriginal(self):
# This is the original API
# Get app
app = self._GetNativeApp()
# Keep reference of old eventloop instance
old = wx.EventLoop.GetActive()
# Create new eventloop and process
eventLoop = wx.EventLoop()
wx.EventLoop.SetActive(eventLoop)
while eventLoop.Pending():
eventLoop.Dispatch()
# Process idle
app.ProcessIdle() # otherwise frames do not close
# Set back the original
wx.EventLoop.SetActive(old)
def _ProcessEventsPhoenix(self):
# this version uses the new Phoenix API
# Get app
app = self._GetNativeApp()
# Keep reference of old eventloop instance
old = wx.EventLoopBase.GetActive()
# Create new eventloop and process
eventLoop = app.GetTraits().CreateEventLoop()
wx.EventLoopActivator(eventLoop)
while eventLoop.Pending():
eventLoop.Dispatch()
# Process idle
eventLoop.ProcessIdle() # otherwise frames do not close
# Set back the original
wx.EventLoopActivator(old)
def _Run(self):
app = self._GetNativeApp()
if hasattr(app, '_in_event_loop') and app._in_event_loop:
pass # Already in event loop
else:
app.MainLoop()
# Create application instance now
app = App()
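# Hedged usage sketch (not part of the original module): embedding a visvis
# Figure inside an existing wx parent window. 'parent' is assumed to be a
# wx.Window created by the host application.
def _example_embed_figure(parent):
    fig = Figure(parent)  # creates the GLWidget/GLWidgetPhoenix child
    fig.DrawNow()         # let OpenGL initialize before real drawing
    app.ProcessEvents()
    return fig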
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for GCS File System."""
from __future__ import absolute_import
import logging
import unittest
from builtins import zip
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import mock
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import FileMetadata
from apache_beam.options.pipeline_options import PipelineOptions
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.gcp import gcsfilesystem
except ImportError:
gcsfilesystem = None # type: ignore
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(gcsfilesystem is None, 'GCP dependencies are not installed')
class GCSFileSystemTest(unittest.TestCase):
def setUp(self):
pipeline_options = PipelineOptions()
self.fs = gcsfilesystem.GCSFileSystem(pipeline_options=pipeline_options)
def test_scheme(self):
self.assertEqual(self.fs.scheme(), 'gs')
self.assertEqual(gcsfilesystem.GCSFileSystem.scheme(), 'gs')
def test_join(self):
self.assertEqual('gs://bucket/path/to/file',
self.fs.join('gs://bucket/path', 'to', 'file'))
self.assertEqual('gs://bucket/path/to/file',
self.fs.join('gs://bucket/path', 'to/file'))
self.assertEqual('gs://bucket/path/to/file',
self.fs.join('gs://bucket/path', '/to/file'))
self.assertEqual('gs://bucket/path/to/file',
self.fs.join('gs://bucket/path/', 'to', 'file'))
self.assertEqual('gs://bucket/path/to/file',
self.fs.join('gs://bucket/path/', 'to/file'))
self.assertEqual('gs://bucket/path/to/file',
self.fs.join('gs://bucket/path/', '/to/file'))
with self.assertRaises(ValueError):
self.fs.join('/bucket/path/', '/to/file')
def test_split(self):
self.assertEqual(('gs://foo/bar', 'baz'),
self.fs.split('gs://foo/bar/baz'))
self.assertEqual(('gs://foo', ''),
self.fs.split('gs://foo/'))
self.assertEqual(('gs://foo', ''),
self.fs.split('gs://foo'))
with self.assertRaises(ValueError):
self.fs.split('/no/gcs/prefix')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_match_multiples(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
gcsio_mock.list_prefix.return_value = {
'gs://bucket/file1': 1,
'gs://bucket/file2': 2
}
expected_results = set([
FileMetadata('gs://bucket/file1', 1),
FileMetadata('gs://bucket/file2', 2)
])
match_result = self.fs.match(['gs://bucket/'])[0]
self.assertEqual(
set(match_result.metadata_list),
expected_results)
gcsio_mock.list_prefix.assert_called_once_with('gs://bucket/')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_match_multiples_limit(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
limit = 1
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
gcsio_mock.list_prefix.return_value = {
'gs://bucket/file1': 1
}
expected_results = set([
FileMetadata('gs://bucket/file1', 1)
])
match_result = self.fs.match(['gs://bucket/'], [limit])[0]
self.assertEqual(
set(match_result.metadata_list),
expected_results)
self.assertEqual(
len(match_result.metadata_list),
limit)
gcsio_mock.list_prefix.assert_called_once_with('gs://bucket/')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_match_multiples_error(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
exception = IOError('Failed')
gcsio_mock.list_prefix.side_effect = exception
with self.assertRaisesRegex(BeamIOError,
r'^Match operation failed') as error:
self.fs.match(['gs://bucket/'])
self.assertRegex(str(error.exception.exception_details),
r'gs://bucket/.*%s' % exception)
gcsio_mock.list_prefix.assert_called_once_with('gs://bucket/')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_match_multiple_patterns(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
gcsio_mock.list_prefix.side_effect = [
{'gs://bucket/file1': 1},
{'gs://bucket/file2': 2},
]
expected_results = [
[FileMetadata('gs://bucket/file1', 1)],
[FileMetadata('gs://bucket/file2', 2)]
]
result = self.fs.match(['gs://bucket/file1*', 'gs://bucket/file2*'])
self.assertEqual(
[mr.metadata_list for mr in result],
expected_results)
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_create(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    # Issue file create
_ = self.fs.create('gs://bucket/from1', 'application/octet-stream')
gcsio_mock.open.assert_called_once_with(
'gs://bucket/from1', 'wb', mime_type='application/octet-stream')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_open(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
    # Issue file open
_ = self.fs.open('gs://bucket/from1', 'application/octet-stream')
gcsio_mock.open.assert_called_once_with(
'gs://bucket/from1', 'rb', mime_type='application/octet-stream')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_copy_file(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
sources = ['gs://bucket/from1']
destinations = ['gs://bucket/to1']
# Issue file copy
self.fs.copy(sources, destinations)
gcsio_mock.copy.assert_called_once_with(
'gs://bucket/from1', 'gs://bucket/to1')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_copy_file_error(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
sources = ['gs://bucket/from1']
destinations = ['gs://bucket/to1']
exception = IOError('Failed')
gcsio_mock.copy.side_effect = exception
    # Expected results for the failed copy, keyed by (source, destination).
expected_results = {(s, d):exception for s, d in zip(sources, destinations)}
# Issue batch copy.
with self.assertRaisesRegex(BeamIOError,
r'^Copy operation failed') as error:
self.fs.copy(sources, destinations)
self.assertEqual(error.exception.exception_details, expected_results)
gcsio_mock.copy.assert_called_once_with(
'gs://bucket/from1', 'gs://bucket/to1')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_copy_tree(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
sources = ['gs://bucket1/']
destinations = ['gs://bucket2/']
# Issue directory copy
self.fs.copy(sources, destinations)
gcsio_mock.copytree.assert_called_once_with(
'gs://bucket1/', 'gs://bucket2/')
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_rename(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
sources = [
'gs://bucket/from1',
'gs://bucket/from2',
'gs://bucket/from3',
]
destinations = [
'gs://bucket/to1',
'gs://bucket/to2',
'gs://bucket/to3',
]
gcsio_mock.copy_batch.side_effect = [[
('gs://bucket/from1', 'gs://bucket/to1', None),
('gs://bucket/from2', 'gs://bucket/to2', None),
('gs://bucket/from3', 'gs://bucket/to3', None),
]]
gcsio_mock.delete_batch.side_effect = [[
('gs://bucket/from1', None),
('gs://bucket/from2', None),
('gs://bucket/from3', None),
]]
# Issue batch rename.
self.fs.rename(sources, destinations)
gcsio_mock.copy_batch.assert_called_once_with([
('gs://bucket/from1', 'gs://bucket/to1'),
('gs://bucket/from2', 'gs://bucket/to2'),
('gs://bucket/from3', 'gs://bucket/to3'),
])
gcsio_mock.delete_batch.assert_called_once_with([
'gs://bucket/from1',
'gs://bucket/from2',
'gs://bucket/from3',
])
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_rename_error(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
sources = [
'gs://bucket/from1',
'gs://bucket/from2',
'gs://bucket/from3',
]
destinations = [
'gs://bucket/to1',
'gs://bucket/to2',
'gs://bucket/to3',
]
exception = IOError('Failed')
gcsio_mock.delete_batch.side_effect = [[(f, exception) for f in sources]]
gcsio_mock.copy_batch.side_effect = [[
('gs://bucket/from1', 'gs://bucket/to1', None),
('gs://bucket/from2', 'gs://bucket/to2', None),
('gs://bucket/from3', 'gs://bucket/to3', None),
]]
    # Expected results for the failed rename, keyed by (source, destination).
expected_results = {(s, d):exception for s, d in zip(sources, destinations)}
# Issue batch rename.
with self.assertRaisesRegex(BeamIOError,
r'^Rename operation failed') as error:
self.fs.rename(sources, destinations)
self.assertEqual(error.exception.exception_details, expected_results)
gcsio_mock.copy_batch.assert_called_once_with([
('gs://bucket/from1', 'gs://bucket/to1'),
('gs://bucket/from2', 'gs://bucket/to2'),
('gs://bucket/from3', 'gs://bucket/to3'),
])
gcsio_mock.delete_batch.assert_called_once_with([
'gs://bucket/from1',
'gs://bucket/from2',
'gs://bucket/from3',
])
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_delete(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
gcsio_mock.size.return_value = 0
files = [
'gs://bucket/from1',
'gs://bucket/from2',
'gs://bucket/from3',
]
# Issue batch delete.
self.fs.delete(files)
gcsio_mock.delete_batch.assert_called()
@mock.patch('apache_beam.io.gcp.gcsfilesystem.gcsio')
def test_delete_error(self, mock_gcsio):
# Prepare mocks.
gcsio_mock = mock.MagicMock()
gcsfilesystem.gcsio.GcsIO = lambda: gcsio_mock
exception = IOError('Failed')
gcsio_mock.delete_batch.side_effect = exception
gcsio_mock.size.return_value = 0
files = [
'gs://bucket/from1',
'gs://bucket/from2',
'gs://bucket/from3',
]
expected_results = {f:exception for f in files}
# Issue batch delete.
with self.assertRaisesRegex(BeamIOError,
r'^Delete operation failed') as error:
self.fs.delete(files)
self.assertEqual(error.exception.exception_details, expected_results)
gcsio_mock.delete_batch.assert_called()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
#!/usr/bin/python
import Settings
import bs4
from datetime import datetime
import xml.etree.ElementTree as ET
import traceback
import sys
import os
import os.path
from xml.dom import minidom
######################################
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
######################################
# Extract the act descriptions from the HTML. Returns the results as
# a list of dicts. The dictionary keys are 'head' and 'body'. For example:
# [ { 'head' : 'Prologue', 'body' : 'Ira talks' }, { 'head' : 'Act 1', 'body' : ... } ... ]
#
def get_acts(soup):
acts = []
act_num = 0
while True:
found = soup.find(id="act-" + str(act_num))
if found is None:
break
if isinstance(found, bs4.Tag):
# Remove all of the odd tags that we don't want
[s.extract() for s in found.findAll('div', attrs={'class' : 'audio-player'}) ]
[s.extract() for s in found.findAll('span', attrs={'class' : 'tags'}) ]
[s.extract() for s in found.findAll('ul', attrs={'class' : 'act-contributors'}) ]
[s.extract() for s in found.findAll('ul', attrs={'class' : 'actions'}) ]
head = found.find('div', attrs={'class' : 'act-head'}).getText().strip()
body = found.find('div', attrs={'class' : 'act-body'}).getText().strip()
act = { 'head' : head, 'body' : body }
acts.append(act)
act_num += 1
else:
raise Exception("getActs() hit on some HTML which wasn't a tag")
return acts
######################################
# Searches a given beautifulsoup tree for a tag(s) with certain attributes and returns only the non-tag content
######################################
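# Illustrative example (an assumption about typical input, not an actual test):
# for markup like <h1 class="node-title">129: <a href="...">Advice</a></h1>,
# get_raw_content(soup, "h1", {"class": "node-title"}) should return roughly
# u'129: Advice', because nested tags are replaced by their children before the
# remaining strings are stripped and joined.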
def get_raw_content(soup_body, tag, attributes):
# TODO: This might be able to be replaced by getText()
resultset = soup_body.find(tag, attributes)
if resultset is None:
raise LookupError("Couldn't find a tag named '{0}' with attributes '{1}'".format(tag,attributes))
try:
for i,result in enumerate(resultset):
if isinstance(result,bs4.NavigableString):
pass
elif isinstance(result,bs4.Tag):
resultset[i] = result.replaceWithChildren()
else:
print "Got some strange type"
resultset[i] = "ERROR"
value = " ".join([unicode(a).strip() for a in resultset])
return value
except Exception as e:
print "Caught exception in getRawContent: {0}".format(e)
raise LookupError("Problem in getRawContent when searching for tag named '{0}' with attributes '{1}'".format(tag,attributes))
#####################################
def process_episode(number):
audiofile = Settings.local_audio_filename(number)
htmlfile = Settings.local_html_filename(number)
# Make sure that we have the html file and (if desired) the mp3 file
    if not os.path.isfile(htmlfile):
        raise Exception("The HTML file for episode {0} is missing".format(number))
    if Settings.CACHE_MP3S and not os.path.isfile(audiofile):
        raise Exception("The MP3 file for episode {0} is missing".format(number))
try:
file_contents = open(htmlfile, 'r').read().decode('utf-8')
soup = bs4.BeautifulSoup(file_contents)
except Exception as e:
print "Problem trying to read {0}".format(htmlfile)
raise e
try:
# Get size of mp3 file
# TODO: Come up with some way to get the size of the remote files
filesize = os.path.getsize(audiofile) if Settings.CACHE_MP3S else 28000000
content_div = soup.find("div", {"id" : "content"})
if content_div is None:
raise LookupError("Couldn't find a div named 'content_div'")
acts = get_acts(soup)
# Combine all act text into a single string. *Within* a single act, separate the
# lines by newlines. *Between* acts, separate them by double-newlines
        # we might need to stick an HTML line break after each \n
all_acts_text = '\n\n'.join(['===========================\n' + act['head'] + '\n' + act['body'] for act in acts])
# Start building our item
item = ET.Element('item')
# title tag
title = ET.SubElement(item, 'title')
title.text = get_raw_content(content_div, "h1", {"class" : "node-title"})
description = ET.SubElement(item, 'description')
description.text = get_raw_content(content_div, "div", {"class" : "description"}) + '\n' + all_acts_text
# pubDate tag
# Dates in the html are in the form of "Dec 22, 1995". Parse them to turn them into the RFC format
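        # e.g. "Dec 22, 1995" becomes "Fri, 22 Dec 1995 00:00:00 +0000"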
datestring = get_raw_content(content_div, "div", {"class" : "date"})
dateobj = datetime.strptime(datestring, "%b %d, %Y")
pubDate = ET.SubElement(item, 'pubDate')
pubDate.text = dateobj.strftime("%a, %d %b %Y 00:00:00 +0000")
url = Settings.local_audio_url(number) if Settings.CACHE_MP3S else Settings.remote_audio_url(number)
# link tag
link = ET.SubElement(item, 'link')
link.text = url
# guid tag
guid = ET.SubElement(item, 'guid')
guid.text = url
# enclosure tag (how to actually find the audio clip)
enclosure = ET.SubElement(item, 'enclosure')
enclosure.set('url',url)
enclosure.set('length',str(filesize))
enclosure.set('type','audio/mpeg')
# itunes:summary tag (this shows where the liner-notes or lyrics normally go)
# summary = ET.SubElement(item, 'itunes:summary')
# summary.text = all_acts_text
# subtitle = ET.SubElement(item, 'itunes:subtitle')
# subtitle.text = all_acts_text
# resultset = soup.find_all("div", {"class", "act-body"})
# print "Acts: {0}".format(len(resultset))
return item
except ValueError as e:
print "Caught an error when trying to process episode {0}".format(number)
raise Exception("Problem processing episode {0}".format(number))
########################################
def generate_xml():
tree = ET.parse(Settings.local_base_xml_filename())
root = tree.getroot()
channel = root.find('channel')
# Alter the title of this podcast to append ' (Cached)'
title = channel.find('title')
title.text += ' (Cached)'
# Remove any existing items from the channel tag in the base XML file
items = channel.findall("item")
for item in items:
channel.remove(item)
# Now... add every episode we've got
for number in range(1, Settings.get_highest_episode()+1):
print "Processing " + str(number)
try:
channel.append(process_episode(number))
except Exception as e:
print "Something bad happened while processing episode " + str(number)
print "{0}".format(e)
print "{0}".format(sys.exc_info()[0])
print "{0}".format(sys.exc_info()[1])
traceback.print_tb(sys.exc_info()[2])
#output = prettify(root).encode('utf-8')
#output = prettify(channel).encode('utf-8')
#with open(LOCAL_RSS_FILE, "w") as f:
# f.write(output)
tree.write(Settings.local_xml_filename())
print "You can download the mirrored podcast from:"
print " " + Settings.local_xml_url()
if __name__ == '__main__':
generate_xml()
|
|
import datetime
import functools
import json
import os
import sys
from typing import Any, Optional, cast
from unittest import mock
import freezegun # type: ignore
import pytest
from pip._vendor.packaging.version import parse as parse_version
from pip._internal import self_outdated_check
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.link import Link
from pip._internal.network.session import PipSession
from pip._internal.self_outdated_check import (
SelfCheckState,
logger,
pip_self_version_check,
)
from tests.lib.path import Path
class MockBestCandidateResult:
def __init__(self, best: InstallationCandidate) -> None:
self.best_candidate = best
class MockPackageFinder:
BASE_URL = "https://pypi.org/simple/pip-{0}.tar.gz"
PIP_PROJECT_NAME = "pip"
INSTALLATION_CANDIDATES = [
InstallationCandidate(
PIP_PROJECT_NAME,
"6.9.0",
Link(BASE_URL.format("6.9.0")),
),
InstallationCandidate(
PIP_PROJECT_NAME,
"3.3.1",
Link(BASE_URL.format("3.3.1")),
),
InstallationCandidate(
PIP_PROJECT_NAME,
"1.0",
Link(BASE_URL.format("1.0")),
),
]
@classmethod
def create(cls, *args: Any, **kwargs: Any) -> "MockPackageFinder":
return cls()
def find_best_candidate(self, project_name: str) -> MockBestCandidateResult:
return MockBestCandidateResult(self.INSTALLATION_CANDIDATES[0])
class MockDistribution:
def __init__(self, installer: str, version: str) -> None:
self.installer = installer
self.version = parse_version(version)
class MockEnvironment:
def __init__(self, installer: str, installed_version: Optional[str]) -> None:
self.installer = installer
self.installed_version = installed_version
def get_distribution(self, name: str) -> Optional[MockDistribution]:
if self.installed_version is None:
return None
return MockDistribution(self.installer, self.installed_version)
def _options() -> mock.Mock:
"""Some default options that we pass to
self_outdated_check.pip_self_version_check"""
return mock.Mock(
find_links=[],
index_url="default_url",
extra_index_urls=[],
no_index=False,
pre=False,
cache_dir="",
deprecated_features_enabled=[],
)
@pytest.mark.parametrize(
[
"stored_time",
"installed_ver",
"new_ver",
"installer",
"check_if_upgrade_required",
"check_warn_logs",
],
[
# Test we return None when installed version is None
("1970-01-01T10:00:00Z", None, "1.0", "pip", False, False),
# Need an upgrade - upgrade warning should print
("1970-01-01T10:00:00Z", "1.0", "6.9.0", "pip", True, True),
# Upgrade available, pip installed via rpm - warning should not print
("1970-01-01T10:00:00Z", "1.0", "6.9.0", "rpm", True, False),
# No upgrade - upgrade warning should not print
("1970-01-9T10:00:00Z", "6.9.0", "6.9.0", "pip", False, False),
],
)
def test_pip_self_version_check(
monkeypatch: pytest.MonkeyPatch,
stored_time: str,
installed_ver: Optional[str],
new_ver: str,
installer: str,
check_if_upgrade_required: bool,
check_warn_logs: bool,
) -> None:
monkeypatch.setattr(
self_outdated_check,
"get_default_environment",
functools.partial(MockEnvironment, installer, installed_ver),
)
monkeypatch.setattr(
self_outdated_check,
"PackageFinder",
MockPackageFinder,
)
monkeypatch.setattr(logger, "warning", mock.Mock())
monkeypatch.setattr(logger, "debug", mock.Mock())
fake_state = mock.Mock(
state={"last_check": stored_time, "pypi_version": installed_ver},
save=mock.Mock(),
)
monkeypatch.setattr(self_outdated_check, "SelfCheckState", lambda **kw: fake_state)
with freezegun.freeze_time(
"1970-01-09 10:00:00",
ignore=[
"six.moves",
"pip._vendor.six.moves",
"pip._vendor.requests.packages.urllib3.packages.six.moves",
],
):
pip_self_version_check(PipSession(), _options())
# See that we saved the correct version
if check_if_upgrade_required:
assert fake_state.save.call_args_list == [
mock.call(new_ver, datetime.datetime(1970, 1, 9, 10, 00, 00)),
]
elif installed_ver:
# Make sure no Exceptions
assert not cast(mock.Mock, logger.debug).call_args_list
# See that save was not called
assert fake_state.save.call_args_list == []
# Ensure we warn the user or not
if check_warn_logs:
assert cast(mock.Mock, logger.warning).call_count == 1
else:
assert cast(mock.Mock, logger.warning).call_count == 0
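# Known statefile names for the two keys exercised below; _get_statefile_name
# presumably derives them from a hash of the key, so these act as regression
# values.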
statefile_name_case_1 = "fcd2d5175dd33d5df759ee7b045264230205ef837bf9f582f7c3ada7"
statefile_name_case_2 = "902cecc0745b8ecf2509ba473f3556f0ba222fedc6df433acda24aa5"
@pytest.mark.parametrize(
"key,expected",
[
("/hello/world/venv", statefile_name_case_1),
("C:\\Users\\User\\Desktop\\venv", statefile_name_case_2),
],
)
def test_get_statefile_name_known_values(key: str, expected: str) -> None:
assert expected == self_outdated_check._get_statefile_name(key)
def _get_statefile_path(cache_dir: str, key: str) -> str:
return os.path.join(
cache_dir, "selfcheck", self_outdated_check._get_statefile_name(key)
)
def test_self_check_state_no_cache_dir() -> None:
state = SelfCheckState(cache_dir="")
assert state.state == {}
assert state.statefile_path is None
def test_self_check_state_key_uses_sys_prefix(monkeypatch: pytest.MonkeyPatch) -> None:
key = "helloworld"
monkeypatch.setattr(sys, "prefix", key)
state = self_outdated_check.SelfCheckState("")
assert state.key == key
def test_self_check_state_reads_expected_statefile(
monkeypatch: pytest.MonkeyPatch, tmpdir: Path
) -> None:
cache_dir = tmpdir / "cache_dir"
cache_dir.mkdir()
key = "helloworld"
statefile_path = _get_statefile_path(str(cache_dir), key)
last_check = "1970-01-02T11:00:00Z"
pypi_version = "1.0"
content = {
"key": key,
"last_check": last_check,
"pypi_version": pypi_version,
}
Path(statefile_path).parent.mkdir()
with open(statefile_path, "w") as f:
json.dump(content, f)
monkeypatch.setattr(sys, "prefix", key)
state = self_outdated_check.SelfCheckState(str(cache_dir))
assert state.state["last_check"] == last_check
assert state.state["pypi_version"] == pypi_version
def test_self_check_state_writes_expected_statefile(
monkeypatch: pytest.MonkeyPatch, tmpdir: Path
) -> None:
cache_dir = tmpdir / "cache_dir"
cache_dir.mkdir()
key = "helloworld"
statefile_path = _get_statefile_path(str(cache_dir), key)
last_check = datetime.datetime.strptime(
"1970-01-02T11:00:00Z", self_outdated_check.SELFCHECK_DATE_FMT
)
pypi_version = "1.0"
monkeypatch.setattr(sys, "prefix", key)
state = self_outdated_check.SelfCheckState(str(cache_dir))
state.save(pypi_version, last_check)
with open(statefile_path) as f:
saved = json.load(f)
expected = {
"key": key,
"last_check": last_check.strftime(self_outdated_check.SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
assert expected == saved
|
|
import warnings
import pytest
from pandas._config import config as cf
from pandas._config.config import OptionError
import pandas as pd
class TestConfig:
@classmethod
def setup_class(cls):
from copy import deepcopy
cls.cf = cf
cls.gc = deepcopy(getattr(cls.cf, "_global_config"))
cls.do = deepcopy(getattr(cls.cf, "_deprecated_options"))
cls.ro = deepcopy(getattr(cls.cf, "_registered_options"))
def setup_method(self, method):
setattr(self.cf, "_global_config", {})
setattr(self.cf, "options", self.cf.DictWrapper(self.cf._global_config))
setattr(self.cf, "_deprecated_options", {})
setattr(self.cf, "_registered_options", {})
# Our test fixture in conftest.py sets "chained_assignment"
# to "raise" only after all test methods have been setup.
# However, after this setup, there is no longer any
# "chained_assignment" option, so re-register it.
self.cf.register_option("chained_assignment", "raise")
def teardown_method(self, method):
setattr(self.cf, "_global_config", self.gc)
setattr(self.cf, "_deprecated_options", self.do)
setattr(self.cf, "_registered_options", self.ro)
def test_api(self):
# the pandas object exposes the user API
assert hasattr(pd, "get_option")
assert hasattr(pd, "set_option")
assert hasattr(pd, "reset_option")
assert hasattr(pd, "describe_option")
def test_is_one_of_factory(self):
v = self.cf.is_one_of_factory([None, 12])
v(12)
v(None)
msg = r"Value must be one of None\|12"
with pytest.raises(ValueError, match=msg):
v(1.1)
def test_register_option(self):
self.cf.register_option("a", 1, "doc")
# can't register an already registered option
msg = "Option 'a' has already been registered"
with pytest.raises(OptionError, match=msg):
self.cf.register_option("a", 1, "doc")
# can't register an already registered option
msg = "Path prefix to option 'a' is already an option"
with pytest.raises(OptionError, match=msg):
self.cf.register_option("a.b.c.d1", 1, "doc")
with pytest.raises(OptionError, match=msg):
self.cf.register_option("a.b.c.d2", 1, "doc")
# no python keywords
msg = "for is a python keyword"
with pytest.raises(ValueError, match=msg):
self.cf.register_option("for", 0)
with pytest.raises(ValueError, match=msg):
self.cf.register_option("a.for.b", 0)
# must be valid identifier (ensure attribute access works)
msg = "oh my goddess! is not a valid identifier"
with pytest.raises(ValueError, match=msg):
self.cf.register_option("Oh my Goddess!", 0)
# we can register options several levels deep
# without predefining the intermediate steps
# and we can define differently named options
# in the same namespace
self.cf.register_option("k.b.c.d1", 1, "doc")
self.cf.register_option("k.b.c.d2", 1, "doc")
def test_describe_option(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b", 1, "doc2")
self.cf.deprecate_option("b")
self.cf.register_option("c.d.e1", 1, "doc3")
self.cf.register_option("c.d.e2", 1, "doc4")
self.cf.register_option("f", 1)
self.cf.register_option("g.h", 1)
self.cf.register_option("k", 2)
self.cf.deprecate_option("g.h", rkey="k")
self.cf.register_option("l", "foo")
# non-existent keys raise KeyError
msg = r"No such keys\(s\)"
with pytest.raises(OptionError, match=msg):
self.cf.describe_option("no.such.key")
# we can get the description for any key we registered
assert "doc" in self.cf.describe_option("a", _print_desc=False)
assert "doc2" in self.cf.describe_option("b", _print_desc=False)
assert "precated" in self.cf.describe_option("b", _print_desc=False)
assert "doc3" in self.cf.describe_option("c.d.e1", _print_desc=False)
assert "doc4" in self.cf.describe_option("c.d.e2", _print_desc=False)
# if no doc is specified we get a default message
# saying "description not available"
assert "vailable" in self.cf.describe_option("f", _print_desc=False)
assert "vailable" in self.cf.describe_option("g.h", _print_desc=False)
assert "precated" in self.cf.describe_option("g.h", _print_desc=False)
assert "k" in self.cf.describe_option("g.h", _print_desc=False)
# default is reported
assert "foo" in self.cf.describe_option("l", _print_desc=False)
# current value is reported
assert "bar" not in self.cf.describe_option("l", _print_desc=False)
self.cf.set_option("l", "bar")
assert "bar" in self.cf.describe_option("l", _print_desc=False)
def test_case_insensitive(self):
self.cf.register_option("KanBAN", 1, "doc")
assert "doc" in self.cf.describe_option("kanbaN", _print_desc=False)
assert self.cf.get_option("kanBaN") == 1
self.cf.set_option("KanBan", 2)
assert self.cf.get_option("kAnBaN") == 2
# gets of non-existent keys fail
msg = r"No such keys\(s\): 'no_such_option'"
with pytest.raises(OptionError, match=msg):
self.cf.get_option("no_such_option")
self.cf.deprecate_option("KanBan")
assert self.cf._is_deprecated("kAnBaN")
def test_get_option(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("b.b", None, "doc2")
# gets of existing keys succeed
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
assert self.cf.get_option("b.b") is None
# gets of non-existent keys fail
msg = r"No such keys\(s\): 'no_such_option'"
with pytest.raises(OptionError, match=msg):
self.cf.get_option("no_such_option")
def test_set_option(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("b.b", None, "doc2")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
assert self.cf.get_option("b.b") is None
self.cf.set_option("a", 2)
self.cf.set_option("b.c", "wurld")
self.cf.set_option("b.b", 1.1)
assert self.cf.get_option("a") == 2
assert self.cf.get_option("b.c") == "wurld"
assert self.cf.get_option("b.b") == 1.1
msg = r"No such keys\(s\): 'no.such.key'"
with pytest.raises(OptionError, match=msg):
self.cf.set_option("no.such.key", None)
def test_set_option_empty_args(self):
msg = "Must provide an even number of non-keyword arguments"
with pytest.raises(ValueError, match=msg):
self.cf.set_option()
def test_set_option_uneven_args(self):
msg = "Must provide an even number of non-keyword arguments"
with pytest.raises(ValueError, match=msg):
self.cf.set_option("a.b", 2, "b.c")
def test_set_option_invalid_single_argument_type(self):
msg = "Must provide an even number of non-keyword arguments"
with pytest.raises(ValueError, match=msg):
self.cf.set_option(2)
def test_set_option_multiple(self):
self.cf.register_option("a", 1, "doc")
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("b.b", None, "doc2")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
assert self.cf.get_option("b.b") is None
self.cf.set_option("a", "2", "b.c", None, "b.b", 10.0)
assert self.cf.get_option("a") == "2"
assert self.cf.get_option("b.c") is None
assert self.cf.get_option("b.b") == 10.0
def test_validation(self):
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("d", 1, "doc", validator=self.cf.is_nonnegative_int)
self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_text)
msg = "Value must have type '<class 'int'>'"
with pytest.raises(ValueError, match=msg):
self.cf.register_option("a.b.c.d2", "NO", "doc", validator=self.cf.is_int)
self.cf.set_option("a", 2) # int is_int
self.cf.set_option("b.c", "wurld") # str is_str
self.cf.set_option("d", 2)
self.cf.set_option("d", None) # non-negative int can be None
# None not is_int
with pytest.raises(ValueError, match=msg):
self.cf.set_option("a", None)
with pytest.raises(ValueError, match=msg):
self.cf.set_option("a", "ab")
msg = "Value must be a nonnegative integer or None"
with pytest.raises(ValueError, match=msg):
self.cf.register_option(
"a.b.c.d3", "NO", "doc", validator=self.cf.is_nonnegative_int
)
with pytest.raises(ValueError, match=msg):
self.cf.register_option(
"a.b.c.d3", -2, "doc", validator=self.cf.is_nonnegative_int
)
msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>"
with pytest.raises(ValueError, match=msg):
self.cf.set_option("b.c", 1)
validator = self.cf.is_one_of_factory([None, self.cf.is_callable])
self.cf.register_option("b", lambda: None, "doc", validator=validator)
self.cf.set_option("b", "%.1f".format) # Formatter is callable
self.cf.set_option("b", None) # Formatter is none (default)
with pytest.raises(ValueError, match="Value must be a callable"):
self.cf.set_option("b", "%.1f")
def test_reset_option(self):
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_str)
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
self.cf.set_option("a", 2)
self.cf.set_option("b.c", "wurld")
assert self.cf.get_option("a") == 2
assert self.cf.get_option("b.c") == "wurld"
self.cf.reset_option("a")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "wurld"
self.cf.reset_option("b.c")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
def test_reset_option_all(self):
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_str)
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
self.cf.set_option("a", 2)
self.cf.set_option("b.c", "wurld")
assert self.cf.get_option("a") == 2
assert self.cf.get_option("b.c") == "wurld"
self.cf.reset_option("all")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b.c") == "hullo"
def test_deprecate_option(self):
# we can deprecate non-existent options
self.cf.deprecate_option("foo")
assert self.cf._is_deprecated("foo")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with pytest.raises(KeyError, match="No such keys.s.: 'foo'"):
self.cf.get_option("foo")
assert len(w) == 1 # should have raised one warning
assert "deprecated" in str(w[-1]) # we get the default message
self.cf.register_option("a", 1, "doc", validator=self.cf.is_int)
self.cf.register_option("b.c", "hullo", "doc2")
self.cf.register_option("foo", "hullo", "doc2")
self.cf.deprecate_option("a", removal_ver="nifty_ver")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cf.get_option("a")
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the default message
assert "nifty_ver" in str(w[-1]) # with the removal_ver quoted
msg = "Option 'a' has already been defined as deprecated"
with pytest.raises(OptionError, match=msg):
self.cf.deprecate_option("a")
self.cf.deprecate_option("b.c", "zounds!")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cf.get_option("b.c")
assert len(w) == 1 # should have raised one warning
assert "zounds!" in str(w[-1]) # we get the custom message
# test rerouting keys
self.cf.register_option("d.a", "foo", "doc2")
self.cf.register_option("d.dep", "bar", "doc2")
assert self.cf.get_option("d.a") == "foo"
assert self.cf.get_option("d.dep") == "bar"
self.cf.deprecate_option("d.dep", rkey="d.a") # reroute d.dep to d.a
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert self.cf.get_option("d.dep") == "foo"
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.cf.set_option("d.dep", "baz") # should overwrite "d.a"
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert self.cf.get_option("d.dep") == "baz"
assert len(w) == 1 # should have raised one warning
assert "eprecated" in str(w[-1]) # we get the custom message
def test_config_prefix(self):
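        # Inside the config_prefix("base") block, option names are implicitly
        # prefixed, so register/get/set_option("a") actually operate on
        # "base.a"; the fully qualified gets below confirm this.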
with self.cf.config_prefix("base"):
self.cf.register_option("a", 1, "doc1")
self.cf.register_option("b", 2, "doc2")
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b") == 2
self.cf.set_option("a", 3)
self.cf.set_option("b", 4)
assert self.cf.get_option("a") == 3
assert self.cf.get_option("b") == 4
assert self.cf.get_option("base.a") == 3
assert self.cf.get_option("base.b") == 4
assert "doc1" in self.cf.describe_option("base.a", _print_desc=False)
assert "doc2" in self.cf.describe_option("base.b", _print_desc=False)
self.cf.reset_option("base.a")
self.cf.reset_option("base.b")
with self.cf.config_prefix("base"):
assert self.cf.get_option("a") == 1
assert self.cf.get_option("b") == 2
def test_callback(self):
k = [None]
v = [None]
def callback(key):
k.append(key)
v.append(self.cf.get_option(key))
self.cf.register_option("d.a", "foo", cb=callback)
self.cf.register_option("d.b", "foo", cb=callback)
del k[-1], v[-1]
self.cf.set_option("d.a", "fooz")
assert k[-1] == "d.a"
assert v[-1] == "fooz"
del k[-1], v[-1]
self.cf.set_option("d.b", "boo")
assert k[-1] == "d.b"
assert v[-1] == "boo"
del k[-1], v[-1]
self.cf.reset_option("d.b")
assert k[-1] == "d.b"
def test_set_ContextManager(self):
def eq(val):
assert self.cf.get_option("a") == val
self.cf.register_option("a", 0)
eq(0)
with self.cf.option_context("a", 15):
eq(15)
with self.cf.option_context("a", 25):
eq(25)
eq(15)
eq(0)
self.cf.set_option("a", 17)
eq(17)
# Test that option_context can be used as a decorator too (#34253).
@self.cf.option_context("a", 123)
def f():
eq(123)
f()
def test_attribute_access(self):
holder = []
def f3(key):
holder.append(True)
self.cf.register_option("a", 0)
self.cf.register_option("c", 0, cb=f3)
options = self.cf.options
assert options.a == 0
with self.cf.option_context("a", 15):
assert options.a == 15
options.a = 500
assert self.cf.get_option("a") == 500
self.cf.reset_option("a")
assert options.a == self.cf.get_option("a", 0)
msg = "You can only set the value of existing options"
with pytest.raises(OptionError, match=msg):
options.b = 1
with pytest.raises(OptionError, match=msg):
options.display = 1
# make sure callback kicks when using this form of setting
options.c = 1
assert len(holder) == 1
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
# See https://github.com/pandas-dev/pandas/issues/8514
original_value = 60
context_value = 10
option_name = "a"
self.cf.register_option(option_name, original_value)
# Ensure creating contexts didn't affect the current context.
ctx = self.cf.option_context(option_name, context_value)
assert self.cf.get_option(option_name) == original_value
# Ensure the correct value is available inside the context.
with ctx:
assert self.cf.get_option(option_name) == context_value
# Ensure the current context is reset
assert self.cf.get_option(option_name) == original_value
def test_dictwrapper_getattr(self):
options = self.cf.options
# GH 19789
with pytest.raises(OptionError, match="No such option"):
options.bananas
assert not hasattr(options, "bananas")
|
|
"""
Command line interface to RR.
"""
import argparse
import datetime
from .active import ActiveRR
from .brrr import BestRace
from .crrr import CoolRunning
from .csrr import CompuScore
from .nyrr import NewYorkRR
def run_active():
the_description = 'Process Active race results'
parser = argparse.ArgumentParser(description=the_description)
parser.add_argument('-d', '--day', dest='day', nargs=2, help='day range')
parser.add_argument('-m', '--month', dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
parser.add_argument('-o', '--output', dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('-s', '--states',
dest='states',
nargs='+',
default=['NJ'],
help='state, default is NJ')
parser.add_argument('-y', '--year', dest='year',
default=datetime.date.today().year, help='year')
parser.add_argument('--ml', dest='membership_list',
help='membership list', required=True)
parser.add_argument('--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
states = [state.upper() for state in args.states]
if args.day is not None:
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
else:
# Make the range the entire month up until now.
start_date = datetime.date(year, month, 1)
stop_date = datetime.date(year, month, datetime.datetime.now().day)
o = ActiveRR(date_range=[start_date, stop_date],
membership_list=args.membership_list,
verbose=args.verbose,
states=states,
output_file=args.output_file)
o.run()
def run_bestrace():
    # --rl (race list) cannot be combined with -d/--day.
    # -y and -m have defaults.
the_description = 'Process BestRace race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--day', dest='day',
nargs=2, help='day range')
parser.add_argument('--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-m', '--month', dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
parser.add_argument('-o', '--output', dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('-y', '--year', dest='year',
default=datetime.date.today().year, help='year')
parser.add_argument('--ml', dest='membership_list',
help='membership list', required=True)
group.add_argument('--rl', dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
if args.day is not None:
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
else:
# Make the range the entire month up until now.
start_date = datetime.date(year, month, 1)
stop_date = datetime.date(year, month, datetime.datetime.now().day)
o = BestRace(start_date=start_date,
stop_date=stop_date,
membership_list=args.membership_list,
race_list=args.race_list,
output_file=args.output_file,
verbose=args.verbose)
o.run()
def run_coolrunning():
    # --rl (race list) cannot be combined with -d/--day.
    # -y and -m have defaults.
the_description = 'Process Coolrunning race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
parser.add_argument('-y', '--year',
dest='year',
default=datetime.date.today().year,
help='year')
parser.add_argument('-m', '--month',
dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
group.add_argument('-d', '--day',
dest='day',
nargs=2,
help='day range')
parser.add_argument('-v', '--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-o', '--output',
dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('-s', '--states',
dest='states',
nargs='+',
default=['ma'],
help='state, default is ma')
parser.add_argument('--ml',
dest='membership_list',
help='membership list',
required=True)
group.add_argument('--rl',
dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
if args.day is not None:
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
else:
start_date = None
stop_date = None
o = CoolRunning(start_date=start_date,
stop_date=stop_date,
membership_list=args.membership_list,
race_list=args.race_list,
output_file=args.output_file,
states=args.states,
verbose=args.verbose)
o.run()
def run_compuscore():
    # --rl (race list) cannot be combined with -d/--day.
the_description = 'Process Compuscore race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
parser.add_argument('-y', '--year',
dest='year',
default=datetime.date.today().year,
help='year')
parser.add_argument('-m', '--month',
dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
group.add_argument('-d', '--day',
dest='day',
default=[datetime.date.today().day,
datetime.date.today().day],
nargs=2,
help='day range')
parser.add_argument('-v', '--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-o', '--output',
dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('--ml', dest='membership_list',
help='membership list', required=True)
group.add_argument('--rl', dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
o = CompuScore(start_date=start_date,
stop_date=stop_date,
membership_list=args.membership_list,
race_list=args.race_list,
output_file=args.output_file,
verbose=args.verbose)
o.run()
def run_nyrr():
    # --rl (race list) cannot be combined with -d/--day.
the_description = 'Process NYRR race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
parser.add_argument('-y', '--year',
dest='year',
default=datetime.date.today().year,
help='year')
parser.add_argument('-m', '--month',
dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
group.add_argument('-d', '--day',
dest='day',
default=[datetime.date.today().day,
datetime.date.today().day],
nargs=2,
help='day range')
parser.add_argument('-v', '--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-o', '--output',
dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('--team',
dest='team',
default='RARI',
help='team code (i.e. "RARI")')
group.add_argument('--rl', dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
o = NewYorkRR(start_date=start_date,
stop_date=stop_date,
team=args.team,
race_list=args.race_list,
output_file=args.output_file,
verbose=args.verbose)
o.run()
|
|
#!/usr/bin/env python
# inspired by - http://mxr.mozilla.org/mozilla-central/source/testing/xpcshell/runxpcshelltests.py
import sys, os, os.path, signal, re
import jsshellhelper
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
# Uses jsshell https://developer.mozilla.org/en/Introduction_to_the_JavaScript_shell
class ProcessingTests(object):
testharnessdir = os.path.dirname(os.path.abspath(__file__))
toolsdir = os.path.dirname(os.path.abspath(__file__))
testsPassed = 0
testsFailed = 0
testsFailedKnown = 0
def __init__(self):
self.knownFailures = set()
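        # KNOWN-FAILURES lists one failing test per line as a path relative to
        # the repository root (e.g. test/parser/...); blank lines and lines
        # starting with '#' are ignored, matching the parsing below.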
f = open(os.path.join(self.toolsdir, '..', 'test', 'KNOWN-FAILURES'), 'r')
for line in f.readlines():
if line.startswith('#') or line.lstrip().rstrip() == '':
continue
self.knownFailures.add(line.rstrip('\r\n'))
f.close()
def isKnownFailure(self, testpath):
# Assumes abs path for testpath
normalpath = pathNormalizer.normalize(testpath)
if normalpath[normalpath.index('/test/')+1:] in self.knownFailures:
return True
else:
return False
def shouldSkipTest(self, testPattern, testPath):
if testPattern:
# we support *.js and * .pde tests, as well as passing dirs.
# assume a dir name doesn't end with .js or .pde
if testPattern.endswith('.js') or testPattern.endswith('.pde'):
if testPath.endswith(testPattern):
return False
else:
# assume this is a dir, so just look for the pattern in the path
if testPath.find(testPattern) > -1:
return False
return True
def runParserTests(self, jsshell, testPattern=None, summaryOnly=False, processingPath=None):
"""Get all .pjs in test/parser/ files as JSON, and run through the test harness, faking a DOM"""
jsshell = os.path.abspath(jsshell)
parsertestdir = os.path.join(self.toolsdir, '..', 'test', 'parser')
processing_js = None
if processingPath:
processing_js =os.path.join(self.toolsdir, '..', processingPath.replace('/', os.sep))
else:
processing_js = os.path.join(self.toolsdir, '..', 'processing.js')
for root, dirs, filenames in os.walk(parsertestdir):
for filename in filenames:
sys.stdout.flush()
sys.stderr.flush()
# If a single test file name is given, only test that file
fullpath = os.path.abspath(os.path.join(root, filename))
if testPattern and self.shouldSkipTest(testPattern, fullpath):
continue
if filename.endswith('.pde'):
tmpFile = jsshellhelper.createEscapedFile(fullpath)
one_test = 'var parserTest = {name:"' + fullpath + '", body: __unescape_string()};\n'
testCmd = [jsshell,
'-f', os.path.join(self.toolsdir, 'fake-dom.js'),
'-f', processing_js, #os.path.join(self.toolsdir, '..', 'processing.js'),
'-f', os.path.join(self.toolsdir, 'cleaner.js'),
'-f', tmpFile,
'-e', one_test,
'-f', os.path.join(self.toolsdir, 'test-harness.js')]
proc = Popen(testCmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if stderr:
# we failed to parse, and died in the js shell
if summaryOnly:
if self.isKnownFailure(fullpath):
sys.stdout.write('K')
self.testsFailedKnown += 1
else:
sys.stdout.write('F')
self.testsFailed += 1
sys.stdout.flush()
else:
if self.isKnownFailure(fullpath):
print "KNOWN-FAILURE: " + fullpath
self.testsFailedKnown += 1
else:
print "TEST-FAILED: " + fullpath
print stderr
self.testsFailed += 1
elif stdout:
# TEST-SUMMARY: passed/failed
m = re.search('^TEST-SUMMARY: (\d+)/(\d+)', stdout, re.MULTILINE)
if m and m.group:
self.testsPassed += int(m.group(1))
if self.isKnownFailure(fullpath):
self.testsFailedKnown += int(m.group(2))
else:
self.testsFailed += int(m.group(2))
if int(m.group(2)) > 0:
if summaryOnly:
if self.isKnownFailure(fullpath):
sys.stdout.write('K')
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
if self.isKnownFailure(fullpath):
print "KNOWN-FAILURE: " + fullpath
else:
print "TEST-FAILED: " + fullpath
print re.sub("\n?TEST-SUMMARY: (\d+)\/(\d+)\n?", "", stdout)
print stderr
else:
if summaryOnly:
if self.isKnownFailure(fullpath):
# we should pass if we are expecting to fail!
sys.stdout.write('!')
else:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.isKnownFailure(fullpath):
# we shouldn't pass if we are expecting to fail!
print "TEST-FAILED (known failure passed!): " + fullpath
self.testsPassed -= 1
self.testsFailed += 1
else:
print "TEST-PASSED: " + fullpath
else:
# Shouldn't happen!
self.testsFailed += 1
if summaryOnly:
sys.stdout.write('F')
sys.stdout.flush()
else:
print "TEST-FAILED: " + fullpath + ". Test died:"
print stdout
jsshellhelper.cleanUp(tmpFile)
def runUnitTests(self, jsshell, testPattern=None, summaryOnly=False, processingPath=None):
"""Run all .js unit tests in test/unit through the test harness."""
# TODO: add support for doing .pjs unit tests.
unittestdir = os.path.join(self.toolsdir, '..', 'test', 'unit')
jsshell = os.path.abspath(jsshell)
processing_js = None
if processingPath:
processing_js =os.path.join(self.toolsdir, '..', processingPath.replace('/', os.sep))
else:
processing_js = os.path.join(self.toolsdir, '..', 'processing.js')
for root, dirs, filenames in os.walk(unittestdir):
for filename in filenames:
sys.stdout.flush()
sys.stderr.flush()
# If a single test file name is given, only test that file
fullpath = os.path.abspath(os.path.join(root, filename))
if testPattern and self.shouldSkipTest(testPattern, fullpath):
continue
tmpFile = None
testCmd = None
if filename.endswith('.js'):
# Read the test file so we can wrap it properly:
f = open(fullpath, 'r')
testFile = ''.join(f.readlines()).replace("'", "\'").replace('"', '\"');
f.close()
# We wrap all tests in a function so as to replace the context with the Processing context
wrapper = "function _testWrapper(ctx) { with (ctx) { %s \n _runTest(); }}\n" % testFile
testCmd = [jsshell, '-e', 'var _testName = "%s"; %s;' % (fullpath, wrapper),
'-f', os.path.join(self.toolsdir, 'fake-dom.js'),
'-f', processing_js, #os.path.join(self.toolsdir, '..', 'processing.js'),
'-f', os.path.join(self.toolsdir, 'test-harness.js')]
elif filename.endswith('.pde'):
tmpFile = jsshellhelper.createEscapedFile(fullpath)
testCmd = [jsshell,
'-f', os.path.join(self.toolsdir, 'fake-dom.js'),
'-f', processing_js, #os.path.join(self.toolsdir, '..', 'processing.js'),
'-f', os.path.join(self.toolsdir, 'test-harness-lib.js'),
'-f', os.path.join(self.toolsdir, 'cleaner.js'),
'-f', tmpFile,
'-e', 'eval(Processing(canvas, \'UnitTests();\' + __unescape_string() + \'_printTestSummary();\'));']
else:
continue
proc = Popen(testCmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if stdout:
# TEST-SUMMARY: passed/failed
m = re.search('^TEST-SUMMARY: (\d+)/(\d+)', stdout, re.MULTILINE)
if m and m.group:
self.testsPassed += int(m.group(1))
if self.isKnownFailure(fullpath):
self.testsFailedKnown += int(m.group(2))
else:
self.testsFailed += int(m.group(2))
if int(m.group(2)) > 0:
if summaryOnly:
if self.isKnownFailure(fullpath):
sys.stdout.write('K')
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
if self.isKnownFailure(fullpath):
print "KNOWN-FAILURE: " + fullpath
else:
print "TEST-FAILED: " + fullpath
print re.sub("\n?TEST-SUMMARY: (\d+)\/(\d+)\n?", "", stdout)
print stderr
else:
if summaryOnly:
if self.isKnownFailure(fullpath):
# we should pass if we are expecting to fail!
sys.stdout.write('!')
else:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.isKnownFailure(fullpath):
# we shouldn't pass if we are expecting to fail!
print "TEST-FAILED (known failure passed!): " + fullpath
self.testsPassed -= 1
self.testsFailed += 1
else:
print "TEST-PASSED: " + fullpath
else:
# Shouldn't happen!
self.testsFailed += 1
if summaryOnly:
sys.stdout.write('F')
sys.stdout.flush()
else:
print "TEST-FAILED: " + fullpath + ". Test died:"
print stdout
elif stderr:
# Shouldn't happen!
self.testsFailed += 1
if summaryOnly:
sys.stdout.write('F')
sys.stdout.flush()
else:
print "TEST-FAILED: " + fullpath + ". Test exited early:"
print stderr
if tmpFile:
jsshellhelper.cleanUp(tmpFile)
class DefaultPathNormalizer:
def normalize(self, path):
return path
class WinPathNormalizer:
def normalize(self, path):
backslsh = path.replace('\\', '/')
if backslsh[1] == ':':
return '/' + backslsh[0] + backslsh[2:]
else:
return backslsh
def createPathNormalizer():
if os.sep == '\\':
return WinPathNormalizer()
else:
return DefaultPathNormalizer()
# normalizes path to standard form: /dir/subdir/file.ext
pathNormalizer = createPathNormalizer()
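# Example of what WinPathNormalizer does: 'C:\processing\test\unit\foo.js'
# becomes '/C/processing/test/unit/foo.js', which is the form the
# KNOWN-FAILURES comparison in isKnownFailure() expects.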
def main():
parser = OptionParser()
parser.add_option("-s", "--summary-only",
action="store_true", dest="summaryOnly", default=False,
help="only print test summary info.")
parser.add_option("-p", "--parser-only",
action="store_true", dest="parserOnly", default=False,
help="only run parser tests.")
parser.add_option("-u", "--unit-only",
action="store_true", dest="unitOnly", default=False,
help="only run unit tests.")
parser.add_option("-t", "--single-test",
type="string", dest="testPattern", default=None,
help="single test filename or dir to be tested")
parser.add_option("-l", "--library",
type="string", dest="processingPath", default=None,
help="use a different processing.js library")
options, args = parser.parse_args()
if len(args) < 1:
print >>sys.stderr, """Usage: %s <path to jsshell>
or: %s --test singletest.pjs <path to jsshell>""" % (sys.argv[0], sys.argv[0])
sys.exit(1)
ptests = ProcessingTests()
if options.parserOnly:
ptests.runParserTests(args[0], testPattern=options.testPattern, summaryOnly=options.summaryOnly, processingPath=options.processingPath)
elif options.unitOnly:
ptests.runUnitTests(args[0], testPattern=options.testPattern, summaryOnly=options.summaryOnly, processingPath=options.processingPath)
else:
ptests.runParserTests(args[0], testPattern=options.testPattern, summaryOnly=options.summaryOnly, processingPath=options.processingPath)
ptests.runUnitTests(args[0], testPattern=options.testPattern, summaryOnly=options.summaryOnly, processingPath=options.processingPath)
print "\nTEST SUMMARY: %s passed, %s failed (%s known), %s total" % (ptests.testsPassed,
ptests.testsFailed,
ptests.testsFailedKnown,
(ptests.testsPassed + ptests.testsFailed + ptests.testsFailedKnown))
if __name__ == '__main__':
main()
|
|
#
# Copyright 2014 Cisco Systems,Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context
from oslotest import base
from oslotest import mockpatch
from ceilometer.agent import manager
from ceilometer.agent import plugin_base
from ceilometer.network.services import discovery
from ceilometer.network.services import lbaas
class _BaseTestLBPollster(base.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def setUp(self):
super(_BaseTestLBPollster, self).setUp()
self.addCleanup(mock.patch.stopall)
self.context = context.get_admin_context()
self.manager = manager.AgentManager()
plugin_base._get_keystone = mock.Mock()
plugin_base._get_keystone.service_catalog.get_endpoints = (
mock.MagicMock(return_value={'network': mock.ANY}))
class TestLBPoolPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBPoolPollster, self).setUp()
self.pollster = lbaas.LBPoolPollster()
fake_pools = self.fake_get_pools()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'pool_get_all',
return_value=fake_pools))
@staticmethod
def fake_get_pools():
return [{'status': 'ACTIVE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'INACTIVE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb02',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'PENDING_CREATE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb03',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'UNKNOWN',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb03',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
{'status': 'error',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb_error',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
]
def test_pool_get_samples(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_pools()))
self.assertEqual(3, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_pools()[0][field],
samples[0].resource_metadata[field])
def test_pool_volume(self):
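        # The pollster appears to map pool status to sample volume (ACTIVE -> 1,
        # INACTIVE -> 0, PENDING_CREATE -> 2, while UNKNOWN/error pools yield no
        # sample); the indices below follow the ordering in fake_get_pools().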
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_pools()))
self.assertEqual(1, samples[0].volume)
self.assertEqual(0, samples[1].volume)
self.assertEqual(2, samples[2].volume)
def test_get_pool_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_pools()))
self.assertEqual(set(['network.services.lb.pool']),
set([s.name for s in samples]))
def test_pool_discovery(self):
discovered_pools = discovery.LBPoolsDiscovery().discover(self.manager)
self.assertEqual(4, len(discovered_pools))
for pool in self.fake_get_pools():
if pool['status'] == 'error':
self.assertNotIn(pool, discovered_pools)
else:
self.assertIn(pool, discovered_pools)
class TestLBVipPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBVipPollster, self).setUp()
self.pollster = lbaas.LBVipPollster()
fake_vips = self.fake_get_vips()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'vip_get_all',
return_value=fake_vips))
@staticmethod
def fake_get_vips():
return [{'status': 'ACTIVE',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.2',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip'},
{'status': 'INACTIVE',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.3',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip02'},
{'status': 'PENDING_CREATE',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.4',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip03'},
{'status': 'UNKNOWN',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.8',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip03'},
{'status': 'error',
'status_description': None,
'protocol': 'HTTP',
'description': '',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'connection_limit': -1,
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'session_persistence': None,
'address': '10.0.0.8',
'protocol_port': 80,
'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'myvip_error'},
]
def test_vip_get_samples(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vips()))
self.assertEqual(3, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_vips()[0][field],
samples[0].resource_metadata[field])
def test_pool_volume(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vips()))
self.assertEqual(1, samples[0].volume)
self.assertEqual(0, samples[1].volume)
self.assertEqual(2, samples[2].volume)
def test_get_vip_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vips()))
self.assertEqual(set(['network.services.lb.vip']),
set([s.name for s in samples]))
def test_vip_discovery(self):
discovered_vips = discovery.LBVipsDiscovery().discover(self.manager)
self.assertEqual(4, len(discovered_vips))
        for vip in self.fake_get_vips():
            if vip['status'] == 'error':
                self.assertNotIn(vip, discovered_vips)
            else:
                self.assertIn(vip, discovered_vips)
class TestLBMemberPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBMemberPollster, self).setUp()
self.pollster = lbaas.LBMemberPollster()
fake_members = self.fake_get_members()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'member_get_all',
return_value=fake_members))
@staticmethod
def fake_get_members():
return [{'status': 'ACTIVE',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.3',
'status_description': None,
'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'INACTIVE',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.5',
'status_description': None,
'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'PENDING_CREATE',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.6',
'status_description': None,
'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'UNKNOWN',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.6',
'status_description': None,
'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
{'status': 'error',
'protocol_port': 80,
'weight': 1,
'admin_state_up': True,
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'address': '10.0.0.6',
'status_description': None,
'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
]
def test_get_samples_not_empty(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_members()))
self.assertEqual(3, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_members()[0][field],
samples[0].resource_metadata[field])
def test_pool_volume(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_members()))
self.assertEqual(1, samples[0].volume)
self.assertEqual(0, samples[1].volume)
self.assertEqual(2, samples[2].volume)
def test_get_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_members()))
self.assertEqual(set(['network.services.lb.member']),
set([s.name for s in samples]))
def test_members_discovery(self):
discovered_members = discovery.LBMembersDiscovery().discover(
self.manager)
self.assertEqual(4, len(discovered_members))
        for member in self.fake_get_members():
            if member['status'] == 'error':
                self.assertNotIn(member, discovered_members)
            else:
                self.assertIn(member, discovered_members)
class TestLBHealthProbePollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBHealthProbePollster, self).setUp()
self.pollster = lbaas.LBHealthMonitorPollster()
fake_health_monitor = self.fake_get_health_monitor()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'health_monitor_get_all',
return_value=fake_health_monitor))
@staticmethod
def fake_get_health_monitor():
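        # A single fake PING health monitor; the pollster is expected to emit
        # exactly one sample for it (see test_get_samples_not_empty below).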
return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365',
'admin_state_up': True,
'tenant_id': "d5d2817dae6b42159be9b665b64beb0e",
'delay': 2,
'max_retries': 5,
'timeout': 5,
'pools': [],
'type': 'PING',
}]
def test_get_samples_not_empty(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_health_monitor()))
self.assertEqual(1, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_health_monitor()[0][field],
samples[0].resource_metadata[field])
def test_get_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
self.fake_get_health_monitor()))
self.assertEqual(set(['network.services.lb.health_monitor']),
set([s.name for s in samples]))
def test_probes_discovery(self):
discovered_probes = discovery.LBHealthMonitorsDiscovery().discover(
self.manager)
self.assertEqual(discovered_probes, self.fake_get_health_monitor())
class TestLBStatsPollster(_BaseTestLBPollster):
def setUp(self):
super(TestLBStatsPollster, self).setUp()
fake_pool_stats = self.fake_pool_stats()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'pool_stats',
return_value=fake_pool_stats))
fake_pools = self.fake_get_pools()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'pool_get_all',
return_value=fake_pools))
@staticmethod
def fake_get_pools():
return [{'status': 'ACTIVE',
'lb_method': 'ROUND_ROBIN',
'protocol': 'HTTP',
'description': '',
'health_monitors': [],
'members': [],
'provider': 'haproxy',
'status_description': None,
'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
'name': 'mylb',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'health_monitors_status': []},
]
@staticmethod
def fake_pool_stats():
        return {'stats': {'active_connections': 2,
                          'bytes_in': 1,
                          'bytes_out': 3,
                          'total_connections': 4
                          }
                }
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def _check_get_samples(self, factory, sample_name, expected_volume,
expected_type):
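        """Poll one LB statistics meter against the fake pools and verify the
        resulting sample's name, volume and type.
        """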
pollster = factory()
cache = {}
samples = list(pollster.get_samples(self.manager, cache,
self.fake_get_pools()))
self.assertEqual(1, len(samples))
self.assertIsNotNone(samples)
self.assertIn('lbstats', cache)
self.assertEqual(set([sample_name]), set([s.name for s in samples]))
match = [s for s in samples if s.name == sample_name]
self.assertEqual(1, len(match), 'missing counter %s' % sample_name)
self.assertEqual(expected_volume, match[0].volume)
self.assertEqual(expected_type, match[0].type)
def test_lb_total_connections(self):
self._check_get_samples(lbaas.LBTotalConnectionsPollster,
'network.services.lb.total.connections',
                                4, 'cumulative')
def test_lb_active_connections(self):
self._check_get_samples(lbaas.LBActiveConnectionsPollster,
'network.services.lb.active.connections',
                                2, 'gauge')
def test_lb_incoming_bytes(self):
self._check_get_samples(lbaas.LBBytesInPollster,
'network.services.lb.incoming.bytes',
                                1, 'cumulative')
def test_lb_outgoing_bytes(self):
self._check_get_samples(lbaas.LBBytesOutPollster,
'network.services.lb.outgoing.bytes',
                                3, 'cumulative')
|
|
# -*- coding: utf-8 -*-
"""
@name: new_program.py
@vers: 0.1.0
@author: dthor
@created: Tue Dec 30 12:46:46 2014
@descr: A new file
Usage:
new_program.py
Options:
-h --help # Show this screen.
--version # Show version.
"""
from __future__ import print_function, division
from __future__ import absolute_import
#from __future__ import unicode_literals
#from docopt import docopt
import wx
from configobj import ConfigObj
import os
__author__ = "Douglas Thor"
__version__ = "v0.1.0"
# Section, Item, Hovertext, Default Value
# Note that this is in display order
OPTIONS = (("Warning Limits", "High", "", 30),
("Warning Limits", "Low", "", 10),
("Warning Limits", "Email Address", "", ""),
("Critical Limits", "High", "", 50),
("Critical Limits", "Low", "", 40),
("Critical Limits", "Email Address", "", ""),
("Misc. Options", "Read Frequency (s)", "", 10),
("Misc. Options", "Data Path", "", ""),
("Misc. Options", "Display Length (hr)", "", 30),
("Misc. Options", "Moving Average Length (pts)", "", 60),
("Misc. Options", "Email Interval (s)", "", 3600),
("Misc. Options", "Calculate Maximum over X points", "", 60),
)
class MonitorPreferences(wx.Dialog):
""" The main panel for the Monitor Preferences """
def __init__(self,
parent
):
wx.Dialog.__init__(self,
parent,
wx.ID_ANY,
title="Preferences",
size=(500, 500),
)
self.parent = parent
self.controls = {}
self.init_ui()
def init_ui(self):
""" Create the UI """
self.panel = wx.Panel(self)
self.vbox = wx.BoxSizer(wx.VERTICAL)
prev_sect_name = ""
for sect_name, item, hovertext, _ in OPTIONS:
# I don't know if I like this, but it works.
if prev_sect_name != sect_name:
self.controls[sect_name] = {}
prev_sect_name = sect_name
sbox = wx.StaticBox(self, wx.ID_ANY, sect_name)
svbox = wx.StaticBoxSizer(sbox, wx.VERTICAL)
self.vbox.Add(svbox)
            control = PreferencesItem(self, wx.ID_ANY, item, hovertext)
svbox.Add(control)
self.controls[sect_name][item] = control
self.read_config_file()
self.btn_box = wx.BoxSizer(wx.HORIZONTAL)
self.btn_apply = wx.Button(self, wx.ID_APPLY)
self.btn_ok = wx.Button(self, wx.ID_OK)
self.btn_cancel = wx.Button(self, wx.ID_CANCEL)
self.btn_box.Add((20, -1), 1, wx.EXPAND)
self.btn_box.Add(self.btn_apply, 0)
self.btn_box.Add(self.btn_ok, 0)
self.btn_box.Add(self.btn_cancel, 0)
self.vbox.Add(self.btn_box)
self.SetSizer(self.vbox)
self._bind_events()
def _bind_events(self):
""" Bind various events """
# Buttons
# Since I use the wx.ID_OK (and other) ids for the buttons, they
# are automatically bound to various events. All I need to do is
# override those events. However, it seems that both ID_OK and
# ID_CANCEL just call the EVT_CLOSE event, so I guess I need to
# override them anyway. I also don't know what ID_APPLY calls.
self.btn_apply.Bind(wx.EVT_BUTTON, self.on_apply)
self.btn_ok.Bind(wx.EVT_BUTTON, self.on_ok)
self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel)
def on_apply(self, event):
""" Actions to perform when the Apply button is pressed """
print("Apply pressed")
self.update_config_file("config.ini")
def on_ok(self, event):
""" Actions to perform when the OK button is pressed """
print("OK pressed")
self.update_config_file("config.ini")
self.on_close(event)
def on_cancel(self, event):
""" Actions to perform when the Cancel button is pressed """
print("Cancel pressed")
self.on_close(event)
def on_close(self, event):
""" Close the window """
print("close called")
self.Destroy()
def read_config_file(self, fname=None):
if fname is None:
fname = "config.ini"
if not os.path.exists(fname):
            # create the config file, populated with our default parameters
            print("Config file not found")
            self.create_config_file(fname)
else:
# read from the config file
print("Config file found!")
config = ConfigObj(fname)
for sect_name, item, hovertext, _ in OPTIONS:
val = config[sect_name][item]
self.controls[sect_name][item].ctrl.SetValue(val)
def create_config_file(self, fname):
""" Creates the configuration file """
print("Creating config file")
config = ConfigObj()
config.filename = fname
prev_sect_name = ""
for sect_name, item, _, default in OPTIONS:
# I don't know if I like this, but it works.
if prev_sect_name != sect_name:
config[sect_name] = {}
prev_sect_name = sect_name
config[sect_name][item] = default
self.controls[sect_name][item].ctrl.SetValue(str(default))
config.write()
def update_config_file(self, fname):
""" Update the configuration file with current control values """
print("Updating config file")
config = ConfigObj()
config.filename = fname
prev_sect_name = ""
for sect_name, item, _, _ in OPTIONS:
# I don't know if I like this, but it works.
if prev_sect_name != sect_name:
config[sect_name] = {}
prev_sect_name = sect_name
val = self.controls[sect_name][item].ctrl.GetValue()
config[sect_name][item] = val
config.write()
class PreferencesItem(wx.Panel):
""" A Preferences Item """
def __init__(self, parent, wx_id, label, hovertext=""):
wx.Panel.__init__(self, parent)
self.label = label
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add((30, -1), 0, wx.EXPAND)
self.lbl = wx.StaticText(self,
label=self.label,
size=(220, -1),
)
        self.ctrl = wx.TextCtrl(self, wx.ID_ANY, "0", size=(300, -1))
        if hovertext:
            self.ctrl.SetToolTip(wx.ToolTip(hovertext))
self.hbox.Add(self.lbl, 0, wx.EXPAND)
self.hbox.Add(self.ctrl, 0, wx.EXPAND)
self.SetSizer(self.hbox)
def main():
""" Main Code """
# docopt(__doc__, version=__version__)
class ExampleFrame(wx.Frame):
""" Base Frame """
def __init__(self):
wx.Frame.__init__(self,
None, # Window Parent
wx.ID_ANY, # id
)
self.Bind(wx.EVT_CLOSE, self.OnQuit)
            # Create and show the preferences dialog
pref_dialog = MonitorPreferences(self)
pref_dialog.ShowModal()
# pref_dialog.Destroy()
def OnQuit(self, event):
self.Destroy()
app = wx.App()
frame = ExampleFrame()
# frame.Show()
frame.Destroy()
app.MainLoop()
if __name__ == "__main__":
main()
|
|
"""Tests for classes defining properties of ground domains, e.g. ZZ, QQ, ZZ[x] ... """
from sympy import S, sqrt, sin, oo, nan, Poly, Integer, Rational
from sympy.abc import x, y, z
from sympy.polys.domains import (ZZ, QQ, RR, CC, FF, GF,
PolynomialRing, FractionField, EX)
from sympy.polys.rings import ring
from sympy.polys.fields import field
from sympy.polys.domains.modularinteger import ModularIntegerFactory
from sympy.polys.polyerrors import (
UnificationFailed,
GeneratorsNeeded,
GeneratorsError,
CoercionFailed,
NotInvertible,
DomainError)
from sympy.utilities.pytest import raises, XFAIL
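# A shared algebraic extension field QQ(sqrt(2), sqrt(3)) used as the ALG
# fixture throughout the tests below.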
ALG = QQ.algebraic_field(sqrt(2), sqrt(3))
def unify(K0, K1):
return K0.unify(K1)
def test_Domain_unify():
F3 = GF(3)
assert unify(F3, F3) == F3
assert unify(F3, ZZ) == ZZ
assert unify(F3, QQ) == QQ
assert unify(F3, ALG) == ALG
assert unify(F3, RR) == RR
assert unify(F3, CC) == CC
assert unify(F3, ZZ[x]) == ZZ[x]
assert unify(F3, ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(F3, EX) == EX
assert unify(ZZ, F3) == ZZ
assert unify(ZZ, ZZ) == ZZ
assert unify(ZZ, QQ) == QQ
assert unify(ZZ, ALG) == ALG
assert unify(ZZ, RR) == RR
assert unify(ZZ, CC) == CC
assert unify(ZZ, ZZ[x]) == ZZ[x]
assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ, EX) == EX
assert unify(QQ, F3) == QQ
assert unify(QQ, ZZ) == QQ
assert unify(QQ, QQ) == QQ
assert unify(QQ, ALG) == ALG
assert unify(QQ, RR) == RR
assert unify(QQ, CC) == CC
assert unify(QQ, ZZ[x]) == QQ[x]
assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ, EX) == EX
assert unify(RR, F3) == RR
assert unify(RR, ZZ) == RR
assert unify(RR, QQ) == RR
assert unify(RR, ALG) == RR
assert unify(RR, RR) == RR
assert unify(RR, CC) == CC
assert unify(RR, ZZ[x]) == RR[x]
assert unify(RR, ZZ.frac_field(x)) == RR.frac_field(x)
assert unify(RR, EX) == EX
assert unify(CC, F3) == CC
assert unify(CC, ZZ) == CC
assert unify(CC, QQ) == CC
assert unify(CC, ALG) == CC
assert unify(CC, RR) == CC
assert unify(CC, CC) == CC
assert unify(CC, ZZ[x]) == CC[x]
assert unify(CC, ZZ.frac_field(x)) == CC.frac_field(x)
assert unify(CC, EX) == EX
assert unify(ZZ[x], F3) == ZZ[x]
assert unify(ZZ[x], ZZ) == ZZ[x]
assert unify(ZZ[x], QQ) == QQ[x]
assert unify(ZZ[x], ALG) == ALG[x]
assert unify(ZZ[x], RR) == RR[x]
assert unify(ZZ[x], CC) == CC[x]
assert unify(ZZ[x], ZZ[x]) == ZZ[x]
assert unify(ZZ[x], ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ[x], EX) == EX
assert unify(ZZ.frac_field(x), F3) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x), ALG) == ALG.frac_field(x)
assert unify(ZZ.frac_field(x), RR) == RR.frac_field(x)
assert unify(ZZ.frac_field(x), CC) == CC.frac_field(x)
assert unify(ZZ.frac_field(x), ZZ[x]) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), EX) == EX
assert unify(EX, F3) == EX
assert unify(EX, ZZ) == EX
assert unify(EX, QQ) == EX
assert unify(EX, ALG) == EX
assert unify(EX, RR) == EX
assert unify(EX, CC) == EX
assert unify(EX, ZZ[x]) == EX
assert unify(EX, ZZ.frac_field(x)) == EX
assert unify(EX, EX) == EX
def test_Domain_unify_composite():
assert unify(ZZ.poly_ring(x), ZZ) == ZZ.poly_ring(x)
assert unify(ZZ.poly_ring(x), QQ) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), ZZ) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), QQ) == QQ.poly_ring(x)
assert unify(ZZ, ZZ.poly_ring(x)) == ZZ.poly_ring(x)
assert unify(QQ, ZZ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(ZZ, QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(QQ, QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(ZZ.poly_ring(x, y), ZZ) == ZZ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), ZZ) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
assert unify(ZZ, ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
assert unify(QQ, ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(ZZ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(QQ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), ZZ) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), QQ) == QQ.frac_field(x)
assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ, QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ, QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x, y), ZZ) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), ZZ) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
assert unify(ZZ, ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ, ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(QQ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x)) == ZZ.poly_ring(x)
assert unify(ZZ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), ZZ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x)) == ZZ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x), ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x, z)) == ZZ.poly_ring(x, y, z)
assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), ZZ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), ZZ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x), ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(ZZ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), ZZ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(ZZ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ.poly_ring(x), QQ.frac_field(x)) == ZZ.frac_field(x)
assert unify(QQ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(QQ.poly_ring(x), QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(ZZ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(ZZ.poly_ring(x), QQ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.poly_ring(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(ZZ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ.poly_ring(x)) == ZZ.frac_field(x)
assert unify(QQ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
assert unify(QQ.frac_field(x), QQ.poly_ring(x)) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), QQ.poly_ring(x)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x), QQ.poly_ring(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x), QQ.poly_ring(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), QQ.poly_ring(x, z)) == QQ.frac_field(x, y, z)
def test_Domain_unify_algebraic():
sqrt5 = QQ.algebraic_field(sqrt(5))
sqrt7 = QQ.algebraic_field(sqrt(7))
sqrt57 = QQ.algebraic_field(sqrt(5), sqrt(7))
assert sqrt5.unify(sqrt7) == sqrt57
assert sqrt5.unify(sqrt5[x, y]) == sqrt5[x, y]
assert sqrt5[x, y].unify(sqrt5) == sqrt5[x, y]
assert sqrt5.unify(sqrt5.frac_field(x, y)) == sqrt5.frac_field(x, y)
assert sqrt5.frac_field(x, y).unify(sqrt5) == sqrt5.frac_field(x, y)
assert sqrt5.unify(sqrt7[x, y]) == sqrt57[x, y]
assert sqrt5[x, y].unify(sqrt7) == sqrt57[x, y]
assert sqrt5.unify(sqrt7.frac_field(x, y)) == sqrt57.frac_field(x, y)
assert sqrt5.frac_field(x, y).unify(sqrt7) == sqrt57.frac_field(x, y)
def test_Domain_unify_with_symbols():
raises(UnificationFailed, lambda: ZZ[x, y].unify_with_symbols(ZZ, (y, z)))
raises(UnificationFailed, lambda: ZZ.unify_with_symbols(ZZ[x, y], (y, z)))
def test_Domain__contains__():
assert (0 in EX) is True
assert (0 in ZZ) is True
assert (0 in QQ) is True
assert (0 in RR) is True
assert (0 in CC) is True
assert (0 in ALG) is True
assert (0 in ZZ[x, y]) is True
assert (0 in QQ[x, y]) is True
assert (0 in RR[x, y]) is True
assert (-7 in EX) is True
assert (-7 in ZZ) is True
assert (-7 in QQ) is True
assert (-7 in RR) is True
assert (-7 in CC) is True
assert (-7 in ALG) is True
assert (-7 in ZZ[x, y]) is True
assert (-7 in QQ[x, y]) is True
assert (-7 in RR[x, y]) is True
assert (17 in EX) is True
assert (17 in ZZ) is True
assert (17 in QQ) is True
assert (17 in RR) is True
assert (17 in CC) is True
assert (17 in ALG) is True
assert (17 in ZZ[x, y]) is True
assert (17 in QQ[x, y]) is True
assert (17 in RR[x, y]) is True
assert (-S(1)/7 in EX) is True
assert (-S(1)/7 in ZZ) is False
assert (-S(1)/7 in QQ) is True
assert (-S(1)/7 in RR) is True
assert (-S(1)/7 in CC) is True
assert (-S(1)/7 in ALG) is True
assert (-S(1)/7 in ZZ[x, y]) is False
assert (-S(1)/7 in QQ[x, y]) is True
assert (-S(1)/7 in RR[x, y]) is True
assert (S(3)/5 in EX) is True
assert (S(3)/5 in ZZ) is False
assert (S(3)/5 in QQ) is True
assert (S(3)/5 in RR) is True
assert (S(3)/5 in CC) is True
assert (S(3)/5 in ALG) is True
assert (S(3)/5 in ZZ[x, y]) is False
assert (S(3)/5 in QQ[x, y]) is True
assert (S(3)/5 in RR[x, y]) is True
assert (3.0 in EX) is True
assert (3.0 in ZZ) is True
assert (3.0 in QQ) is True
assert (3.0 in RR) is True
assert (3.0 in CC) is True
assert (3.0 in ALG) is True
assert (3.0 in ZZ[x, y]) is True
assert (3.0 in QQ[x, y]) is True
assert (3.0 in RR[x, y]) is True
assert (3.14 in EX) is True
assert (3.14 in ZZ) is False
assert (3.14 in QQ) is True
assert (3.14 in RR) is True
assert (3.14 in CC) is True
assert (3.14 in ALG) is True
assert (3.14 in ZZ[x, y]) is False
assert (3.14 in QQ[x, y]) is True
assert (3.14 in RR[x, y]) is True
assert (oo in EX) is True
assert (oo in ZZ) is False
assert (oo in QQ) is False
assert (oo in RR) is True
assert (oo in CC) is True
assert (oo in ALG) is False
assert (oo in ZZ[x, y]) is False
assert (oo in QQ[x, y]) is False
assert (oo in RR[x, y]) is True
assert (-oo in EX) is True
assert (-oo in ZZ) is False
assert (-oo in QQ) is False
assert (-oo in RR) is True
assert (-oo in CC) is True
assert (-oo in ALG) is False
assert (-oo in ZZ[x, y]) is False
assert (-oo in QQ[x, y]) is False
assert (-oo in RR[x, y]) is True
assert (sqrt(7) in EX) is True
assert (sqrt(7) in ZZ) is False
assert (sqrt(7) in QQ) is False
assert (sqrt(7) in RR) is True
assert (sqrt(7) in CC) is True
assert (sqrt(7) in ALG) is False
assert (sqrt(7) in ZZ[x, y]) is False
assert (sqrt(7) in QQ[x, y]) is False
assert (sqrt(7) in RR[x, y]) is True
assert (2*sqrt(3) + 1 in EX) is True
assert (2*sqrt(3) + 1 in ZZ) is False
assert (2*sqrt(3) + 1 in QQ) is False
assert (2*sqrt(3) + 1 in RR) is True
assert (2*sqrt(3) + 1 in CC) is True
assert (2*sqrt(3) + 1 in ALG) is True
assert (2*sqrt(3) + 1 in ZZ[x, y]) is False
assert (2*sqrt(3) + 1 in QQ[x, y]) is False
assert (2*sqrt(3) + 1 in RR[x, y]) is True
assert (sin(1) in EX) is True
assert (sin(1) in ZZ) is False
assert (sin(1) in QQ) is False
assert (sin(1) in RR) is True
assert (sin(1) in CC) is True
assert (sin(1) in ALG) is False
assert (sin(1) in ZZ[x, y]) is False
assert (sin(1) in QQ[x, y]) is False
assert (sin(1) in RR[x, y]) is True
assert (x**2 + 1 in EX) is True
assert (x**2 + 1 in ZZ) is False
assert (x**2 + 1 in QQ) is False
assert (x**2 + 1 in RR) is False
assert (x**2 + 1 in CC) is False
assert (x**2 + 1 in ALG) is False
assert (x**2 + 1 in ZZ[x]) is True
assert (x**2 + 1 in QQ[x]) is True
assert (x**2 + 1 in RR[x]) is True
assert (x**2 + 1 in ZZ[x, y]) is True
assert (x**2 + 1 in QQ[x, y]) is True
assert (x**2 + 1 in RR[x, y]) is True
assert (x**2 + y**2 in EX) is True
assert (x**2 + y**2 in ZZ) is False
assert (x**2 + y**2 in QQ) is False
assert (x**2 + y**2 in RR) is False
assert (x**2 + y**2 in CC) is False
assert (x**2 + y**2 in ALG) is False
assert (x**2 + y**2 in ZZ[x]) is False
assert (x**2 + y**2 in QQ[x]) is False
assert (x**2 + y**2 in RR[x]) is False
assert (x**2 + y**2 in ZZ[x, y]) is True
assert (x**2 + y**2 in QQ[x, y]) is True
assert (x**2 + y**2 in RR[x, y]) is True
assert (S(3)/2*x/(y + 1) - z in QQ[x, y, z]) is False
def test_Domain_get_ring():
assert ZZ.has_assoc_Ring is True
assert QQ.has_assoc_Ring is True
assert ZZ[x].has_assoc_Ring is True
assert QQ[x].has_assoc_Ring is True
assert ZZ[x, y].has_assoc_Ring is True
assert QQ[x, y].has_assoc_Ring is True
assert ZZ.frac_field(x).has_assoc_Ring is True
assert QQ.frac_field(x).has_assoc_Ring is True
assert ZZ.frac_field(x, y).has_assoc_Ring is True
assert QQ.frac_field(x, y).has_assoc_Ring is True
assert EX.has_assoc_Ring is False
assert RR.has_assoc_Ring is False
assert ALG.has_assoc_Ring is False
assert ZZ.get_ring() == ZZ
assert QQ.get_ring() == ZZ
assert ZZ[x].get_ring() == ZZ[x]
assert QQ[x].get_ring() == QQ[x]
assert ZZ[x, y].get_ring() == ZZ[x, y]
assert QQ[x, y].get_ring() == QQ[x, y]
assert ZZ.frac_field(x).get_ring() == ZZ[x]
assert QQ.frac_field(x).get_ring() == QQ[x]
assert ZZ.frac_field(x, y).get_ring() == ZZ[x, y]
assert QQ.frac_field(x, y).get_ring() == QQ[x, y]
assert EX.get_ring() == EX
raises(DomainError, lambda: RR.get_ring())
raises(DomainError, lambda: ALG.get_ring())
def test_Domain_get_field():
assert EX.has_assoc_Field is True
assert ZZ.has_assoc_Field is True
assert QQ.has_assoc_Field is True
assert RR.has_assoc_Field is True
assert ALG.has_assoc_Field is True
assert ZZ[x].has_assoc_Field is True
assert QQ[x].has_assoc_Field is True
assert ZZ[x, y].has_assoc_Field is True
assert QQ[x, y].has_assoc_Field is True
assert EX.get_field() == EX
assert ZZ.get_field() == QQ
assert QQ.get_field() == QQ
assert RR.get_field() == RR
assert ALG.get_field() == ALG
assert ZZ[x].get_field() == ZZ.frac_field(x)
assert QQ[x].get_field() == QQ.frac_field(x)
assert ZZ[x, y].get_field() == ZZ.frac_field(x, y)
assert QQ[x, y].get_field() == QQ.frac_field(x, y)
def test_Domain_get_exact():
assert EX.get_exact() == EX
assert ZZ.get_exact() == ZZ
assert QQ.get_exact() == QQ
assert RR.get_exact() == QQ
assert ALG.get_exact() == ALG
assert ZZ[x].get_exact() == ZZ[x]
assert QQ[x].get_exact() == QQ[x]
assert ZZ[x, y].get_exact() == ZZ[x, y]
assert QQ[x, y].get_exact() == QQ[x, y]
assert ZZ.frac_field(x).get_exact() == ZZ.frac_field(x)
assert QQ.frac_field(x).get_exact() == QQ.frac_field(x)
assert ZZ.frac_field(x, y).get_exact() == ZZ.frac_field(x, y)
assert QQ.frac_field(x, y).get_exact() == QQ.frac_field(x, y)
def test_Domain_convert():
assert QQ.convert(10e-52) == QQ(1684996666696915, 1684996666696914987166688442938726917102321526408785780068975640576)
R, x = ring("x", ZZ)
assert ZZ.convert(x - x) == 0
assert ZZ.convert(x - x, R.to_domain()) == 0
def test_PolynomialRing__init():
raises(GeneratorsNeeded, lambda: ZZ.poly_ring())
def test_FractionField__init():
raises(GeneratorsNeeded, lambda: ZZ.frac_field())
def test_inject():
assert ZZ.inject(x, y, z) == ZZ[x, y, z]
assert ZZ[x].inject(y, z) == ZZ[x, y, z]
assert ZZ.frac_field(x).inject(y, z) == ZZ.frac_field(x, y, z)
raises(GeneratorsError, lambda: ZZ[x].inject(x))
def test_Domain_map():
seq = ZZ.map([1, 2, 3, 4])
assert all(ZZ.of_type(elt) for elt in seq)
seq = ZZ.map([[1, 2, 3, 4]])
assert all(ZZ.of_type(elt) for elt in seq[0]) and len(seq) == 1
def test_Domain___eq__():
assert (ZZ[x, y] == ZZ[x, y]) is True
assert (QQ[x, y] == QQ[x, y]) is True
assert (ZZ[x, y] == QQ[x, y]) is False
assert (QQ[x, y] == ZZ[x, y]) is False
assert (ZZ.frac_field(x, y) == ZZ.frac_field(x, y)) is True
assert (QQ.frac_field(x, y) == QQ.frac_field(x, y)) is True
assert (ZZ.frac_field(x, y) == QQ.frac_field(x, y)) is False
assert (QQ.frac_field(x, y) == ZZ.frac_field(x, y)) is False
def test_Domain__algebraic_field():
alg = ZZ.algebraic_field(sqrt(2))
assert alg.ext.minpoly == Poly(x**2 - 2)
assert alg.dom == QQ
alg = QQ.algebraic_field(sqrt(2))
assert alg.ext.minpoly == Poly(x**2 - 2)
assert alg.dom == QQ
alg = alg.algebraic_field(sqrt(3))
assert alg.ext.minpoly == Poly(x**4 - 10*x**2 + 1)
assert alg.dom == QQ
def test_PolynomialRing_from_FractionField():
F, x,y = field("x,y", ZZ)
R, X,Y = ring("x,y", ZZ)
f = (x**2 + y**2)/(x + 1)
g = (x**2 + y**2)/4
h = x**2 + y**2
assert R.to_domain().from_FractionField(f, F.to_domain()) is None
assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2
F, x,y = field("x,y", QQ)
R, X,Y = ring("x,y", QQ)
f = (x**2 + y**2)/(x + 1)
g = (x**2 + y**2)/4
h = x**2 + y**2
assert R.to_domain().from_FractionField(f, F.to_domain()) is None
assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2
def test_FractionField_from_PolynomialRing():
R, x,y = ring("x,y", QQ)
F, X,Y = field("x,y", ZZ)
f = 3*x**2 + 5*y**2
g = x**2/3 + y**2/5
assert F.to_domain().from_PolynomialRing(f, R.to_domain()) == 3*X**2 + 5*Y**2
assert F.to_domain().from_PolynomialRing(g, R.to_domain()) == (5*X**2 + 3*Y**2)/15
def test_FF_of_type():
assert FF(3).of_type(FF(3)(1)) is True
assert FF(5).of_type(FF(5)(3)) is True
assert FF(5).of_type(FF(7)(3)) is False
def test___eq__():
assert not QQ[x] == ZZ[x]
assert not QQ.frac_field(x) == ZZ.frac_field(x)
def test_RealField_from_sympy():
assert RR.convert(S(0)) == RR.dtype(0)
assert RR.convert(S(0.0)) == RR.dtype(0.0)
assert RR.convert(S(1)) == RR.dtype(1)
assert RR.convert(S(1.0)) == RR.dtype(1.0)
assert RR.convert(sin(1)) == RR.dtype(sin(1).evalf())
assert RR.convert(oo) == RR("+inf")
assert RR.convert(-oo) == RR("-inf")
raises(CoercionFailed, lambda: RR.convert(x))
def test_ModularInteger():
F3 = FF(3)
a = F3(0)
assert isinstance(a, F3.dtype) and a == 0
a = F3(1)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)
assert isinstance(a, F3.dtype) and a == 2
a = F3(3)
assert isinstance(a, F3.dtype) and a == 0
a = F3(4)
assert isinstance(a, F3.dtype) and a == 1
a = F3(F3(0))
assert isinstance(a, F3.dtype) and a == 0
a = F3(F3(1))
assert isinstance(a, F3.dtype) and a == 1
a = F3(F3(2))
assert isinstance(a, F3.dtype) and a == 2
a = F3(F3(3))
assert isinstance(a, F3.dtype) and a == 0
a = F3(F3(4))
assert isinstance(a, F3.dtype) and a == 1
a = -F3(1)
assert isinstance(a, F3.dtype) and a == 2
a = -F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = 2 + F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2) + 2
assert isinstance(a, F3.dtype) and a == 1
a = F3(2) + F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2) + F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = 3 - F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(3) - 2
assert isinstance(a, F3.dtype) and a == 1
a = F3(3) - F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(3) - F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = 2*F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)*2
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)*F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)*F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = 2/F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)/2
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)/F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)/F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = 1 % F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(1) % 2
assert isinstance(a, F3.dtype) and a == 1
a = F3(1) % F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(1) % F3(2)
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)**0
assert isinstance(a, F3.dtype) and a == 1
a = F3(2)**1
assert isinstance(a, F3.dtype) and a == 2
a = F3(2)**2
assert isinstance(a, F3.dtype) and a == 1
assert bool(F3(3)) is False
assert bool(F3(4)) is True
F5 = FF(5)
a = F5(1)**(-1)
assert isinstance(a, F5.dtype) and a == 1
a = F5(2)**(-1)
assert isinstance(a, F5.dtype) and a == 3
a = F5(3)**(-1)
assert isinstance(a, F5.dtype) and a == 2
a = F5(4)**(-1)
assert isinstance(a, F5.dtype) and a == 4
assert (F5(1) < F5(2)) is True
assert (F5(1) <= F5(2)) is True
assert (F5(1) > F5(2)) is False
assert (F5(1) >= F5(2)) is False
assert (F5(3) < F5(2)) is False
assert (F5(3) <= F5(2)) is False
assert (F5(3) > F5(2)) is True
assert (F5(3) >= F5(2)) is True
assert (F5(1) < F5(7)) is True
assert (F5(1) <= F5(7)) is True
assert (F5(1) > F5(7)) is False
assert (F5(1) >= F5(7)) is False
assert (F5(3) < F5(7)) is False
assert (F5(3) <= F5(7)) is False
assert (F5(3) > F5(7)) is True
assert (F5(3) >= F5(7)) is True
assert (F5(1) < 2) is True
assert (F5(1) <= 2) is True
assert (F5(1) > 2) is False
assert (F5(1) >= 2) is False
assert (F5(3) < 2) is False
assert (F5(3) <= 2) is False
assert (F5(3) > 2) is True
assert (F5(3) >= 2) is True
assert (F5(1) < 7) is True
assert (F5(1) <= 7) is True
assert (F5(1) > 7) is False
assert (F5(1) >= 7) is False
assert (F5(3) < 7) is False
assert (F5(3) <= 7) is False
assert (F5(3) > 7) is True
assert (F5(3) >= 7) is True
raises(NotInvertible, lambda: F5(0)**(-1))
raises(NotInvertible, lambda: F5(5)**(-1))
raises(ValueError, lambda: FF(0))
raises(ValueError, lambda: FF(2.1))
|