#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from org.slf4j import LoggerFactory
from com.ziclix.python.sql import zxJDBC
import sys, os, re, json, csv
import datetime
import SchemaUrlHelper
from wherehows.common.writers import FileWriter
from wherehows.common.schemas import HiveDependencyInstanceRecord
from wherehows.common import Constant
class TableInfo:
""" Class to define the variable name """
table_name = 'name'
dataset_name = 'dataset_name'
native_name = 'native_name'
logical_name = 'logical_name'
version = 'version'
type = 'type'
serialization_format = 'serialization_format'
create_time = 'create_time'
schema_url = 'schema_url'
field_delimiter = 'field_delimiter'
db_id = 'DB_ID'
table_id = 'TBL_ID'
serde_id = 'SD_ID'
table_type = 'tbl_type'
location = 'location'
view_expended_text = 'view_expanded_text'
input_format = 'input_format'
output_format = 'output_format'
is_compressed = 'is_compressed'
is_storedassubdirectories = 'is_storedassubdirectories'
etl_source = 'etl_source'
field_list = 'fields'
schema_literal = 'schema_literal'
optional_prop = [create_time, serialization_format, field_delimiter, schema_url, db_id, table_id, serde_id,
table_type, location, view_expended_text, input_format, output_format, is_compressed,
is_storedassubdirectories, etl_source]
class HiveExtract:
"""
Extract Hive metadata from the Hive metastore (MySQL) and store it in a JSON file
"""
conn_hms = None
db_dict = {} # name : index
table_dict = {} # fullname : index
dataset_dict = {} # name : index
instance_dict = {} # name : index
serde_param_columns = []
def __init__(self):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
def get_table_info_from_v2(self, database_name, is_dali=False):
"""
get table and column info from the COLUMNS_V2 table
:param database_name:
:return: (0 DB_NAME, 1 TBL_NAME, 2 SERDE_FORMAT, 3 TBL_CREATE_TIME,
4 DB_ID, 5 TBL_ID, 6 SD_ID, 7 LOCATION, 8 TBL_TYPE, 9 VIEW_EXPANDED_TEXT, 10 INPUT_FORMAT, 11 OUTPUT_FORMAT,
12 IS_COMPRESSED, 13 IS_STOREDASSUBDIRECTORIES, 14 INTEGER_IDX, 15 COLUMN_NAME, 16 TYPE_NAME, 17 COMMENT,
18 dataset_name, 19 version, 20 TYPE, 21 storage_type, 22 native_name, 23 logical_name, 24 created_time, 25 dataset_urn)
"""
curs = self.conn_hms.cursor()
if is_dali:
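# Explanatory note: the Dali branch below relies on the view-name convention visible
# in the SQL — a versioned view name ends in _<major>_<minor>_<patch>, so a
# hypothetical view "pageviews_1_2_3" yields dataset_name "pageviews_{version}",
# version "1.2.3" and logical_name "pageviews"; names without that suffix pass
# through unchanged.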
tbl_info_sql = """select d.NAME DB_NAME, t.TBL_NAME TBL_NAME,
case when s.INPUT_FORMAT like '%.TextInput%' then 'Text'
when s.INPUT_FORMAT like '%.Avro%' then 'Avro'
when s.INPUT_FORMAT like '%.RCFile%' then 'RC'
when s.INPUT_FORMAT like '%.Orc%' then 'ORC'
when s.INPUT_FORMAT like '%.Sequence%' then 'Sequence'
when s.INPUT_FORMAT like '%.Parquet%' then 'Parquet'
else s.INPUT_FORMAT
end SerializationFormat,
t.CREATE_TIME TableCreateTime,
t.DB_ID, t.TBL_ID, s.SD_ID,
substr(s.LOCATION, length(substring_index(s.LOCATION, '/', 3))+1) Location,
t.TBL_TYPE, t.VIEW_EXPANDED_TEXT, s.INPUT_FORMAT, s.OUTPUT_FORMAT, s.IS_COMPRESSED, s.IS_STOREDASSUBDIRECTORIES,
c.INTEGER_IDX, c.COLUMN_NAME, c.TYPE_NAME, c.COMMENT,
case when t.TBL_NAME regexp '_[0-9]+_[0-9]+_[0-9]+$'
then concat(substring(t.TBL_NAME, 1, length(t.TBL_NAME) - length(substring_index(t.TBL_NAME, '_', -3)) - 1),'_{version}')
else t.TBL_NAME
end dataset_name,
case when t.TBL_NAME regexp '_[0-9]+_[0-9]+_[0-9]+$'
then replace(substring_index(t.TBL_NAME, '_', -3), '_', '.')
else 0
end version, 'Dalids' TYPE, 'View' storage_type, concat(d.NAME, '.', t.TBL_NAME) native_name,
case when t.TBL_NAME regexp '_[0-9]+_[0-9]+_[0-9]+$'
then substring(t.TBL_NAME, 1, length(t.TBL_NAME) - length(substring_index(t.TBL_NAME, '_', -3)) - 1)
else t.TBL_NAME
end logical_name, unix_timestamp(now()) created_time, concat('dalids:///', d.NAME, '/', t.TBL_NAME) dataset_urn
from TBLS t join DBS d on t.DB_ID=d.DB_ID
join SDS s on t.SD_ID = s.SD_ID
join COLUMNS_V2 c on s.CD_ID = c.CD_ID
where
d.NAME in ('{db_name}') and (d.NAME like '%\_mp' or d.NAME like '%\_mp\_versioned') and d.NAME not like 'dalitest%' and t.TBL_TYPE = 'VIRTUAL_VIEW'
order by DB_NAME, dataset_name, version DESC
""".format(version='{version}', db_name=database_name)
else:
tbl_info_sql = """select d.NAME DB_NAME, t.TBL_NAME TBL_NAME,
case when s.INPUT_FORMAT like '%.TextInput%' then 'Text'
when s.INPUT_FORMAT like '%.Avro%' then 'Avro'
when s.INPUT_FORMAT like '%.RCFile%' then 'RC'
when s.INPUT_FORMAT like '%.Orc%' then 'ORC'
when s.INPUT_FORMAT like '%.Sequence%' then 'Sequence'
when s.INPUT_FORMAT like '%.Parquet%' then 'Parquet'
else s.INPUT_FORMAT
end SerializationFormat,
t.CREATE_TIME TableCreateTime,
t.DB_ID, t.TBL_ID, s.SD_ID,
substr(s.LOCATION, length(substring_index(s.LOCATION, '/', 3))+1) Location,
t.TBL_TYPE, t.VIEW_EXPANDED_TEXT, s.INPUT_FORMAT, s.OUTPUT_FORMAT, s.IS_COMPRESSED, s.IS_STOREDASSUBDIRECTORIES,
c.INTEGER_IDX, c.COLUMN_NAME, c.TYPE_NAME, c.COMMENT, t.TBL_NAME dataset_name, 0 version, 'Hive' TYPE,
case when LOCATE('view', LOWER(t.TBL_TYPE)) > 0 then 'View'
when LOCATE('index', LOWER(t.TBL_TYPE)) > 0 then 'Index'
else 'Table'
end storage_type, concat(d.NAME, '.', t.TBL_NAME) native_name, t.TBL_NAME logical_name,
unix_timestamp(now()) created_time, concat('hive:///', d.NAME, '/', t.TBL_NAME) dataset_urn
from TBLS t join DBS d on t.DB_ID=d.DB_ID
join SDS s on t.SD_ID = s.SD_ID
join COLUMNS_V2 c on s.CD_ID = c.CD_ID
where
d.NAME in ('{db_name}') and not ((d.NAME like '%\_mp' or d.NAME like '%\_mp\_versioned') and t.TBL_TYPE = 'VIRTUAL_VIEW')
order by 1,2
""".format(db_name=database_name)
curs.execute(tbl_info_sql)
rows = curs.fetchall()
curs.close()
return rows
def get_table_info_from_serde_params(self, database_name):
"""
get table and schema info (MANAGED and EXTERNAL tables) from the Avro schema serde parameters
:param database_name:
:return: (DatabaseName, TableName, SerializationFormat, TableCreateTime, DB_ID, TBL_ID, SD_ID, Location,
TBL_TYPE, VIEW_EXPANDED_TEXT, INPUT_FORMAT, OUTPUT_FORMAT, IS_COMPRESSED, IS_STOREDASSUBDIRECTORIES, SchemaLiteral, SchemaUrl, FieldDelimiter)
"""
curs_et = self.conn_hms.cursor()
tbl_info_sql = """select d.NAME DatabaseName, et.TBL_NAME TableName,
case when s.INPUT_FORMAT like '%.TextInput%' then 'Text'
when s.INPUT_FORMAT like '%.Avro%' then 'Avro'
when s.INPUT_FORMAT like '%.RCFile%' then 'RC'
when s.INPUT_FORMAT like '%.Orc%' then 'ORC'
when s.INPUT_FORMAT like '%.Sequence%' then 'Sequence'
when s.INPUT_FORMAT like '%.Parquet%' then 'Parquet'
else s.INPUT_FORMAT
end SerializationFormat,
et.CREATE_TIME TableCreateTime, et.DB_ID, et.TBL_ID, s.SD_ID,
substr(s.LOCATION, length(substring_index(s.LOCATION, '/', 3))+1) Location,
et.TBL_TYPE, et.VIEW_EXPANDED_TEXT, s.INPUT_FORMAT, s.OUTPUT_FORMAT, s.IS_COMPRESSED, s.IS_STOREDASSUBDIRECTORIES,
et.schema_literal SchemaLiteral, et.schema_url SchemaUrl, et.field_delim FieldDelimiter
from (
select t.DB_ID, t.TBL_ID, sp.SERDE_ID,
t.TBL_NAME, t.CREATE_TIME, t.TBL_TYPE, t.VIEW_EXPANDED_TEXT,
replace(max( case when param_key in ('avro.schema.literal', 'schema.literal')
then param_value
end), '\\n', ' ') schema_literal,
max( case when param_key in ('avro.schema.url', 'schema.url')
then param_value
end) schema_url,
max( case when param_key in ('field.delim')
then param_value
end) field_delim
from SERDE_PARAMS sp join TBLS t on sp.SERDE_ID = t.SD_ID
where sp.PARAM_KEY regexp 'schema.literal|schema.url|field.delim'
group by 1,2,3,4,5 ) et
JOIN DBS d on et.DB_ID = d.DB_ID
JOIN SDS s on et.SERDE_ID = s.SD_ID
where d.NAME = '{db_name}'
order by 1,2 """.format(db_name=database_name)
curs_et.execute(tbl_info_sql)
rows = curs_et.fetchall()
curs_et.close()
return rows
def format_table_metadata_v2(self, rows, schema):
"""
Process rows fetched from COLUMNS_V2 into final table records; several consecutive rows form one table's info
:param rows: rows fetched from COLUMNS_V2, ordered by database name and table name
:param schema: {database : _, type : _, tables : [{}, {} ...] }
:return:
"""
db_idx = len(schema) - 1
table_idx = -1
field_list = []
for row_index, row_value in enumerate(rows):
field_list.append({'IntegerIndex': row_value[14], 'ColumnName': row_value[15], 'TypeName': row_value[16],
'Comment': row_value[17]})
if row_index == len(rows) - 1 or (row_value[0] != rows[row_index+1][0] or row_value[1] != rows[row_index+1][1]): # if this is last record of current table
# sort the field_list by IntegerIndex
field_list = sorted(field_list, key=lambda k: k['IntegerIndex'])
# process the record of table
table_record = {TableInfo.table_name: row_value[1], TableInfo.type: row_value[21],
TableInfo.native_name: row_value[22], TableInfo.logical_name: row_value[23],
TableInfo.dataset_name: row_value[18], TableInfo.version: str(row_value[19]),
TableInfo.serialization_format: row_value[2],
TableInfo.create_time: row_value[3], TableInfo.db_id: row_value[4], TableInfo.table_id: row_value[5],
TableInfo.serde_id: row_value[6], TableInfo.location: row_value[7], TableInfo.table_type: row_value[8],
TableInfo.view_expended_text: row_value[9], TableInfo.input_format: row_value[10], TableInfo.output_format: row_value[11],
TableInfo.is_compressed: row_value[12], TableInfo.is_storedassubdirectories: row_value[13],
TableInfo.etl_source: 'COLUMN_V2', TableInfo.field_list: field_list[:]}
field_list = [] # empty it
if row_value[0] not in self.db_dict:
schema.append({'database': row_value[0], 'type': row_value[20], 'tables': []})
db_idx += 1
self.db_dict[row_value[0]] = db_idx
full_name = row_value[0] + '.' + row_value[1]
# put in schema result
if full_name not in self.table_dict:
schema[db_idx]['tables'].append(table_record)
table_idx += 1
self.table_dict[full_name] = table_idx
self.logger.info("%s %6d tables processed for database %12s from COLUMN_V2" % (
datetime.datetime.now(), table_idx + 1, row_value[0]))
def format_table_metadata_serde(self, rows, schema):
"""
Add table info from rows into schema, including serde-derived extras (schema literal/url, field delimiter)
:param rows: rows returned by get_table_info_from_serde_params (see its docstring for the column order)
:param schema: {database : _, type : _, tables : [{}, {} ...] }
:return:
"""
db_idx = len(schema) - 1
table_idx = -1
for row_value in rows:
if row_value[0] not in self.db_dict:
schema.append({'database': row_value[0], 'type': 'Hive', 'tables': []})
db_idx += 1
self.db_dict[row_value[0]] = db_idx
else:
db_idx = self.db_dict[row_value[0]]
full_name = ''
if row_value[0]:
full_name = row_value[0]
if row_value[1]:
full_name += '.' + row_value[1]
elif row_value[1]:
full_name = row_value[1]
literal = None
if row_value[15] and not row_value[14]: # schema_url is available but missing schema_literal
try:
literal = self.get_schema_literal_from_url(row_value[15])
except Exception as e:
self.logger.error(str(e))
elif row_value[14]:
literal = row_value[14]
# put in schema result
if full_name not in self.table_dict:
schema[db_idx]['tables'].append(
{TableInfo.table_name: row_value[1], TableInfo.type: 'Table', TableInfo.serialization_format: row_value[2],
TableInfo.dataset_name: row_value[1],
TableInfo.native_name: row_value[1], TableInfo.logical_name: row_value[1], TableInfo.version: '0',
TableInfo.create_time: row_value[3], TableInfo.db_id: row_value[4], TableInfo.table_id: row_value[5],
TableInfo.serde_id: row_value[6], TableInfo.location: row_value[7], TableInfo.table_type: row_value[8],
TableInfo.view_expended_text: row_value[9], TableInfo.input_format: row_value[10],
TableInfo.output_format: row_value[11], TableInfo.is_compressed: row_value[12],
TableInfo.is_storedassubdirectories: row_value[13], TableInfo.etl_source: 'SERDE_PARAMS',
TableInfo.schema_literal: literal,
TableInfo.schema_url: row_value[15],
TableInfo.field_delimiter: row_value[16]})
table_idx += 1
self.table_dict[full_name] = table_idx
self.logger.info("%s %6d tables processed for database %12s from SERDE_PARAM" % (
datetime.datetime.now(), table_idx + 1, row_value[0]))
def run(self, schema_output_file, sample_output_file=None, hdfs_map_output_file=None,
hdfs_namenode_ipc_uri=None, kerberos_auth=False, kerberos_principal=None, keytab_file=None):
"""
Entry point of the class: extract the schema, one database at a time.
:param schema_output_file: output file for the schema JSON
:return:
"""
cur = self.conn_hms.cursor()
schema = []
schema_json_file = open(schema_output_file, 'wb')
os.chmod(schema_output_file, 0666)
# open(sample_output_file, 'wb')
# os.chmod(sample_output_file, 0666)
# sample_file_writer = FileWriter(sample_output_file)
if type(kerberos_auth) == str:
if kerberos_auth.lower() == 'false':
kerberos_auth = False
else:
kerberos_auth = True
self.schema_url_helper = SchemaUrlHelper.SchemaUrlHelper(hdfs_namenode_ipc_uri, kerberos_auth, kerberos_principal, keytab_file)
for database_name in self.databases:
self.logger.info("Collecting hive tables in database : " + database_name)
# tables from Column V2
rows = []
begin = datetime.datetime.now().strftime("%H:%M:%S")
rows.extend(self.get_table_info_from_v2(database_name, False))
if len(rows) > 0:
self.format_table_metadata_v2(rows, schema)
end = datetime.datetime.now().strftime("%H:%M:%S")
self.logger.info("Get Hive table info from COLUMN_V2 %12s [%s -> %s]\n" % (database_name, str(begin), str(end)))
rows = []
begin = datetime.datetime.now().strftime("%H:%M:%S")
rows.extend(self.get_table_info_from_v2(database_name, True))
if len(rows) > 0:
self.format_table_metadata_v2(rows, schema)
end = datetime.datetime.now().strftime("%H:%M:%S")
self.logger.info("Get Dalids table info from COLUMN_V2 %12s [%s -> %s]\n" % (database_name, str(begin), str(end)))
# tables from schemaLiteral
rows = []
begin = datetime.datetime.now().strftime("%H:%M:%S")
rows.extend(self.get_table_info_from_serde_params(database_name))
if len(rows) > 0:
self.format_table_metadata_serde(rows, schema)
end = datetime.datetime.now().strftime("%H:%M:%S")
self.logger.info("Get table info from Serde %12s [%s -> %s]\n" % (database_name, str(begin), str(end)))
schema_json_file.write(json.dumps(schema, indent=None) + '\n')
schema_json_file.close()
# fetch hive (managed/external) table to hdfs path mapping
rows = []
hdfs_map_csv_file = open(hdfs_map_output_file, 'wb')
os.chmod(hdfs_map_output_file, 0666)
begin = datetime.datetime.now().strftime("%H:%M:%S")
rows = self.get_hdfs_map()
hdfs_map_columns = ['db_name', 'table_name', 'cluster_uri', 'abstract_hdfs_path']
csv_writer = csv.writer(hdfs_map_csv_file, delimiter='\x1a', lineterminator='\n', quoting=csv.QUOTE_NONE)
csv_writer.writerow(hdfs_map_columns)
csv_writer.writerows(rows)
end = datetime.datetime.now().strftime("%H:%M:%S")
self.logger.info("Get hdfs map from SDS %12s [%s -> %s]\n" % (database_name, str(begin), str(end)))
cur.close()
hdfs_map_csv_file.close()
def get_all_databases(self, database_white_list, database_black_list):
"""
Fetch all database names from the DBS table, applying the white/black list filters
:return:
"""
database_white_list = ",".join(["'" + x + "'" for x in database_white_list.split(',')])
database_black_list = ",".join(["'" + x + "'" for x in database_black_list.split(',')])
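# The comma-separated settings are re-quoted for the SQL IN (...) lists, e.g. a
# white list of "db1,db2" becomes "'db1','db2'"; an empty setting is expected to
# arrive as the literal "''" (see the defaults in __main__).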
fetch_all_database_names = "SELECT `NAME` FROM DBS WHERE `NAME` IN ({white_list}) OR NOT (`NAME` IN ({black_list}) OR `NAME` LIKE 'u\\_%')"\
.format(white_list=database_white_list, black_list=database_black_list)
self.logger.info(fetch_all_database_names)
curs = self.conn_hms.cursor()
curs.execute(fetch_all_database_names)
rows = [item[0] for item in curs.fetchall()]
curs.close()
return rows
def get_schema_literal_from_url(self, schema_url):
"""
fetch the avro schema literal from
- an avsc file on hdfs, via hdfs/webhdfs
- a json string in a schema registry, via http
:param schema_url: e.g. hdfs://server:port/data/tracking/abc/_schema.avsc or http://schema-registry-vip-1:port/schemaRegistry/schemas/latest_with_type=xyz
:return: json string of the avro schema
"""
if schema_url.startswith('hdfs://') or schema_url.startswith('/') or schema_url.startswith('webhdfs://'):
return self.schema_url_helper.get_from_hdfs(schema_url)
elif schema_url.startswith('https://') or schema_url.startswith('http://'):
return self.schema_url_helper.get_from_http(schema_url)
else:
self.logger.error("get_schema_literal_from_url() gets a bad input: %s" % schema_url)
return ''
def get_hdfs_map(self):
"""
Fetch the mapping from hdfs location to hive (managed and external) table
:return:
"""
hdfs_map_sql = """select db_name, tbl_name table_name, cluster_uri,
cast(
case when substring_index(hdfs_path, '/', -4) regexp '[0-9]{4}/[0-9]{2}/[0-9]{2}/[0-9]{2}'
then substring(hdfs_path, 1, hdfs_path_len - length(substring_index(hdfs_path, '/', -4))-1)
when substring_index(hdfs_path, '/', -3) regexp '[0-9]{4}/[0-9]{2}/[0-9]{2}'
then substring(hdfs_path, 1, hdfs_path_len - length(substring_index(hdfs_path, '/', -3))-1)
when substring_index(hdfs_path, '/', -1) regexp '20[0-9]{2}([\\._-]?[0-9][0-9]){2,5}'
or substring_index(hdfs_path, '/', -1) regexp '1[3-6][0-9]{11}(-(PT|UTC|GMT|SCN)-[0-9]+)?'
or substring_index(hdfs_path, '/', -1) regexp '[0-9]+([\\._-][0-9]+)?'
or substring_index(hdfs_path, '/', -1) regexp '[vV][0-9]+([\\._-][0-9]+)?'
or substring_index(hdfs_path, '/', -1) regexp '(prod|ei|qa|dev)_[0-9]+\\.[0-9]+\\.[0-9]+_20[01][0-9]([_-]?[0-9][0-9]){2,5}'
then substring(hdfs_path, 1, hdfs_path_len - length(substring_index(hdfs_path, '/', -1))-1)
when hdfs_path regexp '/datepartition=20[01][0-9]([\\._-]?[0-9][0-9]){2,3}'
or hdfs_path regexp '/datepartition=[[:alnum:]]+'
then substring(hdfs_path, 1, locate('/datepartition=', hdfs_path)-1)
when hdfs_path regexp '/date_sk=20[01][0-9]([\\._-]?[0-9][0-9]){2,3}'
then substring(hdfs_path, 1, locate('/date_sk=', hdfs_path)-1)
when hdfs_path regexp '/ds=20[01][0-9]([\\._-]?[0-9][0-9]){2,3}'
then substring(hdfs_path, 1, locate('/ds=', hdfs_path)-1)
when hdfs_path regexp '/dt=20[01][0-9]([\\._-]?[0-9][0-9]){2,3}'
then substring(hdfs_path, 1, locate('/dt=', hdfs_path)-1)
when hdfs_path regexp '^/[[:alnum:]]+/[[:alnum:]]+/[[:alnum:]]+/20[01][0-9]([\\._-]?[0-9][0-9]){2,5}/'
then concat(substring_index(hdfs_path, '/', 4), '/*',
substring(hdfs_path, length(substring_index(hdfs_path, '/', 5))+1))
when hdfs_path regexp '^/[[:alnum:]]+/[[:alnum:]]+/20[01][0-9]([\\._-]?[0-9][0-9]){2,5}/'
then concat(substring_index(hdfs_path, '/', 3), '/*',
substring(hdfs_path, length(substring_index(hdfs_path, '/', 4))+1))
when substring_index(hdfs_path, '/', -3) regexp '^(prod|ei|qa|dev)_[0-9]+\\.[0-9]+\\.[0-9]+_20[01][0-9]([\\._-]?[0-9][0-9]){2,5}/'
then concat(substring(hdfs_path, 1, hdfs_path_len - length(substring_index(hdfs_path, '/', -3))-1), '/*/',
substring_index(hdfs_path, '/', -2))
when substring_index(hdfs_path, '/', -2) regexp '^(prod|ei|qa|dev)_[0-9]+\\.[0-9]+\\.[0-9]+_20[01][0-9]([\\._-]?[0-9][0-9]){2,5}/'
then concat(substring(hdfs_path, 1, hdfs_path_len - length(substring_index(hdfs_path, '/', -2))-1), '/*/',
substring_index(hdfs_path, '/', -1))
else hdfs_path
end as char(300)) abstract_hdfs_path
from (
select d.NAME DB_NAME, t.TBL_NAME,
substring_index(s.LOCATION, '/', 3) cluster_uri,
substring(s.LOCATION, length(substring_index(s.LOCATION, '/', 3))+1) hdfs_path,
length(s.LOCATION) - length(substring_index(s.LOCATION, '/', 3)) as hdfs_path_len
from SDS s
join TBLS t
on s.SD_ID = t.SD_ID
join DBS d
on t.DB_ID = d.DB_ID
left join PARTITIONS p
on t.TBL_ID = p.TBL_ID
where p.PART_ID is null
and LOCATION is not null
union all
select d.NAME DB_NAME, t.TBL_NAME,
substring_index(s.LOCATION, '/', 3) cluster_uri,
substring(s.LOCATION, length(substring_index(s.LOCATION, '/', 3))+1) hdfs_path,
length(s.LOCATION) - length(substring_index(s.LOCATION, '/', 3)) as hdfs_path_len
from SDS s
join (select TBL_ID, MAX(SD_ID) SD_ID from PARTITIONS group by 1) p
on s.SD_ID = p.SD_ID
join TBLS t
on p.TBL_ID = t.TBL_ID
join DBS d
on t.DB_ID = d.DB_ID
where not LOCATION like 'hdfs:%__HIVE_DEFAULT_PARTITION__%'
) x where hdfs_path not like '/tmp/%'
order by 1,2"""
curs = self.conn_hms.cursor()
curs.execute(hdfs_map_sql)
rows = curs.fetchall()
curs.close()
self.logger.info("%6d Hive table => HDFS path mapping relations found." % len(rows))
return rows
if __name__ == "__main__":
args = sys.argv[1]
# connection
username = args[Constant.HIVE_METASTORE_USERNAME]
password = args[Constant.HIVE_METASTORE_PASSWORD]
jdbc_driver = args[Constant.HIVE_METASTORE_JDBC_DRIVER]
jdbc_url = args[Constant.HIVE_METASTORE_JDBC_URL]
if Constant.HIVE_DATABASE_WHITELIST_KEY in args:
database_white_list = args[Constant.HIVE_DATABASE_WHITELIST_KEY]
else:
database_white_list = "''"
if Constant.HIVE_DATABASE_BLACKLIST_KEY in args:
database_black_list = args[Constant.HIVE_DATABASE_BLACKLIST_KEY]
else:
database_black_list = "''"
e = HiveExtract()
e.conn_hms = zxJDBC.connect(jdbc_url, username, password, jdbc_driver)
try:
e.databases = e.get_all_databases(database_white_list, database_black_list)
e.run(args[Constant.HIVE_SCHEMA_JSON_FILE_KEY], \
None, \
args[Constant.HIVE_HDFS_MAP_CSV_FILE_KEY], \
args[Constant.HDFS_NAMENODE_IPC_URI_KEY], \
args[Constant.KERBEROS_AUTH_KEY], \
args[Constant.KERBEROS_PRINCIPAL_KEY], \
args[Constant.KERBEROS_KEYTAB_FILE_KEY]
)
finally:
e.conn_hms.close()
| {
"content_hash": "597af83c4df50e604e7674addcfb8db6",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 160,
"avg_line_length": 47.17416829745597,
"alnum_prop": 0.6169003567576536,
"repo_name": "thomas-young-2013/wherehowsX",
"id": "bb1579dd1767737bf28d2fe8508ae94720b906bc",
"size": "24106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata-etl/src/main/resources/jython/HiveExtract.template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40889"
},
{
"name": "HTML",
"bytes": "132449"
},
{
"name": "Java",
"bytes": "1544814"
},
{
"name": "JavaScript",
"bytes": "368681"
},
{
"name": "Python",
"bytes": "1204672"
},
{
"name": "Scala",
"bytes": "1729"
},
{
"name": "Shell",
"bytes": "1152"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from sklearn import mixture
#from snob import nips_search3 as snob
from snob import mixture_ka as snob
np.random.seed(42)
try:
catalog
except NameError:
catalog = Table.read("../../../apogee-dr14-catalog.fits")
else:
print("WARNING: USING PRE LOADED CATALOG")
# Number of Monte-Carlo realisations to do for each number of true clusters
M = 10
min_clusters = 1
#predictors = ("RA", "DEC", "VHELIO_AVG")
for element in ("NI", "O", "NA", "MG", "CA", "AL"):
catalog["{}_FE".format(element)] = catalog["{}_H".format(element)] - catalog["FE_H"]
predictors = ("FE_H", "O_FE", "CA_FE", "MG_FE", )
covariance_type = "full"
D = len(predictors)
homoskedastic_uncertainty = 0.05
fast = False
min_membership_probability = 1.0
min_stars_per_cluster = 10
cluster_names = sorted(set(catalog["FIELD"][catalog["ASSOCIATION_PROB"] > 0]))
finite = np.all(np.array([
np.isfinite(catalog[predictor]) * (catalog[predictor] > -10) \
for predictor in predictors]), axis=0)
skip_cluster_names = []
for cluster_name in cluster_names:
num = finite * (catalog["FIELD"] == cluster_name) \
* (catalog["ASSOCIATION_PROB"] >= min_membership_probability)
num = num.sum()
print(cluster_name, num)
if num < min_stars_per_cluster:
print("Skipping {} because {} < {}".format(cluster_name, num, min_stars_per_cluster))
skip_cluster_names.append(cluster_name)
cluster_names = sorted(set(cluster_names).difference(skip_cluster_names))
N = len(cluster_names)
print("Number of clusters ({}): {}".format(N, ", ".join(cluster_names)))
results = []
running_delta_aic = 0
running_delta_bic = 0
running_delta_mml = 0
fake_stars_per_cluster = 100
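# Mock-data scheme: for each trial, n real clusters are picked at random and 100
# fake stars per cluster are drawn from a multivariate normal centred on that
# cluster's median abundances with an isotropic homoskedastic_uncertainty (0.05 dex)
# spread.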
for n in range(1, 1 + N):
for m in range(M):
if m > 0 and n == N: break # because at n = N, we get all clusters.
# Pick which clusters.
selected_cluster_names = np.random.choice(
cluster_names, size=n, replace=False)
y = np.zeros((n * fake_stars_per_cluster, len(predictors)))
true_mu = np.zeros((n, D))
true_cov = np.zeros((n, D, D))
true_cov_diag = np.zeros((n, D))
for i, cluster_name in enumerate(selected_cluster_names):
# Get all the stars from those clusters.
match = finite \
* (catalog["FIELD"] == cluster_name) \
* (catalog["ASSOCIATION_PROB"] >= min_membership_probability)
values = np.array([catalog[p][match] for p in predictors])
mu = np.median(values, axis=1)
cov = homoskedastic_uncertainty**2 * np.eye(D)
si, ei = (i * fake_stars_per_cluster, (i + 1) * fake_stars_per_cluster)
y[si:ei, :] = np.random.multivariate_normal(mu, cov, size=fake_stars_per_cluster)
true_mu[i] = mu
true_cov[i] = cov
true_cov_diag[i] = homoskedastic_uncertainty**2
true_weight = np.ones(n, dtype=float)/n
# Construct the matrix of data.
#y = np.array([catalog[p][match] for p in predictors]).T
#y[:, 1] = y[:, 1] - y[:, 0]
# Determine number of Gaussians from MML
#model = snob.GaussianMixture(
# covariance_type=covariance_type, predict_mixtures=1,
# covariance_regularization=1e-6)
#mu, cov, weight, meta = model.fit(y)
#mml_num = weight.size
# Just check,....
#if mml_num != n:
"""
dumb_check = False
for zz in range(30):
alt_model2 = snob.jump_to_mixture(y, n,
covariance_type=covariance_type,
covariance_regularization=1e-6,
threshold=1e-5, max_em_iterations=10000)
if not abs(meta["message_length"] - alt_model2[-1]) < 1:
dumb_check = True
print("GOT A BETTER ONE FROM K = {} ({} < {}; {})".format(
n, alt_model2[-1], meta["message_length"], meta["message_length"] - alt_model2[-1]))
break
"""
model = snob.GaussianMixture(covariance_regularization=1e-6,
covariance_type=covariance_type)
op_mu, op_cov, op_weight, meta = model.fit(y)
mml_num = op_weight.size
try:
R, nll, true_ml = snob._expectation(y, true_mu, true_cov_diag if covariance_type == "diag" else true_cov, true_weight,
covariance_type=covariance_type, covariance_regularization=1e-6)
except Exception:
print("Failed to check truth")
else:
if true_ml < meta["message_length"]:
print("TRUE ML BETTER BY {}".format(true_ml - meta["message_length"]))
"""
if fast:
model = snob.GaussianMixture(
covariance_type=covariance_type, predict_mixtures=10,
covariance_regularization=1e-6)
mu, cov, weight, meta = model.fit(y)
mml_num = weight.size
else:
mls = []
for ni in range(1, n+5):
min_mls = []
for zz in range(30):
alt_model = snob.jump_to_mixture(y, ni,
covariance_type=covariance_type,
covariance_regularization=1e-6,
threshold=1e-5, max_em_iterations=10000)
min_mls.append(alt_model[-1])
mls.append([ni, np.min(min_mls)])
mls = np.array(mls)
_ = np.argmin(mls.T[1])
if _ == (mls.shape[0] - 1):
raise a
#print("FROM {} to {}".format(mml_num, mls[_][0]))
mml_num, __ = mls[_]
"""
"""
# Consider alternative MML, where we initialize it at the true solution
model2 = snob.GaussianMixture(
covariance_type=covariance_type, covariance_regularization=1e-6)
op_mu2, op_cov2, op_weight2, meta2 = model.fit(y,
__initialize=(true_mu, true_cov, true_weights))
if op_weight2.size != op_weight.size:
#assert meta2["message_length"] < meta["mess age_length"]
print("DID BETTER FROM TRUE")
mml_num = op_weight2.size
"""
# Consider alternative where we initialize at K = XX?
# Determine number of components by AIC/BIC.
aic = []
bic = []
aic_converged = -1
bic_converged = -1
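# AIC/BIC selection heuristic: fit sklearn GMMs with increasing k and stop once the
# last three criterion values are all increasing (or a fit fails), then take the k
# that minimises each criterion as the "converged" number of components.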
for k in range(1, 1 + 2*N):
try:
model = mixture.GaussianMixture(n_components=k, covariance_type=covariance_type, )
fitted_model = model.fit(y)
except ValueError:
print("FAILED ON GMM TEST {}".format(k))
aic_converged = 1 + np.argmin(aic)
bic_converged = 1 + np.argmin(bic)
break
bic.append(fitted_model.bic(y))
aic.append(fitted_model.aic(y))
if k > 2:
if aic_converged < 0 and np.all(np.diff(aic[-3:]) > 0):
aic_converged = 1 + np.argmin(aic)
if bic_converged < 0 and np.all(np.diff(bic[-3:]) > 0):
bic_converged = 1 + np.argmin(bic)
if aic_converged >= 0 and bic_converged >= 0:
break
#mml_num = np.nan
results.append((n, m, y.shape[0], aic_converged, bic_converged, mml_num))
print(results[-1])
running_delta_aic += abs(aic_converged - n)
running_delta_mml += abs(mml_num - n)
running_delta_bic += abs(bic_converged - n)
print("MML/BIC/AIC", running_delta_mml, running_delta_bic, running_delta_aic)
results = np.array(results)
import pickle
with open("v1.5-fake-apogee-results-{}-{}.pkl".format(covariance_type,
"fast" if fast else "slow"), "wb") as fp:
pickle.dump(results, fp, -1)
diff_mml = np.sum(np.abs(results.T[0] - results.T[-1]))
diff_bic = np.sum(np.abs(results.T[0] - results.T[-2]))
diff_aic = np.sum(np.abs(results.T[0] - results.T[-3]))
fig, axes = plt.subplots(3)
offset = 3
for i, (ax, name) in enumerate(zip(axes, ("AIC", "BIC", "MML"))):
ax.scatter(results.T[0], results.T[i + offset] - results.T[0], alpha=0.1, marker='s', s=100)
ax.set_ylabel(r"$\Delta${}".format(name))
#if not ax.is_last_row():
# ax.set_xticks()
| {
"content_hash": "c44ab72c2c71acdf542af2afb7af2d78",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 130,
"avg_line_length": 30.926739926739927,
"alnum_prop": 0.5494492478976667,
"repo_name": "andycasey/snob",
"id": "e169b06fbcd9c69686629e43714cb4d0b7c14403",
"size": "8445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "articles/chemical-tagging-gmm/experiments/early-on/experiment_v1.5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2904054"
},
{
"name": "MAXScript",
"bytes": "15074"
},
{
"name": "Makefile",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "489030"
},
{
"name": "Shell",
"bytes": "2964"
},
{
"name": "TeX",
"bytes": "490057"
}
],
"symlink_target": ""
} |
import argparse
import multiprocessing
import os
import posixpath
import sys
import urllib2
import buildbot_common
import build_version
import generate_make
import parse_dsc
from build_paths import SDK_SRC_DIR, OUT_DIR, SDK_RESOURCE_DIR
from build_paths import GSTORE
from generate_index import LandingPage
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
MAKE = 'nacl_sdk/make_3.99.90-26-gf80222c/make.exe'
LIB_DICT = {
'linux': [],
'mac': [],
'win': ['x86_32']
}
VALID_TOOLCHAINS = [
'newlib',
'clang-newlib',
'glibc',
'pnacl',
'win',
'linux',
'mac',
]
# Global verbosity setting.
# If set to True (normally via a command line arg) then build_projects will
# add V=1 to all calls to 'make'
verbose = False
def Trace(msg):
if verbose:
sys.stderr.write(str(msg) + '\n')
def CopyFilesFromTo(filelist, srcdir, dstdir):
for filename in filelist:
srcpath = os.path.join(srcdir, filename)
dstpath = os.path.join(dstdir, filename)
buildbot_common.CopyFile(srcpath, dstpath)
def UpdateHelpers(pepperdir, clobber=False):
tools_dir = os.path.join(pepperdir, 'tools')
if not os.path.exists(tools_dir):
buildbot_common.ErrorExit('SDK tools dir is missing: %s' % tools_dir)
exampledir = os.path.join(pepperdir, 'examples')
if clobber:
buildbot_common.RemoveDir(exampledir)
buildbot_common.MakeDir(exampledir)
# Copy files for individual build and landing page
files = ['favicon.ico', 'httpd.cmd', 'index.css', 'index.js',
'button_close.png', 'button_close_hover.png']
CopyFilesFromTo(files, SDK_RESOURCE_DIR, exampledir)
# Copy tools scripts and make includes
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.py'),
tools_dir)
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.mk'),
tools_dir)
# Copy tools/lib scripts
tools_lib_dir = os.path.join(pepperdir, 'tools', 'lib')
buildbot_common.MakeDir(tools_lib_dir)
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', 'lib', '*.py'),
tools_lib_dir)
# On Windows add a prebuilt make
if getos.GetPlatform() == 'win':
buildbot_common.BuildStep('Add MAKE')
make_url = posixpath.join(GSTORE, MAKE)
make_exe = os.path.join(tools_dir, 'make.exe')
with open(make_exe, 'wb') as f:
f.write(urllib2.urlopen(make_url).read())
def ValidateToolchains(toolchains):
invalid_toolchains = set(toolchains) - set(VALID_TOOLCHAINS)
if invalid_toolchains:
buildbot_common.ErrorExit('Invalid toolchain(s): %s' % (
', '.join(invalid_toolchains)))
def GetDeps(projects):
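# Returns a dict mapping each project name to the subset of its DEPS/LIBS that are
# also built in this subtree, e.g. a hypothetical {'my_example': ['nacl_io']}.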
out = {}
# Build list of all project names
localtargets = [proj['NAME'] for proj in projects]
# For each project
for proj in projects:
deplist = []
# generate a list of dependencies
for targ in proj.get('TARGETS', []):
deplist.extend(targ.get('DEPS', []) + targ.get('LIBS', []))
# and add dependencies to targets built in this subtree
localdeps = [dep for dep in deplist if dep in localtargets]
if localdeps:
out[proj['NAME']] = localdeps
return out
def UpdateProjects(pepperdir, project_tree, toolchains,
clobber=False, configs=None, first_toolchain=False):
if configs is None:
configs = ['Debug', 'Release']
if not os.path.exists(os.path.join(pepperdir, 'tools')):
buildbot_common.ErrorExit('Examples depend on missing tools.')
if not os.path.exists(os.path.join(pepperdir, 'toolchain')):
buildbot_common.ErrorExit('Examples depend on missing toolchains.')
ValidateToolchains(toolchains)
# Create the library output directories
libdir = os.path.join(pepperdir, 'lib')
platform = getos.GetPlatform()
for config in configs:
for arch in LIB_DICT[platform]:
dirpath = os.path.join(libdir, '%s_%s_host' % (platform, arch), config)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
landing_page = None
for branch, projects in project_tree.iteritems():
dirpath = os.path.join(pepperdir, branch)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
targets = [desc['NAME'] for desc in projects]
deps = GetDeps(projects)
# Generate master make for this branch of projects
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch),
targets, deps)
if branch.startswith('examples') and not landing_page:
landing_page = LandingPage()
# Generate individual projects
for desc in projects:
srcroot = os.path.dirname(desc['FILEPATH'])
generate_make.ProcessProject(pepperdir, srcroot, pepperdir, desc,
toolchains, configs=configs,
first_toolchain=first_toolchain)
if branch.startswith('examples'):
landing_page.AddDesc(desc)
if landing_page:
# Generate the landing page text file.
index_html = os.path.join(pepperdir, 'examples', 'index.html')
index_template = os.path.join(SDK_RESOURCE_DIR, 'index.html.template')
with open(index_html, 'w') as fh:
out = landing_page.GeneratePage(index_template)
fh.write(out)
# Generate top Make for examples
targets = ['api', 'demo', 'getting_started', 'tutorial']
targets = [x for x in targets if 'examples/'+x in project_tree]
branch_name = 'examples'
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch_name),
targets, {})
def BuildProjectsBranch(pepperdir, branch, deps, clean, config, args=None):
make_dir = os.path.join(pepperdir, branch)
print "\nMake: " + make_dir
if getos.GetPlatform() == 'win':
# We need to modify the environment to build host on Windows.
make = os.path.join(make_dir, 'make.bat')
else:
make = 'make'
env = None
if os.environ.get('USE_GOMA') == '1':
env = dict(os.environ)
env['NACL_COMPILER_PREFIX'] = 'gomacc'
# Add -m32 to the CFLAGS when building using i686-nacl-gcc
# otherwise goma won't recognise it as different to the x86_64
# build.
env['X86_32_CFLAGS'] = '-m32'
env['X86_32_CXXFLAGS'] = '-m32'
jobs = '50'
else:
jobs = str(multiprocessing.cpu_count())
make_cmd = [make, '-j', jobs]
make_cmd.append('CONFIG='+config)
if not deps:
make_cmd.append('IGNORE_DEPS=1')
if verbose:
make_cmd.append('V=1')
if args:
make_cmd += args
else:
make_cmd.append('TOOLCHAIN=all')
buildbot_common.Run(make_cmd, cwd=make_dir, env=env)
if clean:
# Clean to remove temporary files but keep the built artifacts
buildbot_common.Run(make_cmd + ['clean'], cwd=make_dir, env=env)
def BuildProjects(pepperdir, project_tree, deps=True,
clean=False, config='Debug'):
# Make sure we build libraries (which live in 'src') before
# any of the examples.
build_first = [p for p in project_tree if p != 'src']
build_second = [p for p in project_tree if p == 'src']
for branch in build_first + build_second:
BuildProjectsBranch(pepperdir, branch, deps, clean, config)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-c', '--clobber',
help='Clobber project directories before copying new files',
action='store_true', default=False)
parser.add_argument('-b', '--build',
help='Build the projects. Otherwise the projects are only copied.',
action='store_true')
parser.add_argument('--config',
help='Choose configuration to build (Debug or Release). Builds both '
'by default')
parser.add_argument('-x', '--experimental',
help='Build experimental projects', action='store_true')
parser.add_argument('-t', '--toolchain',
help='Build using toolchain. Can be passed more than once.',
action='append', default=[])
parser.add_argument('-d', '--dest',
help='Select which build destinations (project types) are valid.',
action='append')
parser.add_argument('projects', nargs='*',
help='Select which projects to build.')
parser.add_argument('-v', '--verbose', action='store_true')
# To setup bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_projects.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
options = parser.parse_args(args)
global verbose
if options.verbose:
verbose = True
buildbot_common.verbose = verbose
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
pepper_ver = str(int(build_version.ChromeMajorVersion()))
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
if not options.toolchain:
# Order matters here: the default toolchain for an example's Makefile will
# be the first toolchain in this list that is available in the example.
# e.g. If an example supports newlib and glibc, then the default will be
# newlib.
options.toolchain = ['pnacl', 'newlib', 'glibc', 'host', 'clang-newlib']
if 'host' in options.toolchain:
options.toolchain.remove('host')
options.toolchain.append(getos.GetPlatform())
Trace('Adding platform: ' + getos.GetPlatform())
ValidateToolchains(options.toolchain)
filters = {}
if options.toolchain:
filters['TOOLS'] = options.toolchain
Trace('Filter by toolchain: ' + str(options.toolchain))
if not options.experimental:
filters['EXPERIMENTAL'] = False
if options.dest:
filters['DEST'] = options.dest
Trace('Filter by type: ' + str(options.dest))
if options.projects:
filters['NAME'] = options.projects
Trace('Filter by name: ' + str(options.projects))
try:
project_tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit(str(e))
if verbose:
parse_dsc.PrintProjectTree(project_tree)
UpdateHelpers(pepperdir, clobber=options.clobber)
UpdateProjects(pepperdir, project_tree, options.toolchain,
clobber=options.clobber)
if options.build:
if options.config:
configs = [options.config]
else:
configs = ['Debug', 'Release']
for config in configs:
BuildProjects(pepperdir, project_tree, config=config, deps=False)
return 0
if __name__ == '__main__':
script_name = os.path.basename(sys.argv[0])
try:
sys.exit(main(sys.argv[1:]))
except parse_dsc.ValidationError as e:
buildbot_common.ErrorExit('%s: %s' % (script_name, e))
except KeyboardInterrupt:
buildbot_common.ErrorExit('%s: interrupted' % script_name)
| {
"content_hash": "4797b4c9c6b28f97f4c93c63798e9c3d",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 78,
"avg_line_length": 31.67055393586006,
"alnum_prop": 0.6671269446745834,
"repo_name": "Workday/OpenFrame",
"id": "f1a76c23686687c97db0e02fc936c17fbfea722f",
"size": "11052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "native_client_sdk/src/build_tools/build_projects.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/chaincoin/gitian.sigs.git'])
if not os.path.isdir('chaincoin-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/chaincoin/chaincoin-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('chaincoin'):
subprocess.check_call(['git', 'clone', 'https://github.com/chaincoin/chaincoin.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('chaincoin-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../chaincoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'chaincoin='+args.commit, '--url', 'chaincoin='+args.url, '../chaincoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/chaincoin-*.tar.gz build/out/src/chaincoin-*.tar.gz ../chaincoin-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'chaincoin='+args.commit, '--url', 'chaincoin='+args.url, '../chaincoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/chaincoin-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/chaincoin-*.zip build/out/chaincoin-*.exe ../chaincoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'chaincoin='+args.commit, '--url', 'chaincoin='+args.url, '../chaincoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/chaincoin-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/chaincoin-*.tar.gz build/out/chaincoin-*.dmg ../chaincoin-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/chaincoin-' + args.version + '-win-unsigned.tar.gz inputs/chaincoin-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../chaincoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/chaincoin-*win64-setup.exe ../chaincoin-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/chaincoin-*win32-setup.exe ../chaincoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/chaincoin-' + args.version + '-osx-unsigned.tar.gz inputs/chaincoin-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../chaincoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/chaincoin-osx-signed.dmg ../chaincoin-binaries/'+args.version+'/chaincoin-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../chaincoin/contrib/gitian-descriptors/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../chaincoin/contrib/gitian-descriptors/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../chaincoin/contrib/gitian-descriptors/gitian-osx.yml'])
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../chaincoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../chaincoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/chaincoin/chaincoin', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
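# With --detach-sign, the no-op command 'true' stands in for gpg, which appears
# intended to let gsign emit the assert files without actually GPG-signing them.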
# Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if not 'GITIAN_HOST_IP' in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if not 'LXC_GUEST_IP' in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
if args.setup:
setup()
os.chdir('chaincoin')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/chaincoin')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
| {
"content_hash": "14f1793675ee3a1194b9b32f235edac3",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 233,
"avg_line_length": 60.15151515151515,
"alnum_prop": 0.6489384670744872,
"repo_name": "chaincoin/chaincoin",
"id": "f7a7e508c8392bff1a6d315f7fd52b93b2286999",
"size": "13919",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.18",
"path": "contrib/gitian-build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "782437"
},
{
"name": "C++",
"bytes": "7493134"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "199073"
},
{
"name": "Makefile",
"bytes": "123254"
},
{
"name": "Objective-C",
"bytes": "3901"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "2532944"
},
{
"name": "QMake",
"bytes": "792"
},
{
"name": "Shell",
"bytes": "94132"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import test_util
from tensorflow_examples.lite.model_maker.core.task import hub_loader
class HubKerasLayerV1V2Test(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
("hub_module_v1_mini", True),
("saved_model_v2_mini", True),
("hub_module_v1_mini", False),
("saved_model_v2_mini", False),
)
def test_load_with_defaults(self, module_name, trainable):
inputs, expected_outputs = 10., 11. # Test modules perform increment op.
path = test_util.get_test_data_path(module_name)
layer = hub_loader.HubKerasLayerV1V2(path, trainable=trainable)
output = layer(inputs)
self.assertEqual(output, expected_outputs)
def test_trainable_variable(self):
path = test_util.get_test_data_path("hub_module_v1_mini_train")
layer = hub_loader.HubKerasLayerV1V2(path, trainable=True)
# Checks trainable variables.
self.assertLen(layer.trainable_variables, 2)
self.assertEqual(layer.trainable_variables[0].name, "a:0")
self.assertEqual(layer.trainable_variables[1].name, "b:0")
self.assertEqual(layer.variables, layer.trainable_variables)
# Checks non-trainable variables.
self.assertEmpty(layer.non_trainable_variables)
layer = hub_loader.HubKerasLayerV1V2(path, trainable=False)
# Checks trainable variables.
self.assertEmpty(layer.trainable_variables)
# Checks non-trainable variables.
self.assertLen(layer.non_trainable_variables, 2)
self.assertEqual(layer.non_trainable_variables[0].name, "a:0")
self.assertEqual(layer.non_trainable_variables[1].name, "b:0")
self.assertEqual(layer.variables, layer.non_trainable_variables)
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "78d8c9c40479e9c91ee03091a49c8577",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 39.12244897959184,
"alnum_prop": 0.7303077725612936,
"repo_name": "tensorflow/examples",
"id": "a3ba72117d116b19a2655e7c5737ba5a31c7103b",
"size": "2526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_examples/lite/model_maker/core/task/hub_loader_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0026_auto_20170313_1144'),
]
operations = [
migrations.AlterModelManagers(
name='profile',
managers=[
],
),
]
| {
"content_hash": "f35acc25ba2c246b8d3a72a3fa28d530",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 43,
"avg_line_length": 18.27777777777778,
"alnum_prop": 0.5592705167173252,
"repo_name": "jumbocodespring2017/bostonathleticsassociation",
"id": "87b3289778f07389c8002ca26e600797318cbd98",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "back-end/api/migrations/0027_auto_20170405_1825.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "583725"
},
{
"name": "HTML",
"bytes": "24194"
},
{
"name": "JavaScript",
"bytes": "3394213"
},
{
"name": "Python",
"bytes": "67089"
}
],
"symlink_target": ""
} |
""" Get component information from metrics file. """
import sys, os
import logging
import json
from os.path import join, dirname
import functools
from collections import defaultdict
import cStringIO as StringIO
import numpy as np
from lxml import etree
from rpl.tools.geometry.geom_utils import trans_mat_from_avm_xml
from rpl.tools.geometry.read_cad_metrics import matrix_from_nx_ny_ro
from rpl.tools.geometry.surface_io import open_geom
## Get rid of any existing handlers (e.g. if running interactively in ipython)
logging.getLogger().handlers = []
## Main logging: output everything, including a litany of debug messages
logging.basicConfig(filename="test_bench_debug.log",
filemode="w",
format="%(levelname)s %(asctime)s %(message)s",
level=logging.DEBUG)
## Also make end-user friendly log file (showing informational messages and TB results)
log_full = logging.FileHandler('test_bench.log', mode='w')
log_full.setLevel(logging.INFO)
log_full.setFormatter(logging.Formatter("%(levelname)s %(asctime)s %(message)s"))
logging.getLogger("").addHandler(log_full)
## Also log DEBUG and above to the console
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
logging.getLogger("").addHandler(console)
def excepthook(*args):
"""
Ensure that any uncaught exception is logged
"""
logging.getLogger().error('Uncaught exception: ', exc_info=args)
# Uncaught exceptions and assertions should write error to log
sys.excepthook = excepthook
## Conversion factors to SI units in case CAD has odd units
si_units = ["m", "kg", "Pa", "N", "rad"]
conv_dict = {"inch": 0.0254, "millimeter": 0.001, "mm": 0.001, "lbs": 0.453592,
"g": 0.001, "slug": 14.5939029, "kg*mm^2": 1e-06,
"deg": 0.01745}
conv_units_dict = {"inch": "m", "millimeter": "m", "mm": "m", "lbs": "kg",
"g": "kg", "slug": "kg", "kg*mm^2": "kg*m^2",
"deg": "rad"}
## Compiled XPath functions to find datums properties and primitives
xp_datum = etree.XPath(r"//Datum[@DatumName = $v]")
xp_metric = etree.XPath(r"//ModelMetric[@Name = $v]/Value/ValueExpression//Value")
xp_metric_data_type = etree.XPath(r"//ModelMetric[@Name = $v]/Value/@DataType")
xp_metric_unit = etree.XPath(r"//ModelMetric[@Name = $v]/Value/@Unit")
xp_propt = etree.XPath(r"//Value[@ID = concat('nv.',$v)]/ValueExpression/Value")
xp_propt_data_type = etree.XPath(r"//Value[@ID = concat('nv.',$v)]/@DataType")
xp_propt_unit = etree.XPath(r"//Value[@ID = concat('nv.',$v)]/@Unit")
xp_propt_default = etree.XPath(r"//Value[@ID = concat('nv.',$v)]/ValueExpression/Default/Value")
xp_propt_source = etree.XPath(r"//Value[@ID = concat('nv.',$v)]/ValueExpression/@ValueSource")
## Load in the predefined exclusion sets that can strip unneeded classes to make a model smaller
with open(join(dirname(__file__), "geometry_sets.js"), "r") as g_sets_json:
## Convert json lists to sets
geom_sets = {k: set(v) for k, v in json.load(g_sets_json).items()}
id_to_class_map_file = ""
cad_metrics_xml_root = None
path_to_instance_xmls = ""
stl_loc = ""
instances_by_class = defaultdict(dict)
output_json_file = ""
def load_settings(settings_file):
"""
    Load the json file ``settings_file`` and use it to build the internal instances-by-class
    dictionary, as well as set paths to various files.
"""
global id_to_class_map_file, cad_metrics_xml_root, path_to_instance_xmls, stl_loc, output_json_file
try:
settings = json.load(open(settings_file, "r"))
msg = StringIO.StringIO()
with open(settings_file, "r") as set_out:
for line in set_out:
msg.write(line)
logging.info("Read in the following settings from file {}:\n {}".format(
settings_file, msg.getvalue()))
except (IOError, ValueError) as err:
msg = "Could not parse the json settings file, {}.".format(settings_file)
msg = msg + " Check the file and please try again. Error: {}".format(err)
raise ValueError(msg)
for r in ["metrics_file", "instance_file", "path_to_instance_stls", "path_to_instance_xmls",
"output_json_file"]:
if r not in settings:
msg = "Required setting '{}' not found in settings file.".format(r)
logging.error(msg)
raise ValueError(msg)
try:
cad_metrics_xml_root = etree.parse(settings["metrics_file"])
except IOError:
msg = "Cad metrics files '{}' could not be loaded.".format(settings["metrics_file"])
logging.error(msg)
raise ValueError(msg)
id_to_class_map_file = settings["instance_file"]
stl_loc = settings["path_to_instance_stls"]
path_to_instance_xmls = settings["path_to_instance_xmls"]
output_json_file = settings["output_json_file"]
try:
with open(id_to_class_map_file, "r") as json_data:
data = json.load(json_data)
except IOError:
msg = "Id to class map file '{}' could not be loaded.".format(id_to_class_map_file)
logging.error(msg)
raise ValueError(msg)
for d in data:
## Store the model path keyed by class then instance_id ( [1:-1] removes curly braces)
instances_by_class[d["Classification"]][d["InstanceID"][1:-1]] = d["ModelPath"]
return settings
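## Hedged illustration (not part of the original tool): a minimal sketch of the json
## settings structure that load_settings() above checks for. All file and directory
## names below are hypothetical placeholders.
def _example_settings_dict():
    """Return a sketch of the five required keys expected by ``load_settings``."""
    return {
        "metrics_file": "CADAssembly_metrics.xml",       # hypothetical path
        "instance_file": "instance_to_class_map.json",   # hypothetical path
        "path_to_instance_stls": "stl/",                 # hypothetical directory
        "path_to_instance_xmls": "acm/",                 # hypothetical directory
        "output_json_file": "test_bench_output.json",    # hypothetical path
    }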
def get_all_geom_set():
"""
Returns a set of all component classes used in the assembly.
"""
return set(instances_by_class.keys())
def get_data(parts_of_interest):
"""
Given a dictionary of classes with sub-dicts of required properties, returns a dictionary keyed
first by class and then by instance_id of those extracted properties.
"""
component_dict = defaultdict(dict)
    ## Verbose log entry detailing the components and attributes the test bench is looking for
text_head = "This test bench will be looking for the following classes and attributes:\n"
text_str = ""
for comp_class, propt in parts_of_interest.items():
text_str = text_str + "\t{}\n".format(comp_class)
for prop_type, prop_value in propt.items():
if prop_type.lower() == "required":
min_num, max_num = get_class_req_num(prop_value)
if max_num == 1e3:
max_num = "any"
text_str = text_str + "\t\t{}: min: {}, max: {}\n".format(prop_type, min_num,
max_num)
else:
text_str = text_str + "\t\t{}: {}\n".format(prop_type, prop_value)
logging.info(text_head + text_str)
## Loop over each part class and get the attributes of interest
for part_class, attr_intr in parts_of_interest.items():
num_of_class_inst = 0
try:
min_num, max_num = get_class_req_num(attr_intr["Required"])
except (KeyError, NameError):
min_num = 1
## Set max_num to a high number so that any number of instances can be found
max_num = 1e3
# min_num, max_num = get_class_req_num(attr_intr["Required"])
for inst_num, inst in enumerate(instances_by_class[part_class]):
num_of_class_inst = inst_num + 1
if num_of_class_inst < min_num or num_of_class_inst > max_num:
if max_num == 1e3:
max_num = "any"
msg = "Could not find the required number of instances for class {}.".format(part_class)
msg = msg + " Minimum number required, {}. Maximum number required, {}.".format(
min_num, max_num)
msg = msg + " Found {}".format(num_of_class_inst)
logging.error(msg)
# raise ValueError(msg)
## Loop over each instance of the class getting the attributes of interest
for instance_id, model_path in instances_by_class[part_class].items():
part = _get_part(instance_id, model_path)
component_dict[part_class][instance_id] = component(attr_intr,
part["trans"],
model_path,
part_class,
instance_id)
component_dict[part_class][instance_id].update({"trans": part["trans"],
"name": part["name"]})
return component_dict
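## Hedged illustration: a sketch of the ``parts_of_interest`` dictionary that
## get_data() above expects. The class, datum and property names are invented for
## the example; the keys "Required", "Datum", "Property", "PrimitiveProperty" and
## "ModelMetric" are the ones this module actually inspects.
def _example_parts_of_interest():
    return {
        "Wheel": {                        # hypothetical component class
            "Required": "{4,8}",          # between 4 and 8 instances expected
            "Datum": ["HUB_CENTER"],      # hypothetical datum name
            "Property": ["tire_width"],   # hypothetical property name
        },
        "Engine": {                       # hypothetical component class
            "Required": "{1}",            # exactly one instance expected
            "ModelMetric": ["mass"],      # hypothetical metric name
        },
    }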
def load_geometry(class_set, single_file=False):
"""
Given a set of classes return a dictionary with instance_id keys each containing a surface
dictionary with "x", "y", "z", and "tris" keys.
If ``single_file == True`` all the surfaces are merged together and a dictionary containing the
"x", "y", "z", and "tris" keys is returned (i.e. no instance_id keys used).
The returned surface(s) are **always** transformed to be in their correct vehicle position.
"""
surf = {} if not single_file else {"x": np.empty((0,), dtype=np.float32),
"y": np.empty((0,), dtype=np.float32),
"z": np.empty((0,), dtype=np.float32),
"tris": np.empty((0, 3), dtype=np.int32)}
## Loop through each class of component asked for
for part_class in class_set:
if part_class not in instances_by_class:
logging.warn("No parts of class '{}' to load in.".format(part_class))
continue
## Loop over each component instance of the class
for inst_id, model_path in instances_by_class[part_class].items():
try:
instance_data = _get_part(inst_id, model_path)
except ValueError:
## For now we will carry on. ACM file is unloadable (probably not present)
## Already logged error in _get_part()
continue
try:
g_path = join(stl_loc, instance_data["stl_name"])
geom = open_geom(g_path)
transform_geometry(geom, instance_data["trans"], 0.001)
logging.debug("Loaded instance {} stl_file {}".format(inst_id, g_path))
except (KeyError, IOError) as e:
## For now we will log and carry on but this may mean results are wrong.
msg = "Cannot load instance '{}' stl_file '{}'. Got: {}".format(inst_id, g_path, e)
logging.error(msg)
continue
if single_file:
## Increment the node refs in the new tris to point to appended position
geom["tris"] += len(surf["x"])
surf = {k: np.concatenate((surf[k], geom[k])) for k in ["x", "y", "z", "tris"]}
else:
## Store the new surface under the instance_id key
surf[inst_id] = geom
# Add in the part class explicitly; don't make TBs guess based on instance name.
surf[inst_id]['part_class'] = part_class
return surf
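## Hedged usage sketch: two ways of calling load_geometry() above. load_settings()
## must have been called first so that instances_by_class is populated; the "Wheel"
## class name is hypothetical.
def _example_load_geometry_usage():
    whole_vehicle = load_geometry(get_all_geom_set(), single_file=True)  # one merged surface
    wheels = load_geometry({"Wheel"})                   # dict keyed by instance_id
    return whole_vehicle["tris"].shape[0], len(wheels)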
def transform_geometry(geom, trans_matrix=None, pre_trans_scale_factor=1.0):
"""
Scale and translate x, y, and z coords of ``geom`` **in place**.
The scaling in ``pre_trans_scale_factor`` is applied before the transform.
"""
scaled = {k: geom[k] * pre_trans_scale_factor for k in "xyz"}
if trans_matrix is None:
geom.update(scaled)
else:
nodes = np.vstack((scaled["x"], scaled["y"], scaled["z"], np.ones_like(geom["x"])))
nodes = np.dot(trans_matrix, nodes).T[:, :3]
geom.update({k: nodes[:, i] for i, k in enumerate("xyz")})
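## Hedged worked example: a pure translation applied to a single point, with the
## same mm -> m pre-scale used by the STL loader above. The numbers are arbitrary.
def _example_transform_geometry():
    geom = {"x": np.array([1000.0]), "y": np.array([0.0]), "z": np.array([0.0])}
    trans = np.eye(4, dtype=np.float32)
    trans[:3, 3] = [0.5, 0.0, 0.0]                        # translate +0.5 m along x
    transform_geometry(geom, trans, pre_trans_scale_factor=0.001)  # 1000 mm -> 1.0 m
    return geom["x"][0], geom["y"][0], geom["z"][0]       # -> (1.5, 0.0, 0.0)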
def component(prop_dict, trans, model_path, part_class, instance_id):
"""
    Returns a dictionary of the properties requested in ``prop_dict`` for an instance of a class.
    ``trans`` is the transformation matrix to put this instance into its global vehicle position.
    ``model_path`` is the location of this instance's .acm file.
"""
## Parse the XML file and extract the data fields using xpath
xr = etree.parse(model_path)
to_global = functools.partial(np.dot, trans)
d = {"datums": {}, "properties": {}, "model_metrics": {}}
try:
for prop_type, props in prop_dict.items():
if prop_type == "Datum":
for k in props:
dat_loc = xp_datum(xr, v=k)
if len(dat_loc) > 0:
loc = trans_mat_from_avm_xml(dat_loc[0])
else:
loc = 0
msg = "Problem retrieving '{}' called '{}' from '{}'.".format(prop_type, k,
model_path)
logging.error(msg)
raise ValueError(msg)
d["datums"][k] = {"local": loc, "global": to_global(loc)}
if prop_type == "Property" or prop_type == "PrimitiveProperty":
for k in props:
data_type = _get_data_type(prop_type, k, xr)
if data_type == "Real" or data_type == "" or \
data_type == "Integer":
unit = xp_propt_unit(xr, v=k)
if len(unit) > 0:
unit = unit[0]
if unit not in si_units:
try:
conv_fact = conv_dict[unit]
except:
logging.error("{}: {} in {} has an unknown unit: '{}'".format(
prop_type, k, part_class, unit))
conv_fact = 1.0
else:
conv_fact = 1.0
try:
orig_value = float(xp_propt(xr, v=k)[0].text)
except:
try:
orig_value = float(xp_propt_default(xr, v=k)[0].text)
except:
value_source = xp_propt_source(xr, v=k)[0]
mm_name = value_source.split(".")[-1]
msg = "{}: {} in {} has no fixed value.".format(prop_type, k,
part_class)
msg = msg + "Looking for a ModelMetric with name {}".format(mm_name)
logging.debug(msg)
orig_value, conv_fact = try_model_metric(prop_type, mm_name, xr,
part_class)
converted_value = orig_value * conv_fact
if unit not in si_units:
if unit in conv_units_dict.keys():
converted_unit = conv_units_dict[unit]
else:
converted_unit = unit
msg = "In component '{}' for field '{}' found non-SI unit --> {}[{}] converted to {}[{}]" \
.format(instance_id, k, orig_value, unit, converted_value,
converted_unit)
logging.debug(msg)
d["properties"][k] = converted_value
else:
logging.debug("Returning {}: {} in {} as a string as indicated by the "
"DataType, {}.".format(prop_type, k, part_class, data_type))
try:
d["properties"][k] = xp_propt(xr, v=k)[0].text
except:
d["properties"][k] = xp_propt_default(xr, v=k)[0].text
if prop_type == "ModelMetric":
for k in props:
data_type = _get_data_type(prop_type, k, xr)
if data_type == "Real" or data_type == "" or \
data_type == "Integer":
unit = xp_metric_unit(xr, v=k)
if len(unit) > 0:
unit = unit[0]
if unit not in si_units:
try:
conv_fact = conv_dict[unit]
except:
logging.error("{}: {} in {} has an unknown unit: '{}'".format(
prop_type, k, part_class, unit))
conv_fact = 1.0
else:
conv_fact = 1.0
orig_value = float(xp_metric(xr, v=k)[0].text)
converted_value = orig_value * conv_fact
if unit not in si_units:
if unit in conv_units_dict.keys():
converted_unit = conv_units_dict[unit]
else:
converted_unit = unit
msg = "In component '{}' for field '{}' found non-SI unit --> {}[{}] converted to {}[{}]" \
.format(instance_id, k, orig_value, unit, converted_value,
converted_unit)
logging.debug(msg)
d["model_metrics"][k] = converted_value
else:
logging.debug("Returning {}: {} in {} as a string as indicated by the "
"DataType, {}.".format(prop_type, k, part_class, data_type))
d["model_metrics"][k] = xp_metric(xr, v=k)[0].text
except KeyError:
msg = "Problem retrieving '{}' called '{}' from '{}'.".format(prop_type, k, model_path)
logging.error(msg)
raise ValueError(msg)
return d
## Pre-compiled xpath functions for extracting transform information
x_id = etree.XPath(r"//ChildMetric[@ComponentInstanceID = $inst]")
x_rot = etree.XPath(r"RotationMatrix//Column/@Value")
x_resource = etree.XPath(r"//ResourceDependency[@ID = 'cad.path']/@Name")
x_met_comp = etree.XPath(r"//MetricComponents//MetricComponent")
x_name = etree.XPath(r"@Name")
def _get_data_type(prop_type, k, xr):
"""
get the data_type for properties, primitive properties, and ModelMetrics
if it exists. If no DataType is specified return a blank string
"""
if prop_type == "Property":
try:
data_type = xp_propt_data_type(xr, v=k)[0]
except:
data_type = ""
if prop_type == "PrimitiveProperty":
try:
data_type = xp_propt_data_type(xr, v=k)[0]
except:
data_type = ""
if prop_type == "ModelMetric":
try:
data_type = xp_metric_data_type(xr, v=k)[0]
except:
data_type = ""
return data_type
def _get_part(instance, model_path):
"""
    Return a dict of part geometric information for ``instance`` with a .acm file at ``model_path``.
"""
part_info = None
try:
x_root = etree.parse(model_path)
except (KeyError, IOError) as error:
msg = "Unreadable ACM file at '{}' for instance '{}' exception was {}".format(model_path,
instance,
error)
logging.error(msg)
raise ValueError(msg)
stl_prefix = x_resource(x_root)[0][:-4]
name = x_name(x_root)[0]
    # find where in the list the component is
for child_num, child in enumerate(cad_metrics_xml_root.xpath(r"//ChildMetric")):
if child.get("ComponentInstanceID")[1:-1] == instance:
inst_num = child.get("MetricID")
for child_metric in x_met_comp(cad_metrics_xml_root):
if child_metric.get("MetricID") == str(inst_num):
length_unit = child_metric.xpath(r"Units/@Distance")[0]
try:
conv_factor = conv_dict[length_unit]
                if length_unit not in si_units:
converted_unit = conv_units_dict[length_unit]
msg = "In component '{}' for field 'Distance' found ".format(stl_prefix)
msg = msg + "non-SI unit --> {} converted to {}." \
.format(length_unit, converted_unit)
logging.debug(msg)
except KeyError:
logging.error("{} has an unknown unit for length: '{}'".format(
stl_prefix, length_unit))
conv_factor = 1.0
stl_suffix = "_asm.stl" if child_metric.get("Type") == "ASSEMBLY" else "_prt.stl"
part_dict = {"stl_name": child_metric.get("Name") + stl_suffix,
"instance": instance,
"conv_fac": conv_factor,
"name": name,
"index": inst_num}
part_info = part_dict
break
if part_info is None:
## Couldn't find the instance in the metrics file
msg = "No instance '{}' with cad name '{}' in metrics file.".format(instance, str(inst_num))
logging.error(msg)
raise ValueError(msg)
for child_metric in x_id(cad_metrics_xml_root, inst="{" + instance + "}"):
mat = np.eye(4, dtype=np.float32)
flat = [float(f) for f in x_rot(child_metric)]
mat[0, :3] = flat[0:3]
mat[1, :3] = flat[3:6]
mat[2, :3] = flat[6:9]
## Check if this is a genuine VU file
if cad_metrics_xml_root.getroot().get("VersionInfo"):
logging.debug("Transposing rotation matrix contained in genuine VU metrics XML file")
## Will recalculate a fresh matrix because the ones in the file usually include
## a scale component too
n_x = np.dot(mat[:3, :3], np.array([1, 0, 0]))
n_y = np.dot(mat[:3, :3], np.array([0, 1, 0]))
scale = part_info["conv_fac"]
tran = [float(child_metric.xpath(r"Translation/@" + k)[0]) * scale for k in "XYZ"]
part_info.update({"trans": matrix_from_nx_ny_ro(n_x, n_y, tran)})
return part_info
def get_class_req_num(required):
"""Assigned value for required number of classes to find. If not assigned set min_num to 0"""
# required = attr_intr["Required"]
try:
required
except NameError:
min_num = 1
## Set max_num to a high number so that any number of instances can be found
max_num = 1e3
## set min and max number of class to find in assembly
if "*" in required:
min_num = 0
## Set max_num to a high number so that any number of instances can be found
max_num = 1e3
elif "+" in required:
min_num = 1
## Set max_num to a high number so that any number of instances above the min can be found
max_num = 1e3
elif "," in required:
req_num = required.split(",")
min_num = int(req_num[0].split("{")[1])
max_num = int(req_num[1].split("}")[0])
else:
req_num = required.split("{")
min_num = max_num = int(req_num[1].split("}")[0])
return min_num, max_num
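## Hedged illustration of the "Required" mini-syntax handled above; these asserts
## only document the parsing, they are not part of the original test bench API.
def _example_required_specs():
    assert get_class_req_num("{2,5}") == (2, 5)    # explicit minimum and maximum
    assert get_class_req_num("{3}") == (3, 3)      # exactly three instances
    assert get_class_req_num("+") == (1, 1e3)      # at least one, effectively unbounded
    assert get_class_req_num("*") == (0, 1e3)      # any number, including none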
def try_model_metric(prop_type, k, xr, part_class):
"""
If a property has no value and the attribute ValueSource is defined,
    check for a ModelMetric with the value
"""
unit = xp_metric_unit(xr, v=k)
if len(unit) > 0:
unit = unit[0]
if unit not in si_units:
try:
conv_fact = conv_dict[unit]
except:
logging.error("{}: {} in {} has an unknown unit: '{}'".format(
prop_type, k, part_class, unit))
conv_fact = 1.0
else:
conv_fact = 1.0
orig_value = float(xp_metric(xr, v=k)[0].text)
return orig_value, conv_fact
def write_results(results_dict):
"""
Write ``results_dict`` to json file ``output_json_file`` and echo contents to log.
"""
with open(output_json_file, "w") as out_metrics:
json.dump(results_dict, out_metrics, indent=4)
msg = StringIO.StringIO()
with open(output_json_file, "r") as check:
for line in check:
msg.write(line)
logging.info("Wrote to json file:\n{}".format(msg.getvalue()))
def get_ground_pts():
"""
    Using the supplied computed values xml, gets the three points in the ground plane.
"""
xml_name = r"ComputedValues.xml"
if os.path.exists(xml_name):
xml_root = etree.parse(xml_name)
ground_pts = []
for child in xml_root.xpath(r"//ComplexMetric[@SubType = 'GROUND']/Metric"):
unit = child.get("Units")
conv_factor = conv_dict[unit]
plane_coords = child.get("ArrayValue").split(";")
x = float(plane_coords[0]) * conv_factor
y = float(plane_coords[1]) * conv_factor
z = float(plane_coords[2]) * conv_factor
ground_pts.append(np.array([x, y, z]))
return ground_pts
else:
raise ValueError(
"{} is missing. Cannot find up direction or vectors in the ground plane. The Test Bench must compute these values.".format(
xml_name))
def get_up_vector():
"""
Using the supplied computed values xml, gets the three points in the ground plane.
Computes two vectors in the ground plane and determines the normal vector, aka "up direction"
"""
ground_pts = get_ground_pts()
plane_vecs = np.array([ground_pts[1] - ground_pts[0],
ground_pts[2] - ground_pts[0]])
up_vec = np.cross(plane_vecs[0], plane_vecs[1])
# Return normalized vector of unit length
up_vec /= np.linalg.norm(up_vec)
# If ComputedValues.xml doesn't exist, get_ground_pts() will raise an exception
return up_vec, plane_vecs, ground_pts
def get_veh_mass():
"""Extract the vehicle mass from CAD assembly metrics file
Will fail or give weird results if the overall assembly metrics are not printed out before
    info about individual components.
"""
# The whole-assembly section is always the first MetricComponent in the file (ID=1)
# TODO: consolidate into single xpath expression
assembly_node = cad_metrics_xml_root.xpath(r"//MetricComponent")[0]
mass = assembly_node.xpath("./Scalars/Scalar[@Name='Mass']/@Value")[0]
return float(mass)
def get_veh_cg():
"""Extract the vehicle CG from CAD assembly metrics file
Will fail or give weird results if the overall assembly metrics are not printed out before
    info about individual components."""
assembly_node = cad_metrics_xml_root.xpath(r"//MetricComponent")[0]
cg = assembly_node.xpath("./CG")[0]
return np.array([float(cg.get("X")),
float(cg.get("Y")),
float(cg.get("Z"))]) | {
"content_hash": "dfe593a65fe0d3300a289852f571037f",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 135,
"avg_line_length": 42.274924471299094,
"alnum_prop": 0.5221539341099121,
"repo_name": "pombredanne/metamorphosys-desktop",
"id": "826bf3aafd19375714dfade5a5cd4700b1f4da95",
"size": "27986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metamorphosys/META/analysis_tools/PYTHON_RICARDO/rpl/tools/api/test_bench_api.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "10683"
},
{
"name": "Assembly",
"bytes": "117345"
},
{
"name": "Awk",
"bytes": "3591"
},
{
"name": "Batchfile",
"bytes": "228118"
},
{
"name": "BitBake",
"bytes": "4526"
},
{
"name": "C",
"bytes": "3613212"
},
{
"name": "C#",
"bytes": "11617773"
},
{
"name": "C++",
"bytes": "51448188"
},
{
"name": "CMake",
"bytes": "3055"
},
{
"name": "CSS",
"bytes": "109563"
},
{
"name": "Clojure",
"bytes": "37831"
},
{
"name": "Eagle",
"bytes": "3782687"
},
{
"name": "Emacs Lisp",
"bytes": "8514"
},
{
"name": "GAP",
"bytes": "49124"
},
{
"name": "Groff",
"bytes": "2178"
},
{
"name": "Groovy",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "4025250"
},
{
"name": "Inno Setup",
"bytes": "35715"
},
{
"name": "Java",
"bytes": "489537"
},
{
"name": "JavaScript",
"bytes": "167454"
},
{
"name": "Lua",
"bytes": "1660"
},
{
"name": "Makefile",
"bytes": "97209"
},
{
"name": "Mathematica",
"bytes": "26"
},
{
"name": "Matlab",
"bytes": "80874"
},
{
"name": "Max",
"bytes": "78198"
},
{
"name": "Modelica",
"bytes": "44541139"
},
{
"name": "Objective-C",
"bytes": "34004"
},
{
"name": "Perl",
"bytes": "19285"
},
{
"name": "PostScript",
"bytes": "400254"
},
{
"name": "PowerShell",
"bytes": "19749"
},
{
"name": "Processing",
"bytes": "1477"
},
{
"name": "Prolog",
"bytes": "3121"
},
{
"name": "Protocol Buffer",
"bytes": "58995"
},
{
"name": "Python",
"bytes": "5517835"
},
{
"name": "Ruby",
"bytes": "4483"
},
{
"name": "Shell",
"bytes": "956773"
},
{
"name": "Smarty",
"bytes": "37892"
},
{
"name": "TeX",
"bytes": "4183594"
},
{
"name": "Visual Basic",
"bytes": "22546"
},
{
"name": "XSLT",
"bytes": "332312"
}
],
"symlink_target": ""
} |
from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.bika_listing import BikaListingView
from Products.Archetypes.Registry import registerWidget
from Products.Archetypes.Widget import TypesWidget
from Products.CMFCore.utils import getToolByName
class AnalysisRequestTemplatesView(BikaListingView):
""" BIKA listing to display ARTemplates for an SRTemplate.
"""
def __init__(self, context, request):
super(AnalysisRequestTemplatesView, self).__init__(context, request)
self.catalog = "bika_setup_catalog"
self.context_actions = {}
self.base_url = self.context.absolute_url()
self.view_url = self.base_url
self.show_sort_column = False
self.show_select_row = False
self.show_select_all_checkbox = False
self.show_column_toggles = False
self.show_select_column = True
self.show_categories = True
self.expand_all_categories = True
self.pagesize = 50
self.title = self.context.translate(_("AR Templates"))
self.icon = self.portal_url + "/++resource++bika.lims.images/artemplate_big.png"
self.form_id = "artemplates"
self.columns = {
'title': {
'title': _('AR Template Title'),
'index': 'sortable_title',
'sortable': True,
},
'SamplePoint': {
'title': _('Sample Point'),
'index': 'sortable_title',
'sortable': True,
},
'SampleType': {
'title': _('Sample Type'),
'index': 'sortable_title',
'sortable': True,
},
'Composite': {
'title': _('Composite Y/N'),
'index': 'sortable_title',
'sortable': True,
},
'ContainerTitle': {
'title': _('Container Title'),
'index': 'sortable_title',
'sortable': True,
},
'ContainerVolume': {
'title': _('Container Volume'),
'index': 'sortable_title',
'sortable': True,
},
'Preservation': {
'title': _('Preservation'),
'index': 'sortable_title',
'sortable': True,
},
'Sampler': {
'title': _('Sampler'),
'sortable': True,
},
'PreparationMethod': {
'title': _('Preservation Method'),
'index': 'sortable_title',
'sortable': True,
},
}
self.review_states = [
{'id': 'default',
'title': _('Active'),
'contentFilter': {'inactive_state': 'active'},
'transitions': [{'id': 'deactivate'}, ],
'columns': ['title',
'SamplePoint',
'SampleType',
'Composite',
'ContainerTitle',
'ContainerVolume',
'Preservation',
#'Sampler',
'PreparationMethod']},
{'id': 'inactive',
'title': _('Dormant'),
'contentFilter': {'inactive_state': 'inactive'},
'transitions': [{'id': 'activate'}, ],
'columns': ['title',
'SamplePoint',
'SampleType',
'Composite',
'ContainerTitle',
'ContainerVolume',
'Preservation',
#'Sampler',
'PreparationMethod']},
{'id': 'all',
'title': _('All'),
'contentFilter': {},
'columns': ['title',
'SamplePoint',
'SampleType',
'Composite',
'ContainerTitle',
'ContainerVolume',
'Preservation',
#'Sampler',
'PreparationMethod']},
]
def contentsMethod(self, contentFilter):
return self.context.getARTemplates()
def _buildFromPerPartition(self, item, partition):
"""
This function will get the partition info and then it'll write the container and preservation data
to the dictionary 'item'
        :param item: a dict which contains the ARTemplate data columns
:param partition: a dict with some partition info
:return: the item dict with the partition's data
"""
uc = getToolByName(self, 'uid_catalog')
container = uc(UID=partition.get('container_uid', ''))
preservation = uc(UID=partition.get('preservation_uid', ''))
if container:
container = container[0].getObject()
item['ContainerTitle'] = container.title
item['replace']['ContainerTitle'] = "<a href='%s'>%s</a>" % \
(container.absolute_url(), item['ContainerTitle'])
item['ContainerVolume'] = container.getCapacity()
else:
item['ContainerTitle'] = ''
item['ContainerVolume'] = ''
if preservation:
preservation = preservation[0].getObject()
item['Preservation'] = preservation.title
item['replace']['Preservation'] = "<a href='%s'>%s</a>" % \
(preservation.absolute_url(), item['Preservation'])
else:
item['Preservation'] = ''
item['PreparationMethod'] = ''
return item
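    # Hedged illustration: each ``partition`` handed to _buildFromPerPartition is
    # expected to carry at least {'container_uid': '<uid>', 'preservation_uid': '<uid>'};
    # a missing or empty uid simply leaves the corresponding columns blank.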
def folderitems(self):
items = BikaListingView.folderitems(self)
new_items = []
for item in items:
if not item.has_key('obj'): continue
obj = item['obj']
# Updating some ARTemplate columns
title_link = "<a href='%s'>%s</a>" % (item['url'], item['title'])
item['replace']['title'] = title_link
            if obj.getSamplePoint():
                item['SamplePoint'] = obj.getSamplePoint().title
                item['replace']['SamplePoint'] = "<a href='%s'>%s</a>" % \
                    (obj.getSamplePoint().absolute_url(), item['SamplePoint'])
            else:
                item['SamplePoint'] = ''
if obj.getSampleType():
item['SampleType'] = obj.getSampleType().title
item['replace']['SampleType'] = "<a href='%s'>%s</a>" % \
(obj.getSampleType().absolute_url(), item['SampleType'])
else:
item['SampleType'] = ''
item['Composite'] = obj.getComposite()
img_url = '<img src="'+self.portal_url+'/++resource++bika.lims.images/ok.png"/>'
item['replace']['Composite'] = img_url if obj.getComposite() else ' '
partitions = obj.getPartitions()
for partition in partitions:
c_item = item.copy()
                # We have to make a copy of 'replace' because it's a reference to a dict object
c_item['replace'] = item['replace'].copy()
# Adding the partition info
c_item = self._buildFromPerPartition(c_item, partition)
# Adding the ARTemplate item to the future list to display
new_items.append(c_item)
return new_items
| {
"content_hash": "bb86ea2396fa2c3db1bf2868f799fe48",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 106,
"avg_line_length": 41.60732984293194,
"alnum_prop": 0.48257203976343277,
"repo_name": "hocinebendou/bika.gsoc",
"id": "89051e4769fc1f306f605da5ba041a6bcc858898",
"size": "7947",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bika/lims/browser/srtemplate/artemplates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
"""
Created on Sun Apr 19 23:31:19 2015
@author: willy
"""
import numpy as np
from scipy.integrate import quad
import scipy
import os
import matplotlib.pyplot as plt
from scipy.stats import kstest
from scipy.stats import chisquare
from mpl_toolkits.mplot3d.axes3d import Axes3D
class MSPSCModel:
"""
    This class allows computing the pdf and the cdf, as well as the distribution
    of the number of differences, under the Multiple Step Population Size Change model.
    It needs the values of t_k (the moments when the population size changes),
    the values of \lambda_k (the population size change factors, or the IICR)
    and \theta (the scaled mutation rate per base multiplied by the length
    of the sequence)
"""
def __init__(self, t_k_values, lambda_k_values, theta):
self.t_k_values = t_k_values
self.lambda_k_values = lambda_k_values
self.theta = theta
def pdf(self, t, tuple_of_params=0):
if tuple_of_params != 0:
t_list = tuple_of_params[0]
lambda_list = tuple_of_params[1]
else:
t_list = self.t_k_values
lambda_list = self.lambda_k_values
# find the term_n
term_n = len(t_list) - len(np.where(t_list-t>0)[0]) - 1
exponent = -sum(np.true_divide(t_list[1:term_n+1]-t_list[:term_n], lambda_list[:term_n]))
exponent -= np.true_divide(t-t_list[term_n], lambda_list[term_n])
return np.true_divide(np.exp(exponent), lambda_list[term_n])
def cdf(self, t, tuple_of_params):
t_list = tuple_of_params[0]
lambda_list = tuple_of_params[1]
# find the term_n
term_n = len(t_list) - len(np.where(t_list-t>0)[0]) - 1
exponent = -np.sum(np.true_divide(t_list[1:term_n+1]-t_list[:term_n], lambda_list[:term_n]))
exponent -= np.true_divide(t-t_list[term_n], lambda_list[term_n])
return 1-np.exp(exponent)
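    # Reference sketch of what pdf/cdf above compute: for t in [t_n, t_{n+1}) with
    # size factor lambda_k on each interval [t_k, t_{k+1}), the coalescence time has
    #   pdf(t) = (1/lambda_n) * exp(-sum_{k<n} (t_{k+1}-t_k)/lambda_k - (t-t_n)/lambda_n)
    #   cdf(t) = 1 - exp(-sum_{k<n} (t_{k+1}-t_k)/lambda_k - (t-t_n)/lambda_n)
    # i.e. a piecewise exponential distribution with rate 1/lambda_n on each interval.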
def compute_factors_vector(self, t_list, lambda_list):
# Computes the factor that will multiply the integral I_k in
        # the development of the integral as a sum of integrals
t_k = np.array(t_list)
lambda_k = np.array(lambda_list)
temp_vector = np.true_divide(t_k[1:]-t_k[:-1], lambda_k[:-1])
temp_vector2 = np.true_divide(t_k, lambda_k)
exponent = -np.cumsum(temp_vector) + temp_vector2[1:]
temp_result = np.ones(len(t_k))
temp_result[1:] = np.exp(exponent)
return np.true_divide(temp_result, lambda_k)
def compute_dict_integral(self, t_list, lambda_list, k_max, theta):
        # Computes all the values for the integrals and returns them in a dictionary
dict_integrals = {}
        # Compute all the integrals for every interval [t_n, t_n+1]
for i in range(len(t_list)-1):
c = 2*theta + np.true_divide(1, lambda_list[i])
dict_integrals[(t_list[i], 0)] = np.true_divide(np.exp(-c*t_list[i])-np.exp(-c*t_list[i+1]),c)
for k in range(1, k_max+1):
# We use the recursive formula for finding the other values
dict_integrals[t_list[i], k] = np.true_divide(t_list[i]**k*np.exp(-c*t_list[i])
-t_list[i+1]**k*np.exp(-c*t_list[i+1])+k*dict_integrals[(t_list[i], k-1)],c)
        # Now we compute the value for the last interval [t_n, +infinity]
c = 2*theta + np.true_divide(1, lambda_list[-1])
dict_integrals[(t_list[-1], 0)] = np.true_divide(np.exp(-c*t_list[-1]),c)
for k in range(1, k_max+1):
dict_integrals[t_list[-1], k] = np.true_divide(t_list[-1]**k*np.exp(-c*t_list[-1])
+k*dict_integrals[(t_list[-1], k-1)],c)
return dict_integrals
def function_F(self,t_list, lambda_list, k_max, theta):
temp_F = np.zeros(k_max+1)
factors = self.compute_factors_vector(t_list, lambda_list)
dict_integrals = self.compute_dict_integral(t_list, lambda_list, k_max, theta)
for k in range(k_max+1):
integrals_list = np.array([dict_integrals[(i, k)] for i in t_list])
temp_F[k] = np.true_divide(np.power(2*theta, k)*np.dot(factors, integrals_list), np.math.factorial(k))
return temp_F
def integrand_prob_Nk(self, t, k, theta, density_f, tuple_of_params):
return np.exp(-2*theta*t)*np.power(t,k)*density_f(t, tuple_of_params)
def prob_Nk(self, k, theta, density_f, tuple_of_params):
# Here the parameters for the density f are passed in a tuple
integral = quad(self.integrand_prob_Nk, 0, float('inf'), args=(k, theta, density_f, tuple_of_params))[0]
return np.true_divide(np.power(2*theta,k)*integral, scipy.math.factorial(k))
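    # In formula form, the two methods above implement
    #   P(N = k) = ((2*theta)**k / k!) * integral_0^inf exp(-2*theta*t) * t**k * f(t) dt
    # i.e. the number of differences is Poisson(2*theta*T2) mixed over the density f
    # of the coalescence time T2.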
def log_likelihood(self, count_k, theta, density_f, tuple_of_params):
# First we compute the vector P(N=k) for every k
p_Nk = [self.prob_Nk(i, theta, density_f, tuple_of_params) for i in range(len(count_k))]
return np.dot(count_k, np.log(p_Nk))
## This part is for making some test of the MLE strategy ####
def log_likelihood_NMut_MPSC(self, count_k, theta, t_list, lambda_list):
        # We suppose that t_list[0] > 0. We will add the value 0 at the beginning of
        # t_list and the value 1 at the beginning of lambda_list in order to start
        # at the present. We do this because we assume that we always start at time
        # 0 with lambda=1
        # Verify that all times and lambdas are positive and that times are increasing
t_list = np.array(t_list)
lambda_list = np.array(lambda_list)
if sum(lambda_list>0)<len(lambda_list):
return float('-inf')
elif sum((t_list[1:]-t_list[:-1])>0) < (len(t_list)-1): # t_list is not increasing
return float('-inf')
elif min(t_list)<0:
return float('-inf')
else:
t_k = np.array([0]+list(t_list))
lambda_k = np.array([1]+list(lambda_list))
prob_Nk = self.function_F(t_k, lambda_k, len(count_k)-1, theta)
return np.dot(count_k, np.log(prob_Nk))
def plot_log_likelihood_NMut_MPSC(self, n_obs, theta, t_list, lambda_list, pos_v1=0, pos_v2=False, domain_v1 = np.arange(0.02, 20, 0.01), domain_v2 = np.arange(0.02, 20, 0.01)):
# Plot the likelihood of the Number of Mutations as a function of 1 or 2
        # variables. The variables to vary are the population sizes in some time intervals
        # (i.e. positions in the lambda_list vector)
'''
n_obs = 10000
theta = 0.5
t_list = np.array([0.01, 0.05, 0.1 , 0.3 , 0.5 ])
lambda_list = np.array([2. , 0.5, 0.1, 2. , 1. ])
pos_v1=0
pos_v2=1
domain_v1 = np.arange(0.2, 20, 0.1)
domain_v2 = np.arange(0.2, 10, 0.1)
'''
# First we compute the theoretical pmf (we assume that no more than 100
# differences are present)
prob_Nk = self.function_F(t_list, lambda_list, 100, theta)
# Construct the observed dataset
obs = prob_Nk * n_obs
# top = sum(obs>1)
count_k = obs[:sum(obs>1)]
fig = plt.figure()
if pos_v2:
# We plot a surface
X, Y = np.meshgrid(domain_v1, domain_v2)
nb_of_rows = len(domain_v2)
nb_of_columns = len(domain_v1)
Z = np.zeros([nb_of_rows, nb_of_columns])
for i in range(nb_of_rows):
lambda_variable = [v for v in lambda_list]
lambda_variable[pos_v2] = domain_v2[i]
for j in range(nb_of_columns):
lambda_variable[pos_v1] = domain_v1[j]
Z[i][j] = self.log_likelihood_NMut_MPSC(count_k, theta, t_list, np.array(lambda_variable))
ax = fig.add_subplot(1,1,1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.coolwarm, linewidth=0, antialiased=False)
#ax.plot_surface(X, Y, Z, rstride=4, cstride=4)
#ax.plot_wireframe(X, Y, Z, rstride=4, cstride=4)
#ax.plot_surface(X, Y, Z)
else:
# We plot a function of one variable
X = domain_v1
Y = np.zeros(len(X))
lambda_variable = np.array([v for v in lambda_list])
for i in range(len(domain_v1)):
lambda_variable[pos_v1] = domain_v1[i]
Y[i] = self.log_likelihood_NMut_MPSC(count_k, theta, t_list, np.array(lambda_variable))
ax = fig.add_subplot(1,1,1)
ax.plot(X,Y)
plt.show()
def test_MLE_from_theory(self, n_obs, theta, t_list, lambda_list):
        # For a given piecewise function lambda_k we can write the probability
        # of getting k differences (mutations) in a locus (i.e. P(N=k))
        # Here, from the theoretical distribution, we compute the data that we
        # should observe under the given parameters. Then, we use this data for
        # finding the lambda_k values using a MLE strategy.
# First we compute the theoretical pmf (we assume that no more than 100
# differences are present)
prob_Nk = self.function_F(t_list, lambda_list, 100, theta)
# Construct the observed dataset
obs = np.round(prob_Nk * n_obs)
# top = sum(obs>1)
count_k = obs[:sum(obs>1)]
        # Define the objective function (here, we use the true t values and we
        # estimate the lambda_list values)
obj_f = lambda x: -self.log_likelihood_NMut_MPSC(count_k, theta, t_list, x)
x0 = np.ones(len(t_list))
res_basinh = scipy.optimize.basinhopping(obj_f, x0, niter=1000, T=2)
res_NM = scipy.optimize.minimize(obj_f, x0, method='Nelder-Mead')
res_Powell = scipy.optimize.minimize(obj_f, x0, method='Powell')
res_CG = scipy.optimize.minimize(obj_f, x0, method='CG')
res_BFGS = scipy.optimize.minimize(obj_f, x0, method='BFGS')
#res_NewtonCG = scipy.optimize.minimize(obj_f, x0, method='Newton-CG')
dict_result = {'ci':count_k, 'basinh': res_basinh, 'NM': res_NM, 'Powell': res_Powell,
'CG': res_CG, 'BFGS':res_BFGS}
return dict_result
def MLE_SSPSC_NMut(self, count_k, theta):
# Assuming a SSPSC model, estimate the parameters (alpha, T) by a maximum
# likelihood approach
obj_f = lambda x: -self.log_likelihood_NMut_MPSC(count_k, theta, np.array([x[0]]), np.array([x[1]]))
x0 = np.array([1, 1])
res_basinh = scipy.optimize.basinhopping(obj_f, x0)
res_NM = scipy.optimize.minimize(obj_f, x0, method='Nelder-Mead')
dict_result = {'basinh': res_basinh, 'NM': res_NM}
return dict_result
def test_MLE_SSPSC_NMut_theory(self, theta, alpha, T, n_obs=10000, max_ndif=100):
# This is for testing the accuracy of the MLE strategy
# The dataset is built from the theoretical distribution function
# First we compute the theoretical pmf (we assume that no more than 100
# differences are present)
prob_Nk = self.function_F(np.array([0, T]), np.array([1, alpha]), max_ndif, theta)
# Construct the observed dataset
obs = np.round(prob_Nk * n_obs)
# top = sum(obs>1)
count_k = obs[:sum(obs>1)]
return self.MLE_SSPSC_NMut(count_k, theta)
def MLE_MSPSC_NMut(self, count_k, theta, t_list0, lambda_list0,
fixed_T= False):
        # Assuming an MSPSC (Multiple Step Population Size Change) model,
        # we estimate the parameters (list_T, list_alpha) by a maximum
        # likelihood approach. Here we have as many values of list_T as
        # population size changes. The same holds for list_lambda.
# Here, the values of t_list0 and lambda_list0 are starting points.
if fixed_T:
obj_f = lambda x: -self.log_likelihood_NMut_MPSC(count_k, theta, t_list0, np.array(x))
x0 = lambda_list0
else:
obj_f = lambda x: -self.log_likelihood_NMut_MPSC(count_k, theta, np.array(x[:len(x)/2]), np.array(x[len(x)/2:]))
x0 = list(t_list0) + list(lambda_list0)
#obj_f = lambda x: -self.log_likelihood_NMut_MPSC(count_k, theta, np.array(fixed_t_list), np.array([x]))
#x0 = list(lambda_list0)
res_basinh = scipy.optimize.basinhopping(obj_f, x0)
#res_NM = scipy.optimize.minimize(obj_f, x0, method='Nelder-Mead')
#dict_result = {'basinh': res_basinh, 'NM': res_NM}
dict_result = {'basinh': res_basinh}
return dict_result
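# Hedged usage sketch (not part of the original script): build a two-step model and
# evaluate its pdf and the probabilities of observing k differences. The time points,
# size factors and theta below are arbitrary illustrative values.
def _example_mspsc_usage():
    t_k = np.array([0.0, 0.5, 1.0])         # size changes at t=0.5 and t=1.0
    lambda_k = np.array([1.0, 0.2, 2.0])    # size factor on each interval
    model = MSPSCModel(t_k, lambda_k, 0.5)
    density = model.pdf(0.75)                              # pdf of T2 at t=0.75
    prob_Nk = model.function_F(t_k, lambda_k, 5, 0.5)      # P(N=k) for k=0..5
    return density, prob_Nk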
##### This is for the ms commands
class MSTester:
def create_ms_command(self, n_obs, t_list, lambda_list):
# We assume that t_list[0] is always 0 and lambda_list[0] is always 1
ms_command_base = 'ms 2 {} -T -L'.format(n_obs)
demographic_events = ['-eN {} {}'.format(t_list[i], lambda_list[i]) for i in range(1, len(t_list))]
return '{} {}'.format(ms_command_base, ' '.join(demographic_events))
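    # Hedged illustration: with n_obs=1000, t_list=[0, 0.1, 0.4] and
    # lambda_list=[1, 2.0, 0.5], the method above builds
    #   'ms 2 1000 -T -L -eN 0.1 2.0 -eN 0.4 0.5'
    # (the leading t=0 / lambda=1 entry describes the present and adds no -eN flag).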
def create_ms_command_NMut(self, n_obs, theta, t_list, lambda_list):
ms_command_base = 'ms 2 {} -t {}'.format(n_obs, theta)
demographic_events = ['-eN {} {}'.format(t_list[i], lambda_list[i]) for i in range(1, len(t_list))]
return '{} {}'.format(ms_command_base, ' '.join(demographic_events))
def generate_T2_ms(self, ms_command, path2ms='./utils'):
obs_text = os.popen(os.path.join(path2ms, ms_command)).read()
obs_text = obs_text.split('time')[1:]
obs = [float(i.split('\t')[1]) for i in obs_text]
return obs
def generate_NMut_ms(self, ms_command, path2ms='./utils'):
obs_text = os.popen(os.path.join(path2ms, ms_command)).read()
obs_text = obs_text.split('segsites: ')[1:]
obs = [int(i.split('\n')[0]) for i in obs_text]
return obs
def compare_cdf_MPSC_MS(self, array_t, n_obs, t_list, lambda_list):
# Given some history (t_list, lambda_list), we compare the theoretical
# cumulative distribution with the empirical distribution from MS
# t_list[0] = 0 and lambda_list[0] = 1
# Create a MSPSCModel object
V_to_test = MSPSCModel(t_list, lambda_list, 1)
# First we simulate the data
t_list_ms = np.true_divide(t_list, 2)
ms_command = self.create_ms_command(n_obs, t_list_ms, lambda_list)
obs_T2 = self.generate_T2_ms(ms_command)
#obs_T2 = np.true_divide(obs_T2, 2)
obs_T2 = np.array(obs_T2)*2
# We set the values of t that will be used for comparing
# Here we may need to extend the limit of array_t in order to get all
# the observed values of T2 lower than array_t[-1]
delta = array_t[-1]-array_t[-2]
#print max(obs_T2)
array_t[-1] = max(array_t[-1], max(obs_T2)+delta)
histogram = np.histogram(obs_T2, bins=array_t)
emp_cdf = np.zeros(len(array_t))
emp_cdf[1:] = np.cumsum(histogram[0])
emp_cdf = np.true_divide(emp_cdf, n_obs)
theor_cdf = np.array([V_to_test.cdf(i, (t_list, lambda_list)) for i in array_t])
# Now we plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(array_t, theor_cdf, label='theory')
ax.plot(array_t, emp_cdf, label='empirical')
ax.set_ylim(0, 1.5)
plt.legend()
plt.show()
t = (t_list, lambda_list)
f_test = lambda at: np.array([V_to_test.cdf(i, t) for i in at])
#f_test = lambda x : cdf_MPSC(x, t)
print kstest(obs_T2, f_test)
        print 'Doing the KS test 100 times ...'
rejections_count = 0
for i in xrange(100):
obs_T2 = self.generate_T2_ms(ms_command)
obs_T2 = np.array(obs_T2)*2
if kstest(obs_T2, f_test)[1]<0.05: rejections_count+=1
print 'The number of rejections was {}'.format(rejections_count)
def compare_cdf_NMut_MPSC(self, n_obs, theta, t_list, lambda_list,
n_rep_chi2=100):
# Compare the theoretical with the empirical distribution
# (from ms simulations)
# of the number of differences
msc = self.create_ms_command_NMut(n_obs, 2*theta, np.true_divide(np.array(t_list),2), lambda_list)
obs = self.generate_NMut_ms(msc)
# Make the histogram for the observed data
b = np.arange(-0.5, max(obs)+0.5, 1)
h = np.histogram(obs, bins=b)[0]
# Compute the theoretical distribution
#prob_Nk = function_F(np.array(t_list)*2, lambda_list, max(obs), np.true_divide(theta,2))
model = MSPSCModel(t_list, lambda_list, theta)
prob_Nk = model.function_F(t_list, lambda_list, max(obs), theta)
# Make the plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.hist(obs, bins=b, color='g')
y = np.array(prob_Nk)*n_obs
ax.plot(b+0.5, y, 'bo')
plt.show()
# Now do a chisquare test
top = sum(h>5)
#top = 2
emp_dist = np.zeros(top+1)
emp_dist[:-1] = h[:top]
emp_dist[-1] = n_obs - sum(emp_dist)
theor_dist = np.zeros(top+1)
theor_dist[:-1] = np.round(prob_Nk[:top]*n_obs)
theor_dist[-1] = n_obs-sum(theor_dist)
print 'Empirical and Theoretical distributions'
print (emp_dist, theor_dist)
print 'Chisquare test result'
test_result = chisquare(emp_dist, theor_dist)
print test_result
print("Doing a chisquare test {} times and counting the number of\
rejections...".format(n_rep_chi2))
rejections_count = 0
for i in xrange(n_rep_chi2):
obs = self.generate_NMut_ms(msc)
b = np.arange(-0.5, max(obs)+0.5, 1)
h = np.histogram(obs, bins=b)[0]
prob_Nk = model.function_F(t_list, lambda_list, max(obs), theta)
top = sum(h>5)
emp_dist = np.zeros(top+1)
emp_dist[:-1] = h[:top]
emp_dist[-1] = n_obs - sum(emp_dist)
theor_dist = np.zeros(top+1)
theor_dist[:-1] = np.round(prob_Nk[:top]*n_obs)
theor_dist[-1] = n_obs-sum(theor_dist)
if chisquare(emp_dist, theor_dist)[1]<0.05:
rejections_count+=1
print 'We rejected {} times'.format(rejections_count)
| {
"content_hash": "d2a1a6ed6688be3c1c4825d720fd0380",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 181,
"avg_line_length": 44.43127962085308,
"alnum_prop": 0.5714666666666667,
"repo_name": "willyrv/DHNDiff",
"id": "31c169f9c94a5c5cf3525e4bcc65aa9468b540a2",
"size": "18774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MSPSC_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18774"
}
],
"symlink_target": ""
} |
import pyaudio
import wave
import numpy as np
import time
from filter_method import filter_signal
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 192000
CHUNK = 1024 # Buffer size
print("Recording")
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True, frames_per_buffer=CHUNK)
frames = []
# Record roughly 5 seconds of audio (RATE / CHUNK buffers per second)
for i in range(0, int(RATE / CHUNK)*5):
data = stream.read(CHUNK)
frames.append(data)
# Stop the stream
stream.stop_stream()
stream.close()
audio.terminate()
print("Saving")
waveFile = wave.open("rec.wav", 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
print("Done Recording")
filter_signal()
while True:
time.sleep(1)
| {
"content_hash": "3bfac2fa5b3c08dc90e4bb639b0b4ae3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 67,
"avg_line_length": 18.795454545454547,
"alnum_prop": 0.720677146311971,
"repo_name": "labseven/SigsysFinalProject",
"id": "9414d5048688e489de6d7dea4fab5bff21786a88",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recieve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17085"
}
],
"symlink_target": ""
} |
"""Test the importmulti RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ImportMultiTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.setup_nodes()
def run_test (self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
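# With the redeem script imported, listunspent reports the output as solvable but still not
# spendable, since no private keys were imported.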
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
if __name__ == '__main__':
ImportMultiTest().main()
| {
"content_hash": "9560c41195bd6a99fa0a7cbde07fcc17",
"timestamp": "",
"source": "github",
"line_count": 446,
"max_line_length": 137,
"avg_line_length": 48.152466367713004,
"alnum_prop": 0.6136151983609611,
"repo_name": "Rav3nPL/bitcoin",
"id": "a691595f15116f94c635dbc1b519b7f60a4b9433",
"size": "21690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/importmulti.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "693313"
},
{
"name": "C++",
"bytes": "5029049"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "188588"
},
{
"name": "Makefile",
"bytes": "109409"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1146190"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "53022"
}
],
"symlink_target": ""
} |
from dal.test import case, stories
from dal_select2.test import Select2Story
from .models import TestModel
class AdminOneToOneTestCase(Select2Story, case.AdminMixin, case.OptionMixin,
case.AutocompleteTestCase):
field_name = 'test'
inline_related_name = 'inline_test_models'
model = TestModel
def setUp(self):
super(AdminOneToOneTestCase, self).setUp()
self.get(url=self.get_modeladmin_url('add'))
def test_can_create_option_on_the_fly(self):
story = stories.CreateOption(self)
self.enter_text('#id_name', 'special %s' % self.id())
name = 'new option %s' % self.id()
story.create_option(name)
story.assert_value(self.model.objects.get(name=name).pk)
self.assertIn(name, story.get_label())
story.submit()
story.assert_value(self.model.objects.get(name=name).pk)
story.assert_label(name)
| {
"content_hash": "bbab1b951eb744d140f9c1937411bf19",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 28.303030303030305,
"alnum_prop": 0.6488222698072805,
"repo_name": "luzfcb/django-autocomplete-light",
"id": "6c20be3302814159104c02b302fa928bd52ef59e",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/select2_one_to_one/test_functional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "166"
},
{
"name": "HTML",
"bytes": "4331"
},
{
"name": "JavaScript",
"bytes": "7815"
},
{
"name": "Python",
"bytes": "146531"
},
{
"name": "Shell",
"bytes": "1031"
}
],
"symlink_target": ""
} |
"""
Use regular expressions to obtain serial number, vendor, model, os_version, and uptime from show
version output.
"""
import re
def read_file(filename):
"""Read filename; return contents as one string."""
with open(filename) as my_file:
return my_file.read()
class NetDeviceInventory(object):
"""Parse show version, retain attributes"""
def __init__(self, show_ver):
self.show_ver = show_ver
self.serial_number = ''
self.vendor = ''
self.model = ''
self.os_version = ''
self.uptime = ''
self.find_serial_number()
self.find_vendor()
self.find_model()
self.find_os_version()
self.find_uptime()
def find_serial_number(self):
"""
Find the serial number in show version output.
Example: Processor board ID FTX1512038X
"""
match = re.search(r"Processor board ID (.*)", self.show_ver)
if match:
self.serial_number = match.group(1)
def find_vendor(self):
"""
Example:
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.4(2)T1, RELEASE...
"""
match = re.search(r"Cisco IOS Software", self.show_ver)
if match:
self.vendor = 'Cisco'
def find_model(self):
"""
Example:
Cisco 881 (MPC8300) processor (revision 1.0) with 236544K/25600K bytes of memory.
"""
match = re.search(r"^Cisco (.*?) .* bytes of memory.$", self.show_ver, flags=re.M)
if match:
self.model = match.group(1)
def find_os_version(self):
"""
Example:
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.4(2)T1, RELEASE...
"""
match = re.search(r"Cisco IOS Software.* Version (.*), .*", self.show_ver)
if match:
self.os_version = match.group(1)
def find_uptime(self):
"""
Example:
pynet-rtr1 uptime is 3 weeks, 1 day, 3 hours, 52 minutes
"""
match = re.search(r".* uptime is (.*)", self.show_ver)
if match:
self.uptime = match.group(1)
def main():
"""
Use regular expressions to obtain serial number, vendor, model, os_version, and uptime from
show version output.
"""
my_file = "show_version.txt"
show_ver = read_file(my_file)
net_dev_obj = NetDeviceInventory(show_ver)
net_dev_obj.find_serial_number()
net_dev_obj.find_vendor()
net_dev_obj.find_model()
net_dev_obj.find_os_version()
net_dev_obj.find_uptime()
print
print "Vendor: {}".format(net_dev_obj.vendor)
print "Model: {}".format(net_dev_obj.model)
print "OS Version: {}".format(net_dev_obj.os_version)
print "Serial Number: {}".format(net_dev_obj.serial_number)
print "Uptime: {}".format(net_dev_obj.uptime)
print
if __name__ == "__main__":
main()
| {
"content_hash": "bc462ba2a57aa16a65b4b58e8fb5a7af",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 97,
"avg_line_length": 29.565656565656564,
"alnum_prop": 0.579091219678852,
"repo_name": "ktbyers/pynet_ons",
"id": "62163de5493579c4a2e2d53f430628342dfb78fe",
"size": "2949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day4/show_version.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "123"
},
{
"name": "Python",
"bytes": "44216"
}
],
"symlink_target": ""
} |
import math
import operator
import serial
class AM03127():
"""Runs a AM03127-based signboard (http://www.amplus.com.hk/ aka Maplin N00GA)"""
special_map = {
u'\n': ' ',
u'\r': '',
u'<': '<UBC>',
u'>': '<UBE>'
}
def __init__ (self, signport=None, baud=None, signid=None):
default_signport = "/dev/ttyUSB0"
default_baud = 9600
default_signid = 1
if not signport:
signport = default_signport
if not baud:
baud = default_baud
if not signid:
signid = default_signid
self.signport = signport
self.baud = baud
self.signid = signid
def isAsciiRange (self, c, first, last) :
if type(c) != str or len(c) != 1 :
return False
if ord(c) < ord(first) or ord(c) > ord(last) :
return False
return True
def encodeCharset (self, unicode_str) :
s = ''
i = iter(unicode(unicode_str))
for u in i :
if u == '\033' :
s = s + '<' + i.next() + i.next() + '>'
elif u in self.special_map :
s = s + self.special_map[u]
else :
s = s + u.encode('cp1252')
return s
def sendPageMessage (self, line=1, page='A', lead=None, disp='A', wait=5, lag=None, msg='') :
default_lead_lag = 'E'
if not lead :
lead = default_lead_lag
if not lag :
lag = default_lead_lag
rmsg = u''.join (map (unicode, msg))
fmsg = self.encodeCharset(rmsg)
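# Validate the page-message fields; note that wait must be a single letter 'A'..'Z'
# ('A' = 0.5 sec), so the numeric default of 5 would be rejected by the check below.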
if line < 1 or line > 8 :
raise RuntimeError ('Line must be in range 1..8')
if not self.isAsciiRange (page, 'A', 'Z') :
raise RuntimeError ('Page must be in range A..Z')
if not self.isAsciiRange (lead, 'A', 'S') :
raise RuntimeError ('Lead must be in range A..S')
if not (disp in 'ABCDEQRSTUabcdeqrstu') :
raise RuntimeError ('Display must be one of {ABCDEQRSTUabcdeqrstu}')
if not self.isAsciiRange (wait, 'A', 'Z') :
raise RuntimeError ('Waittime must be in range A..Z (A=0.5 sec)')
if not self.isAsciiRange (lag, 'A', 'S') :
raise RuntimeError ('Lag must be in range A..S')
return '<L%d><P%c><F%c><M%c><W%c><F%c>'%(line, page, lead, disp, wait, lag) + fmsg
def setBrightness (self, brightness) :
default_brightness='D'
if not brightness :
brightness = default_brightness
if not self.isAsciiRange(brightness, 'A', 'D') :
raise RuntimeError('Brightness must be in range A..D (100%..25%)')
return '<B%c>'%(brightness)
def displayMessage (self, line=1, page='A', lead=None, disp='A', wait=5, lag=None, msg='', brightness='A') :
packets = []
data = self.sendPageMessage (line, page, lead, disp, wait, lag, msg)
packets.append (self.setBrightness(brightness))
packets.append (data)
self.sendPackets (packets)
def encodeMessage (self, board_id, data) :
if board_id < 0 or board_id > 255 :
raise RuntimeError ('Sign ID must be in range 0..255')
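# Frame layout: <IDxx> + payload + two-hex-digit XOR checksum of the payload + <E>.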
chksum = 0
for c in data :
chksum ^= ord(c)
return '<ID%02X>'%(board_id) + data + '%02X<E>'%(chksum)
def sendData (self, port, board_id, data) :
port.setTimeout(1)
encodedMessage = self.encodeMessage (board_id, data)
print "TX:[" + encodedMessage + "]"
port.write(encodedMessage)
replies = [ 'ACK', 'NACK' ]
buf = ''
while True :
c = port.read(1)
if c == '' :
return 'TIMEOUT'
buf = buf + c
valid_start = False
for r in replies :
if len(buf) > len(r) :
continue
if buf == r[0:len(buf)] :
valid_start = True
if len(buf) == len(r) :
return buf
if not valid_start :
return buf # invalid
def sendPackets (self, packets_list):
tty = serial.Serial(self.signport, self.baud)
for data in packets_list:
ret = self.sendData(tty, self.signid, data)
if ret != 'ACK' :
# We can't do anything at this point anyway, so pass
pass
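# Example usage (a minimal sketch; the port, baud rate and sign ID below are
# assumptions that depend on the local setup):
#   sign = AM03127(signport='/dev/ttyUSB0', baud=9600, signid=1)
#   sign.displayMessage(line=1, page='A', wait='E', msg=u'Hello', brightness='A')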
| {
"content_hash": "23a84526e67285b8a6e35acef4b3a4ad",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 112,
"avg_line_length": 32.75373134328358,
"alnum_prop": 0.5103668261562998,
"repo_name": "barfle/signboard",
"id": "15b31511faf6d2a82db91bec64a2d79eae89bdd3",
"size": "4461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/AM03127.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5954"
}
],
"symlink_target": ""
} |
"""Tests for Volume Code."""
import datetime
import ddt
import os
import shutil
import sys
import tempfile
import time
import uuid
import enum
import eventlet
import mock
import os_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import imageutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from stevedore import extension
from taskflow.engines.action_engine import engine
from cinder.api import common
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder import keymgr as key_manager
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
import cinder.policy
from cinder import quota
from cinder import test
from cinder.tests import fake_driver
from cinder.tests.unit.api import fakes
from cinder.tests.unit.brick import fake_lvm
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit.keymgr import fake as fake_keymgr
from cinder.tests.unit import utils as tests_utils
from cinder import utils
import cinder.volume
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume import manager as vol_manager
from cinder.volume import rpcapi as volume_rpcapi
import cinder.volume.targets.tgt
from cinder.volume import utils as volutils
from cinder.volume import volume_types
QUOTAS = quota.QUOTAS
CONF = cfg.CONF
ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
fake_opt = [
cfg.StrOpt('fake_opt1', default='fake', help='fake opts')
]
OVER_SNAPSHOT_QUOTA_EXCEPTION = exception.OverQuota(
overs=['snapshots'],
usages={'snapshots': {'reserved': 1, 'in_use': 9}},
quotas={'gigabytes': 10, 'snapshots': 10})
def create_snapshot(volume_id, size=1, metadata=None, ctxt=None,
**kwargs):
"""Create a snapshot object."""
metadata = metadata or {}
snap = objects.Snapshot(ctxt or context.get_admin_context())
snap.volume_size = size
snap.user_id = fake.USER_ID
snap.project_id = fake.PROJECT_ID
snap.volume_id = volume_id
snap.status = "creating"
if metadata is not None:
snap.metadata = metadata
snap.update(kwargs)
snap.create()
return snap
class FakeImageService(object):
def __init__(self, db_driver=None, image_service=None):
pass
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
class BaseVolumeTestCase(test.TestCase):
"""Test Case for volumes."""
FAKE_UUID = fake.IMAGE_ID
def setUp(self):
super(BaseVolumeTestCase, self).setUp()
self.extension_manager = extension.ExtensionManager(
"BaseVolumeTestCase")
vol_tmpdir = tempfile.mkdtemp()
self.flags(volumes_dir=vol_tmpdir)
self.addCleanup(self._cleanup)
self.volume = importutils.import_object(CONF.volume_manager)
self.volume.message_api = mock.Mock()
self.configuration = mock.Mock(conf.Configuration)
self.context = context.get_admin_context()
self.context.user_id = fake.USER_ID
# NOTE(mriedem): The id is hard-coded here for tracking race fail
# assertions with the notification code, it's part of an
# elastic-recheck query so don't remove it or change it.
self.project_id = '7f265bd4-3a85-465e-a899-5dc4854a86d3'
self.context.project_id = self.project_id
self.volume_params = {
'status': 'creating',
'host': CONF.host,
'size': 1}
self.mock_object(brick_lvm.LVM,
'get_all_volume_groups',
self.fake_get_all_volume_groups)
fake_image.mock_image_service(self)
self.mock_object(brick_lvm.LVM, '_vg_exists', lambda x: True)
self.mock_object(os.path, 'exists', lambda x: True)
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0,
'pools': {}}
# keep ordered record of what we execute
self.called = []
self.volume_api = cinder.volume.api.API()
def _cleanup(self):
try:
shutil.rmtree(CONF.volumes_dir)
except OSError:
pass
def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
return [{'name': 'cinder-volumes',
'size': '5.00',
'available': '2.50',
'lv_count': '2',
'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
@mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask._clone_image_volume')
def _create_volume_from_image(self, mock_clone_image_volume,
mock_fetch_img,
fakeout_copy_image_to_volume=False,
fakeout_clone_image=False,
clone_image_volume=False):
"""Test function of create_volume_from_image.
Test cases call this function to create a volume from image, caller
can choose whether to fake out copy_image_to_volume and clone_image,
after calling this, test cases should check status of the volume.
"""
def fake_local_path(volume):
return dst_path
def fake_copy_image_to_volume(context, volume,
image_service, image_id):
pass
def fake_fetch_to_raw(ctx, image_service, image_id, path, blocksize,
size=None, throttle=None):
pass
def fake_clone_image(ctx, volume_ref,
image_location, image_meta,
image_service):
return {'provider_location': None}, True
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.mock_object(self.volume.driver, 'local_path', fake_local_path)
if fakeout_clone_image:
self.mock_object(self.volume.driver, 'clone_image',
fake_clone_image)
self.mock_object(image_utils, 'fetch_to_raw', fake_fetch_to_raw)
if fakeout_copy_image_to_volume:
self.mock_object(self.volume.driver, 'copy_image_to_volume',
fake_copy_image_to_volume)
mock_clone_image_volume.return_value = ({}, clone_image_volume)
mock_fetch_img.return_value = mock.MagicMock(
spec=tests_utils.get_file_spec())
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume = tests_utils.create_volume(self.context, **self.volume_params)
# creating volume testdata
try:
request_spec = {
'volume_properties': self.volume_params,
'image_id': image_id,
}
self.volume.create_volume(self.context,
volume.id,
request_spec,
volume=volume)
finally:
# cleanup
os.unlink(dst_path)
volume = objects.Volume.get_by_id(self.context, volume.id)
return volume
class AvailabilityZoneTestCase(BaseVolumeTestCase):
def setUp(self):
super(AvailabilityZoneTestCase, self).setUp()
self.get_all = self.patch(
'cinder.db.service_get_all', autospec=True,
return_value=[{'availability_zone': 'a', 'disabled': False}])
def test_list_availability_zones_cached(self):
azs = self.volume_api.list_availability_zones(enable_cache=True)
self.assertEqual([{"name": 'a', 'available': True}], list(azs))
self.assertIsNotNone(self.volume_api.availability_zones_last_fetched)
self.assertTrue(self.get_all.called)
self.volume_api.list_availability_zones(enable_cache=True)
self.assertEqual(1, self.get_all.call_count)
def test_list_availability_zones_no_cached(self):
azs = self.volume_api.list_availability_zones(enable_cache=False)
self.assertEqual([{"name": 'a', 'available': True}], list(azs))
self.assertIsNone(self.volume_api.availability_zones_last_fetched)
self.get_all.return_value[0]['disabled'] = True
azs = self.volume_api.list_availability_zones(enable_cache=False)
self.assertEqual([{"name": 'a', 'available': False}], list(azs))
self.assertIsNone(self.volume_api.availability_zones_last_fetched)
def test_list_availability_zones_refetched(self):
timeutils.set_time_override()
self.addCleanup(timeutils.clear_time_override)
azs = self.volume_api.list_availability_zones(enable_cache=True)
self.assertEqual([{"name": 'a', 'available': True}], list(azs))
self.assertIsNotNone(self.volume_api.availability_zones_last_fetched)
last_fetched = self.volume_api.availability_zones_last_fetched
self.assertTrue(self.get_all.called)
self.volume_api.list_availability_zones(enable_cache=True)
self.assertEqual(1, self.get_all.call_count)
# The default cache time is 3600, push past that...
timeutils.advance_time_seconds(3800)
self.get_all.return_value = [
{
'availability_zone': 'a',
'disabled': False,
},
{
'availability_zone': 'b',
'disabled': False,
},
]
azs = self.volume_api.list_availability_zones(enable_cache=True)
azs = sorted([n['name'] for n in azs])
self.assertEqual(['a', 'b'], azs)
self.assertEqual(2, self.get_all.call_count)
self.assertGreater(self.volume_api.availability_zones_last_fetched,
last_fetched)
def test_list_availability_zones_enabled_service(self):
def sort_func(obj):
return obj['name']
self.get_all.return_value = [
{'availability_zone': 'ping', 'disabled': 0},
{'availability_zone': 'ping', 'disabled': 1},
{'availability_zone': 'pong', 'disabled': 0},
{'availability_zone': 'pung', 'disabled': 1},
]
volume_api = cinder.volume.api.API()
azs = volume_api.list_availability_zones()
azs = sorted(azs, key=sort_func)
expected = sorted([
{'name': 'pung', 'available': False},
{'name': 'pong', 'available': True},
{'name': 'ping', 'available': True},
], key=sort_func)
self.assertEqual(expected, azs)
@ddt.ddt
class VolumeTestCase(BaseVolumeTestCase):
def setUp(self):
super(VolumeTestCase, self).setUp()
self._clear_patch = mock.patch('cinder.volume.utils.clear_volume',
autospec=True)
self._clear_patch.start()
self.expected_status = 'available'
def tearDown(self):
super(VolumeTestCase, self).tearDown()
self._clear_patch.stop()
def test_init_host_clears_downloads(self):
"""Test that init_host will unwedge a volume stuck in downloading."""
volume = tests_utils.create_volume(self.context, status='downloading',
size=0, host=CONF.host)
volume_id = volume['id']
self.volume.init_host()
volume.refresh()
self.assertEqual("error", volume.status)
self.volume.delete_volume(self.context, volume_id, volume=volume)
def test_init_host_clears_uploads_available_volume(self):
"""init_host will clean an available volume stuck in uploading."""
volume = tests_utils.create_volume(self.context, status='uploading',
size=0, host=CONF.host)
self.volume.init_host()
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual("available", volume.status)
def test_init_host_clears_uploads_in_use_volume(self):
"""init_host will clean an in-use volume stuck in uploading."""
volume = tests_utils.create_volume(self.context, status='uploading',
size=0, host=CONF.host)
fake_uuid = fakes.get_fake_uuid()
tests_utils.attach_volume(self.context, volume.id, fake_uuid,
'fake_host', '/dev/vda')
self.volume.init_host()
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual("in-use", volume.status)
def test_init_host_resumes_deletes(self):
"""init_host will resume deleting volume in deleting status."""
volume = tests_utils.create_volume(self.context, status='deleting',
size=0, host=CONF.host)
volume_id = volume['id']
self.volume.init_host()
self.assertRaises(exception.VolumeNotFound, db.volume_get,
context.get_admin_context(), volume_id)
def test_init_host_count_allocated_capacity(self):
vol0 = tests_utils.create_volume(
self.context, size=100, host=CONF.host)
vol1 = tests_utils.create_volume(
self.context, size=128,
host=volutils.append_host(CONF.host, 'pool0'))
vol2 = tests_utils.create_volume(
self.context, size=256,
host=volutils.append_host(CONF.host, 'pool0'))
vol3 = tests_utils.create_volume(
self.context, size=512,
host=volutils.append_host(CONF.host, 'pool1'))
vol4 = tests_utils.create_volume(
self.context, size=1024,
host=volutils.append_host(CONF.host, 'pool2'))
self.volume.init_host()
stats = self.volume.stats
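# Expected totals: 100 + 128 + 256 + 512 + 1024 = 2020 GB overall;
# pool0 = 128 + 256 = 384, pool1 = 512, pool2 = 1024.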
self.assertEqual(2020, stats['allocated_capacity_gb'])
self.assertEqual(
384, stats['pools']['pool0']['allocated_capacity_gb'])
self.assertEqual(
512, stats['pools']['pool1']['allocated_capacity_gb'])
self.assertEqual(
1024, stats['pools']['pool2']['allocated_capacity_gb'])
# NOTE(jdg): On the create we have host='xyz', BUT
# here we do a db.volume_get, and now the host has
# been updated to xyz#pool-name. Note this is
# done via the manager's init, which calls the driver's
# get_pool method, which in the legacy case is going
# to be volume_backend_name or None
vol0.refresh()
expected_host = volutils.append_host(CONF.host, 'fake')
self.assertEqual(expected_host, vol0.host)
self.volume.delete_volume(self.context, vol0.id, volume=vol0)
self.volume.delete_volume(self.context, vol1.id, volume=vol1)
self.volume.delete_volume(self.context, vol2.id, volume=vol2)
self.volume.delete_volume(self.context, vol3.id, volume=vol3)
self.volume.delete_volume(self.context, vol4.id, volume=vol4)
@mock.patch.object(driver.BaseVD, "update_provider_info")
def test_init_host_sync_provider_info(self, mock_update):
vol0 = tests_utils.create_volume(
self.context, size=1, host=CONF.host)
vol1 = tests_utils.create_volume(
self.context, size=1, host=CONF.host)
snap0 = tests_utils.create_snapshot(self.context, vol0.id)
snap1 = tests_utils.create_snapshot(self.context, vol1.id)
# Return values for update_provider_info
volumes = [{'id': vol0.id, 'provider_id': '1 2 xxxx'},
{'id': vol1.id, 'provider_id': '3 4 yyyy'}]
snapshots = [{'id': snap0.id, 'provider_id': '5 6 xxxx'},
{'id': snap1.id, 'provider_id': '7 8 yyyy'}]
mock_update.return_value = (volumes, snapshots)
# initialize
self.volume.init_host()
# Grab volume and snapshot objects
vol0_obj = objects.Volume.get_by_id(context.get_admin_context(),
vol0.id)
vol1_obj = objects.Volume.get_by_id(context.get_admin_context(),
vol1.id)
snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id)
snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id)
# Check updated provider ids
self.assertEqual('1 2 xxxx', vol0_obj.provider_id)
self.assertEqual('3 4 yyyy', vol1_obj.provider_id)
self.assertEqual('5 6 xxxx', snap0_obj.provider_id)
self.assertEqual('7 8 yyyy', snap1_obj.provider_id)
# Clean up
self.volume.delete_snapshot(self.context, snap0_obj)
self.volume.delete_snapshot(self.context, snap1_obj)
self.volume.delete_volume(self.context, vol0.id)
self.volume.delete_volume(self.context, vol1.id)
@mock.patch.object(driver.BaseVD, "update_provider_info")
def test_init_host_sync_provider_info_no_update(self, mock_update):
vol0 = tests_utils.create_volume(
self.context, size=1, host=CONF.host)
vol1 = tests_utils.create_volume(
self.context, size=1, host=CONF.host)
snap0 = tests_utils.create_snapshot(self.context, vol0.id)
snap1 = tests_utils.create_snapshot(self.context, vol1.id)
mock_update.return_value = ([], [])
# initialize
self.volume.init_host()
# Grab volume and snapshot objects
vol0_obj = objects.Volume.get_by_id(context.get_admin_context(),
vol0.id)
vol1_obj = objects.Volume.get_by_id(context.get_admin_context(),
vol1.id)
snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id)
snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id)
# Check provider ids are not changed
self.assertIsNone(vol0_obj.provider_id)
self.assertIsNone(vol1_obj.provider_id)
self.assertIsNone(snap0_obj.provider_id)
self.assertIsNone(snap1_obj.provider_id)
# Clean up
self.volume.delete_snapshot(self.context, snap0_obj)
self.volume.delete_snapshot(self.context, snap1_obj)
self.volume.delete_volume(self.context, vol0.id)
self.volume.delete_volume(self.context, vol1.id)
@mock.patch('cinder.volume.manager.VolumeManager.'
'_include_resources_in_cluster')
def test_init_host_cluster_not_changed(self, include_in_cluster_mock):
self.volume.init_host(False)
include_in_cluster_mock.assert_not_called()
@mock.patch('cinder.objects.volume.VolumeList.include_in_cluster')
@mock.patch('cinder.objects.consistencygroup.ConsistencyGroupList.'
'include_in_cluster')
def test_init_host_added_to_cluster(self, vol_include_mock,
cg_include_mock):
self.mock_object(self.volume, 'cluster', mock.sentinel.cluster)
self.volume.init_host(True)
vol_include_mock.assert_called_once_with(mock.ANY,
mock.sentinel.cluster,
host=self.volume.host)
cg_include_mock.assert_called_once_with(mock.ANY,
mock.sentinel.cluster,
host=self.volume.host)
@mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
@mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.3'})
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': '1.4'})
def test_reset(self, get_min_obj, get_min_rpc):
vol_mgr = vol_manager.VolumeManager()
scheduler_rpcapi = vol_mgr.scheduler_rpcapi
self.assertEqual('1.3', scheduler_rpcapi.client.version_cap)
self.assertEqual('1.4',
scheduler_rpcapi.client.serializer._base.version_cap)
get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current()
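# reset() should drop the pinned version caps and pick up the minimum
# versions reported by the services.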
vol_mgr.reset()
scheduler_rpcapi = vol_mgr.scheduler_rpcapi
self.assertEqual(get_min_rpc.return_value,
scheduler_rpcapi.client.version_cap)
self.assertEqual(get_min_obj.return_value,
scheduler_rpcapi.client.serializer._base.version_cap)
self.assertIsNone(scheduler_rpcapi.client.serializer._base.manifest)
@mock.patch.object(vol_manager.VolumeManager,
'update_service_capabilities')
def test_report_filter_goodness_function(self, mock_update):
manager = vol_manager.VolumeManager()
manager.driver.set_initialized()
myfilterfunction = "myFilterFunction"
mygoodnessfunction = "myGoodnessFunction"
expected = {'name': 'cinder-volumes',
'filter_function': myfilterfunction,
'goodness_function': mygoodnessfunction,
}
with mock.patch.object(manager.driver,
'get_volume_stats') as m_get_stats:
with mock.patch.object(manager.driver,
'get_goodness_function') as m_get_goodness:
with mock.patch.object(manager.driver,
'get_filter_function') as m_get_filter:
m_get_stats.return_value = {'name': 'cinder-volumes'}
m_get_filter.return_value = myfilterfunction
m_get_goodness.return_value = mygoodnessfunction
manager._report_driver_status(1)
self.assertTrue(m_get_stats.called)
mock_update.assert_called_once_with(expected)
def test_is_working(self):
# By default we have driver mocked to be initialized...
self.assertTrue(self.volume.is_working())
# ...let's switch it and check again!
self.volume.driver._initialized = False
self.assertFalse(self.volume.is_working())
def test_create_volume_fails_with_creating_and_downloading_status(self):
"""Test init_host in case of volume.
While the status of volume is 'creating' or 'downloading',
volume process down.
After process restarting this 'creating' status is changed to 'error'.
"""
for status in ['creating', 'downloading']:
volume = tests_utils.create_volume(self.context, status=status,
size=0, host=CONF.host)
volume_id = volume['id']
self.volume.init_host()
volume.refresh()
self.assertEqual('error', volume.status)
self.volume.delete_volume(self.context, volume_id, volume=volume)
def test_create_snapshot_fails_with_creating_status(self):
"""Test init_host in case of snapshot.
While the status of snapshot is 'creating', volume process
down. After process restarting this 'creating' status is
changed to 'error'.
"""
volume = tests_utils.create_volume(self.context,
**self.volume_params)
snapshot = tests_utils.create_snapshot(
self.context,
volume['id'],
status=fields.SnapshotStatus.CREATING)
snap_id = snapshot['id']
self.volume.init_host()
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume.id, volume=volume)
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'rollback')
def test_create_driver_not_initialized(self, reserve, commit, rollback,
mock_notify):
self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
mock_notify.assert_not_called()
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, volume_id, volume=volume)
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual("error", volume.status)
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_driver_not_initialized_rescheduling(self):
self.volume.driver._initialized = False
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, volume_id,
{'volume_properties': self.volume_params},
{'retry': {'num_attempts': 1, 'host': []}},
volume=volume)
# NOTE(dulek): Volume should be rescheduled as we passed request_spec
# and filter_properties, assert that it wasn't counted in
# allocated_capacity tracking.
self.assertEqual({}, self.volume.stats['pools'])
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_non_cinder_exception_rescheduling(self):
params = self.volume_params
del params['host']
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**params)
volume_id = volume['id']
with mock.patch.object(self.volume.driver, 'create_volume',
side_effect=processutils.ProcessExecutionError):
self.assertRaises(processutils.ProcessExecutionError,
self.volume.create_volume,
self.context, volume_id,
{'volume_properties': params},
{'retry': {'num_attempts': 1, 'host': []}},
volume=volume)
# NOTE(dulek): Volume should be rescheduled as we passed request_spec
# and filter_properties, assert that it wasn't counted in
# allocated_capacity tracking.
self.assertEqual({}, self.volume.stats['pools'])
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
@mock.patch.object(QUOTAS, 'rollback')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'reserve')
def test_delete_driver_not_initialized(self, reserve, commit, rollback,
mock_notify):
self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
self.assertIsNone(volume['encryption_key_id'])
mock_notify.assert_not_called()
self.assertRaises(exception.DriverNotInitialized,
self.volume.delete_volume,
self.context, volume.id, volume=volume)
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error_deleting", volume.status)
volume.destroy()
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
@mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock())
@mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock())
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION'])
def test_create_delete_volume(self, _mock_reserve, mock_notify):
"""Test volume can be created and deleted."""
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
mock_notify.assert_not_called()
self.assertIsNone(volume['encryption_key_id'])
self.volume.create_volume(self.context, volume_id, volume=volume)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end']))
self.volume.delete_volume(self.context, volume_id, volume=volume)
vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
volume_id)
self.assertEqual(vol['status'], 'deleted')
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'volume.delete.start'],
['INFO', 'volume.delete.end']))
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_create_delete_volume_with_metadata(self):
"""Test volume can be created with metadata and deleted."""
test_meta = {'fake_key': 'fake_value'}
volume = tests_utils.create_volume(self.context, metadata=test_meta,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
self.assertEqual(test_meta, volume.metadata)
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
@mock.patch('cinder.db.volume_metadata_update')
def test_create_volume_metadata(self, metadata_update):
metadata = {'fake_key': 'fake_value'}
metadata_update.return_value = metadata
volume = tests_utils.create_volume(self.context, **self.volume_params)
res = self.volume_api.create_volume_metadata(self.context,
volume, metadata)
metadata_update.assert_called_once_with(self.context, volume.id,
metadata, False,
common.METADATA_TYPES.user)
self.assertEqual(metadata, res)
@ddt.data('maintenance', 'uploading')
def test_create_volume_metadata_maintenance(self, status):
metadata = {'fake_key': 'fake_value'}
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume['status'] = status
self.assertRaises(exception.InvalidVolume,
self.volume_api.create_volume_metadata,
self.context,
volume,
metadata)
def test_update_volume_metadata_with_metatype(self):
"""Test update volume metadata with different metadata type."""
test_meta1 = {'fake_key1': 'fake_value1'}
test_meta2 = {'fake_key1': 'fake_value2'}
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
# update user metadata associated with the volume.
result_meta = self.volume_api.update_volume_metadata(
self.context,
volume,
test_meta2,
False,
common.METADATA_TYPES.user)
self.assertEqual(test_meta2, result_meta)
# create image metadata associated with the volume.
result_meta = self.volume_api.update_volume_metadata(
self.context,
volume,
test_meta1,
False,
common.METADATA_TYPES.image)
self.assertEqual(test_meta1, result_meta)
# update image metadata associated with the volume.
result_meta = self.volume_api.update_volume_metadata(
self.context,
volume,
test_meta2,
False,
common.METADATA_TYPES.image)
self.assertEqual(test_meta2, result_meta)
# update volume metadata with an invalid metadata type.
self.assertRaises(exception.InvalidMetadataType,
self.volume_api.update_volume_metadata,
self.context,
volume,
test_meta1,
False,
FAKE_METADATA_TYPE.fake_type)
def test_update_volume_metadata_maintenance(self):
"""Test update volume metadata with different metadata type."""
test_meta1 = {'fake_key1': 'fake_value1'}
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
self.assertRaises(exception.InvalidVolume,
self.volume_api.update_volume_metadata,
self.context,
volume,
test_meta1,
False,
FAKE_METADATA_TYPE.fake_type)
@mock.patch('cinder.db.volume_update')
def test_update_with_ovo(self, volume_update):
"""Test update volume using oslo_versionedobject."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
updates = {'display_name': 'foobbar'}
self.volume_api.update(self.context, volume, updates)
volume_update.assert_called_once_with(self.context, volume.id,
updates)
self.assertEqual('foobbar', volume.display_name)
def test_delete_volume_metadata_with_metatype(self):
"""Test delete volume metadata with different metadata type."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
test_meta2 = {'fake_key1': 'fake_value1'}
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
# delete user metadata associated with the volume.
self.volume_api.delete_volume_metadata(
self.context,
volume,
'fake_key2',
common.METADATA_TYPES.user)
self.assertEqual(test_meta2,
db.volume_metadata_get(self.context, volume_id))
# create image metadata associated with the volume.
result_meta = self.volume_api.update_volume_metadata(
self.context,
volume,
test_meta1,
False,
common.METADATA_TYPES.image)
self.assertEqual(test_meta1, result_meta)
# delete image metadata associated with the volume.
self.volume_api.delete_volume_metadata(
self.context,
volume,
'fake_key2',
common.METADATA_TYPES.image)
# parse the result to build the dict.
rows = db.volume_glance_metadata_get(self.context, volume_id)
result = {}
for row in rows:
result[row['key']] = row['value']
self.assertEqual(test_meta2, result)
# delete volume metadata with an invalid metadata type.
self.assertRaises(exception.InvalidMetadataType,
self.volume_api.delete_volume_metadata,
self.context,
volume,
'fake_key1',
FAKE_METADATA_TYPE.fake_type)
def test_delete_volume_metadata_maintenance(self):
"""Test delete volume metadata in maintenance."""
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
self.assertRaises(exception.InvalidVolume,
self.volume_api.delete_volume_metadata,
self.context,
volume,
'fake_key1',
FAKE_METADATA_TYPE.fake_type)
def test_volume_attach_in_maintenance(self):
"""Test attach the volume in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
self.context,
volume, None, None, None, None)
def test_volume_detach_in_maintenance(self):
"""Test detach the volume in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.detach,
self.context,
volume, None)
def test_initialize_connection_maintenance(self):
"""Test initialize connection in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.initialize_connection,
self.context,
volume,
None)
def test_accept_transfer_maintenance(self):
"""Test accept transfer in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.accept_transfer,
self.context,
volume,
None, None)
def test_copy_volume_to_image_maintenance(self):
"""Test copy volume to image in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.copy_volume_to_image,
self.context,
volume,
test_meta1,
force=True)
@mock.patch.object(cinder.volume.api.API, 'list_availability_zones')
def test_create_volume_uses_default_availability_zone(self, mock_list_az):
"""Test setting availability_zone correctly during volume create."""
mock_list_az.return_value = ({'name': 'az1', 'available': True},
{'name': 'az2', 'available': True},
{'name': 'default-az', 'available': True})
volume_api = cinder.volume.api.API()
# Test backwards compatibility, default_availability_zone not set
self.override_config('storage_availability_zone', 'az2')
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual('az2', volume['availability_zone'])
self.override_config('default_availability_zone', 'default-az')
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual('default-az', volume['availability_zone'])
@mock.patch('cinder.quota.QUOTAS.rollback', new=mock.MagicMock())
@mock.patch('cinder.quota.QUOTAS.commit', new=mock.MagicMock())
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_create_volume_with_volume_type(self, _mock_reserve):
"""Test volume creation with default volume type."""
volume_api = cinder.volume.api.API()
# Create volume with default volume type while default
# volume type doesn't exist, volume_type_id should be NULL
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertIsNone(volume['volume_type_id'])
self.assertIsNone(volume['encryption_key_id'])
# Create default volume type
vol_type = conf_fixture.def_vol_type
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
# Create volume with default volume type
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
self.assertIsNone(volume['encryption_key_id'])
# Create volume with specific volume type
vol_type = 'test'
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
@mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
def test_create_volume_with_encrypted_volume_type_aes(self):
ctxt = context.get_admin_context()
cipher = 'aes-xts-plain64'
key_size = 256
control_location = 'front-end'
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': control_location,
'provider': ENCRYPTION_PROVIDER,
'cipher': cipher,
'key_size': key_size})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
key_manager = volume_api.key_manager
key = key_manager.get(self.context, volume['encryption_key_id'])
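# The encoded key length is in bytes, so multiply by 8 to compare against key_size (bits).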
self.assertEqual(key_size, len(key.get_encoded()) * 8)
self.assertEqual('aes', key.algorithm)
metadata = db.volume_encryption_metadata_get(self.context, volume.id)
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
self.assertEqual(cipher, metadata.get('cipher'))
self.assertEqual(key_size, metadata.get('key_size'))
self.assertIsNotNone(volume['encryption_key_id'])
@mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
def test_create_volume_with_encrypted_volume_type_blowfish(self):
ctxt = context.get_admin_context()
cipher = 'blowfish-cbc'
key_size = 32
control_location = 'front-end'
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': control_location,
'provider': ENCRYPTION_PROVIDER,
'cipher': cipher,
'key_size': key_size})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
key_manager = volume_api.key_manager
key = key_manager.get(self.context, volume['encryption_key_id'])
self.assertEqual('blowfish', key.algorithm)
metadata = db.volume_encryption_metadata_get(self.context, volume.id)
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
self.assertEqual(cipher, metadata.get('cipher'))
self.assertEqual(key_size, metadata.get('key_size'))
self.assertIsNotNone(volume['encryption_key_id'])
def test_create_volume_with_provider_id(self):
volume_params_with_provider_id = dict(provider_id=fake.PROVIDER_ID,
**self.volume_params)
volume = tests_utils.create_volume(self.context,
**volume_params_with_provider_id)
self.volume.create_volume(self.context, volume['id'])
self.assertEqual(fake.PROVIDER_ID, volume['provider_id'])
@mock.patch.object(key_manager, 'API', new=fake_keymgr.fake_api)
def test_create_delete_volume_with_encrypted_volume_type(self):
cipher = 'aes-xts-plain64'
key_size = 256
db.volume_type_create(self.context,
{'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'})
db.volume_type_encryption_create(
self.context, fake.VOLUME_TYPE_ID,
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER,
'cipher': cipher, 'key_size': key_size})
db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS')
volume = self.volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertIsNotNone(volume.get('encryption_key_id', None))
self.assertEqual(db_vol_type.get('id'), volume['volume_type_id'])
self.assertIsNotNone(volume['encryption_key_id'])
volume['host'] = 'fake_host'
volume['status'] = 'available'
db.volume_update(self.context, volume['id'], {'status': 'available'})
self.volume_api.delete(self.context, volume)
volume = objects.Volume.get_by_id(self.context, volume.id)
while volume.status == 'available':
# Must wait for volume_api delete request to process enough to
# change the volume status.
time.sleep(0.5)
volume.refresh()
self.assertEqual('deleting', volume['status'])
db.volume_destroy(self.context, volume['id'])
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume['id'])
def test_extra_capabilities(self):
# Test valid extra_capabilities.
fake_capabilities = {'key1': 1, 'key2': 2}
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.return_value = fake_capabilities
manager = vol_manager.VolumeManager()
manager.stats = {'pools': {}}
manager.driver.set_initialized()
manager.publish_service_capabilities(self.context)
self.assertTrue(mock_loads.called)
volume_stats = manager.last_capabilities
self.assertEqual(fake_capabilities['key1'],
volume_stats['key1'])
self.assertEqual(fake_capabilities['key2'],
volume_stats['key2'])
def test_extra_capabilities_fail(self):
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.side_effect = exception.CinderException('test')
self.assertRaises(exception.CinderException,
vol_manager.VolumeManager)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
with mock.patch.object(self.volume.driver, 'delete_volume',
side_effect=exception.VolumeIsBusy(
volume_name='fake')
) as mock_del_vol:
self.volume.delete_volume(self.context, volume_id, volume=volume)
volume_ref = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume_id, volume_ref.id)
self.assertEqual("available", volume_ref.status)
mock_del_vol.assert_called_once_with(volume)
def test_get_volume_different_tenant(self):
"""Test can't get volume of another tenant when viewable_admin_meta."""
volume = tests_utils.create_volume(self.context,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
another_context = context.RequestContext('another_user_id',
'another_project_id',
is_admin=False)
self.assertNotEqual(another_context.project_id,
self.context.project_id)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.VolumeNotFound, volume_api.get,
another_context, volume_id, viewable_admin_meta=True)
self.assertEqual(volume_id,
volume_api.get(self.context, volume_id)['id'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
def test_get_all_limit_bad_value(self):
"""Test value of 'limit' is numeric and >= 0"""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.get_all,
self.context,
limit="A")
self.assertRaises(exception.InvalidInput,
volume_api.get_all,
self.context,
limit="-1")
def test_get_all_tenants_volume_list(self):
"""Validate when the volume list for all tenants is returned"""
volume_api = cinder.volume.api.API()
with mock.patch.object(volume_api.db,
'volume_get_all_by_project') as by_project:
with mock.patch.object(volume_api.db,
'volume_get_all') as get_all:
db_volume = {'volume_type_id': fake.VOLUME_TYPE_ID,
'name': 'fake_name',
'host': 'fake_host',
'id': fake.VOLUME_ID}
volume = fake_volume.fake_db_volume(**db_volume)
by_project.return_value = [volume]
get_all.return_value = [volume]
volume_api.get_all(self.context, filters={'all_tenants': '0'})
self.assertTrue(by_project.called)
by_project.called = False
self.context.is_admin = False
volume_api.get_all(self.context, filters={'all_tenants': '1'})
self.assertTrue(by_project.called)
# check for volume list of all tenants
self.context.is_admin = True
volume_api.get_all(self.context, filters={'all_tenants': '1'})
self.assertTrue(get_all.called)
def test_delete_volume_in_error_extending(self):
"""Test volume can be deleted in error_extending stats."""
# create a volume
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
# delete 'error_extending' volume
db.volume_update(self.context, volume['id'],
{'status': 'error_extending'})
self.volume.delete_volume(self.context, volume.id, volume=volume)
self.assertRaises(exception.NotFound, db.volume_get,
self.context, volume['id'])
@mock.patch.object(db.sqlalchemy.api, 'volume_get',
side_effect=exception.VolumeNotFound(
volume_id='12345678-1234-5678-1234-567812345678'))
def test_delete_volume_not_found(self, mock_get_volume):
"""Test delete volume moves on if the volume does not exist."""
volume_id = '12345678-1234-5678-1234-567812345678'
volume = objects.Volume(self.context, id=volume_id)
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertTrue(mock_get_volume.called)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot')
def test_create_volume_from_snapshot(self, mock_create_from_snap):
"""Test volume can be created from a snapshot."""
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
snapshot_id = create_snapshot(volume_src['id'],
size=volume_src['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_obj)
volume_dst = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst.id,
volume=volume_dst)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id,
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst.id,
volume=volume_dst)
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
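# The next few tests patch the create-volume flow factory (get_flow) so no
# real taskflow runs; they only exercise the API-side volume type validation
# against the snapshot's or source volume's type.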
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_snapshot_with_types(self, _get_flow):
"""Test volume create from snapshot with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(
context.get_admin_context(),
{'name': 'foo',
'extra_specs': {'volume_backend_name': 'dev_1'}})
db.volume_type_create(
context.get_admin_context(),
{'name': 'biz', 'extra_specs': {'volume_backend_name': 'dev_2'}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
snapshot = {'id': fake.SNAPSHOT_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
# Make sure the case of specifying a type that
# doesn't match the snapshot's type fails
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
# Make sure that trying to specify a type
# when the snapshot's type is None fails
snapshot_obj.volume_type_id = None
self.assertRaises(exception.InvalidVolumeType,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
snapshot_obj.volume_type_id = foo_type['id']
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=foo_type,
snapshot=snapshot_obj)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_source_with_types(self, _get_flow):
"""Test volume create from source with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(
context.get_admin_context(),
{'name': 'foo',
'extra_specs': {'volume_backend_name': 'dev_1'}})
db.volume_type_create(
context.get_admin_context(),
{'name': 'biz', 'extra_specs': {'volume_backend_name': 'dev_2'}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
source_vol = {'id': fake.VOLUME_ID,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
# Make sure that trying to specify a type
# when the source type is None fails
source_vol['volume_type_id'] = None
source_vol['volume_type'] = None
self.assertRaises(exception.InvalidVolumeType,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
source_vol['volume_type_id'] = biz_type['id']
source_vol['volume_type'] = biz_type
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=biz_type,
source_volume=source_vol)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_source_with_same_backend(self, _get_flow):
"""Test volume create from source with type mismatch same backend."""
volume_api = cinder.volume.api.API()
foo_type = {
'name': 'foo',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
'updated_at': None,
'extra_specs': {'volume_backend_name': 'dev_1'},
'is_public': True,
'deleted_at': None,
'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
'description': None}
biz_type = {
'name': 'biz',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
'updated_at': None,
'extra_specs': {'volume_backend_name': 'dev_1'},
'is_public': True,
'deleted_at': None,
'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
'description': None}
source_vol = {'id': fake.VOLUME_ID,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_from_source_and_snap_only_one_backend(self, _get_flow):
"""Test create from source and snap with type mismatch one backend."""
volume_api = cinder.volume.api.API()
foo_type = {
'name': 'foo',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
'updated_at': None,
'extra_specs': {'some_key': 3},
'is_public': True,
'deleted_at': None,
'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
'description': None}
biz_type = {
'name': 'biz',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
'updated_at': None,
'extra_specs': {'some_other_key': 4},
'is_public': True,
'deleted_at': None,
'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
'description': None}
source_vol = {'id': fake.VOLUME_ID,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
snapshot = {'id': fake.SNAPSHOT_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
with mock.patch('cinder.db.service_get_all') as mock_get_service, \
mock.patch.object(volume_api,
'list_availability_zones') as mock_get_azs:
mock_get_service.return_value = [{'host': 'foo'}]
mock_get_azs.return_value = {}
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
def test_create_snapshot_driver_not_initialized(self):
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
snapshot_id = create_snapshot(volume_src['id'],
size=volume_src['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_snapshot,
self.context, volume_src['id'], snapshot_obj)
# NOTE(flaper87): The snapshot status should be error.
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)
# let's clean up the mess
self.volume.driver._initialized = True
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
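# Helper: decorator factory that wraps a function and records
# 'lock-<name>' / 'unlock-<name>' markers in self.called around the call,
# mimicking a synchronized decorator for lock-ordering assertions.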
def _mock_synchronized(self, name, *s_args, **s_kwargs):
def inner_sync1(f):
def inner_sync2(*args, **kwargs):
self.called.append('lock-%s' % (name))
ret = f(*args, **kwargs)
self.called.append('unlock-%s' % (name))
return ret
return inner_sync2
return inner_sync1
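# Helper: no-op stand-in for utils.execute so the tests never shell out.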
def _fake_execute(self, *cmd, **kwargs):
pass
@mock.patch.object(coordination.Coordinator, 'get_lock')
@mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'create_volume_from_snapshot')
def test_create_volume_from_snapshot_check_locks(
self, mock_lvm_create, mock_lock):
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
mock_lock.assert_called_with('%s-delete_snapshot' % snap_id)
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
snap_id = create_snapshot(src_vol_id,
size=src_vol['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
# no lock
self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj)
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snap_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.mock_object(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
request_spec={'snapshot_id': snap_id},
volume=dst_vol)
mock_lock.assert_called_with('%s-delete_snapshot' % snap_id)
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(snap_id,
db.volume_get(admin_ctxt, dst_vol_id).snapshot_id)
# locked
self.volume.delete_volume(self.context, dst_vol_id, volume=dst_vol)
mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id)
# locked
self.volume.delete_snapshot(self.context, snapshot_obj)
mock_lock.assert_called_with('%s-delete_snapshot' % snap_id)
# locked
self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
self.assertTrue(mock_lvm_create.called)
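# As above, coordination.Coordinator.get_lock is mocked so the test can
# assert which named lock ('<id>-delete_volume') is requested around the
# clone, destination-delete and source-delete operations.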
@mock.patch.object(coordination.Coordinator, 'get_lock')
def test_create_volume_from_volume_check_locks(self, mock_lock):
# stub out utils.execute so no real commands are run
self.mock_object(utils, 'execute', self._fake_execute)
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
self.assertEqual(0, mock_lock.call_count)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.mock_object(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
request_spec={'source_volid': src_vol_id},
volume=dst_vol)
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(src_vol_id,
db.volume_get(admin_ctxt, dst_vol_id).source_volid)
# locked
self.volume.delete_volume(self.context, dst_vol_id, volume=dst_vol)
mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id)
# locked
self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
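# The *_delete_lock_taken tests hook context.elevated (which is called inside
# the locked delete path) to spawn a concurrent create in a green thread; the
# create must block on the same lock and then fail because the source volume
# or snapshot is gone by the time it runs.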
def test_create_volume_from_volume_delete_lock_taken(self):
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
orig_elevated = self.context.elevated
gthreads = []
def mock_elevated(*args, **kwargs):
# unset mock so it is only called once
self.mock_object(self.context, 'elevated', orig_elevated)
# we expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume,
self.context,
volume_id=dst_vol.id,
request_spec={'source_volid': src_vol_id},
volume=dst_vol)
gthreads.append(t)
return orig_elevated(*args, **kwargs)
# mock something from early on in the delete operation and within the
# lock so that when we do the create we expect it to block.
self.mock_object(self.context, 'elevated', mock_elevated)
# locked
self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
# we expect the volume create to fail with the following error since the
# source volume was deleted while the create was locked. Note that the
# volume is still in the db since it was created by the test prior to
# calling manager.create_volume.
with mock.patch('sys.stderr', new=six.StringIO()):
self.assertRaises(exception.VolumeNotFound, gthreads[0].wait)
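# Helper: patches the named db API call to raise MetadataCopyFailure, asserts
# create_volume propagates it and leaves the destination volume in 'error',
# then destroys the destination as cleanup.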
def _raise_metadata_copy_failure(self, method, dst_vol_id, **kwargs):
# MetadataCopyFailure exception will be raised if the DB service is down
# while copying the volume glance metadata.
with mock.patch.object(db, method) as mock_db:
mock_db.side_effect = exception.MetadataCopyFailure(
reason="Because of DB service down.")
self.assertRaises(exception.MetadataCopyFailure,
self.volume.create_volume,
self.context,
dst_vol_id,
**kwargs)
# ensure that status of volume is 'error'
vol = db.volume_get(self.context, dst_vol_id)
self.assertEqual('error', vol['status'])
# cleanup resource
db.volume_destroy(self.context, dst_vol_id)
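# The *_glance_volume_metadata_none tests mark the source bootable but never
# attach any glance metadata, so the copy helpers are expected to raise
# GlanceMetadataNotFound while the new volume still becomes 'available'; the
# *_raise_metadata_copy_failure variants reuse the helper above to simulate a
# DB failure during that copy.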
@mock.patch('cinder.utils.execute')
def test_create_volume_from_volume_with_glance_volume_metadata_none(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from source volume
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol)
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_from_volume_to_volume,
self.context, src_vol_id, dst_vol['id'])
# ensure that status of volume is 'available'
vol = db.volume_get(self.context, dst_vol['id'])
self.assertEqual('available', vol['status'])
# cleanup resource
db.volume_destroy(self.context, src_vol_id)
db.volume_destroy(self.context, dst_vol['id'])
@mock.patch('cinder.utils.execute')
def test_create_volume_from_volume_raise_metadata_copy_failure(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from source volume
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
self._raise_metadata_copy_failure(
'volume_glance_metadata_copy_from_volume_to_volume',
dst_vol.id, volume=dst_vol)
# cleanup resource
db.volume_destroy(self.context, src_vol_id)
@mock.patch('cinder.utils.execute')
def test_create_volume_from_snapshot_raise_metadata_copy_failure(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from snapshot
snapshot_id = create_snapshot(src_vol['id'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.create_snapshot(self.context, src_vol['id'], snapshot_obj)
# ensure that status of snapshot is 'available'
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status)
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self._raise_metadata_copy_failure(
'volume_glance_metadata_copy_to_volume',
dst_vol.id, volume=dst_vol)
# cleanup resource
snapshot_obj.destroy()
db.volume_destroy(self.context, src_vol_id)
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
@mock.patch('cinder.utils.execute')
def test_create_volume_from_srcreplica_raise_metadata_copy_failure(
self, mock_execute, _create_replica_test):
mock_execute.return_value = None
_create_replica_test.return_value = None
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from source volume
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
self._raise_metadata_copy_failure(
'volume_glance_metadata_copy_from_volume_to_volume',
dst_vol.id, volume=dst_vol)
# cleanup resource
db.volume_destroy(self.context, src_vol_id)
@mock.patch('cinder.utils.execute')
def test_create_volume_from_snapshot_with_glance_volume_metadata_none(
self, mock_execute):
# create source volume
mock_execute.return_value = None
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
volume = db.volume_get(self.context, src_vol_id)
# create snapshot of volume
snapshot_id = create_snapshot(volume['id'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.create_snapshot(self.context, volume['id'], snapshot_obj)
# ensure that status of snapshot is 'available'
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status)
# create volume from snapshot
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol)
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_to_volume,
self.context, dst_vol['id'], snapshot_id)
# ensure that status of volume is 'available'
vol = db.volume_get(self.context, dst_vol['id'])
self.assertEqual('available', vol['status'])
# cleanup resource
snapshot_obj.destroy()
db.volume_destroy(self.context, src_vol_id)
db.volume_destroy(self.context, dst_vol['id'])
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
def test_create_volume_from_srcreplica_with_glance_volume_metadata_none(
self, _create_replica_test):
"""Test volume can be created from a volume replica."""
_create_replica_test.return_value = None
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
db.volume_update(self.context, volume_src['id'], {'bootable': True})
volume = db.volume_get(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst.id,
{'source_replicaid': volume.id},
volume=volume_dst)
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_from_volume_to_volume,
self.context, volume_src['id'], volume_dst['id'])
self.assertEqual('available',
db.volume_get(self.context,
volume_dst['id']).status)
self.assertTrue(_create_replica_test.called)
# cleanup resource
db.volume_destroy(self.context, volume_dst['id'])
db.volume_destroy(self.context, volume_src['id'])
def test_create_volume_from_snapshot_delete_lock_taken(self):
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
# create snapshot
snap_id = create_snapshot(src_vol_id,
size=src_vol['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
# no lock
self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj)
# create vol from snapshot...
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snap_id,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
orig_elevated = self.context.elevated
gthreads = []
def mock_elevated(*args, **kwargs):
# unset mock so it is only called once
self.mock_object(self.context, 'elevated', orig_elevated)
# We expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume, self.context,
volume_id=dst_vol_id,
request_spec={'snapshot_id': snap_id},
volume=dst_vol)
gthreads.append(t)
return orig_elevated(*args, **kwargs)
# mock something from early on in the delete operation and within the
# lock so that when we do the create we expect it to block.
self.mock_object(self.context, 'elevated', mock_elevated)
# locked
self.volume.delete_snapshot(self.context, snapshot_obj)
# we expect the volume create to fail with the following error since the
# snapshot was deleted while the create was locked. Note that the
# volume is still in the db since it was created by the test prior to
# calling manager.create_volume.
with mock.patch('sys.stderr', new=six.StringIO()):
self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait)
# locked
self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
# make sure it is gone
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.context, src_vol_id)
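# The encryption tests below swap in the fake in-memory key manager; they
# verify that a volume created from an encrypted snapshot or an encrypted
# source volume ends up with an encryption key matching the original's.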
@mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
def test_create_volume_from_snapshot_with_encryption(self):
"""Test volume can be created from a snapshot of an encrypted volume"""
ctxt = context.get_admin_context()
cipher = 'aes-xts-plain64'
key_size = 256
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER,
'cipher': cipher, 'key_size': key_size})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
'LUKS')
volume_src = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
volume_src['host'] = 'fake_host'
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume_src,
'name',
'description')
snapshot_ref['status'] = fields.SnapshotStatus.AVAILABLE
# status must be available
volume_dst = volume_api.create(self.context,
1,
'name',
'description',
snapshot=snapshot_ref)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_ref['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
# ensure encryption keys match
self.assertIsNotNone(volume_src['encryption_key_id'])
self.assertIsNotNone(volume_dst['encryption_key_id'])
key_manager = volume_api.key_manager # must use *same* key manager
volume_src_key = key_manager.get(self.context,
volume_src['encryption_key_id'])
volume_dst_key = key_manager.get(self.context,
volume_dst['encryption_key_id'])
self.assertEqual(volume_src_key, volume_dst_key)
def test_create_volume_from_encrypted_volume(self):
"""Test volume can be created from an encrypted volume."""
self.mock_object(key_manager, 'API', fake_keymgr.fake_api)
cipher = 'aes-xts-plain64'
key_size = 256
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER,
'cipher': cipher, 'key_size': key_size})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
'LUKS')
volume_src = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
volume_src['status'] = 'available' # status must be available
volume_dst = volume_api.create(self.context,
1,
'name',
'description',
source_volume=volume_src)
self.assertEqual(volume_dst['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(volume_src['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).source_volid)
# ensure encryption keys match
self.assertIsNotNone(volume_src['encryption_key_id'])
self.assertIsNotNone(volume_dst['encryption_key_id'])
km = volume_api.key_manager # must use *same* key manager
volume_src_key = km.get(self.context,
volume_src['encryption_key_id'])
volume_dst_key = km.get(self.context,
volume_dst['encryption_key_id'])
self.assertEqual(volume_src_key, volume_dst_key)
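# Deleting an encrypted volume whose key deletion fails should be reported to
# the caller as InvalidVolume rather than leaking the raw key-manager error.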
def test_delete_encrypted_volume(self):
self.volume_params['status'] = 'active'
volume = tests_utils.create_volume(self.context,
**self.volume_params)
vol_api = cinder.volume.api.API()
with mock.patch.object(
vol_api.key_manager,
'delete',
side_effect=Exception):
self.assertRaises(exception.InvalidVolume,
vol_api.delete,
self.context, volume)
def test_create_volume_from_snapshot_fail_bad_size(self):
"""Test volume can't be created from snapshot with bad volume size."""
volume_api = cinder.volume.api.API()
snapshot = {'id': fake.SNAPSHOT_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot_obj)
def test_create_volume_from_snapshot_fail_wrong_az(self):
"""Test volume can't be created from snapshot in a different az."""
volume_api = cinder.volume.api.API()
def fake_list_availability_zones(enable_cache=False):
return ({'name': 'nova', 'available': True},
{'name': 'az2', 'available': True})
self.mock_object(volume_api,
'list_availability_zones',
fake_list_availability_zones)
volume_src = tests_utils.create_volume(self.context,
availability_zone='az2',
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
snapshot = create_snapshot(volume_src['id'])
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot)
volume_dst = volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot)
self.assertEqual('az2', volume_dst['availability_zone'])
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot,
availability_zone='nova')
def test_create_volume_with_invalid_exclusive_options(self):
"""Test volume create with multiple exclusive options fails."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
1,
'name',
'description',
snapshot=fake.SNAPSHOT_ID,
image_id=fake.IMAGE_ID,
source_volume=fake.VOLUME_ID)
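# initialize_connection() should forward qos_specs in the connection info only
# when the specs are consumed by the front end (or by both ends); the test
# below flips the 'consumer' value to cover all three cases.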
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
'_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_fetchqos(self,
_mock_volume_update,
_mock_volume_get,
_mock_volume_admin_metadata_get,
mock_get_target):
"""Make sure initialize_connection returns correct information."""
_fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
_fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID,
'name': 'fake_name',
'host': 'fake_host',
'id': fake.VOLUME_ID,
'volume_admin_metadata': _fake_admin_meta}
fake_volume_obj = fake_volume.fake_volume_obj(self.context,
**_fake_volume)
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_volume_admin_metadata_get.return_value = {
'fake-key': 'fake-value'}
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
qos_values = {'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2'}
}
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type_qos_specs') as type_qos, \
mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'initialize_connection') as driver_init:
type_qos.return_value = dict(qos_specs=qos_values)
driver_init.return_value = {'data': {}}
mock_get_target.return_value = None
qos_specs_expected = {'key1': 'value1',
'key2': 'value2'}
# initialize_connection() passes qos_specs that are designated to
# be consumed by front-end or both front-end and back-end
conn_info = self.volume.initialize_connection(
self.context, fake.VOLUME_ID, connector,
volume=fake_volume_obj)
self.assertDictMatch(qos_specs_expected,
conn_info['data']['qos_specs'])
qos_values.update({'consumer': 'both'})
conn_info = self.volume.initialize_connection(
self.context, fake.VOLUME_ID, connector,
volume=fake_volume_obj)
self.assertDictMatch(qos_specs_expected,
conn_info['data']['qos_specs'])
# initialize_connection() skips qos_specs that are designated to be
# consumed by back-end only
qos_values.update({'consumer': 'back-end'})
type_qos.return_value = dict(qos_specs=qos_values)
conn_info = self.volume.initialize_connection(
self.context, fake.VOLUME_ID, connector,
volume=fake_volume_obj)
self.assertIsNone(conn_info['data']['qos_specs'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export')
def test_initialize_connection_export_failure(self,
_mock_create_export):
"""Test exception path for create_export failure."""
volume = tests_utils.create_volume(
self.context, admin_metadata={'fake-key': 'fake-value'},
volume_type_id=fake.VOLUME_TYPE_ID, **self.volume_params)
_mock_create_export.side_effect = exception.CinderException
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.initialize_connection,
self.context, fake.VOLUME_ID, connector,
volume=volume)
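# Common pattern in the attach/detach tests below: volume_admin_metadata is
# returned as a list of {'key': ..., 'value': ...} rows, which each test
# flattens into a plain dict before comparing against the expected
# readonly/attached_mode values.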
def test_run_attach_detach_volume_for_instance(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual("in-use", vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_detach_invalid_attachment_id(self):
"""Make sure if the attachment id isn't found we raise."""
attachment_id = "notfoundid"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
instance_uuid = '12345678-1234-5678-1234-567812345678'
attached_host = 'fake_host'
mountpoint = '/dev/fake'
tests_utils.attach_volume(self.context, volume['id'],
instance_uuid, attached_host,
mountpoint)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('in-use', volume['status'])
def test_detach_no_attachments(self):
self.volume_params['status'] = 'detaching'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'])
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
def test_run_attach_detach_volume_for_instance_no_attachment_id(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345678'
instance_uuid_2 = '12345678-4321-8765-4321-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid_2, None,
mountpoint, 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.assertRaises(exception.InvalidVolume,
self.volume.detach_volume,
self.context, volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
# attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance2_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
self.assertNotEqual(attachment, attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_twice_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
# attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345699'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertIsNone(attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
def test_attach_detach_not_multiattach_volume_for_instances(self):
"""Make sure volume can't be attached to more than one instance."""
mountpoint = "/dev/sdf"
# attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
instance2_uuid,
None,
mountpoint2, 'ro')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_for_host(self):
"""Make sure volume can be attached and detached from host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host2', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertIsNone(attachment2['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host2', attachment2['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("in-use", vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_twice_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertIsNone(attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id, volume=volume)
def test_run_attach_detach_not_multiattach_volume_for_hosts(self):
"""Make sure volume can't be attached to more than one host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host2',
mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_with_attach_mode(self):
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
db.volume_update(self.context, volume_id, {'status': 'available', })
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.volume.detach_volume(self.context, volume_id,
attachment['id'])
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
# Attaching a readonly volume in 'read-write' mode is not allowed.
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
instance_uuid,
None,
mountpoint,
'rw')
# Assert a user message was created
self.volume.message_api.create.assert_called_once_with(
self.context, defined_messages.ATTACH_READONLY_VOLUME,
self.context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume['id'])
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('error_attaching', vol['status'])
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('error_attaching', vol['status'])
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
# Attaching a readonly volume in 'read-write' mode is not allowed.
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('detached', vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
def test_detach_volume_while_uploading_to_image_is_in_progress(self):
# If an instance is booted from a volume with the 'Terminate on Delete'
# flag set, deleting the instance also triggers deletion of the volume,
# even when it is in the 'uploading' state.
# This happens because the detach call sets the volume status back to
# 'available'.
mountpoint = "/dev/sdf"
# Attach volume to the instance
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
# Change volume status to 'uploading'
db.volume_update(self.context, volume_id, {'status': 'uploading'})
# Call detach api
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
# Check that volume status is 'uploading'
self.assertEqual("uploading", vol['status'])
self.assertEqual("detached", vol['attach_status'])
def test_reserve_volume_success(self):
volume = tests_utils.create_volume(self.context, status='available')
cinder.volume.api.API().reserve_volume(self.context, volume)
volume_db = db.volume_get(self.context, volume.id)
self.assertEqual('attaching', volume_db.status)
db.volume_destroy(self.context, volume.id)
def test_reserve_volume_in_attaching(self):
self._test_reserve_volume_bad_status('attaching')
def test_reserve_volume_in_maintenance(self):
self._test_reserve_volume_bad_status('maintenance')
def _test_reserve_volume_bad_status(self, status):
volume = tests_utils.create_volume(self.context, status=status)
self.assertRaises(exception.InvalidVolume,
cinder.volume.api.API().reserve_volume,
self.context,
volume)
db.volume_destroy(self.context, volume.id)
def test_unreserve_volume_success_in_use(self):
UUID = six.text_type(uuid.uuid4())
volume = tests_utils.create_volume(self.context, status='attaching')
tests_utils.attach_volume(self.context, volume.id, UUID,
'attached_host', 'mountpoint', mode='rw')
cinder.volume.api.API().unreserve_volume(self.context, volume)
db_volume = db.volume_get(self.context, volume.id)
self.assertEqual('in-use', db_volume.status)
def test_unreserve_volume_success_available(self):
volume = tests_utils.create_volume(self.context, status='attaching')
cinder.volume.api.API().unreserve_volume(self.context, volume)
db_volume = db.volume_get(self.context, volume.id)
self.assertEqual('available', db_volume.status)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
def test_create_delete_snapshot(self, mock_notify):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
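# No notifications should have been emitted yet; so far the volume only
# exists in the database.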
mock_notify.assert_not_called()
self.volume.create_volume(self.context, volume['id'], volume=volume)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end']))
snapshot = create_snapshot(volume['id'], size=volume['size'])
snapshot_id = snapshot.id
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot_id, objects.Snapshot.get_by_id(self.context,
snapshot_id).id)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'snapshot.create.start'],
['INFO', 'snapshot.create.end']))
self.volume.delete_snapshot(self.context, snapshot)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'snapshot.create.start'],
['INFO', 'snapshot.create.end'],
['INFO', 'snapshot.delete.start'],
['INFO', 'snapshot.delete.end']))
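# The snapshot row is soft-deleted, so fetch it with read_deleted='yes'
# to check its final status.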
snap = objects.Snapshot.get_by_id(context.get_admin_context(
read_deleted='yes'), snapshot_id)
self.assertEqual(fields.SnapshotStatus.DELETED, snap.status)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_create_delete_snapshot_with_metadata(self):
"""Test snapshot can be created with metadata and deleted."""
test_meta = {'fake_key': 'fake_value'}
volume = tests_utils.create_volume(self.context, **self.volume_params)
snapshot = create_snapshot(volume['id'], size=volume['size'],
metadata=test_meta)
snapshot_id = snapshot.id
result_dict = snapshot.metadata
self.assertEqual(test_meta, result_dict)
self.volume.delete_snapshot(self.context, snapshot)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
@mock.patch.object(db, 'snapshot_create',
side_effect=exception.InvalidSnapshot(
'Create snapshot in db failed!'))
def test_create_snapshot_failed_db_snapshot(self, mock_snapshot):
"""Test exception handling when create snapshot in db failed."""
test_volume = tests_utils.create_volume(
self.context,
status='available',
host=CONF.host)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidSnapshot,
volume_api.create_snapshot,
self.context,
test_volume,
'fake_name',
'fake_description')
def test_create_snapshot_failed_maintenance(self):
"""Test exception handling when create snapshot in maintenance."""
test_volume = tests_utils.create_volume(
self.context,
status='maintenance',
host=CONF.host)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context,
test_volume,
'fake_name',
'fake_description')
@mock.patch.object(QUOTAS, 'commit',
side_effect=exception.QuotaError(
'Snapshot quota commit failed!'))
def test_create_snapshot_failed_quota_commit(self, mock_snapshot):
"""Test exception handling when snapshot quota commit failed."""
test_volume = tests_utils.create_volume(
self.context,
status='available',
host=CONF.host)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.QuotaError,
volume_api.create_snapshot,
self.context,
test_volume,
'fake_name',
'fake_description')
@mock.patch.object(QUOTAS, 'reserve',
side_effect=OVER_SNAPSHOT_QUOTA_EXCEPTION)
def test_create_snapshot_failed_quota_reserve(self, mock_reserve):
"""Test exception handling when snapshot quota reserve failed."""
test_volume = tests_utils.create_volume(
self.context,
status='available',
host=CONF.host)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.SnapshotLimitExceeded,
volume_api.create_snapshot,
self.context,
test_volume,
'fake_name',
'fake_description')
@mock.patch.object(QUOTAS, 'reserve',
side_effect=OVER_SNAPSHOT_QUOTA_EXCEPTION)
def test_create_snapshots_in_db_failed_quota_reserve(self, mock_reserve):
"""Test exception handling when snapshot quota reserve failed."""
test_volume = tests_utils.create_volume(
self.context,
status='available',
host=CONF.host)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.SnapshotLimitExceeded,
volume_api.create_snapshots_in_db,
self.context,
[test_volume],
'fake_name',
'fake_description',
fake.CONSISTENCY_GROUP_ID)
def test_cannot_delete_volume_in_use(self):
"""Test volume can't be deleted in in-use status."""
self._test_cannot_delete_volume('in-use')
def test_cannot_delete_volume_maintenance(self):
"""Test volume can't be deleted in maintenance status."""
self._test_cannot_delete_volume('maintenance')
def _test_cannot_delete_volume(self, status):
"""Test volume can't be deleted in invalid stats."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, CONF.host,
status=status)
# Deleting a volume in any of these statuses raises InvalidVolume
self.assertRaises(exception.InvalidVolume,
self.volume_api.delete,
self.context,
volume)
# clean up
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_force_delete_volume(self):
"""Test volume can be forced to delete."""
# create a volume and assign to host
self.volume_params['status'] = 'error_deleting'
volume = tests_utils.create_volume(self.context, **self.volume_params)
# 'error_deleting' volumes can't be deleted
self.assertRaises(exception.InvalidVolume,
self.volume_api.delete,
self.context,
volume)
# delete with force
self.volume_api.delete(self.context, volume, force=True)
# status is deleting
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('deleting', volume.status)
# clean up
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_cannot_force_delete_attached_volume(self):
"""Test volume can't be force delete in attached state."""
volume = tests_utils.create_volume(self.context, CONF.host,
status='in-use',
attach_status='attached')
self.assertRaises(exception.InvalidVolume,
self.volume_api.delete,
self.context,
volume,
force=True)
db.volume_destroy(self.context, volume.id)
def test_cannot_delete_volume_with_snapshots(self):
"""Test volume can't be deleted with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
snapshot = create_snapshot(volume['id'], size=volume['size'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot.id, objects.Snapshot.get_by_id(self.context,
snapshot.id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
self.volume.delete_snapshot(self.context, snapshot)
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_can_delete_errored_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(self.context, CONF.host)
snapshot = create_snapshot(volume.id, size=volume['size'],
ctxt=self.context,
status=fields.SnapshotStatus.ERROR)
self.volume_api.delete_snapshot(self.context, snapshot)
self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status)
self.volume.delete_volume(self.context, volume.id)
def test_cannot_delete_snapshot_with_bad_status(self):
volume = tests_utils.create_volume(self.context, CONF.host)
snapshot = create_snapshot(volume.id, size=volume['size'],
ctxt=self.context,
status=fields.SnapshotStatus.CREATING)
self.assertRaises(exception.InvalidSnapshot,
self.volume_api.delete_snapshot,
self.context,
snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
self.volume_api.delete_snapshot(self.context, snapshot)
self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status)
self.volume.delete_volume(self.context, volume.id)
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
instance_uuid = '12345678-1234-5678-1234-567812345678'
# create volume and attach to the instance
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
values = {'volume_id': volume['id'],
'instance_uuid': instance_uuid,
'attach_status': 'attaching', }
attachment = db.volume_attach(self.context, values)
db.volume_attached(self.context, attachment['id'], instance_uuid,
None, '/dev/sda1')
volume_api = cinder.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
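# create_snapshot_force is expected to skip the in-use check, so this
# succeeds even though the volume is attached.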
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
snapshot_ref.destroy()
db.volume_destroy(self.context, volume['id'])
# create volume and attach to the host
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
values = {'volume_id': volume['id'],
'attached_host': 'fake_host',
'attach_status': 'attaching', }
attachment = db.volume_attach(self.context, values)
db.volume_attached(self.context, attachment['id'], None,
'fake_host', '/dev/sda1')
volume_api = cinder.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
snapshot_ref.destroy()
db.volume_destroy(self.context, volume['id'])
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_snapshot_from_bootable_volume(self, mock_qemu_info):
"""Test create snapshot from bootable volume."""
# create bootable volume from image
volume = self._create_volume_from_image()
volume_id = volume['id']
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
# get volume's volume_glance_metadata
ctxt = context.get_admin_context()
vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id)
self.assertTrue(vol_glance_meta)
# create snapshot from bootable volume
snap = create_snapshot(volume_id)
self.volume.create_snapshot(ctxt, volume_id, snap)
# get snapshot's volume_glance_metadata
snap_glance_meta = db.volume_snapshot_glance_metadata_get(
ctxt, snap.id)
self.assertTrue(snap_glance_meta)
# ensure that volume's glance metadata is copied
# to snapshot's glance metadata
self.assertEqual(len(vol_glance_meta), len(snap_glance_meta))
vol_glance_dict = {x.key: x.value for x in vol_glance_meta}
snap_glance_dict = {x.key: x.value for x in snap_glance_meta}
self.assertDictMatch(vol_glance_dict, snap_glance_dict)
# ensure that snapshot's status is changed to 'available'
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snap.status)
# cleanup resource
snap.destroy()
db.volume_destroy(ctxt, volume_id)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_snapshot_from_bootable_volume_fail(self, mock_qemu_info):
"""Test create snapshot from bootable volume.
But volume_glance_metadata_copy_to_snapshot fails.
As a result, the snapshot status is changed to ERROR.
"""
# create bootable volume from image
volume = self._create_volume_from_image()
volume_id = volume['id']
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
# get volume's volume_glance_metadata
ctxt = context.get_admin_context()
vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id)
self.assertTrue(vol_glance_meta)
snap = create_snapshot(volume_id)
snap_stat = snap.status
self.assertTrue(snap.id)
self.assertTrue(snap_stat)
# set to return DB exception
with mock.patch.object(db, 'volume_glance_metadata_copy_to_snapshot')\
as mock_db:
mock_db.side_effect = exception.MetadataCopyFailure(
reason="Because of DB service down.")
# create snapshot from bootable volume
self.assertRaises(exception.MetadataCopyFailure,
self.volume.create_snapshot,
ctxt,
volume_id,
snap)
# get snapshot's volume_glance_metadata
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_snapshot_glance_metadata_get,
ctxt, snap.id)
# ensure that status of snapshot is 'error'
self.assertEqual(fields.SnapshotStatus.ERROR, snap.status)
# cleanup resource
snap.destroy()
db.volume_destroy(ctxt, volume_id)
def test_create_snapshot_from_bootable_volume_with_volume_metadata_none(
self):
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
# set bootable flag of volume to True
db.volume_update(self.context, volume_id, {'bootable': True})
snapshot = create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_snapshot_glance_metadata_get,
self.context, snapshot.id)
# ensure that status of snapshot is 'available'
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
# cleanup resource
snapshot.destroy()
db.volume_destroy(self.context, volume_id)
def test_delete_busy_snapshot(self):
"""Test snapshot can be created and deleted."""
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
snapshot = create_snapshot(volume_id, size=volume['size'])
self.volume.create_snapshot(self.context, volume_id, snapshot)
with mock.patch.object(self.volume.driver, 'delete_snapshot',
side_effect=exception.SnapshotIsBusy(
snapshot_name='fake')
) as mock_del_snap:
snapshot_id = snapshot.id
self.volume.delete_snapshot(self.context, snapshot)
snapshot_ref = objects.Snapshot.get_by_id(self.context,
snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual(fields.SnapshotStatus.AVAILABLE,
snapshot_ref.status)
mock_del_snap.assert_called_once_with(snapshot)
@test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
def test_delete_no_dev_fails(self):
"""Test delete snapshot with no dev file fails."""
self.mock_object(os.path, 'exists', lambda x: False)
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
snapshot = create_snapshot(volume_id)
snapshot_id = snapshot.id
self.volume.create_snapshot(self.context, volume_id, snapshot)
with mock.patch.object(self.volume.driver, 'delete_snapshot',
side_effect=exception.SnapshotIsBusy(
snapshot_name='fake')) as mock_del_snap:
self.volume.delete_snapshot(self.context, snapshot)
snapshot_ref = objects.Snapshot.get_by_id(self.context,
snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual(fields.SnapshotStatus.AVAILABLE,
snapshot_ref.status)
mock_del_snap.assert_called_once_with(snapshot)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_cloned_volume')
@mock.patch('cinder.quota.QUOTAS.rollback')
@mock.patch('cinder.quota.QUOTAS.commit')
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_clone_image_volume(self, mock_reserve, mock_commit,
mock_rollback, mock_cloned_volume):
vol = tests_utils.create_volume(self.context,
**self.volume_params)
# unnecessary attributes should be removed from image volume
vol.consistencygroup = None
result = self.volume._clone_image_volume(self.context, vol,
{'id': fake.VOLUME_ID})
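# _clone_image_volume is expected to return the new image-volume object
# on success and False on failure, hence the assertNotEqual below.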
self.assertNotEqual(False, result)
mock_reserve.assert_called_once_with(self.context, volumes=1,
gigabytes=vol.size)
mock_commit.assert_called_once_with(self.context, ["RESERVATION"],
project_id=vol.project_id)
@mock.patch('cinder.quota.QUOTAS.rollback')
@mock.patch('cinder.quota.QUOTAS.commit')
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_clone_image_volume_creation_failure(self, mock_reserve,
mock_commit, mock_rollback):
vol = tests_utils.create_volume(self.context, **self.volume_params)
with mock.patch.object(objects, 'Volume', side_effect=ValueError):
self.assertFalse(self.volume._clone_image_volume(
self.context, vol, {'id': fake.VOLUME_ID}))
mock_reserve.assert_called_once_with(self.context, volumes=1,
gigabytes=vol.size)
mock_rollback.assert_called_once_with(self.context, ["RESERVATION"])
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_cloned_status_available(
self, mock_qemu_info):
"""Test create volume from image via cloning.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image()
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume.id, volume=volume)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_not_cloned_status_available(
self, mock_qemu_info):
"""Test create volume from image via full copy.
Verify that after copying image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image(fakeout_clone_image=True)
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_create_volume_from_image_exception(self):
"""Test create volume from a non-existing image.
Verify that when creating a volume from a non-existing image, the
volume status is 'error' and the volume is not bootable.
"""
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.mock_object(self.volume.driver, 'local_path', lambda x: dst_path)
# creating volume testdata
kwargs = {'display_description': 'Test Desc',
'size': 20,
'availability_zone': 'fake_availability_zone',
'status': 'creating',
'attach_status': 'detached',
'host': 'dummy'}
volume = objects.Volume(context=self.context, **kwargs)
volume.create()
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
self.context,
volume.id,
{'image_id': self.FAKE_UUID},
volume=volume)
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error", volume['status'])
self.assertFalse(volume['bootable'])
# cleanup
volume.destroy()
os.unlink(dst_path)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_copy_exception_rescheduling(
self, mock_qemu_info):
"""Test create volume with ImageCopyFailure
This exception should not trigger rescheduling, and allocated_capacity
should still be incremented, so we assert on that here.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
def fake_copy_image_to_volume(context, volume, image_service,
image_id):
raise exception.ImageCopyFailure()
self.mock_object(self.volume.driver, 'copy_image_to_volume',
fake_copy_image_to_volume)
self.assertRaises(exception.ImageCopyFailure,
self._create_volume_from_image)
# NOTE(dulek): Rescheduling should not occur, so let's assert that
# allocated_capacity is incremented.
self.assertDictEqual(self.volume.stats['pools'],
{'_pool0': {'allocated_capacity_gb': 1}})
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.utils.brick_get_connector')
@mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled')
@mock.patch('cinder.volume.driver.BaseVD._detach_volume')
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_unavailable(
self, mock_qemu_info, mock_detach, mock_secure, *args):
"""Test create volume with ImageCopyFailure
We'll raise an exception inside _connect_device after volume has
already been attached to confirm that it detaches the volume.
"""
mock_secure.side_effect = NameError
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
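# Bind the base driver's unbound copy_image_to_volume to this driver
# instance so the real attach/copy/detach path runs and hits the mocked
# secure_file_operations_enabled.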
unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
side_effect=bound_copy_method):
self.assertRaises(exception.ImageCopyFailure,
self._create_volume_from_image,
fakeout_copy_image_to_volume=False)
# The detach method must have been called.
self.assertEqual(1, mock_detach.call_count)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_clone_image_volume(self, mock_qemu_info):
"""Test create volume from image via image volume.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image(clone_image_volume=True)
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_create_volume_from_exact_sized_image(self):
"""Test create volume from an image of the same size.
Verify that an image which is exactly the same size as the
volume will work correctly.
"""
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=self.FAKE_UUID)
volume_id = volume['id']
self.assertEqual('creating', volume['status'])
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_volume_from_oversized_image(self):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi + 1,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_mindisk_error(self):
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_deleted_image(self):
"""Verify that creating a volume from a deleted image causes an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'deleted'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
@mock.patch.object(QUOTAS, "rollback")
@mock.patch.object(QUOTAS, "commit")
@mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"])
def _do_test_create_volume_with_size(self, size, *_unused_quota_mocks):
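# Quota reserve/commit/rollback are mocked out above, so only the size
# handling of volume_api.create is exercised here.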
volume_api = cinder.volume.api.API()
volume = volume_api.create(self.context,
size,
'name',
'description')
self.assertEqual(int(size), volume['size'])
def test_create_volume_int_size(self):
"""Test volume creation with int size."""
self._do_test_create_volume_with_size(2)
def test_create_volume_string_size(self):
"""Test volume creation with string size."""
self._do_test_create_volume_with_size('2')
@mock.patch.object(QUOTAS, "rollback")
@mock.patch.object(QUOTAS, "commit")
@mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"])
def test_create_volume_with_bad_size(self, *_unused_quota_mocks):
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'2Gb',
'name',
'description')
def test_create_volume_with_float_fails(self):
"""Test volume creation with invalid float size."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'1.5',
'name',
'description')
def test_create_volume_with_zero_size_fails(self):
"""Test volume creation with string size."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'0',
'name',
'description')
def test_begin_detaching_fails_available(self):
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, status='available')
# Volume status is 'available'.
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
self.context, volume)
db.volume_update(self.context, volume.id,
{'status': 'in-use', 'attach_status': 'detached'})
# Should raise an error since not attached
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
self.context, volume)
db.volume_update(self.context, volume.id,
{'attach_status': 'attached'})
# Ensure no exception is raised when the volume is attached
volume_api.begin_detaching(self.context, volume)
volume_api.update(self.context, volume, {'status': 'maintenance'})
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
self.context, volume)
db.volume_destroy(self.context, volume.id)
def test_begin_roll_detaching_volume(self):
"""Test begin_detaching and roll_detaching functions."""
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context, **self.volume_params)
attachment = db.volume_attach(self.context,
{'volume_id': volume['id'],
'attached_host': 'fake-host'})
db.volume_attached(self.context, attachment['id'], instance_uuid,
'fake-host', 'vdb')
volume_api = cinder.volume.api.API()
volume_api.begin_detaching(self.context, volume)
volume = volume_api.get(self.context, volume['id'])
self.assertEqual("detaching", volume['status'])
volume_api.roll_detaching(self.context, volume)
volume = volume_api.get(self.context, volume['id'])
self.assertEqual("in-use", volume['status'])
def test_volume_api_update(self):
# create a raw vol
volume = tests_utils.create_volume(self.context, **self.volume_params)
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update(self.context, volume, update_dict)
# read changes from db
vol = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('test update name', vol['display_name'])
def test_volume_api_update_maintenance(self):
# create a raw vol
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume['status'] = 'maintenance'
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
self.assertRaises(exception.InvalidVolume, volume_api.update,
self.context, volume, update_dict)
def test_volume_api_update_snapshot(self):
# create raw snapshot
volume = tests_utils.create_volume(self.context, **self.volume_params)
snapshot = create_snapshot(volume['id'])
snapshot_id = snapshot.id
self.assertIsNone(snapshot.display_name)
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update_snapshot(self.context, snapshot, update_dict)
# read changes from db
snap = objects.Snapshot.get_by_id(context.get_admin_context(),
snapshot_id)
self.assertEqual('test update name', snap.display_name)
def test_volume_api_get_list_volumes_image_metadata(self):
"""Test get_list_volumes_image_metadata in volume API."""
ctxt = context.get_admin_context()
db.volume_create(ctxt, {'id': 'fake1', 'status': 'available',
'host': 'test', 'provider_location': '',
'size': 1})
db.volume_glance_metadata_create(ctxt, 'fake1', 'key1', 'value1')
db.volume_glance_metadata_create(ctxt, 'fake1', 'key2', 'value2')
db.volume_create(ctxt, {'id': 'fake2', 'status': 'available',
'host': 'test', 'provider_location': '',
'size': 1})
db.volume_glance_metadata_create(ctxt, 'fake2', 'key3', 'value3')
db.volume_glance_metadata_create(ctxt, 'fake2', 'key4', 'value4')
volume_api = cinder.volume.api.API()
results = volume_api.get_list_volumes_image_metadata(ctxt, ['fake1',
'fake2'])
expect_results = {'fake1': {'key1': 'value1', 'key2': 'value2'},
'fake2': {'key3': 'value3', 'key4': 'value4'}}
self.assertEqual(expect_results, results)
@mock.patch.object(QUOTAS, 'limit_check')
@mock.patch.object(QUOTAS, 'reserve')
def test_extend_volume(self, reserve, limit_check):
"""Test volume can be extended at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, size=2,
status='in-use', host=CONF.host)
volume_api = cinder.volume.api.API()
# Extend fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.extend,
self.context,
volume,
3)
db.volume_update(self.context, volume.id, {'status': 'available'})
# Extend fails when new_size < orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
1)
# Extend fails when new_size == orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
2)
# works when new_size > orig_size
reserve.return_value = ["RESERVATION"]
volume_api.extend(self.context, volume, 3)
volume.refresh()
self.assertEqual('extending', volume.status)
reserve.assert_called_once_with(self.context, gigabytes=1,
project_id=volume.project_id)
# Test the quota exceeded
db.volume_update(self.context, volume.id, {'status': 'available'})
reserve.side_effect = exception.OverQuota(overs=['gigabytes'],
quotas={'gigabytes': 20},
usages={'gigabytes':
{'reserved': 5,
'in_use': 15}})
self.assertRaises(exception.VolumeSizeExceedsAvailableQuota,
volume_api.extend, self.context,
volume, 3)
limit_check.side_effect = exception.OverQuota(
overs=['per_volume_gigabytes'], quotas={'per_volume_gigabytes': 2})
self.assertRaises(exception.VolumeSizeExceedsLimit,
volume_api.extend, self.context,
volume, 3)
# clean up
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_extend_volume_driver_not_initialized(self):
"""Test volume can be extended at API level."""
# create a volume and assign to host
fake_reservations = ['RESERVATION']
volume = tests_utils.create_volume(self.context, size=2,
status='available',
host=CONF.host)
self.volume.create_volume(self.context, volume.id, volume=volume)
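# Simulate a driver that has not been initialized.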
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.extend_volume,
self.context, volume['id'], 3,
fake_reservations, volume=volume)
volume.refresh()
self.assertEqual('error_extending', volume.status)
# Clean up: re-enable the driver and delete the volume.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_extend_volume_manager(self):
"""Test volume can be extended at the manager level."""
def fake_extend(volume, new_size):
volume['size'] = new_size
fake_reservations = ['RESERVATION']
volume = tests_utils.create_volume(self.context, size=2,
status='creating', host=CONF.host)
self.volume.create_volume(self.context, volume.id, volume=volume)
# Test driver exception
with mock.patch.object(self.volume.driver,
'extend_volume') as extend_volume:
extend_volume.side_effect =\
exception.CinderException('fake exception')
volume['status'] = 'extending'
self.volume.extend_volume(self.context, volume['id'], '4',
fake_reservations, volume=volume)
volume.refresh()
self.assertEqual(2, volume.size)
self.assertEqual('error_extending', volume.status)
# Test driver success
with mock.patch.object(self.volume.driver,
'extend_volume') as extend_volume:
with mock.patch.object(QUOTAS, 'commit') as quotas_commit:
extend_volume.return_value = fake_extend
volume.status = 'extending'
self.volume.extend_volume(self.context, volume.id, '4',
fake_reservations, volume=volume)
volume.refresh()
self.assertEqual(4, volume.size)
self.assertEqual('available', volume.status)
quotas_commit.assert_called_with(
self.context,
['RESERVATION'],
project_id=volume.project_id)
# clean up
self.volume.delete_volume(self.context, volume.id, volume=volume)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume')
def test_extend_volume_with_volume_type(self, mock_rpc_extend):
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'type', 'extra_specs': {}})
vol_type = db.volume_type_get_by_name(elevated, 'type')
volume_api = cinder.volume.api.API()
volume = volume_api.create(self.context, 100, 'name', 'description',
volume_type=vol_type)
try:
usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type')
volumes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
volumes_in_use = 0
self.assertEqual(100, volumes_in_use)
db.volume_update(self.context, volume.id, {'status': 'available'})
volume_api.extend(self.context, volume, 200)
mock_rpc_extend.assert_called_once_with(self.context, volume, 200,
mock.ANY)
try:
usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type')
volumes_reserved = usage.reserved
except exception.QuotaUsageNotFound:
volumes_reserved = 0
self.assertEqual(100, volumes_reserved)
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
def test_create_volume_from_sourcereplica(self, _create_replica_test):
"""Test volume can be created from a volume replica."""
_create_replica_test.return_value = None
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
volume_dst = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst.id,
{'source_replicaid': volume_src.id},
volume=volume_dst)
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
self.assertTrue(_create_replica_test.called)
self.volume.delete_volume(self.context, volume_dst.id,
volume=volume_dst)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
def test_create_volume_from_sourcevol(self):
"""Test volume can be created from a source volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.mock_object(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst.id,
volume=volume_dst)
volume_dst.refresh()
self.assertEqual('available', volume_dst.status)
self.volume.delete_volume(self.context, volume_dst.id,
volume=volume_dst)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
@mock.patch('cinder.volume.api.API.list_availability_zones',
return_value=({'name': 'nova', 'available': True},
{'name': 'az2', 'available': True}))
def test_create_volume_from_sourcevol_fail_wrong_az(self, _mock_laz):
"""Test volume can't be cloned from an other volume in different az."""
volume_api = cinder.volume.api.API()
volume_src = tests_utils.create_volume(self.context,
availability_zone='az2',
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
volume_src = db.volume_get(self.context, volume_src['id'])
volume_dst = volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src)
self.assertEqual('az2', volume_dst['availability_zone'])
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src,
availability_zone='nova')
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_sourcevol_with_glance_metadata(
self, mock_qemu_info):
"""Test glance metadata can be correctly copied to new volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.mock_object(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume_src = self._create_volume_from_image()
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst.id,
volume=volume_dst)
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
src_glancemeta = db.volume_get(context.get_admin_context(),
volume_src['id']).volume_glance_metadata
dst_glancemeta = db.volume_get(context.get_admin_context(),
volume_dst['id']).volume_glance_metadata
for meta_src in src_glancemeta:
for meta_dst in dst_glancemeta:
if meta_dst.key == meta_src.key:
self.assertEqual(meta_src.value, meta_dst.value)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
self.volume.delete_volume(self.context, volume_dst.id,
volume=volume_dst)
def test_create_volume_from_sourcevol_failed_clone(self):
"""Test src vol status will be restore by error handling code."""
def fake_error_create_cloned_volume(volume, src_vref):
db.volume_update(self.context, src_vref['id'], {'status': 'error'})
raise exception.CinderException('fake exception')
self.mock_object(self.volume.driver, 'create_cloned_volume',
fake_error_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.assertEqual('creating', volume_src.status)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
self.assertEqual('available', volume_src.status)
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.assertEqual('creating', volume_dst.status)
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume_dst.id,
volume=volume_dst)
# Source volume's status is still available and dst is set to error
self.assertEqual('available', volume_src.status)
self.assertEqual('error', volume_dst.status)
self.volume.delete_volume(self.context, volume_dst.id,
volume=volume_dst)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
def test_clean_temporary_volume(self):
def fake_delete_volume(ctxt, volume):
volume.destroy()
fake_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host,
migration_status='migrating')
fake_new_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
# 1. Only clean the db
self.volume._clean_temporary_volume(self.context, fake_volume,
fake_new_volume,
clean_db_only=True)
self.assertRaises(exception.VolumeNotFound,
db.volume_get, self.context,
fake_new_volume.id)
# 2. Delete the backend storage
fake_new_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
with mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \
mock_delete_volume:
mock_delete_volume.side_effect = fake_delete_volume
self.volume._clean_temporary_volume(self.context,
fake_volume,
fake_new_volume,
clean_db_only=False)
self.assertRaises(exception.VolumeNotFound,
db.volume_get, self.context,
fake_new_volume.id)
# Check when the migrated volume is not in migration
fake_new_volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
fake_volume.migration_status = 'non-migrating'
fake_volume.save()
self.volume._clean_temporary_volume(self.context, fake_volume,
fake_new_volume)
volume = db.volume_get(context.get_admin_context(),
fake_new_volume.id)
self.assertIsNone(volume.migration_status)
def test_check_volume_filters_true(self):
"""Test bootable as filter for true"""
volume_api = cinder.volume.api.API()
filters = {'bootable': 'TRUE'}
# To convert filter value to True or False
volume_api.check_volume_filters(filters)
# Confirming converted filter value against True
self.assertTrue(filters['bootable'])
def test_check_volume_filters_false(self):
"""Test bootable as filter for false"""
volume_api = cinder.volume.api.API()
filters = {'bootable': 'false'}
# To convert filter value to True or False
volume_api.check_volume_filters(filters)
# Confirming converted filter value against False
self.assertEqual(False, filters['bootable'])
def test_check_volume_filters_invalid(self):
"""Test bootable as filter"""
volume_api = cinder.volume.api.API()
filters = {'bootable': 'invalid'}
# To convert filter value to True or False
volume_api.check_volume_filters(filters)
# Confirming converted filter value against invalid value
self.assertTrue(filters['bootable'])
def test_update_volume_readonly_flag(self):
"""Test volume readonly flag can be updated at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
volume.status = 'in-use'
def sort_func(obj):
return obj['name']
volume_api = cinder.volume.api.API()
# Update fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.update_readonly_flag,
self.context,
volume,
False)
volume.status = 'available'
# works when volume in 'available' status
volume_api.update_readonly_flag(self.context, volume, False)
volume.refresh()
self.assertEqual('available', volume.status)
admin_metadata = volume.volume_admin_metadata
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
# clean up
self.volume.delete_volume(self.context, volume.id, volume=volume)
def test_secure_file_operations_enabled(self):
"""Test secure file operations setting for base driver.
In general, drivers that are not based on network file systems have
nothing to do with "secure_file_operations". This test verifies that
calling the method always returns False.
"""
ret_flag = self.volume.driver.secure_file_operations_enabled()
self.assertFalse(ret_flag)
@mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled')
def test_secure_file_operations_enabled_2(self, mock_secure):
mock_secure.return_value = True
vol = tests_utils.create_volume(self.context)
result = self.volume.secure_file_operations_enabled(self.context,
vol)
mock_secure.assert_called_once_with()
self.assertTrue(result)
@mock.patch('cinder.volume.flows.common.make_pretty_name',
new=mock.MagicMock())
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_volume',
return_value=None)
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask.execute',
side_effect=exception.DriverNotInitialized())
def test_create_volume_raise_rescheduled_exception(self, mock_execute,
mock_reschedule):
# Create source volume
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
test_vol_id = test_vol['id']
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, test_vol_id,
{'volume_properties': self.volume_params},
{'retry': {'num_attempts': 1, 'host': []}},
volume=test_vol)
self.assertTrue(mock_reschedule.called)
volume = db.volume_get(context.get_admin_context(), test_vol_id)
self.assertEqual('creating', volume['status'])
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask.execute')
def test_create_volume_raise_unrescheduled_exception(self, mock_execute):
# create source volume
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
test_vol_id = test_vol['id']
mock_execute.side_effect = exception.VolumeNotFound(
volume_id=test_vol_id)
self.assertRaises(exception.VolumeNotFound,
self.volume.create_volume,
self.context, test_vol_id,
{'volume_properties': self.volume_params},
{'retry': {'num_attempts': 1, 'host': []}},
volume=test_vol)
volume = db.volume_get(context.get_admin_context(), test_vol_id)
self.assertEqual('error', volume['status'])
def test_cascade_delete_volume_with_snapshots(self):
"""Test volume deletion with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
snapshot = create_snapshot(volume['id'], size=volume['size'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot.id, objects.Snapshot.get_by_id(self.context,
snapshot.id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
volume_api.delete(self.context,
volume,
cascade=True)
def test_cascade_delete_volume_with_snapshots_error(self):
"""Test volume deletion with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
snapshot = create_snapshot(volume['id'], size=volume['size'])
self.volume.create_snapshot(self.context, volume['id'], snapshot)
self.assertEqual(
snapshot.id, objects.Snapshot.get_by_id(self.context,
snapshot.id).id)
snapshot.update({'status': fields.SnapshotStatus.CREATING})
snapshot.save()
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume,
cascade=True)
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats')
@mock.patch.object(driver.BaseVD, '_init_vendor_properties')
def test_get_capabilities(self, mock_init_vendor, mock_get_volume_stats):
stats = {
'volume_backend_name': 'lvm',
'vendor_name': 'Open Source',
'storage_protocol': 'iSCSI',
'vendor_prefix': 'abcd'
}
expected = stats.copy()
expected['properties'] = {
'compression': {
'title': 'Compression',
'description': 'Enables compression.',
'type': 'boolean'},
'qos': {
'title': 'QoS',
'description': 'Enables QoS.',
'type': 'boolean'},
'replication': {
'title': 'Replication',
'description': 'Enables replication.',
'type': 'boolean'},
'thin_provisioning': {
'title': 'Thin Provisioning',
'description': 'Sets thin provisioning.',
'type': 'boolean'},
}
# Test to get updated capabilities
discover = True
mock_get_volume_stats.return_value = stats
mock_init_vendor.return_value = ({}, None)
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities)
mock_get_volume_stats.assert_called_once_with(True)
# Test to get existing original capabilities
mock_get_volume_stats.reset_mock()
discover = False
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities)
self.assertFalse(mock_get_volume_stats.called)
# Normal test case to get vendor unique capabilities
def init_vendor_properties(self):
properties = {}
self._set_property(
properties,
"abcd:minIOPS",
"Minimum IOPS QoS",
"Sets minimum IOPS if QoS is enabled.",
"integer",
minimum=10,
default=100)
return properties, 'abcd'
expected['properties'].update(
{'abcd:minIOPS': {
'title': 'Minimum IOPS QoS',
'description': 'Sets minimum IOPS if QoS is enabled.',
'type': 'integer',
'minimum': 10,
'default': 100}})
mock_get_volume_stats.reset_mock()
mock_init_vendor.reset_mock()
discover = True
mock_init_vendor.return_value = (
init_vendor_properties(self.volume.driver))
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities)
self.assertTrue(mock_get_volume_stats.called)
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats')
@mock.patch.object(driver.BaseVD, '_init_vendor_properties')
@mock.patch.object(driver.BaseVD, '_init_standard_capabilities')
def test_get_capabilities_prefix_error(self, mock_init_standard,
mock_init_vendor,
mock_get_volume_stats):
# Error test case: property does not match vendor prefix
def init_vendor_properties(self):
properties = {}
self._set_property(
properties,
"aaa:minIOPS",
"Minimum IOPS QoS",
"Sets minimum IOPS if QoS is enabled.",
"integer")
self._set_property(
properties,
"abcd:compression_type",
"Compression type",
"Specifies compression type.",
"string")
return properties, 'abcd'
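# Expected: only the property carrying the matching 'abcd' vendor
# prefix should be kept.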
expected = {
'abcd:compression_type': {
'title': 'Compression type',
'description': 'Specifies compression type.',
'type': 'string'}}
discover = True
mock_get_volume_stats.return_value = {}
mock_init_standard.return_value = {}
mock_init_vendor.return_value = (
init_vendor_properties(self.volume.driver))
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities['properties'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats')
@mock.patch.object(driver.BaseVD, '_init_vendor_properties')
@mock.patch.object(driver.BaseVD, '_init_standard_capabilities')
def test_get_capabilities_fail_override(self, mock_init_standard,
mock_init_vendor,
mock_get_volume_stats):
# Error test case: property cannot override any standard capabilities
def init_vendor_properties(self):
properties = {}
self._set_property(
properties,
"qos",
"Minimum IOPS QoS",
"Sets minimum IOPS if QoS is enabled.",
"integer")
self._set_property(
properties,
"ab::cd:compression_type",
"Compression type",
"Specifies compression type.",
"string")
return properties, 'ab::cd'
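# Expected: the 'qos' entry should be dropped (it would override a
# standard capability) and the invalid 'ab::cd' prefix normalized to
# 'ab__cd'.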
expected = {
'ab__cd:compression_type': {
'title': 'Compression type',
'description': 'Specifies compression type.',
'type': 'string'}}
discover = True
mock_get_volume_stats.return_value = {}
mock_init_standard.return_value = {}
mock_init_vendor.return_value = (
init_vendor_properties(self.volume.driver))
capabilities = self.volume.get_capabilities(self.context,
discover)
self.assertEqual(expected, capabilities['properties'])
@mock.patch.object(driver.BaseVD, 'get_backup_device')
@mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled')
def test_get_backup_device(self, mock_secure, mock_get_backup):
vol = tests_utils.create_volume(self.context)
backup = tests_utils.create_backup(self.context, vol['id'])
mock_secure.return_value = False
mock_get_backup.return_value = (vol, False)
result = self.volume.get_backup_device(self.context,
backup)
mock_get_backup.assert_called_once_with(self.context, backup)
mock_secure.assert_called_once_with()
expected_result = {'backup_device': vol,
'secure_enabled': False,
'is_snapshot': False}
self.assertEqual(expected_result, result)
def test_backup_use_temp_snapshot_config(self):
local_conf = self.volume.driver.configuration.local_conf
self.assertFalse(local_conf.backup_use_temp_snapshot)
@mock.patch.object(QUOTAS, 'reserve',
side_effect=OVER_SNAPSHOT_QUOTA_EXCEPTION)
def test_existing_snapshot_failed_quota_reserve(self, mock_reserve):
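# A failed quota reservation while managing an existing snapshot should
# surface as SnapshotLimitExceeded.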
vol = tests_utils.create_volume(self.context)
snap = tests_utils.create_snapshot(self.context, vol.id)
with mock.patch.object(
self.volume.driver,
'manage_existing_snapshot_get_size') as mock_get_size:
mock_get_size.return_value = 1
self.assertRaises(exception.SnapshotLimitExceeded,
self.volume.manage_existing_snapshot,
self.context,
snap)
def test_init_host_clears_deleting_snapshots(self):
"""Test that init_host will delete a snapshot stuck in deleting."""
volume = tests_utils.create_volume(self.context, status='deleting',
size=1, host=CONF.host)
snapshot = tests_utils.create_snapshot(self.context,
volume.id, status='deleting')
self.volume.init_host()
self.assertRaises(exception.VolumeNotFound, volume.refresh)
self.assertRaises(exception.SnapshotNotFound, snapshot.refresh)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'manage_existing')
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'manage_existing_get_size')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_manage_volume_with_notify(self, mock_notify, mock_size,
mock_manage):
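# manage_existing should emit a 'manage_existing.end' usage notification.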
elevated = context.get_admin_context()
vol_type = db.volume_type_create(
elevated, {'name': 'type1', 'extra_specs': {}})
# create source volume
volume_params = {'volume_type_id': vol_type.id, 'status': 'managing'}
test_vol = tests_utils.create_volume(self.context, **volume_params)
mock_size.return_value = 1
mock_manage.return_value = None
self.volume.manage_existing(self.context, None, 'volume_ref',
test_vol)
mock_notify.assert_called_with(self.context, test_vol,
'manage_existing.end',
host=test_vol.host)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'manage_existing_get_size')
@mock.patch('cinder.volume.flows.manager.manage_existing.'
'ManageExistingTask.execute')
def test_manage_volume_raise_driver_exception(self, mock_execute,
mock_driver_get_size):
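# A driver failure during manage_existing should leave the volume in
# 'error_managing' status and keep quota usage unchanged after cleanup.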
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}})
vol_type = db.volume_type_get_by_name(elevated, 'type1')
# create source volume
self.volume_params['volume_type_id'] = vol_type['id']
self.volume_params['status'] = 'managing'
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
mock_execute.side_effect = exception.VolumeBackendAPIException(
data="volume driver got exception")
mock_driver_get_size.return_value = 1
# Set quota usage
reserve_opts = {'volumes': 1, 'gigabytes': 1}
reservations = QUOTAS.reserve(self.context, project_id=project_id,
**reserve_opts)
QUOTAS.commit(self.context, reservations)
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use = usage.in_use
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use = usage.in_use
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.manage_existing,
self.context, test_vol.id,
'volume_ref')
# check volume status
volume = objects.Volume.get_by_id(context.get_admin_context(),
test_vol.id)
self.assertEqual('error_managing', volume.status)
# Delete this volume with 'error_managing_deleting' status in c-vol.
test_vol.status = 'error_managing_deleting'
test_vol.save()
self.volume.delete_volume(self.context, test_vol.id, volume=test_vol)
ctxt = context.get_admin_context(read_deleted='yes')
volume = objects.Volume.get_by_id(ctxt, test_vol.id)
self.assertEqual('deleted', volume.status)
# Get in_use number after deleting error_managing volume
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use_new = usage.in_use
self.assertEqual(volumes_in_use, volumes_in_use_new)
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use_new = usage.in_use
self.assertEqual(gigabytes_in_use, gigabytes_in_use_new)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'manage_existing_get_size')
def test_manage_volume_raise_driver_size_exception(self,
mock_driver_get_size):
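# A failure in manage_existing_get_size should behave the same way:
# 'error_managing' status and no quota usage leak.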
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}})
# create source volume
test_vol = tests_utils.create_volume(self.context,
**self.volume_params)
mock_driver_get_size.side_effect = exception.VolumeBackendAPIException(
data="volume driver got exception")
# Set quota usage
reserve_opts = {'volumes': 1, 'gigabytes': 1}
reservations = QUOTAS.reserve(self.context, project_id=project_id,
**reserve_opts)
QUOTAS.commit(self.context, reservations)
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use = usage.in_use
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use = usage.in_use
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.manage_existing,
self.context, test_vol.id,
'volume_ref')
# check volume status
volume = objects.Volume.get_by_id(context.get_admin_context(),
test_vol.id)
self.assertEqual('error_managing', volume.status)
# Delete this volume with 'error_managing_deleting' status in c-vol.
test_vol.status = 'error_managing_deleting'
test_vol.save()
self.volume.delete_volume(self.context, test_vol.id, volume=test_vol)
ctxt = context.get_admin_context(read_deleted='yes')
volume = objects.Volume.get_by_id(ctxt, test_vol.id)
self.assertEqual('deleted', volume.status)
# Get in_use number after raising exception
usage = db.quota_usage_get(self.context, project_id, 'volumes')
volumes_in_use_new = usage.in_use
self.assertEqual(volumes_in_use, volumes_in_use_new)
usage = db.quota_usage_get(self.context, project_id, 'gigabytes')
gigabytes_in_use_new = usage.in_use
self.assertEqual(gigabytes_in_use, gigabytes_in_use_new)
@ddt.ddt
class VolumeMigrationTestCase(BaseVolumeTestCase):
def setUp(self):
super(VolumeMigrationTestCase, self).setUp()
self._clear_patch = mock.patch('cinder.volume.utils.clear_volume',
autospec=True)
self._clear_patch.start()
self.expected_status = 'available'
def tearDown(self):
super(VolumeMigrationTestCase, self).tearDown()
self._clear_patch.stop()
def test_migrate_volume_driver(self):
"""Test volume migration done by driver."""
# Mock driver and rpc functions
self.mock_object(self.volume.driver, 'migrate_volume',
lambda x, y, z, new_type_id=None: (
True, {'user_id': fake.USER_ID}))
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
migration_status='migrating')
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume.id, host_obj, False,
volume=volume)
# check volume properties
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
def _fake_create_volume(self, ctxt, volume, host, req_spec, filters,
allow_reschedule=True):
return db.volume_update(ctxt, volume['id'],
{'status': self.expected_status})
def test_migrate_volume_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume') as \
mock_migrate,\
mock.patch.object(self.volume.driver, 'create_export') as \
mock_create_export:
# Exception case at self.driver.migrate_volume and create_export
mock_migrate.side_effect = processutils.ProcessExecutionError
mock_create_export.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
False,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
self.assertEqual('available', volume.status)
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic(self, volume_get,
migrate_volume_completion,
nova_api):
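# Generic migration of a detached volume copies the data and completes
# the migration without calling Nova.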
fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
new_volume_obj = fake_volume.fake_volume_obj(self.context,
**fake_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
volume_get.return_value = fake_new_volume
update_server_volume = nova_api.return_value.update_server_volume
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
with mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume:
self.volume._migrate_volume_generic(self.context, volume,
host_obj, None)
mock_copy_volume.assert_called_with(self.context, volume,
new_volume_obj,
remote='dest')
migrate_volume_completion.assert_called_with(
self.context, volume.id, new_volume_obj.id, error=False,
volume=volume, new_volume=new_volume_obj)
self.assertFalse(update_server_volume.called)
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic_attached_volume(self, volume_get,
migrate_volume_completion,
nova_api):
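# For an attached volume, generic migration asks Nova to swap the
# server's volume instead of completing the migration itself.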
attached_host = 'some-host'
fake_volume_id = fake.VOLUME_ID
fake_db_new_volume = {'status': 'available', 'id': fake_volume_id}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
fake_uuid = fakes.get_fake_uuid()
update_server_volume = nova_api.return_value.update_server_volume
volume_get.return_value = fake_new_volume
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
volume_attach = tests_utils.attach_volume(
self.context, volume['id'], fake_uuid, attached_host, '/dev/vda')
self.assertIsNotNone(volume_attach['volume_attachment'][0]['id'])
self.assertEqual(
fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid'])
self.assertEqual('in-use', volume_attach['status'])
self.volume._migrate_volume_generic(self.context, volume,
host_obj, None)
self.assertFalse(migrate_volume_completion.called)
update_server_volume.assert_called_with(self.context, fake_uuid,
volume['id'], fake_volume_id)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')
def test_migrate_volume_for_volume_generic(self, create_volume,
rpc_delete_volume,
update_migrated_volume):
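# Forcing host copy takes the generic path: the driver's migrate_volume
# and delete_volume are not called and the source is deleted over RPC.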
fake_volume = tests_utils.create_volume(self.context, size=1,
previous_status='available',
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
with mock.patch.object(self.volume.driver, 'migrate_volume') as \
mock_migrate_volume,\
mock.patch.object(self.volume, '_copy_volume_data'),\
mock.patch.object(self.volume.driver, 'delete_volume') as \
delete_volume:
create_volume.side_effect = self._fake_create_volume
self.volume.migrate_volume(self.context, fake_volume.id,
host_obj, True, volume=fake_volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
fake_volume.id)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
self.assertFalse(mock_migrate_volume.called)
self.assertFalse(delete_volume.called)
self.assertTrue(rpc_delete_volume.called)
self.assertTrue(update_migrated_volume.called)
def test_migrate_volume_generic_copy_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion'),\
mock.patch.object(self.volume.driver, 'create_export'):
# Exception case at migrate_volume_generic
# source_volume['migration_status'] is 'migrating'
mock_create_volume.side_effect = self._fake_create_volume
mock_copy_volume.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
self.assertEqual('available', volume.status)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_migrate_volume_with_glance_metadata(self, mock_qemu_info):
volume = self._create_volume_from_image(clone_image_volume=True)
glance_metadata = volume.glance_metadata
# We imitate the behavior of rpcapi by serializing and then
# deserializing the volume object we created earlier.
serializer = objects.base.CinderObjectSerializer()
serialized_volume = serializer.serialize_entity(self.context, volume)
volume = serializer.deserialize_entity(self.context, serialized_volume)
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
host_obj = {'host': 'newhost', 'capabilities': {}}
with mock.patch.object(self.volume.driver,
'migrate_volume') as mock_migrate_volume:
mock_migrate_volume.side_effect = (
lambda x, y, z, new_type_id=None: (
True, {'user_id': fake.USER_ID}))
self.volume.migrate_volume(self.context, volume.id, host_obj,
False, volume=volume)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
self.assertEqual(glance_metadata, volume.glance_metadata)
@mock.patch('cinder.db.volume_update')
def test_update_migrated_volume(self, volume_update):
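# update_migrated_volume should swap _name_id and provider_location
# between source and destination; when the driver does not implement it,
# default updates are applied instead.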
fake_host = 'fake_host'
fake_new_host = 'fake_new_host'
fake_update = {'_name_id': fake.VOLUME2_NAME_ID,
'provider_location': 'updated_location'}
fake_elevated = context.RequestContext(fake.USER_ID, self.project_id,
is_admin=True)
volume = tests_utils.create_volume(self.context, size=1,
status='available',
host=fake_host)
new_volume = tests_utils.create_volume(
self.context, size=1,
status='available',
provider_location='fake_provider_location',
_name_id=fake.VOLUME_NAME_ID,
host=fake_new_host)
new_volume._name_id = fake.VOLUME_NAME_ID
new_volume.provider_location = 'fake_provider_location'
fake_update_error = {'_name_id': new_volume._name_id,
'provider_location':
new_volume.provider_location}
expected_update = {'_name_id': volume._name_id,
'provider_location': volume.provider_location}
with mock.patch.object(self.volume.driver,
'update_migrated_volume') as migrate_update,\
mock.patch.object(self.context, 'elevated') as elevated:
migrate_update.return_value = fake_update
elevated.return_value = fake_elevated
self.volume.update_migrated_volume(self.context, volume,
new_volume, 'available')
volume_update.assert_has_calls((
mock.call(fake_elevated, new_volume.id, expected_update),
mock.call(fake_elevated, volume.id, fake_update)))
# Test the case for update_migrated_volume not implemented
# for the driver.
migrate_update.reset_mock()
volume_update.reset_mock()
# Reset the volume objects to their original value, since they
# were changed in the last call.
new_volume._name_id = fake.VOLUME_NAME_ID
new_volume.provider_location = 'fake_provider_location'
migrate_update.side_effect = NotImplementedError
self.volume.update_migrated_volume(self.context, volume,
new_volume, 'available')
volume_update.assert_has_calls((
mock.call(fake_elevated, new_volume.id, fake_update),
mock.call(fake_elevated, volume.id, fake_update_error)))
def test_migrate_volume_generic_create_volume_error(self):
self.expected_status = 'error'
with mock.patch.object(self.volume.driver, 'migrate_volume'), \
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \
mock_create_volume, \
mock.patch.object(self.volume, '_clean_temporary_volume') as \
clean_temporary_volume:
# Exception case at the creation of the new temporary volume
mock_create_volume.side_effect = self._fake_create_volume
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(exception.VolumeMigrationFailed,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
self.assertTrue(clean_temporary_volume.called)
self.expected_status = 'available'
def test_migrate_volume_generic_timeout_error(self):
CONF.set_override("migration_create_volume_timeout_secs", 2)
with mock.patch.object(self.volume.driver, 'migrate_volume'), \
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as \
mock_create_volume, \
mock.patch.object(self.volume, '_clean_temporary_volume') as \
clean_temporary_volume, \
mock.patch.object(time, 'sleep'):
# Exception case at the timeout of the volume creation
self.expected_status = 'creating'
mock_create_volume.side_effect = self._fake_create_volume
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(exception.VolumeMigrationFailed,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
self.assertTrue(clean_temporary_volume.called)
self.expected_status = 'available'
def test_migrate_volume_generic_create_export_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion'),\
mock.patch.object(self.volume.driver, 'create_export') as \
mock_create_export:
# Exception case at create_export
mock_create_volume.side_effect = self._fake_create_volume
mock_copy_volume.side_effect = processutils.ProcessExecutionError
mock_create_export.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
def test_migrate_volume_generic_migrate_volume_completion_error(self):
def fake_migrate_volume_completion(ctxt, volume_id, new_volume_id,
error=False, volume=None,
new_volume=None):
db.volume_update(ctxt, volume['id'],
{'migration_status': 'completing'})
raise processutils.ProcessExecutionError
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion')\
as mock_migrate_compl,\
mock.patch.object(self.volume.driver, 'create_export'), \
mock.patch.object(self.volume, '_attach_volume') \
as mock_attach, \
mock.patch.object(self.volume, '_detach_volume'), \
mock.patch.object(os_brick.initiator.connector,
'get_connector_properties') \
as mock_get_connector_properties, \
mock.patch.object(volutils, 'copy_volume') as mock_copy, \
mock.patch.object(volume_rpcapi.VolumeAPI,
'get_capabilities') \
as mock_get_capabilities:
# Exception case at migrate_volume_completion
# source_volume['migration_status'] is 'completing'
mock_create_volume.side_effect = self._fake_create_volume
mock_migrate_compl.side_effect = fake_migrate_volume_completion
mock_get_connector_properties.return_value = {}
mock_attach.side_effect = [{'device': {'path': 'bar'}},
{'device': {'path': 'foo'}}]
mock_get_capabilities.return_value = {'sparse_copy_volume': True}
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume.id,
host_obj,
True,
volume=volume)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
mock_copy.assert_called_once_with('foo', 'bar', 0, '1M',
sparse=True)
def fake_attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
tests_utils.attach_volume(ctxt, volume.id,
instance_uuid, host_name,
'/dev/vda')
def _test_migrate_volume_completion(self, status='available',
instance_uuid=None, attached_host=None,
retyping=False,
previous_status='available'):
initial_status = 'retyping' if retyping else status
old_volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
status=initial_status,
migration_status='migrating',
previous_status=previous_status)
attachment_id = None
if status == 'in-use':
vol = tests_utils.attach_volume(self.context, old_volume.id,
instance_uuid, attached_host,
'/dev/vda')
self.assertEqual('in-use', vol['status'])
attachment_id = vol['volume_attachment'][0]['id']
target_status = 'target:%s' % old_volume.id
new_host = CONF.host + 'new'
new_volume = tests_utils.create_volume(self.context, size=0,
host=new_host,
migration_status=target_status)
with mock.patch.object(self.volume, 'detach_volume') as \
mock_detach_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \
mock_delete_volume, \
mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume') as \
mock_attach_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI,
'update_migrated_volume'),\
mock.patch.object(self.volume.driver, 'attach_volume'):
mock_attach_volume.side_effect = self.fake_attach_volume
old_volume_host = old_volume.host
new_volume_host = new_volume.host
self.volume.migrate_volume_completion(self.context, old_volume.id,
new_volume.id,
volume=old_volume,
new_volume=new_volume)
after_new_volume = objects.Volume.get_by_id(self.context,
new_volume.id)
after_old_volume = objects.Volume.get_by_id(self.context,
old_volume.id)
if status == 'in-use':
mock_detach_volume.assert_called_with(self.context,
old_volume.id,
attachment_id)
attachments = db.volume_attachment_get_all_by_instance_uuid(
self.context, old_volume.id, instance_uuid)
self.assertIsNotNone(attachments)
self.assertEqual(attached_host,
attachments[0]['attached_host'])
self.assertEqual(instance_uuid,
attachments[0]['instance_uuid'])
else:
self.assertFalse(mock_detach_volume.called)
self.assertTrue(mock_delete_volume.called)
# NOTE(sborkows): the migrate_volume_completion method alters
# old and new volume objects, so we need to check the equality
# between the former host value and the actual one.
self.assertEqual(old_volume_host, after_new_volume.host)
self.assertEqual(new_volume_host, after_old_volume.host)
def test_migrate_volume_completion_retype_available(self):
self._test_migrate_volume_completion('available', retyping=True)
def test_migrate_volume_completion_retype_in_use(self):
self._test_migrate_volume_completion(
'in-use',
'83c969d5-065e-4c9c-907d-5394bc2e98e2',
'some-host',
retyping=True,
previous_status='in-use')
def test_migrate_volume_completion_migrate_available(self):
self._test_migrate_volume_completion()
def test_migrate_volume_completion_migrate_in_use(self):
self._test_migrate_volume_completion(
'in-use',
'83c969d5-065e-4c9c-907d-5394bc2e98e2',
'some-host',
retyping=False,
previous_status='in-use')
@ddt.data(False, True)
def test_api_migrate_volume_completion_from_swap_with_no_migration(
self, swap_error):
# This test validates that Cinder properly finishes the swap volume
# status updates for the case that no migration has occurred
instance_uuid = '83c969d5-065e-4c9c-907d-5394bc2e98e2'
attached_host = 'attached-host'
orig_attached_vol = tests_utils.create_volume(self.context, size=0)
orig_attached_vol = tests_utils.attach_volume(
self.context, orig_attached_vol['id'], instance_uuid,
attached_host, '/dev/vda')
new_volume = tests_utils.create_volume(self.context, size=0)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'detach_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume')
def _run_migration_completion(rpc_attach_volume,
rpc_detach_volume):
attachment = orig_attached_vol['volume_attachment'][0]
attachment_id = attachment['id']
rpc_attach_volume.side_effect = self.fake_attach_volume
vol_id = volume_api.API().migrate_volume_completion(
self.context, orig_attached_vol, new_volume, swap_error)
if swap_error:
# When the swap failed, we don't want to finish the attachment
self.assertFalse(rpc_detach_volume.called)
self.assertFalse(rpc_attach_volume.called)
else:
# When there is no error, we should finish the attachment
rpc_detach_volume.assert_called_with(self.context,
orig_attached_vol,
attachment_id)
rpc_attach_volume.assert_called_with(
self.context, new_volume, attachment['instance_uuid'],
attachment['attached_host'], attachment['mountpoint'],
'rw')
self.assertEqual(new_volume['id'], vol_id)
_run_migration_completion()
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
def test_retype_setup_fail_volume_is_available(self, mock_notify):
"""Verify volume is still available if retype prepare failed."""
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})
old_vol_type = db.volume_type_get_by_name(elevated, 'old')
db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})
new_vol_type = db.volume_type_get_by_name(elevated, 'new')
db.quota_create(elevated, project_id, 'volumes_new', 0)
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host, status='available',
volume_type_id=old_vol_type['id'])
api = cinder.volume.api.API()
self.assertRaises(exception.VolumeLimitExceeded, api.retype,
self.context, volume, new_vol_type['id'])
volume = db.volume_get(elevated, volume.id)
mock_notify.assert_not_called()
self.assertEqual('available', volume['status'])
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
def _retype_volume_exec(self, driver, mock_notify,
snap=False, policy='on-demand',
migrate_exc=False, exc=None, diff_equal=False,
replica=False, reserve_vol_type_only=False,
encryption_changed=False):
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})
old_vol_type = db.volume_type_get_by_name(elevated, 'old')
db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})
vol_type = db.volume_type_get_by_name(elevated, 'new')
db.quota_create(elevated, project_id, 'volumes_new', 10)
if replica:
rep_status = 'active'
else:
rep_status = 'disabled'
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host, status='retyping',
volume_type_id=old_vol_type['id'],
replication_status=rep_status)
volume.previous_status = 'available'
volume.save()
if snap:
create_snapshot(volume.id, size=volume.size)
if driver or diff_equal:
host_obj = {'host': CONF.host, 'capabilities': {}}
else:
host_obj = {'host': 'newhost', 'capabilities': {}}
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(self.context,
reserve_opts,
vol_type['id'])
if reserve_vol_type_only:
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes')
total_volumes_in_use = usage.in_use
usage = db.quota_usage_get(elevated, project_id, 'gigabytes')
total_gigabytes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
total_volumes_in_use = 0
total_gigabytes_in_use = 0
reservations = QUOTAS.reserve(self.context,
project_id=project_id,
**reserve_opts)
old_reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(self.context,
old_reserve_opts,
old_vol_type['id'])
old_reservations = QUOTAS.reserve(self.context,
project_id=project_id,
**old_reserve_opts)
with mock.patch.object(self.volume.driver, 'retype') as _retype,\
mock.patch.object(volume_types, 'volume_types_diff') as _diff,\
mock.patch.object(self.volume, 'migrate_volume') as _mig,\
mock.patch.object(db.sqlalchemy.api, 'volume_get') as mock_get:
mock_get.return_value = volume
_retype.return_value = driver
returned_diff = {
'encryption': {},
'qos_specs': {},
'extra_specs': {},
}
if encryption_changed:
returned_diff = {'encryption': 'fake'}
_diff.return_value = (returned_diff, diff_equal)
if migrate_exc:
_mig.side_effect = KeyError
else:
_mig.return_value = True
if not exc:
self.volume.retype(self.context, volume.id,
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations,
old_reservations=old_reservations,
volume=volume)
else:
self.assertRaises(exc, self.volume.retype,
self.context, volume.id,
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations,
old_reservations=old_reservations,
volume=volume)
# get volume/quota properties
volume = objects.Volume.get_by_id(elevated, volume.id)
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes_new')
volumes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
volumes_in_use = 0
# Get new in_use after retype; it should not have changed.
if reserve_vol_type_only:
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes')
new_total_volumes_in_use = usage.in_use
usage = db.quota_usage_get(elevated, project_id, 'gigabytes')
new_total_gigabytes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
new_total_volumes_in_use = 0
new_total_gigabytes_in_use = 0
self.assertEqual(total_volumes_in_use, new_total_volumes_in_use)
self.assertEqual(total_gigabytes_in_use,
new_total_gigabytes_in_use)
# check properties
if driver or diff_equal:
self.assertEqual(vol_type['id'], volume.volume_type_id)
self.assertEqual('available', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(1, volumes_in_use)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.retype'],))
elif not exc:
self.assertEqual(old_vol_type['id'], volume.volume_type_id)
self.assertEqual('retyping', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(1, volumes_in_use)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.retype'],))
else:
self.assertEqual(old_vol_type['id'], volume.volume_type_id)
self.assertEqual('available', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(0, volumes_in_use)
mock_notify.assert_not_called()
if encryption_changed:
self.assertTrue(_mig.called)
def test_retype_volume_driver_success(self):
self._retype_volume_exec(True)
def test_retype_volume_migration_bad_policy(self):
# Test volume retype that requires migration but is not allowed
self._retype_volume_exec(False, policy='never',
exc=exception.VolumeMigrationFailed)
def test_retype_volume_migration_with_replica(self):
self._retype_volume_exec(False,
replica=True,
exc=exception.InvalidVolume)
def test_retype_volume_migration_with_snaps(self):
self._retype_volume_exec(False, snap=True, exc=exception.InvalidVolume)
def test_retype_volume_migration_failed(self):
self._retype_volume_exec(False, migrate_exc=True, exc=KeyError)
def test_retype_volume_migration_success(self):
self._retype_volume_exec(False, migrate_exc=False, exc=None)
def test_retype_volume_migration_equal_types(self):
self._retype_volume_exec(False, diff_equal=True)
def test_retype_volume_with_type_only(self):
self._retype_volume_exec(True, reserve_vol_type_only=True)
def test_retype_volume_migration_encryption(self):
self._retype_volume_exec(False, encryption_changed=True)
def test_migrate_driver_not_initialized(self):
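# Migration must fail fast with DriverNotInitialized and mark the
# volume's migration_status as 'error'.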
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.migrate_volume,
self.context, volume.id, host_obj, True,
volume=volume)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
# Let's clean up the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume['id'], volume=volume)
def test_delete_source_volume_in_migration(self):
"""Test deleting a source volume that is in migration."""
self._test_delete_volume_in_migration('migrating')
def test_delete_destination_volume_in_migration(self):
"""Test deleting a destination volume that is in migration."""
self._test_delete_volume_in_migration('target:vol-id')
def _test_delete_volume_in_migration(self, migration_status):
"""Test deleting a volume that is in migration."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
vol = db.volume_update(self.context, volume.id,
{'status': 'available',
'migration_status': migration_status})
self.volume.delete_volume(self.context, vol['id'], volume=volume)
# The volume is successfully removed during the volume delete
# and won't exist in the database any more.
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.context, vol['id'])
class ReplicationTestCase(BaseVolumeTestCase):
@mock.patch.object(volume_rpcapi.VolumeAPI, 'failover_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get')
def test_failover_host(self, mock_db_args, mock_db_update,
mock_failover):
"""Test replication failover_host."""
mock_db_args.return_value = fake_service.fake_service_obj(
self.context,
binary='cinder-volume')
mock_db_update.return_value = {'replication_status': 'enabled'}
volume_api = cinder.volume.api.API()
volume_api.failover_host(self.context, host=CONF.host)
mock_failover.assert_called_once_with(self.context, CONF.host, None)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'failover_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get')
def test_failover_host_unexpected_status(self, mock_db_args,
mock_db_update,
mock_failover):
"""Test replication failover_host unxepected status."""
mock_db_args.return_value = fake_service.fake_service_obj(
self.context,
binary='cinder-volume')
mock_db_update.return_value = None
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.failover_host,
self.context,
host=CONF.host)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get')
def test_freeze_host(self, mock_db_args, mock_db_update,
mock_freeze):
"""Test replication freeze_host."""
mock_db_args.return_value = fake_service.fake_service_obj(
self.context,
binary='cinder-volume')
mock_db_update.return_value = {'frozen': False}
volume_api = cinder.volume.api.API()
volume_api.freeze_host(self.context, host=CONF.host)
mock_freeze.assert_called_once_with(self.context, CONF.host)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get')
def test_freeze_host_unexpected_status(self, mock_db_args,
mock_db_update,
mock_freeze):
"""Test replication freeze_host unexpected status."""
mock_db_args.return_value = fake_service.fake_service_obj(
self.context,
binary='cinder-volume')
mock_db_update.return_value = None
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.freeze_host,
self.context,
host=CONF.host)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get')
def test_thaw_host(self, mock_db_args, mock_db_update,
mock_thaw):
"""Test replication thaw_host."""
mock_db_args.return_value = fake_service.fake_service_obj(
self.context,
binary='cinder-volume')
mock_db_update.return_value = {'frozen': True}
mock_thaw.return_value = True
volume_api = cinder.volume.api.API()
volume_api.thaw_host(self.context, host=CONF.host)
mock_thaw.assert_called_once_with(self.context, CONF.host)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
@mock.patch.object(cinder.db, 'conditional_update')
@mock.patch.object(cinder.db, 'service_get')
def test_thaw_host_unexpected_status(self, mock_db_args,
mock_db_update,
mock_thaw):
"""Test replication thaw_host unexpected status."""
mock_db_args.return_value = fake_service.fake_service_obj(
self.context,
binary='cinder-volume')
mock_db_update.return_value = None
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.thaw_host,
self.context,
host=CONF.host)
class CopyVolumeToImageTestCase(BaseVolumeTestCase):
def fake_local_path(self, volume):
return self.dst_path
def setUp(self):
super(CopyVolumeToImageTestCase, self).setUp()
self.dst_fd, self.dst_path = tempfile.mkstemp()
self.addCleanup(os.unlink, self.dst_path)
os.close(self.dst_fd)
self.mock_object(self.volume.driver, 'local_path',
self.fake_local_path)
self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
self.image_meta = {
'id': self.image_id,
'container_format': 'bare',
'disk_format': 'raw'
}
self.volume_id = fake.VOLUME_ID
self.addCleanup(db.volume_destroy, self.context, self.volume_id)
self.volume_attrs = {
'id': self.volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'host': 'dummy'
}
def test_copy_volume_to_image_status_available(self):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_over_image_quota(self):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
volume = db.volume_create(self.context, self.volume_attrs)
with mock.patch.object(self.volume.driver,
'copy_volume_to_image') as driver_copy_mock:
driver_copy_mock.side_effect = exception.ImageLimitExceeded
# test with image not in queued state
self.assertRaises(exception.ImageLimitExceeded,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
# Assert a user message was created
self.volume.message_api.create.assert_called_once_with(
self.context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
self.context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume['id'])
def test_copy_volume_to_image_instance_deleted(self):
# If the instance is deleted while the volume is being uploaded to an
# image, the volume should end up in 'available' status.
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# Creating volume testdata
self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
'45b1161abb02'
db.volume_create(self.context, self.volume_attrs)
# Storing the unmocked db api function reference here, because we have to
# update the volume status (set instance_uuid to None) before calling the
# 'volume_update_status_based_on_attachment' db api.
unmocked_db_api = db.volume_update_status_based_on_attachment
def mock_volume_update_after_upload(context, volume_id):
# First update volume and set 'instance_uuid' to None
# because after deleting instance, instance_uuid of volume is
# set to None
db.volume_update(context, volume_id, {'instance_uuid': None})
# Calling unmocked db api
unmocked_db_api(context, volume_id)
with mock.patch.object(
db,
'volume_update_status_based_on_attachment',
side_effect=mock_volume_update_after_upload) as mock_update:
# Start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
# Check 'volume_update_status_based_on_attachment'
# is called 1 time
self.assertEqual(1, mock_update.call_count)
# Check volume status has changed to available because
# instance is deleted
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_status_use(self):
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# creating volume testdata
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_exception(self):
self.image_meta['id'] = self.FAKE_UUID
# creating volume testdata
self.volume_attrs['status'] = 'in-use'
db.volume_create(self.context, self.volume_attrs)
# start test
self.assertRaises(exception.ImageNotFound,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_driver_not_initialized(self):
# creating volume testdata
db.volume_create(self.context, self.volume_attrs)
# set initialized to False
self.volume.driver._initialized = False
# start test
self.assertRaises(exception.DriverNotInitialized,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume.status)
def test_copy_volume_to_image_driver_exception(self):
self.image_meta['id'] = self.image_id
image_service = fake_image.FakeImageService()
# create new image in queued state
queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9'
queued_image_meta = image_service.show(self.context, self.image_id)
queued_image_meta['id'] = queued_image_id
queued_image_meta['status'] = 'queued'
image_service.create(self.context, queued_image_meta)
# create new image in saving state
saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
saving_image_meta = image_service.show(self.context, self.image_id)
saving_image_meta['id'] = saving_image_id
saving_image_meta['status'] = 'saving'
image_service.create(self.context, saving_image_meta)
# create volume
self.volume_attrs['status'] = 'available'
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
with mock.patch.object(self.volume.driver,
'copy_volume_to_image') as driver_copy_mock:
driver_copy_mock.side_effect = exception.VolumeDriverException(
"Error")
# test with image not in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image shouldn't be deleted if it is not in queued state
image_service.show(self.context, self.image_id)
# test with image in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
queued_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# queued image should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
queued_image_id)
# test with image in saving state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
saving_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image in saving state should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
saving_image_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(vol_manager.VolumeManager, 'create_volume')
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
'copy_volume_to_image')
def _test_copy_volume_to_image_with_image_volume(
self, mock_copy, mock_create, mock_quota_commit,
mock_quota_reserve):
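# Upload goes through a Cinder-backed image volume when
# image_upload_use_cinder_backend is enabled; returns the Glance image.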
self.flags(glance_api_version=2)
self.volume.driver.configuration.image_upload_use_cinder_backend = True
image_service = fake_image.FakeImageService()
image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
self.image_meta['id'] = image_id
self.image_meta['status'] = 'queued'
image_service.create(self.context, self.image_meta)
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
def fake_create(context, volume_id, **kwargs):
db.volume_update(context, volume_id, {'status': 'available'})
mock_create.side_effect = fake_create
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# return the created image
image = image_service.show(self.context, image_id)
image_service.delete(self.context, image_id)
return image
def test_copy_volume_to_image_with_image_volume(self):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
def test_copy_volume_to_image_with_image_volume_qcow2(self):
self.image_meta['disk_format'] = 'qcow2'
image = self._test_copy_volume_to_image_with_image_volume()
self.assertIsNone(image.get('locations'))
@mock.patch.object(vol_manager.VolumeManager, 'delete_volume')
@mock.patch.object(fake_image._FakeImageService, 'add_location',
side_effect=exception.Invalid)
def test_copy_volume_to_image_with_image_volume_failure(
self, mock_add_location, mock_delete):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertIsNone(image.get('locations'))
self.assertTrue(mock_delete.called)
class GetActiveByWindowTestCase(BaseVolumeTestCase):
def setUp(self):
super(GetActiveByWindowTestCase, self).setUp()
self.ctx = context.get_admin_context(read_deleted="yes")
self.db_vol_attrs = [
{
'id': fake.VOLUME_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
},
{
'id': fake.VOLUME2_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': fake.VOLUME3_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
},
{
'id': fake.VOLUME4_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': fake.VOLUME5_ID,
'host': 'devstack',
'project_id': fake.PROJECT_ID,
'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
}
]
self.db_snap_attrs = [
{
'id': fake.SNAPSHOT_ID,
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT2_ID,
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT3_ID,
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT_ID,
'project_id': 'p1',
'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT2_ID,
'project_id': 'p1',
'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID
}
]
def test_volume_get_active_by_window(self):
# Find all volumes valid within a timeframe window.
# Not in window
db.volume_create(self.ctx, self.db_vol_attrs[0])
# In - deleted in window
db.volume_create(self.ctx, self.db_vol_attrs[1])
# In - deleted after window
db.volume_create(self.ctx, self.db_vol_attrs[2])
# In - created in window
db.volume_create(self.context, self.db_vol_attrs[3])
# Not in window.
db.volume_create(self.context, self.db_vol_attrs[4])
volumes = db.volume_get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1),
project_id=fake.PROJECT_ID)
self.assertEqual(3, len(volumes))
self.assertEqual(fake.VOLUME2_ID, volumes[0].id)
self.assertEqual(fake.VOLUME3_ID, volumes[1].id)
self.assertEqual(fake.VOLUME4_ID, volumes[2].id)
def test_snapshot_get_active_by_window(self):
# Find all snapshots valid within a timeframe window.
db.volume_create(self.context, {'id': fake.VOLUME_ID})
for i in range(5):
self.db_vol_attrs[i]['volume_id'] = fake.VOLUME_ID
# Not in window
del self.db_snap_attrs[0]['id']
snap1 = objects.Snapshot(self.ctx, **self.db_snap_attrs[0])
snap1.create()
# In - deleted in window
del self.db_snap_attrs[1]['id']
snap2 = objects.Snapshot(self.ctx, **self.db_snap_attrs[1])
snap2.create()
# In - deleted after window
del self.db_snap_attrs[2]['id']
snap3 = objects.Snapshot(self.ctx, **self.db_snap_attrs[2])
snap3.create()
# In - created in window
del self.db_snap_attrs[3]['id']
snap4 = objects.Snapshot(self.ctx, **self.db_snap_attrs[3])
snap4.create()
# Not in window.
del self.db_snap_attrs[4]['id']
snap5 = objects.Snapshot(self.ctx, **self.db_snap_attrs[4])
snap5.create()
snapshots = objects.SnapshotList.get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1)).objects
self.assertEqual(3, len(snapshots))
self.assertEqual(snap2.id, snapshots[0].id)
self.assertEqual(fake.VOLUME_ID, snapshots[0].volume_id)
self.assertEqual(snap3.id, snapshots[1].id)
self.assertEqual(fake.VOLUME_ID, snapshots[1].volume_id)
self.assertEqual(snap4.id, snapshots[2].id)
self.assertEqual(fake.VOLUME_ID, snapshots[2].volume_id)
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "cinder.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volume_driver=self.driver_name,
volumes_dir=vol_tmpdir)
self.volume = importutils.import_object(CONF.volume_manager)
self.context = context.get_admin_context()
self.output = ""
self.configuration = conf.Configuration(None)
self.mock_object(brick_lvm.LVM, '_vg_exists', lambda x: True)
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
exec_patcher = mock.patch.object(self.volume.driver, '_execute',
_fake_execute)
exec_patcher.start()
self.addCleanup(exec_patcher.stop)
self.volume.driver.set_initialized()
self.addCleanup(self._cleanup)
def _cleanup(self):
try:
shutil.rmtree(CONF.volumes_dir)
except OSError:
pass
def _attach_volume(self):
"""Attach volumes to an instance."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
@ddt.ddt
class GenericVolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver."""
driver_name = "cinder.tests.fake_driver.FakeLoggingVolumeDriver"
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_available(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
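# Backing up an available volume; the driver should look the source
# volume up in the DB and run the backup against it directly.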
vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.create_snapshot = mock.MagicMock()
self.volume.driver.delete_snapshot = mock.MagicMock()
mock_volume_get.return_value = vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
def test_create_temp_cloned_volume(self):
with mock.patch.object(
self.volume.driver,
'create_cloned_volume') as mock_create_cloned_volume:
model_update = {'provider_location': 'dummy'}
mock_create_cloned_volume.return_value = model_update
vol = tests_utils.create_volume(self.context,
status='backing-up')
cloned_vol = self.volume.driver._create_temp_cloned_volume(
self.context, vol)
self.assertEqual('dummy', cloned_vol.provider_location)
self.assertEqual('available', cloned_vol.status)
mock_create_cloned_volume.return_value = None
vol = tests_utils.create_volume(self.context,
status='backing-up')
cloned_vol = self.volume.driver._create_temp_cloned_volume(
self.context, vol)
self.assertEqual('available', cloned_vol.status)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
def test_backup_volume_inuse_temp_volume(self, mock_volume_get,
mock_get_connector_properties,
mock_file_open,
mock_temporary_chown):
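# Backing up a previously in-use volume should go through a temporary
# snapshot that is deleted again afterwards.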
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
temp_vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = mock.Mock()
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver._create_temp_snapshot = mock.MagicMock()
self.volume.driver._delete_temp_snapshot = mock.MagicMock()
mock_volume_get.return_value = vol
self.volume.driver._create_temp_snapshot.return_value = temp_vol
mock_get_connector_properties.return_value = properties
f = mock_file_open.return_value = open('/dev/null', 'rb')
backup_service.backup(backup_obj, f, None)
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver.backup_volume(self.context, backup_obj,
backup_service)
mock_volume_get.assert_called_with(self.context, vol['id'])
self.volume.driver._create_temp_snapshot.assert_called_once_with(
self.context, vol)
self.volume.driver._delete_temp_snapshot.assert_called_once_with(
self.context, temp_vol)
@mock.patch.object(utils, 'temporary_chown')
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch('six.moves.builtins.open')
def test_restore_backup(self,
mock_open,
mock_get_connector_properties,
mock_temporary_chown):
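# restore_backup should hand the attached volume's file handle to the
# backup service exactly once.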
dev_null = '/dev/null'
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id'], 'id': 'backup-for-%s' % vol['id']}
properties = {}
attach_info = {'device': {'path': dev_null}}
volume_file = mock.MagicMock()
mock_open.return_value.__enter__.return_value = volume_file
mock_get_connector_properties.return_value = properties
self.volume.driver._attach_volume = mock.MagicMock()
self.volume.driver._attach_volume.return_value = attach_info, vol
self.volume.driver._detach_volume = mock.MagicMock()
self.volume.driver.terminate_connection = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled = mock.MagicMock()
self.volume.driver.secure_file_operations_enabled.side_effect = (False,
True)
backup_service = mock.MagicMock()
self.volume.driver.restore_backup(self.context, backup, vol,
backup_service)
backup_service.restore.assert_called_with(backup, vol['id'],
volume_file)
self.assertEqual(1, backup_service.restore.call_count)
def test_get_backup_device_available(self):
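# An available volume is used directly as the backup device and no
# temporary volume is recorded on the backup.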
vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
(backup_device, is_snapshot) = self.volume.driver.get_backup_device(
self.context, backup_obj)
volume = objects.Volume.get_by_id(self.context, vol.id)
self.assertEqual(volume, backup_device)
self.assertFalse(is_snapshot)
backup_obj.refresh()
self.assertIsNone(backup_obj.temp_volume_id)
def test_get_backup_device_in_use(self):
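# An in-use volume is backed up from a temporary cloned volume, which is
# recorded in the backup's temp_volume_id.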
vol = tests_utils.create_volume(self.context,
status='backing-up',
previous_status='in-use')
temp_vol = tests_utils.create_volume(self.context)
self.context.user_id = fake.USER_ID
self.context.project_id = fake.PROJECT_ID
backup_obj = tests_utils.create_backup(self.context,
vol['id'])
with mock.patch.object(
self.volume.driver,
'_create_temp_cloned_volume') as mock_create_temp:
mock_create_temp.return_value = temp_vol
(backup_device, is_snapshot) = (
self.volume.driver.get_backup_device(self.context,
backup_obj))
self.assertEqual(temp_vol, backup_device)
self.assertFalse(is_snapshot)
backup_obj.refresh()
self.assertEqual(temp_vol.id, backup_obj.temp_volume_id)
def test__create_temp_volume_from_snapshot(self):
volume_dict = {'id': fake.SNAPSHOT_ID,
'host': 'fakehost',
'availability_zone': 'fakezone',
'size': 1}
vol = fake_volume.fake_volume_obj(self.context, **volume_dict)
snapshot = fake_snapshot.fake_snapshot_obj(self.context)
with mock.patch.object(
self.volume.driver,
'create_volume_from_snapshot'):
temp_vol = self.volume.driver._create_temp_volume_from_snapshot(
self.context,
vol, snapshot)
self.assertEqual('detached', temp_vol.attach_status)
self.assertEqual('fakezone', temp_vol.availability_zone)
@mock.patch.object(utils, 'brick_get_connector_properties')
@mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume')
@mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume')
@mock.patch.object(volutils, 'copy_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities')
@mock.patch.object(cinder.volume.volume_types,
'volume_types_encryption_changed')
@ddt.data(False, True)
def test_copy_volume_data_mgr(self,
encryption_changed,
mock_encryption_changed,
mock_get_capabilities,
mock_copy,
mock_detach,
mock_attach,
mock_get_connector):
"""Test function of _copy_volume_data."""
src_vol = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
dest_vol = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
mock_get_connector.return_value = {}
mock_encryption_changed.return_value = encryption_changed
self.volume.driver._throttle = mock.MagicMock()
attach_expected = [
mock.call(self.context, dest_vol, {},
remote=False,
attach_encryptor=encryption_changed),
mock.call(self.context, src_vol, {},
remote=False,
attach_encryptor=encryption_changed)]
detach_expected = [
mock.call(self.context, {'device': {'path': 'bar'}},
dest_vol, {}, force=False, remote=False,
attach_encryptor=encryption_changed),
mock.call(self.context, {'device': {'path': 'foo'}},
src_vol, {}, force=False, remote=False,
attach_encryptor=encryption_changed)]
attach_volume_returns = [
{'device': {'path': 'bar'}},
{'device': {'path': 'foo'}}
]
# Test case for sparse_copy_volume = False
mock_attach.side_effect = attach_volume_returns
mock_get_capabilities.return_value = {}
self.volume._copy_volume_data(self.context,
src_vol,
dest_vol)
self.assertEqual(attach_expected, mock_attach.mock_calls)
mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=False)
self.assertEqual(detach_expected, mock_detach.mock_calls)
# Test case for sparse_copy_volume = True
mock_attach.reset_mock()
mock_detach.reset_mock()
mock_attach.side_effect = attach_volume_returns
mock_get_capabilities.return_value = {'sparse_copy_volume': True}
self.volume._copy_volume_data(self.context,
src_vol,
dest_vol)
self.assertEqual(attach_expected, mock_attach.mock_calls)
mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=True)
self.assertEqual(detach_expected, mock_detach.mock_calls)
# cleanup resource
db.volume_destroy(self.context, src_vol['id'])
db.volume_destroy(self.context, dest_vol['id'])
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(image_utils, 'fetch_to_raw')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
@mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
@mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
def test_copy_image_to_encrypted_volume(self,
mock_detach_encryptor,
mock_attach_encryptor,
mock_detach_volume,
mock_attach_volume,
mock_fetch_to_raw,
mock_get_connector_properties):
properties = {}
volume = tests_utils.create_volume(
self.context, status='available',
size=2,
encryption_key_id=fake.ENCRYPTION_KEY_ID)
volume_id = volume['id']
volume = db.volume_get(context.get_admin_context(), volume_id)
image_service = fake_image.FakeImageService()
local_path = 'dev/sda'
attach_info = {'device': {'path': local_path},
'conn': {'driver_volume_type': 'iscsi',
'data': {}, }}
mock_get_connector_properties.return_value = properties
mock_attach_volume.return_value = [attach_info, volume]
self.volume.driver.copy_image_to_encrypted_volume(
self.context, volume, image_service, fake.IMAGE_ID)
encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
mock_attach_volume.assert_called_once_with(
self.context, volume, properties)
mock_attach_encryptor.assert_called_once_with(
self.context, attach_info, encryption)
mock_fetch_to_raw.assert_called_once_with(
self.context, image_service, fake.IMAGE_ID,
local_path, '1M', size=2)
mock_detach_encryptor.assert_called_once_with(
attach_info, encryption)
mock_detach_volume.assert_called_once_with(
self.context, attach_info, volume, properties)
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(image_utils, 'fetch_to_raw')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
@mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
@mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
def test_copy_image_to_encrypted_volume_failed_attach_encryptor(
self,
mock_detach_encryptor,
mock_attach_encryptor,
mock_detach_volume,
mock_attach_volume,
mock_fetch_to_raw,
mock_get_connector_properties):
properties = {}
volume = tests_utils.create_volume(
self.context, status='available',
size=2,
encryption_key_id=fake.ENCRYPTION_KEY_ID)
volume_id = volume['id']
volume = db.volume_get(context.get_admin_context(), volume_id)
image_service = fake_image.FakeImageService()
attach_info = {'device': {'path': 'dev/sda'},
'conn': {'driver_volume_type': 'iscsi',
'data': {}, }}
mock_get_connector_properties.return_value = properties
mock_attach_volume.return_value = [attach_info, volume]
        raised_exception = os_brick.exception.VolumeEncryptionNotSupported(
            volume_id="123",
            volume_type="abc")
mock_attach_encryptor.side_effect = raised_exception
self.assertRaises(os_brick.exception.VolumeEncryptionNotSupported,
self.volume.driver.copy_image_to_encrypted_volume,
self.context, volume, image_service, fake.IMAGE_ID)
encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
mock_attach_volume.assert_called_once_with(
self.context, volume, properties)
mock_attach_encryptor.assert_called_once_with(
self.context, attach_info, encryption)
self.assertFalse(mock_fetch_to_raw.called)
self.assertFalse(mock_detach_encryptor.called)
mock_detach_volume.assert_called_once_with(
self.context, attach_info, volume, properties)
@mock.patch.object(os_brick.initiator.connector,
'get_connector_properties')
@mock.patch.object(image_utils, 'fetch_to_raw')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume')
@mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume')
@mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor')
@mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor')
def test_copy_image_to_encrypted_volume_failed_fetch(
self,
mock_detach_encryptor, mock_attach_encryptor,
mock_detach_volume, mock_attach_volume, mock_fetch_to_raw,
mock_get_connector_properties):
properties = {}
volume = tests_utils.create_volume(
self.context, status='available',
size=2,
encryption_key_id=fake.ENCRYPTION_KEY_ID)
volume_id = volume['id']
volume = db.volume_get(context.get_admin_context(), volume_id)
image_service = fake_image.FakeImageService()
local_path = 'dev/sda'
attach_info = {'device': {'path': local_path},
'conn': {'driver_volume_type': 'iscsi',
'data': {}, }}
mock_get_connector_properties.return_value = properties
mock_attach_volume.return_value = [attach_info, volume]
raised_exception = exception.ImageUnacceptable(reason='fake',
image_id=fake.IMAGE_ID)
mock_fetch_to_raw.side_effect = raised_exception
encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
self.assertRaises(exception.ImageUnacceptable,
self.volume.driver.copy_image_to_encrypted_volume,
self.context, volume, image_service, fake.IMAGE_ID)
mock_attach_volume.assert_called_once_with(
self.context, volume, properties)
mock_attach_encryptor.assert_called_once_with(
self.context, attach_info, encryption)
mock_fetch_to_raw.assert_called_once_with(
self.context, image_service, fake.IMAGE_ID,
local_path, '1M', size=2)
mock_detach_encryptor.assert_called_once_with(
attach_info, encryption)
mock_detach_volume.assert_called_once_with(
self.context, attach_info, volume, properties)
class FibreChannelTestCase(DriverTestCase):
"""Test Case for FibreChannelDriver."""
driver_name = "cinder.volume.driver.FibreChannelDriver"
def test_initialize_connection(self):
self.assertRaises(NotImplementedError,
self.volume.driver.initialize_connection, {}, {})
def test_validate_connector(self):
"""validate_connector() successful use case.
validate_connector() does not throw an exception when
wwpns and wwnns are both set and both are not empty.
"""
connector = {'wwpns': ["not empty"],
'wwnns': ["not empty"]}
self.volume.driver.validate_connector(connector)
def test_validate_connector_no_wwpns(self):
"""validate_connector() throws exception when it has no wwpns."""
connector = {'wwnns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
def test_validate_connector_empty_wwpns(self):
"""validate_connector() throws exception when it has empty wwpns."""
connector = {'wwpns': [],
'wwnns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
def test_validate_connector_no_wwnns(self):
"""validate_connector() throws exception when it has no wwnns."""
connector = {'wwpns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
def test_validate_connector_empty_wwnns(self):
"""validate_connector() throws exception when it has empty wwnns."""
connector = {'wwnns': [],
'wwpns': ["not empty"]}
self.assertRaises(exception.InvalidConnectorException,
self.volume.driver.validate_connector, connector)
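# The wwpn/wwnn tests above pin down the validate_connector() contract. A
# minimal stand-alone sketch of that contract (illustrative only -- it is not
# cinder's implementation, and ValueError stands in for
# exception.InvalidConnectorException):
def _validate_fc_connector_sketch(connector):
    # Both keys must be present and non-empty for a Fibre Channel connector.
    for key in ('wwpns', 'wwnns'):
        if not connector.get(key):
            raise ValueError('missing or empty %s in connector' % key)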
class VolumePolicyTestCase(test.TestCase):
def setUp(self):
super(VolumePolicyTestCase, self).setUp()
cinder.policy.init()
self.context = context.get_admin_context()
def test_check_policy(self):
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
with mock.patch.object(cinder.policy, 'enforce') as mock_enforce:
cinder.volume.api.check_policy(self.context, 'attach')
mock_enforce.assert_called_once_with(self.context,
'volume:attach',
target)
def test_check_policy_with_target(self):
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'id': 2,
}
with mock.patch.object(cinder.policy, 'enforce') as mock_enforce:
cinder.volume.api.check_policy(self.context, 'attach', {'id': 2})
mock_enforce.assert_called_once_with(self.context,
'volume:attach',
target)
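# test_check_policy and test_check_policy_with_target above fix the contract:
# the target defaults to the caller's project/user, is extended by an explicit
# target dict, and policy.enforce() is called with a 'volume:<action>' rule.
# A hedged sketch of that pattern (the enforce callable is passed in here; the
# real code reaches into cinder.policy):
def _check_policy_sketch(ctx, action, enforce, target=None):
    merged = {'project_id': ctx.project_id, 'user_id': ctx.user_id}
    merged.update(target or {})
    enforce(ctx, 'volume:%s' % action, merged)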
class ImageVolumeCacheTestCase(BaseVolumeTestCase):
def setUp(self):
super(ImageVolumeCacheTestCase, self).setUp()
self.volume.driver.set_initialized()
@mock.patch('oslo_utils.importutils.import_object')
def test_cache_configs(self, mock_import_object):
opts = {
'image_volume_cache_enabled': True,
'image_volume_cache_max_size_gb': 100,
'image_volume_cache_max_count': 20
}
def conf_get(option):
if option in opts:
return opts[option]
else:
return None
mock_driver = mock.Mock()
mock_driver.configuration.safe_get.side_effect = conf_get
mock_driver.configuration.extra_capabilities = 'null'
def import_obj(*args, **kwargs):
return mock_driver
mock_import_object.side_effect = import_obj
manager = vol_manager.VolumeManager(volume_driver=mock_driver)
self.assertIsNotNone(manager)
self.assertIsNotNone(manager.image_volume_cache)
self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb)
self.assertEqual(20, manager.image_volume_cache.max_cache_size_count)
def test_delete_image_volume(self):
volume_params = {
'status': 'creating',
'host': 'some_host',
'size': 1
}
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, **volume_params)
volume.status = 'available'
volume.save()
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
db.image_volume_cache_create(self.context,
volume['host'],
image_id,
datetime.datetime.utcnow(),
volume['id'],
volume['size'])
volume_api.delete(self.context, volume)
entry = db.image_volume_cache_get_by_volume_id(self.context,
volume['id'])
self.assertIsNone(entry)
def test_delete_volume_with_keymanager_exception(self):
volume_params = {
'host': 'some_host',
'size': 1
}
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, **volume_params)
with mock.patch.object(
volume_api.key_manager, 'delete') as key_del_mock:
key_del_mock.side_effect = Exception("Key not found")
volume_api.delete(self.context, volume)
@ddt.ddt
class DiscardFlagTestCase(BaseVolumeTestCase):
def setUp(self):
super(DiscardFlagTestCase, self).setUp()
self.volume.driver = mock.MagicMock()
@ddt.data(dict(config_discard_flag=True,
driver_discard_flag=None,
expected_flag=True),
dict(config_discard_flag=False,
driver_discard_flag=None,
expected_flag=None),
dict(config_discard_flag=True,
driver_discard_flag=True,
expected_flag=True),
dict(config_discard_flag=False,
driver_discard_flag=True,
expected_flag=True),
dict(config_discard_flag=False,
driver_discard_flag=False,
expected_flag=False),
dict(config_discard_flag=None,
driver_discard_flag=True,
expected_flag=True),
dict(config_discard_flag=None,
driver_discard_flag=False,
expected_flag=False))
@ddt.unpack
def test_initialize_connection_discard_flag(self,
config_discard_flag,
driver_discard_flag,
expected_flag):
self.volume.driver.create_export.return_value = None
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
conn_info = {
'driver_volume_type': 'iscsi',
'data': {'access_mode': 'rw',
'encrypted': False}
}
if driver_discard_flag is not None:
conn_info['data']['discard'] = driver_discard_flag
self.volume.driver.initialize_connection.return_value = conn_info
def _safe_get(key):
            if key == 'report_discard_supported':
return config_discard_flag
else:
return None
self.volume.driver.configuration.safe_get.side_effect = _safe_get
with mock.patch.object(objects, 'Volume') as mock_vol:
volume = tests_utils.create_volume(self.context)
volume.volume_type_id = None
mock_vol.get_by_id.return_value = volume
conn_info = self.volume.initialize_connection(self.context,
volume.id,
connector)
self.assertEqual(expected_flag, conn_info['data'].get('discard'))
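# The ddt table above encodes the discard resolution rule exercised by
# test_initialize_connection_discard_flag: a 'discard' value set by the driver
# in conn_info wins, otherwise a truthy report_discard_supported config option
# forces discard=True, otherwise the key stays unset. A stand-alone sketch of
# that rule (illustrative only):
def _resolve_discard_flag_sketch(conn_data, config_discard_flag):
    if 'discard' not in conn_data and config_discard_flag:
        conn_data['discard'] = True
    return conn_data.get('discard')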
| {
"content_hash": "0e183c7edf6b30eaaa875de500c55c0b",
"timestamp": "",
"source": "github",
"line_count": 6720,
"max_line_length": 80,
"avg_line_length": 46.2125,
"alnum_prop": 0.5536567615956309,
"repo_name": "Nexenta/cinder",
"id": "b06613a1d890dcb4e162ec427cfca32ed7d1121f",
"size": "311279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/test_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18007018"
},
{
"name": "Shell",
"bytes": "13543"
}
],
"symlink_target": ""
} |
from common import * # NOQA
import yaml
from netaddr import IPNetwork, IPAddress
def _create_stack(client):
env = client.create_environment(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
return env
def _create_stack_long_name(client, lname):
env = client.create_environment(name=lname)
env = client.wait_success(env)
assert env.state == "active"
return env
def create_env_and_svc(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
return service, env
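# A minimal usage sketch of the helper above, assuming the cattle test fixtures
# (`client`, `context`) that the common conftest injects into every test:
def _example_create_and_activate(client, context):
    service, env = create_env_and_svc(client, context)
    service = client.wait_success(service.activate(), 120)
    assert service.state == "active"
    return service, env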
def test_mix_cased_labels(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {
"imageUuid": image_uuid,
'labels': {
'aAa': 'AaA',
}}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
assert launch_config['labels'] == service.launchConfig.labels
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate())
assert launch_config['labels'] == service.launchConfig.labels
instance = find_one(_get_instance_for_service, client, service.id)
for k, v in service.launchConfig.labels.items():
assert instance.labels[k] == v
def test_update_env_service(client, context):
service, env = create_env_and_svc(client, context)
new_env_name = env.name + '1'
new_name = service.name + '1'
service.name = new_name
service.scale = None
service = client.update(service, service)
assert service.name == new_name
env.name = new_env_name
env = client.update(env, env)
assert env.name == new_env_name
def test_env_set_outputs(client, context):
service, env = create_env_and_svc(client, context)
assert env.outputs is None
def func():
return env.addoutputs(outputs={
'foo': 'bar',
'foo2': 'bar2',
})
env = retry(func)
assert env.outputs == {'foo': 'bar', 'foo2': 'bar2'}
env = client.reload(env)
assert env.outputs == {'foo': 'bar', 'foo2': 'bar2'}
assert env.state == 'active'
def func():
return env.addoutputs(outputs={
'foo3': 'bar3',
})
env = retry(func)
assert env.outputs == {'foo': 'bar', 'foo2': 'bar2', 'foo3': 'bar3'}
def test_activate_single_service(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
host = context.host
container1 = client.create_container(imageUuid=image_uuid,
startOnCreate=True)
container1 = client.wait_success(container1)
container2 = client.create_container(imageUuid=image_uuid,
startOnCreate=True)
container2 = client.wait_success(container2)
caps = ["SYS_MODULE"]
dns = ['8.8.8.8', '1.2.3.4']
search = ['foo', 'bar']
health_check = {"name": "check1", "responseTimeout": 3,
"interval": 4, "healthyThreshold": 5,
"unhealthyThreshold": 6, "requestLine": "index.html",
"port": 200}
labels = {"foo": "bar"}
launch_config = {"imageUuid": image_uuid}
consumed_service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
consumed_service = client.wait_success(consumed_service)
launch_config = {"imageUuid": image_uuid,
"command": ['sleep', '42'],
"environment": {'TEST_FILE': "/etc/testpath.conf"},
"ports": ['8681', '8082/tcp'],
"dataVolumes": ['/foo'],
"dataVolumesFrom": [container1.id],
"capAdd": caps,
"capDrop": caps,
"dnsSearch": search,
"dns": dns,
"privileged": True,
"domainName": "rancher.io",
"memory": 8000000,
"stdinOpen": True,
"tty": True,
"entryPoint": ["/bin/sh", "-c"],
"cpuShares": 400,
"cpuSet": "2",
"workingDir": "/",
"hostname": "test",
"user": "test",
"instanceLinks": {
'container2_link':
container2.id},
"requestedHostId": host.id,
"healthCheck": health_check,
"labels": labels}
metadata = {"bar": {"foo": [{"id": 0}]}}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
metadata=metadata)
svc = client.wait_success(svc)
# validate that parameters were set for service
assert svc.state == "inactive"
assert svc.launchConfig.imageUuid == image_uuid
assert svc.launchConfig.command == ['sleep', '42']
assert len(svc.launchConfig.environment) == 1
assert len(svc.launchConfig.ports) == 2
assert len(svc.launchConfig.dataVolumes) == 1
assert svc.launchConfig.dataVolumesFrom == list([container1.id])
assert svc.launchConfig.capAdd == caps
assert svc.launchConfig.capDrop == caps
assert svc.launchConfig.dns == dns
assert svc.launchConfig.dnsSearch == search
assert svc.launchConfig.privileged is True
assert svc.launchConfig.domainName == "rancher.io"
assert svc.launchConfig.memory == 8000000
assert svc.launchConfig.stdinOpen is True
assert svc.launchConfig.tty is True
assert svc.launchConfig.entryPoint == ["/bin/sh", "-c"]
assert svc.launchConfig.cpuShares == 400
assert svc.launchConfig.workingDir == "/"
assert svc.launchConfig.hostname == "test"
assert svc.launchConfig.user == "test"
assert len(svc.launchConfig.instanceLinks) == 1
assert svc.kind == "service"
# assert service.launchConfig.registryCredentialId == reg_cred.id
assert svc.launchConfig.healthCheck.name == "check1"
assert svc.launchConfig.healthCheck.responseTimeout == 3
assert svc.launchConfig.healthCheck.interval == 4
assert svc.launchConfig.healthCheck.healthyThreshold == 5
assert svc.launchConfig.healthCheck.unhealthyThreshold == 6
assert svc.launchConfig.healthCheck.requestLine == "index.html"
assert svc.launchConfig.healthCheck.port == 200
assert svc.metadata == metadata
assert svc.launchConfig.version == '0'
assert svc.launchConfig.requestedHostId == host.id
# activate the service and validate that parameters were set for instance
service = client.wait_success(svc.activate())
assert service.state == "active"
instance_service_map = client \
.list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + "1")
assert len(instances) == 1
container = instances[0]
assert container.imageUuid == image_uuid
assert container.command == ['sleep', '42']
assert len(container.instanceLinks()) == 1
assert len(container.environment) == 1
assert len(container.ports) == 2
assert len(container.dataVolumes) == 1
assert set(container.dataVolumesFrom) == set([container1.id])
assert container.capAdd == caps
assert container.capDrop == caps
dns.append("169.254.169.250")
assert all(item in dns for item in container.dns) is True
search.append(env.name + "." + "rancher.internal")
assert set(search).issubset(container.dnsSearch)
assert container.privileged is True
assert container.domainName == "rancher.io"
assert container.memory == 8000000
assert container.stdinOpen is True
assert container.tty is True
assert container.entryPoint == ["/bin/sh", "-c"]
assert container.cpuShares == 400
assert container.workingDir == "/"
assert container.hostname == "test"
assert container.user == "test"
assert container.state == "running"
assert container.cpuSet == "2"
assert container.requestedHostId == host.id
assert container.healthState == 'initializing'
assert container.deploymentUnitUuid is not None
assert container.version == '0'
def test_activate_services(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service2 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
env.activateservices()
service1 = client.wait_success(service1, 120)
service2 = client.wait_success(service2, 120)
assert service1.state == "active"
assert service2.state == "active"
def _validate_instance_stopped(service, client, env):
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + "1")
assert len(instances) == 1
instance = instances[0]
wait_for_condition(
client, instance, _resource_is_stopped,
lambda x: 'State is: ' + x.state)
def _validate_compose_instance_removed(client, service, env, number="1"):
def check():
return client. \
list_container(name=env.name + "-" + service.name + "-" + number)
wait_for(lambda: len(check()) == 0)
def _validate_instance_removed(client, name):
instances = client. \
list_container(name=name)
assert len(instances) == 1
instance = instances[0]
wait_for_condition(
client, instance, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_deactivate_remove_service(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(client, service, env, "1")
# deactivate service
service = client.wait_success(service.deactivate())
assert service.state == "inactive"
_validate_instance_stopped(service, client, env)
# remove service
service = client.wait_success(service.remove())
_validate_compose_instance_removed(client, service, env)
def test_env_deactivate_services(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service2 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
# activate services
env = env.activateservices()
service1 = client.wait_success(service1, 120)
service2 = client.wait_success(service2, 120)
assert service1.state == "active"
assert service2.state == "active"
# deactivate service
service1 = client.wait_success(service1.deactivate())
# deactivate services
env.deactivateservices()
service1 = client.wait_success(service1)
service2 = client.wait_success(service2)
assert service1.state == "inactive"
assert service2.state == "inactive"
_validate_instance_stopped(service1, client, env)
_validate_instance_stopped(service2, client, env)
# remove services
client.wait_success(service1.remove())
client.wait_success(service2.remove())
env.deactivateservices()
def test_remove_inactive_service(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# activate service
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(client, service, env, "1")
# deactivate service
service = client.wait_success(service.deactivate())
assert service.state == "inactive"
# remove service
service = client.wait_success(service.remove())
assert service.removed is not None
_validate_compose_instance_removed(client, service, env)
def test_remove_environment(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# activate services
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(client, service, env, "1")
# deactivate services
env = env.deactivateservices()
service = client.wait_success(service)
assert service.state == "inactive"
# remove environment
env = client.wait_success(env.remove())
assert env.removed is not None
wait_for_condition(
client, service, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_link_volumes(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"dataVolumesFromLaunchConfigs": ['secondary']}
labels = {"io.rancher.container.start_once": "true"}
secondary_lc = {"imageUuid": image_uuid,
"name": "secondary", "labels": labels}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
service = client.wait_success(service.activate(), 120)
container1 = _validate_compose_instance_start(client, service, env, "1")
container2 = _validate_compose_instance_start(client, service, env, "1",
"secondary")
assert len(container1.dataVolumesFrom) == 1
assert set(container1.dataVolumesFrom) == set([container2.id])
container2 = client.wait_success(container2.stop())
client.wait_success(service)
assert container2.state == 'stopped'
def test_volumes_service_links_scale_one(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
sec_lc_1 = {"imageUuid": image_uuid, "name": "secondary1",
"dataVolumesFromLaunchConfigs": ["primary"]}
sec_lc_2 = {"imageUuid": image_uuid, "name": "secondary2",
"dataVolumesFromLaunchConfigs":
["primary", "secondary1"]}
service = client. \
create_service(name="primary",
environmentId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[sec_lc_1, sec_lc_2])
service = client.wait_success(service)
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. validate instances
s1_container = _validate_compose_instance_start(client, service, env, "1")
s2_container = _validate_compose_instance_start(client, service, env,
"1", "secondary1")
s3_container = _validate_compose_instance_start(client, service, env,
"1", "secondary2")
assert len(s2_container.dataVolumesFrom) == 1
assert set(s2_container.dataVolumesFrom) == set([s1_container.id])
assert len(s3_container.dataVolumesFrom) == 2
assert set(s3_container.dataVolumesFrom) == set([s1_container.id,
s2_container.id])
def test_volumes_service_links_scale_two(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"dataVolumesFromLaunchConfigs": ["secondary"]}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
service = client.wait_success(service.activate(), 160)
assert service.state == "active"
# 2. validate instances
s11_container = _validate_compose_instance_start(client, service, env, "1")
s12_container = _validate_compose_instance_start(client, service, env, "2")
_validate_compose_instance_start(client, service, env, "1", "secondary")
_validate_compose_instance_start(client, service, env, "2", "secondary")
assert len(s11_container.dataVolumesFrom) == 1
assert len(s12_container.dataVolumesFrom) == 1
assert set(s12_container.dataVolumesFrom) != set(
s11_container.dataVolumesFrom)
def test_remove_active_service(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# activate service
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(client, service, env, "1")
# remove service
service = client.wait_success(service.remove(), 120)
assert service.removed is not None
_validate_compose_instance_removed(client, service, env)
def _wait_until_active_map_count(service, count, client):
def wait_for_map_count(service):
m = client. \
list_serviceExposeMap(serviceId=service.id, state='active')
return len(m) == count
wait_for(lambda: wait_for_condition(client, service, wait_for_map_count))
return client. \
list_serviceExposeMap(serviceId=service.id, state='active')
def test_remove_environment_w_active_svcs(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# activate services
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(client, service, env, "1")
# remove environment
env = client.wait_success(env.remove())
assert env.removed is not None
service = client.wait_success(service)
_validate_compose_instance_removed(client, service, env)
def _validate_compose_instance_start(client, service, env,
number, launch_config_name=None):
cn = launch_config_name + "-" if \
launch_config_name is not None else ""
name = env.name + "-" + service.name + "-" + cn + number
def wait_for_map_count(service):
instances = client. \
list_container(name=name,
state="running")
assert len(instances) <= 1
return len(instances) == 1
wait_for(lambda: wait_for_condition(client, service,
wait_for_map_count))
instances = client. \
list_container(name=name,
state="running")
return instances[0]
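# The name matched above follows "<stack>-<service>[-<launch config>]-<index>".
# A small illustrative helper (not part of the original suite) that rebuilds
# the same name:
def _compose_container_name(env_name, service_name, number,
                            launch_config_name=None):
    cn = launch_config_name + "-" if launch_config_name is not None else ""
    return env_name + "-" + service_name + "-" + cn + number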
def _validate_instance_start(service, client, name):
instances = client. \
list_container(name=name)
assert len(instances) == 1
return instances[0]
def test_validate_service_scaleup_scaledown(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service = client.wait_success(service)
assert service.state == "inactive"
wait_for(lambda: client.reload(service).healthState == 'unhealthy')
# scale up the inactive service
service = client.update(service, scale=3, name=service.name)
service = client.wait_success(service, 120)
assert service.state == "inactive"
assert service.scale == 3
# activate services
env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
instance11 = _validate_compose_instance_start(client, service, env, "1")
instance21 = _validate_compose_instance_start(client, service, env, "2")
instance31 = _validate_compose_instance_start(client, service, env, "3")
assert instance31.createIndex > instance21.createIndex
assert instance21.createIndex > instance11.createIndex
# stop the instance2
client.wait_success(instance21.stop())
service = client.wait_success(service)
wait_for(lambda: client.reload(service).healthState == 'healthy')
# rename the instance 3
instance32 = client.update(instance31, name='newName')
# scale up the service
# instance 2 should get started
service = client.update(service, scale=4, name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == 4
instance12 = _validate_compose_instance_start(client, service, env, "1")
instance22 = _validate_compose_instance_start(client, service, env, "2")
instance32 = _validate_instance_start(service, client, instance32.name)
instance41 = _validate_compose_instance_start(client, service, env, "4")
assert instance41.createIndex > instance32.createIndex
assert instance32.createIndex > instance22.createIndex
assert instance22.createIndex > instance12.createIndex
# scale down the service
service = client.update(service, scale=0, name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
# validate 0 service instance mappings
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id, state="active")
assert len(instance_service_map) == 0
# scale up service again, and validate
# that the new instance got unique create index
service = client.update(service, scale=4, name=service.name)
service = client.wait_success(service, 120)
instance42 = _validate_compose_instance_start(client, service, env, "4")
assert instance42.createIndex > instance41.createIndex
assert service.createIndex == instance42.createIndex
def _instance_remove(instance, client):
instance = client.wait_success(client.delete(instance))
wait_for_condition(client, instance,
lambda x: x.removed is not None)
return client.reload(instance)
def test_destroy_service_instance(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=3)
service = wait_state(client, service, 'inactive')
# activate service
service.activate()
service = wait_state(client, service, 'active')
instance1 = _validate_compose_instance_start(client, service, env, "1")
instance2 = _validate_compose_instance_start(client, service, env, "2")
instance3 = _validate_compose_instance_start(client, service, env, "3")
# 1. stop and remove the instance2. Validate the mapping is gone
_instance_remove(instance2, client)
wait_for(lambda: len(client.
list_serviceExposeMap(serviceId=service.id,
instanceId=instance2.id)) == 0)
service = client.wait_success(service)
# 2. deactivate the service
service.deactivate()
service = wait_state(client, service, 'inactive')
# 3. activate the service
service.activate()
service = wait_state(client, service, 'active')
service = client.reload(service)
assert service.state == "active"
# 4. destroy instance3 and update the service's scale.
_instance_remove(instance3, client)
service = wait_state(client, service, 'active')
service = client.reload(service)
service = retry(lambda:
client.update(service, scale=4, name=service.name))
service = client.wait_success(service, 120)
_validate_service_instance_map_count(client, service, "active", 4)
# purge the instance1 w/o changing the service
# and validate instance1-service map is gone
instance1 = _instance_remove(instance1, client)
instance1 = client.wait_success(instance1.purge())
assert instance1.state == 'purged'
wait_for(lambda: len(client.
list_serviceExposeMap(serviceId=service.id,
instanceId=instance1.id)) == 0)
def test_service_rename(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service1 = client.wait_success(service1)
# activate service
service1.activate()
service1 = client.wait_success(service1, 120)
assert service1.state == "active"
_validate_compose_instance_start(client, service1, env, "1")
_validate_compose_instance_start(client, service1, env, "2")
# update name and validate that the service name got
# updated, all old instances weren't renamed,
# and the new instance got created with the new name
new_name = "newname"
service2 = client.update(service1, scale=3, name=new_name)
service2 = client.wait_success(service2)
assert service2.name == new_name
_validate_compose_instance_start(client, service1, env, "1")
_validate_compose_instance_start(client, service1, env, "2")
_validate_compose_instance_start(client, service2, env, "3")
def test_env_rename(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service_1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service_1 = client.wait_success(service_1)
service_2 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=1)
service_2 = client.wait_success(service_2)
# activate services
env = env.activateservices()
service_1 = client.wait_success(service_1, 120)
service_2 = client.wait_success(service_2, 120)
assert service_1.state == "active"
assert service_2.state == "active"
_validate_compose_instance_start(client, service_1, env, "1")
_validate_compose_instance_start(client, service_1, env, "2")
_validate_compose_instance_start(client, service_2, env, "1")
# update env name and validate that the
# env name got updated, but instances have old names
new_name = "newname"
env_updated = client.update(env, name=new_name)
env_updated = client.wait_success(env_updated)
assert env_updated.name == new_name
_validate_compose_instance_start(client, service_1, env, "1")
_validate_compose_instance_start(client, service_1, env, "2")
_validate_compose_instance_start(client, service_2, env, "1")
def test_validate_scale_down_restore_state(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=3)
service = client.wait_success(service)
assert service.state == "inactive"
# activate services
env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
instance1 = _validate_compose_instance_start(client, service, env, "1")
instance2 = _validate_compose_instance_start(client, service, env, "2")
instance3 = _validate_compose_instance_start(client, service, env, "3")
# stop the instances 1, 2 and destroy instance 3
client.wait_success(instance1.stop())
client.wait_success(instance2.stop())
_instance_remove(instance3, client)
# wait for reconcile
service = wait_state(client, service, 'active')
# scale down the service and validate that:
# first instance is running
# second instance is removed
# third instance is removed
service = client.update(service, scale=1, name=service.name)
service = wait_state(client, service, 'active')
# validate that only one service instance mapping exists
instance_service_map = client. \
list_serviceExposeMap(serviceId=service.id, state="active")
assert len(instance_service_map) == 1
def test_validate_labels(client, context):
env = _create_stack(client)
# create service1 with labels defined
service_name1 = random_str()
initial_labels1 = {'affinity': "container==B", '!affinity': "container==C"}
image_uuid = context.image_uuid
launch_config1 = {"imageUuid": image_uuid, "labels": initial_labels1}
service1 = client.create_service(name=service_name1,
environmentId=env.id,
launchConfig=launch_config1)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
assert service1.launchConfig.labels == initial_labels1
# create service2 w/o labels defined
service_name2 = random_str()
image_uuid = context.image_uuid
launch_config2 = {"imageUuid": image_uuid}
service2 = client.create_service(name=service_name2,
environmentId=env.id,
launchConfig=launch_config2)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
assert "labels" not in service2.launchConfig
# activate services
env.activateservices()
service1 = client.wait_success(service1, 120)
assert service1.state == "active"
service2 = client.wait_success(service2, 120)
assert service2.state == "active"
# check that labels defined in launch config + the internal label, are set
result_labels_1 = {'affinity': 'container==B', '!affinity': "container==C",
'io.rancher.stack.name': env.name,
'io.rancher.stack_service.name':
env.name + '/' + service_name1}
instance1 = _validate_compose_instance_start(client, service1, env, "1")
assert all(item in instance1.labels for item in result_labels_1) is True
# check that only one internal label is set
result_labels_2 = {'io.rancher.stack.name': env.name,
'io.rancher.stack_service.name':
env.name + '/' + service_name2}
instance2 = _validate_compose_instance_start(client, service2, env, "1")
assert all(item in instance2.labels for item in result_labels_2) is True
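# The asserted labels follow a fixed convention. A hedged helper sketch
# (illustrative only) for the system labels Rancher is expected to add to every
# service container:
def _expected_system_labels(env_name, service_name):
    return {'io.rancher.stack.name': env_name,
            'io.rancher.stack_service.name': env_name + '/' + service_name}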
def test_sidekick_destroy_instance(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"dataVolumesFromLaunchConfigs": ['secondary']}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
# activate service1
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
_validate_service_instance_map_count(client, service, "active", 2)
instance11 = _validate_compose_instance_start(client, service, env, "1")
instance12 = _validate_compose_instance_start(client,
service,
env, "1", "secondary")
# destroy primary instance and wait for the service to reconcile
_instance_remove(instance11, client)
service = wait_state(client, service, 'active')
_validate_service_instance_map_count(client, service, "active", 2)
instance11 = _validate_compose_instance_start(client, service, env, "1")
# validate that the secondary instance is still up and running
instance12 = client.reload(instance12)
assert instance12.state == 'running'
# destroy secondary instance and wait for the service to reconcile
_instance_remove(instance12, client)
service = wait_state(client, service, 'active')
_validate_service_instance_map_count(client, service, "active", 2)
_validate_compose_instance_start(client, service, env, "1")
_validate_compose_instance_start(client, service, env, "1", "secondary")
# validate that the primary instance was recreated
wait_for_condition(client, instance11, lambda x: x.removed is not None)
def test_sidekick_restart_instances(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
# activate service1
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
instance11 = _validate_compose_instance_start(client, service, env, "1")
_validate_compose_instance_start(client, service, env, "2")
_validate_compose_instance_start(client, service, env, "1", "secondary")
instance22 = _validate_compose_instance_start(client, service,
env, "2", "secondary")
_wait_until_active_map_count(service, 4, client)
    # stop instance11, destroy instance22 and call update on a service1
    # scale should be restored
client.wait_success(instance11.stop())
_instance_remove(instance22, client)
service = wait_state(client, service, 'active')
service = client.update(service, scale=2, name=service.name)
service = client.wait_success(service, 120)
_validate_compose_instance_start(client, service, env, "1")
_validate_compose_instance_start(client, service, env, "2")
_validate_compose_instance_start(client, service, env, "1", "secondary")
_validate_compose_instance_start(client, service, env, "2", "secondary")
_wait_until_active_map_count(service, 4, client)
def test_sidekick_scaleup(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=1,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
# activate service1, service 2 should be activated too
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
_validate_compose_instance_start(client, service, env, "1")
_validate_compose_instance_start(client, service, env, "1", "secondary")
# scale up service1, verify that the service 2 was scaled up and updated
service = client.update(service, scale=2, name=service.name)
_wait_compose_instance_start(client, service, env, "1")
_wait_compose_instance_start(client, service, env, "2")
_wait_compose_instance_start(client, service, env, "1")
_wait_compose_instance_start(client, service, env, "2")
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == 2
instance_service_map1 = client. \
list_serviceExposeMap(serviceId=service.id, state="active")
assert len(instance_service_map1) == 4
def _validate_service_ip_map_removed(client, service, ip):
def wait_for_map_count(service):
m = client. \
list_serviceExposeMap(serviceId=service.id, ipAddress=ip)
return len(m) == 0
wait_for_condition(client, service, wait_for_map_count)
def _validate_service_ip_map(client, service, ip, state):
def wait_for_map_count(service):
m = client. \
list_serviceExposeMap(serviceId=service.id, ipAddress=ip,
state=state)
return len(m) >= 1
wait_for(lambda: wait_for_condition(client, service,
wait_for_map_count))
return client. \
list_serviceExposeMap(serviceId=service.id, state=state)
def _validate_service_instance_map_count(client, service, state, count):
def wait_for_map_count(service):
m = client. \
list_serviceExposeMap(serviceId=service.id, state=state)
return len(m) >= count
wait_for(lambda: wait_for_condition(client, service,
wait_for_map_count))
return client. \
list_serviceExposeMap(serviceId=service.id, state=state)
def _validate_service_hostname_map_removed(super_client, service, host_name):
def wait_for_map_count(service):
m = super_client. \
list_serviceExposeMap(serviceId=service.id)
m = [x for x in m if x.hostName == host_name]
return len(m) == 0
wait_for_condition(super_client, service, wait_for_map_count)
def _validate_service_hostname_map(client, service, host_name, state):
def wait_for_map_count(service):
m = client. \
list_serviceExposeMap(serviceId=service.id,
hostname=host_name, state=state)
return len(m) >= 1
wait_for(lambda: wait_for_condition(client, service,
wait_for_map_count))
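# The expose-map validators above all poll list_serviceExposeMap with different
# filters (state, ipAddress, hostname). A generic sketch of that shared shape,
# kept separate so the originals stay untouched:
def _wait_expose_map_count(client, service, expected, **filters):
    def check(svc):
        maps = client.list_serviceExposeMap(serviceId=svc.id, **filters)
        return len(maps) >= expected
    wait_for(lambda: wait_for_condition(client, service, check))
    return client.list_serviceExposeMap(serviceId=service.id, **filters)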
def test_external_service_w_ips(client, context):
env = _create_stack(client)
# create service1 as a regular service
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
# create service 2 as external
ips = ["72.22.16.5", '192.168.0.10']
service2 = client.create_externalService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
externalIpAddresses=ips)
service2 = client.wait_success(service2)
# activate services
env.activateservices()
service1 = client.wait_success(service1)
assert service1.state == 'active'
service2 = client.wait_success(service2)
assert service2.state == 'active'
assert service2.externalIpAddresses == ips
_validate_service_ip_map(client, service2, "72.22.16.5", "active")
_validate_service_ip_map(client, service2, "192.168.0.10", "active")
# deactivate external service
service2 = client.wait_success(service2.deactivate())
assert service2.state == "inactive"
_validate_service_ip_map_removed(client, service2, "72.22.16.5")
_validate_service_ip_map_removed(client, service2, "192.168.0.10")
# activate external service again
service2 = client.wait_success(service2.activate())
assert service2.state == "active"
_validate_service_ip_map(client, service2, "72.22.16.5", "active")
_validate_service_ip_map(client, service2, "192.168.0.10", "active")
# add one extra ip address
ips = ["72.22.16.5", '192.168.0.10', '10.1.1.1']
service2 = client.update(service2, externalIpAddresses=ips)
service2 = client.wait_success(service2, 120)
assert len(service2.externalIpAddresses) == 3
_validate_service_ip_map(client, service2, "72.22.16.5", "active")
_validate_service_ip_map(client, service2, "192.168.0.10", "active")
_validate_service_ip_map(client, service2, "10.1.1.1", "active")
# remove 2 ips from the list, and add one new
ips = ["72.22.16.5", '50.255.37.17']
service2 = client.update(service2, externalIpAddresses=ips)
service2 = client.wait_success(service2, 120)
assert len(service2.externalIpAddresses) == 2
_validate_service_ip_map(client, service2, "72.22.16.5", "active")
_validate_service_ip_map(client, service2, "50.255.37.17", "active")
# remove external service
service2 = client.wait_success(service2.remove())
assert service2.removed is not None
def test_external_service_w_hostname(super_client, client, context):
env = _create_stack(client)
# create service1 as a regular service
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service1 = client.wait_success(service1)
# create service 2 as external
service2 = client.create_externalService(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
hostname="a.com")
service2 = client.wait_success(service2)
# activate services
env.activateservices()
service1 = client.wait_success(service1)
assert service1.state == 'active'
service2 = client.wait_success(service2)
assert service2.state == 'active'
assert service2.hostname == "a.com"
_validate_service_hostname_map(client, service2, "a.com", "active")
# deactivate external service
service2 = client.wait_success(service2.deactivate())
assert service2.state == "inactive"
_validate_service_hostname_map_removed(super_client, service2, "a.com")
# activate external service again
service2 = client.wait_success(service2.activate())
assert service2.state == "active"
_validate_service_hostname_map(client, service2, "a.com", "active")
# change hostname
service2 = client.update(service2, hostname="b.com")
service2 = client.wait_success(service2, 120)
assert service2.hostname == "b.com"
_validate_service_hostname_map(client, service2, "b.com", "active")
_validate_service_hostname_map_removed(super_client, service2, "a.com")
# remove external service
service2 = client.wait_success(service2.remove())
assert service2.removed is not None
def test_global_service(new_context):
client = new_context.client
host1 = new_context.host
host2 = register_simulated_host(new_context)
# add labels to the hosts
labels = {'group': 'web', 'subgroup': 'Foo'}
host1 = client.update(host1, labels=labels)
host2 = client.update(host2, labels=labels)
# create environment and services
env = _create_stack(client)
image_uuid = new_context.image_uuid
launch_config = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.scheduler.global': 'true',
'io.rancher.scheduler.affinity:host_label':
'group=Web,subgroup=foo'
}
}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. verify that the instance was started on host1
instance1 = _validate_compose_instance_start(client, service, env, "1")
instance1_host = instance1.hosts()[0].id
# 3. verify that the instance was started on host2
instance2 = _validate_compose_instance_start(client, service, env, "2")
instance2_host = instance2.hosts()[0].id
assert instance1_host != instance2_host
service.deactivate()
def test_global_service_update_label(new_context):
client = new_context.client
host1 = new_context.host
host2 = register_simulated_host(new_context)
# add labels to the hosts
labels = {'group': 'web'}
host1 = client.update(host1, labels=labels)
# create environment and services
env = _create_stack(client)
image_uuid = new_context.image_uuid
launch_config = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.scheduler.global': 'true',
'io.rancher.scheduler.affinity:host_label': 'group=web'
}
}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. verify that the instance was started on host1
instance1 = _validate_compose_instance_start(client, service, env, "1")
instance1_host = instance1.hosts()[0].id
assert instance1_host == host1.id
# verify 2nd instance isn't running
assert len(client.list_container(
name=env.name + "-" + service.name + "-2")) == 0
# update host2 with label group=web
host2 = client.wait_success(client.update(host2, labels=labels))
service = client.wait_success(service)
# wait for 2nd instance to start up
wait_for(
lambda: len(client.list_container(
name=env.name + "-" + service.name + "-2",
state="running")) > 0
)
instance2 = _validate_compose_instance_start(client, service, env, "2")
# confirm 2nd instance is on host2
instance2_host = instance2.hosts()[0].id
assert instance2_host == host2.id
# destroy the instance, reactivate the service and check
# both hosts got instances
_instance_remove(instance1, client)
service = wait_state(client, service.deactivate(), 'inactive')
service = wait_state(client, service.activate(), 'active')
instance1 = _validate_compose_instance_start(client, service, env, "1")
instance2 = _validate_compose_instance_start(client, service, env, "2")
instance1_host = instance1.hosts()[0].id
assert instance1_host == host1.id or instance1_host == host2.id
assert instance1.hosts()[0].id != instance2.hosts()[0].id
service.deactivate()
def test_global_add_host(new_context):
client = new_context.client
host1 = new_context.host
# create environment and services
env = _create_stack(client)
image_uuid = new_context.image_uuid
launch_config = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.scheduler.global': 'true'
}
}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
assert service.state == "inactive"
# 1. verify that the service was activated
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. verify that the instance was started on host1
instance1 = _validate_compose_instance_start(client,
service, env, "1")
instance1_host = instance1.hosts()[0].id
assert instance1_host == host1.id
# register new host
host2 = register_simulated_host(new_context)
# wait for 2nd instance to start up
wait_for(
lambda: len(client.list_container(
name=env.name + "-" + service.name + "-2",
state="running")) > 0
)
instance2 = _validate_compose_instance_start(client,
service, env, "2")
# confirm 2nd instance is on host2
instance2_host = instance2.hosts()[0].id
assert instance2_host == host2.id
service.deactivate()
def test_svc_container_reg_cred_and_image(super_client, client):
server = 'server{0}.io'.format(random_num())
registry = client.create_registry(serverAddress=server,
name=random_str())
registry = client.wait_success(registry)
reg_cred = client.create_registry_credential(
registryId=registry.id,
email='[email protected]',
publicValue='rancher',
secretValue='rancher')
registry_credential = client.wait_success(reg_cred)
name = server + '/rancher/authorized:latest'
image_uuid = 'docker:' + name
env = _create_stack(client)
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=1)
service = client.wait_success(service)
service.activate()
service = client.wait_success(service, 120)
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + "1")
assert len(instances) == 1
container = instances[0]
container = super_client.wait_success(container)
assert container.registryCredentialId == registry_credential.id
image = container.image()
assert image.name == name
assert image.registryCredentialId == registry_credential.id
def test_network_from_service(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid, "networkMode": 'container',
"networkLaunchConfig": "secondary"}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
assert len(service.secondaryLaunchConfigs) == 1
assert service.launchConfig.networkMode == 'container'
assert service.secondaryLaunchConfigs[0].networkMode == 'managed'
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# 2. validate instances
s11_container = _validate_compose_instance_start(client, service, env, "1")
s12_container = _validate_compose_instance_start(client, service, env, "2")
s21_container = _validate_compose_instance_start(client, service,
env, "1", "secondary")
s22_container = _validate_compose_instance_start(client, service,
env, "2", "secondary")
assert s11_container.networkContainerId is not None
assert s12_container.networkContainerId is not None
assert s11_container.networkContainerId != s12_container.networkContainerId
assert s11_container.networkContainerId in [s21_container.id,
s22_container.id]
assert s11_container.networkMode == 'container'
assert s12_container.networkMode == 'container'
assert s21_container.networkMode == 'managed'
assert s22_container.networkMode == 'managed'
def _wait_compose_instance_start(client, service, env, number):
def wait_instance_state(service):
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + number,
state="running")
return len(instances) >= 1
wait_for_condition(client, service, wait_instance_state)
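# A minimal sketch of the polling pattern the wait helpers in this file rely
# on, using only the standard library (the real tests use wait_for /
# wait_for_condition from the shared fixtures; this stand-alone version is
# for illustration only).
def _poll_until(predicate, timeout=30, interval=0.5):
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise AssertionError('timed out waiting for condition')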
def test_service_affinity_rules(super_client, new_context):
register_simulated_host(new_context)
register_simulated_host(new_context)
client = new_context.client
env = _create_stack(client)
image_uuid = new_context.image_uuid
name = random_str()
service_name = "service" + name
# test anti-affinity
launch_config = {
"imageUuid": image_uuid,
"labels": {
"io.rancher.scheduler.affinity:container_label_ne":
"io.rancher.stack_service.name=" +
env.name + '/' + service_name
}
}
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=3)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# check that all containers are on different hosts
instances = _get_instance_for_service(super_client, service.id)
assert len(instances) == 3
assert instances[0].hosts()[0].id != instances[1].hosts()[0].id
assert instances[1].hosts()[0].id != instances[2].hosts()[0].id
assert instances[2].hosts()[0].id != instances[0].hosts()[0].id
# test soft-affinity
service_name2 = "service2" + name
launch_config2 = {
"imageUuid": image_uuid,
"labels": {
"io.rancher.scheduler.affinity:container_label_soft":
"io.rancher.stack_service.name=" +
"${stack_name}/${service_name}"
}
}
service2 = client.create_service(name=service_name2,
environmentId=env.id,
launchConfig=launch_config2,
scale=3)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
service2 = service2.activate()
service2 = client.wait_success(service2, 120)
assert service2.state == "active"
# check that all containers are on the same host
instances = _get_instance_for_service(super_client, service2.id)
assert len(instances) == 3
assert instances[0].hosts()[0].id == instances[1].hosts()[0].id
assert instances[1].hosts()[0].id == instances[2].hosts()[0].id
assert instances[2].hosts()[0].id == instances[0].hosts()[0].id
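# Sketch only: how the scheduler affinity labels used above are composed. The
# suffix selects the comparison ('_ne' for anti-affinity, '_soft' or
# '_soft_ne' for best-effort rules), and the value is a 'key=value' pair
# matched against container labels. The helper name is hypothetical.
def _container_label_affinity(comparison, key, value):
    label_key = 'io.rancher.scheduler.affinity:container_label' + comparison
    return {label_key: key + '=' + value}


# e.g. the anti-affinity rule from test_service_affinity_rules:
# _container_label_affinity('_ne', 'io.rancher.stack_service.name',
#                           env.name + '/' + service_name)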
def test_affinity_auto_prepend_stack(super_client, new_context):
register_simulated_host(new_context)
register_simulated_host(new_context)
client = new_context.client
env = _create_stack(client)
image_uuid = new_context.image_uuid
name = random_str()
service_name = "service" + name
# test anti-affinity
# only service_name is supplied.
# env/project/stack should be automatically prepended
launch_config = {
"imageUuid": image_uuid,
"labels": {
"io.rancher.scheduler.affinity:container_label_ne":
"io.rancher.stack_service.name=" +
service_name
}
}
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=3)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# check that all containers are on different hosts
instances = _get_instance_for_service(super_client, service.id)
assert len(instances) == 3
assert instances[0].hosts()[0].id != instances[1].hosts()[0].id
assert instances[1].hosts()[0].id != instances[2].hosts()[0].id
assert instances[2].hosts()[0].id != instances[0].hosts()[0].id
def test_affinity_auto_prepend_stack_other_service(super_client, new_context):
register_simulated_host(new_context)
client = new_context.client
env = _create_stack(client)
image_uuid = new_context.image_uuid
service_name1 = "service" + random_str()
service_name2 = "service" + random_str()
service1 = client.create_service(name=service_name1,
environmentId=env.id,
launchConfig={
"imageUuid": image_uuid,
},
scale=1)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service1 = client.wait_success(service1.activate(), 120)
assert service1.state == "active"
# test anti-affinity
# only service_name is supplied.
# env/project/stack should be automatically prepended
launch_config = {
"imageUuid": image_uuid,
"labels": {
"io.rancher.scheduler.affinity:container_label_ne":
"io.rancher.stack_service.name=" + service_name1
}
}
service2 = client.create_service(name=service_name2,
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
service2 = client.wait_success(service2.activate(), 120)
assert service2.state == "active"
# check that all containers are on different hosts
svc1_instance = _get_instance_for_service(super_client, service1.id)[0]
svc2_instances = _get_instance_for_service(super_client, service2.id)
assert len(svc2_instances) == 2
assert svc2_instances[0].hosts()[0].id != svc1_instance.hosts()[0].id
assert svc2_instances[1].hosts()[0].id != svc1_instance.hosts()[0].id
def test_affinity_auto_prepend_stack_same_service(super_client, new_context):
image_uuid = new_context.image_uuid
client = new_context.client
# create 2 containers on the default simulator host (call it host one)
# so that the default scheduler would otherwise favor the second host
# for both containers of the same service - a trap for the anti-affinity rule
cs = client.create_container(imageUuid=image_uuid,
count=2)
assert len(cs) == 2
for c in cs:
c = super_client.wait_success(c)
assert c.state == 'running'
# create a second host, we call it two
register_simulated_host(new_context)
env = _create_stack(client)
service_name = "service"
# use upper case for the service name in the label: cattle's logic for
# prepending the stack name should be case insensitive
label_service_name = "SERVICE"
# test anti-affinity
# only service_name is supplied.
# env/project/stack should be automatically prepended
launch_config = {
"imageUuid": image_uuid,
"labels": {
"io.rancher.scheduler.affinity:container_label_ne":
"io.rancher.stack_service.name=" + label_service_name
}
}
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
# check that all containers are on different hosts
instances = _get_instance_for_service(super_client, service.id)
assert len(instances) == 2
assert instances[0].hosts()[0].id != instances[1].hosts()[0].id
def test_anti_affinity_sidekick(new_context):
register_simulated_host(new_context)
client = new_context.client
env = _create_stack(client)
image_uuid = new_context.image_uuid
name = random_str()
service_name = "service" + name
# only service name is provided.
# stack name should be prepended and secondaryLaunchConfig
# should automatically be appended for the sidekick
# containers
launch_config = {
"imageUuid": image_uuid,
"labels": {
"io.rancher.sidekicks": "secondary",
"io.rancher.scheduler.affinity:container_label_ne":
"io.rancher.stack_service.name=" +
service_name
}
}
secondary_lc = {
"imageUuid": image_uuid,
"name": "secondary"
}
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=2,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
# activate service1
service.activate()
service = client.wait_success(service, 120)
assert service.state == "active"
_validate_compose_instance_start(client, service, env, "1")
_validate_compose_instance_start(client, service, env, "2")
_validate_compose_instance_start(client, service, env, "1", "secondary")
_validate_compose_instance_start(client, service, env, "2", "secondary")
instance_service_map1 = client. \
list_serviceExposeMap(serviceId=service.id, state="active")
assert len(instance_service_map1) == 4
def test_host_delete_reconcile_service(super_client, new_context):
register_simulated_host(new_context)
client = new_context.client
env = _create_stack(client)
image_uuid = new_context.image_uuid
name = random_str()
service_name = "service" + name
stack_svc_name = env.name + "/" + service_name
launch_config = {
"imageUuid": image_uuid,
"labels": {
"io.rancher.scheduler.affinity:container_label_soft_ne":
"io.rancher.stack_service.name=" + stack_svc_name
}
}
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=2)
service = client.wait_success(service)
assert service.state == "inactive"
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
instance1 = _validate_compose_instance_start(client,
service, env, "1")
instance2 = _validate_compose_instance_start(client,
service, env, "2")
instance1_host = instance1.hosts()[0]
instance2_host = instance2.hosts()[0]
assert instance1_host.id != instance2_host.id
# remove host2
instance2_host = super_client.wait_success(instance2_host.deactivate())
instance2_host = super_client.delete(instance2_host)
super_client.wait_success(instance2_host)
# check that service is reconciled and instance2 gets recreated
# on host1.
wait_for(
lambda: len(client.list_container(
name=env.name + "-" + service.name + "-2",
state="running")) > 0
)
instance2 = client.list_container(
name=env.name + "-" + service.name + "-2",
state="running")[0]
instance2_host = instance2.hosts()[0]
assert instance1_host.id == instance2_host.id
service = client.wait_success(service.deactivate(), 120)
def test_export_config(client, context):
env = _create_stack(client)
# test:
# cpuSet
# global vs scale
image_uuid = context.image_uuid
labels = {'io.rancher.scheduler.global': 'true',
'io.rancher.service.hash': '088b54be-2b79-99e30b3a1a24'}
metadata = {"io.rancher.service.hash": "088b54be-2b79-99e30b3a1a24",
"$bar": {"metadata": [{"$id$$foo$bar$$": "${HOSTNAME}"}]}}
restart_policy = {"maximumRetryCount": 2, "name": "on-failure"}
launch_config = {"imageUuid": image_uuid,
"cpuSet": "0,1", "labels": labels,
"restartPolicy": restart_policy,
"pidMode": "host",
"memory": 1048576,
"memorySwap": 2097152,
"memoryReservation": 4194304,
"milliCpuReservation": 1000,
"devices": ["/dev/sdc:/dev/xsdc:rwm"],
"logConfig": {"config": {"labels": "foo"},
"driver": "json-file"},
"blkioWeight": 100,
"cpuPeriod": 10000,
"cpuQuota": 20000,
"memorySwappiness": 50,
"oomScoreAdj": 500,
"shmSize": 67108864,
"uts": "host",
"ipcMode": "host",
"stopSignal": "SIGTERM",
"groupAdd": "root",
"cgroupParent": "parent",
"extraHosts": ["host1", "host2"],
"securityOpt": ["sopt1", 'sopt2'],
"readOnly": True,
"oomKillDisable": True,
"isolation": "hyper-v",
"dnsOpt": ["opt"],
"dnsSearch": ["192.168.1.1"],
"cpuShares": 100,
"blkioDeviceOptions": {
'/dev/sda': {
'readIops': 1000,
'writeIops': 2000,
},
'/dev/null': {
'readBps': 3000,
'writeBps': 3000,
'weight': 3000,
}
},
"tmpfs": {"/run": "rw"},
"ulimits": [{"name": "cpu", "soft": 1234, "hard": 1234},
{"name": "nporc", "soft": 1234}]
}
service = client. \
create_service(name="web",
environmentId=env.id,
launchConfig=launch_config,
metadata=metadata,
retainIp=True)
service = client.wait_success(service)
compose_config = env.exportconfig()
labels = {'io.rancher.scheduler.global': 'true'}
assert compose_config is not None
docker_yml = yaml.load(compose_config.dockerComposeConfig)
svc = docker_yml['services'][service.name]
assert svc['cpuset'] == "0,1"
assert svc['labels'] == labels
assert "restart" not in svc
assert svc["logging"] is not None
assert svc["logging"]["driver"] == "json-file"
assert svc["logging"]["options"] is not None
assert svc["pid"] == "host"
assert svc["mem_limit"] == 1048576
assert svc["memswap_limit"] == 2097152
assert svc["mem_reservation"] == 4194304
assert svc["devices"] is not None
assert svc["blkio_weight"] == 100
assert svc["cpu_period"] == 10000
assert svc["cpu_quota"] == 20000
assert svc["mem_swappiness"] == 50
assert svc["oom_score_adj"] == 500
assert svc["shm_size"] == 67108864
assert svc["uts"] == "host"
assert svc["ipc"] == "host"
assert svc["stop_signal"] == "SIGTERM"
assert svc["cgroup_parent"] == "parent"
assert svc["extra_hosts"] == ["host1", "host2"]
assert svc["security_opt"] == ["sopt1", "sopt2"]
assert svc["read_only"]
assert svc["oom_kill_disable"]
assert svc["isolation"] == "hyper-v"
assert svc["dns_opt"] == ["opt"]
assert svc["dns_search"] == ["192.168.1.1"]
assert svc["cpu_shares"] == 100
assert svc["device_read_iops"] == {"/dev/sda": 1000}
assert svc["device_write_iops"] == {"/dev/sda": 2000}
assert svc["device_read_bps"] == {"/dev/null": 3000}
assert svc["device_write_bps"] == {"/dev/null": 3000}
assert svc["blkio_weight_device"] == {"/dev/null": 3000}
assert svc["tmpfs"] == ["/run:rw"]
assert svc["ulimits"] == {"cpu": {"hard": 1234, "soft": 1234},
"nporc": 1234}
rancher_yml = yaml.load(compose_config.rancherComposeConfig)
svc = rancher_yml['services'][service.name]
assert 'scale' not in svc
updated = {"$$id$$$$foo$$bar$$$$": "$${HOSTNAME}"}
metadata = {"$$bar": {"metadata": [updated]}}
assert svc['metadata'] is not None
assert svc['metadata'] == metadata
assert svc['retain_ip'] is True
assert svc["milli_cpu_reservation"] == 1000
launch_config_without_log = {"imageUuid": image_uuid,
"cpuSet": "0,1", "labels": labels,
"restartPolicy": restart_policy}
service_nolog = client. \
create_service(name="web-nolog",
environmentId=env.id,
launchConfig=launch_config_without_log,
metadata=metadata,
retainIp=True)
service_nolog = client.wait_success(service_nolog)
compose_config = env.exportconfig()
labels = {'io.rancher.scheduler.global': 'true'}
assert compose_config is not None
docker_yml = yaml.load(compose_config.dockerComposeConfig)
svc = docker_yml['services'][service_nolog.name]
assert "logging" not in svc
def test_validate_create_only_containers(client, context):
env = _create_stack(client)
labels = {"io.rancher.container.start_once": "true"}
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid, "labels": labels}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=3)
service = client.wait_success(service)
assert service.state == "inactive"
# activate services
env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
instance1 = _wait_for_compose_instance_start(client, service, env, "1")
_wait_for_compose_instance_start(client, service, env, "2")
instance3 = _wait_for_compose_instance_start(client, service, env, "3")
# stop instance1 and destroy instance 3
client.wait_success(instance1.stop())
_instance_remove(instance3, client)
# wait for reconcile
_wait_until_active_map_count(service, 3, client)
service = client.wait_success(service)
assert service.state == "active"
# validate that instance1 remains in stopped state,
# and instance 3 was recreated
instance1 = client.reload(instance1)
assert instance1.state == 'stopped'
_wait_for_compose_instance_start(client, service, env, "3")
# check that the service never went to an updating state and remains active:
# the wait below is expected to time out because the state never becomes
# 'updating-active'
updated = True
try:
wait_for_condition(
client, service, lambda x: x.state == 'updating-active',
lambda x: 'State is: ' + x.state, 5)
except Exception:
updated = False
assert updated is False
# destroy instance from stopped state, and validate it was recreated
_instance_remove(instance1, client)
_wait_until_active_map_count(service, 3, client)
service = client.wait_success(service)
assert service.state == "active"
_wait_for_compose_instance_start(client, service, env, "1")
def test_indirect_ref_sidekick_destroy_instance(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"dataVolumesFromLaunchConfigs": ['secondary']}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary",
"dataVolumesFromLaunchConfigs": ['secondary1']}
secondary_lc1 = {"imageUuid": image_uuid, "name": "secondary1"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc,
secondary_lc1])
service = client.wait_success(service)
# activate service
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
instance11 = _validate_compose_instance_start(client, service, env, "1")
instance12 = _validate_compose_instance_start(client,
service,
env, "1", "secondary")
instance13 = _validate_compose_instance_start(client,
service,
env, "1", "secondary1")
_wait_until_active_map_count(service, 3, client)
# destroy secondary1 instance and wait for the service to reconcile
_instance_remove(instance13, client)
service = client.wait_success(service)
_validate_compose_instance_start(client, service, env, "1")
_validate_compose_instance_start(client, service, env, "1", "secondary")
_validate_compose_instance_start(client, service, env, "1", "secondary1")
_wait_until_active_map_count(service, 3, client)
# validate that the primary and secondary instances got recreated
wait_for_condition(client, instance11, lambda x: x.removed is not None)
wait_for_condition(client, instance12, lambda x: x.removed is not None)
def test_validate_hostname_override(client, context):
# create environment and services
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config1 = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.container.hostname_override': 'container_name'
}
}
service1 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config1)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service1 = client.wait_success(service1.activate())
assert service1.state == "active"
instance1 = _validate_compose_instance_start(client, service1, env, "1")
# validate the hostname was overridden with the instance name
assert instance1.hostname == instance1.name
# use case 2 - validate that even a passed hostname gets overridden
launch_config2 = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.container.hostname_override': 'container_name',
"hostname": "test"
}
}
service2 = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config2)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
service2 = client.wait_success(service2.activate())
assert service2.state == "active"
instance2 = _validate_compose_instance_start(client, service2, env, "1")
# validate the hostname was overridden with the instance name
assert instance2.hostname == instance2.name
def test_validate_long_hostname_override(client, context):
# create environment and services
env = _create_stack_long_name(client, "MyLongerStackNameCausingIssue")
image_uuid = context.image_uuid
launch_config1 = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.container.hostname_override': 'container_name'
}
}
first_service_name = "MyServiceNameLongerThanDNSPrefixLengthAllowed"
service1 = client.create_service(name=first_service_name,
environmentId=env.id,
launchConfig=launch_config1)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service1 = client.wait_success(service1.activate())
assert service1.state == "active"
instance1 = _validate_compose_instance_start(client, service1, env, "1")
# validate the hostname was overridden with the truncated
# instance name - total length should be 64
trunc_name = "MyLongerStackNameCausingIssue-" \
"MyServiceNameLongerThanDNSPrefix-1"
assert instance1.hostname == trunc_name
# use case 2 - validate that even a passed hostname
# gets overridden by the truncated instance name
launch_config2 = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.container.hostname_override': 'container_name',
"hostname": "test"
}
}
second_service_name = "SecondServiceNameLongerThanDNSPrefixLengthAllowed"
service2 = client.create_service(name=second_service_name,
environmentId=env.id,
launchConfig=launch_config2)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
service2 = client.wait_success(service2.activate())
assert service2.state == "active"
instance2 = _validate_compose_instance_start(client, service2, env, "1")
# validate the hostname was overridden with the truncated instance name
trunc_name2 = "MyLongerStackNameCausingIssue-" \
"SecondServiceNameLongerThanDNSPr-1"
assert instance2.hostname == trunc_name2
def test_validate_long_hostname_with_domainname_override(client, context):
# create environment and services
env = _create_stack_long_name(client, "MySecondStackNameCausingIssue")
image_uuid = context.image_uuid
launch_config1 = {
"imageUuid": image_uuid,
"domainName": "rancher.io",
"labels": {
'io.rancher.container.hostname_override': 'container_name'
}
}
first_service_name = "MyServiceNameLongerThanDNSPrefixLength" \
"AllowedMyServiceNameLonge"
service1 = client.create_service(name=first_service_name,
environmentId=env.id,
launchConfig=launch_config1)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service1 = client.wait_success(service1.activate())
assert service1.state == "active"
instance1 = _validate_compose_instance_start(client, service1, env, "1")
# validate the hostname was overridden with the truncated
# instance name - total length (including domain) should be 64
trunc_name = "MySecondStackNameCausingIssue-" \
"MyServiceNameLongerTh-1"
assert instance1.hostname == trunc_name
# use case 2 - validate that even a passed hostname
# gets overridden by the truncated instance name
launch_config2 = {
"imageUuid": image_uuid,
"domainName": "rancher.io",
"labels": {
'io.rancher.container.hostname_override': 'container_name',
"hostname": "test"
}
}
second_service_name = "SecondServiceNameLongerThanDNSPrefixLengthAllowed"
service2 = client.create_service(name=second_service_name,
environmentId=env.id,
launchConfig=launch_config2)
service2 = client.wait_success(service2)
assert service2.state == "inactive"
service2 = client.wait_success(service2.activate())
assert service2.state == "active"
instance2 = _validate_compose_instance_start(client, service2, env, "1")
# validate the hostname was overridden with the truncated instance name
trunc_name2 = "MySecondStackNameCausingIssue-" \
"SecondServiceNameLong-1"
assert instance2.hostname == trunc_name2
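# Sketch only: a reconstruction of the truncation rule the two tests above
# assert on (it is not cattle's actual implementation). The service-name
# segment appears to be shortened so that "<stack>-<service>-<index>", plus
# ".<domain>" when a domainName is set, fits within 64 characters; this
# reproduces all four expected hostnames checked above.
def _expected_override_hostname(stack, service, index, domain=None, limit=64):
    suffix = '-' + str(index)
    budget = limit - len(stack) - 1 - len(suffix)
    if domain:
        budget -= len(domain) + 1
    return stack + '-' + service[:budget] + suffix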
def test_vip_service(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
init_labels = {'io.rancher.network.services': "vipService"}
launch_config = {"imageUuid": image_uuid, "labels": init_labels}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
assignServiceIpAddress=True)
service = client.wait_success(service)
assert service.state == "inactive"
assert service.vip is not None
assert IPAddress(service.vip) in IPNetwork("169.254.64.0/18")
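# IPAddress/IPNetwork used above are presumably imported from netaddr
# elsewhere in this file; an equivalent standard-library check, for
# illustration only:
def _in_vip_range(ip, cidr=u'169.254.64.0/18'):
    import ipaddress
    return ipaddress.ip_address(u'%s' % ip) in ipaddress.ip_network(cidr)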
def test_vip_requested_ip(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
vip = "169.254.65.30"
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
vip=vip)
service = client.wait_success(service)
assert service.state == "inactive"
assert service.vip is not None
assert service.vip == vip
def test_validate_scaledown_updating(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=3)
service = client.wait_success(service)
assert service.state == "inactive"
# activate service
env.activateservices()
service = client.wait_success(service)
assert service.state == "active"
# change scale two times in a row
service = client.update(service, scale=10, name=service.name)
def wait():
s = client.reload(service)
return s.scale == 10
wait_for(wait)
service = client.update(service, scale=1, name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
wait_for(lambda: client.reload(service).scale == 1)
_wait_until_active_map_count(service, 1, client)
def test_stop_network_from_container(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid, "networkMode": 'container',
"networkLaunchConfig": "secondary"}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=1,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
assert len(service.secondaryLaunchConfigs) == 1
assert service.launchConfig.networkMode == 'container'
assert service.secondaryLaunchConfigs[0].networkMode == 'managed'
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
s11_container = _validate_compose_instance_start(client, service, env, "1")
s21_container = _validate_compose_instance_start(client, service,
env, "1", "secondary")
s11_container = super_client.reload(s11_container)
init_start_count = s11_container.startCount
assert init_start_count is not None
assert s11_container.networkContainerId is not None
assert s11_container.networkContainerId == s21_container.id
# stop the s21 container, wait till it is running again,
# and validate that s11 was restarted as well
s21_container = s21_container.stop()
client.wait_success(s21_container)
wait_for(lambda: client.reload(s21_container).state == 'running')
wait_for(
lambda:
super_client.reload(s11_container).startCount > init_start_count
)
# restart s21 container, and validate s11 was restarted as well
init_start_count = super_client.reload(s11_container).startCount
s21_container = client.reload(s21_container).restart()
client.wait_success(s21_container)
wait_for(
lambda:
super_client.reload(s11_container).startCount > init_start_count
)
init_start_count = super_client.reload(s11_container).startCount
def test_remove_network_from_container(client, context, super_client):
env = _create_stack(client)
svc_name = random_str()
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid, "networkMode": 'container'}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary",
"networkLaunchConfig": svc_name}
service = client.create_service(name=svc_name,
environmentId=env.id,
launchConfig=launch_config,
scale=1,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
assert len(service.secondaryLaunchConfigs) == 1
assert service.launchConfig.networkMode == 'container'
assert service.secondaryLaunchConfigs[0].networkMode == 'managed'
service = client.wait_success(service.activate(), 120)
assert service.state == "active"
s11_container = _validate_compose_instance_start(client, service, env, "1")
s21_container = _validate_compose_instance_start(client, service,
env, "1", "secondary")
s11_container = super_client.reload(s11_container)
init_start_count = s11_container.startCount
assert init_start_count is not None
assert s21_container.networkContainerId is not None
assert s21_container.networkContainerId == s11_container.id
# remove s11 container, and validate s21 was removed as well
_instance_remove(s11_container, client)
wait_for_condition(
client, s21_container, _resource_is_removed,
lambda x: 'State is: ' + x.state)
service = client.wait_success(service)
_wait_until_active_map_count(service, 2, client)
def test_metadata(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
metadata = {"bar": {"people": [{"id": 0}]}}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
metadata=metadata)
service = client.wait_success(service)
assert service.metadata == metadata
# Wait until unhealthy to avoid a race condition
def wait_unhealthy():
svc = client.reload(service)
if svc.healthState == 'unhealthy':
return svc
else:
return None
service = wait_for(wait_unhealthy)
metadata = {"bar1": {"foo1": [{"id": 0}]}}
service = client.update(service, metadata=metadata)
assert service.metadata == metadata
def test_env_external_id(client):
env = client.create_environment(name='env-' + random_str(),
externalId='something')
assert env.externalId == 'something'
def test_sidekick_labels_merge(new_context):
client = new_context.client
host1 = register_simulated_host(new_context)
labels = {'group': 'web', 'subgroup': 'Foo'}
client.update(host1, labels=labels)
env = _create_stack(client)
image_uuid = new_context.image_uuid
labels = {'foo': "bar"}
affinity_labels = {'io.rancher.scheduler.affinity:host_label':
'group=Web,subgroup=foo'}
labels.update(affinity_labels)
launch_config = {"imageUuid": image_uuid, "labels": labels}
secondary_labels = {'bar': "foo"}
secondary_labels.update(affinity_labels)
secondary_lc = {"imageUuid": image_uuid,
"name": "secondary", "labels": secondary_labels}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
service = client.wait_success(service)
service = wait_state(client, service.activate(), 'active')
primary = _validate_compose_instance_start(client, service, env, "1")
secondary = _validate_compose_instance_start(client, service, env, "1",
"secondary")
# note: iterating a dict yields its keys, so the checks below verify which
# label keys ended up on each container rather than the label values
assert all(item in primary.labels for item in labels) is True
assert all(item in secondary.labels for item in secondary_labels) is True
assert all(item in primary.labels for item in secondary_labels) is False
assert all(item in secondary.labels for item in labels) is False
assert all(item in primary.labels for item in affinity_labels) is True
assert all(item in secondary.labels for item in affinity_labels) is True
def test_service_restart(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2,
secondaryLaunchConfigs=[secondary_lc])
svc = client.wait_success(svc)
svc = client.wait_success(svc.activate(), 120)
assert svc.state == "active"
# get initial start count for all the instances
instances = []
for exposeMap in svc.serviceExposeMaps():
instances.append(client.reload(exposeMap.instance()))
# restart service
svc = client. \
wait_success(svc.restart(rollingRestartStrategy={}), 120)
assert svc.state == 'active'
for instance in instances:
old = instance.startCount
new = client.reload(instance).startCount
assert new > old
wait_for(lambda: client.reload(svc).healthState == 'healthy')
wait_for(lambda: client.reload(env).healthState == 'healthy')
def _validate_endpoint(endpoints, public_port, host, service=None,
bind_addr=None):
if bind_addr:
host_ip = bind_addr
else:
host_ip = host.ipAddresses()[0].address
found = False
for endpoint in endpoints:
if host_ip == endpoint.ipAddress:
if endpoint.port == public_port \
and endpoint.hostId == host.id \
and endpoint.instanceId is not None:
if service is not None:
if endpoint.serviceId == service.id:
found = True
else:
found = True
break
assert found is True, "Can't find endpoint for " \
+ host_ip + ":" + str(public_port)
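# Hypothetical usage example for _validate_endpoint, with namedtuple
# stand-ins for the gdapi host/endpoint resources (illustration only; in the
# tests the real objects come from the API client).
def _example_validate_endpoint():
    from collections import namedtuple
    Addr = namedtuple('Addr', 'address')
    Host = namedtuple('Host', 'id ipAddresses')
    Endpoint = namedtuple('Endpoint', 'ipAddress port hostId instanceId')
    host = Host(id='1h1', ipAddresses=lambda: [Addr(address='10.0.0.5')])
    endpoint = Endpoint(ipAddress='10.0.0.5', port=8080,
                        hostId='1h1', instanceId='1i1')
    _validate_endpoint([endpoint], 8080, host)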
def test_random_ports(new_context):
client = new_context.client
new_context.host
register_simulated_host(new_context)
env = _create_stack(client)
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid, "ports": ['6666', '7775']}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=2)
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = client.wait_success(svc.activate())
assert svc.state == "active"
c1 = _wait_for_compose_instance_start(client, svc, env, "1")
c2 = _wait_for_compose_instance_start(client, svc, env, "2")
port11 = c1.ports_link()[0]
port12 = c1.ports_link()[1]
port21 = c2.ports_link()[0]
port22 = c2.ports_link()[1]
assert port11.publicPort is not None
assert port12.publicPort is not None
assert port21.publicPort is not None
assert port22.publicPort is not None
assert port11.publicPort != port12.publicPort
assert port11.publicPort == port21.publicPort
assert 49153 <= port11.publicPort <= 65535
assert 49153 <= port12.publicPort <= 65535
assert 49153 <= port21.publicPort <= 65535
assert 49153 <= port22.publicPort <= 65535
def test_random_ports_sidekicks(new_context):
client = new_context.client
new_context.host
register_simulated_host(new_context)
env = _create_stack(client)
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid, "ports": ['6666', '7775']}
secondary_lc = {"imageUuid": image_uuid,
"name": "secondary", "ports": ['6666']}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = client.wait_success(svc.activate())
assert svc.state == "active"
c1 = _wait_for_compose_instance_start(client, svc, env, "1")
c2 = _validate_compose_instance_start(client, svc,
env, "1", "secondary")
port1 = c1.ports_link()[0]
port2 = c2.ports_link()[0]
assert 49153 <= port1.publicPort <= 65535
assert 49153 <= port2.publicPort <= 65535
def test_random_ports_static_port(new_context):
client = new_context.client
new_context.host
register_simulated_host(new_context)
env = _create_stack(client)
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid, "ports": ['6666:7775']}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = client.wait_success(svc.activate())
assert svc.state == "active"
c1 = _wait_for_compose_instance_start(client, svc, env, "1")
port11 = c1.ports_link()[0]
assert port11.publicPort == 6666
assert port11.privatePort == 7775
def test_project_random_port_update_create(new_context):
client = new_context.client
user_client = new_context.user_client
new_context.host
register_simulated_host(new_context)
env = _create_stack(client)
image_uuid = new_context.image_uuid
ports = ['6666', '7775', '776']
launch_config = {"imageUuid": image_uuid, "ports": ports}
# update the port
new_range = {"startPort": 65533, "endPort": 65535}
p = user_client.update(new_context.project,
servicesPortRange=new_range)
p = user_client.wait_success(p)
assert p.servicesPortRange.startPort == new_range['startPort']
assert p.servicesPortRange.endPort == new_range['endPort']
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
svc = client.wait_success(svc.activate())
c = _wait_for_compose_instance_start(client, svc, env, "1")
port = c.ports_link()[0]
assert port.publicPort is not None
assert 65533 <= port.publicPort <= 65535
# try to create service with more ports
# requested than random range can provide - should not be allowed
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
def condition(x):
return 'Not enough environment ports' in x.transitioningMessage
wait_for_condition(client, svc, condition)
assert svc.state == 'registering'
client.wait_success(client.delete(svc))
# create the port
new_range = {"startPort": 65533, "endPort": 65535}
project = user_client.create_project(servicesPortRange=new_range)
project = user_client.wait_success(project)
assert project.servicesPortRange.startPort == new_range['startPort']
assert project.servicesPortRange.endPort == new_range['endPort']
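# Sketch only: the number of distinct public ports a project's
# servicesPortRange can hand out; with the 65533-65535 range used above that
# is three, which is why a second service asking for three more random ports
# is rejected with "Not enough environment ports".
def _random_ports_available(port_range):
    return port_range['endPort'] - port_range['startPort'] + 1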
def test_update_port_endpoint(new_context):
client = new_context.client
host1 = new_context.host
env = _create_stack(client)
hosts = [host1]
port1 = 5557
port2 = 5558
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid, "ports": [str(port1) + ':6666']}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = client.wait_success(svc.activate(), 120)
assert svc.state == "active"
wait_for(lambda: client.reload(svc).publicEndpoints is not None and len(
client.reload(svc).publicEndpoints) == 1)
endpoints = client.reload(svc).publicEndpoints
for host in hosts:
_validate_endpoint(endpoints, port1, host, svc)
wait_for(lambda: client.reload(host1).publicEndpoints is not None and len(
client.reload(host1).publicEndpoints) == 1)
endpoints = client.reload(host1).publicEndpoints
_validate_endpoint(endpoints, port1, hosts[0], svc)
# update port
c = _wait_for_compose_instance_start(client, svc, env, "1")
port = c.ports_link()[0]
assert port.publicPort == port1
port = client.update(port, publicPort=port2)
assert port.state == 'updating-active'
assert port.publicPort == port2
port = client.wait_success(port)
assert port.state == 'active'
# validate endpoints
wait_for(lambda: client.reload(svc).publicEndpoints is not None and len(
client.reload(svc).publicEndpoints) == 1)
endpoints = client.reload(svc).publicEndpoints
wait_for(lambda: client.reload(svc).publicEndpoints[0].port == port2)
wait_for(lambda: client.reload(host).publicEndpoints[0].port == port2)
endpoints = client.reload(svc).publicEndpoints
for host in hosts:
_validate_endpoint(endpoints, port2, host, svc)
wait_for(lambda: client.reload(host1).publicEndpoints is not None and len(
client.reload(host1).publicEndpoints) == 1)
endpoints = client.reload(host1).publicEndpoints
_validate_endpoint(endpoints, port2, hosts[0], svc)
def test_ip_retain(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=1,
retainIp=True)
svc = client.wait_success(svc)
assert svc.state == "inactive"
env.activateservices()
svc = client.wait_success(svc, 120)
assert svc.state == "active"
c1 = _wait_for_compose_instance_start(client, svc, env, "1")
c1 = super_client.reload(c1)
ip1 = c1.primaryIpAddress
# remove instance and
# check that c1 and c2 got the same ip
_instance_remove(c1, client)
_wait_until_active_map_count(svc, 1, client)
svc = client.wait_success(svc)
assert svc.state == "active"
c2 = _wait_for_compose_instance_start(client, svc, env, "1")
c2 = super_client.reload(c2)
ip2 = c2.primaryIpAddress
assert c1.id != c2.id
assert ip1 == ip2
# upgrade the service and
# check that c3 and c2 got the same ip
strategy = {"launchConfig": launch_config,
"intervalMillis": 100}
svc.upgrade_action(inServiceStrategy=strategy)
client.wait_success(svc)
c3 = _wait_for_compose_instance_start(client, svc, env, "1")
ip3 = c3.primaryIpAddress
assert c2.id != c3.id
assert ip2 == ip3
def test_ip_retain_requested_ip(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
req_ip = '10.42.77.88'
launch_config = {"imageUuid": image_uuid, "requestedIpAddress": req_ip}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=1,
retainIp=True)
svc = client.wait_success(svc)
assert svc.state == "inactive"
env.activateservices()
svc = client.wait_success(svc, 120)
assert svc.state == "active"
c1 = _wait_for_compose_instance_start(client, svc, env, "1")
c1 = super_client.reload(c1)
ip1 = c1.primaryIpAddress
assert ip1 == req_ip
# remove instance and
# check that c1 and c2 got the same ip
_instance_remove(c1, client)
svc = wait_state(client, svc, 'active')
_wait_until_active_map_count(svc, 1, client)
svc = client.wait_success(svc)
assert svc.state == "active"
c2 = _wait_for_compose_instance_start(client, svc, env, "1")
c2 = super_client.reload(c2)
ip2 = c2.primaryIpAddress
assert c1.id != c2.id
assert ip1 == ip2
def _get_instance_for_service(super_client, serviceId):
instances = []
instance_service_maps = super_client. \
list_serviceExposeMap(serviceId=serviceId)
for mapping in instance_service_maps:
instances.append(mapping.instance())
return instances
def _resource_is_stopped(resource):
return resource.state == 'stopped'
def _resource_is_running(resource):
return resource.state == 'running'
def _resource_is_active(resource):
return resource.state == 'active'
def _resource_is_removed(resource):
return resource.removed is not None
def _wait_for_compose_instance_start(client, service, env,
number, launch_config_name=None):
cn = launch_config_name + "-" if \
launch_config_name is not None else ""
name = env.name + "-" + service.name + "-" + cn + number
wait_for(
lambda: len(client.list_container(name=name, state='running')) > 0
)
return client.list_container(name=name, state='running')[0]
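# Sketch only: the container naming convention the helper above (and the
# list_container calls throughout this file) rely on:
# "<stack>-<service>-<index>" for the primary launch config and
# "<stack>-<service>-<sidekick>-<index>" for secondary launch configs.
def _compose_container_name(stack_name, service_name, index,
                            launch_config_name=None):
    parts = [stack_name, service_name]
    if launch_config_name is not None:
        parts.append(launch_config_name)
    parts.append(str(index))
    return '-'.join(parts)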
def test_host_dns(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid, "networkMode": "host"}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
# activate the service and validate that parameters were set for instance
service = client.wait_success(svc.activate())
assert service.state == "active"
instance_service_map = client \
.list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + "1")
assert len(instances) == 1
c = instances[0]
assert c.dns is None or len(c.dns) == 0
def test_dns_label(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
labels = {'io.rancher.container.dns': "false"}
launch_config = {"imageUuid": image_uuid,
"labels": labels}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
service = client.wait_success(svc.activate())
assert service.state == "active"
instance_service_map = client \
.list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + "1")
assert len(instances) == 1
c = instances[0]
assert c.dns is None or len(c.dns) == 0
def test_dns_label_true(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
labels = {'io.rancher.container.dns': "true"}
launch_config = {"imageUuid": image_uuid,
"labels": labels}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
service = client.wait_success(svc.activate())
assert service.state == "active"
instance_service_map = client \
.list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + "1")
assert len(instances) == 1
c = instances[0]
assert c.dns is not None and len(c.dns) > 0
def test_dns_label_and_dns_param(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
labels = {'io.rancher.container.dns': "false"}
launch_config = {"imageUuid": image_uuid,
"labels": labels,
"dns": ["1.1.1.1"],
"dnsSearch": ["foo"]}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
service = client.wait_success(svc.activate())
assert service.state == "active"
instance_service_map = client \
.list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
instances = client. \
list_container(name=env.name + "-" + service.name + "-" + "1")
assert len(instances) == 1
c = instances[0]
assert c.dns == ["1.1.1.1"]
assert c.dnsSearch == ["foo"]
def test_standalone_container_endpoint(new_context):
client = new_context.client
host = new_context.host
client.wait_success(host)
env = _create_stack(client)
port0 = 5534
port1 = 5535
port2 = 6636
port3 = 6637
image_uuid = new_context.image_uuid
launch_config = {"imageUuid": image_uuid,
"ports": ['127.2.2.2:%s:%s' % (port0, '6666'),
'%s:%s' % (port1, '6666')]}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = client.wait_success(svc.activate(), 120)
assert svc.state == "active"
c = client.create_container(imageUuid=image_uuid,
startOnCreate=True,
ports=['%s:%s' % (port2, '6666'),
'127.0.0.1:%s:%s' % (port3, '6666')])
c = client.wait_success(c)
wait_for(
lambda: client.reload(host).publicEndpoints is not None and len(
client.reload(host).publicEndpoints) == 4)
endpoints = client.reload(host).publicEndpoints
svce_bind_ip = None
svce_no_ip = None
ce_no_ip = None
ce_bind_ip = None
for endpoint in endpoints:
if endpoint.port == port0:
svce_bind_ip = endpoint
if endpoint.port == port1:
svce_no_ip = endpoint
elif endpoint.port == port2:
ce_no_ip = endpoint
elif endpoint.port == port3:
ce_bind_ip = endpoint
assert svce_no_ip is not None
assert ce_no_ip is not None
_validate_endpoint([svce_bind_ip], port0, host, svc, bind_addr='127.2.2.2')
_validate_endpoint([svce_no_ip], port1, host, svc)
_validate_endpoint([ce_no_ip], port2, host)
_validate_endpoint([ce_bind_ip], port3, host, bind_addr='127.0.0.1')
c = client.wait_success(c.stop())
client.wait_success(c.remove())
wait_for(
lambda: client.reload(host).publicEndpoints is not None and len(
client.reload(host).publicEndpoints) == 2)
endpoints = client.reload(host).publicEndpoints
svce_bind_ip = None
svce_no_ip = None
ce_no_ip = None
ce_bind_ip = None
for endpoint in endpoints:
if endpoint.port == port0:
svce_bind_ip = endpoint
if endpoint.port == port1:
svce_no_ip = endpoint
elif endpoint.port == port2:
ce_no_ip = endpoint
elif endpoint.port == port3:
ce_bind_ip = endpoint
assert svce_bind_ip is not None
assert svce_no_ip is not None
assert ce_no_ip is None
assert ce_bind_ip is None
def test_service_start_on_create(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
environmentId=env.id,
startOnCreate=True,
launchConfig=launch_config)
assert svc.startOnCreate
svc = client.wait_success(svc)
assert svc.state == 'active'
def test_validate_scaledown_order(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"dataVolumesFromLaunchConfigs": ['secondary']}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary"}
service = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc],
scale=3)
service = client.wait_success(service)
assert service.state == "inactive"
# activate service
env.activateservices()
service = client.wait_success(service)
assert service.state == "active"
instance11 = _validate_compose_instance_start(client, service, env, "1")
# scale down, and validate the first instance is intact
service = client.update(service, scale=1)
service = client.wait_success(service, 120)
assert service.state == "active"
instance11 = client.reload(instance11)
assert instance11.state == 'running'
def test_retain_ip_update(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
environmentId=env.id,
launchConfig=launch_config,
scale=1)
svc = client.wait_success(svc)
assert svc.state == "inactive"
env.activateservices()
svc = client.wait_success(svc, 120)
assert svc.state == "active"
c1 = _wait_for_compose_instance_start(client, svc, env, "1")
c1 = super_client.reload(c1)
ip1 = c1.primaryIpAddress
# change retain ip to true
svc = client.update(svc, retainIp=True)
svc = client.wait_success(svc)
assert svc.retainIp is True
# remove instance and
# check that c1 and c2 got the same ip
_instance_remove(c1, client)
_wait_until_active_map_count(svc, 1, client)
svc = client.wait_success(svc)
assert svc.state == "active"
c2 = _wait_for_compose_instance_start(client, svc, env, "1")
c2 = super_client.reload(c2)
ip2 = c2.primaryIpAddress
assert c1.id != c2.id
assert ip1 == ip2
| {
"content_hash": "7769f78fcdfbe27f5033181444c25d06",
"timestamp": "",
"source": "github",
"line_count": 3108,
"max_line_length": 79,
"avg_line_length": 37.648648648648646,
"alnum_prop": 0.609134105903668,
"repo_name": "Cerfoglg/cattle",
"id": "53c4f1955a3ddc8c6fd7b9362cf614a323372d88",
"size": "117012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration-v1/cattletest/core/test_svc_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5271"
},
{
"name": "FreeMarker",
"bytes": "71"
},
{
"name": "Java",
"bytes": "6398519"
},
{
"name": "Makefile",
"bytes": "308"
},
{
"name": "Python",
"bytes": "1582534"
},
{
"name": "Shell",
"bytes": "41134"
}
],
"symlink_target": ""
} |
"""Module to load the Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# internal imports
import numpy as np
import tensorflow as tf
from magenta.models.nsynth import utils
# FFT Specgram Shapes
SPECGRAM_REGISTRY = {
(nfft, hop): shape for nfft, hop, shape in zip(
[256, 256, 512, 512, 1024, 1024],
[64, 128, 128, 256, 256, 512],
[[129, 1001, 2], [129, 501, 2], [257, 501, 2],
[257, 251, 2], [513, 251, 2], [513, 126, 2]])
}
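# The shapes above appear to follow from 4-second, 16 kHz (64000-sample)
# clips: nfft // 2 + 1 frequency bins, 64000 // hop + 1 frames, and 2
# channels (e.g. real/imaginary or magnitude/phase). A small sketch that
# reproduces the registry entries; the derivation is an assumption, for
# illustration only.
def _specgram_shape(nfft, hop, n_samples=64000):
  return [nfft // 2 + 1, n_samples // hop + 1, 2]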
class NSynthDataset(object):
"""Dataset object to help manage the TFRecord loading."""
def __init__(self, tfrecord_path, is_training=True):
self.is_training = is_training
self.record_path = tfrecord_path
def get_example(self, batch_size):
"""Get a single example from the tfrecord file.
Args:
batch_size: Int, minibatch size.
Returns:
tf.Example protobuf parsed from tfrecord.
"""
reader = tf.TFRecordReader()
num_epochs = None if self.is_training else 1
capacity = batch_size
path_queue = tf.train.input_producer(
[self.record_path],
num_epochs=num_epochs,
shuffle=self.is_training,
capacity=capacity)
unused_key, serialized_example = reader.read(path_queue)
features = {
"note_str": tf.FixedLenFeature([], dtype=tf.string),
"pitch": tf.FixedLenFeature([1], dtype=tf.int64),
"velocity": tf.FixedLenFeature([1], dtype=tf.int64),
"audio": tf.FixedLenFeature([64000], dtype=tf.float32),
"qualities": tf.FixedLenFeature([10], dtype=tf.int64),
"instrument_source": tf.FixedLenFeature([1], dtype=tf.int64),
"instrument_family": tf.FixedLenFeature([1], dtype=tf.int64),
}
example = tf.parse_single_example(serialized_example, features)
return example
def get_wavenet_batch(self, batch_size, length=64000):
"""Get the Tensor expressions from the reader.
Args:
batch_size: The integer batch size.
length: Number of timesteps of a cropped sample to produce.
Returns:
A dict of key:tensor pairs. This includes "pitch", "wav", and "key".
"""
example = self.get_example(batch_size)
wav = example["audio"]
wav = tf.slice(wav, [0], [64000])
pitch = tf.squeeze(example["pitch"])
key = tf.squeeze(example["note_str"])
if self.is_training:
# random crop
crop = tf.random_crop(wav, [length])
crop = tf.reshape(crop, [1, length])
key, crop, pitch = tf.train.shuffle_batch(
[key, crop, pitch],
batch_size,
num_threads=4,
capacity=500 * batch_size,
min_after_dequeue=200 * batch_size)
else:
# fixed center crop
offset = (64000 - length) // 2 # 24320
crop = tf.slice(wav, [offset], [length])
crop = tf.reshape(crop, [1, length])
key, crop, pitch = tf.train.shuffle_batch(
[key, crop, pitch],
batch_size,
num_threads=4,
capacity=500 * batch_size,
min_after_dequeue=200 * batch_size)
crop = tf.reshape(tf.cast(crop, tf.float32), [batch_size, length])
pitch = tf.cast(pitch, tf.int32)
return {"pitch": pitch, "wav": crop, "key": key}
def get_baseline_batch(self, hparams):
"""Get the Tensor expressions from the reader.
Args:
hparams: Hyperparameters object with specgram parameters.
Returns:
      A dict of key:tensor pairs. This includes "pitch", "velocity", "audio",
      "instrument_source", "instrument_family", "qualities", and "spectrogram".
"""
example = self.get_example(hparams.batch_size)
audio = tf.slice(example["audio"], [0], [64000])
audio = tf.reshape(audio, [1, 64000])
pitch = tf.slice(example["pitch"], [0], [1])
velocity = tf.slice(example["velocity"], [0], [1])
instrument_source = tf.slice(example["instrument_source"], [0], [1])
instrument_family = tf.slice(example["instrument_family"], [0], [1])
qualities = tf.slice(example["qualities"], [0], [10])
qualities = tf.reshape(qualities, [1, 10])
# Get Specgrams
hop_length = hparams.hop_length
n_fft = hparams.n_fft
if hop_length and n_fft:
specgram = utils.tf_specgram(
audio,
n_fft=n_fft,
hop_length=hop_length,
mask=hparams.mask,
log_mag=hparams.log_mag,
re_im=hparams.re_im,
dphase=hparams.dphase,
mag_only=hparams.mag_only)
shape = [1] + SPECGRAM_REGISTRY[(n_fft, hop_length)]
if hparams.mag_only:
shape[-1] = 1
specgram = tf.reshape(specgram, shape)
tf.logging.info("SPECGRAM BEFORE PADDING", specgram)
if hparams.pad:
# Pad and crop specgram to 256x256
num_padding = 2**int(np.ceil(np.log(shape[2]) / np.log(2))) - shape[2]
tf.logging.info("num_pading: %d" % num_padding)
specgram = tf.reshape(specgram, shape)
specgram = tf.pad(specgram, [[0, 0], [0, 0], [0, num_padding], [0, 0]])
specgram = tf.slice(specgram, [0, 0, 0, 0], [-1, shape[1] - 1, -1, -1])
tf.logging.info("SPECGRAM AFTER PADDING", specgram)
# Form a Batch
if self.is_training:
(audio, velocity, pitch, specgram,
instrument_source, instrument_family,
qualities) = tf.train.shuffle_batch(
[
audio, velocity, pitch, specgram,
instrument_source, instrument_family, qualities
],
batch_size=hparams.batch_size,
capacity=20 * hparams.batch_size,
min_after_dequeue=10 * hparams.batch_size,
enqueue_many=True)
elif hparams.batch_size > 1:
(audio, velocity, pitch, specgram,
instrument_source, instrument_family, qualities) = tf.train.batch(
[
audio, velocity, pitch, specgram,
instrument_source, instrument_family, qualities
],
batch_size=hparams.batch_size,
capacity=10 * hparams.batch_size,
enqueue_many=True)
audio.set_shape([hparams.batch_size, 64000])
batch = dict(
pitch=pitch,
velocity=velocity,
audio=audio,
instrument_source=instrument_source,
instrument_family=instrument_family,
qualities=qualities,
spectrogram=specgram)
return batch
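# Illustrative usage sketch (editor addition, not part of the original module).
# Assumes a TF 1.x runtime and a hypothetical TFRecord path; the queue-based
# reader requires queue runners to be started before anything can be fetched.
def _example_read_wavenet_batch():
  """Minimal sketch: pull one WaveNet-style batch from the reader."""
  dataset = NSynthDataset("/tmp/nsynth-train.tfrecord", is_training=True)
  batch = dataset.get_wavenet_batch(batch_size=8, length=6144)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # pitch: int32 [8], wav: float32 [8, 6144], key: string [8]
    pitch, wav = sess.run([batch["pitch"], batch["wav"]])
    coord.request_stop()
    coord.join(threads)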
| {
"content_hash": "bbd86b0d9fe7d2c89bfde607fbda9328",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 79,
"avg_line_length": 33.956756756756754,
"alnum_prop": 0.6045845272206304,
"repo_name": "bda2017-shallowermind/MusTGAN",
"id": "70541718c2a9d61cccd5139e97bddb5a85f76436",
"size": "6877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magenta/magenta/models/nsynth/reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12668"
},
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "43259"
},
{
"name": "Jupyter Notebook",
"bytes": "2115912"
},
{
"name": "Protocol Buffer",
"bytes": "12931"
},
{
"name": "Python",
"bytes": "1389487"
},
{
"name": "Shell",
"bytes": "8783"
}
],
"symlink_target": ""
} |
{
'name': 'Accounting Tax Adjustments',
'version': '1.1',
'category': 'Accounting',
'description': """
Accounting Tax Adjustments.
===========================
This module adds a wizard to handle manual tax adjustments, so that the VAT declaration can be corrected manually, for example through a miscellaneous operation.
The correct definition of an adjustment tax is:
- type_tax_use: none
- amount_type: fixed
- amount: 0
- tags: a grid used in your VAT report for manual correction.
""",
'website': 'https://www.odoo.com/page/accounting',
'depends': ['account'],
'data': [
'views/tax_adjustments.xml',
'wizard/wizard_tax_adjustments_view.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
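# Editor's illustrative note (not part of the upstream manifest): a tax that
# matches the definition quoted in the description above could be created
# roughly like this through the ORM. Field names other than type_tax_use,
# amount_type and amount are assumptions and should be checked against the
# target Odoo version.
#
#   env['account.tax'].create({
#       'name': 'VAT adjustment (manual)',
#       'type_tax_use': 'none',
#       'amount_type': 'fixed',
#       'amount': 0,
#       'tag_ids': [(6, 0, [vat_grid_tag.id])],  # grid used in the VAT report
#   })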
| {
"content_hash": "4636f47b33554f01b3d98450fb0d0e05",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 149,
"avg_line_length": 28.185185185185187,
"alnum_prop": 0.6320630749014454,
"repo_name": "vileopratama/vitech",
"id": "240e175971dce31060f9ed6ff6c0ac3b76b116b5",
"size": "861",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "src/addons/account_tax_adjustments/__openerp__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
class PybtexError(Exception):
pass
| {
"content_hash": "8c4649fc9d6b34633e6d1d33764c31cb",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 19.5,
"alnum_prop": 0.7435897435897436,
"repo_name": "rybesh/pybtex",
"id": "26969d05d9050046fa04998d1db83dbc52b04ee6",
"size": "1153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybtex/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "218154"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
import pyxb.binding.generate
import pyxb.utils.domutils
import os.path
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../schemas/test-include-daq.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
#file('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestIncludeDD (unittest.TestCase):
def testDefault (self):
xmls = '<entry xmlns="%s"><from>one</from><to>single</to></entry>' % (Namespace.uri(),)
instance = CreateFromDocument(xmls.encode('utf-8'))
self.assertEqual(english.one, instance.from_)
def testExplicit (self):
xmls = '<ns:entry xmlns:ns="%s"><ns:from>one</ns:from><ns:to>single</ns:to></ns:entry>' % (Namespace.uri(),)
instance = CreateFromDocument(xmls.encode('utf-8'))
self.assertEqual(english.one, instance.from_)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e8776ec329d175a953044c95636d783f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 116,
"avg_line_length": 34.3448275862069,
"alnum_prop": 0.6495983935742972,
"repo_name": "jonfoster/pyxb1",
"id": "acc0d6a6dfd62aa80e2bef4e2b9105e4846062c3",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/drivers/test-include-daq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1564427"
},
{
"name": "Shell",
"bytes": "18946"
}
],
"symlink_target": ""
} |
"""Miscellaneous utility functions for use with Swift."""
from __future__ import print_function
import errno
import fcntl
import grp
import hmac
import json
import math
import operator
import os
import pwd
import re
import sys
import time
import uuid
import functools
import email.parser
from hashlib import md5, sha1
from random import random, shuffle
from contextlib import contextmanager, closing
import ctypes
import ctypes.util
from optparse import OptionParser
from tempfile import mkstemp, NamedTemporaryFile
import glob
import itertools
import stat
import datetime
import eventlet
import eventlet.semaphore
from eventlet import GreenPool, sleep, Timeout, tpool, greenthread, \
greenio, event
from eventlet.green import socket, threading
import eventlet.queue
import netifaces
import codecs
utf8_decoder = codecs.getdecoder('utf-8')
utf8_encoder = codecs.getencoder('utf-8')
import six
from six.moves import cPickle as pickle
from six.moves.configparser import (ConfigParser, NoSectionError,
NoOptionError, RawConfigParser)
from six.moves import range
from six.moves.urllib.parse import ParseResult
from six.moves.urllib.parse import quote as _quote
from six.moves.urllib.parse import urlparse as stdlib_urlparse
from swift import gettext_ as _
import swift.common.exceptions
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \
HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
from swift.common.header_key_dict import HeaderKeyDict
if six.PY3:
stdlib_queue = eventlet.patcher.original('queue')
else:
stdlib_queue = eventlet.patcher.original('Queue')
stdlib_threading = eventlet.patcher.original('threading')
# logging doesn't import patched as cleanly as one would like
from logging.handlers import SysLogHandler
import logging
logging.thread = eventlet.green.thread
logging.threading = eventlet.green.threading
logging._lock = logging.threading.RLock()
# setup notice level logging
NOTICE = 25
logging.addLevelName(NOTICE, 'NOTICE')
SysLogHandler.priority_map['NOTICE'] = 'notice'
# These are lazily pulled from libc elsewhere
_sys_fallocate = None
_posix_fadvise = None
_libc_socket = None
_libc_bind = None
_libc_accept = None
# If set to non-zero, fallocate routines will fail based on free space
# available being at or below this amount, in bytes.
FALLOCATE_RESERVE = 0
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
HASH_PATH_SUFFIX = ''
HASH_PATH_PREFIX = ''
SWIFT_CONF_FILE = '/etc/swift/swift.conf'
# These constants are Linux-specific, and Python doesn't seem to know
# about them. We ask anyway just in case that ever gets fixed.
#
# The values were copied from the Linux 3.0 kernel headers.
AF_ALG = getattr(socket, 'AF_ALG', 38)
F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
# Used by the parse_socket_string() function to validate IPv6 addresses
IPV6_RE = re.compile(r"^\[(?P<address>.*)\](:(?P<port>[0-9]+))?$")
class InvalidHashPathConfigError(ValueError):
def __str__(self):
return "[swift-hash]: both swift_hash_path_suffix and " \
"swift_hash_path_prefix are missing from %s" % SWIFT_CONF_FILE
def validate_hash_conf():
global HASH_PATH_SUFFIX
global HASH_PATH_PREFIX
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
hash_conf = ConfigParser()
if hash_conf.read(SWIFT_CONF_FILE):
try:
HASH_PATH_SUFFIX = hash_conf.get('swift-hash',
'swift_hash_path_suffix')
except (NoSectionError, NoOptionError):
pass
try:
HASH_PATH_PREFIX = hash_conf.get('swift-hash',
'swift_hash_path_prefix')
except (NoSectionError, NoOptionError):
pass
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
raise InvalidHashPathConfigError()
try:
validate_hash_conf()
except InvalidHashPathConfigError:
# could get monkey patched or lazy loaded
pass
def get_hmac(request_method, path, expires, key):
"""
Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for
the request.
:param request_method: Request method to allow.
:param path: The path to the resource to allow access to.
:param expires: Unix timestamp as an int for when the URL
expires.
:param key: HMAC shared secret.
:returns: hexdigest str of the HMAC-SHA1 for the request.
"""
return hmac.new(
key, '%s\n%s\n%s' % (request_method, expires, path), sha1).hexdigest()
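# Illustrative sketch (editor addition): signing a request the way temp-URL
# style middlewares do. Key and path are made-up values; note the call keeps
# the module's Python 2 str-based semantics.
def _example_get_hmac():
    expires = int(time.time()) + 3600          # valid for one hour
    sig = get_hmac('GET', '/v1/AUTH_test/c/o', expires, 'mysecretkey')
    # 'sig' is a 40-character hex SHA1 digest that can later be compared
    # against a signature supplied by the client.
    return sig, expires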
# Used by get_swift_info and register_swift_info to store information about
# the swift cluster.
_swift_info = {}
_swift_admin_info = {}
def get_swift_info(admin=False, disallowed_sections=None):
"""
Returns information about the swift cluster that has been previously
registered with the register_swift_info call.
:param admin: boolean value, if True will additionally return an 'admin'
section with information previously registered as admin
info.
:param disallowed_sections: list of section names to be withheld from the
information returned.
:returns: dictionary of information about the swift cluster.
"""
disallowed_sections = disallowed_sections or []
info = dict(_swift_info)
for section in disallowed_sections:
key_to_pop = None
sub_section_dict = info
for sub_section in section.split('.'):
if key_to_pop:
sub_section_dict = sub_section_dict.get(key_to_pop, {})
if not isinstance(sub_section_dict, dict):
sub_section_dict = {}
break
key_to_pop = sub_section
sub_section_dict.pop(key_to_pop, None)
if admin:
info['admin'] = dict(_swift_admin_info)
info['admin']['disallowed_sections'] = list(disallowed_sections)
return info
def register_swift_info(name='swift', admin=False, **kwargs):
"""
Registers information about the swift cluster to be retrieved with calls
to get_swift_info.
NOTE: Do not use "." in the param: name or any keys in kwargs. "." is used
in the disallowed_sections to remove unwanted keys from /info.
:param name: string, the section name to place the information under.
:param admin: boolean, if True, information will be registered to an
admin section which can optionally be withheld when
requesting the information.
:param kwargs: key value arguments representing the information to be
added.
:raises ValueError: if name or any of the keys in kwargs has "." in it
"""
if name == 'admin' or name == 'disallowed_sections':
raise ValueError('\'{0}\' is reserved name.'.format(name))
if admin:
dict_to_use = _swift_admin_info
else:
dict_to_use = _swift_info
if name not in dict_to_use:
if "." in name:
raise ValueError('Cannot use "." in a swift_info key: %s' % name)
dict_to_use[name] = {}
for key, val in kwargs.items():
if "." in key:
raise ValueError('Cannot use "." in a swift_info key: %s' % key)
dict_to_use[name][key] = val
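# Illustrative sketch (editor addition): how middleware typically publishes
# capabilities and how they are read back for an /info-style response. The
# section names and values below are hypothetical.
def _example_swift_info():
    register_swift_info('slo', max_manifest_segments=1000)
    register_swift_info('slo', admin=True, internal_tuning=42)
    # Public view, with one sub-key withheld:
    info = get_swift_info(disallowed_sections=['slo.max_manifest_segments'])
    # Admin view additionally contains an 'admin' section that records which
    # sections were withheld from the public view.
    admin_info = get_swift_info(admin=True)
    return info, admin_info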
def backward(f, blocksize=4096):
"""
A generator returning lines from a file starting with the last line,
then the second last line, etc. i.e., it reads lines backwards.
Stops when the first line (if any) is read.
This is useful when searching for recent activity in very
large files.
:param f: file object to read
:param blocksize: no of characters to go backwards at each block
"""
f.seek(0, os.SEEK_END)
if f.tell() == 0:
return
last_row = b''
while f.tell() != 0:
try:
f.seek(-blocksize, os.SEEK_CUR)
except IOError:
blocksize = f.tell()
f.seek(-blocksize, os.SEEK_CUR)
block = f.read(blocksize)
f.seek(-blocksize, os.SEEK_CUR)
rows = block.split(b'\n')
rows[-1] = rows[-1] + last_row
while rows:
last_row = rows.pop(-1)
if rows and last_row:
yield last_row
yield last_row
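# Illustrative sketch (editor addition): scanning a large log file for the
# most recent matching line without reading the whole file. The path is
# hypothetical; the file must be opened in binary mode because backward()
# yields bytes.
def _example_backward(logpath='/var/log/swift/storage.log'):
    with open(logpath, 'rb') as f:
        for line in backward(f):
            if b' ERROR ' in line:
                return line
    return None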
# Used when reading config values
TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
def config_true_value(value):
"""
Returns True if the value is either True or a string in TRUE_VALUES.
Returns False otherwise.
"""
return value is True or \
(isinstance(value, six.string_types) and value.lower() in TRUE_VALUES)
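# Editor's illustrative examples of config_true_value() behaviour:
def _example_config_true_value():
    assert config_true_value('yes') and config_true_value('On')
    assert config_true_value('1') and config_true_value(True)
    assert not config_true_value('0') and not config_true_value('false')
    assert not config_true_value(None)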
def config_auto_int_value(value, default):
"""
Returns default if value is None or 'auto'.
Returns value as an int or raises ValueError otherwise.
"""
if value is None or \
(isinstance(value, six.string_types) and value.lower() == 'auto'):
return default
try:
value = int(value)
except (TypeError, ValueError):
raise ValueError('Config option must be an integer or the '
'string "auto", not "%s".' % value)
return value
def append_underscore(prefix):
if prefix and not prefix.endswith('_'):
prefix += '_'
return prefix
def config_read_reseller_options(conf, defaults):
"""
Read reseller_prefix option and associated options from configuration
Reads the reseller_prefix option, then reads options that may be
associated with a specific reseller prefix. Reads options such that an
option without a prefix applies to all reseller prefixes unless an option
has an explicit prefix.
:param conf: the configuration
:param defaults: a dict of default values. The key is the option
name. The value is either an array of strings or a string
:return: tuple of an array of reseller prefixes and a dict of option values
"""
reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',')
reseller_prefixes = []
for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]:
if prefix == "''":
prefix = ''
prefix = append_underscore(prefix)
if prefix not in reseller_prefixes:
reseller_prefixes.append(prefix)
if len(reseller_prefixes) == 0:
reseller_prefixes.append('')
# Get prefix-using config options
associated_options = {}
for prefix in reseller_prefixes:
associated_options[prefix] = dict(defaults)
associated_options[prefix].update(
config_read_prefixed_options(conf, '', defaults))
prefix_name = prefix if prefix != '' else "''"
associated_options[prefix].update(
config_read_prefixed_options(conf, prefix_name, defaults))
return reseller_prefixes, associated_options
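# Illustrative sketch (editor addition): reading per-reseller options from a
# hypothetical auth middleware configuration. A list-valued default means the
# raw value is split on commas and lower-cased.
def _example_reseller_options():
    conf = {'reseller_prefix': 'AUTH',
            'AUTH_operator_roles': 'Admin, SwiftOperator'}
    prefixes, options = config_read_reseller_options(
        conf, {'operator_roles': []})
    # prefixes -> ['AUTH_']
    # options  -> {'AUTH_': {'operator_roles': ['admin', 'swiftoperator']}}
    return prefixes, options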
def config_read_prefixed_options(conf, prefix_name, defaults):
"""
Read prefixed options from configuration
:param conf: the configuration
:param prefix_name: the prefix (including, if needed, an underscore)
:param defaults: a dict of default values. The dict supplies the
option name and type (string or comma separated string)
:return: a dict containing the options
"""
params = {}
for option_name in defaults.keys():
value = conf.get('%s%s' % (prefix_name, option_name))
if value:
if isinstance(defaults.get(option_name), list):
params[option_name] = []
for role in value.lower().split(','):
params[option_name].append(role.strip())
else:
params[option_name] = value.strip()
return params
def noop_libc_function(*args):
return 0
def validate_configuration():
try:
validate_hash_conf()
except InvalidHashPathConfigError as e:
sys.exit("Error: %s" % e)
def load_libc_function(func_name, log_error=True,
fail_if_missing=False):
"""
Attempt to find the function in libc, otherwise return a no-op func.
:param func_name: name of the function to pull from libc.
:param log_error: log an error when a function can't be found
:param fail_if_missing: raise an exception when a function can't be found.
Default behavior is to return a no-op function.
"""
try:
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
return getattr(libc, func_name)
except AttributeError:
if fail_if_missing:
raise
if log_error:
logging.warning(_("Unable to locate %s in libc. Leaving as a "
"no-op."), func_name)
return noop_libc_function
def generate_trans_id(trans_id_suffix):
return 'tx%s-%010x%s' % (
uuid.uuid4().hex[:21], time.time(), quote(trans_id_suffix))
def get_policy_index(req_headers, res_headers):
"""
Returns the appropriate index of the storage policy for the request from
a proxy server
:param req: dict of the request headers.
:param res: dict of the response headers.
:returns: string index of storage policy, or None
"""
header = 'X-Backend-Storage-Policy-Index'
policy_index = res_headers.get(header, req_headers.get(header))
return str(policy_index) if policy_index is not None else None
def get_log_line(req, res, trans_time, additional_info):
"""
Make a line for logging that matches the documented log line format
for backend servers.
:param req: the request.
:param res: the response.
:param trans_time: the time the request took to complete, a float.
:param additional_info: a string to log at the end of the line
:returns: a properly formatted line for logging.
"""
policy_index = get_policy_index(req.headers, res.headers)
return '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f "%s" %d %s' % (
req.remote_addr,
time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()),
req.method, req.path, res.status.split()[0],
res.content_length or '-', req.referer or '-',
req.headers.get('x-trans-id', '-'),
req.user_agent or '-', trans_time, additional_info or '-',
os.getpid(), policy_index or '-')
def get_trans_id_time(trans_id):
if len(trans_id) >= 34 and \
trans_id.startswith('tx') and trans_id[23] == '-':
try:
return int(trans_id[24:34], 16)
except ValueError:
pass
return None
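# Illustrative sketch (editor addition): transaction ids embed their creation
# time as ten hex digits after the dash, so the epoch seconds can be recovered
# later. The id below is synthetic.
def _example_trans_id_time():
    trans_id = 'tx' + 'f' * 21 + '-005397e9a5'
    return get_trans_id_time(trans_id)   # -> 1402464677 (epoch seconds)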
class FileLikeIter(object):
def __init__(self, iterable):
"""
Wraps an iterable to behave as a file-like object.
The iterable must yield bytes strings.
"""
self.iterator = iter(iterable)
self.buf = None
self.closed = False
def __iter__(self):
return self
def next(self):
"""
next(x) -> the next value, or raise StopIteration
"""
if self.closed:
raise ValueError('I/O operation on closed file')
if self.buf:
rv = self.buf
self.buf = None
return rv
else:
return next(self.iterator)
__next__ = next
def read(self, size=-1):
"""
read([size]) -> read at most size bytes, returned as a bytes string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was
requested may be returned, even if no size parameter was given.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
if size < 0:
return b''.join(self)
elif not size:
chunk = b''
elif self.buf:
chunk = self.buf
self.buf = None
else:
try:
chunk = next(self.iterator)
except StopIteration:
return b''
if len(chunk) > size:
self.buf = chunk[size:]
chunk = chunk[:size]
return chunk
def readline(self, size=-1):
"""
readline([size]) -> next line from the file, as a bytes string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
data = b''
while b'\n' not in data and (size < 0 or len(data) < size):
if size < 0:
chunk = self.read(1024)
else:
chunk = self.read(size - len(data))
if not chunk:
break
data += chunk
if b'\n' in data:
data, sep, rest = data.partition(b'\n')
data += sep
if self.buf:
self.buf = rest + self.buf
else:
self.buf = rest
return data
def readlines(self, sizehint=-1):
"""
readlines([size]) -> list of bytes strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
lines = []
while True:
line = self.readline(sizehint)
if not line:
break
lines.append(line)
if sizehint >= 0:
sizehint -= len(line)
if sizehint <= 0:
break
return lines
def close(self):
"""
close() -> None or (perhaps) an integer. Close the file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
self.iterator = None
self.closed = True
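# Illustrative sketch (editor addition): wrapping a generator of byte chunks
# so it can be handed to code that expects a readable file-like object.
def _example_file_like_iter():
    chunks = (b'hello ', b'world\n', b'second line\n')
    flo = FileLikeIter(chunks)
    first = flo.readline()      # -> b'hello world\n'
    rest = flo.read()           # -> b'second line\n'
    flo.close()
    return first, rest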
class FallocateWrapper(object):
def __init__(self, noop=False):
if noop:
self.func_name = 'posix_fallocate'
self.fallocate = noop_libc_function
return
# fallocate is preferred because we need the on-disk size to match
# the allocated size. Older versions of sqlite require that the
# two sizes match. However, fallocate is Linux only.
for func in ('fallocate', 'posix_fallocate'):
self.func_name = func
self.fallocate = load_libc_function(func, log_error=False)
if self.fallocate is not noop_libc_function:
break
if self.fallocate is noop_libc_function:
logging.warning(_("Unable to locate fallocate, posix_fallocate in "
"libc. Leaving as a no-op."))
def __call__(self, fd, mode, offset, length):
"""The length parameter must be a ctypes.c_uint64."""
if FALLOCATE_RESERVE > 0:
st = os.fstatvfs(fd)
free = st.f_frsize * st.f_bavail - length.value
if free <= FALLOCATE_RESERVE:
raise OSError(
errno.ENOSPC,
'FALLOCATE_RESERVE fail %s <= %s' % (free,
FALLOCATE_RESERVE))
args = {
'fallocate': (fd, mode, offset, length),
'posix_fallocate': (fd, offset, length)
}
return self.fallocate(*args[self.func_name])
def disable_fallocate():
global _sys_fallocate
_sys_fallocate = FallocateWrapper(noop=True)
def fallocate(fd, size):
"""
Pre-allocate disk space for a file.
:param fd: file descriptor
:param size: size to allocate (in bytes)
"""
global _sys_fallocate
if _sys_fallocate is None:
_sys_fallocate = FallocateWrapper()
if size < 0:
size = 0
# 1 means "FALLOC_FL_KEEP_SIZE", which means it pre-allocates invisibly
ret = _sys_fallocate(fd, 1, 0, ctypes.c_uint64(size))
err = ctypes.get_errno()
if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
errno.EINVAL):
raise OSError(err, 'Unable to fallocate(%s)' % size)
def fsync(fd):
"""
Sync modified file data and metadata to disk.
:param fd: file descriptor
"""
if hasattr(fcntl, 'F_FULLSYNC'):
try:
fcntl.fcntl(fd, fcntl.F_FULLSYNC)
except IOError as e:
raise OSError(e.errno, 'Unable to F_FULLSYNC(%s)' % fd)
else:
os.fsync(fd)
def fdatasync(fd):
"""
Sync modified file data to disk.
:param fd: file descriptor
"""
try:
os.fdatasync(fd)
except AttributeError:
fsync(fd)
def fsync_dir(dirpath):
"""
Sync directory entries to disk.
:param dirpath: Path to the directory to be synced.
"""
dirfd = None
try:
dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY)
fsync(dirfd)
except OSError as err:
if err.errno == errno.ENOTDIR:
# Raise error if someone calls fsync_dir on a non-directory
raise
logging.warning(_('Unable to perform fsync() on directory %(dir)s:'
' %(err)s'),
{'dir': dirpath, 'err': os.strerror(err.errno)})
finally:
if dirfd:
os.close(dirfd)
def drop_buffer_cache(fd, offset, length):
"""
Drop 'buffer' cache for the given range of the given file.
:param fd: file descriptor
:param offset: start offset
:param length: length
"""
global _posix_fadvise
if _posix_fadvise is None:
_posix_fadvise = load_libc_function('posix_fadvise64')
# 4 means "POSIX_FADV_DONTNEED"
ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(length), 4)
if ret != 0:
logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
"-> %(ret)s", {'fd': fd, 'offset': offset,
'length': length, 'ret': ret})
NORMAL_FORMAT = "%016.05f"
INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
SHORT_FORMAT = NORMAL_FORMAT + '_%x'
MAX_OFFSET = (16 ** 16) - 1
PRECISION = 1e-5
# Setting this to True will cause the internal format to always display
# extended digits - even when the value is equivalent to the normalized form.
# This isn't ideal during an upgrade when some servers might not understand
# the new time format - but flipping it to True works great for testing.
FORCE_INTERNAL = False # or True
@functools.total_ordering
class Timestamp(object):
"""
Internal Representation of Swift Time.
The normalized form of the X-Timestamp header looks like a float
with a fixed width to ensure stable string sorting - normalized
timestamps look like "1402464677.04188"
To support overwrites of existing data without modifying the original
timestamp but still maintain consistency a second internal offset vector
    is appended to the normalized timestamp form which compares and sorts
greater than the fixed width float format but less than a newer timestamp.
The internalized format of timestamps looks like
"1402464677.04188_0000000000000000" - the portion after the underscore is
the offset and is a formatted hexadecimal integer.
The internalized form is not exposed to clients in responses from
Swift. Normal client operations will not create a timestamp with an
offset.
The Timestamp class in common.utils supports internalized and
normalized formatting of timestamps and also comparison of timestamp
values. When the offset value of a Timestamp is 0 - it's considered
insignificant and need not be represented in the string format; to
support backwards compatibility during a Swift upgrade the
internalized and normalized form of a Timestamp with an
insignificant offset are identical. When a timestamp includes an
offset it will always be represented in the internalized form, but
is still excluded from the normalized form. Timestamps with an
equivalent timestamp portion (the float part) will compare and order
by their offset. Timestamps with a greater timestamp portion will
always compare and order greater than a Timestamp with a lesser
    timestamp regardless of its offset. String comparison and ordering
is guaranteed for the internalized string format, and is backwards
compatible for normalized timestamps which do not include an offset.
"""
def __init__(self, timestamp, offset=0, delta=0):
"""
Create a new Timestamp.
:param timestamp: time in seconds since the Epoch, may be any of:
* a float or integer
* normalized/internalized string
* another instance of this class (offset is preserved)
:param offset: the second internal offset vector, an int
:param delta: deca-microsecond difference from the base timestamp
param, an int
"""
if isinstance(timestamp, six.string_types):
parts = timestamp.split('_', 1)
self.timestamp = float(parts.pop(0))
if parts:
self.offset = int(parts[0], 16)
else:
self.offset = 0
else:
self.timestamp = float(timestamp)
self.offset = getattr(timestamp, 'offset', 0)
# increment offset
if offset >= 0:
self.offset += offset
else:
raise ValueError('offset must be non-negative')
if self.offset > MAX_OFFSET:
raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
self.raw = int(round(self.timestamp / PRECISION))
# add delta
if delta:
self.raw = self.raw + delta
if self.raw <= 0:
raise ValueError(
'delta must be greater than %d' % (-1 * self.raw))
self.timestamp = float(self.raw * PRECISION)
if self.timestamp < 0:
raise ValueError('timestamp cannot be negative')
if self.timestamp >= 10000000000:
raise ValueError('timestamp too large')
def __repr__(self):
return INTERNAL_FORMAT % (self.timestamp, self.offset)
def __str__(self):
raise TypeError('You must specify which string format is required')
def __float__(self):
return self.timestamp
def __int__(self):
return int(self.timestamp)
def __nonzero__(self):
return bool(self.timestamp or self.offset)
def __bool__(self):
return self.__nonzero__()
@property
def normal(self):
return NORMAL_FORMAT % self.timestamp
@property
def internal(self):
if self.offset or FORCE_INTERNAL:
return INTERNAL_FORMAT % (self.timestamp, self.offset)
else:
return self.normal
@property
def short(self):
if self.offset or FORCE_INTERNAL:
return SHORT_FORMAT % (self.timestamp, self.offset)
else:
return self.normal
@property
def isoformat(self):
t = float(self.normal)
if six.PY3:
# On Python 3, round manually using ROUND_HALF_EVEN rounding
# method, to use the same rounding method than Python 2. Python 3
# used a different rounding method, but Python 3.4.4 and 3.5.1 use
# again ROUND_HALF_EVEN as Python 2.
# See https://bugs.python.org/issue23517
frac, t = math.modf(t)
us = round(frac * 1e6)
if us >= 1000000:
t += 1
us -= 1000000
elif us < 0:
t -= 1
us += 1000000
dt = datetime.datetime.utcfromtimestamp(t)
dt = dt.replace(microsecond=us)
else:
dt = datetime.datetime.utcfromtimestamp(t)
isoformat = dt.isoformat()
# python isoformat() doesn't include msecs when zero
if len(isoformat) < len("1970-01-01T00:00:00.000000"):
isoformat += ".000000"
return isoformat
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Timestamp):
other = Timestamp(other)
return self.internal == other.internal
def __ne__(self, other):
if other is None:
return True
if not isinstance(other, Timestamp):
other = Timestamp(other)
return self.internal != other.internal
def __lt__(self, other):
if other is None:
return False
if not isinstance(other, Timestamp):
other = Timestamp(other)
return self.internal < other.internal
def __hash__(self):
return hash(self.internal)
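# Illustrative sketch (editor addition): the normalized form is what clients
# see, the internal form carries the offset used for same-timestamp
# overwrites, and comparisons work across the two representations.
def _example_timestamp():
    t = Timestamp(1402464677.04188)
    assert t.normal == '1402464677.04188'
    assert t.internal == t.normal                # zero offset -> no suffix
    t_off = Timestamp(t, offset=3)
    assert t_off.internal == '1402464677.04188_0000000000000003'
    assert t_off > t and float(t_off) == float(t)
    return t, t_off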
def encode_timestamps(t1, t2=None, t3=None, explicit=False):
"""
Encode up to three timestamps into a string. Unlike a Timestamp object, the
    encoded string does NOT use fixed-width fields and consequently no
relative chronology of the timestamps can be inferred from lexicographic
sorting of encoded timestamp strings.
The format of the encoded string is:
<t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
otherwise the time offsets for t2 and t3 are appended. If explicit is True
then the offsets for t2 and t3 are always appended even if zero.
Note: any offset value in t1 will be preserved, but offsets on t2 and t3
are not preserved. In the anticipated use cases for this method (and the
inverse decode_timestamps method) the timestamps passed as t2 and t3 are
not expected to have offsets as they will be timestamps associated with a
POST request. In the case where the encoding is used in a container objects
table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
content type and metadata times (if different from the data file) i.e.
correspond to POST timestamps. In the case where the encoded form is used
in a .meta file name, t1 and t2 both correspond to POST timestamps.
"""
form = '{0}'
values = [t1.short]
if t2 is not None:
t2_t1_delta = t2.raw - t1.raw
explicit = explicit or (t2_t1_delta != 0)
values.append(t2_t1_delta)
if t3 is not None:
t3_t2_delta = t3.raw - t2.raw
explicit = explicit or (t3_t2_delta != 0)
values.append(t3_t2_delta)
if explicit:
form += '{1:+x}'
if t3 is not None:
form += '{2:+x}'
return form.format(*values)
def decode_timestamps(encoded, explicit=False):
"""
Parses a string of the form generated by encode_timestamps and returns
a tuple of the three component timestamps. If explicit is False, component
timestamps that are not explicitly encoded will be assumed to have zero
delta from the previous component and therefore take the value of the
previous component. If explicit is True, component timestamps that are
not explicitly encoded will be returned with value None.
"""
# TODO: some tests, e.g. in test_replicator, put float timestamps values
# into container db's, hence this defensive check, but in real world
# this may never happen.
    if not isinstance(encoded, six.string_types):
ts = Timestamp(encoded)
return ts, ts, ts
parts = []
signs = []
pos_parts = encoded.split('+')
for part in pos_parts:
# parse time components and their signs
# e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
neg_parts = part.split('-')
parts = parts + neg_parts
signs = signs + [1] + [-1] * (len(neg_parts) - 1)
t1 = Timestamp(parts[0])
t2 = t3 = None
if len(parts) > 1:
t2 = t1
delta = signs[1] * int(parts[1], 16)
# if delta = 0 we want t2 = t3 = t1 in order to
# preserve any offset in t1 - only construct a distinct
# timestamp if there is a non-zero delta.
if delta:
t2 = Timestamp((t1.raw + delta) * PRECISION)
elif not explicit:
t2 = t1
if len(parts) > 2:
t3 = t2
delta = signs[2] * int(parts[2], 16)
if delta:
t3 = Timestamp((t2.raw + delta) * PRECISION)
elif not explicit:
t3 = t2
return t1, t2, t3
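# Illustrative sketch (editor addition): round-tripping a data timestamp plus
# a slightly later content-type timestamp through the variable-length
# encoding; the non-zero delta appears as a signed hex offset appended to t1.
def _example_encode_decode_timestamps():
    t1 = Timestamp(1402464677.04188)
    t2 = Timestamp(1402464677.04190)
    encoded = encode_timestamps(t1, t2)      # e.g. '1402464677.04188+2'
    d1, d2, d3 = decode_timestamps(encoded)
    assert d1 == t1 and d2 == t2 and d3 == t2
    return encoded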
def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return Timestamp(timestamp).normal
EPOCH = datetime.datetime(1970, 1, 1)
def last_modified_date_to_timestamp(last_modified_date_str):
"""
Convert a last modified date (like you'd get from a container listing,
    e.g. 2014-02-28T23:22:36.698390) to a Timestamp (a float-like value).
"""
start = datetime.datetime.strptime(last_modified_date_str,
'%Y-%m-%dT%H:%M:%S.%f')
delta = start - EPOCH
# This calculation is based on Python 2.7's Modules/datetimemodule.c,
# function delta_to_microseconds(), but written in Python.
return Timestamp(delta.total_seconds())
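# Illustrative sketch (editor addition): converting a container-listing style
# last-modified string into a Timestamp (float-like) value.
def _example_last_modified_to_timestamp():
    ts = last_modified_date_to_timestamp('2014-02-28T23:22:36.698390')
    return float(ts)     # roughly 1393629756.69839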
def normalize_delete_at_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx (10) format.
Note that timestamps less than 0000000000 are raised to
0000000000 and values greater than November 20th, 2286 at
17:46:39 UTC will be capped at that date and time, resulting in
no return value exceeding 9999999999.
This cap is because the expirer is already working through a
sorted list of strings that were all a length of 10. Adding
another digit would mess up the sort and cause the expirer to
break from processing early. By 2286, this problem will need to
be fixed, probably by creating an additional .expiring_objects
account to work from with 11 (or more) digit container names.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return '%010d' % min(max(0, float(timestamp)), 9999999999)
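# Editor's illustrative examples of the clamping described above:
def _example_normalize_delete_at():
    assert normalize_delete_at_timestamp(1402464677.04188) == '1402464677'
    assert normalize_delete_at_timestamp(-5) == '0000000000'
    assert normalize_delete_at_timestamp(99999999999) == '9999999999'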
def mkdirs(path):
"""
Ensures the path is a directory or makes it if not. Errors if the path
exists but is a file or on permissions failure.
:param path: path to create
"""
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST or not os.path.isdir(path):
raise
def makedirs_count(path, count=0):
"""
Same as os.makedirs() except that this method returns the number of
new directories that had to be created.
Also, this does not raise an error if target directory already exists.
This behaviour is similar to Python 3.x's os.makedirs() called with
exist_ok=True. Also similar to swift.common.utils.mkdirs()
https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212
"""
head, tail = os.path.split(path)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
count = makedirs_count(head, count)
if tail == os.path.curdir:
return
try:
os.mkdir(path)
except OSError as e:
# EEXIST may also be raised if path exists as a file
# Do not let that pass.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
else:
count += 1
return count
def renamer(old, new, fsync=True):
"""
Attempt to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
The containing directory of 'new' and of all newly created directories are
fsync'd by default. This _will_ come at a performance penalty. In cases
where these additional fsyncs are not necessary, it is expected that the
caller of renamer() turn it off explicitly.
:param old: old path to be renamed
:param new: new path to be renamed to
:param fsync: fsync on containing directory of new and also all
the newly created directories.
"""
dirpath = os.path.dirname(new)
try:
count = makedirs_count(dirpath)
os.rename(old, new)
except OSError:
count = makedirs_count(dirpath)
os.rename(old, new)
if fsync:
# If count=0, no new directories were created. But we still need to
# fsync leaf dir after os.rename().
# If count>0, starting from leaf dir, fsync parent dirs of all
# directories created by makedirs_count()
for i in range(0, count + 1):
fsync_dir(dirpath)
dirpath = os.path.dirname(dirpath)
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
"""
Validate and split the given HTTP request path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param path: HTTP Request path to be split
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises: ValueError if given an invalid path
"""
if not maxsegs:
maxsegs = minsegs
if minsegs > maxsegs:
raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
if rest_with_last:
segs = path.split('/', maxsegs)
minsegs += 1
maxsegs += 1
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs or
'' in segs[1:minsegs]):
raise ValueError('Invalid path: %s' % quote(path))
else:
minsegs += 1
maxsegs += 1
segs = path.split('/', maxsegs)
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs + 1 or
'' in segs[1:minsegs] or
(count == maxsegs + 1 and segs[maxsegs])):
raise ValueError('Invalid path: %s' % quote(path))
segs = segs[1:maxsegs]
segs.extend([None] * (maxsegs - 1 - len(segs)))
return segs
def validate_device_partition(device, partition):
"""
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises: ValueError if given an invalid device or partition
"""
if not device or '/' in device or device in ['.', '..']:
raise ValueError('Invalid device: %s' % quote(device or ''))
if not partition or '/' in partition or partition in ['.', '..']:
raise ValueError('Invalid partition: %s' % quote(partition or ''))
class RateLimitedIterator(object):
"""
Wrap an iterator to only yield elements at a rate of N per second.
:param iterable: iterable to wrap
:param elements_per_second: the rate at which to yield elements
:param limit_after: rate limiting kicks in only after yielding
this many elements; default is 0 (rate limit
immediately)
"""
def __init__(self, iterable, elements_per_second, limit_after=0,
ratelimit_if=lambda _junk: True):
self.iterator = iter(iterable)
self.elements_per_second = elements_per_second
self.limit_after = limit_after
self.running_time = 0
self.ratelimit_if = ratelimit_if
def __iter__(self):
return self
def next(self):
next_value = next(self.iterator)
if self.ratelimit_if(next_value):
if self.limit_after > 0:
self.limit_after -= 1
else:
self.running_time = ratelimit_sleep(self.running_time,
self.elements_per_second)
return next_value
__next__ = next
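# Illustrative sketch (editor addition): yielding at most ~10 items per second
# from some iterable of work. Relies on ratelimit_sleep(), which is assumed to
# be defined elsewhere in this module.
def _example_rate_limited_iteration(work_items):
    for item in RateLimitedIterator(work_items, elements_per_second=10):
        # process the item here; the iterator itself sleeps as needed to keep
        # the overall rate at or below 10 per second
        pass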
class GreenthreadSafeIterator(object):
"""
Wrap an iterator to ensure that only one greenthread is inside its next()
method at a time.
This is useful if an iterator's next() method may perform network IO, as
that may trigger a greenthread context switch (aka trampoline), which can
give another greenthread a chance to call next(). At that point, you get
an error like "ValueError: generator already executing". By wrapping calls
to next() with a mutex, we avoid that error.
"""
def __init__(self, unsafe_iterable):
self.unsafe_iter = iter(unsafe_iterable)
self.semaphore = eventlet.semaphore.Semaphore(value=1)
def __iter__(self):
return self
def next(self):
with self.semaphore:
return next(self.unsafe_iter)
__next__ = next
class NullLogger(object):
"""A no-op logger for eventlet wsgi."""
def write(self, *args):
# "Logs" the args to nowhere
pass
class LoggerFileObject(object):
# Note: this is greenthread-local storage
_cls_thread_local = threading.local()
def __init__(self, logger, log_type='STDOUT'):
self.logger = logger
self.log_type = log_type
def write(self, value):
# We can get into a nasty situation when logs are going to syslog
# and syslog dies.
#
# It's something like this:
#
# (A) someone logs something
#
# (B) there's an exception in sending to /dev/log since syslog is
# not working
#
# (C) logging takes that exception and writes it to stderr (see
# logging.Handler.handleError)
#
# (D) stderr was replaced with a LoggerFileObject at process start,
# so the LoggerFileObject takes the provided string and tells
# its logger to log it (to syslog, naturally).
#
# Then, steps B through D repeat until we run out of stack.
if getattr(self._cls_thread_local, 'already_called_write', False):
return
self._cls_thread_local.already_called_write = True
try:
value = value.strip()
if value:
if 'Connection reset by peer' in value:
self.logger.error(
_('%s: Connection reset by peer'), self.log_type)
else:
self.logger.error(_('%(type)s: %(value)s'),
{'type': self.log_type, 'value': value})
finally:
self._cls_thread_local.already_called_write = False
def writelines(self, values):
if getattr(self._cls_thread_local, 'already_called_writelines', False):
return
self._cls_thread_local.already_called_writelines = True
try:
self.logger.error(_('%(type)s: %(value)s'),
{'type': self.log_type,
'value': '#012'.join(values)})
finally:
self._cls_thread_local.already_called_writelines = False
def close(self):
pass
def flush(self):
pass
def __iter__(self):
return self
def next(self):
raise IOError(errno.EBADF, 'Bad file descriptor')
__next__ = next
def read(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def readline(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def tell(self):
return 0
def xreadlines(self):
return self
class StatsdClient(object):
def __init__(self, host, port, base_prefix='', tail_prefix='',
default_sample_rate=1, sample_rate_factor=1, logger=None):
self._host = host
self._port = port
self._base_prefix = base_prefix
self.set_prefix(tail_prefix)
self._default_sample_rate = default_sample_rate
self._sample_rate_factor = sample_rate_factor
self.random = random
self.logger = logger
# Determine if host is IPv4 or IPv6
addr_info = None
try:
addr_info = socket.getaddrinfo(host, port, socket.AF_INET)
self._sock_family = socket.AF_INET
except socket.gaierror:
try:
addr_info = socket.getaddrinfo(host, port, socket.AF_INET6)
self._sock_family = socket.AF_INET6
except socket.gaierror:
# Don't keep the server from starting from what could be a
# transient DNS failure. Any hostname will get re-resolved as
# necessary in the .sendto() calls.
# However, we don't know if we're IPv4 or IPv6 in this case, so
# we assume legacy IPv4.
self._sock_family = socket.AF_INET
# NOTE: we use the original host value, not the DNS-resolved one
# because if host is a hostname, we don't want to cache the DNS
# resolution for the entire lifetime of this process. Let standard
# name resolution caching take effect. This should help operators use
# DNS trickery if they want.
if addr_info is not None:
# addr_info is a list of 5-tuples with the following structure:
# (family, socktype, proto, canonname, sockaddr)
# where sockaddr is the only thing of interest to us, and we only
# use the first result. We want to use the originally supplied
# host (see note above) and the remainder of the variable-length
# sockaddr: IPv4 has (address, port) while IPv6 has (address,
# port, flow info, scope id).
sockaddr = addr_info[0][-1]
self._target = (host,) + (sockaddr[1:])
else:
self._target = (host, port)
def set_prefix(self, new_prefix):
if new_prefix and self._base_prefix:
self._prefix = '.'.join([self._base_prefix, new_prefix, ''])
elif new_prefix:
self._prefix = new_prefix + '.'
elif self._base_prefix:
self._prefix = self._base_prefix + '.'
else:
self._prefix = ''
def _send(self, m_name, m_value, m_type, sample_rate):
if sample_rate is None:
sample_rate = self._default_sample_rate
sample_rate = sample_rate * self._sample_rate_factor
parts = ['%s%s:%s' % (self._prefix, m_name, m_value), m_type]
if sample_rate < 1:
if self.random() < sample_rate:
parts.append('@%s' % (sample_rate,))
else:
return
if six.PY3:
parts = [part.encode('utf-8') for part in parts]
# Ideally, we'd cache a sending socket in self, but that
# results in a socket getting shared by multiple green threads.
with closing(self._open_socket()) as sock:
try:
return sock.sendto(b'|'.join(parts), self._target)
except IOError as err:
if self.logger:
self.logger.warning(
'Error sending UDP message to %r: %s',
self._target, err)
def _open_socket(self):
return socket.socket(self._sock_family, socket.SOCK_DGRAM)
def update_stats(self, m_name, m_value, sample_rate=None):
return self._send(m_name, m_value, 'c', sample_rate)
def increment(self, metric, sample_rate=None):
return self.update_stats(metric, 1, sample_rate)
def decrement(self, metric, sample_rate=None):
return self.update_stats(metric, -1, sample_rate)
def timing(self, metric, timing_ms, sample_rate=None):
return self._send(metric, timing_ms, 'ms', sample_rate)
def timing_since(self, metric, orig_time, sample_rate=None):
return self.timing(metric, (time.time() - orig_time) * 1000,
sample_rate)
def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None):
if byte_xfer:
return self.timing(metric,
elapsed_time * 1000 / byte_xfer * 1000,
sample_rate)
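# Illustrative sketch (editor addition): emitting a few metrics directly with
# StatsdClient. Host, port and metric names are made up; in practice the
# client is normally attached to a logger by get_logger() below.
def _example_statsd_client():
    client = StatsdClient('127.0.0.1', 8125, base_prefix='swift',
                          tail_prefix='proxy-server')
    client.increment('errors')               # counter swift.proxy-server.errors
    client.timing('GET.timing', 123.4)       # timer value in milliseconds
    client.update_stats('bytes.xfer', 4096)  # arbitrary counter delta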
def server_handled_successfully(status_int):
"""
True for successful responses *or* error codes that are not Swift's fault,
False otherwise. For example, 500 is definitely the server's fault, but
412 is an error code (4xx are all errors) that is due to a header the
client sent.
If one is tracking error rates to monitor server health, one would be
advised to use a function like this one, lest a client cause a flurry of
404s or 416s and make a spurious spike in your errors graph.
"""
return (is_success(status_int) or
is_redirection(status_int) or
status_int == HTTP_NOT_FOUND or
status_int == HTTP_PRECONDITION_FAILED or
status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE)
def timing_stats(**dec_kwargs):
"""
Returns a decorator that logs timing events or errors for public methods in
swift's wsgi server controllers, based on response code.
"""
def decorating_func(func):
method = func.__name__
@functools.wraps(func)
def _timing_stats(ctrl, *args, **kwargs):
start_time = time.time()
resp = func(ctrl, *args, **kwargs)
if server_handled_successfully(resp.status_int):
ctrl.logger.timing_since(method + '.timing',
start_time, **dec_kwargs)
else:
ctrl.logger.timing_since(method + '.errors.timing',
start_time, **dec_kwargs)
return resp
return _timing_stats
return decorating_func
# double inheritance to support property with setter
class LogAdapter(logging.LoggerAdapter, object):
"""
A Logger like object which performs some reformatting on calls to
:meth:`exception`. Can be used to store a threadlocal transaction id and
client ip.
"""
_cls_thread_local = threading.local()
def __init__(self, logger, server):
logging.LoggerAdapter.__init__(self, logger, {})
self.server = server
self.warn = self.warning
@property
def txn_id(self):
if hasattr(self._cls_thread_local, 'txn_id'):
return self._cls_thread_local.txn_id
@txn_id.setter
def txn_id(self, value):
self._cls_thread_local.txn_id = value
@property
def client_ip(self):
if hasattr(self._cls_thread_local, 'client_ip'):
return self._cls_thread_local.client_ip
@client_ip.setter
def client_ip(self, value):
self._cls_thread_local.client_ip = value
@property
def thread_locals(self):
return (self.txn_id, self.client_ip)
@thread_locals.setter
def thread_locals(self, value):
self.txn_id, self.client_ip = value
def getEffectiveLevel(self):
return self.logger.getEffectiveLevel()
def process(self, msg, kwargs):
"""
Add extra info to message
"""
kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id,
'client_ip': self.client_ip}
return msg, kwargs
def notice(self, msg, *args, **kwargs):
"""
        Convenience function for syslog priority LOG_NOTICE. The python
        logging level is set to 25, just above info. SysLogHandler is
        monkey patched to map this log level to the LOG_NOTICE syslog
priority.
"""
self.log(NOTICE, msg, *args, **kwargs)
def _exception(self, msg, *args, **kwargs):
logging.LoggerAdapter.exception(self, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
_junk, exc, _junk = sys.exc_info()
call = self.error
emsg = ''
if isinstance(exc, (OSError, socket.error)):
if exc.errno in (errno.EIO, errno.ENOSPC):
emsg = str(exc)
elif exc.errno == errno.ECONNREFUSED:
emsg = _('Connection refused')
elif exc.errno == errno.EHOSTUNREACH:
emsg = _('Host unreachable')
elif exc.errno == errno.ETIMEDOUT:
emsg = _('Connection timeout')
else:
call = self._exception
elif isinstance(exc, eventlet.Timeout):
emsg = exc.__class__.__name__
if hasattr(exc, 'seconds'):
emsg += ' (%ss)' % exc.seconds
if isinstance(exc, swift.common.exceptions.MessageTimeout):
if exc.msg:
emsg += ' %s' % exc.msg
else:
call = self._exception
call('%s: %s' % (msg, emsg), *args, **kwargs)
def set_statsd_prefix(self, prefix):
"""
The StatsD client prefix defaults to the "name" of the logger. This
method may override that default with a specific value. Currently used
in the proxy-server to differentiate the Account, Container, and Object
controllers.
"""
if self.logger.statsd_client:
self.logger.statsd_client.set_prefix(prefix)
def statsd_delegate(statsd_func_name):
"""
Factory to create methods which delegate to methods on
self.logger.statsd_client (an instance of StatsdClient). The
created methods conditionally delegate to a method whose name is given
in 'statsd_func_name'. The created delegate methods are a no-op when
StatsD logging is not configured.
:param statsd_func_name: the name of a method on StatsdClient.
"""
func = getattr(StatsdClient, statsd_func_name)
@functools.wraps(func)
def wrapped(self, *a, **kw):
if getattr(self.logger, 'statsd_client'):
return func(self.logger.statsd_client, *a, **kw)
return wrapped
update_stats = statsd_delegate('update_stats')
increment = statsd_delegate('increment')
decrement = statsd_delegate('decrement')
timing = statsd_delegate('timing')
timing_since = statsd_delegate('timing_since')
transfer_rate = statsd_delegate('transfer_rate')
class SwiftLogFormatter(logging.Formatter):
"""
Custom logging.Formatter will append txn_id to a log message if the
record has one and the message does not. Optionally it can shorten
overly long log lines.
"""
def __init__(self, fmt=None, datefmt=None, max_line_length=0):
logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)
self.max_line_length = max_line_length
def format(self, record):
if not hasattr(record, 'server'):
# Catch log messages that were not initiated by swift
# (for example, the keystone auth middleware)
record.server = record.name
# Included from Python's logging.Formatter and then altered slightly to
# replace \n with #012
record.message = record.getMessage()
if self._fmt.find('%(asctime)') >= 0:
record.asctime = self.formatTime(record, self.datefmt)
msg = (self._fmt % record.__dict__).replace('\n', '#012')
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(
record.exc_info).replace('\n', '#012')
if record.exc_text:
if not msg.endswith('#012'):
msg = msg + '#012'
msg = msg + record.exc_text
if (hasattr(record, 'txn_id') and record.txn_id and
record.levelno != logging.INFO and
record.txn_id not in msg):
msg = "%s (txn: %s)" % (msg, record.txn_id)
if (hasattr(record, 'client_ip') and record.client_ip and
record.levelno != logging.INFO and
record.client_ip not in msg):
msg = "%s (client_ip: %s)" % (msg, record.client_ip)
if self.max_line_length > 0 and len(msg) > self.max_line_length:
if self.max_line_length < 7:
msg = msg[:self.max_line_length]
else:
approxhalf = (self.max_line_length - 5) // 2
msg = msg[:approxhalf] + " ... " + msg[-approxhalf:]
return msg
def get_logger(conf, name=None, log_to_console=False, log_route=None,
fmt="%(server)s: %(message)s"):
"""
Get the current system logger using config settings.
**Log config and defaults**::
log_facility = LOG_LOCAL0
log_level = INFO
log_name = swift
log_max_line_length = 0
log_udp_host = (disabled)
log_udp_port = logging.handlers.SYSLOG_UDP_PORT
log_address = /dev/log
log_statsd_host = (disabled)
log_statsd_port = 8125
log_statsd_default_sample_rate = 1.0
log_statsd_sample_rate_factor = 1.0
log_statsd_metric_prefix = (empty-string)
:param conf: Configuration dict to read settings from
:param name: Name of the logger
:param log_to_console: Add handler which writes to console on stderr
:param log_route: Route for the logging, not emitted to the log, just used
to separate logging configurations
:param fmt: Override log format
"""
if not conf:
conf = {}
if name is None:
name = conf.get('log_name', 'swift')
if not log_route:
log_route = name
logger = logging.getLogger(log_route)
logger.propagate = False
# all new handlers will get the same formatter
formatter = SwiftLogFormatter(
fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0)))
# get_logger will only ever add one SysLog Handler to a logger
if not hasattr(get_logger, 'handler4logger'):
get_logger.handler4logger = {}
if logger in get_logger.handler4logger:
logger.removeHandler(get_logger.handler4logger[logger])
# facility for this logger will be set by last call wins
facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
SysLogHandler.LOG_LOCAL0)
udp_host = conf.get('log_udp_host')
if udp_host:
udp_port = int(conf.get('log_udp_port',
logging.handlers.SYSLOG_UDP_PORT))
handler = SysLogHandler(address=(udp_host, udp_port),
facility=facility)
else:
log_address = conf.get('log_address', '/dev/log')
try:
handler = SysLogHandler(address=log_address, facility=facility)
except socket.error as e:
# Either /dev/log isn't a UNIX socket or it does not exist at all
if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
raise e
handler = SysLogHandler(facility=facility)
handler.setFormatter(formatter)
logger.addHandler(handler)
get_logger.handler4logger[logger] = handler
# setup console logging
if log_to_console or hasattr(get_logger, 'console_handler4logger'):
# remove pre-existing console handler for this logger
if not hasattr(get_logger, 'console_handler4logger'):
get_logger.console_handler4logger = {}
if logger in get_logger.console_handler4logger:
logger.removeHandler(get_logger.console_handler4logger[logger])
console_handler = logging.StreamHandler(sys.__stderr__)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
get_logger.console_handler4logger[logger] = console_handler
# set the level for the logger
logger.setLevel(
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
# Setup logger with a StatsD client if so configured
statsd_host = conf.get('log_statsd_host')
if statsd_host:
statsd_port = int(conf.get('log_statsd_port', 8125))
base_prefix = conf.get('log_statsd_metric_prefix', '')
default_sample_rate = float(conf.get(
'log_statsd_default_sample_rate', 1))
sample_rate_factor = float(conf.get(
'log_statsd_sample_rate_factor', 1))
statsd_client = StatsdClient(statsd_host, statsd_port, base_prefix,
name, default_sample_rate,
sample_rate_factor, logger=logger)
logger.statsd_client = statsd_client
else:
logger.statsd_client = None
adapted_logger = LogAdapter(logger, name)
other_handlers = conf.get('log_custom_handlers', None)
if other_handlers:
log_custom_handlers = [s.strip() for s in other_handlers.split(',')
if s.strip()]
for hook in log_custom_handlers:
try:
mod, fnc = hook.rsplit('.', 1)
logger_hook = getattr(__import__(mod, fromlist=[fnc]), fnc)
logger_hook(conf, name, log_to_console, log_route, fmt,
logger, adapted_logger)
except (AttributeError, ImportError):
print('Error calling custom handler [%s]' % hook,
file=sys.stderr)
except ValueError:
print('Invalid custom handler format [%s]' % hook,
file=sys.stderr)
return adapted_logger
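# Illustrative usage sketch (not part of the original module): a daemon would
# typically build its logger from its parsed config section, e.g.
#   logger = get_logger({'log_name': 'my-daemon', 'log_level': 'DEBUG'},
#                       log_to_console=True)
#   logger.info('starting up')
# The dict keys mirror the documented config settings above; the 'my-daemon'
# name is an assumed example value.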
def get_hub():
"""
Checks whether poll is available and falls back
on select if it isn't.
Note about epoll:
Review: https://review.openstack.org/#/c/18806/
There was a problem where once out of every 30 quadrillion
connections, a coroutine wouldn't wake up when the client
closed its end. Epoll was not reporting the event or it was
getting swallowed somewhere. Then when that file descriptor
was re-used, eventlet would freak right out because it still
thought it was waiting for activity from it in some other coro.
"""
try:
import select
if hasattr(select, "poll"):
return "poll"
return "selects"
except ImportError:
return None
def drop_privileges(user, call_setsid=True):
"""
Sets the userid/groupid of the current process, becomes the session leader, etc.
:param user: User name to change privileges to
:param call_setsid: if True, also call os.setsid() to become a session leader
"""
if os.geteuid() == 0:
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
os.setgroups(groups)
user = pwd.getpwnam(user)
os.setgid(user[3])
os.setuid(user[2])
os.environ['HOME'] = user[5]
if call_setsid:
try:
os.setsid()
except OSError:
pass
os.chdir('/') # in case you need to rmdir on where you started the daemon
os.umask(0o22) # ensure files are created with the correct privileges
def capture_stdio(logger, **kwargs):
"""
Log unhandled exceptions, close stdio, capture stdout and stderr.
:param logger: Logger object to use
"""
# log uncaught exceptions
sys.excepthook = lambda * exc_info: \
logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info)
# collect stdio file desc not in use for logging
stdio_files = [sys.stdin, sys.stdout, sys.stderr]
console_fds = [h.stream.fileno() for _junk, h in getattr(
get_logger, 'console_handler4logger', {}).items()]
stdio_files = [f for f in stdio_files if f.fileno() not in console_fds]
with open(os.devnull, 'r+b') as nullfile:
# close stdio (excludes fds open for logging)
for f in stdio_files:
# some platforms throw an error when attempting an stdin flush
try:
f.flush()
except IOError:
pass
try:
os.dup2(nullfile.fileno(), f.fileno())
except OSError:
pass
# redirect stdio
if kwargs.pop('capture_stdout', True):
sys.stdout = LoggerFileObject(logger)
if kwargs.pop('capture_stderr', True):
sys.stderr = LoggerFileObject(logger, 'STDERR')
def parse_options(parser=None, once=False, test_args=None):
"""
Parse standard swift server/daemon options with optparse.OptionParser.
:param parser: OptionParser to use. If not sent one will be created.
:param once: Boolean indicating the "once" option is available
:param test_args: Override sys.argv; used in testing
:returns: Tuple of (config, options); config is an absolute path to the
config file, options is the parser options as a dictionary.
:raises SystemExit: First arg (CONFIG) is required, file must exist
"""
if not parser:
parser = OptionParser(usage="%prog CONFIG [options]")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="log to console")
if once:
parser.add_option("-o", "--once", default=False, action="store_true",
help="only run one pass of daemon")
# if test_args is None, optparse will use sys.argv[:1]
options, args = parser.parse_args(args=test_args)
if not args:
parser.print_usage()
print(_("Error: missing config path argument"))
sys.exit(1)
config = os.path.abspath(args.pop(0))
if not os.path.exists(config):
parser.print_usage()
print(_("Error: unable to locate %s") % config)
sys.exit(1)
extra_args = []
# if any named options appear in remaining args, set the option to True
for arg in args:
if arg in options.__dict__:
setattr(options, arg, True)
else:
extra_args.append(arg)
options = vars(options)
if extra_args:
options['extra_args'] = extra_args
return config, options
def expand_ipv6(address):
"""
Expand an ipv6 address.
:param address: a string indicating a valid ipv6 address
:returns: a string indicating the fully expanded ipv6 address
"""
packed_ip = socket.inet_pton(socket.AF_INET6, address)
return socket.inet_ntop(socket.AF_INET6, packed_ip)
def whataremyips(bind_ip=None):
"""
Get "our" IP addresses ("us" being the set of services configured by
one `*.conf` file). If our REST listens on a specific address, return it.
Otherwise, if it listens on '0.0.0.0' or '::', return all addresses, including
the loopback.
:param str bind_ip: Optional bind_ip from a config file; may be IP address
or hostname.
:returns: list of Strings of ip addresses
"""
if bind_ip:
# See if bind_ip is '0.0.0.0'/'::'
try:
_, _, _, _, sockaddr = socket.getaddrinfo(
bind_ip, None, 0, socket.SOCK_STREAM, 0,
socket.AI_NUMERICHOST)[0]
if sockaddr[0] not in ('0.0.0.0', '::'):
return [bind_ip]
except socket.gaierror:
pass
addresses = []
for interface in netifaces.interfaces():
try:
iface_data = netifaces.ifaddresses(interface)
for family in iface_data:
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for address in iface_data[family]:
addr = address['addr']
# If we have an ipv6 address remove the
# %ether_interface at the end
if family == netifaces.AF_INET6:
addr = expand_ipv6(addr.split('%')[0])
addresses.append(addr)
except ValueError:
pass
return addresses
def parse_socket_string(socket_string, default_port):
"""
Given a string representing a socket, returns a tuple of (host, port).
Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an
optional port. If an IPv6 address is specified it **must** be enclosed in
[], like *[::1]* or *[::1]:11211*. This follows the accepted prescription
for `IPv6 host literals`_.
Examples::
server.org
server.org:1337
127.0.0.1:1337
[::1]:1337
[::1]
.. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2
"""
port = default_port
# IPv6 addresses must be between '[]'
if socket_string.startswith('['):
match = IPV6_RE.match(socket_string)
if not match:
raise ValueError("Invalid IPv6 address: %s" % socket_string)
host = match.group('address')
port = match.group('port') or port
else:
if ':' in socket_string:
tokens = socket_string.split(':')
if len(tokens) > 2:
raise ValueError("IPv6 addresses must be between '[]'")
host, port = tokens
else:
host = socket_string
return (host, port)
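# Illustrative examples (assumed values), following the docstring above:
#   >>> parse_socket_string('[::1]:11211', 11211)
#   ('::1', '11211')
#   >>> parse_socket_string('server.org', 11211)
#   ('server.org', 11211)
# Note that a port parsed from the string comes back as a string, while a
# defaulted port is returned as whatever type default_port was given in.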
def storage_directory(datadir, partition, name_hash):
"""
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory
"""
return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
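# Illustrative example (assumed hash value): the last three characters of the
# name hash become an intermediate "suffix" directory, e.g.
#   >>> storage_directory('objects', 1024, 'd41d8cd98f00b204e9800998ecf8427e')
#   'objects/1024/27e/d41d8cd98f00b204e9800998ecf8427e'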
def hash_path(account, container=None, object=None, raw_digest=False):
"""
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string
"""
if object and not container:
raise ValueError('container is required if object is provided')
paths = [account]
if container:
paths.append(container)
if object:
paths.append(object)
if raw_digest:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).digest()
else:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).hexdigest()
@contextmanager
def lock_path(directory, timeout=10, timeout_class=None):
"""
Context manager that acquires a lock on a directory. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
For exclusive locking, a file or directory has to be opened in write mode.
Python doesn't allow directories to be opened in write mode, so we work
around that by locking a hidden file in the directory.
:param directory: directory to be locked
:param timeout: timeout (in seconds)
:param timeout_class: The class of the exception to raise if the
lock cannot be granted within the timeout. Will be
constructed as timeout_class(timeout, lockpath). Default:
LockTimeout
"""
if timeout_class is None:
timeout_class = swift.common.exceptions.LockTimeout
mkdirs(directory)
lockpath = '%s/.lock' % directory
fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT)
sleep_time = 0.01
slower_sleep_time = max(timeout * 0.01, sleep_time)
slowdown_at = timeout * 0.01
time_slept = 0
try:
with timeout_class(timeout, lockpath):
while True:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
if time_slept > slowdown_at:
sleep_time = slower_sleep_time
sleep(sleep_time)
time_slept += sleep_time
yield True
finally:
os.close(fd)
@contextmanager
def lock_file(filename, timeout=10, append=False, unlink=True):
"""
Context manager that acquires a lock on a file. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
:param filename: file to be locked
:param timeout: timeout (in seconds)
:param append: True if file should be opened in append mode
:param unlink: True if the file should be unlinked at the end
"""
flags = os.O_CREAT | os.O_RDWR
if append:
flags |= os.O_APPEND
mode = 'a+'
else:
mode = 'r+'
while True:
fd = os.open(filename, flags)
file_obj = os.fdopen(fd, mode)
try:
with swift.common.exceptions.LockTimeout(timeout, filename):
while True:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
sleep(0.01)
try:
if os.stat(filename).st_ino != os.fstat(fd).st_ino:
continue
except OSError as err:
if err.errno == errno.ENOENT:
continue
raise
yield file_obj
if unlink:
os.unlink(filename)
break
finally:
file_obj.close()
def lock_parent_directory(filename, timeout=10):
"""
Context manager that acquires a lock on the parent directory of the given
file path. This will block until the lock can be acquired, or the timeout
time has expired (whichever occurs first).
:param filename: file path of the parent directory to be locked
:param timeout: timeout (in seconds)
"""
return lock_path(os.path.dirname(filename), timeout=timeout)
def get_time_units(time_amount):
"""
Get a normalized length of time in the largest unit of time (hours,
minutes, or seconds).
:param time_amount: length of time in seconds
:returns: A tuple of (length of time, unit of time) where unit of time is
one of ('h', 'm', 's')
"""
time_unit = 's'
if time_amount > 60:
time_amount /= 60
time_unit = 'm'
if time_amount > 60:
time_amount /= 60
time_unit = 'h'
return time_amount, time_unit
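# Illustrative example (float input assumed, to avoid integer division):
#   >>> get_time_units(7200.0)
#   (2.0, 'h')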
def compute_eta(start_time, current_value, final_value):
"""
Compute an ETA. Now only if we could also have a progress bar...
:param start_time: Unix timestamp when the operation began
:param current_value: Current value
:param final_value: Final value
:returns: ETA as a tuple of (length of time, unit of time) where unit of
time is one of ('h', 'm', 's')
"""
elapsed = time.time() - start_time
completion = (float(current_value) / final_value) or 0.00001
return get_time_units(1.0 / completion * elapsed - elapsed)
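# Illustrative sketch (not from the original module): if 25 of 100 items have
# been processed 10 seconds after start_time, the remaining work is estimated
# at roughly 30 seconds and reported via get_time_units, e.g. (30.0, 's').
#   eta, unit = compute_eta(start_time, 25, 100)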
def unlink_older_than(path, mtime):
"""
Remove any file in a given path that was last modified before mtime.
:param path: path to remove file from
:param mtime: timestamp of oldest file to keep
"""
filepaths = map(functools.partial(os.path.join, path), listdir(path))
return unlink_paths_older_than(filepaths, mtime)
def unlink_paths_older_than(filepaths, mtime):
"""
Remove any files from the given list that were
last modified before mtime.
:param filepaths: a list of strings, the full paths of files to check
:param mtime: timestamp of oldest file to keep
"""
for fpath in filepaths:
try:
if os.path.getmtime(fpath) < mtime:
os.unlink(fpath)
except OSError:
pass
def item_from_env(env, item_name, allow_none=False):
"""
Get a value from the wsgi environment
:param env: wsgi environment dict
:param item_name: name of item to get
:param allow_none: if True, do not log an error when the item is missing
:returns: the value from the environment
"""
item = env.get(item_name, None)
if item is None and not allow_none:
logging.error("ERROR: %s could not be found in env!", item_name)
return item
def cache_from_env(env, allow_none=False):
"""
Get memcache connection pool from the environment (which had been
previously set by the memcache middleware).
:param env: wsgi environment dict
:returns: swift.common.memcached.MemcacheRing from environment
"""
return item_from_env(env, 'swift.cache', allow_none)
def read_conf_dir(parser, conf_dir):
conf_files = []
for f in os.listdir(conf_dir):
if f.endswith('.conf') and not f.startswith('.'):
conf_files.append(os.path.join(conf_dir, f))
return parser.read(sorted(conf_files))
def readconf(conf_path, section_name=None, log_name=None, defaults=None,
raw=False):
"""
Read config file(s) and return config items as a dict
:param conf_path: path to config file/directory, or a file-like object
(hasattr readline)
:param section_name: config section to read (will return all sections if
not defined)
:param log_name: name to be used with logging (will use section_name if
not defined)
:param defaults: dict of default values to pre-populate the config with
:returns: dict of config items
"""
if defaults is None:
defaults = {}
if raw:
c = RawConfigParser(defaults)
else:
c = ConfigParser(defaults)
if hasattr(conf_path, 'readline'):
c.readfp(conf_path)
else:
if os.path.isdir(conf_path):
# read all configs in directory
success = read_conf_dir(c, conf_path)
else:
success = c.read(conf_path)
if not success:
print(_("Unable to read config from %s") % conf_path)
sys.exit(1)
if section_name:
if c.has_section(section_name):
conf = dict(c.items(section_name))
else:
print(_("Unable to find %(section)s config section in %(conf)s") %
{'section': section_name, 'conf': conf_path})
sys.exit(1)
if "log_name" not in conf:
if log_name is not None:
conf['log_name'] = log_name
else:
conf['log_name'] = section_name
else:
conf = {}
for s in c.sections():
conf.update({s: dict(c.items(s))})
if 'log_name' not in conf:
conf['log_name'] = log_name
conf['__file__'] = conf_path
return conf
def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
"""
Ensure that a pickle file gets written to disk. The file is first written
to a tmp location, synced to disk, then renamed to its final location.
:param obj: python object to be pickled
:param dest: path of final destination file
:param tmp: path to tmp to use, defaults to None
:param pickle_protocol: protocol to pickle the obj with, defaults to 0
"""
if tmp is None:
tmp = os.path.dirname(dest)
fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
with os.fdopen(fd, 'wb') as fo:
pickle.dump(obj, fo, pickle_protocol)
fo.flush()
os.fsync(fd)
renamer(tmppath, dest)
def search_tree(root, glob_match, ext='', exts=None, dir_ext=None):
"""Look in root, for any files/dirs matching glob, recursively traversing
any found directories looking for files ending with ext
:param root: start of search path
:param glob_match: glob to match in root, matching dirs are traversed with
os.walk
:param ext: only files that end in ext will be returned
:param exts: a list of file extensions; only files that end in one of these
extensions will be returned; if set this list overrides any
extension specified using the 'ext' param.
:param dir_ext: if present directories that end with dir_ext will not be
traversed and instead will be returned as a matched path
:returns: list of full paths to matching files, sorted
"""
exts = exts or [ext]
found_files = []
for path in glob.glob(os.path.join(root, glob_match)):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if dir_ext and root.endswith(dir_ext):
found_files.append(root)
# the root is a config dir, descend no further
break
for file_ in files:
if any(exts) and not any(file_.endswith(e) for e in exts):
continue
found_files.append(os.path.join(root, file_))
found_dir = False
for dir_ in dirs:
if dir_ext and dir_.endswith(dir_ext):
found_dir = True
found_files.append(os.path.join(root, dir_))
if found_dir:
# do not descend further into matching directories
break
else:
if ext and not path.endswith(ext):
continue
found_files.append(path)
return sorted(found_files)
def write_file(path, contents):
"""Write contents to file at path
:param path: any path, subdirs will be created as needed
:param contents: data to write to file, will be converted to string
"""
dirname, name = os.path.split(path)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as err:
if err.errno == errno.EACCES:
sys.exit('Unable to create %s. Running as '
'non-root?' % dirname)
with open(path, 'w') as f:
f.write('%s' % contents)
def remove_file(path):
"""Quiet wrapper for os.unlink, OSErrors are suppressed
:param path: first and only argument passed to os.unlink
"""
try:
os.unlink(path)
except OSError:
pass
def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None):
'''
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under the devices path. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
'''
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
for device in device_dir:
if mount_check and not ismount(os.path.join(devices, device)):
if logger:
logger.warning(
_('Skipping %s as it is not mounted'), device)
continue
datadir_path = os.path.join(devices, device, datadir)
try:
partitions = listdir(datadir_path)
except OSError as e:
if logger:
logger.warning('Skipping %s because %s', datadir_path, e)
continue
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for fname in files:
if suffix and not fname.endswith(suffix):
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
'''
Will eventlet.sleep() for the appropriate time so that the max_rate
is never exceeded. If max_rate is 0, will not ratelimit. The
maximum recommended rate should not exceed (1000 * incr_by) a second
as eventlet.sleep() does involve some overhead. Returns running_time
that should be used for subsequent calls.
:param running_time: the running time in milliseconds of the next
allowable request. Best to start at zero.
:param max_rate: The maximum rate per second allowed for the process.
:param incr_by: How much to increment the counter. Useful if you want
to ratelimit 1024 bytes/sec and have differing sizes
of requests. Must be > 0 to engage rate-limiting
behavior.
:param rate_buffer: Number of seconds the rate counter can drop and be
allowed to catch up (at a faster than listed rate).
A larger number will result in larger spikes in rate
but better average accuracy. Must be > 0 to engage
rate-limiting behavior.
'''
if max_rate <= 0 or incr_by <= 0:
return running_time
# 1,000 milliseconds = 1 second
clock_accuracy = 1000.0
# Convert seconds to milliseconds
now = time.time() * clock_accuracy
# Calculate time per request in milliseconds
time_per_request = clock_accuracy * (float(incr_by) / max_rate)
# Convert rate_buffer to milliseconds and compare
if now - running_time > rate_buffer * clock_accuracy:
running_time = now
elif running_time - now > time_per_request:
# Convert diff back to a floating point number of seconds and sleep
eventlet.sleep((running_time - now) / clock_accuracy)
# Return the absolute time for the next interval in milliseconds; note
# that time could have passed well beyond that point, but the next call
# will catch that and skip the sleep.
return running_time + time_per_request
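# Illustrative usage sketch (assumed rate and work function): the return value
# must be fed back into the next call so the limiter can track its schedule.
#   running_time = 0
#   for item in items:
#       running_time = ratelimit_sleep(running_time, max_rate=10)
#       process(item)   # hypothetical work function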
class ContextPool(GreenPool):
"GreenPool subclassed to kill its coros when it gets gc'ed"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
for coro in list(self.coroutines_running):
coro.kill()
class GreenAsyncPileWaitallTimeout(Timeout):
pass
class GreenAsyncPile(object):
"""
Runs jobs in a pool of green threads, and the results can be retrieved by
using this object as an iterator.
This is very similar in principle to eventlet.GreenPile, except it returns
results as they become available rather than in the order they were
launched.
Correlating results with jobs (if necessary) is left to the caller.
"""
def __init__(self, size_or_pool):
"""
:param size_or_pool: thread pool size or a pool to use
"""
if isinstance(size_or_pool, GreenPool):
self._pool = size_or_pool
size = self._pool.size
else:
self._pool = GreenPool(size_or_pool)
size = size_or_pool
self._responses = eventlet.queue.LightQueue(size)
self._inflight = 0
self._pending = 0
def _run_func(self, func, args, kwargs):
try:
self._responses.put(func(*args, **kwargs))
finally:
self._inflight -= 1
@property
def inflight(self):
return self._inflight
def spawn(self, func, *args, **kwargs):
"""
Spawn a job in a green thread on the pile.
"""
self._pending += 1
self._inflight += 1
self._pool.spawn(self._run_func, func, args, kwargs)
def waitfirst(self, timeout):
"""
Wait up to timeout seconds for first result to come in.
:param timeout: seconds to wait for results
:returns: first item to come back, or None
"""
for result in self._wait(timeout, first_n=1):
return result
def waitall(self, timeout):
"""
Wait timeout seconds for any results to come in.
:param timeout: seconds to wait for results
:returns: list of results accrued in that time
"""
return self._wait(timeout)
def _wait(self, timeout, first_n=None):
results = []
try:
with GreenAsyncPileWaitallTimeout(timeout):
while True:
results.append(next(self))
if first_n and len(results) >= first_n:
break
except (GreenAsyncPileWaitallTimeout, StopIteration):
pass
return results
def __iter__(self):
return self
def next(self):
try:
rv = self._responses.get_nowait()
except eventlet.queue.Empty:
if self._inflight == 0:
raise StopIteration()
rv = self._responses.get()
self._pending -= 1
return rv
__next__ = next
class ModifiedParseResult(ParseResult):
"Parse results class for urlparse."
@property
def hostname(self):
netloc = self.netloc.split('@', 1)[-1]
if netloc.startswith('['):
return netloc[1:].split(']')[0]
elif ':' in netloc:
return netloc.rsplit(':')[0]
return netloc
@property
def port(self):
netloc = self.netloc.split('@', 1)[-1]
if netloc.startswith('['):
netloc = netloc.rsplit(']')[1]
if ':' in netloc:
return int(netloc.rsplit(':')[1])
return None
def urlparse(url):
"""
urlparse augmentation.
This is necessary because urlparse can't handle RFC 2732 URLs.
:param url: URL to parse.
"""
return ModifiedParseResult(*stdlib_urlparse(url))
def validate_sync_to(value, allowed_sync_hosts, realms_conf):
"""
Validates an X-Container-Sync-To header value, returning the
validated endpoint, realm, and realm_key, or an error string.
:param value: The X-Container-Sync-To header value to validate.
:param allowed_sync_hosts: A list of allowed hosts in endpoints,
if realms_conf does not apply.
:param realms_conf: An instance of
swift.common.container_sync_realms.ContainerSyncRealms to
validate against.
:returns: A tuple of (error_string, validated_endpoint, realm,
realm_key). The error_string will be None if the rest of the
values have been validated. The validated_endpoint will be
the validated endpoint to sync to. The realm and realm_key
will be set if validation was done through realms_conf.
"""
orig_value = value
value = value.rstrip('/')
if not value:
return (None, None, None, None)
if value.startswith('//'):
if not realms_conf:
return (None, None, None, None)
data = value[2:].split('/')
if len(data) != 4:
return (
_('Invalid X-Container-Sync-To format %r') % orig_value,
None, None, None)
realm, cluster, account, container = data
realm_key = realms_conf.key(realm)
if not realm_key:
return (_('No realm key for %r') % realm, None, None, None)
endpoint = realms_conf.endpoint(realm, cluster)
if not endpoint:
return (
_('No cluster endpoint for %r %r') % (realm, cluster),
None, None, None)
return (
None,
'%s/%s/%s' % (endpoint.rstrip('/'), account, container),
realm.upper(), realm_key)
p = urlparse(value)
if p.scheme not in ('http', 'https'):
return (
_('Invalid scheme %r in X-Container-Sync-To, must be "//", '
'"http", or "https".') % p.scheme,
None, None, None)
if not p.path:
return (_('Path required in X-Container-Sync-To'), None, None, None)
if p.params or p.query or p.fragment:
return (
_('Params, queries, and fragments not allowed in '
'X-Container-Sync-To'),
None, None, None)
if p.hostname not in allowed_sync_hosts:
return (
_('Invalid host %r in X-Container-Sync-To') % p.hostname,
None, None, None)
return (None, value, None, None)
def affinity_key_function(affinity_str):
"""Turns an affinity config value into a function suitable for passing to
sort(). After doing so, the array will be sorted with respect to the given
ordering.
For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array
will be sorted with all nodes from region 1 (r1=1) first, then all the
nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything
else.
Note that the order of the pieces of affinity_str is irrelevant; the
priority values are what comes after the equals sign.
If affinity_str is empty or all whitespace, then the resulting function
will not alter the ordering of the nodes.
:param affinity_str: affinity config value, e.g. "r1z2=3"
or "r1=1, r2z1=2, r2z2=2"
:returns: single-argument function
:raises: ValueError if argument invalid
"""
affinity_str = affinity_str.strip()
if not affinity_str:
return lambda x: 0
priority_matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number>=<number> or r<number>z<number>=<number>
match = re.match("r(\d+)(?:z(\d+))?=(\d+)$", piece)
if match:
region, zone, priority = match.groups()
region = int(region)
priority = int(priority)
zone = int(zone) if zone else None
matcher = {'region': region, 'priority': priority}
if zone is not None:
matcher['zone'] = zone
priority_matchers.append(matcher)
else:
raise ValueError("Invalid affinity value: %r" % affinity_str)
priority_matchers.sort(key=operator.itemgetter('priority'))
def keyfn(ring_node):
for matcher in priority_matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return matcher['priority']
return 4294967296 # 2^32, i.e. "a big number"
return keyfn
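# Illustrative usage sketch (assumed affinity string): nodes from region 1
# sort first, then region 2 zone 2, then everything else.
#   sort_key = affinity_key_function("r1=1, r2z2=2")
#   ordered_nodes = sorted(ring_nodes, key=sort_key)   # ring_nodes assumed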
def affinity_locality_predicate(write_affinity_str):
"""
Turns a write-affinity config value into a predicate function for nodes.
The returned value will be a 1-arg function that takes a node dictionary
and returns a true value if it is "local" and a false value otherwise. The
definition of "local" comes from the affinity_str argument passed in here.
For example, if affinity_str is "r1, r2z2", then only nodes where region=1
or where (region=2 and zone=2) are considered local.
If affinity_str is empty or all whitespace, then the resulting function
will consider everything local.
:param write_affinity_str: affinity config value, e.g. "r1z2"
or "r1, r2z1, r2z2"
:returns: single-argument function, or None if affinity_str is empty
:raises: ValueError if argument invalid
"""
affinity_str = write_affinity_str.strip()
if not affinity_str:
return None
matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number> or r<number>z<number>
match = re.match("r(\d+)(?:z(\d+))?$", piece)
if match:
region, zone = match.groups()
region = int(region)
zone = int(zone) if zone else None
matcher = {'region': region}
if zone is not None:
matcher['zone'] = zone
matchers.append(matcher)
else:
raise ValueError("Invalid write-affinity value: %r" % affinity_str)
def is_local(ring_node):
for matcher in matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return True
return False
return is_local
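# Illustrative usage sketch (assumed write-affinity string): nodes in region 1,
# or in region 2 zone 2, are treated as local.
#   is_local = affinity_locality_predicate('r1, r2z2')
#   local_nodes = [n for n in ring_nodes if is_local(n)]   # ring_nodes assumed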
def get_remote_client(req):
# remote host for zeus
client = req.headers.get('x-cluster-client-ip')
if not client and 'x-forwarded-for' in req.headers:
# remote host for other lbs
client = req.headers['x-forwarded-for'].split(',')[0].strip()
if not client:
client = req.remote_addr
return client
def human_readable(value):
"""
Returns the number in a human readable format; for example 1048576 = "1Mi".
"""
value = float(value)
index = -1
suffixes = 'KMGTPEZY'
while value >= 1024 and index + 1 < len(suffixes):
index += 1
value = round(value / 1024)
if index == -1:
return '%d' % value
return '%d%si' % (round(value), suffixes[index])
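# Illustrative examples (assumed values):
#   >>> human_readable(1023)
#   '1023'
#   >>> human_readable(1048576)
#   '1Mi'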
def put_recon_cache_entry(cache_entry, key, item):
"""
Function that will check if item is a dict, and if so put it under
cache_entry[key]. We use nested recon cache entries when the object
auditor runs in parallel or else in 'once' mode with a specified
subset of devices.
"""
if isinstance(item, dict):
if key not in cache_entry or key in cache_entry and not \
isinstance(cache_entry[key], dict):
cache_entry[key] = {}
elif key in cache_entry and item == {}:
cache_entry.pop(key, None)
return
for k, v in item.items():
if v == {}:
cache_entry[key].pop(k, None)
else:
cache_entry[key][k] = v
else:
cache_entry[key] = item
def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2):
"""Update recon cache values
:param cache_dict: Dictionary of cache key/value pairs to write out
:param cache_file: cache file to update
:param logger: the logger to use to log an encountered error
:param lock_timeout: timeout (in seconds)
"""
try:
with lock_file(cache_file, lock_timeout, unlink=False) as cf:
cache_entry = {}
try:
existing_entry = cf.readline()
if existing_entry:
cache_entry = json.loads(existing_entry)
except ValueError:
# file doesn't have a valid entry, we'll recreate it
pass
for cache_key, cache_value in cache_dict.items():
put_recon_cache_entry(cache_entry, cache_key, cache_value)
tf = None
try:
with NamedTemporaryFile(dir=os.path.dirname(cache_file),
delete=False) as tf:
tf.write(json.dumps(cache_entry) + '\n')
renamer(tf.name, cache_file, fsync=False)
finally:
if tf is not None:
try:
os.unlink(tf.name)
except OSError as err:
if err.errno != errno.ENOENT:
raise
except (Exception, Timeout):
logger.exception(_('Exception dumping recon cache'))
def listdir(path):
try:
return os.listdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
return []
def streq_const_time(s1, s2):
"""Constant-time string comparison.
:param s1: the first string
:param s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks.
"""
if len(s1) != len(s2):
return False
result = 0
for (a, b) in zip(s1, s2):
result |= ord(a) ^ ord(b)
return result == 0
def pairs(item_list):
"""
Returns an iterator of all pairs of elements from item_list.
:param item_list: items (no duplicates allowed)
"""
for i, item1 in enumerate(item_list):
for item2 in item_list[(i + 1):]:
yield (item1, item2)
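# Illustrative example (assumed values):
#   >>> list(pairs(['a', 'b', 'c']))
#   [('a', 'b'), ('a', 'c'), ('b', 'c')]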
def replication(func):
"""
Decorator to declare which methods are accessible for different
types of servers:
* If option replication_server is None then this decorator
doesn't matter.
* If option replication_server is True then ONLY methods decorated
with this decorator will be started.
* If option replication_server is False then methods decorated with
this decorator will NOT be started.
:param func: function to mark accessible for replication
"""
func.replication = True
return func
def public(func):
"""
Decorator to declare which methods are publicly accessible as HTTP
requests
:param func: function to make public
"""
func.publicly_accessible = True
return func
def quorum_size(n):
"""
quorum size as it applies to services that use 'replication' for data
integrity (Account/Container services). Object quorum_size is defined
on a storage policy basis.
Number of successful backend requests needed for the proxy to consider
the client request successful.
"""
return (n // 2) + 1
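# Illustrative examples: a simple majority of the replica count.
#   >>> quorum_size(3)
#   2
#   >>> quorum_size(4)
#   3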
def rsync_ip(ip):
"""
Transform ip string to an rsync-compatible form
Will return ipv4 addresses unchanged, but will nest ipv6 addresses
inside square brackets.
:param ip: an ip string (ipv4 or ipv6)
:returns: a string ip address
"""
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error: # it's IPv4
return ip
else:
return '[%s]' % ip
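# Illustrative examples (assumed addresses):
#   >>> rsync_ip('192.168.1.1')
#   '192.168.1.1'
#   >>> rsync_ip('::1')
#   '[::1]'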
def rsync_module_interpolation(template, device):
"""
Interpolate device variables inside an rsync module template
:param template: rsync module template as a string
:param device: a device from a ring
:returns: a string with all variables replaced by device attributes
"""
replacements = {
'ip': rsync_ip(device.get('ip', '')),
'port': device.get('port', ''),
'replication_ip': rsync_ip(device.get('replication_ip', '')),
'replication_port': device.get('replication_port', ''),
'region': device.get('region', ''),
'zone': device.get('zone', ''),
'device': device.get('device', ''),
'meta': device.get('meta', ''),
}
try:
module = template.format(**replacements)
except KeyError as e:
raise ValueError('Cannot interpolate rsync_module, invalid variable: '
'%s' % e)
return module
def get_valid_utf8_str(str_or_unicode):
"""
Get the valid UTF-8 parts of a str or unicode value, even when the input
contains invalid UTF-8 sequences; the result is a UTF-8 encoded str.
:param str_or_unicode: a str or unicode value which may contain invalid
UTF-8
"""
if isinstance(str_or_unicode, six.text_type):
(str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace')
(valid_utf8_str, _len) = utf8_decoder(str_or_unicode, 'replace')
return valid_utf8_str.encode('utf-8')
def list_from_csv(comma_separated_str):
"""
Splits the str given and returns a properly stripped list of the comma
separated values.
"""
if comma_separated_str:
return [v.strip() for v in comma_separated_str.split(',') if v.strip()]
return []
def csv_append(csv_string, item):
"""
Appends an item to a comma-separated string.
If the comma-separated string is empty/None, just returns item.
"""
if csv_string:
return ",".join((csv_string, item))
else:
return item
class CloseableChain(object):
"""
Like itertools.chain, but with a close method that will attempt to invoke
its sub-iterators' close methods, if any.
"""
def __init__(self, *iterables):
self.iterables = iterables
def __iter__(self):
return iter(itertools.chain(*(self.iterables)))
def close(self):
for it in self.iterables:
close_method = getattr(it, 'close', None)
if close_method:
close_method()
def reiterate(iterable):
"""
Consume the first item from an iterator, then re-chain it to the rest of
the iterator. This is useful when you want to make sure the prologue to
downstream generators has been executed before continuing.
:param iterable: an iterable object
"""
if isinstance(iterable, (list, tuple)):
return iterable
else:
iterator = iter(iterable)
try:
chunk = ''
while not chunk:
chunk = next(iterator)
return CloseableChain([chunk], iterator)
except StopIteration:
return []
class InputProxy(object):
"""
File-like object that counts bytes read.
To be swapped in for wsgi.input for accounting purposes.
"""
def __init__(self, wsgi_input):
"""
:param wsgi_input: file-like object to wrap the functionality of
"""
self.wsgi_input = wsgi_input
self.bytes_received = 0
self.client_disconnect = False
def read(self, *args, **kwargs):
"""
Pass read request to the underlying file-like object and
add bytes read to total.
"""
try:
chunk = self.wsgi_input.read(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(chunk)
return chunk
def readline(self, *args, **kwargs):
"""
Pass readline request to the underlying file-like object and
add bytes read to total.
"""
try:
line = self.wsgi_input.readline(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(line)
return line
class LRUCache(object):
"""
Decorator for size/time bound memoization that evicts the least
recently used members.
"""
PREV, NEXT, KEY, CACHED_AT, VALUE = 0, 1, 2, 3, 4 # link fields
def __init__(self, maxsize=1000, maxtime=3600):
self.maxsize = maxsize
self.maxtime = maxtime
self.reset()
def reset(self):
self.mapping = {}
self.head = [None, None, None, None, None] # oldest
self.tail = [self.head, None, None, None, None] # newest
self.head[self.NEXT] = self.tail
def set_cache(self, value, *key):
while len(self.mapping) >= self.maxsize:
old_next, old_key = self.head[self.NEXT][self.NEXT:self.NEXT + 2]
self.head[self.NEXT], old_next[self.PREV] = old_next, self.head
del self.mapping[old_key]
last = self.tail[self.PREV]
link = [last, self.tail, key, time.time(), value]
self.mapping[key] = last[self.NEXT] = self.tail[self.PREV] = link
return value
def get_cached(self, link, *key):
link_prev, link_next, key, cached_at, value = link
if cached_at + self.maxtime < time.time():
raise KeyError('%r has timed out' % (key,))
link_prev[self.NEXT] = link_next
link_next[self.PREV] = link_prev
last = self.tail[self.PREV]
last[self.NEXT] = self.tail[self.PREV] = link
link[self.PREV] = last
link[self.NEXT] = self.tail
return value
def __call__(self, f):
class LRUCacheWrapped(object):
@functools.wraps(f)
def __call__(im_self, *key):
link = self.mapping.get(key, self.head)
if link is not self.head:
try:
return self.get_cached(link, *key)
except KeyError:
pass
value = f(*key)
self.set_cache(value, *key)
return value
def size(im_self):
"""
Return the size of the cache
"""
return len(self.mapping)
def reset(im_self):
return self.reset()
def get_maxsize(im_self):
return self.maxsize
def set_maxsize(im_self, i):
self.maxsize = i
def get_maxtime(im_self):
return self.maxtime
def set_maxtime(im_self, i):
self.maxtime = i
maxsize = property(get_maxsize, set_maxsize)
maxtime = property(get_maxtime, set_maxtime)
def __repr__(im_self):
return '<%s %r>' % (im_self.__class__.__name__, f)
return LRUCacheWrapped()
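# Illustrative usage sketch (assumed sizes and function): LRUCache is applied
# as a decorator; repeated calls with the same arguments within maxtime are
# served from the cache.
#   @LRUCache(maxsize=100, maxtime=600)
#   def resolve(name):          # hypothetical expensive lookup
#       return socket.gethostbyname(name)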
def tpool_reraise(func, *args, **kwargs):
"""
Hack to work around Eventlet's tpool not catching and reraising Timeouts.
"""
def inner():
try:
return func(*args, **kwargs)
except BaseException as err:
return err
resp = tpool.execute(inner)
if isinstance(resp, BaseException):
raise resp
return resp
class ThreadPool(object):
"""
Perform blocking operations in background threads.
Call its methods from within greenlets to green-wait for results without
blocking the eventlet reactor (hopefully).
"""
BYTE = 'a'.encode('utf-8')
def __init__(self, nthreads=2):
self.nthreads = nthreads
self._run_queue = stdlib_queue.Queue()
self._result_queue = stdlib_queue.Queue()
self._threads = []
self._alive = True
if nthreads <= 0:
return
# We spawn a greenthread whose job it is to pull results from the
# worker threads via a real Queue and send them to eventlet Events so
# that the calling greenthreads can be awoken.
#
# Since each OS thread has its own collection of greenthreads, it
# doesn't work to have the worker thread send stuff to the event, as
# it then notifies its own thread-local eventlet hub to wake up, which
# doesn't do anything to help out the actual calling greenthread over
# in the main thread.
#
# Thus, each worker sticks its results into a result queue and then
# writes a byte to a pipe, signaling the result-consuming greenlet (in
# the main thread) to wake up and consume results.
#
# This is all stuff that eventlet.tpool does, but that code can't have
# multiple instances instantiated. Since the object server uses one
# pool per disk, we have to reimplement this stuff.
_raw_rpipe, self.wpipe = os.pipe()
self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb')
for _junk in range(nthreads):
thr = stdlib_threading.Thread(
target=self._worker,
args=(self._run_queue, self._result_queue))
thr.daemon = True
thr.start()
self._threads.append(thr)
# This is the result-consuming greenthread that runs in the main OS
# thread, as described above.
self._consumer_coro = greenthread.spawn_n(self._consume_results,
self._result_queue)
def _worker(self, work_queue, result_queue):
"""
Pulls an item from the queue and runs it, then puts the result into
the result queue. Repeats forever.
:param work_queue: queue from which to pull work
:param result_queue: queue into which to place results
"""
while True:
item = work_queue.get()
if item is None:
break
ev, func, args, kwargs = item
try:
result = func(*args, **kwargs)
result_queue.put((ev, True, result))
except BaseException:
result_queue.put((ev, False, sys.exc_info()))
finally:
work_queue.task_done()
os.write(self.wpipe, self.BYTE)
def _consume_results(self, queue):
"""
Runs as a greenthread in the same OS thread as callers of
run_in_thread().
Takes results from the worker OS threads and sends them to the waiting
greenthreads.
"""
while True:
try:
self.rpipe.read(1)
except ValueError:
# can happen at process shutdown when pipe is closed
break
while True:
try:
ev, success, result = queue.get(block=False)
except stdlib_queue.Empty:
break
try:
if success:
ev.send(result)
else:
ev.send_exception(*result)
finally:
queue.task_done()
def run_in_thread(self, func, *args, **kwargs):
"""
Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet
until results are available.
Exceptions thrown will be reraised in the calling thread.
If the threadpool was initialized with nthreads=0, it invokes
``func(*args, **kwargs)`` directly, followed by eventlet.sleep() to
ensure the eventlet hub has a chance to execute. It is more likely the
hub will be invoked when queuing operations to an external thread.
:returns: result of calling func
:raises: whatever func raises
"""
if not self._alive:
raise swift.common.exceptions.ThreadPoolDead()
if self.nthreads <= 0:
result = func(*args, **kwargs)
sleep()
return result
ev = event.Event()
self._run_queue.put((ev, func, args, kwargs), block=False)
# blocks this greenlet (and only *this* greenlet) until the real
# thread calls ev.send().
result = ev.wait()
return result
def _run_in_eventlet_tpool(self, func, *args, **kwargs):
"""
Really run something in an external thread, even if we haven't got any
threads of our own.
"""
def inner():
try:
return (True, func(*args, **kwargs))
except (Timeout, BaseException) as err:
return (False, err)
success, result = tpool.execute(inner)
if success:
return result
else:
raise result
def force_run_in_thread(self, func, *args, **kwargs):
"""
Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet
until results are available.
Exceptions thrown will be reraised in the calling thread.
If the threadpool was initialized with nthreads=0, uses eventlet.tpool
to run the function. This is in contrast to run_in_thread(), which
will (in that case) simply execute func in the calling thread.
:returns: result of calling func
:raises: whatever func raises
"""
if not self._alive:
raise swift.common.exceptions.ThreadPoolDead()
if self.nthreads <= 0:
return self._run_in_eventlet_tpool(func, *args, **kwargs)
else:
return self.run_in_thread(func, *args, **kwargs)
def terminate(self):
"""
Releases the threadpool's resources (OS threads, greenthreads, pipes,
etc.) and renders it unusable.
Don't call run_in_thread() or force_run_in_thread() after calling
terminate().
"""
self._alive = False
if self.nthreads <= 0:
return
for _junk in range(self.nthreads):
self._run_queue.put(None)
for thr in self._threads:
thr.join()
self._threads = []
self.nthreads = 0
greenthread.kill(self._consumer_coro)
self.rpipe.close()
os.close(self.wpipe)
def ismount(path):
"""
Test whether a path is a mount point. This will catch any
exceptions and translate them into a False return value.
Use ismount_raw to have the exceptions raised instead.
"""
try:
return ismount_raw(path)
except OSError:
return False
def ismount_raw(path):
"""
Test whether a path is a mount point. Whereas ismount will catch
any exceptions and just return False, this raw version will not
catch exceptions.
This is code hijacked from C Python 2.6.8, adapted to remove the extra
lstat() system call.
"""
try:
s1 = os.lstat(path)
except os.error as err:
if err.errno == errno.ENOENT:
# It doesn't exist -- so not a mount point :-)
return False
raise
if stat.S_ISLNK(s1.st_mode):
# A symlink can never be a mount point
return False
s2 = os.lstat(os.path.join(path, '..'))
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
# path/.. on a different device as path
return True
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
# path/.. is the same i-node as path
return True
return False
def close_if_possible(maybe_closable):
close_method = getattr(maybe_closable, 'close', None)
if callable(close_method):
return close_method()
@contextmanager
def closing_if_possible(maybe_closable):
"""
Like contextlib.closing(), but doesn't crash if the object lacks a close()
method.
PEP 333 (WSGI) says: "If the iterable returned by the application has a
close() method, the server or gateway must call that method upon
completion of the current request[.]" This function makes that easier.
"""
try:
yield maybe_closable
finally:
close_if_possible(maybe_closable)
_rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+'
_rfc_extension_pattern = re.compile(
r'(?:\s*;\s*(' + _rfc_token + r")\s*(?:=\s*(" + _rfc_token +
r'|"(?:[^"\\]|\\.)*"))?)')
_content_range_pattern = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$')
def parse_content_range(content_range):
"""
Parse a content-range header into (first_byte, last_byte, total_size).
See RFC 7233 section 4.2 for details on the header format, but it's
basically "Content-Range: bytes ${start}-${end}/${total}".
:param content_range: Content-Range header value to parse,
e.g. "bytes 100-1249/49004"
:returns: 3-tuple (start, end, total)
:raises: ValueError if malformed
"""
found = re.search(_content_range_pattern, content_range)
if not found:
raise ValueError("malformed Content-Range %r" % (content_range,))
return tuple(int(x) for x in found.groups())
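# Illustrative example, using the value from the docstring above:
#   >>> parse_content_range('bytes 100-1249/49004')
#   (100, 1249, 49004)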
def parse_content_type(content_type):
"""
Parse a content-type and its parameters into values.
RFC 2616 sec 14.17 and 3.7 are pertinent.
**Examples**::
'text/plain; charset=UTF-8' -> ('text/plain', [('charset', 'UTF-8')])
'text/plain; charset=UTF-8; level=1' ->
('text/plain', [('charset', 'UTF-8'), ('level', '1')])
:param content_type: content_type to parse
:returns: a tuple containing (content type, list of k, v parameter tuples)
"""
parm_list = []
if ';' in content_type:
content_type, parms = content_type.split(';', 1)
parms = ';' + parms
for m in _rfc_extension_pattern.findall(parms):
key = m[0].strip()
value = m[1].strip()
parm_list.append((key, value))
return content_type, parm_list
def extract_swift_bytes(content_type):
"""
Parse a content-type and return a tuple containing:
- the content_type string minus any swift_bytes param,
- the swift_bytes value or None if the param was not found
:param content_type: a content-type string
:return: a tuple of (content-type, swift_bytes or None)
"""
content_type, params = parse_content_type(content_type)
swift_bytes = None
for k, v in params:
if k == 'swift_bytes':
swift_bytes = v
else:
content_type += ';%s=%s' % (k, v)
return content_type, swift_bytes
def override_bytes_from_content_type(listing_dict, logger=None):
"""
Takes a dict from a container listing and overrides the content_type,
bytes fields if swift_bytes is set.
"""
content_type, params = parse_content_type(listing_dict['content_type'])
for key, value in params:
if key == 'swift_bytes':
try:
listing_dict['bytes'] = int(value)
except ValueError:
if logger:
logger.exception("Invalid swift_bytes")
else:
content_type += ';%s=%s' % (key, value)
listing_dict['content_type'] = content_type
def clean_content_type(value):
if ';' in value:
left, right = value.rsplit(';', 1)
if right.lstrip().startswith('swift_bytes='):
return left
return value
def quote(value, safe='/'):
"""
Patched version of urllib.quote that encodes utf-8 strings before quoting
"""
return _quote(get_valid_utf8_str(value), safe)
def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj):
"""
Returns an expiring object container name for the given X-Delete-At and
a/c/o.
"""
shard_int = int(hash_path(acc, cont, obj), 16) % 100
return normalize_delete_at_timestamp(
int(x_delete_at) / expirer_divisor * expirer_divisor - shard_int)
class _MultipartMimeFileLikeObject(object):
def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size):
self.no_more_data_for_this_file = False
self.no_more_files = False
self.wsgi_input = wsgi_input
self.boundary = boundary
self.input_buffer = input_buffer
self.read_chunk_size = read_chunk_size
def read(self, length=None):
if not length:
length = self.read_chunk_size
if self.no_more_data_for_this_file:
return b''
# read enough data to know whether we're going to run
# into a boundary in next [length] bytes
if len(self.input_buffer) < length + len(self.boundary) + 2:
to_read = length + len(self.boundary) + 2
while to_read > 0:
try:
chunk = self.wsgi_input.read(to_read)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
to_read -= len(chunk)
self.input_buffer += chunk
if not chunk:
self.no_more_files = True
break
boundary_pos = self.input_buffer.find(self.boundary)
# boundary does not exist in the next (length) bytes
if boundary_pos == -1 or boundary_pos > length:
ret = self.input_buffer[:length]
self.input_buffer = self.input_buffer[length:]
# if it does, just return data up to the boundary
else:
ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
self.no_more_files = self.input_buffer.startswith(b'--')
self.no_more_data_for_this_file = True
self.input_buffer = self.input_buffer[2:]
return ret
def readline(self):
if self.no_more_data_for_this_file:
return b''
boundary_pos = newline_pos = -1
while newline_pos < 0 and boundary_pos < 0:
try:
chunk = self.wsgi_input.read(self.read_chunk_size)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
self.input_buffer += chunk
newline_pos = self.input_buffer.find(b'\r\n')
boundary_pos = self.input_buffer.find(self.boundary)
if not chunk:
self.no_more_files = True
break
# found a newline
if newline_pos >= 0 and \
(boundary_pos < 0 or newline_pos < boundary_pos):
# Use self.read to ensure any logic there happens...
ret = b''
to_read = newline_pos + 2
while to_read > 0:
chunk = self.read(to_read)
# Should never happen since we're reading from input_buffer,
# but just for completeness...
if not chunk:
break
to_read -= len(chunk)
ret += chunk
return ret
else: # no newlines, just return up to next boundary
return self.read(len(self.input_buffer))
def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096):
"""
Given a multi-part-mime-encoded input file object and boundary,
yield file-like objects for each part. Note that this does not
split each part into headers and body; the caller is responsible
for doing that if necessary.
:param wsgi_input: The file-like object to read from.
:param boundary: The mime boundary to separate new file-like
objects on.
:returns: A generator of file-like objects for each part.
:raises: MimeInvalid if the document is malformed
"""
boundary = '--' + boundary
blen = len(boundary) + 2 # \r\n
try:
got = wsgi_input.readline(blen)
while got == '\r\n':
got = wsgi_input.readline(blen)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
if got.strip() != boundary:
raise swift.common.exceptions.MimeInvalid(
'invalid starting boundary: wanted %r, got %r' % (boundary, got))
boundary = '\r\n' + boundary
input_buffer = ''
done = False
while not done:
it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer,
read_chunk_size)
yield it
done = it.no_more_files
input_buffer = it.input_buffer
def parse_mime_headers(doc_file):
"""
Takes a file-like object containing a MIME document and returns a
HeaderKeyDict containing the headers. The body of the message is not
consumed: the position in doc_file is left at the beginning of the body.
This function was inspired by the Python standard library's
http.client.parse_headers.
:param doc_file: binary file-like object containing a MIME document
:returns: a swift.common.swob.HeaderKeyDict containing the headers
"""
headers = []
while True:
line = doc_file.readline()
done = line in (b'\r\n', b'\n', b'')
if six.PY3:
try:
line = line.decode('utf-8')
except UnicodeDecodeError:
line = line.decode('latin1')
headers.append(line)
if done:
break
if six.PY3:
header_string = ''.join(headers)
else:
header_string = b''.join(headers)
headers = email.parser.Parser().parsestr(header_string)
return HeaderKeyDict(headers)
def mime_to_document_iters(input_file, boundary, read_chunk_size=4096):
"""
Takes a file-like object containing a multipart MIME document and
returns an iterator of (headers, body-file) tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
doc_files = iter_multipart_mime_documents(input_file, boundary,
read_chunk_size)
for i, doc_file in enumerate(doc_files):
# this consumes the headers and leaves just the body in doc_file
headers = parse_mime_headers(doc_file)
yield (headers, doc_file)
def maybe_multipart_byteranges_to_document_iters(app_iter, content_type):
"""
Takes an iterator that may or may not contain a multipart MIME document
as well as content type and returns an iterator of body iterators.
:param app_iter: iterator that may contain a multipart MIME document
:param content_type: content type of the app_iter, used to determine
whether it contains a multipart document and, if
so, what the boundary is between documents
"""
content_type, params_list = parse_content_type(content_type)
if content_type != 'multipart/byteranges':
yield app_iter
return
body_file = FileLikeIter(app_iter)
boundary = dict(params_list)['boundary']
for _headers, body in mime_to_document_iters(body_file, boundary):
yield (chunk for chunk in iter(lambda: body.read(65536), ''))
def document_iters_to_multipart_byteranges(ranges_iter, boundary):
"""
Takes an iterator of range iters and yields a multipart/byteranges MIME
document suitable for sending as the body of a multi-range 206 response.
See document_iters_to_http_response_body for parameter descriptions.
"""
divider = "--" + boundary + "\r\n"
terminator = "--" + boundary + "--"
for range_spec in ranges_iter:
start_byte = range_spec["start_byte"]
end_byte = range_spec["end_byte"]
entity_length = range_spec.get("entity_length", "*")
content_type = range_spec["content_type"]
part_iter = range_spec["part_iter"]
part_header = ''.join((
divider,
"Content-Type: ", str(content_type), "\r\n",
"Content-Range: ", "bytes %d-%d/%s\r\n" % (
start_byte, end_byte, entity_length),
"\r\n"
))
yield part_header
for chunk in part_iter:
yield chunk
yield "\r\n"
yield terminator
def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
logger):
"""
Takes an iterator of range iters and turns it into an appropriate
HTTP response body, whether that's multipart/byteranges or not.
This is almost, but not quite, the inverse of
request_helpers.http_response_to_document_iters(). This function only
yields chunks of the body, not any headers.
:param ranges_iter: an iterator of dictionaries, one per range.
Each dictionary must contain at least the following key:
"part_iter": iterator yielding the bytes in the range
Additionally, if multipart is True, then the following other keys
are required:
"start_byte": index of the first byte in the range
"end_byte": index of the last byte in the range
"content_type": value for the range's Content-Type header
Finally, there is one optional key that is used in the
multipart/byteranges case:
"entity_length": length of the requested entity (not necessarily
equal to the response length). If omitted, "*" will be used.
Each part_iter will be exhausted prior to calling next(ranges_iter).
:param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not
"--boundary").
:param multipart: True if the response should be multipart/byteranges,
False otherwise. This should be True if and only if you have 2 or
more ranges.
:param logger: a logger
"""
if multipart:
return document_iters_to_multipart_byteranges(ranges_iter, boundary)
else:
try:
response_body_iter = next(ranges_iter)['part_iter']
except StopIteration:
return ''
# We need to make sure ranges_iter does not get garbage-collected
# before response_body_iter is exhausted. The reason is that
# ranges_iter has a finally block that calls close_swift_conn, and
# so if that finally block fires before we read response_body_iter,
# there's nothing there.
def string_along(useful_iter, useless_iter_iter, logger):
for x in useful_iter:
yield x
try:
next(useless_iter_iter)
except StopIteration:
pass
else:
logger.warning(
"More than one part in a single-part response?")
return string_along(response_body_iter, ranges_iter, logger)
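# Illustrative sketch (hypothetical values, hypothetical `logger`) of the
# ranges_iter contract documented above, for a single range returned without
# MIME framing:
#
#   ranges = iter([{
#       "start_byte": 0,
#       "end_byte": 4,
#       "content_type": "text/plain",
#       "entity_length": 5,
#       "part_iter": iter([b"hello"]),
#   }])
#   body_iter = document_iters_to_http_response_body(
#       ranges, boundary="boundary", multipart=False, logger=logger)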
def multipart_byteranges_to_document_iters(input_file, boundary,
read_chunk_size=4096):
"""
Takes a file-like object containing a multipart/byteranges MIME document
(see RFC 7233, Appendix A) and returns an iterator of (first-byte,
last-byte, length, document-headers, body-file) 5-tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
for headers, body in mime_to_document_iters(input_file, boundary,
read_chunk_size):
first_byte, last_byte, length = parse_content_range(
headers.get('content-range'))
yield (first_byte, last_byte, length, headers.items(), body)
#: Regular expression to match form attributes.
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
def parse_content_disposition(header):
"""
Given the value of a header like:
Content-Disposition: form-data; name="somefile"; filename="test.html"
Return data like
("form-data", {"name": "somefile", "filename": "test.html"})
:param header: Value of a header (the part after the ': ').
:returns: (value name, dict) of the attribute data parsed (see above).
"""
attributes = {}
attrs = ''
if ';' in header:
header, attrs = [x.strip() for x in header.split(';', 1)]
m = True
while m:
m = ATTRIBUTES_RE.match(attrs)
if m:
attrs = attrs[len(m.group(0)):]
attributes[m.group(1)] = m.group(2).strip('"')
return header, attributes
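# Worked example, taken directly from the docstring above:
#
#   parse_content_disposition('form-data; name="somefile"; filename="test.html"')
#   # -> ('form-data', {'name': 'somefile', 'filename': 'test.html'})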
class sockaddr_alg(ctypes.Structure):
_fields_ = [("salg_family", ctypes.c_ushort),
("salg_type", ctypes.c_ubyte * 14),
("salg_feat", ctypes.c_uint),
("salg_mask", ctypes.c_uint),
("salg_name", ctypes.c_ubyte * 64)]
_bound_md5_sockfd = None
def get_md5_socket():
"""
Get an MD5 socket file descriptor. One can MD5 data with it by writing it
to the socket with os.write, then os.read the 16 bytes of the checksum out
later.
NOTE: It is the caller's responsibility to ensure that os.close() is
called on the returned file descriptor. This is a bare file descriptor,
not a Python object. It doesn't close itself.
"""
# Linux's AF_ALG sockets work like this:
#
# First, initialize a socket with socket() and bind(). This tells the
# socket what algorithm to use, as well as setting up any necessary bits
# like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
# algorithm name.
#
# Second, to hash some data, get a second socket by calling accept() on
# the first socket. Write data to the socket, then when finished, read the
# checksum from the socket and close it. This lets you checksum multiple
# things without repeating all the setup code each time.
#
# Since we only need to bind() one socket, we do that here and save it for
# future re-use. That way, we only use one file descriptor to get an MD5
# socket instead of two, and we also get to save some syscalls.
global _bound_md5_sockfd
global _libc_socket
global _libc_bind
global _libc_accept
if _libc_accept is None:
_libc_accept = load_libc_function('accept', fail_if_missing=True)
if _libc_socket is None:
_libc_socket = load_libc_function('socket', fail_if_missing=True)
if _libc_bind is None:
_libc_bind = load_libc_function('bind', fail_if_missing=True)
# Do this at first call rather than at import time so that we don't use a
# file descriptor on systems that aren't using any MD5 sockets.
if _bound_md5_sockfd is None:
sockaddr_setup = sockaddr_alg(
AF_ALG,
(ord('h'), ord('a'), ord('s'), ord('h'), 0),
0, 0,
(ord('m'), ord('d'), ord('5'), 0))
hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
ctypes.c_int(socket.SOCK_SEQPACKET),
ctypes.c_int(0))
if hash_sockfd < 0:
raise IOError(ctypes.get_errno(),
"Failed to initialize MD5 socket")
bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
ctypes.pointer(sockaddr_setup),
ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
if bind_result < 0:
os.close(hash_sockfd)
raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
_bound_md5_sockfd = hash_sockfd
md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
if md5_sockfd < 0:
raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
return md5_sockfd
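# Illustrative usage sketch based on the docstring above (Linux-only; the data
# is hypothetical). The caller owns the returned descriptor and must close it:
#
#   fd = get_md5_socket()
#   try:
#       os.write(fd, b"some data to hash")
#       digest = os.read(fd, 16)    # raw 16-byte MD5 digest
#   finally:
#       os.close(fd)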
| {
"content_hash": "f9615c9724c9e0fb7310405f33cf96cd",
"timestamp": "",
"source": "github",
"line_count": 3989,
"max_line_length": 79,
"avg_line_length": 34.83253948357984,
"alnum_prop": 0.5964936270664354,
"repo_name": "aerwin3/swift",
"id": "fb4baa439652c0f21f55368957aa02368ff3319e",
"size": "139542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift/common/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6931295"
},
{
"name": "Shell",
"bytes": "1497"
}
],
"symlink_target": ""
} |
import socket
import yaml
import pymongo
class Kwargs(object):
def __init__(self, **kwargs):
for key, val in kwargs.items():
self.__setattr__(key, val)
class Config(object):
def __init__(self, filename):
        # Use safe_load and close the file handle deterministically.
        with open(filename) as f:
            self.conf = yaml.safe_load(f)
self.mongo = Kwargs()
_mongo = self.conf.get('db', {}).get('mongo', {})
self.mongo = Kwargs(**{
hostname: Kwargs(**{
dbname: Kwargs(**{
collname: pymongo.MongoClient(host['address'],
connect=False)[db['name']][coll['name']]
for collname, coll in _mongo.get('collection', {}).items()
if coll['database'] == dbname
})
for dbname, db in _mongo.get('database', {}).items()
if db['host'] == hostname
})
for hostname, host in _mongo.get('host', {}).items()
})
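        # Illustrative YAML layout assumed by the mapping above (all names are
        # hypothetical); self.mongo.<host>.<db>.<coll> then resolves to a
        # pymongo Collection:
        #
        #   db:
        #     mongo:
        #       host:
        #         local: {address: "mongodb://localhost:27017"}
        #       database:
        #         appdb: {name: "app", host: "local"}
        #       collection:
        #         users: {name: "users", database: "appdb"}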
def get_port(self):
if 'port' not in self.conf:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
self.conf['port'] = s.getsockname()[1]
s.close()
return self.conf['port']
| {
"content_hash": "6d72d7474f9713e291bb88464358b05b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 33.810810810810814,
"alnum_prop": 0.47162270183852917,
"repo_name": "factornado/coco.registry",
"id": "70544ce30c733e088abfc37a503ad7c921197e45",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templates/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7099"
},
{
"name": "Shell",
"bytes": "1862"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
class StackdioWarning(Warning):
pass
| {
"content_hash": "91ae0c60c38c356aa9beb78a846a062d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 16.6,
"alnum_prop": 0.7469879518072289,
"repo_name": "stackdio/stackdio",
"id": "7c471e66bb8580f8e2e86ced781696a29df57586",
"size": "694",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackdio/core/warnings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6462"
},
{
"name": "HTML",
"bytes": "200474"
},
{
"name": "JavaScript",
"bytes": "365621"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "1034237"
},
{
"name": "SaltStack",
"bytes": "4594"
},
{
"name": "Scheme",
"bytes": "2371"
},
{
"name": "Shell",
"bytes": "6131"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'troup'
copyright = '2016, Pavle Jonoski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'troupdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'troup.tex', 'troup Documentation',
'Pavle Jonoski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'troup', 'troup Documentation',
['Pavle Jonoski'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'troup', 'troup Documentation',
'Pavle Jonoski', 'troup', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "ce5e3727f9ee2c102d252b1db00b0174",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 79,
"avg_line_length": 31.414634146341463,
"alnum_prop": 0.7036749482401656,
"repo_name": "troup-system/troup",
"id": "eed8dc1663a4f62477e1f05e85d4cb8be66542ed",
"size": "8169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "112182"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import re
import os
def get_version():
fn = os.path.join(os.path.dirname(__file__), "eli5", "__init__.py")
with open(fn) as f:
        return re.findall(r"__version__ = '([\d.\w]+)'", f.read())[0]
def get_long_description():
readme = open('README.rst').read()
changelog = open('CHANGES.rst').read()
return "\n\n".join([
readme,
changelog.replace(':func:', '').replace(':ref:', '')
])
setup(
name='eli5',
version=get_version(),
author='Mikhail Korobov, Konstantin Lopuhin',
author_email='[email protected], [email protected]',
license='MIT license',
long_description=get_long_description(),
description="Debug machine learning classifiers and explain their predictions",
url='https://github.com/TeamHG-Memex/eli5',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['tests']),
install_requires=[
'attrs > 16.0.0',
'jinja2',
'numpy >= 1.9.0',
'scipy',
'six',
'scikit-learn >= 0.18',
'graphviz',
'tabulate>=0.7.7',
],
extras_require={
":python_version<'3.5.6'": [
'singledispatch >= 3.4.0.3',
],
":python_version<'3.5'": ['typing'],
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| {
"content_hash": "8f97af70fa5fe1128da3d4bbe3fb336f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 83,
"avg_line_length": 30.661290322580644,
"alnum_prop": 0.563387690689111,
"repo_name": "TeamHG-Memex/eli5",
"id": "ec561515eff847663d900f9ad73185264c7ff3e1",
"size": "1923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8731"
},
{
"name": "Jupyter Notebook",
"bytes": "7973230"
},
{
"name": "Python",
"bytes": "503460"
},
{
"name": "Shell",
"bytes": "1302"
}
],
"symlink_target": ""
} |
import rospy, math, time, copy
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
import sys, select, termios, tty, signal
msg = """
Reading from the keyboard to actuate individual joints
---------------------------
Initial configuration is 0 rads at each joint
To actuate a specific joint, pass the joint index, for example:
1
This increases the desired configuration by 0.1 rads for that particular joint
To switch between increasing/decreasing joint angles use:
d for decreasing
i for increasing
CTRL-C to quit
"""
jointBindings = [
'1', # joint1
'2', # joint2
'3', # joint3
'4', # joint4
'5' ]
moveBindings = [
'd', # decrease angle
'i', # increase angle
]
class TimeoutException(Exception):
pass
def getKey():
def timeout_handler(signum, frame):
raise TimeoutException()
old_handler = signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(1) #this is the watchdog timing
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
try:
key = sys.stdin.read(1)
#print "Read key"
except TimeoutException:
#print "Timeout"
return "-"
finally:
signal.signal(signal.SIGALRM, old_handler)
signal.alarm(0)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
TOPIC_J_CMD = '/arm_1/arm_controller/command'
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
# pub = rospy.Publisher('cmd_vel', Twist)
pub = rospy.Publisher(TOPIC_J_CMD, JointTrajectory)
rospy.init_node('teleop_arm_keyboard')
dt=2
dtheta = 0.1
desired = [0,0,0,0,0]
mov_direction = 1
try:
print msg
while(1):
key = getKey()
if key in jointBindings:
jt = JointTrajectory()
# fill the header
jt.header.seq = 0
jt.header.stamp.secs = 0 #secs
jt.header.stamp.nsecs = 0 #nsecs
jt.header.frame_id = 'base_link'
# specify the joint names
jt.joint_names = ['arm_joint_1', 'arm_joint_2', 'arm_joint_3', 'arm_joint_4', 'arm_joint_5']
# joint points
jtp = JointTrajectoryPoint()
desired[jointBindings.index(key)] += mov_direction*dtheta
jtp.positions = desired
jtp.velocities = [0.0, 0.0, 0.0, 0.0, 0.0]
jtp.accelerations =[0.0, 0.0, 0.0, 0.0, 0.0]
jtp.time_from_start = rospy.Duration.from_sec(2*(0+1))
jt.points.append(copy.deepcopy(jtp))
pub.publish(jt)
print 'instructed desired joint configuration:'
print desired
elif key in moveBindings:
# decrease
if moveBindings.index(key)==0 :
mov_direction = -1
print 'now decreasing joint angles'
else:
mov_direction = 1
print 'now increasing joint angles'
except:
print "Unexpected error:", sys.exc_info()[0]
finally:
jt = JointTrajectory()
pub.publish(jt)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| {
"content_hash": "f8255c5fafa242bd61a6aa090e9bcac1",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 96,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6496881496881497,
"repo_name": "ipab-rad/rad_youbot_stack",
"id": "a8e5089b983296154303d7d745514fe6cc9ee75d",
"size": "3094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "youbot_driver_ros_interface/src/examples/youbot_keyboard_arm_teleop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2711"
},
{
"name": "C++",
"bytes": "388896"
},
{
"name": "CMake",
"bytes": "21523"
},
{
"name": "Makefile",
"bytes": "257"
},
{
"name": "Python",
"bytes": "12574"
},
{
"name": "Shell",
"bytes": "671"
}
],
"symlink_target": ""
} |
"""
Verifies that 'copies' with app bundles are handled correctly.
"""
import TestGyp
import os
import sys
import time
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['make', 'xcode'])
test.run_gyp('framework.gyp', chdir='framework')
test.build('framework.gyp', 'copy_target', chdir='framework')
# Check that the copy succeeded.
test.built_file_must_exist(
'Test Framework.framework/foo/Dependency Bundle.framework',
chdir='framework')
test.built_file_must_exist(
'Test Framework.framework/foo/Dependency Bundle.framework/Versions/A',
chdir='framework')
test.built_file_must_exist(
'Test Framework.framework/Versions/A/Libraries/empty.c',
chdir='framework')
# Check that rebuilding the target a few times works.
dep_bundle = test.built_file_path('Dependency Bundle.framework',
chdir='framework')
mtime = os.path.getmtime(dep_bundle)
atime = os.path.getatime(dep_bundle)
for i in range(3):
os.utime(dep_bundle, (atime + i * 1000, mtime + i * 1000))
test.build('framework.gyp', 'copy_target', chdir='framework')
# Check that actions ran.
test.built_file_must_exist('action_file', chdir='framework')
test.pass_test()
| {
"content_hash": "30225fdb15303ff0f31bb805cb4fa011",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.6768,
"repo_name": "dtebbs/gyp",
"id": "a6e47ffd1b59006651548221040769b5be4597a0",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mac/gyptest-copies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "400"
},
{
"name": "C",
"bytes": "24222"
},
{
"name": "C++",
"bytes": "2188"
},
{
"name": "Objective-C",
"bytes": "1566"
},
{
"name": "Objective-C++",
"bytes": "1274"
},
{
"name": "Python",
"bytes": "1118819"
},
{
"name": "Shell",
"bytes": "4812"
}
],
"symlink_target": ""
} |
"""VTA RPC client function"""
import os
from .environment import get_env
from .bitstream import download_bitstream, get_bitstream_path
def reconfig_runtime(remote):
"""Reconfigure remote runtime based on current hardware spec.
Parameters
----------
remote : RPCSession
The TVM RPC session
"""
env = get_env()
freconfig = remote.get_function("tvm.contrib.vta.reconfig_runtime")
freconfig(env.pkg_config().cfg_json)
def program_fpga(remote, bitstream=None):
"""Upload and program bistream
Parameters
----------
remote : RPCSession
The TVM RPC session
bitstream : str, optional
        Path to a local bitstream file. If unset, tries to download from the cache server.
"""
if bitstream:
assert os.path.isfile(bitstream)
else:
bitstream = get_bitstream_path()
if not os.path.isfile(bitstream):
download_bitstream()
fprogram = remote.get_function("tvm.contrib.vta.init")
remote.upload(bitstream)
fprogram(os.path.basename(bitstream))
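# Illustrative usage sketch (hypothetical host and port), assuming an RPC
# session obtained elsewhere, e.g. via tvm.rpc.connect():
#
#   remote = tvm.rpc.connect("192.168.2.99", 9091)
#   program_fpga(remote)      # falls back to the cached/default bitstream
#   reconfig_runtime(remote)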
| {
"content_hash": "b277efbe1db08decb7bd91fff6751ca8",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 85,
"avg_line_length": 26.5,
"alnum_prop": 0.6584905660377358,
"repo_name": "mlperf/training_results_v0.7",
"id": "a5bafab498a524aa61a4aa928458a2b77e56f4e9",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/python/vta/rpc_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
from alembic import op
import sqlalchemy as sa
from neutron.plugins.cisco.common import cisco_constants
segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment',
name='segment_type')
profile_type = sa.Enum('network', 'policy', name='profile_type')
def upgrade():
op.create_table(
'cisco_policy_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_network_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('segment_type', segment_type, nullable=False),
sa.Column('sub_type', sa.String(length=255), nullable=True),
sa.Column('segment_range', sa.String(length=255), nullable=True),
sa.Column('multicast_ip_index', sa.Integer(), nullable=True,
server_default='0'),
sa.Column('multicast_ip_range', sa.String(length=255), nullable=True),
sa.Column('physical_network', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_n1kv_vxlan_allocations',
sa.Column('vxlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), nullable=False,
server_default=sa.sql.false()),
sa.Column('network_profile_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_profile_id'],
['cisco_network_profiles.id'],
ondelete='CASCADE',
name='cisco_n1kv_vxlan_allocations_ibfk_1'),
sa.PrimaryKeyConstraint('vxlan_id'))
op.create_table(
'cisco_n1kv_vlan_allocations',
sa.Column('physical_network', sa.String(length=64), nullable=False),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), autoincrement=False,
nullable=False, server_default=sa.sql.false()),
sa.Column('network_profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('physical_network', 'vlan_id'),
sa.ForeignKeyConstraint(['network_profile_id'],
['cisco_network_profiles.id'],
ondelete='CASCADE',
name='cisco_n1kv_vlan_allocations_ibfk_1'))
op.create_table(
'cisco_credentials',
sa.Column('credential_id', sa.String(length=255), nullable=True),
sa.Column('credential_name', sa.String(length=255), nullable=False),
sa.Column('user_name', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('type', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('credential_name'))
op.create_table(
'cisco_qos_policies',
sa.Column('qos_id', sa.String(length=255), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('qos_name', sa.String(length=255), nullable=False),
sa.Column('qos_desc', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('tenant_id', 'qos_name'))
op.create_table(
'cisco_n1kv_profile_bindings',
sa.Column('profile_type', profile_type, nullable=True),
sa.Column('tenant_id', sa.String(length=36), nullable=False,
server_default=cisco_constants.TENANT_ID_NOT_SET),
sa.Column('profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('tenant_id', 'profile_id'))
op.create_table(
'cisco_n1kv_vmnetworks',
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('port_count', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_policy_profiles.id'], ),
sa.PrimaryKeyConstraint('name'))
op.create_table(
'cisco_n1kv_trunk_segments',
sa.Column('trunk_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment_id', sa.String(length=36), nullable=False),
sa.Column('dot1qtag', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag'))
op.create_table(
'cisco_provider_networks',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=255), nullable=False),
sa.Column('segmentation_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_multi_segments',
sa.Column('multi_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment1_id', sa.String(length=36), nullable=False),
sa.Column('segment2_id', sa.String(length=36), nullable=False),
sa.Column('encap_profile_name', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id',
'segment2_id'))
op.create_table(
'cisco_n1kv_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=32), nullable=False),
sa.Column('physical_network', sa.String(length=64), nullable=True),
sa.Column('segmentation_id', sa.Integer(), nullable=True),
sa.Column('multicast_ip', sa.String(length=32), nullable=True),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_network_profiles.id']),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_port_bindings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']),
sa.PrimaryKeyConstraint('port_id'))
op.create_table(
'cisco_csr_identifier_map',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('ipsec_site_conn_id', sa.String(length=64),
primary_key=True),
sa.Column('csr_tunnel_id', sa.Integer(), nullable=False),
sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False),
sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['ipsec_site_conn_id'],
['ipsec_site_connections.id'],
ondelete='CASCADE')
)
op.create_table(
'cisco_ml2_apic_host_links',
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('ifname', sa.String(length=64), nullable=False),
sa.Column('ifmac', sa.String(length=32), nullable=True),
sa.Column('swid', sa.String(length=32), nullable=False),
sa.Column('module', sa.String(length=32), nullable=False),
sa.Column('port', sa.String(length=32), nullable=False),
sa.PrimaryKeyConstraint('host', 'ifname'))
op.create_table(
'cisco_ml2_apic_names',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('neutron_type', sa.String(length=32), nullable=False),
sa.Column('apic_name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('neutron_id', 'neutron_type'))
op.create_table(
'cisco_ml2_apic_contracts',
sa.Column('tenant_id', sa.String(length=255)),
sa.Column('router_id', sa.String(length=64), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
sa.PrimaryKeyConstraint('router_id'))
op.create_table('cisco_hosting_devices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('complementary_id', sa.String(length=36), nullable=True),
sa.Column('device_id', sa.String(length=255), nullable=True),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('management_port_id', sa.String(length=36), nullable=True),
sa.Column('protocol_port', sa.Integer(), nullable=True),
sa.Column('cfg_agent_id', sa.String(length=36), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('status', sa.String(length=16), nullable=True),
sa.ForeignKeyConstraint(['cfg_agent_id'], ['agents.id'], ),
sa.ForeignKeyConstraint(['management_port_id'], ['ports.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('cisco_port_mappings',
sa.Column('logical_resource_id', sa.String(length=36), nullable=False),
sa.Column('logical_port_id', sa.String(length=36), nullable=False),
sa.Column('port_type', sa.String(length=32), nullable=True),
sa.Column('network_type', sa.String(length=32), nullable=True),
sa.Column('hosting_port_id', sa.String(length=36), nullable=True),
sa.Column('segmentation_id', sa.Integer(), autoincrement=False,
nullable=True),
sa.ForeignKeyConstraint(['hosting_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['logical_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id')
)
op.create_table('cisco_router_mappings',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('auto_schedule', sa.Boolean(), nullable=False),
sa.Column('hosting_device_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['hosting_device_id'],
['cisco_hosting_devices.id'],
ondelete='SET NULL'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
| {
"content_hash": "9c520272dc77d50fa2633905b6c72a2d",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 79,
"avg_line_length": 50.432432432432435,
"alnum_prop": 0.598517327617006,
"repo_name": "NeCTAR-RC/neutron",
"id": "7df02a9312e2953248b5c53269e24ba762c45ee1",
"size": "11857",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/db/migration/alembic_migrations/cisco_init_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7228162"
},
{
"name": "Shell",
"bytes": "12807"
}
],
"symlink_target": ""
} |
from enum import IntEnum
from functools import reduce
import io
import struct
from ipv6 import BuildableFromBytes
from ipv6 import ConvertibleToBytes
class HandshakeType(IntEnum):
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_HELLO_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
class ContentType(IntEnum):
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
class AlertLevel(IntEnum):
WARNING = 1
FATAL = 2
class AlertDescription(IntEnum):
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED_RESERVED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE_RESERVED = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION_RESERVED = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
USER_CANCELED = 90
NO_RENEGOTIATION = 100
UNSUPPORTED_EXTENSION = 110
class Record(ConvertibleToBytes, BuildableFromBytes):
def __init__(self, content_type, version, epoch, sequence_number, length,
fragment):
self.content_type = content_type
self.version = version
self.epoch = epoch
self.sequence_number = sequence_number
self.length = length
self.fragment = fragment
def to_bytes(self):
return (struct.pack(">B", self.content_type) + self.version.to_bytes() +
struct.pack(">H", self.epoch) +
self.sequence_number.to_bytes(6, byteorder='big') +
struct.pack(">H", self.length) + self.fragment)
@classmethod
def from_bytes(cls, data):
content_type = ContentType(struct.unpack(">B", data.read(1))[0])
version = ProtocolVersion.from_bytes(data)
epoch = struct.unpack(">H", data.read(2))[0]
sequence_number = struct.unpack(">Q", b'\x00\x00' + data.read(6))[0]
length = struct.unpack(">H", data.read(2))[0]
fragment = bytes(data.read(length))
return cls(content_type, version, epoch, sequence_number, length,
fragment)
def __repr__(self):
return "Record(content_type={}, version={}, epoch={}, sequence_number={}, length={})".format(
str(self.content_type),
self.version,
self.epoch,
self.sequence_number,
self.length,
)
class Message(ConvertibleToBytes, BuildableFromBytes):
def __init__(self, content_type):
self.content_type = content_type
def to_bytes(self):
raise NotImplementedError
@classmethod
def from_bytes(cls, data):
raise NotImplementedError
class HandshakeMessage(Message):
def __init__(
self,
handshake_type,
length,
message_seq,
fragment_offset,
fragment_length,
body,
):
super(HandshakeMessage, self).__init__(ContentType.HANDSHAKE)
self.handshake_type = handshake_type
self.length = length
self.message_seq = message_seq
self.fragment_offset = fragment_offset
self.fragment_length = fragment_length
self.body = body
def to_bytes(self):
return (struct.pack(">B", self.handshake_type) +
struct.pack(">I", self.length)[1:] +
struct.pack(">H", self.message_seq) +
struct.pack(">I", self.fragment_offset)[1:] +
struct.pack(">I", self.fragment_length)[1:] +
self.body.to_bytes())
@classmethod
def from_bytes(cls, data):
handshake_type = HandshakeType(struct.unpack(">B", data.read(1))[0])
length = struct.unpack(">I", b'\x00' + data.read(3))[0]
message_seq = struct.unpack(">H", data.read(2))[0]
fragment_offset = struct.unpack(">I", b'\x00' + bytes(data.read(3)))[0]
fragment_length = struct.unpack(">I", b'\x00' + bytes(data.read(3)))[0]
end_position = data.tell() + fragment_length
# TODO(wgtdkp): handle fragmentation
message_class, body = handshake_map[handshake_type], None
if message_class:
body = message_class.from_bytes(data)
else:
print("{} messages are not handled".format(str(handshake_type)))
body = bytes(data.read(fragment_length))
assert data.tell() == end_position
return cls(
handshake_type,
length,
message_seq,
fragment_offset,
fragment_length,
body,
)
def __repr__(self):
return "Handshake(type={}, length={})".format(str(self.handshake_type),
self.length)
class ProtocolVersion(ConvertibleToBytes, BuildableFromBytes):
def __init__(self, major, minor):
self.major = major
self.minor = minor
def __eq__(self, other):
return (isinstance(self, type(other)) and self.major == other.major and
self.minor == other.minor)
def to_bytes(self):
return struct.pack(">BB", self.major, self.minor)
@classmethod
def from_bytes(cls, data):
major, minor = struct.unpack(">BB", data.read(2))
return cls(major, minor)
def __repr__(self):
return "ProtocolVersion(major={}, minor={})".format(
self.major, self.minor)
class Random(ConvertibleToBytes, BuildableFromBytes):
random_bytes_length = 28
def __init__(self, gmt_unix_time, random_bytes):
self.gmt_unix_time = gmt_unix_time
self.random_bytes = random_bytes
assert len(self.random_bytes) == Random.random_bytes_length
def __eq__(self, other):
return (isinstance(self, type(other)) and
self.gmt_unix_time == other.gmt_unix_time and
self.random_bytes == other.random_bytes)
def to_bytes(self):
return struct.pack(">I", self.gmt_unix_time) + (self.random_bytes)
@classmethod
def from_bytes(cls, data):
gmt_unix_time = struct.unpack(">I", data.read(4))[0]
random_bytes = bytes(data.read(cls.random_bytes_length))
return cls(gmt_unix_time, random_bytes)
class VariableVector(ConvertibleToBytes):
def __init__(self, subrange, ele_cls, elements):
self.subrange = subrange
self.ele_cls = ele_cls
self.elements = elements
assert self.subrange[0] <= len(self.elements) <= self.subrange[1]
def length(self):
return len(self.elements)
def __eq__(self, other):
return (isinstance(self, type(other)) and
self.subrange == other.subrange and
self.ele_cls == other.ele_cls and
self.elements == other.elements)
def to_bytes(self):
        # reduce passes (accumulator, element); start from an empty buffer so
        # single-element vectors are concatenated correctly as bytes.
        data = reduce(lambda acc, ele: acc + ele.to_bytes(), self.elements,
                      bytearray())
return VariableVector._encode_length(len(data), self.subrange) + data
@classmethod
def from_bytes(cls, ele_cls, subrange, data):
length = cls._decode_length(subrange, data)
end_position = data.tell() + length
elements = []
while data.tell() < end_position:
elements.append(ele_cls.from_bytes(data))
return cls(subrange, ele_cls, elements)
@classmethod
def _decode_length(cls, subrange, data):
length_in_byte = cls._calc_length_in_byte(subrange[1])
return reduce(
lambda acc, byte: (acc << 8) | byte,
bytearray(data.read(length_in_byte)),
0,
)
@classmethod
def _encode_length(cls, length, subrange):
length_in_byte = cls._calc_length_in_byte(subrange[1])
        # Encode `length` big-endian into the fixed number of bytes implied by
        # the vector's ceiling (mirrors _decode_length above).
        return length.to_bytes(length_in_byte, byteorder='big')
@classmethod
def _calc_length_in_byte(cls, ceiling):
return (ceiling.bit_length() + 7) // 8
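        # Worked examples of the formula above: a ceiling of 2**8 - 1 has a
        # bit_length of 8 and needs 1 length byte; 2**16 - 1 has a bit_length
        # of 16 and needs 2.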
class Opaque(ConvertibleToBytes, BuildableFromBytes):
def __init__(self, byte):
self.byte = byte
def __eq__(self, other):
return isinstance(self, type(other)) and self.byte == other.byte
def to_bytes(self):
return struct.pack(">B", self.byte)
@classmethod
def from_bytes(cls, data):
return cls(struct.unpack(">B", data.read(1))[0])
class CipherSuite(ConvertibleToBytes, BuildableFromBytes):
def __init__(self, cipher):
self.cipher = cipher
def __eq__(self, other):
return isinstance(self, type(other)) and self.cipher == other.cipher
def to_bytes(self):
return struct.pack(">BB", self.cipher[0], self.cipher[1])
@classmethod
def from_bytes(cls, data):
return cls(struct.unpack(">BB", data.read(2)))
def __repr__(self):
return "CipherSuite({}, {})".format(self.cipher[0], self.cipher[1])
class CompressionMethod(ConvertibleToBytes, BuildableFromBytes):
NULL = 0
def __init__(self):
pass
def __eq__(self, other):
return isinstance(self, type(other))
def to_bytes(self):
return struct.pack(">B", CompressionMethod.NULL)
@classmethod
def from_bytes(cls, data):
method = struct.unpack(">B", data.read(1))[0]
assert method == cls.NULL
return cls()
class Extension(ConvertibleToBytes, BuildableFromBytes):
def __init__(self, extension_type, extension_data):
self.extension_type = extension_type
self.extension_data = extension_data
def __eq__(self, other):
return (isinstance(self, type(other)) and
self.extension_type == other.extension_type and
self.extension_data == other.extension_data)
def to_bytes(self):
return (struct.pack(">H", self.extension_type) +
self.extension_data.to_bytes())
@classmethod
def from_bytes(cls, data):
extension_type = struct.unpack(">H", data.read(2))[0]
extension_data = VariableVector.from_bytes(Opaque, (0, 2**16 - 1), data)
return cls(extension_type, extension_data)
class ClientHello(HandshakeMessage):
def __init__(
self,
client_version,
random,
session_id,
cookie,
cipher_suites,
compression_methods,
extensions,
):
self.client_version = client_version
self.random = random
self.session_id = session_id
self.cookie = cookie
self.cipher_suites = cipher_suites
self.compression_methods = compression_methods
self.extensions = extensions
def to_bytes(self):
return (self.client_version.to_bytes() + self.random.to_bytes() +
self.session_id.to_bytes() + self.cookie.to_bytes() +
self.cipher_suites.to_bytes() +
self.compression_methods.to_bytes() +
self.extensions.to_bytes())
@classmethod
def from_bytes(cls, data):
client_version = ProtocolVersion.from_bytes(data)
random = Random.from_bytes(data)
session_id = VariableVector.from_bytes(Opaque, (0, 32), data)
cookie = VariableVector.from_bytes(Opaque, (0, 2**8 - 1), data)
cipher_suites = VariableVector.from_bytes(CipherSuite, (2, 2**16 - 1),
data)
compression_methods = VariableVector.from_bytes(CompressionMethod,
(1, 2**8 - 1), data)
extensions = None
if data.tell() < len(data.getvalue()):
extensions = VariableVector.from_bytes(Extension, (0, 2**16 - 1),
data)
return cls(
client_version,
random,
session_id,
cookie,
cipher_suites,
compression_methods,
extensions,
)
class HelloVerifyRequest(HandshakeMessage):
def __init__(self, server_version, cookie):
self.server_version = server_version
self.cookie = cookie
def to_bytes(self):
return self.server_version.to_bytes() + self.cookie.to_bytes()
@classmethod
def from_bytes(cls, data):
server_version = ProtocolVersion.from_bytes(data)
cookie = VariableVector.from_bytes(Opaque, (0, 2**8 - 1), data)
return cls(server_version, cookie)
class ServerHello(HandshakeMessage):
def __init__(
self,
server_version,
random,
session_id,
cipher_suite,
compression_method,
extensions,
):
self.server_version = server_version
self.random = random
self.session_id = session_id
self.cipher_suite = cipher_suite
self.compression_method = compression_method
self.extensions = extensions
def to_bytes(self):
return (self.server_version.to_bytes() + self.random.to_bytes() +
self.session_id.to_bytes() + self.cipher_suite.to_bytes() +
self.compression_method.to_bytes() + self.extensions.to_bytes())
@classmethod
def from_bytes(cls, data):
server_version = ProtocolVersion.from_bytes(data)
random = Random.from_bytes(data)
session_id = VariableVector.from_bytes(Opaque, (0, 32), data)
cipher_suite = CipherSuite.from_bytes(data)
compression_method = CompressionMethod.from_bytes(data)
extensions = None
if data.tell() < len(data.getvalue()):
extensions = VariableVector.from_bytes(Extension, (0, 2**16 - 1),
data)
return cls(
server_version,
random,
session_id,
cipher_suite,
compression_method,
extensions,
)
class ServerHelloDone(HandshakeMessage):
def __init__(self):
pass
def to_bytes(self):
return bytearray([])
@classmethod
def from_bytes(cls, data):
return cls()
class HelloRequest(HandshakeMessage):
def __init__(self):
raise NotImplementedError
class Certificate(HandshakeMessage):
def __init__(self):
raise NotImplementedError
class ServerKeyExchange(HandshakeMessage):
def __init__(self):
raise NotImplementedError
class CertificateRequest(HandshakeMessage):
def __init__(self):
raise NotImplementedError
class CertificateVerify(HandshakeMessage):
def __init__(self):
raise NotImplementedError
class ClientKeyExchange(HandshakeMessage):
def __init__(self):
raise NotImplementedError
class Finished(HandshakeMessage):
def __init__(self, verify_data):
raise NotImplementedError
class AlertMessage(Message):
def __init__(self, level, description):
super(AlertMessage, self).__init__(ContentType.ALERT)
self.level = level
self.description = description
def to_bytes(self):
struct.pack(">BB", self.level, self.description)
@classmethod
def from_bytes(cls, data):
level, description = struct.unpack(">BB", data.read(2))
try:
return cls(AlertLevel(level), AlertDescription(description))
except BaseException:
data.read()
            # An AlertMessage could be encrypted, in which case we can't parse it.
return cls(None, None)
def __repr__(self):
return "Alert(level={}, description={})".format(str(self.level),
str(self.description))
class ChangeCipherSpecMessage(Message):
def __init__(self):
super(ChangeCipherSpecMessage,
self).__init__(ContentType.CHANGE_CIPHER_SPEC)
def to_bytes(self):
return struct.pack(">B", 1)
@classmethod
def from_bytes(cls, data):
assert struct.unpack(">B", data.read(1))[0] == 1
return cls()
def __repr__(self):
return "ChangeCipherSpec(value=1)"
class ApplicationDataMessage(Message):
def __init__(self, raw):
super(ApplicationDataMessage,
self).__init__(ContentType.APPLICATION_DATA)
self.raw = raw
self.body = None
def to_bytes(self):
return self.raw
@classmethod
def from_bytes(cls, data):
# It is safe to read until the end of this byte stream, because
        # there is a single application data message in a record.
length = len(data.getvalue()) - data.tell()
return cls(bytes(data.read(length)))
def __repr__(self):
if self.body:
return "ApplicationData(body={})".format(self.body)
else:
return "ApplicationData(raw_length={})".format(len(self.raw))
handshake_map = {
HandshakeType.HELLO_REQUEST: None, # HelloRequest
HandshakeType.CLIENT_HELLO: ClientHello,
HandshakeType.SERVER_HELLO: ServerHello,
HandshakeType.HELLO_VERIFY_REQUEST: HelloVerifyRequest,
HandshakeType.CERTIFICATE: None, # Certificate
HandshakeType.SERVER_KEY_EXCHANGE: None, # ServerKeyExchange
HandshakeType.CERTIFICATE_REQUEST: None, # CertificateRequest
HandshakeType.SERVER_HELLO_DONE: ServerHelloDone,
HandshakeType.CERTIFICATE_VERIFY: None, # CertificateVerify
HandshakeType.CLIENT_KEY_EXCHANGE: None, # ClientKeyExchange
HandshakeType.FINISHED: None, # Finished
}
content_map = {
ContentType.CHANGE_CIPHER_SPEC: ChangeCipherSpecMessage,
ContentType.ALERT: AlertMessage,
ContentType.HANDSHAKE: HandshakeMessage,
ContentType.APPLICATION_DATA: ApplicationDataMessage,
}
class MessageFactory(object):
last_msg_is_change_cipher_spec = False
def __init__(self):
pass
def parse(self, data, message_info):
messages = []
# Multiple records could be sent in the same UDP datagram
while data.tell() < len(data.getvalue()):
record = Record.from_bytes(data)
if record.version.major != 0xfe or record.version.minor != 0xFD:
raise ValueError("DTLS version error, expect DTLSv1.2")
last_msg_is_change_cipher_spec = type(
self).last_msg_is_change_cipher_spec
type(self).last_msg_is_change_cipher_spec = (
record.content_type == ContentType.CHANGE_CIPHER_SPEC)
# FINISHED message immediately follows CHANGE_CIPHER_SPEC message
# We skip FINISHED message as it is encrypted
if last_msg_is_change_cipher_spec:
continue
fragment_data = io.BytesIO(record.fragment)
# Multiple handshake messages could be sent in the same record
while fragment_data.tell() < len(fragment_data.getvalue()):
content_class = content_map[record.content_type]
assert content_class
messages.append(content_class.from_bytes(fragment_data))
return messages
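# Illustrative usage sketch (hypothetical `datagram_bytes`): wrap a complete
# UDP payload in io.BytesIO and let the factory decode every record in it;
# message_info is not consulted by the decoding shown above.
#
#   factory = MessageFactory()
#   for message in factory.parse(io.BytesIO(datagram_bytes), message_info=None):
#       print(message)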
| {
"content_hash": "004804831f2fc702b11331b819ff8317",
"timestamp": "",
"source": "github",
"line_count": 643,
"max_line_length": 101,
"avg_line_length": 30.055987558320375,
"alnum_prop": 0.6001241850357032,
"repo_name": "lanyuwen/openthread",
"id": "41fc801248d65c39b671772b7656b3ede423f4f5",
"size": "20931",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/dtls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "10128"
},
{
"name": "C",
"bytes": "504489"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "3008688"
},
{
"name": "M4",
"bytes": "42638"
},
{
"name": "Makefile",
"bytes": "77019"
},
{
"name": "Python",
"bytes": "1017946"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "17185"
}
],
"symlink_target": ""
} |
"""
Documents module objects
"""
from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
from anaf.core.models import Object
from anaf.core.conf import settings
from files import FileStorage
import os
import time
import re
# Folder model
class Folder(Object):
""" Every folder may have a parent folder """
name = models.CharField(max_length=255)
parent = models.ForeignKey(
'self', blank=True, null=True, related_name='child_set')
access_inherit = ('parent', '*module', '*user')
def __unicode__(self):
return self.name
def get_absolute_url(self):
"""Returns absolute URL of the object"""
return reverse('documents_folder_view', args=[self.id])
def generate_filename(instance, old_filename):
""" Generate filename """
extension = os.path.splitext(old_filename)[1]
filename = str(time.time()) + extension
return 'documents/files/' + filename
# File model
class File(Object):
""" A binary or other non-renderable file (i.e. an image) """
name = models.CharField(max_length=255)
folder = models.ForeignKey(Folder)
content = models.FileField(
upload_to=generate_filename, storage=FileStorage())
access_inherit = ('folder', '*module', '*user')
def __unicode__(self):
return self.name
def get_file_type(self):
        match = re.match(r'.*\.(?P<extension>[a-z]+)', str(self.content))
if match:
return str(match.group('extension')).upper()
else:
return ''
def can_preview(self):
return self.get_file_type() in ('PNG', 'JPG', 'JPEG', 'BMP', 'GIF', 'SVG')
def get_preview_url(self):
return getattr(settings, 'MEDIA_URL', '/static/media/') + str(self.content)
class Meta:
""" File """
ordering = ['-last_updated']
def get_absolute_url(self):
"""Returns absolute URL of the object"""
return reverse('documents_file_view', args=[self.id])
# Document model
class Document(Object):
""" A readable document, i.e. HTML, which may be rendered directly """
title = models.CharField(max_length=255)
folder = models.ForeignKey(Folder)
body = models.TextField(null=True, blank=True)
access_inherit = ('folder', '*module', '*user')
def __unicode__(self):
return self.title
class Meta:
""" File """
ordering = ['-last_updated']
def get_absolute_url(self):
"""Returns absolute URL of the object"""
return reverse('documents_document_view', args=[self.id])
# WebLink model
class WebLink(Object):
""" A web link """
title = models.CharField(max_length=255)
folder = models.ForeignKey(Folder)
url = models.CharField(max_length=255)
access_inherit = ('folder', '*module', '*user')
def __unicode__(self):
return self.title
class Meta:
""" File """
ordering = ['-last_updated']
def get_absolute_url(self):
"""Returns absolute URL of the object"""
return reverse('documents_weblink_view', args=[self.id])
| {
"content_hash": "5d8fcb2747265284ac59bd7ca9008c6b",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 83,
"avg_line_length": 25.66393442622951,
"alnum_prop": 0.6231236026828489,
"repo_name": "tovmeod/anaf",
"id": "6be854802bcd846cc463034cde4ce1d5f1415a71",
"size": "3131",
"binary": false,
"copies": "1",
"ref": "refs/heads/drf",
"path": "anaf/documents/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "400736"
},
{
"name": "HTML",
"bytes": "1512873"
},
{
"name": "JavaScript",
"bytes": "2136807"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2045934"
},
{
"name": "Shell",
"bytes": "18005"
},
{
"name": "TSQL",
"bytes": "147855"
}
],
"symlink_target": ""
} |
"""
Example of how influencers can be queried from the Engine API in CSV format
using Python 2.6+ (including Python 3.x). No extra modules are required beyond
those that come with a base Python install.
Usage:
python influencersToCsv.py <job> <server_hostname> [ <server_port> [ <result_limit> ] ]
The job ID and server hostname must be specified. The port defaults to 8080
if not specified, and the maximum number of results defaults to 10000.
Influencers are returned in descending order of influencer anomaly score; the
most unusual will be at the top of the list.
"""
import csv
import json
import sys
try:
# For Python 3.x
from urllib.request import urlopen
except ImportError:
# For Python 2.x
from urllib2 import urlopen
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s <job> <server_hostname> [ <server_port> [ <result_limit> ] ]\n' % sys.argv[0])
sys.exit(1)
job = sys.argv[1]
server = sys.argv[2]
port = 8080
if len(sys.argv) >= 4:
port = sys.argv[3]
limit = 10000
if len(sys.argv) >= 5:
limit = sys.argv[4]
url = 'http://%s:%s/engine/v2/results/%s/influencers?take=%s' % (server, port, job, limit)
response = urlopen(url).read()
results = json.loads(response.decode('utf-8'))
writtenHeader = False
csvWriter = csv.writer(sys.stdout)
for document in results['documents']:
if not writtenHeader:
csvWriter.writerow([ key for key in sorted(document) ])
writtenHeader = True
csvWriter.writerow([ str(document[key]) for key in sorted(document) ])
| {
"content_hash": "072a667060c63cca844ed025cb3f3c8f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 110,
"avg_line_length": 29.705882352941178,
"alnum_prop": 0.700990099009901,
"repo_name": "prelert/engine-python",
"id": "7f63fd7f923f1580af22420e5a4e51149fc21f66",
"size": "2846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csv/influencersToCsv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102910"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 46653c4a2132
Revises: 1bab833a3cce
Create Date: 2014-03-20 05:14:20.237000
"""
# revision identifiers, used by Alembic.
revision = '46653c4a2132'
down_revision = '1bab833a3cce'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_languages_language', table_name='languages')
    op.drop_column('languages', 'language')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_languages_language', 'languages', ['language'], unique=1)
op.add_column('languages', sa.Column('language', sa.VARCHAR(length=35), nullable=True))
### end Alembic commands ###
| {
"content_hash": "30a70900754f62ef1dae3ad6ef3c5ee4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 91,
"avg_line_length": 27.607142857142858,
"alnum_prop": 0.6959896507115135,
"repo_name": "micknh/EdFirst",
"id": "7ebeec5228b35dfe9c4d35484665bb425c6222f9",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrationsold/versions/46653c4a2132_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1262"
},
{
"name": "JavaScript",
"bytes": "4206"
},
{
"name": "Python",
"bytes": "111349"
}
],
"symlink_target": ""
} |
"""
This runs a simulation using two different sampling techniques.
The model is specified in discrete time.
Usage:
sample.py [-h] [-v] [-q] priority <filename> <runs> <stream> [--sis=<N>]
sample.py [-h] [-v] [-q] shifted <filename> <runs> <stream> [--sis=<N>]
Options:
-h, --help Show this help message.
-v Verbose logging.
-q Log only warnings and errors.
--sis=<N> Use an SIS model with N individuals.
"""
import copy
import logging
import math
import time
import collections
import docopt
import numpy as np
import scipy
import scipy.stats
import networkx as nx
import h5py
import randomstate
import chemical
import pairing_heap
from gracefulinterrupthandler import GracefulInterruptHandler
logger=logging.getLogger("sample")
class GeometricTransition(object):
def __init__(self, p):
self.p=p
self.dist=scipy.stats.geom(p=self.p)
def Sample(self, rng):
return rng.geometric(p=self.p)
def Shifted(self, now, rng):
"""
This now is the time since enabling the transition, not
an absolute time.
"""
return rng.binomial(n=1, p=self.p)==1
class UniformTransition(object):
def __init__(self, a, b):
assert(type(a)==int)
assert(type(b)==int)
assert(b>a)
assert(a>0) # lower bound must be 1 or higher.
self.lims=[a, b]
self.a=a
self.b=b
self.p=1.0/(b-a)
def Sample(self, rng):
return rng.randint(*self.lims)
def Shifted(self, now, rng):
"""
This now is the time since enabling the transition, not
an absolute time.
"""
if now<self.a: return False
assert(now<self.b)
if self.b-now==1:
return True
logger.debug("UniformTransition b {0} now {0}".format(self.b, now))
return rng.binomial(n=1, p=1.0/(self.b-now))==1
class Transition(object):
def __init__(self, distribution, priority):
self.distribution=distribution
self.te=None
self.priority=priority
self.heap_node=None
def Enable(self, now):
self.te=now
def Disable(self, now):
self.te=None
def Sample(self, now, rng):
"""
This sampling method gets called the moment the transition is enabled,
so it marks the enabling time, too. It asks when, in the future,
the transition will fire.
"""
self.te=now
return now+self.distribution.Sample(rng)
def SampleShifted(self, now, rng):
"""
This sampling asks, given the current time, does this transition
fire or not? It's a different sampling technique. The enabling
time, self.te, will already be set.
"""
logger.debug("Transition now {0} te {1}".format(now, self.te))
return self.distribution.Shifted(now-self.te, rng)
def Clone(self):
return Transition(self.distribution, self.priority)
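# Demo added for illustration, contrasting the two ways a Transition is
# sampled.  numpy's RandomState is assumed here only because it exposes
# geometric() and binomial() with the same keywords as the randomstate
# generator used in __main__; the demo is not called anywhere.
def _demo_transition_sampling():
    rng = np.random.RandomState(42)
    trans = Transition(GeometricTransition(p=0.1), priority=0)
    # Priority-style sampling: when enabled at step 5, draw the absolute
    # step at which the transition will fire.
    firing_step = trans.Sample(now=5, rng=rng)
    # Shifted-style sampling: at a later step, ask whether it fires right now.
    fires_now = trans.SampleShifted(now=7, rng=rng)
    return firing_step, fires_now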
def CreateSISModel(N, step_max, transitions):
survival=chemical.DiscreteSurvival(step_max)
process=chemical.Process(survival)
G=nx.complete_graph(N)
node_to_idx=dict()
initial_marking=dict()
for ind_idx, pnode in enumerate(G.nodes_iter()):
node_to_idx[pnode]=ind_idx
for disease_state in ["S", "I"]:
place_name=(ind_idx, disease_state)
process.AddPlace(place_name, disease_state, 0)
initial_marking[place_name]=0
initial_marking[(0, "I")]=1
for s_idx in range(1, N):
initial_marking[(s_idx, "S")]=1
for recover_idx in range(N):
process.AddTransition(("R", recover_idx), "R",
transitions["R"].Clone(),
[((recover_idx, "I"), -1), ((recover_idx, "S"), 1)], 0)
for source_n in G.nodes_iter():
source_idx=node_to_idx[source_n]
for target_n in G.neighbors(source_n):
target_idx=node_to_idx[target_n]
if source_idx != target_idx:
process.AddTransition(("I", source_idx, target_idx), "I",
transitions["I"].Clone(),
[((source_idx, "I"), -1), ((source_idx, "I"), 1),
((target_idx, "S"), -1), ((target_idx, "I"), 1)], 0)
return process, initial_marking
def InterrupterModel(step_max, transitions):
"""
This is an interrupter where there is more than one transition
competing to fire. Maybe that's necessary.
"""
survival=chemical.DiscreteSurvival(step_max)
process=chemical.Process(survival)
initial_marking={ 1 : 1, 2: 1, 3: 0, 8 : 1}
process.AddPlace(1, 1, 0)
process.AddPlace(2, 2, 0)
process.AddPlace(3, 3, 0)
process.AddPlace(8, 8, 0)
process.AddTransition(4, 4, transitions["A"].Clone(),
[(1, -1), (1, 1), (2, -1), (2, 1)], 0)
process.AddTransition(5, 5, transitions["B"].Clone(),
[(2, -1), (3, 1)], 0)
process.AddTransition(6, 6, transitions["C"].Clone(),
[(3, -1), (2, 1)], 0)
process.AddTransition(7, 7, transitions["D"].Clone(),
[(8, -1), (8, 1), (2, -1), (2, 1)], 0)
process.AddTransition(9, 9, transitions["B"].Clone(),
[(2, -1), (3, 1)], 0)
return process, initial_marking
def InterrupterModelOne(step_max, transitions):
"""
Three places, three transitions. See interrupter.{png,pdf}.
"""
survival=chemical.DiscreteSurvival(step_max)
process=chemical.Process(survival)
initial_marking={ 1 : 1, 2: 1, 3: 0}
process.AddPlace(1, 1, 0)
process.AddPlace(2, 2, 0)
process.AddPlace(3, 3, 0)
process.AddTransition(4, 4, transitions["A"].Clone(),
[(1, -1), (1, 1), (2, -1), (2, 1)], 0)
process.AddTransition(5, 5, transitions["B"].Clone(),
[(2, -1), (3, 1)], 0)
process.AddTransition(6, 6, transitions["C"].Clone(),
[(3, -1), (2, 1)], 0)
return process, initial_marking
def SamplePriority(model, initial_marking, step_cnt, summary, rng):
"""
    This sampling method draws a future transition time
    at the moment a transition is enabled. It is written like
    Gibson and Bruck's Next Reaction method, except that it completely
    disobeys statistics by failing to draw from geometric distributions
    and then apply a random variable transformation.
"""
logger.debug("SamplePriority enter step_cnt {0}".format(step_cnt))
now=0
last_step=-1
heap=pairing_heap.pairing_heap()
model.Reset(initial_marking, now)
for first_t_name, first_t in model.AllEnabled():
firing_time=first_t.Sample(now, rng)
first_t.heap_node=heap.insert(
(firing_time, first_t.priority, first_t_name))
while not heap.empty():
now, priority, who=heap.extract()
should_be, was_enabled=model.Enabled(who)
if should_be!=was_enabled:
logger.error("who {0} should {1} was {2}".format(who,
should_be, was_enabled))
assert(should_be==was_enabled)
assert(was_enabled)
if now>step_cnt:
break
logger.debug("SamplePriority {0} {1}".format(now, who))
model.Fire(who, now)
disable, enable=model.AffectedTransitions()
for dname, dtransition in disable:
heap.delete(dtransition.heap_node)
dtransition.heap_node=None
dtransition.te=None
model.Disable(dname, now)
for ename, etransition in enable:
efiring_time=etransition.Sample(now, rng)
etransition.heap_node=heap.insert(
(efiring_time, etransition.priority, ename))
model.Enable(ename, now)
if now!=last_step:
#summary[model.SummaryCounts()["I"]]+=1
last_step=now
model.FinishTiming(now)
return now
def SampleShifted(model, initial_marking, step_cnt, summary, rng):
"""
Think of Gillespie's First Reaction method. At every step,
sample every enabled transition to see whether it will fire.
Yes, this is incredibly slow.
"""
logger.debug("SampleShifted enter step_cnt {0}".format(step_cnt))
now=0
model.Reset(initial_marking, now)
for fname, ftransition in model.AllEnabled():
ftransition.te=now
now=1
while now<step_cnt:
prioritized=collections.defaultdict(list)
for first_t_name, first_t in model.AllEnabled():
prioritized[first_t.priority].append((first_t_name, first_t))
if not prioritized:
break
for priority_key in sorted(prioritized.keys()):
for name, transition in prioritized[priority_key]:
should_be, was_enabled=model.Enabled(name)
if should_be!=was_enabled:
logger.error("who {0} should {1} was {2}".format(name,
should_be, was_enabled))
assert(should_be==was_enabled)
# It's possible a transition was disabled by another
# transition scheduled for the same time.
logger.debug("SampleShifted now {0} name {1}".format(now, name))
if should_be and transition.SampleShifted(now, rng):
transition.te=None
model.Fire(name, now)
# How a transition affected the state of the system
# is usually calculated after a full sweep through all
# transitions, under the assumption that there are
# few or no conflicts. This assumption greatly reduces
# the order of computation, but putting this calculation
# here is assured to be correct in all cases.
disable, enable=model.AffectedTransitions()
for dname, dtransition in disable:
dtransition.te=None
model.Disable(dname, now)
for ename, etransition in enable:
etransition.te=now
model.Enable(ename, now)
else:
pass
now+=1
#summary[model.SummaryCounts()["I"]]+=1
model.FinishTiming(now)
return now
def ConfigureSISModel(arguments):
params=dict()
N=10
dt=0.01
step_cnt=int(100/dt)
# step_max is longest time it will take to fire a transition.
step_max=int(10/dt)
params["dt"]=dt
params["N"]=N
params["step_cnt"]=step_cnt
params["step_max"]=step_max
logger.info("step count {0}".format(step_cnt))
priority={ "I" : 0, "R" : 1 }
# The specification of distributions is discrete, but I'd like them
# to behave similarly, not necessarily the same, as dt changes.
# So we specify times in floating point and convert to integers.
beta=0.5
a=.2
b=1.5
transitions={
"I" : Transition(GeometricTransition(beta*dt/(1+beta*dt)), priority["I"]),
"R" : Transition(UniformTransition(round(a/dt), round(b/dt)), priority["R"])
}
model, initial_marking=CreateSISModel(N, step_max, transitions)
return model, initial_marking, params
def ConfigureInterrupter(arguments):
params=dict()
params["N"]=3
dt=0.01
step_cnt=int(100/dt)
step_max=int(20/dt)
priority={"A" : 0, "B" : 1, "C" : 1, "D" : 0}
a_limits=[round(0.2/dt), round(1.8/dt)]
b_limits=[round(0.8/dt), round(1.6/dt)]
betap=1.0*dt
params["dt"]=dt
params["step_cnt"]=step_cnt
params["step_max"]=step_max
transitions={
"A" : Transition(UniformTransition(a_limits[0], a_limits[1]), priority["A"]),
"B" : Transition(UniformTransition(b_limits[0], b_limits[1]), priority["B"]),
"C" : Transition(UniformTransition(1, 3), priority["C"]),
"D" : Transition(GeometricTransition(betap/(1+betap)), priority["D"])
}
model, initial_marking=InterrupterModel(step_max, transitions)
return model, initial_marking, params
def WriteFile(filename, model, duration, summary):
with GracefulInterruptHandler() as handler:
out_data=h5py.File(filename, "w")
grp_name="run{0:0>4d}".format(0)
grp=out_data.create_group(grp_name)
model.survival.write_hdf(grp)
grp.create_dataset("duration", data=duration)
grp.create_dataset("summary", data=summary)
out_data.close()
if __name__ == "__main__":
arguments = docopt.docopt(__doc__, version="sample 1.0")
if arguments["-v"]:
logging.basicConfig(level=logging.DEBUG)
elif arguments["-q"]:
logging.basicConfig(level=logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
rng=randomstate.prng.pcg64.RandomState(3333333, int(arguments["<stream>"]))
filename=arguments["<filename>"]
instance_cnt=int(arguments["<runs>"])
model, initial_marking, params=ConfigureInterrupter(arguments)
#model, initial_marking, params=ConfigureSISModel(arguments)
if arguments["priority"]:
sampler=SamplePriority
elif arguments["shifted"]:
sampler=SampleShifted
N=params["N"]
step_cnt=params["step_cnt"]
minutes_save=10
logger.info("Writes data every {0} minutes".format(minutes_save))
time_limit_secs=minutes_save*60
walltime=time.time()
summary=np.zeros((N+1,), dtype=np.int)
duration=np.zeros((step_cnt,), dtype=np.int)
run_idx=0
for i in range(instance_cnt):
steps=sampler(model, initial_marking, step_cnt, summary, rng)
if steps<step_cnt:
duration[steps]+=1
if time.time()-walltime > time_limit_secs:
logger.info("Writing {0} to {1}".format(i, filename))
WriteFile(filename, model, duration, summary)
walltime=time.time()
run_idx+=1
WriteFile(filename, model, duration, summary)
logger.info("Density\n{0}".format(summary))
print_cnt=20
locations=np.where(duration>0)[0]
if len(locations)>1:
end=locations[-1]
else:
end=len(duration)
row_cnt=math.ceil(end/print_cnt)
logger.info("Duration {0} out of total {1}".format(
np.sum(duration), instance_cnt))
dt=params["dt"]
for pr_idx in range(print_cnt):
when=dt*pr_idx*row_cnt
row_end=min((pr_idx+1)*row_cnt, end)
amount=np.sum(duration[pr_idx*row_cnt:row_end])
logger.info("{0:>8.2f} {1}".format(when, amount))
survival=model.survival
for dist_kind in ["I", "R"]:
fire_cnt=np.sum(survival.fire[dist_kind])
disable_cnt=np.sum(survival.disable[dist_kind])
beyond=survival.beyond[dist_kind]
logger.info("{0}: F {1} D {2} beyond {3}".format(dist_kind,
fire_cnt, disable_cnt, beyond))
| {
"content_hash": "e67fdcdd6d904b9cf54ab7cea26a813d",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 85,
"avg_line_length": 34.02546296296296,
"alnum_prop": 0.6003809782978434,
"repo_name": "adolgert/discretenext",
"id": "c11f198cee45b0ecb5aaed9f7571332dc94e09f1",
"size": "14699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46156"
}
],
"symlink_target": ""
} |
import unittest
from flask_git import Git
from flask import Flask
import os
from repoutils import TempRepo
import shutil
import tempfile
class TestFlaskGitInit(unittest.TestCase):
"""Flask git extension - init"""
def setUp(self):
self.root_dir = tempfile.mkdtemp()
self.app = Flask(__name__)
self.app.config['GIT_REPOPATH'] = self.root_dir
def test_extension_can_initialize_repo(self):
git = Git()
git.init_app(self.app)
gitfile = os.path.join(self.root_dir,'.git')
print gitfile
self.assertFalse(os.path.isdir(gitfile))
with self.app.app_context():
git.init_repo()
self.assertTrue(os.path.isdir(gitfile))
def tearDown(self):
assert self.root_dir != '/tmp/' and self.root_dir.startswith('/tmp/')
shutil.rmtree(self.root_dir)
class TestFlaskGitFetches(unittest.TestCase):
"""Flask git extension - fetch commit"""
def setUp(self):
self.temprepo = setup_repo()
self.app = Flask(__name__)
self.app.config['GIT_REPOPATH'] = self.temprepo.root_dir
def test_fetches_all_commits(self):
git = Git()
git.init_app(self.app)
with self.app.app_context():
commits = git.commits()
self.assertEquals(3, len(list(commits)))
def test_fetches_all_commits_for_file_in_regular_order(self):
git = Git()
git.init_app(self.app)
with self.app.app_context():
commits = list(git.commits_for_path_recent_first('content/hello.md'))
self.assertEquals(2, len(commits))
self.assertEquals('second commit', commits[0].message)
self.assertEquals('first commit', commits[1].message)
commits = list(git.commits_for_path_recent_first('content/bar.md'))
self.assertEquals(1, len(commits))
def test_fetches_all_commits_for_file_in_reverse_order(self):
git = Git()
git.init_app(self.app)
with self.app.app_context():
commits = list(git.commits_for_path_recent_last('content/hello.md'))
self.assertEquals(2, len(commits))
self.assertEquals('first commit', commits[0].message)
self.assertEquals('second commit', commits[1].message)
commits = git.commits_for_path_recent_last('content/bar.md')
self.assertEquals(1, len(list(commits)))
def test_follows_renames(self):
git = Git()
git.init_app(self.app)
# move bar.md to bar2.md
self.temprepo.delete_contents('content/bar.md')
self.temprepo.copy_contents('content/bar2.md', medium_sized_content())
self.temprepo.commit('fourth commit', 400)
with self.app.app_context():
commits = list(git.commits_for_path_recent_first('content/bar2.md', follow=True))
self.assertEquals(2, len(commits))
self.assertEquals('fourth commit', commits[0].message)
self.assertEquals('third commit', commits[1].message)
def tearDown(self):
self.temprepo.delete()
def setup_repo():
tr = TempRepo()
tr.init()
tr.copy_contents('content/hello.md', 'stuff')
tr.commit("first commit", 100)
tr.copy_contents('content/hello.md', 'more stuff')
tr.commit("second commit", 200)
tr.copy_contents('content/bar.md', medium_sized_content())
tr.commit("third commit", 300)
return tr
def medium_sized_content():
"""the rename algorithm doesn't work well on content that's too small"""
contents = 'qwertyuiopasdfghjklzxcvbnmqwerty\n'
contents += 'qwertyuiopasdfghjklzxcvbnmqwerty\n'
contents += 'qwertyuiopasdfghjklzxcvbnmqwerty\n'
contents += 'qwertyuiopasdfghjklzxcvbnmqwerty\n'
return contents
| {
"content_hash": "6f7a5b2a38ed0dd1a909a3e54fcfc5d2",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 93,
"avg_line_length": 36.45192307692308,
"alnum_prop": 0.6296491690846743,
"repo_name": "drivet/flask-git",
"id": "e5f1cf6f4cc1d472475d102d62b8e17e3242dc30",
"size": "3791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_flask_git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16333"
}
],
"symlink_target": ""
} |
from matplotlib import rc
''' Make matplotlib plots look nice '''
def hidespines(ax, ticksize=0):
rc('font',**{'family':'sans-serif','sans-serif':['Times']})
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
if (ticksize != 0):
ax.xaxis.set_tick_params(labelsize=ticksize)
ax.yaxis.set_tick_params(labelsize=ticksize)
| {
"content_hash": "8b5233a6144fa329fb4578757ce27557",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 60,
"avg_line_length": 33.07692307692308,
"alnum_prop": 0.7046511627906977,
"repo_name": "smsolivier/VEF",
"id": "017a5e95c19bb12b1f8cd28c0d830d68457717cd",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/hidespines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8044"
},
{
"name": "Python",
"bytes": "117935"
},
{
"name": "TeX",
"bytes": "367637"
}
],
"symlink_target": ""
} |
import sys
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import argparse
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import SafeConfigParser as ConfigParser
import plac
if sys.version >= '3':
from inspect import getfullargspec
else:
from plac import getfullargspec
# defaults are defined by the function
# defaults are overridden with values from config file
# defaults and config file are overridden with command line parameters
CONFIG_PARSER_CFG = getfullargspec(ConfigParser.__init__).args[1:]
# the default arguments accepted by a ConfigParser object
def config_conf(obj):
"Extracts the configuration of the underlying ConfigParser from obj"
# If we ever want to add some default options this is where to do that
cfg = {}
for name in dir(obj):
if name in CONFIG_PARSER_CFG: # argument of ConfigParser
cfg[name] = getattr(obj, name)
return cfg
def config_parser_from(obj, config, default_section=None, **confparams):
conf = config_conf(obj)
conf.update(confparams)
parser = ConfigParser(**conf)
parser.plac_ini_obj = obj
parser.plac_ini_config = config
parser.plac_ini_default_section = default_section
return parser
def _read_config(cp, config, default_section=None):
if sys.version >= '3':
try:
with open(config) as fp:
cp.readfp(fp)
except FileNotFoundError:
# consider raising an exception here.
# but, tools may operate fine without a config file.
return {}
else:
from StringIO import StringIO
try:
# this is needed in Python 2 to work with some kinds of ini files
data = StringIO('\n'.join(line.strip() for line in open(config)))
cp.readfp(data)
except IOError:
# consider raising an exception here.
# but, tools may operate fine without a config file.
return {}
cfg = {}
for section in cp.sections():
if default_section is not None and default_section == section:
prefix = ''
else:
prefix = '%s_' % section
for k, v in cp.items(section):
cfg['%s%s' % (prefix, k)] = v
return cfg
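# Illustrative note (section and option names below are assumptions, not taken
# from the original project): with default_section='main', an ini file such as
#
#   [main]
#   verbose = true
#
#   [db]
#   host = localhost
#
# produces {'verbose': 'true', 'db_host': 'localhost'}; options outside the
# default section are prefixed with '<section>_' so they can line up with
# argument names.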
def add_gnu_argument(self, *args, **kwargs):
"Prevent the addition of any single hyphen, multiple letter args"
gnu_args = []
for arg in args:
# Fix if we have at least 3 chars where the first is a hyphen
# and the second is not a hyphen (e.g. -op becomes --op)
        if len(arg) >= 3 and arg[0] == '-' and arg[1] != '-':
gnu_args.append('-' + arg)
else:
gnu_args.append(arg)
argparse.ArgumentParser.add_argument(self, *gnu_args, **kwargs)
def _print_exit(message, file=None):
if message:
if file is None:
file = sys.stderr
file.write(message)
sys.exit(2)
def call(obj, arglist=sys.argv[1:], eager=True, config=None,
default_section=None, gnu=True):
if gnu:
plac.ArgumentParser.add_argument = add_gnu_argument
if config is None:
return plac.call(obj, arglist=arglist, eager=eager)
argparser = plac.parser_from(obj)
argnames = argparser.argspec.args
defaults = argparser.argspec.defaults
cp = config_parser_from(obj, config, default_section)
cfg = dict(zip_longest(argnames, defaults))
ini_values = _read_config(cp, config, default_section)
for k in obj.__annotations__.keys():
a = plac.Annotation.from_(obj.__annotations__[k])
if a.type and k in ini_values:
if a.type is type(True):
try:
ini_values[k] = cp._convert_to_boolean(ini_values[k])
except ValueError:
argparser.print_usage(sys.stderr)
_print_exit(
"{}: error: {}={} failed conversion to <type 'bool'> in:\n{}\n".format(
argparser.prog, k, ini_values[k], config))
else:
try:
ini_values[k] = a.type(ini_values[k])
except ValueError:
argparser.print_usage(sys.stderr)
_print_exit(
'{}: error: {}={} failed conversion to {} in:\n{}\n'.format(
argparser.prog, k, ini_values[k], a.type, config))
cfg.update(ini_values)
if sys.version >= '3':
items = cfg.items()
else:
items = cfg.iteritems()
argparser.set_defaults(**dict((k, v) for k, v in items))
cmd, result = argparser.consume(arglist)
if plac.iterable(result) and eager: # listify the result
return list(result)
return result
| {
"content_hash": "1b655a4dd5b3b1a767688302e6f2e877",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 95,
"avg_line_length": 31.861842105263158,
"alnum_prop": 0.5985959116250258,
"repo_name": "fprimex/plac_ini",
"id": "5e3cc39d10b8ec6cf26f796ff278aee672914d17",
"size": "4843",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "plac_ini.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5954"
}
],
"symlink_target": ""
} |
import sys
import time
from PlayerBgndThread import processThread
sys.path.append('/home/carlos/Documents/Elettra/Kafka_DonkiOrchestra_project/DonkiOrchestra_0.0.0/DonkiPlayer/scripts/mcstas-generator/src')
if __name__ == "__main__":
if len(sys.argv) < 4:
print "\nUsage:\n\t",sys.argv[0],"player_name info_server_url action_script\n\t"
sys.exit(0)
else:
player_name = sys.argv[1]
info_server = sys.argv[2]
action_code = sys.argv[3]
dt = processThread(player_name, info_server, action_code)
try:
dt.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
dt._alive = False
print "Bye"
| {
"content_hash": "51a7eb433c6543ef59acc1c1faf59440",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 140,
"avg_line_length": 29.541666666666668,
"alnum_prop": 0.6205923836389281,
"repo_name": "ess-dmsc/do-ess-data-simulator",
"id": "59acce91ba94c0ddf52803a7e3eb9a05ac24d70c",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DonkiPlayer/DonkiPlayer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "259376"
},
{
"name": "Roff",
"bytes": "60563"
}
],
"symlink_target": ""
} |
import os
import re
import shutil
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from scoot import client_lib as proto
# TODO: use a tmp domain socket instead of the default.
# Adapted from daemon/server/handler_test.go
def example_test():
tmpdir = tempfile.mkdtemp()
rpc_timeout_ns = int(500*1e6)
try:
# Connect to the daemon server.
proto.start()
# Create paths to ingest.
tmpdir_ok = os.path.join(tmpdir, 'ok')
tmpdir_fail = os.path.join(tmpdir, 'fail')
os.mkdir(tmpdir_ok)
os.mkdir(tmpdir_fail)
# Populate the paths we want to ingest.
resource_ok = os.path.join(tmpdir_ok, "resource.txt")
script_ok = os.path.join(tmpdir_ok, "script.sh")
script_fail = os.path.join(tmpdir_fail, "script.sh")
open(resource_ok, 'w').write("content")
open(script_ok, 'w').write("ls resource.txt")
open(script_fail, 'w').write("ls resource.txt")
# Ingest scripts into their own snapshots. The 'fail' snapshot will be missing resource.txt.
ok_id = proto.create_snapshot(tmpdir_ok)
fail_id = proto.create_snapshot(tmpdir_fail)
# Run scripts serially in their respective snapshots. Block until each run finishes.
ok_run_id = proto.run(argv=["sh", "./script.sh"], timeout_ns=rpc_timeout_ns, snapshot_id=ok_id)
ok_statuses = proto.poll(run_ids=[ok_run_id], timeout_ns=rpc_timeout_ns)
if len(ok_statuses) != 1:
raise proto.ScootException(Exception("expected one poll result for ok_run_id."))
fail_run_id = proto.run(argv=["sh", "./script.sh"], timeout_ns=rpc_timeout_ns, snapshot_id=fail_id)
fail_statuses = proto.poll(run_ids=[fail_run_id], timeout_ns=rpc_timeout_ns)
if len(fail_statuses) != 1:
raise proto.ScootException(Exception("expected one poll result for fail_run_id."))
# Make sure 'ok' and 'fail' returned the correct exit code.
if ok_statuses[0].exit_code != 0:
raise proto.ScootException(Exception("failure checking exit code of 'ok' run: " + str(ok_statuses[0].exit_code)))
if fail_statuses[0].exit_code == 0:
raise proto.ScootException(Exception("failure checking exit code of 'fail' run: " + str(fail_statuses[0].exit_code)))
# Checkout result snapshots for both runs.
ok_dir = os.path.join(tmpdir, "okco")
fail_dir = os.path.join(tmpdir, "failco")
proto.checkout_snapshot(snapshot_id=ok_statuses[0].snapshot_id, dirpath=ok_dir)
proto.checkout_snapshot(snapshot_id=fail_statuses[0].snapshot_id, dirpath=fail_dir)
# Check that 'ok' and 'fail' populated only STDOUT or STDERR respectively.
def assert_file_contains(filepath, contents, msg):
text = open(filepath, 'r').read()
if re.search(contents, text) is None:
raise proto.ScootException(Exception("%s: [%s] bad file contents: %s" % (msg, filepath, text)))
assert_file_contains(os.path.join(ok_dir, "STDOUT"), "resource.txt\n", "ok")
assert_file_contains(os.path.join(ok_dir, "STDERR"), "", "ok")
assert_file_contains(os.path.join(fail_dir, "STDOUT"), "", "fail")
assert_file_contains(os.path.join(fail_dir, "STDERR"), "No such file or directory\n", "fail")
finally:
shutil.rmtree(tmpdir)
proto.stop_daemon()
if __name__ == '__main__':
example_test()
| {
"content_hash": "f1c79337cf4e4b672b8ef7c1b796a15b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 123,
"avg_line_length": 41.87179487179487,
"alnum_prop": 0.676056338028169,
"repo_name": "dbentley/scoot",
"id": "6c5c41ba8a48c41c365480c6bee4f70ad87df69b",
"size": "3290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemon/protocol/python/tests/integration.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "685382"
},
{
"name": "Makefile",
"bytes": "2887"
},
{
"name": "Protocol Buffer",
"bytes": "3584"
},
{
"name": "Python",
"bytes": "36170"
},
{
"name": "Shell",
"bytes": "3425"
},
{
"name": "Thrift",
"bytes": "5323"
}
],
"symlink_target": ""
} |
import csv,sys,os
os.environ['MPLCONFIGDIR'] = '/tmp'
import numpy
from matplotlib.path import Path
from rtree import index as rtree
import shapefile
from pyproj import Proj, transform
def findNeighborhood(location, index_rtree, neighborhoods):
match = index_rtree.intersection((location[0], location[1], location[0], location[1]))
for a in match:
if any(map(lambda x: x.contains_point(location), neighborhoods[a][1])):
return a
return -1
def readNeighborhood(shapeFilename, index, neighborhoods):
sf = shapefile.Reader(shapeFilename)
for sr in sf.shapeRecords():
if sr.record[1] not in ['New York', 'Kings', 'Queens', 'Bronx']: continue
paths = map(Path, numpy.split(sr.shape.points, sr.shape.parts[1:]))
bbox = paths[0].get_extents()
map(bbox.update_from_path, paths[1:])
index.insert(len(neighborhoods), list(bbox.get_points()[0])+list(bbox.get_points()[1]))
neighborhoods.append((sr.record[3], paths))
neighborhoods.append(('UNKNOWN', None))
def parseInput():
for line in sys.stdin:
line = line.strip('\n')
values = line.split(',')
if len(values)>1 and values[0]!='medallion':
yield values
def geocode(longitude,latitude,index_rtree,neighborhoods):
if not latitude or not longitude:
#print("Error reading longitude/latitude")
return -1
#convert to projected
inProj = Proj(init='epsg:4326')
outProj = Proj(init='epsg:26918')
outx,outy = transform(inProj,outProj,longitude,latitude)
pickup_location = (outx,outy)
resultMap = findNeighborhood(pickup_location, index_rtree, neighborhoods)
if resultMap!=-1:
zipcode_result = neighborhoods[resultMap][0]
return zipcode_result
else:
#print("Unable to convert lat-lon: %f %f"%(float(latitude),float(longitude)))
return -1
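# Hedged usage example (the coordinates are illustrative): readNeighborhood()
# must run first so index_rtree and neighborhoods are populated; geocode() then
# takes a WGS84 longitude/latitude pair and returns the matching postal
# boundary record, or -1 when no polygon contains the point.
#
#   zipcode = geocode(-73.9857, 40.7484, index_rtree, neighborhoods)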
def main():
index_rtree = rtree.Index()
neighborhoods = []
agg = {}
readNeighborhood('PostalBoundary.shp', index_rtree, neighborhoods)
for values in parseInput():
try:
#general trips attributes
passenger_count = values[7]
pickup_longitude = values[10]
pickup_latitude = values[11]
dropoff_longitude = values[12]
dropoff_latitude = values[13]
tip_amount = values[18]
total_amount = values[20]
            paymentType = values[14]
tip_percentage = ((float(tip_amount)/float(total_amount))*100)
pickup_location = (float(pickup_longitude), float(pickup_latitude))
dropoff_location = (float(dropoff_longitude), float(dropoff_latitude))
pickup_zipcode = geocode(pickup_location[0], pickup_location[1],index_rtree,neighborhoods)
dropoff_zipcode = geocode(dropoff_location[0], dropoff_location[1],index_rtree,neighborhoods)
if (pickup_zipcode!=-1) and (dropoff_zipcode!=-1):
                print '%s\t%s,%s,%s,%s' % (pickup_zipcode+'^'+dropoff_zipcode, passenger_count, tip_percentage, pickup_zipcode, dropoff_zipcode)
except:
pass
if __name__ == '__main__':
main()
| {
"content_hash": "c24b2d1a2b4c4510fd2126cd7ff8fc0b",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 144,
"avg_line_length": 37.34117647058824,
"alnum_prop": 0.6364209199747952,
"repo_name": "vzmehta/BigData2016",
"id": "7c3c96eedbe12ed8ea3c5a79449d956bb70e8a4c",
"size": "3198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/mapper_zip2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1885"
},
{
"name": "Python",
"bytes": "115393"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
} |
import json
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
from sara_msgs.msg import Entity
"""
Created on 15/05/2018
@author: Lucas Maurice
"""
class WonderlandGetPersonByRecognitionId(EventState):
'''
Find a person by ID.
># id int Recognition name of the object
#> entity sara_msgs/Entity the recognised entity
<= done return when one entity exist
<= none return when no entity exist
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandGetPersonByRecognitionId, self).__init__(outcomes=['done', 'none', 'error'],
input_keys=['id'], output_keys=['entity'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/?peopleRecognitionId=" + str(userdata.id)
# try the request
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
# parse parameter json data
data = json.loads(response.content)
loginfo(data)
if 'peopleId' in data:
userdata.entity = self.generate_entity(data)
return 'done'
else:
return 'none'
@staticmethod
def generate_entity(data):
entity = Entity()
entity.wonderlandId = data['peopleId']
entity.ID = data['peopleRecognitionId']
entity.name = 'person'
entity.category = 'person'
if 'peopleColor' in data and data['peopleColor'] is not None:
entity.color = data['peopleColor'].encode('ascii', 'ignore')
if 'peopleName' in data:
entity.aliases.append(data['peopleName'].encode('ascii', 'ignore'))
if 'peoplePose' in data and data['peoplePose'] is not None:
entity.pose = data['peoplePose'].encode('ascii', 'ignore')
entity.poseProbability = data['peopleGenderAccuracy']
if 'peopleGender' in data and data['peopleGender'] is not None:
entity.face.gender = data['peopleGender'].encode('ascii', 'ignore')
entity.face.genderProbability = data['peopleGenderAccuracy']
if 'peopleEmotion' in data and data['peopleEmotion'] is not None:
entity.face.emotion = data['peopleEmotion'].encode('ascii', 'ignore')
entity.face.emotionProbability = data['peopleEmotionAccuracy']
entity.isOperator = data['peopleIsOperator']
loginfo(entity)
return entity
| {
"content_hash": "e888bb8f57112329260986f781f5400d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 107,
"avg_line_length": 31.310344827586206,
"alnum_prop": 0.6016886930983847,
"repo_name": "WalkingMachine/sara_behaviors",
"id": "4795ac4e9438f8d7c3a71e9b44be1f83a6716561",
"size": "2763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sara_flexbe_states/src/sara_flexbe_states/WonderlandGetPersonByRecognitionId.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "6456"
},
{
"name": "CMake",
"bytes": "2065"
},
{
"name": "Python",
"bytes": "905600"
},
{
"name": "Shell",
"bytes": "2661"
}
],
"symlink_target": ""
} |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.utils import timezone
import uuid
class OnlineSurvey(models.Model):
"""
OnlineSurvey, providing a reference to customizable online surveys (e.g. SurveyMonkey)
"""
WELL = 'w'
REGISTRY = 'r'
SEARCH = 's'
SURVEY_PAGE_CHOICES = (
(WELL, "well"),
(REGISTRY, "registry"),
(SEARCH, "search")
)
survey_guid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
survey_introduction_text = models.TextField(verbose_name="Introduction Text", max_length=200, blank=True, null=True)
survey_link = models.URLField(verbose_name="Link", blank=True, null=True)
survey_enabled = models.BooleanField(verbose_name="Enabled", blank=False, null=False, default=False)
survey_page = models.CharField(verbose_name="Page", choices=SURVEY_PAGE_CHOICES, max_length=1, default=WELL)
    effective_date = models.DateField(default=timezone.now, blank=False, null=False)
expiry_date = models.DateField(blank=True, null=True)
def __str__(self):
return '{}: {} | {} | {}'.format(self.survey_introduction_text, self.survey_link, self.survey_enabled, self.survey_page)
class Meta:
db_table = 'online_survey'
ordering = ['effective_date']
"""
def __str__(self):
return self.description
return '{} {} {} {} {}'.format(self.survey_guid, self.survey_introduction_text, self.survey_link, self.survey_enabled, self.survey_page)
"""
| {
"content_hash": "478c97315d93b74c70bf5165234daf12",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 144,
"avg_line_length": 38.22222222222222,
"alnum_prop": 0.6841085271317829,
"repo_name": "rstens/gwells",
"id": "397e70bd86a20b63b205b2d60d99c9a99c2608b6",
"size": "2064",
"binary": false,
"copies": "1",
"ref": "refs/heads/developer",
"path": "gwells/models/OnlineSurvey.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1707"
},
{
"name": "CSS",
"bytes": "71007"
},
{
"name": "Groovy",
"bytes": "91669"
},
{
"name": "HTML",
"bytes": "136711"
},
{
"name": "JavaScript",
"bytes": "193917"
},
{
"name": "PLpgSQL",
"bytes": "49465"
},
{
"name": "Python",
"bytes": "481602"
},
{
"name": "Shell",
"bytes": "52420"
},
{
"name": "TSQL",
"bytes": "3727"
},
{
"name": "Vue",
"bytes": "65417"
}
],
"symlink_target": ""
} |
import os
import sys
import logging
import optparse
import subprocess
class PackageCreator:
def __init__(self):
self.opts = None
logging.basicConfig(level=logging.INFO)
self.parse_args()
def _execCommand(self, cmd_string) :
logging.info('Issuing command %s' % cmd_string)
command = cmd_string.split()
return subprocess.check_output(command)
def parse_args(self) :
parser = optparse.OptionParser()
parser.add_option("--version", \
help="Version number for gram deb release", \
default=None, dest="version")
parser.add_option("--output_directory", \
help="Output directory for deb files", \
default="/tmp", dest="output_directory")
parser.add_option("--gcf_root", \
help="Location of local GCF root", \
default="/opt/gcf-2.2", dest="gcf_root")
parser.add_option("--mon_root", \
help="Location of Monitoring code", \
default="/opt/ops-monitoring", dest="mon_root")
parser.add_option("--is_update", \
help="Use this option to create an update package rather than the full package", \
default=False, dest="is_update")
parser.add_option("--gram_root", \
help="Root of the GRAM source tree", \
default=os.environ['HOME'], dest="gram_root")
[self.opts, args] = parser.parse_args()
# Change the version in the DEBIAN_*/control files
# Create the two .deb files
def run(self):
if self.opts.version is None:
print "Version must be set"
sys.exit(0)
# Check if it's an update packager
if self.opts.is_update:
template = "python createupdatedpkg.py --gcf_root=%s --version=%s --gram_root=%s --deb_filename=%s/gram_%s.deb"
cmd = template % (self.opts.gcf_root,self.opts.version,self.opts.gram_root,self.opts.output_directory, \
"update")
self._execCommand(cmd)
return
# Generate the two deb files
template = "python createdpkg.py --compute_node=%s --gcf_root=%s --mon_root=%s --deb_filename=%s/gram_%s.deb --version=%s --gram_root=%s"
control_command = template % \
("False", self.opts.gcf_root, self.opts.mon_root, self.opts.output_directory, \
"control", self.opts.version, self.opts.gram_root)
compute_command = template % \
("True", self.opts.gcf_root, self.opts.mon_root, self.opts.output_directory, \
"compute", self.opts.version, self.opts.gram_root)
self._execCommand(control_command)
self._execCommand(compute_command)
if __name__ == "__main__":
PackageCreator().run()
| {
"content_hash": "551ab5804af71e00cc6698564256dcea",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 145,
"avg_line_length": 40.67567567567568,
"alnum_prop": 0.5441860465116279,
"repo_name": "GENI-NSF/gram",
"id": "874aacbbd66ff25c2d6a94af6ef80ce16790c4c0",
"size": "3365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grizzly/pkg/create_gram_packages.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15878"
},
{
"name": "Makefile",
"bytes": "440"
},
{
"name": "Python",
"bytes": "1625664"
},
{
"name": "Shell",
"bytes": "16869"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from geocoder.base import OneResult, MultipleResultsQuery
from geocoder.location import Location
class IpinfoResult(OneResult):
@property
def lat(self):
loc = self.raw.get('loc')
if loc:
return Location(loc).lat
@property
def lng(self):
loc = self.raw.get('loc')
if loc:
return Location(loc).lng
@property
def address(self):
if self.city:
return u'{0}, {1}, {2}'.format(self.city, self.state, self.country)
elif self.state:
return u'{0}, {1}'.format(self.state, self.country)
elif self.country:
return u'{0}'.format(self.country)
else:
return u''
@property
def postal(self):
return self.raw.get('postal')
@property
def city(self):
return self.raw.get('city')
@property
def state(self):
return self.raw.get('region')
@property
def country(self):
return self.raw.get('country')
@property
def hostname(self):
return self.raw.get('hostname')
@property
def ip(self):
return self.raw.get('ip')
@property
def org(self):
return self.raw.get('org')
class IpinfoQuery(MultipleResultsQuery):
"""
API Reference
-------------
https://ipinfo.io
"""
provider = 'ipinfo'
method = 'geocode'
_URL = 'http://ipinfo.io/json'
_RESULT_CLASS = IpinfoResult
_KEY_MANDATORY = False
def _before_initialize(self, location, **kwargs):
if location.lower() == 'me' or location == '':
self.url = 'http://ipinfo.io/json'
else:
self.url = 'http://ipinfo.io/{0}/json'.format(self.location)
def _adapt_results(self, json_response):
return [json_response]
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = IpinfoQuery('8.8.8.8')
g.debug()
| {
"content_hash": "30cafcd52438b715d23a29a85c33083e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 22.235955056179776,
"alnum_prop": 0.5709954522486104,
"repo_name": "DenisCarriere/geocoder",
"id": "3cc5095502862a76fb2aca8ad9a73ce7613bf0e4",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocoder/ipinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "415"
},
{
"name": "Python",
"bytes": "283948"
}
],
"symlink_target": ""
} |
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QObject
from math import floor
from queue import Queue
import qtawesome as qta
from config import config
class StatusButton(QWidget):
def __init__(self, start_m, pause_m, parent=None):
QWidget.__init__(self, parent=parent)
self._lyt = QHBoxLayout()
self._lyt.setSpacing(0)
self._lyt.setContentsMargins(0, 0, 0, 0)
        # Signals: 0: start signal, 1: pause signal
self._sigs = [start_m, pause_m]
# fa.play fa.stop fa.pause fa.spinner
self._st_loading()
self._btn = QPushButton(self._icon, self._text, parent=self)
self._btn.setStyleSheet('background-color: {};'.format(self._clr))
self._btn.clicked.connect(self._sigs[1])
# Status 0: connected, 1: loading, 2: paused
self._status = 1
self._lyt.addWidget(self._btn)
self.setLayout(self._lyt)
@property
def icon(self):
return self._icon
@icon.setter
def icon(self, name):
if not name:
return None
if name not in ['play', 'stop', 'pause', 'spinner']:
return None
self._icon = qta.icon('fa.{}'.format(name))
@property
def text(self):
return self._text
@text.setter
    def text(self, text):
if not text:
return None
self._text = text
@property
def status(self):
return self._status
@status.setter
def status(self, status):
# print('Status {}'.format(status))
if not self.sigs:
print('Can\'t change status before signals will be assigned.')
return None
if status not in range(3):
return None
self._status = status
# print('Chaging status to: {}.'.format(self._status))
if status == 0:
self._st_running()
elif status == 1:
self._st_loading()
elif status == 2:
self._st_paused()
else:
pass
self._repaint()
def _st_running(self):
self._text = 'Pause'
self._icon = qta.icon('fa.pause')
self._clr = '#DB9292'
self._btn.disconnect()
self._btn.clicked.connect(self._sigs[1])
def _st_loading(self):
self._text = 'Loading'
self._icon = qta.icon('fa.spinner')
self._clr = '#D8D8D8'
def _st_paused(self):
self._text = 'Run'
self._icon = qta.icon('fa.play')
self._clr = '#0F822C'
self._btn.disconnect()
self._btn.clicked.connect(self._sigs[0])
@property
def sigs(self):
return self._sigs
@sigs.setter
def sigs(self, signals):
if not signals:
return None
self._sigs = signals
def _repaint(self):
self._btn.setStyleSheet('background-color: {};'.format(self._clr))
self._btn.setText(self._text)
self._btn.setIcon(self._icon)
self.update()
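# Hedged usage sketch (the slot callables are placeholders): the widget is
# constructed with the start and pause slots, then driven by assigning .status
# (0 = connected/running, 1 = loading, 2 = paused), which swaps the icon,
# label, colour and the connected slot.
#
#   button = StatusButton(start_m=on_start, pause_m=on_pause)
#   button.status = 0   # show "Pause"; clicking now triggers the pause slot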
| {
"content_hash": "0da8595c08d079dfd66993340038c7a8",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 74,
"avg_line_length": 26.032,
"alnum_prop": 0.5586969883220652,
"repo_name": "alberand/PySM",
"id": "190ec25ef9e6ea9303a144d54ff797d27e3f8ce8",
"size": "3254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "status_button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31742"
}
],
"symlink_target": ""
} |
"""
WSGI config for twote project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twote.settings")
application = get_wsgi_application()
| {
"content_hash": "03bc748839365a300789282d0a242391",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.1875,
"alnum_prop": 0.7674418604651163,
"repo_name": "totalgood/twote",
"id": "94a5e6cc2da6fcda0d738030b4e36d3ccbb3df0b",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twote/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92192"
},
{
"name": "Shell",
"bytes": "1293"
}
],
"symlink_target": ""
} |
from readStockData import readClosingPrice
from readStockData import readVolume
from dataAnalysis import calCorrelation
s = readClosingPrice('ibm.xls')
v = readVolume('ibm.xls')
calCorrelation(s,v)
| {
"content_hash": "fe08d86ec8914eec5c38741907ab2b8e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 25,
"alnum_prop": 0.82,
"repo_name": "cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price",
"id": "1aaa7d3e8287db2d8a27f19386b58cadbe2b91ed",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/14 ibm/ibmAnalyze.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "647999"
}
],
"symlink_target": ""
} |
import errno
from collections import deque
from _multiprocessing import Connection
from satori.objects import Argument
from .protocol import Attach, Detach, Disconnect, Map, Unmap, Send, Receive, KeepAlive, ProtocolError
class Slave2(object):
@Argument('connection', type=Connection)
def __init__(self, connection):
self.connection = connection
self.queue_clients = dict()
self.clients = set()
self.added_clients = deque()
self.removed_clients = deque()
self.terminated = False
def terminate(self):
self.terminated = True
def attach(self, client, queue):
if queue not in self.queue_clients:
self.connection.send(Attach(queue))
self.connection.recv()
self.queue_clients[queue] = deque()
self.queue_clients[queue].append(client)
def detach(self, client, queue):
if queue in self.queue_clients:
self.queue_clients[queue].remove(client)
if not self.queue_clients[queue]:
self.connection.send(Detach(queue))
self.connection.recv()
del self.queue_clients[queue]
def map(self, criteria, queue):
self.connection.send(Map(criteria, queue))
return self.connection.recv()
def unmap(self, mapping):
self.connection.send(Unmap(mapping))
self.connection.recv()
def send(self, event):
self.connection.send(Send(event))
self.connection.recv()
def keep_alive(self):
self.connection.send(KeepAlive())
self.connection.recv()
def disconnect(self):
self.connection.send(Disconnect())
def add_client(self, client):
self.added_clients.append(client)
def remove_client(self, client):
self.removed_clients.append(client)
def run(self):
try:
while not self.terminated:
while self.removed_clients:
client = self.removed_clients.popleft()
for queue in set(self.queue_clients):
if client in self.queue_clients[queue]:
self.detach(client, queue)
client.deinit()
self.clients.remove(client)
while self.added_clients:
client = self.added_clients.popleft()
self.clients.add(client)
client.slave = self
client.init()
if not self.clients:
break
self.connection.send(Receive())
try:
(queue, event) = self.connection.recv()
except IOError as e:
if e[0] == errno.EINTR:
break
else:
raise
if queue in self.queue_clients:
client = self.queue_clients[queue].popleft()
client.handle_event(queue, event)
self.queue_clients[queue].append(client)
finally:
# not deinitializing remaining clients, only disconnecting from queues
for client in self.clients:
for queue in set(self.queue_clients):
if client in self.queue_clients[queue]:
self.detach(client, queue)
self.clients.clear()
self.disconnect()
| {
"content_hash": "94d60e36a67efc44f752b7f41a240b67",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 101,
"avg_line_length": 33.30769230769231,
"alnum_prop": 0.5502309468822171,
"repo_name": "zielmicha/satori",
"id": "f50fcfad1db38e459dc2eac8826a2a409990d6c6",
"size": "3497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori.events/satori/events/slave2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "165337"
},
{
"name": "CSS",
"bytes": "72202"
},
{
"name": "HTML",
"bytes": "56647"
},
{
"name": "Java",
"bytes": "270392"
},
{
"name": "JavaScript",
"bytes": "300430"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "1011796"
},
{
"name": "Shell",
"bytes": "231478"
},
{
"name": "TeX",
"bytes": "17071"
}
],
"symlink_target": ""
} |
import time
import os
import io
import traceback
from PIL import Image
import threading
try:
from picamera.array import PiRGBArray
from picamera import PiCamera
except:
pass
import logging
import settings
from task_common import TaskBase
from image_lib import overlay_pil_image_pi, watermark_image
from fb import *
class StillFrameTask(TaskBase):
'''
    Saves the still photo
'''
STILL_FRAME_SECONDS = 4
still_frame = None
start_time = None
_overlay = None
def __init__(self, ctx):
TaskBase.__init__(self, ctx)
self._is_completed = False
def execute(self):
if self.still_frame is None:
stream = io.BytesIO()
self.device_ctx.camera.capture(
stream, use_video_port=True, format='jpeg')
try:
self.still_frame = Image.open(stream)
self._overlay = overlay_pil_image_pi(
self.device_ctx.camera, self.still_frame)
self.device_ctx.custom_data["STILL_IMAGE"] = self.still_frame
except:
logging.error(traceback.format_exc())
if self.start_time is None:
self.start_time = time.time()
diff_time = int(round(time.time() - self.start_time))
if diff_time >= self.STILL_FRAME_SECONDS:
if self._overlay is not None:
self.device_ctx.camera.remove_overlay(self._overlay)
self._is_completed = True
def is_completed(self):
return self._is_completed
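# Hedged sketch (added for illustration) of how a task with this interface is
# typically driven; the real scheduler lives elsewhere in the project and `ctx`
# stands for an already initialised device context.
#
#   task = StillFrameTask(ctx)
#   while not task.is_completed():
#       task.execute()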
| {
"content_hash": "cd52e1ffd6c73b27fd8e43245defab8f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 77,
"avg_line_length": 24.967741935483872,
"alnum_prop": 0.6046511627906976,
"repo_name": "guglielmino/selfie-o-matic",
"id": "1df17af16a9af42bc58f1bff7882cf46461df968",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/task_stillframe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27366"
}
],
"symlink_target": ""
} |
import random
from collections import namedtuple
from os import path, makedirs
import zipfile
from tqdm import tqdm
from embeddings.embedding import Embedding
class ElmoEmbedding(Embedding):
"""
Reference: https://allennlp.org/elmo
"""
settings = {
'weights': 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5',
'options': 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json',
}
def __init__(self):
from allennlp.modules.elmo import _ElmoCharacterEncoder
if not path.isdir(self.path('elmo')):
makedirs(self.path('elmo'))
self.fweights = self.ensure_file(path.join('elmo', 'weights.hdf5'), url=self.settings['weights'])
self.foptions = self.ensure_file(path.join('elmo', 'options.json'), url=self.settings['options'])
self.embeddings = _ElmoCharacterEncoder(self.foptions, self.fweights)
def emb(self, word, default=None):
from allennlp.modules.elmo import batch_to_ids
idx = batch_to_ids([[word]])
emb = self.embeddings(idx)['token_embedding']
return emb[0, 1].tolist()
if __name__ == '__main__':
from time import time
emb = ElmoEmbedding()
for w in ['canada', 'vancouver', 'toronto']:
start = time()
print('embedding {}'.format(w))
print('size {}'.format(len(emb.emb(w))))
print('took {}s'.format(time() - start))
| {
"content_hash": "a52b02063a102f6d601228f961358b48",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 163,
"avg_line_length": 37.595238095238095,
"alnum_prop": 0.6548448385053831,
"repo_name": "vzhong/embeddings",
"id": "2928b66cb05ea3c98744da443f0911e136264b88",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "embeddings/elmo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "23065"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
import sys as _sys
import numpy as _np
import scipy as _sp
from scipy.stats import chi2 as _chi2
import warnings as _warnings
import time as _time
import itertools as _itertools
from functools import reduce as _reduce
from functools import lru_cache as _lru_cache
try:
from ...tools import fastcalc as _fastcalc
except:
_fastcalc = None
try:
import cvxpy as _cp
except ImportError:
_cp = None
REBUILD = True
REVERT_MSG_THRESHOLD = 10.0 # larger values = fewer messages
MAX_RESIDUAL_TVD_REDUCTION_PER_ITER = 0.3
OBJ_CHK_TOL = 1e-6 # tolerance used to check that objective fn decreases when it should
ZERO_RTVD_THRESHOLD = 1e-5 # threshold for considering a residualTVD == 0 (and not needing to compute higher weights)
# Make numpy raise exceptions on wrong input, rather than just warnings
# Useful for correctly handling logs of negative numbers
#_np.seterr(invalid='raise', divide='raise') # don't do this globally in pyGSTi - use only for debugging!
# The "zero" used in positive-probability constraints. Cannot be exactly 0
# because this causes problems when evaluating the log inside convex solver
# objective functions.
CONSTRAINT_ZERO = 0.0 # 5e-10
default_cvxpy_solver_args = {
"all": dict(warm_start=True),
"SCS": dict(eps=2e-6, max_iters=1000),
"kicked_SCS": dict(eps=1e-7, max_iters=10000)
}
# ------------------------------------------------------------------------------
# Utility functions
# ------------------------------------------------------------------------------
def default_cvxpy_args(solver):
addl_args = default_cvxpy_solver_args['all'].copy()
addl_args.update(default_cvxpy_solver_args.get(solver, {}))
return addl_args
def remove_kicked(s):
if s.startswith("kicked_"):
return s[len("kicked_"):]
return s
def print_revert_msg(formatted_str, tup, verbosity):
greater, lesser = tup
if verbosity > 0 and (greater - lesser) / (greater + lesser + 1e-6) > REVERT_MSG_THRESHOLD:
print("REVERTING: " + (formatted_str % tup))
# ------------------------------------------------------------------------------
# Parameterizing weight-k stochastic matrices: utility functions
# ------------------------------------------------------------------------------
def unit_vector(a, b):
"""Returns the unit vector of length 'b' with the 'a'th element = 1"""
tmp = _np.zeros(b)
tmp[a] = 1
return tmp
def matrix_units(dim):
""" Returns a list of all matrix units of dimension `dim` """
return [_np.reshape(unit_vector(a, dim**2), (dim, dim)) for a in range(dim**2)]
def multikron(a):
""" Kronecker product of all the elements of `a` """
return _reduce(_np.kron, a)
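# A minimal sketch (not part of the original module): `multikron` composes single-bit
# operators into an operator on the full register, e.g. for 3 bits
#   flip_middle = multikron([_np.eye(2), _np.array([[0., 1.], [1., 0.]]), _np.eye(2)])
#   assert flip_middle.shape == (8, 8)   # acts on bit 1, identity on bits 0 and 2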
# These are useful for making arbitrary matrices and then sticking in the right number of identities:
def interior_tensor_product(mx, dim_a, dim_b, e=None):
"""
`mx` is an operator on two subsystems of dimension dim_a and dim_b
`mx = sum_i A_i \otimes B_i` where A_i is an operator on subsystem a and B_i is an operator on subsystem b
Return: sum_i A_i \otimes e \otimes B_i
"""
assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), "Dimensions do not agree with matrix size"
assert _np.shape(e)[0] == _np.shape(e)[1], "e should be a square matrix"
basis_a = matrix_units(dim_a)
basis_b = matrix_units(dim_b)
return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])
for unit_a in basis_a for unit_b in basis_b))
def swell_slow(mx, which_bits, n_bits=4):
# M a transition matrix on bits b1..bn
# Return a transition matrix on all bits
assert all([bit < n_bits for bit in which_bits]), "You've specified bits not in the register"
which_bits = _np.array(which_bits)
if set(which_bits) == set(_np.arange(n_bits)):
return mx
for ind in range(n_bits):
if ind in which_bits:
continue
else:
dim_before = 2**(sum(which_bits < ind))
dim_after = 2**(sum(which_bits > ind))
mx = interior_tensor_product(mx, dim_before, dim_after, _np.eye(2))
which_bits = _np.sort(_np.append(which_bits, ind))
return swell_slow(mx, which_bits, n_bits)
def swell(mx, which_bits, n_bits=4):
# M a transition matrix on bits b1..bn
# Return a transition matrix on all bits
assert all([bit < n_bits for bit in which_bits]), "You've specified bits not in the register"
which_bits = _np.array(which_bits)
if set(which_bits) == set(_np.arange(n_bits)):
return mx
# *** Below is a special case of construction found in DMOpRep_Embedded.__cinit__ ***
# (where each sector/component has dimension 2 - a classical bit)
action_inds = which_bits # the indices that correspond to mx indices
numBasisEls = _np.array([2] * n_bits, _np.int64)
# numBasisEls_noop_blankaction is just numBasisEls with actionInds == 1
numBasisEls_noop_blankaction = numBasisEls.copy()
numBasisEls_noop_blankaction[action_inds] = 1
# multipliers to go from per-label indices to tensor-product-block index
# e.g. if map(len,basisInds) == [1,4,4] then multipliers == [ 16 4 1 ]
multipliers = _np.array(_np.flipud(_np.cumprod([1] + [2] * (n_bits - 1))), _np.int64)
# noop_incrementers[i] specifies how much the overall vector index
# is incremented when the i-th "component" digit is advanced
dec = 0
noop_incrementers = _np.empty(n_bits, _np.int64)
for i in range(n_bits - 1, -1, -1):
noop_incrementers[i] = multipliers[i] - dec
dec += (numBasisEls_noop_blankaction[i] - 1) * multipliers[i]
# self.baseinds specifies the contribution from the "active
# component" digits to the overall vector index.
baseinds = _np.empty(2**len(action_inds), _np.int64)
basisInds_action = [[0, 1]] * len(action_inds)
for ii, op_b in enumerate(_itertools.product(*basisInds_action)):
vec_index = 0
for j, bInd in zip(action_inds, op_b):
vec_index += multipliers[j] * bInd
baseinds[ii] = vec_index
ret = _np.zeros((2**n_bits, 2**n_bits), 'd') # final "swelled" matrix
mx = _np.ascontiguousarray(mx)
ret = _np.ascontiguousarray(ret)
_fastcalc.fast_add_embeded(mx, ret, noop_incrementers, numBasisEls_noop_blankaction, baseinds)
#CHECK DEBUG
#check = swell_slow(mx, which_bits, n_bits)
#assert(_np.allclose(check, ret))
return ret
# ------------------------------------------------------------------------------
# Functions to handle parameter counting for stochastic matrices
# ------------------------------------------------------------------------------
def n_matrices_per_weight(weight, n_bits):
""" The number of submatrices there are for `weight` """
return int(_sp.special.binom(n_bits, weight))
def n_parameters_per_matrix(weight, n_bits):
""" The number of parameters needed to define a weight-w transition submatrix on `n_bits`"""
return 2**weight * (2**weight - 1)
def n_parameters(weight, n_bits):
""" The number of parameters needed to define a complete weight-w transition matrix"""
n_w = n_parameters_per_matrix(weight, n_bits)
# Number of ways to pick weight bits out of n_bits
n_a = n_matrices_per_weight(weight, n_bits)
return n_w * n_a
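# Worked example: for weight=2 on n_bits=4 there are
#   n_parameters_per_matrix(2, 4) = 2**2 * (2**2 - 1) = 12 parameters per submatrix,
#   n_matrices_per_weight(2, 4)   = binom(4, 2)        = 6 submatrices,
# so n_parameters(2, 4) = 12 * 6 = 72, consistent with the commented unit test at the
# bottom of this module, which expects [8, 72, 224, 240] for weights 1-4 on 4 bits.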
def transition_matrix(v, dimension):
"""
Produce a transition matrix of a given dimension given a parameter vector v.
The only enforced constraint here is that the columns sum to 1
"""
assert len(v) == dimension * (dimension - 1), f"Parameter vector must have length {dimension*(dimension-1)}."
for ind in range(dimension):
v = _np.insert(v, dimension * ind + ind, 1 - sum(v[dimension * ind:dimension * (ind + 1) - 1]))
return _np.reshape(v, (dimension, dimension)).T
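# Small example: for dimension=2 and v=[0.1, 0.2] the missing diagonal entries are
# filled in so that each *column* sums to 1:
#   transition_matrix([0.1, 0.2], 2)  ->  [[0.9, 0.2],
#                                          [0.1, 0.8]]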
def comprehensive_transition_matrix(v, weight, n_bits):
""" Build a generic weight-n transition_matrix """
assert len(v) == n_parameters(weight, n_bits), "v is the wrong dimension"
n_w = n_parameters_per_matrix(weight, n_bits)
n_a = n_matrices_per_weight(weight, n_bits)
vs = _np.reshape(v, (n_a, n_w))
pairs = list(_itertools.combinations(_np.arange(n_bits), weight))
ctm = sum((swell(transition_matrix(v, 2**weight), pair, n_bits)
for v, pair in zip(vs, pairs))) / n_a
return ctm
def nlogp(n, p):
"""n*log(p) such that if n == 0 the product is 0 too"""
return 0 if n == 0 else n * _np.log(max(p, 1e-8))
def log_likelihood(data, probs):
""" Compute log likelihood of a probability distribution over bitstrings given data """
# Assume data is given as counts
return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])
def max_log_likelihood(data):
""" Compute log likelihood of a probability distribution over bitstrings given data """
# Assume data is given as counts
tot = sum(data)
return _np.sum([nlogp(n, n / tot) for n in data if n > 0])
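# Quick check (sketch): with counts data=[50, 30, 20], max_log_likelihood(data) evaluates
# the log likelihood at the ML probabilities [0.5, 0.3, 0.2], so for any other probability
# vector `probs`, log_likelihood(data, probs) <= max_log_likelihood(data).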
@_lru_cache(maxsize=100)
def _build_basis_slow(weight, n_bits):
"""
Build a basis of matrices for constructing the transition matrix
T = I + sum_i a_i G_i
also builds the constraint matrix, C:
C . a <= 1
"""
_warnings.warn(("You're using a slow version of the basis-building code used by the disturbance calculations"
" - compile pyGSTi's C extensions to make this go faster."))
n_w = n_parameters_per_matrix(weight, n_bits)
n_a = n_matrices_per_weight(weight, n_bits)
dim = 2**n_bits
my_basis = []
my_constraints = []
# All sets of qubits of given weight on n_bits
pairs = list(_itertools.combinations(_np.arange(n_bits), weight))
for ind in range(n_w * n_a):
v = unit_vector(ind, n_w * n_a)
vs = _np.reshape(v, (n_a, n_w))
ctm = sum((swell_slow(transition_matrix(v, 2**weight), pair, n_bits)
for v, pair in zip(vs, pairs))) - n_a * _np.eye(dim)
my_basis += [ctm]
my_constraints += [-_np.diag(ctm)]
return my_basis, _np.array(my_constraints, dtype='int').T
@_lru_cache(maxsize=100)
def _build_basis_fast(weight, n_bits):
"""
Build a basis of matrices for constructing the transition matrix
T = I + sum_i a_i G_i
also builds the constraint matrix, C:
C . a <= 1
"""
n_w = n_parameters_per_matrix(weight, n_bits)
n_a = n_matrices_per_weight(weight, n_bits)
dim = 2**n_bits
my_basis = []
my_constraints = []
# All sets of qubits of given weight on n_bits
pairs = list(_itertools.combinations(_np.arange(n_bits), weight))
for ind in range(n_w * n_a):
v = unit_vector(ind, n_w * n_a)
vs = _np.reshape(v, (n_a, n_w))
ctm = sum((swell(transition_matrix(v, 2**weight), pair, n_bits)
for v, pair in zip(vs, pairs)))
ctm -= n_a * _np.eye(dim)
my_basis += [ctm]
my_constraints += [-_np.diag(ctm)]
return my_basis, _np.array(my_constraints, dtype='int').T
#Select fast version if it's available
build_basis = _build_basis_fast if (_fastcalc is not None) else _build_basis_slow
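# Example (sketch): the basis for weight-1 errors on 2 bits contains n_parameters(1, 2) = 4
# generator matrices G_i of shape (4, 4), plus a (4, 4) constraint matrix C such that
# T = I + sum_i a_i G_i is stochastic whenever C . a <= 1 and a >= 0:
#   basis, cons = build_basis(1, 2)
#   assert len(basis) == 4 and basis[0].shape == (4, 4) and cons.shape == (4, 4)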
class ResidualTVD:
"""
Computes the "weight-X residual TVD": the TVD between two probability
distributions up to weight-X transformations.
This corresponds to optimizing abs(Q - T*P) where P and Q are the two
probability distributions and T is a transition matrix.
"""
def __init__(self, weight, n_bits, initial_treg_factor=1e-3, solver="SCS"):
"""
Create a ResidualTVD function object.
Parameters
----------
weight : int
The weight: all stochastic errors of this weight or below are
considered "free", i.e. they contribute nothing to this residual TVD.
n_bits : int
The number of bits (qubits).
initial_treg_factor : float, optional
The magnitude of an internal penalty factor on the off-diagonals of
the transition matrix (T), intended to eliminate unnecessarily-large
T matrices which move a large proportion of probability between
near-zero elements of both P and Q. You should only adjust this
if you know what you're doing.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
"""
self.exactly_zero = bool(weight == n_bits)
self.n_bits = n_bits
self.n = int(2**n_bits)
self.weight = weight
self.dim = n_parameters(weight, n_bits)
self.solver = solver
self.initial_treg_factor = initial_treg_factor
self.warning_msg = None
# Hold values *separate* from cvxpy variables as we sometimes need to revert
# cvxpy optimizations which actually move values in a way that gives a *worse*
# objective function.
self.t_params = _np.zeros(self.dim)
# cvxpy parameters
self.P = _cp.Parameter(shape=(self.n,), nonneg=True, value=_np.zeros(self.n))
self.Q = _cp.Parameter(shape=(self.n,), nonneg=True, value=_np.zeros(self.n))
if weight == 0: return # special case; nothing more needed
# Initialize a regularization factor to keep the optimizer from putting large elements
# in T that move weight between near-zero elements of both p and q. We might need
# to adjust this later, so make it a parameter.
self.Treg_factor = _cp.Parameter(nonneg=True, value=self.initial_treg_factor)
# Build the basis and the constraint matrix - the basis is used to construct the T matrix
self.t_basis, self.cons = build_basis(self.weight, self.n_bits)
self._build_problem()
def build_transfer_mx(self, t_params=None, apply_abs=True):
""" Builds transition matrix from a vector of parameters """
if t_params is None: t_params = self.t_params
tmx = _np.sum([t_params[ind] * self.t_basis[ind] for ind in range(self.dim)], axis=0) + _np.eye(self.n)
return _np.abs(tmx) if apply_abs else tmx
def _build_problem(self):
# Initialize the variables - the parameters used to define the T matrix
self.T_params = _cp.Variable(self.dim, value=self.t_params.copy())
# Constraints
# T must be stochastic, so
# column sums must be 1 <-- enforced by construction of T
# T must have no negative elements so:
# 1. Keep all the diagonal elements positive
# 2. Keep all the off-diagonal elements positive
bounds = _np.ones(self.n)
self.constraints = [self.cons @ self.T_params <= bounds,
self.T_params >= 0]
# Form objective.
self.T = _cp.sum([self.T_params[ind] * self.t_basis[ind] for ind in range(self.dim)]) + _np.eye(2**self.n_bits)
self.resid_tvd = _cp.sum(_cp.abs(self.Q - self.T @ self.P)) / 2
self.obj = _cp.Minimize(self.resid_tvd + self.Treg_factor * _cp.norm(self.T_params, 1))
# Form the problem.
self.prob = _cp.Problem(self.obj, self.constraints)
def _rebuild_problem(self):
# Set variable values
self.T_params.value[:] = self.t_params.copy()
def _obj(self, t_params): # objective function for sanity checking cvxpy
p = self.P.value
q = self.Q.value
tmx = self.build_transfer_mx(t_params)
return _np.sum(_np.abs(q - _np.dot(tmx, p))) / 2
def __call__(self, p, q, verbosity=1, warn=True):
"""
Compute the residual TVD.
Parameters
----------
p, q : numpy array
The reference and test probability distributions, respectively,
given as an array of probabilities, one for each 2**n_bits bit string.
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
warn : bool, optional
Whether warning messages should be issued if problems are encountered.
Returns
-------
float
"""
if self.exactly_zero: return 0.0 # shortcut for trivial case
if self.weight == 0:
return _np.sum(_np.abs(q - p)) / 2
#Set parameter values
self.P.value[:] = p[:]
self.Q.value[:] = q[:]
treg_factor_ok = False
self.Treg_factor.value = self.initial_treg_factor
while not treg_factor_ok:
obj1 = self._obj(self.t_params)
if REBUILD:
self._rebuild_problem()
else:
self._build_problem()
self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1),
**default_cvxpy_args(self.solver))
failed = self.T.value is None # or self.resid_tvd.value is None
if not failed: # sanity check
t_chk = self.build_transfer_mx(self.T_params.value)
assert(_np.linalg.norm(_np.abs(self.T.value) - t_chk) < 1e-6)
self.warning_msg = None
if failed:
if self.solver == "SCS":
#raise ValueError("ResidualTVD: Convex optimizer failure")
for eps in [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:
if REBUILD:
self._rebuild_problem()
else:
self._build_problem()
self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1), eps=eps)
failed = self.T.value is None # or self.resid_tvd.value is None
if not failed:
t_chk = self.build_transfer_mx(self.T_params.value)
assert(_np.linalg.norm(self.T.value - t_chk) < 1e-6)
if eps > 1e-4:
self.warning_msg = ("ResidualTVD: Needed to increase eps to %g."
" The resulting ResidualTVD values are less precise.") % eps
if warn: print(self.warning_msg)
break
else:
raise ValueError("ResidualTVD: Convex optimizer failure")
else:
raise ValueError("ResidualTVD: Convex optimizer failure")
#check that Treg_factor term doesn't dominate
# Update: just leave this alone, since norm-penalty doesn't get reported - TODO later
treg_factor_ok = True
# ------------------------------------------------------------------
#EXPERIMENTAL algorithms for updating Treg_factor ------------------
# ------------------------------------------------------------------
#resid_tvd = self._obj(self.T_params.value)
#if resid_tvd > 10 * self.Treg_factor.value * _np.linalg.norm(self.T_params.value, 1):
# Treg_factor_ok = True
#else:
# self.Treg_factor.value = resid_tvd / 10 # self.Treg_factor.value / 10
#obj2 = self._obj(self.T_params.value)
#if obj2 < obj1:
# Treg_factor_ok = True
#else:
# #maybe penalty term dominated - reduce norm(tparams) penalty term
# self.T_params.value[:] = self.t_params[:] #REVERT
# self.T.value[:, :] = _np.sum([self.t_params[ind] * self.t_basis[ind]
# for ind in range(self.dim)], axis=0) + _np.eye(self.n) # REVERT
# self.Treg_factor.value = self.Treg_factor.value / 10
# if self.Treg_factor.value > 1e-7:
# print("REDUCING treg factor to: ", self.Treg_factor.value)
# else:
# Treg_factor_ok = True # give up!
if self.Treg_factor.value != self.initial_treg_factor:
if verbosity > 0: print("NOTE: Treg_factor was reduced to %g." % self.Treg_factor.value)
#_warnings.warn(("Initial Treg_factor (%g) was too large, and was reduced to %g."
# " Consider reducing the initial value to avoid repeating calculations.")
# % (self.initial_treg_factor, self.Treg_factor.value))
obj2 = self._obj(self.T_params.value)
if obj2 <= obj1:
self.t_params[:] = self.T_params.value[:]
else:
print_revert_msg("ResidualTVD failed to reduce objective function (%g > %g)", (obj2, obj1), verbosity)
self.T_params.value[:] = self.t_params[:]
self.T.value[:, :] = self.build_transfer_mx(self.t_params)
return self._obj(self.t_params) # not self.obj.value b/c that has additional norm regularization
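# Hypothetical usage sketch (not part of the original module; the distributions below are
# made up and cvxpy must be installed):
#   p = _np.array([0.88, 0.04, 0.04, 0.04])   # reference distribution over 2-bit strings
#   q = _np.array([0.70, 0.10, 0.10, 0.10])   # test distribution
#   rtvd_w1 = ResidualTVD(weight=1, n_bits=2)
#   print(rtvd_w1(p, q))   # TVD remaining after the best weight-1 stochastic map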
class RegularizedDeltaLikelihood:
"""
The delta log-likelihood (max_logL - logL) regularized by a "fixed-transition-matrix residual TVD".
The 'alpha' parameter determines the strength of the regularization. The objective
function is:
(max_logL - logL) + alpha * fixed_T_residual_tvd
"""
def __init__(self, data_p, data_q, solver="SCS"):
"""
Initialize a RegularizedDeltaLikelihood function object.
Parameters
----------
data_p, data_q : numpy array
Arrays of outcome counts from the reference and test experiments,
respectively. Each array has one element per 2^n_bits bit string.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
"""
self.data_P = data_p
self.data_Q = data_q
self.solver = solver
self.warning_msg = None
self.n = len(data_p)
# Hold values *separate* from cvxpy variables as we sometimes need to revert
# cvxpy optimizations which actually move values in a way that gives a *worse*
# objective function.
self.p = _np.array(self.data_P) / _np.sum(self.data_P)
self.q = _np.array(self.data_Q) / _np.sum(self.data_Q)
# cvxpy parameters
self.T = _cp.Parameter(shape=(self.n, self.n), nonneg=True, value=_np.eye(self.n))
self.alpha = _cp.Parameter(nonneg=True, value=1.0)
self.max_logl = max_log_likelihood(data_p) + max_log_likelihood(data_q)
self._build_problem()
def _build_problem(self):
#HACK: cvxpy seems non-deterministic using SCS, and doesn't reliably return
# the same result given the same problem unless we re-init the problem like this:
self.P = _cp.Variable(self.n, nonneg=True, value=self.p.copy())
self.Q = _cp.Variable(self.n, nonneg=True, value=self.q.copy())
self.constraints = [self.P >= CONSTRAINT_ZERO, _cp.sum(self.P) == 1,
self.Q >= CONSTRAINT_ZERO, _cp.sum(self.Q) == 1]
# Form objective.
llp = _cp.sum([num * _cp.log(prob) for num, prob in zip(self.data_P, self.P) if num > 0])
llq = _cp.sum([num * _cp.log(prob) for num, prob in zip(self.data_Q, self.Q) if num > 0])
self.log_likelihood = llp + llq
self.residual_tvd = _cp.sum(_cp.abs(self.Q - self.T @ self.P)) / 2
self.objective = _cp.Minimize((self.max_logl - self.log_likelihood) + self.alpha * self.residual_tvd)
self.prob = _cp.Problem(self.objective, self.constraints)
def _rebuild_problem(self):
# Set variable values
self.P.value[:] = self.p.copy()
self.Q.value[:] = self.q.copy()
def _obj(self, p, q): # objective function for sanity checking cvxpy
alpha = self.alpha.value # a parameter
tmx = self.T.value # a parameter
delta_logl = self.max_logl - (log_likelihood(self.data_P, p)
+ log_likelihood(self.data_Q, q))
residual_tvd = _np.sum(_np.abs(q - _np.dot(tmx, p))) / 2
return delta_logl + alpha * residual_tvd
def _delta_logl_value(self):
dlogl = self.max_logl - (log_likelihood(self.data_P, self.p)
+ log_likelihood(self.data_Q, self.q))
assert(dlogl >= 0)
return dlogl
def __call__(self, log10_alpha, tmx, verbosity=1, warn=True):
"""
Computes the regularized log-likelihood:
(max_logL - logL) + alpha * fixed_T_residual_tvd
Parameters
----------
log10_alpha : float
log10(alpha), where alpha sets the strength of the regularization.
tmx : numpy array
The (fixed) transition matrix used in fixed_T_residual_tvd.
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
warn : bool, optional
Whether warning messages should be issued if problems are encountered.
Returns
-------
float
"""
#Set parameter values
self.T.value = tmx
self.alpha.value = 10.0**log10_alpha
obj1 = self._obj(self.p, self.q)
if REBUILD:
self._rebuild_problem()
else:
self._build_problem()
self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1), **default_cvxpy_args(self.solver))
failed = self.P.value is None or self.Q.value is None
self.warning_msg = None
if failed:
if self.solver == "SCS":
if verbosity > 0: print("RegularizedLikelihood: Convex optimizer failure")
for eps in [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:
#if verbosity > 0: print("EPS = ", eps)
if REBUILD:
self._rebuild_problem()
else:
self._build_problem()
self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1), eps=eps)
failed = self.P.value is None or self.Q.value is None
if not failed:
if eps > 1e-4:
self.warning_msg = ("RegularizedLikelihood: Needed to increase eps to %g."
" The resulting ResidualTVD values are less precise.") % eps
if verbosity > 0 and warn: print(self.warning_msg)
break
else:
raise ValueError("RegularizedLikelihood: Convex optimizer failure")
else:
raise ValueError("RegularizedLikelihood: Convex optimizer failure")
obj2 = self._obj(self.P.value / sum(self.P.value), self.Q.value / sum(self.Q.value))
if obj2 <= obj1:
self.p[:] = self.P.value[:]
self.q[:] = self.Q.value[:]
self.p /= sum(self.p) # ensure sum(p) == 1 (cvxpy doesn't always obey constraints exactly)
self.q /= sum(self.q) # ensure sum(q) == 1 (cvxpy doesn't always obey constraints exactly)
else:
print_revert_msg("RegularizedLikelihood failed to reduce objective function (%g > %g)",
(obj2, obj1), verbosity)
self.P.value[:] = self.p[:]
self.Q.value[:] = self.q[:]
# Note: we just return the logl value, not the regularized
# objective value (self.objective.value)
return self._delta_logl_value()
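# Hypothetical usage sketch (made-up counts): given integer count arrays and a fixed
# transition matrix (here the identity), one evaluation returns the profile
# delta-log-likelihood at regularization strength alpha = 10**log10_alpha:
#   reg = RegularizedDeltaLikelihood(data_p=[480, 20, 15, 485], data_q=[450, 50, 40, 460])
#   dlogl = reg(log10_alpha=2, tmx=_np.eye(4))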
class ProfileLikelihood:
"""
The profile likelihood obtained by maximizing the likelihood on level-sets of
constant weight-X residual-TVD.
ProfileLikelihood(residual_TVD) values are evaluated by optimizing the function:
alpha*ResidualTVD(p,q;weight) - log(Likelihood(p,q;data_ref,data_test))
for a fixed value of alpha, yielding a single (residual_TVD, ProfileLikelihood) point.
The optimization is implemented as an alternating minimization between
optimize-T (ResidualTVD) and optimize-(P,Q) (RegularizedLikelihood) steps.
"""
def __init__(self, weight, n_bits, data_ref, data_test, solver="SCS"):
"""
Create a ProfileLikelihood function object.
Parameters
----------
weight : int
The weight: all stochastic errors of this weight or below are
considered "free", i.e. they contribute nothing to the residual TVD.
n_bits : int
The number of bits (qubits).
data_ref, data_test : numpy array
Arrays of outcome counts from the reference and test experiments,
respectively. Each array has one element per 2^n_bits bit string.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
"""
self.weight = weight
self.n_bits = n_bits
self.data_ref = data_ref
self.data_test = data_test
self.solver = solver
# Initialize the two solvers
self.residual_tvd = ResidualTVD(weight, n_bits, solver=solver)
self.reg_likelihood = RegularizedDeltaLikelihood(data_ref, data_test, solver=solver)
# Initialize self.p, self.q, and self.T
self._init_starting_values()
# Store the log-likelihood with *no* regularization (alpha=0)
# in case this is useful (this only depends on the data)
self.max_logl = max_log_likelihood(data_ref) + max_log_likelihood(data_test)
def _init_starting_values(self):
# Initialize p, q, and T to a standard set of initial
# values before beginning an alternating-minimization.
# Initialize p and q to their ML estimates
self.p = _np.array(self.data_ref) / _np.sum(self.data_ref)
self.q = _np.array(self.data_test) / _np.sum(self.data_test)
# Initialize T for the ML estimates of P and Q
self.t_params = _np.zeros(self.residual_tvd.dim)
#Sync values in contained objectives
self.residual_tvd.P.value[:] = self.p[:]
self.residual_tvd.Q.value[:] = self.q[:]
self.residual_tvd.t_params[:] = self.t_params[:]
self.reg_likelihood.p[:] = self.p[:]
self.reg_likelihood.q[:] = self.q[:]
self.reg_likelihood.T.value[:, :] = self.residual_tvd.build_transfer_mx(self.t_params)
def _obj(self, log10_alpha, p=None, q=None, tmx=None): # for debugging
if p is None: p = self.p
if q is None: q = self.q
if tmx is None: tmx = self.residual_tvd.build_transfer_mx(self.t_params)
logl = (log_likelihood(self.data_ref, p)
+ log_likelihood(self.data_test, q)) - self.max_logl
residual_tvd = _np.sum(_np.abs(q - _np.dot(tmx, p))) / 2
return 10**log10_alpha * residual_tvd - logl
def _iterate(self, log10_alpha, verbosity, warn):
# Minimize over p and q
tmx_raw = self.residual_tvd.build_transfer_mx(self.t_params, apply_abs=False)
tmx = self.residual_tvd.build_transfer_mx(self.t_params)
obj1 = self._obj(log10_alpha) # ; print("obj1 = ",obj1)
delta_logl = self.reg_likelihood(log10_alpha, tmx, verbosity=verbosity, warn=warn)
self.p[:] = self.reg_likelihood.p[:]
self.q[:] = self.reg_likelihood.q[:]
obj2 = self._obj(log10_alpha) # ; print("obj2 = ",obj2)
assert(obj2 <= obj1 + OBJ_CHK_TOL)
# Minimize over T
#res_tvd_curT = _np.sum(_np.abs(self.q - _np.dot(T, self.p))) / 2 # uses "current" T
res_tvd = self.residual_tvd(self.p, self.q, verbosity=verbosity, warn=warn)
if self.weight != 0: # weight = 0 case has no T matrix
# we limit the change in p_prime = T*p:
# |T'*p - T*p| = |((1-d)*Ts + (d-1)*T)*p| <= |(1-d)(Ts-T)|*|p| = (1-d)|Ts-T|*|p|
# so, if we want delta_p_prime < eps then set (1-d) = eps / (|Ts-T|*|p|)
# where norms are the vector 1-norm and inherited matrix 1-norm
pre_res_tvd = _np.sum(_np.abs(self.q - _np.dot(tmx, self.p))) / 2 # uses "old" T
eps = max(MAX_RESIDUAL_TVD_REDUCTION_PER_ITER, 0.1 * pre_res_tvd) # cap the step at the larger of this constant and 10% of the existing tvd
tmxs = self.residual_tvd.T.value
damping = max(0, 1 - eps / max((_np.linalg.norm(_np.abs(tmxs) - tmx, ord=1)
* _np.linalg.norm(self.p, ord=1)), 1e-6))
self.t_params[:] = damping * self.t_params + (1 - damping) * self.residual_tvd.T_params.value
self.residual_tvd.t_params[:] = self.t_params[:] # back-propagate damped t_params to ResidualTVD
self.residual_tvd.T_params.value[:] = self.t_params[:] # needed?
new_tmx = self.residual_tvd.build_transfer_mx(self.t_params)
assert(_np.allclose(new_tmx, _np.abs(damping * tmx_raw + (1 - damping) * tmxs)))
new_res_tvd = _np.sum(_np.abs(self.q - _np.dot(new_tmx, self.p))) / 2
best_res_tvd = _np.sum(_np.abs(self.q - _np.dot(_np.abs(tmxs), self.p))) / 2
assert(-OBJ_CHK_TOL < pre_res_tvd - new_res_tvd < eps)
#print("DEBUG TVD: ", pre_res_tvd, new_res_tvd, best_res_tvd, res_tvd)
assert(abs(best_res_tvd - res_tvd) <= OBJ_CHK_TOL)
else:
new_res_tvd = res_tvd # no damping in weight=0 case
obj3 = self._obj(log10_alpha) # ; print("obj3 = ",obj3)
assert(obj3 <= obj2 + OBJ_CHK_TOL)
return new_res_tvd, delta_logl
def __call__(self, log10_alpha=0, maxiters=20, reltol=1e-5, abstol=1e-5, verbosity=1, warn=True):
"""
Compute an (x,y) = (residualTVD, ProfileLikelihood(residualTVD)) point
given a fixed value of alpha, by minimizing (w.r.t p and q):
alpha*ResidualTVD(p,q;weight) - log(Likelihood(p,q;data_ref,data_test))
Parameters
----------
log10_alpha : float
log10(alpha), where alpha sets the strength of the regularization.
maxiters : int, optional
The maximum number of alternating-minimization iterations to allow
before giving up and deeming the final result "ok".
reltol : float, optional
The relative tolerance used within the alternating minimization.
abstol : float, optional
The absolute tolerance used within the alternating minimization.
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
warn : bool, optional
Whether warning messages should be issued if problems are encountered.
Returns
-------
residualTVD : float
ProfileLikelihood(residualTVD) : float
"""
self._init_starting_values()
last_residual_tvd = last_dlog_likelihood = -1.0e100 # a sentinel
last_obj = None
for ind in range(maxiters):
residual_tvd, delta_log_likelihood = self._iterate(log10_alpha, verbosity - 1, warn)
rel_rtvd = abs(last_residual_tvd - residual_tvd) / (abs(residual_tvd) + abstol)
rel_logl = abs(last_dlog_likelihood - delta_log_likelihood) / (abs(delta_log_likelihood) + abstol)
last_residual_tvd, last_dlog_likelihood = residual_tvd, delta_log_likelihood
obj = delta_log_likelihood + 10**(log10_alpha) * residual_tvd
if verbosity > 0:
print("Iteration %d: dlogL=%g, residualTVD=%g (rel change=%g, %g): %g" %
(ind, delta_log_likelihood, residual_tvd, rel_logl, rel_rtvd, obj))
assert(last_obj is None or obj <= last_obj), \
"Alternating minimization failed to decrease objective function!"
if (rel_logl < reltol or abs(delta_log_likelihood) < abstol) \
and (rel_rtvd < reltol or abs(residual_tvd) < abstol):
if verbosity > 0: print("Converged!")
break
else:
if verbosity > 0:
print("Maxium iterations (%d) reached before converging." % maxiters)
return residual_tvd, delta_log_likelihood
def at_logl_value(self, logl_value, maxiters=20, search_tol=0.1, reltol=1e-5, abstol=1e-5,
init_log10_alpha=3, verbosity=1):
max_logl = self.max_logl
res_tvd, delta_logl = self.at_delta_logl_value(max_logl - logl_value, maxiters,
search_tol, reltol, abstol, init_log10_alpha, verbosity)
return res_tvd, max_logl - delta_logl
def at_delta_logl_value(self, delta_logl_value, maxiters=20, search_tol=0.1, reltol=1e-5, abstol=1e-5,
init_log10_alpha=3, verbosity=1):
"""
Compute an (x,y) = (residualTVD, ProfileLikelihood(residualTVD)) point
such that ProfileLikelihood(residualTVD) is within `search_tol` of `delta_logl_value`.
Parameters
----------
delta_logl_value : float
the target profile (max - log-likelihood) value.
maxiters : int, optional
The maximum number of alternating-minimization iterations to allow
before giving up and deeming the final result "ok".
search_tol : float, optional
The tolerance used when testing whether an obtained profile delta-log-likelihood
value is close enough to `delta_logl_value`.
reltol : float, optional
The relative tolerance used within the alternating minimization.
abstol : float, optional
The absolute tolerance used within the alternating minimization.
init_log10_alpha : float, optional
The initial log10(alpha) value to use. This shouldn't matter except
that better initial values will cause the routine to run faster.
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
Returns
-------
residualTVD : float
ProfileLikelihood(residualTVD) : float
"""
log10_alpha = init_log10_alpha
left = None; left_val = None
right = None; right_val = None
bracket_is_substantial = True
res_tvd = None # in case first evaluation fails
it = 0
if verbosity > 0: print("Searching for delta logl value = %.3f +/- %.3f" % (delta_logl_value, search_tol))
while bracket_is_substantial:
res_tvd, delta_logl = self(log10_alpha, maxiters, reltol, abstol, verbosity - 1, warn=False)
if verbosity > 0:
print("Binary search (iter %d): log10(a)=%.3f in [%.3f,%.3f]"
% (it, log10_alpha, left or _np.nan, right or _np.nan),
"dlogl=%.6f resTVD=%.6f" % (delta_logl, res_tvd))
if (left_val and left_val > delta_logl) or (right_val and right_val < delta_logl):
print("WARNING: value looks suspicious! Dlogl=%s should have been in (%s, %s)!"
% (delta_logl, str(right_val), str(left_val)))
if abs(delta_logl - delta_logl_value) < search_tol:
return res_tvd, delta_logl
if res_tvd < abstol / 10.0: # small residualTVD value => increasing alpha doesn't help, we're already at 0
right = log10_alpha; right_val = delta_logl
if delta_logl > delta_logl_value:
# delta_logl too high, need less residualTVD penalty => decrease alpha
right = log10_alpha; right_val = delta_logl
else:
# delta_logl too low, need more residualTVD penalty => increase alpha
left = log10_alpha; left_val = delta_logl
if left is not None and right is not None:
if right_val - left_val > 1e-6:
gamma = (delta_logl_value - left_val) / (right_val - left_val)
#log10_alpha = _np.clip((1 - gamma) * left + gamma * right, left, right)
log10_alpha = _np.clip(_np.log10((1 - gamma) * 10**left + gamma * 10**right), left, right)
else:
log10_alpha = (left + right) / 2.0
bracket_is_substantial = (right - left) / (left + right) > 1e-6 # convergence criterion
elif left is None: # right was just updated -> decrease alpha
log10_alpha -= 1 # decrease alpha by 1 order of magnitude
else:
log10_alpha += 1
it += 1
if verbosity > 0:
if self.reg_likelihood.warning_msg: print(self.reg_likelihood.warning_msg)
if self.residual_tvd.warning_msg: print(self.residual_tvd.warning_msg)
if res_tvd > abstol and abs(delta_logl - delta_logl_value) < 4 * search_tol:
# Only make a fuss if it's 4x the given tolerance
_warnings.warn(("A binary search could not pinpoint the desired dlogL value within tolerance %g."
" (It achieved %g instead of the desired %g). This could invalidate the computed"
" error bars.") % (4 * search_tol, delta_logl, delta_logl_value))
# Otherwise we're against the "wall" where the ResidTVD==0, and
# it's likely that logl_value can't be attained (so don't warn about it).
return res_tvd, delta_logl
def at_2llr_value(self, two_llr_value, maxiters=20, search_tol=0.1,
reltol=1e-5, abstol=1e-5, init_log10_alpha=3, verbosity=1):
"""
Similar to :method:`at_delta_logl_value` except target is a 2*log-likelihood-ratio
value, i.e. 2*(max_logL - logL).
"""
# llr = max_logl - logl => delta_logl = two_llr_value/2.0
return self.at_delta_logl_value(two_llr_value / 2.0, maxiters,
search_tol, reltol, abstol, init_log10_alpha, verbosity)
def at_confidence(self, confidence_percent, maxiters=20, search_tol=0.1,
reltol=1e-5, abstol=1e-5, init_log10_alpha=3, verbosity=1):
"""
Similar to :method:`at_logl_value` except target is a given percent confidence
value, yielding a (residualTVD, ProfileLikelihood(residualTVD)) point that lies
on one end of a `confidence_percent`% confidence interval of the residualTVD.
Note that `confidence_percent` should be a number between 0 and 100, *not* 0 and 1.
"""
if confidence_percent <= 1.0:
_warnings.warn(("`confidence_percent` <= 1.0 may be a mistake - "
"this should be value between 0 and 100, not 0 and 1."))
return self.at_2llr_value(_chi2.ppf(confidence_percent / 100.0, df=1), maxiters,
search_tol, reltol, abstol, init_log10_alpha, verbosity)
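# Hypothetical usage sketch (made-up counts): each call with a fixed log10(alpha) yields one
# (residualTVD, delta-logL) point; `at_confidence` instead searches over alpha until the
# point sits at the edge of the requested confidence region:
#   pl = ProfileLikelihood(weight=1, n_bits=2, data_ref=[480, 20, 15, 485],
#                          data_test=[450, 50, 40, 460])
#   res_tvd, dlogl = pl.at_confidence(95.0)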
class ResidualTVDWithConfidence:
"""
Residual TVD with error bars given by an assumed-symmetric confidence-region.
The residual TVD is computed using :class:`ResidualTVD`. A confidence region
is constructed by finding where the :class:`ProfileLikelihood` is reduced from
its maximum by an amount given by the desired confidence level. This locates one
side of the confidence region, and it is assumed to be symmetric.
"""
def __init__(self, weight, n_bits, data_ref, data_test, solver="SCS", initial_treg_factor=1e-3):
"""
Create a ResidualTVDWithConfidence function object.
Parameters
----------
weight : int
The weight: all stochastic errors of this weight or below are
considered "free", i.e. they contribute nothing to this residual TVD.
n_bits : int
The number of bits (qubits).
data_ref, data_test : numpy array
Arrays of outcome counts from the reference and test experiments,
respectively. Each array has one element per 2^n_bits bit string.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
initial_treg_factor : float, optional
The magnitude of an internal penalty factor on the off-diagonals of
the T matrix (see :class:`ResidualTVD`).
"""
self.exactly_zero = bool(weight == n_bits)
self.residual_tvd = ResidualTVD(weight, n_bits, initial_treg_factor, solver=solver)
self.profile_likelihood = ProfileLikelihood(
weight, n_bits, data_ref, data_test, solver)
self.pML = _np.array(data_ref) / _np.sum(data_ref)
self.qML = _np.array(data_test) / _np.sum(data_test)
def __call__(self, confidence_percent=68.0, maxiters=20, search_tol=0.1,
reltol=1e-5, abstol=1e-5, init_log10_alpha=3, verbosity=1):
"""
Compute the ResidualTVD and its `confidence_percent`% confidence interval.
Parameters
----------
confidence_percent : float
The confidence level desired for the computed error bars. Note that this
number can range between 0 and 100, not 0 and 1.
maxiters : int, optional
The maximum number of alternating-minimization iterations to allow within
the profile-loglikelihood computation before giving up and deeming
the final result "ok".
search_tol : float, optional
The tolerance on the log-likelihood used when trying to locate the
(residualTVD, logL) pair with logL at the edge of the confidence interval.
reltol : float, optional
The relative tolerance used within the profile likelihood computation.
abstol : float, optional
The absolute tolerance used within the profile likelihood computation.
init_log10_alpha : float, optional
The initial log10(alpha) value to use within profile likelihood
evaluations. Only change this if you know what you're doing.
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
"""
if self.exactly_zero: return 0.0, 0.0 # shortcut for trivial case
resid_tvd = self.residual_tvd(self.pML, self.qML)
# print("ResidTVD = ",resid_tvd)
resid_tvd_at_edge_of_cr, _ = self.profile_likelihood.at_confidence(
confidence_percent, maxiters, search_tol, reltol, abstol, init_log10_alpha, verbosity)
# print("ResidTVD @ CR-edge = ",resid_tvd_at_edge_of_cr)
return resid_tvd, resid_tvd - resid_tvd_at_edge_of_cr
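# Hypothetical usage sketch (made-up counts): returns the residual TVD and a symmetric
# error-bar half-width derived from the profile likelihood:
#   rtvd_cr = ResidualTVDWithConfidence(weight=1, n_bits=2,
#                                       data_ref=[480, 20, 15, 485],
#                                       data_test=[450, 50, 40, 460])
#   value, errorbar = rtvd_cr(confidence_percent=68.0)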
class ProfileLikelihoodPlot:
def __init__(self, profile_likelihood, mode="auto-cr", maxiters=20,
search_tol=0.1, reltol=1e-5, abstol=1e-5, log10_alpha_values=None, num_auto_pts=10, verbosity=1):
"""
Creates a plot of the profile log-likelihood.
Parameters
----------
profile_likelihood : ProfileLikelihood
The profile likelihood to plot
mode : {"auto-cr", "auto-fullrange", "manual"}
How to decide what domain/range to plot. "auto-cr" plots the region
of the profile likelihood relevant to finding a confidence region.
"auto-fullrange" plots the entire range of log-likelihood values, from
the maximum to the amount it is reduced when the residual-TVD reaches 0.
"manual" lets the user specify the log10(alpha) values to use (given
in the `log10_alpha_values` argument).
maxiters : int, optional
The maximum number of alternating-minimization iterations to allow before
giving up and deeming the final result "ok".
search_tol : float, optional
The tolerance on the log-likelihood used when trying to locate a (residualTVD, logL)
pair with a particular logL.
reltol : float, optional
The relative tolerance used within the profile likelihood computation.
abstol : float, optional
The absolute tolerance used within the profile likelihood computation.
log10_alpha_values : list, optional
A list of log10(alpha) values to use to determine the (x,y)=(residualTVD, logL)
points to plot when `mode == "manual"`.
num_auto_pts : int, optional
The number of points to include in the plot when `mode` is "auto-cr" or "auto-fullrange".
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
"""
# Place to dump the results
self.profile_likelihood = profile_likelihood
self.mode = mode
self.residual_tvds = []
self.log_likelihoods = []
self.ps = []
self.ts = []
self.qs = []
if mode.startswith("auto"):
assert(log10_alpha_values is None)
self._compute_pts_auto(mode, maxiters, search_tol, reltol, abstol, num_auto_pts, verbosity)
elif mode == "manual":
assert(log10_alpha_values is not None), "Must specify `log10_alpha_values` for manual mode!"
self.log10_alphas = log10_alpha_values
self._compute_pts_manual(log10_alpha_values, maxiters, reltol, abstol, verbosity)
else:
raise ValueError("Invalid mode: %s" % mode)
def _compute_pts_manual(self, log10_alpha_values, maxiters,
reltol, abstol, verbosity):
for log10_alpha in log10_alpha_values:
residual_tvd, log_likelihood = self.profile_likelihood(
log10_alpha, maxiters, reltol, abstol, verbosity)
self.residual_tvds += [residual_tvd]
self.log_likelihoods += [log_likelihood]
self.ps += [self.profile_likelihood.p]
self.ts += [_np.dot(self.profile_likelihood.T, self.profile_likelihood.p)]
self.qs += [self.profile_likelihood.q]
return self.residual_tvds, self.log_likelihoods
def _get_minlogl(self, search_tol, maxiters, reltol, abstol, verbosity):
large_log10_alpha = 3
min_residual_tvd = 1.0
min_logl = None # in case failure on first eval
while min_residual_tvd > search_tol:
min_residual_tvd, min_logl = self.profile_likelihood(
large_log10_alpha, maxiters, reltol, abstol, verbosity)
large_log10_alpha += 1 # increase alpha by another order of magnitude
return min_logl
def _compute_pts_auto(self, mode, maxiters, search_tol, reltol, abstol, num_pts, verbosity):
max_logl = self.profile_likelihood.max_logl
if mode == "auto-cr":
offset_to_cr_edge = _chi2.ppf(0.95, df=1) / 2.0 # delta logL to get to 95% CR edge
min_logl = max_logl - 2 * offset_to_cr_edge # range is 2x to put CR edge in middle of range.
elif mode == "auto-fullrange":
min_logl = self._get_minlogl(search_tol, maxiters, reltol, abstol, verbosity)
else:
raise ValueError("Invalid 'auto' mode: %s" % mode)
desired_logl_values = _np.linspace(min_logl, max_logl, num_pts)
for logl in desired_logl_values:
residual_tvd, log_likelihood = self.profile_likelihood.at_logl_value(
logl, maxiters, search_tol, reltol, abstol, verbosity=1)
self.residual_tvds += [residual_tvd]
self.log_likelihoods += [log_likelihood]
self.ps += [self.profile_likelihood.p]
self.ts += [_np.dot(self.profile_likelihood.residual_tvd.build_transfer_mx(), self.profile_likelihood.p)]
self.qs += [self.profile_likelihood.q]
return self.residual_tvds, self.log_likelihoods
def make_plot(self, xlim=None, ylim=None, figsize=(10, 7), title=None):
"""
Creates the plot figure using matplotlib. Arguments are familiar plot variables.
"""
from matplotlib import pyplot as plt
xs, ys = self.residual_tvds, self.log_likelihoods
plt.figure(figsize=figsize)
plt.scatter(xs, ys)
plt.title("Profile Likelihood" if (title is None) else title, fontsize=22)
plt.xlabel('Residual TVD', fontsize=16)
plt.ylabel('Log Likelihood', fontsize=16)
if xlim:
plt.xlim(xlim[0], xlim[1])
else:
plt.xlim(_np.min(xs), _np.max(xs))
if ylim:
plt.ylim(ylim[0], ylim[1])
else:
plt.ylim(_np.min(ys), _np.max(ys))
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
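# Hypothetical usage sketch: given a ProfileLikelihood object `pl` as sketched above, the
# plot object computes its (residualTVD, logL) points on construction and renders them with
# matplotlib on demand:
#   plot = ProfileLikelihoodPlot(pl, mode="auto-cr", num_auto_pts=10)
#   plot.make_plot(title="Weight-1 profile likelihood")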
def compute_disturbances_with_confidence(n_bits, data_ref, data_test, confidence_percent=68.0,
max_weight=4, maxiters=20, search_tol=0.1, reltol=1e-5,
abstol=1e-5, solver="SCS", initial_treg_factor=1e-3, verbosity=1):
"""
Compute the weight-X disturbances between two data sets (including error bars).
This function computes the weight-X disturbance, defined as the difference between
the weight-(X-1) and weight-X residual TVDs (evaluated at the ML probability
distributions implied by the data) for all weights up to `max_weight`. It also
uses the data to compute `confidence_percent`% confidence intervals for each residualTVD
and adds these in quadrature to arrive at error bars on each weight-X disturbance.
Parameters
----------
n_bits : int
The number of bits (qubits).
data_ref, data_test : numpy array
Arrays of outcome counts from the reference and test experiments,
respectively. Each array has one element per 2^n_bits bit string.
confidence_percent : float or None, optional
The confidence level desired for the computed error bars. Note that this
number can range between 0 and 100, not 0 and 1. If None, then no error
bars are computed.
max_weight : int, optional
The maximum weight disturbance to compute. Typically this is the same
as `n_bits`.
maxiters : int, optional
The maximum number of alternating-minimization iterations to allow within
the profile-loglikelihood computation before giving up and deeming
the final result "ok".
search_tol : float, optional
The tolerance on the log-likelihood used when trying to locate the
(residualTVD, logL) pair with logL at the edge of the confidence interval.
reltol : float, optional
The relative tolerance used within the profile likelihood computation.
abstol : float, optional
The absolute tolerance used within the profile likelihood computation.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
initial_treg_factor : float, optional
The magnitude of an internal penalty factor on the off-diagonals of
the T matrix (see :class:`ResidualTVD`).
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
Returns
-------
list
A list of the disturbances by weight. The list's i-th element is a
`(disturbance, errorbar_length)` tuple for the weight (i+1) disturbance.
That is, the weight (i+1) disturbance = `disturbance +/- errorbar_length`.
"""
rtvds_by_weight = compute_residual_tvds(n_bits, data_ref, data_test, confidence_percent,
max_weight, maxiters, search_tol, reltol,
abstol, solver, initial_treg_factor, verbosity)
rtvds = [value_and_errorbar[0] for value_and_errorbar in rtvds_by_weight]
errorbars = [value_and_errorbar[1] for value_and_errorbar in rtvds_by_weight]
disturbance_by_weight = []
for i in range(1, max_weight + 1):
eb = _np.sqrt(errorbars[i - 1]**2 + errorbars[i]**2) \
if (confidence_percent is not None) else None
disturbance_by_weight.append((rtvds[i - 1] - rtvds[i], eb))
return disturbance_by_weight
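# Hypothetical usage sketch (made-up 2-bit counts): each returned entry is a
# (disturbance, errorbar_length) tuple, one per weight from 1 up to max_weight:
#   results = compute_disturbances_with_confidence(2, [480, 20, 15, 485], [450, 50, 40, 460],
#                                                  confidence_percent=68.0, max_weight=2)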
def compute_residual_tvds(n_bits, data_ref, data_test, confidence_percent=68.0,
max_weight=4, maxiters=20, search_tol=0.1, reltol=1e-5,
abstol=1e-5, solver="SCS", initial_treg_factor=1e-3, verbosity=1):
"""
Compute the weight-X residual TVDs between two data sets (including error bars).
Parameters
----------
n_bits : int
The number of bits (qubits).
data_ref, data_test : numpy array
Arrays of outcome counts from the reference and test experiments,
respectively. Each array has one element per 2^n_bits bit string.
confidence_percent : float or None, optional
The confidence level desired for the computed error bars. Note that this
number can range between 0 and 100, not 0 and 1. If None, then no error
bars are computed.
max_weight : int, optional
The maximum weight residual TVD to compute. Typically this is the same
as `n_bits`.
maxiters : int, optional
The maximum number of alternating-minimization iterations to allow within
the profile-loglikelihood computation before giving up and deeming
the final result "ok".
search_tol : float, optional
The tolerance on the log-likelihood used when trying to locate the
(residualTVD, logL) pair with logL at the edge of the confidence interval.
reltol : float, optional
The relative tolerance used within the profile likelihood computation.
abstol : float, optional
The absolute tolerance used within the profile likelihood computation.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
initial_treg_factor : float, optional
The magnitude of an internal penalty factor on the off-diagonals of
the T matrix (see :class:`ResidualTVD`).
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
Returns
-------
list
A list of the residual TVDs by weight. The list's i-th element is a
`(residual_tvd, errorbar_length)` tuple for the weight (i+1) residual TVD.
That is, the weight (i+1) residual TVD = `residual_tvd +/- errorbar_length`.
"""
residualtvd_by_weight = []
last_rtvd = None; last_errorbar = None
for weight in range(0, max_weight + 1):
t0 = _time.time()
if last_rtvd is not None and last_rtvd < ZERO_RTVD_THRESHOLD:
if verbosity > 1:
print("Approximating weight-%d residual TVD as zero (b/c weight-%d r-TVD < %g)"
% (weight, weight - 1, ZERO_RTVD_THRESHOLD))
residualtvd_by_weight.append((0.0, 0.0)) # or use previous value and error bar?
continue
if verbosity > 0:
print("Computing weight-%d residual TVD..." % weight, end='')
if confidence_percent is not None:
residual_tvd_fn = ResidualTVDWithConfidence(weight, n_bits, data_ref, data_test,
solver, initial_treg_factor)
resid_tvd, errorbar = residual_tvd_fn(confidence_percent, maxiters,
search_tol, reltol, abstol, verbosity=verbosity - 2)
else:
p_ml = _np.array(data_ref) / _np.sum(data_ref)
q_ml = _np.array(data_test) / _np.sum(data_test)
residual_tvd_fn = ResidualTVD(weight, n_bits, solver=solver)
resid_tvd = residual_tvd_fn(p_ml, q_ml, verbosity=verbosity - 2)
errorbar = None
# added a tolerance to the line below so this doesn't trigger when resid_tvd is barely above the last rtvd
if last_rtvd is not None and resid_tvd > last_rtvd + 1e-6:
#Try recomputing with kicked parameters
solver = "kicked_" + solver
kicked_args = default_cvxpy_args(solver)
if len(kicked_args) > 0:
if verbosity > 0:
print("Adjusting solver to use %s b/c residual TVD didn't decrease like it should have (%g > %g)"
% (str(kicked_args), resid_tvd, last_rtvd))
if confidence_percent is not None:
residual_tvd_fn = ResidualTVDWithConfidence(weight, n_bits, data_ref, data_test,
solver, initial_treg_factor)
resid_tvd, errorbar = residual_tvd_fn(confidence_percent, maxiters, search_tol,
reltol, abstol, verbosity=verbosity - 2)
else:
p_ml = _np.array(data_ref) / _np.sum(data_ref)
q_ml = _np.array(data_test) / _np.sum(data_test)
residual_tvd_fn = ResidualTVD(weight, n_bits, solver=solver)
resid_tvd = residual_tvd_fn(p_ml, q_ml, verbosity=verbosity - 2)
errorbar = None
else:
if verbosity > 0:
print("Warning! Residual TVD didn't decrease like it should (but no adjustments for %s)." % solver)
solver = remove_kicked(solver)
if last_rtvd is not None and resid_tvd > last_rtvd + 1e-6:
if verbosity > 0:
print(("Warning! Residual TVD *still* didn't decrease like it should have - "
"just using lower weight solution."))
resid_tvd, errorbar = last_rtvd, last_errorbar
residualtvd_by_weight.append((resid_tvd, errorbar))
last_rtvd = resid_tvd
last_errorbar = errorbar
eb_str = (" +/- %.3g" % errorbar) if (errorbar is not None) else ""
if verbosity > 0:
print(" %5.1fs\t\t%.3g%s" % (_time.time() - t0, resid_tvd, eb_str))
return residualtvd_by_weight
def resample_data(data, n_data_points=None, seed=None):
""" Sample from the ML probability distrubution of `data`."""
if seed is not None: _np.random.seed(seed)
if n_data_points is None: n_data_points = _np.sum(data)
p_ml = _np.array(data) / _np.sum(data)
resampled = _np.random.multinomial(n_data_points, p_ml)
return resampled
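# Example (sketch): resampling keeps the total number of counts fixed by default, drawing
# from the ML multinomial implied by the data:
#   fake = resample_data([480, 20, 15, 485], seed=0)
#   assert fake.sum() == 1000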
def compute_disturbances_bootstrap_rawdata(n_bits, data_ref, data_test, num_bootstrap_samples=20,
max_weight=4, solver="SCS", verbosity=1, seed=0,
return_resampled_data=False, add_one_to_data=True):
"""
Compute the weight-X disturbances between two data sets (including error bars).
This function computes the weight-X disturbance, defined as the difference between
the weight-(X-1) and weight-X residual TVDs (evaluated at the ML probability
distributions implied by the data) for all weights up to `max_weight`. It also
uses the data to compute a 1-sigma error bar for each value using the bootstrap method.
Parameters
----------
n_bits : int
The number of bits (qubits).
data_ref, data_test : numpy array
Arrays of outcome counts from the reference and test experiments,
respectively. Each array has one element per 2^n_bits bit string.
num_bootstrap_samples : int
The number of bootstrap (re-)samples to use.
max_weight : int, optional
The maximum weight disturbance to compute. Typically this is the same
as `n_bits`.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
add_one_to_data : bool, optional
Sets whether the bootstrap should be calculated after adding a single fake count to every
possible outcome.
Returns
-------
disturbance_by_weight_ML : numpy.ndarray
The ML disturbances by weight (length `max_weight`)
bootstrap_disturbances_by_weight : numpy.ndarray
A (max_weight, num_bootstrap_samples) sized array of each disturbance
computed for each of the `num_bootstrap_samples` re-sampled data sets.
"""
#p_ml = _np.array(data_ref) / _np.sum(data_ref)
#q_ml = _np.array(data_test) / _np.sum(data_test)
if verbosity > 0:
print("Computing base disturbances")
dist_by_weight_ml = compute_disturbances_with_confidence(
n_bits, data_ref, data_test, None, max_weight, solver=solver, verbosity=verbosity - 1)
dist_by_weight = _np.zeros((max_weight, num_bootstrap_samples), 'd')
resampled_data = []
bootstrap_data_ref = data_ref + _np.ones(len(data_ref), dtype='int')
bootstrap_data_test = data_test + _np.ones(len(data_test), dtype='int')
for i in range(num_bootstrap_samples):
if verbosity > 0:
print("Analyzing bootstrap sample %d of %d..." % (i + 1, num_bootstrap_samples), end='')
_sys.stdout.flush(); tStart = _time.time()
redata_ref = resample_data(bootstrap_data_ref, seed=seed + i)
redata_test = resample_data(bootstrap_data_test, seed=seed + num_bootstrap_samples + i)
if return_resampled_data:
resampled_data.append((redata_ref, redata_test))
try:
disturbances = compute_disturbances_with_confidence(
n_bits, redata_ref, redata_test, None, max_weight, solver=solver, verbosity=verbosity - 2)
except Exception:
try:
if verbosity > 0: print("\nFalling back on ECOS")
disturbances = compute_disturbances_with_confidence(
n_bits, redata_ref, redata_test, None, max_weight, solver="ECOS", verbosity=verbosity - 2)
except Exception:
if verbosity > 0: print("\nFailed using %s and ECOS - reporting nans" % solver)
for w in range(max_weight):
dist_by_weight[w, i] = _np.nan
continue # skip the assignment below: `disturbances` is undefined (or stale) when both solvers fail
for w in range(max_weight):
dist_by_weight[w, i] = disturbances[w][0]
if verbosity > 0:
print(" (%.1fs)" % (_time.time() - tStart))
dist_ml = _np.array([dist_by_weight_ml[w][0] for w in range(max_weight)], 'd')
if return_resampled_data:
return dist_ml, dist_by_weight, resampled_data
else:
return dist_ml, dist_by_weight
def compute_disturbances_from_bootstrap_rawdata(ml_disturbances, bootstrap_disturbances,
num_bootstrap_samples='all'):
"""
Compute 1-sigma error bars for a set of disturbances (given by `ml_disturbances`)
using bootstrap data.
Parameters
----------
ml_disturbances : numpy.ndarray
The disturbances by weight (length `max_weight`) for the maximum-likelihood
(ML) distribution of some set of data.
bootstrap_disturbances : numpy.ndarray
A (max_weight, num_bootstrap_samples) sized array where each column is
the set of by-weight disturbances for a distribution corresponding to a
re-sampled bootstrap data set.
num_bootstrap_samples : int or tuple or 'all'
How many bootstrap samples to use when computing the bootstrap error bars.
This number can be less than the total number of bootstrap samples to test
how using fewer bootstrap samples would have performed. `'all'` means to
use all available bootstrap samples. If a tuple, then each entry should be
an integer and a series of error bars is returned (instead of a single one)
corresponding to using each number of samples.
Returns
-------
list
A list of the disturbances by weight. The list's i-th element is a
`(disturbance, errorbar_length)` tuple for the weight (i+1) disturbance.
That is, the weight (i+1) disturbance = `disturbance +/- errorbar_length`.
If `num_bootstrap_samples` is a tuple, then elements are instead
`(disturbance, errorbar_length1, errorbar_length2, ...)` where error bar
lengths correspond to entries in `num_bootstrap_samples`.
"""
if not isinstance(num_bootstrap_samples, (list, tuple)):
num_bootstrap_samples = (num_bootstrap_samples,)
max_weight = len(ml_disturbances)
rms_disturbance_error = {w: () for w in range(max_weight)}
for w in range(max_weight):
for nsamples in num_bootstrap_samples:
if nsamples == 'all': nsamples = len(bootstrap_disturbances[w])
if nsamples == 0: continue # zero bootstrap samples => no error bars
# error_vec = error in weight-(w+1) disturbance for each bootstrap sample
error_vec = bootstrap_disturbances[w][0:nsamples] - ml_disturbances[w]
rms_disturbance_error[w] += (_np.sqrt(_np.mean(error_vec**2)),)
return [(ml_disturbances[w],) + rms_disturbance_error[w] for w in range(max_weight)]
def compute_disturbances(n_bits, data_ref, data_test, num_bootstrap_samples=20,
max_weight=4, solver="SCS", verbosity=1, add_one_to_data=True):
"""
Compute the weight-X disturbances between two data sets (including error bars).
This function computes the weight-X disturbance, defined as the difference between
the weight-(X-1) and weight-X residual TVDs (evaluated at the ML probability
distributions implied by the data) for all weights up to `max_weight`. It also
uses the data to compute a 1-sigma error bar for each value using the bootstrap method.
Parameters
----------
n_bits : int
The number of bits (qubits).
data_ref, data_test : numpy array
Arrays of outcome counts from the reference and test experiments,
respectively. Each array has one element per 2^n_bits bit string.
num_bootstrap_samples : int
The number of bootstrap (re-)samples to use. If 0, then error bars are not computed.
max_weight : int, optional
The maximum weight disturbance to compute. Typically this is the same
as `n_bits`.
solver : str, optional
The name of the solver to use (see `cvxpy.installed_solvers()`)
verbosity : int, optional
Sets the level of detail for messages printed to the console (higher = more detail).
add_one_to_data : bool, optional
Sets whether the bootstrap should be calculated after adding a single fake count to every
possible outcome.
Returns
-------
list
A list of the disturbances by weight. The list's i-th element is a
`(disturbance, errorbar_length)` tuple for the weight (i+1) disturbance.
That is, the weight (i+1) disturbance = `disturbance +/- errorbar_length`.
"""
dist_ml, dist = compute_disturbances_bootstrap_rawdata(
n_bits, data_ref, data_test, num_bootstrap_samples,
max_weight, solver, verbosity, add_one_to_data=add_one_to_data)
return compute_disturbances_from_bootstrap_rawdata(dist_ml, dist)
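# Hypothetical end-to-end sketch (made-up 2-bit counts): this is the top-level entry point
# most analyses would call; the error bars come from the bootstrap:
#   dists = compute_disturbances(n_bits=2, data_ref=[480, 20, 15, 485],
#                                data_test=[450, 50, 40, 460],
#                                num_bootstrap_samples=10, max_weight=2)
#   for w, (d, eb) in enumerate(dists, start=1):
#       print("weight-%d disturbance = %.4f +/- %.4f" % (w, d, eb))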
#TODO: move to unit tests
#sig_i = _np.array([[1., 0], [0, 1]], dtype='complex')
#sig_x = _np.array([[0, 1], [1, 0]], dtype='complex')
#sig_y = _np.array([[0, -1], [1, 0]], dtype='complex') * 1.j
#sig_z = _np.array([[1, 0], [0, -1]], dtype='complex')
#sig_m = (sig_x - 1.j * sig_y) / 2.
#sig_p = (sig_x + 1.j * sig_y) / 2.
#
#def test():
# """ Unit tests for this module - a work in progress """
# # This is a test of the above functions ... should all be 0
# assert(_np.count_nonzero(
# interior_tensor_product(multikron([sig_x,sig_z,sig_i]), 2,4, sig_i) -
# multikron([sig_x,sig_i,sig_z,sig_i]))==0)
# assert(_np.count_nonzero(swell(sig_x,[1],3) - multikron([sig_i,sig_x,sig_i]))==0)
# assert(_np.count_nonzero(swell(sig_x,[0],3) - multikron([sig_x,sig_i,sig_i]))==0)
# assert(_np.count_nonzero(swell(_np.kron(sig_x,sig_x),[1,3],4) - multikron([sig_i,sig_x,sig_i,sig_x]))==0)
#
# # Test the above functions - How many parameters for a weight-k stochastic matrix on 4 bits?
# assert([n_parameters(weight,4) for weight in [1,2,3,4]] == [8, 72, 224, 240])
#
# # TODO - more unittests | {
"content_hash": "f3db56fa0e99663d2436c256723fbdcd",
"timestamp": "",
"source": "github",
"line_count": 1643,
"max_line_length": 119,
"avg_line_length": 43.77175897748022,
"alnum_prop": 0.5998025501619922,
"repo_name": "DiCarloLab-Delft/PycQED_py3",
"id": "996c1f3d4da8cd91e7fe95a69338a65970db3dab",
"size": "71917",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycqed/analysis_v2/disturbancecalc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8748"
},
{
"name": "C++",
"bytes": "8802"
},
{
"name": "Cython",
"bytes": "8291"
},
{
"name": "OpenQASM",
"bytes": "15894"
},
{
"name": "Python",
"bytes": "7978715"
},
{
"name": "TeX",
"bytes": "8"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from tornado import template
import tornado.web
from dominate.tags import div, br, h3
from backend.components.base_component import BaseComponent
from backend.misc.sys_types import Rt
class Room(BaseComponent):
"""Defines room display in system"""
table_name = 'rooms'
column_headers_and_types = BaseComponent.column_headers_and_types + [['els', 'text'], ['regs', 'text']]
COL_REGS = 4
COL_ELS = 3
ID = 0
items = {}
groups_per_row = 4
def __init__(self, *args):
super().__init__(args[0], Rt(args[1]), args[2])  # initialize id, type, name
Room.items[self.id] = self
self.elements = args[Room.COL_ELS]
self.regulations = args[Room.COL_REGS]
self.groups = OrderedDict()  # Thanks to the ordered dict, groups always appear in the same order
def add_element(self, *elements):
for element in elements:
self.elements.append(element)
def get_display_data(self,):
"""Returns data organized in bootstrap rows and columns. There are Room.groups_per_row columns per row"""
rows = []
row = []
for group_num, group in enumerate(self.groups.values()):
row.append(group)
if (group_num + 1) % Room.groups_per_row == 0:  # a full row has been collected
rows.append(row)
row = []
if row:
rows.append(row)  # for the last, partially filled row
return rows
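# A hypothetical illustration: with Room.groups_per_row == 4 and six groups
# g0..g5, get_display_data() returns [[g0, g1, g2, g3], [g4, g5]].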
def get_html(self, ):
"""Generates room html"""
rows = self.get_display_data()
room_container = div(cls = "well", id='room' + str(self.id))
room_name = h3(self.name, cls="text-center")
room_container.add(room_name)
for row in rows:
r = div(cls='row')
with r:
for group in row:
div(cls="col-sm-3 group").add(group.get_html())
room_container.add(r)
return room_container.render()
def __str__(self, ):
return "".join([super().__str__(), '\tELEMENTS: ', ",".join([str(el.id) for el in self.elements])])
if __name__ == "__main__":
from common.sys_types import rt, et
from common.elements.output_element import Blind
room = Room(1, rt.corridor, 'Korytarz', [], [])  # id, type, name, elements, regulations (example values)
el0 = Blind(et.blind, 'roleta', 86, 86)
el1 = Blind(et.blind, 'roleta', 86, 86)
room.add_element(el0, el1)
print (str(room))
| {
"content_hash": "6bcb3529ccbbe77373a4247782cfcaf4",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 113,
"avg_line_length": 33.64383561643836,
"alnum_prop": 0.5753257328990228,
"repo_name": "dzon4xx/system",
"id": "901c5b6d97948c860006d14bc676b6af54c811ae",
"size": "2456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server_client/server/models/room.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2304"
},
{
"name": "HTML",
"bytes": "3018"
},
{
"name": "JavaScript",
"bytes": "5354"
},
{
"name": "Python",
"bytes": "116523"
}
],
"symlink_target": ""
} |
from ducktape.mark import matrix, ignore
from ducktape.mark.resource import cluster
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.services.security.kafka_acls import ACLs
from kafkatest.utils import is_int
class ZooKeeperSecurityUpgradeTest(ProduceConsumeValidateTest):
"""Tests a rolling upgrade for zookeeper.
"""
def __init__(self, test_context):
super(ZooKeeperSecurityUpgradeTest, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.group = "group"
self.producer_throughput = 100
self.num_producers = 1
self.num_consumers = 1
self.acls = ACLs(self.test_context)
self.zk = ZookeeperService(self.test_context, num_nodes=3)
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
def create_producer_and_consumer(self):
self.producer = VerifiableProducer(
self.test_context, self.num_producers, self.kafka, self.topic,
throughput=self.producer_throughput)
self.consumer = ConsoleConsumer(
self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=60000, message_validator=is_int)
self.consumer.group_id = self.group
@property
def no_sasl(self):
return self.kafka.security_protocol == "PLAINTEXT" or self.kafka.security_protocol == "SSL"
@property
def is_secure(self):
return self.kafka.security_protocol == "SASL_PLAINTEXT" \
or self.kafka.security_protocol == "SSL" \
or self.kafka.security_protocol == "SASL_SSL"
def run_zk_migration(self):
# change zk config (auth provider + jaas login)
self.zk.zk_sasl = True
if self.no_sasl:
self.kafka.start_minikdc_if_necessary(self.zk.zk_principals)
# restart zk
self.zk.restart_cluster()
# restart broker with jaas login
self.kafka.restart_cluster()
# run migration tool
for node in self.zk.nodes:
self.zk.zookeeper_migration(node, "secure")
# restart broker with zookeeper.set.acl=true and acls
self.kafka.zk_set_acl = True
self.kafka.restart_cluster()
@cluster(num_nodes=9)
@matrix(security_protocol=["PLAINTEXT", "SSL", "SASL_SSL", "SASL_PLAINTEXT"])
def test_zk_security_upgrade(self, security_protocol):
self.zk.start()
self.kafka.security_protocol = security_protocol
self.kafka.interbroker_security_protocol = security_protocol
# set acls
if self.is_secure:
self.kafka.authorizer_class_name = KafkaService.ACL_AUTHORIZER
self.acls.set_acls(security_protocol, self.kafka, self.topic, self.group)
if self.no_sasl:
self.kafka.start()
else:
self.kafka.start(self.zk.zk_principals)
#Create Producer and Consumer
self.create_producer_and_consumer()
#Run upgrade
self.run_produce_consume_validate(self.run_zk_migration)
| {
"content_hash": "fc50de4939e1ed0eed1a2aeaffa7e366",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 99,
"avg_line_length": 36.757894736842104,
"alnum_prop": 0.6626575028636884,
"repo_name": "sslavic/kafka",
"id": "f1a5dd23525de15de5d0db327c40a1ea3469665b",
"size": "4273",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/tests/core/zookeeper_security_upgrade_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26633"
},
{
"name": "Dockerfile",
"bytes": "5117"
},
{
"name": "HTML",
"bytes": "3739"
},
{
"name": "Java",
"bytes": "14966996"
},
{
"name": "Python",
"bytes": "802091"
},
{
"name": "Scala",
"bytes": "5802403"
},
{
"name": "Shell",
"bytes": "94955"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
"""System parameters wrappers"""
from __future__ import absolute_import
import unittest
import mock
from ros3ddevcontroller.param.parameter import Parameter, ReadOnlyParameter, ParameterStatus, Infinity
class ParameterTestCase(unittest.TestCase):
def test_new_no_status(self):
par = Parameter('foo', 'bar', str)
self.assertEqual(par.name, 'foo')
self.assertEqual(par.value, 'bar')
self.assertIsInstance(par.value, str)
self.assertEqual(par.value_type, str)
self.assertTrue(hasattr(par, 'status'))
status = par.status
self.assertIsInstance(status, ParameterStatus)
self.assertEqual(status.read, True)
self.assertEqual(status.write, True)
self.assertEqual(status.status, ParameterStatus.SOFTWARE)
def test_new_status(self):
st = ParameterStatus()
par = Parameter('foo', 1, int, status=st, min_val=0, max_val=10)
self.assertEqual(par.name, 'foo')
self.assertEqual(par.value, 1)
self.assertIsInstance(par.value, int)
self.assertEqual(par.value_type, int)
self.assertEqual(par.min_value, 0)
self.assertEqual(par.max_value, 10)
self.assertIs(par.status, st)
def test_new_bad_status(self):
self.assertRaises(AssertionError, Parameter,
'foo', 'bar', str, status=1)
def test_read_only(self):
par = Parameter('foo', 'bar', str)
self.assertFalse(par.is_read_only())
par = ReadOnlyParameter('foo', 'bar', str)
self.assertTrue(par.is_read_only())
def test_set_status(self):
status = ParameterStatus(status_type=ParameterStatus.SOFTWARE)
status.set_status(ParameterStatus.HARDWARE)
self.assertEqual(status.status, ParameterStatus.HARDWARE)
class FloatInfinityTestCase(unittest.TestCase):
def test_convert_to(self):
self.assertEqual(Infinity.convert_to(1e100), Infinity.PLUS)
self.assertEqual(Infinity.convert_to(-1e100), Infinity.MINUS)
self.assertEqual(Infinity.convert_to(10e20), 10e20)
self.assertEqual(Infinity.convert_to(-10e20), -10e20)
self.assertEqual(Infinity.convert_to(float('inf')), Infinity.PLUS)
self.assertEqual(Infinity.convert_to(float('-inf')), Infinity.MINUS)
self.assertEqual(Infinity.convert_to(0.3333), 0.3333)
self.assertEqual(Infinity.convert_to(128), 128)
def test_convert_from(self):
self.assertEqual(Infinity.convert_from(Infinity.PLUS), float('inf'))
self.assertEqual(Infinity.convert_from(Infinity.MINUS), float('-inf'))
self.assertEqual(Infinity.convert_from(10e20), 10e20)
self.assertEqual(Infinity.convert_from(-10e20), -10e20)
self.assertEqual(Infinity.convert_from(float('inf')), float('inf'))
self.assertEqual(Infinity.convert_from(float('-inf')), float('-inf'))
self.assertEqual(Infinity.convert_from(0.3333), 0.3333)
self.assertEqual(Infinity.convert_from(128), 128)
| {
"content_hash": "74d8f0cb2dcda51e38b57c8b6b67051f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 102,
"avg_line_length": 38.65384615384615,
"alnum_prop": 0.6703150912106136,
"repo_name": "open-rnd/ros3d-dev-controller",
"id": "4292ab73b40ae0587b26a690ce95492d7b4da828",
"size": "4119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests_parameter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178036"
}
],
"symlink_target": ""
} |
import logging
import json
import core
import uuid
import time
import base64
try:
import queue as Queue
except ImportError:
import Queue
import datetime
import string
valid_chars = set(string.ascii_letters+string.digits+'{}|~^<>!#$%()+,-.@_[] ')
log = logging.getLogger()
log.debug("Valid SQL characters are {0}".format(valid_chars))
session_nums = 0
command_nums = 0
event_types = {
"notification": "NOT",
"url": "URL",
"function": "FUN"
}
def set_response(session_id, command_id, event, response_function):
"""
Set a response listener on a command inside the session object
:param session_id: id of the session that owns the command
:param command_id: id of the command to attach the listener to
:param event: event identifier to listen for
:param response_function: callback to run when the event fires
"""
session_container = core.sessions[session_id]
commands_container = session_container["commands"]
for command in commands_container:
if command["id"] == command_id:
command_data = command
command_data.update({
"event": event,
"function": response_function
})
def gen_session(username, client_type, db):
"""
:param username: name of the user the session belongs to
:param client_type: type of client the session was created from
:param db: database handle used to persist the session counter
:return: session_id
"""
session_id = get_session_id(db)
# Start monitoring notifications
# Register a session id
core.sessions.update({
session_id: {
"username": username,
"commands": [],
"created": datetime.datetime.now(),
"updates": Queue.Queue(),
"id": session_id,
"client": client_type
}
})
return session_id
def gen_command_uid():
"""
Generate a 16 character url safe base64 string
:return urlsafe base64 string:
"""
return base64.urlsafe_b64encode(uuid.uuid1().bytes).decode("utf-8").rstrip('=\n').replace('/', '_')
def create_command_obj(session_id, command):
"""
Generate a properly formatted command object
:param session_id:
:param command:
:return command object:
"""
command_uid = "{0}_{1}".format(session_id,gen_command_uid())
log.debug(":{0}:Generating a new command object with command id {1}".format(session_id, command_uid))
command_object = {
"command": command,
"id": command_uid
}
#Add the command to the session
core.sessions[session_id]["commands"].append(command_object)
return command_object
def get_event_uid(type):
"""
Get an event uid prefixed with the short code for the given event type
:param type: one of the keys of `event_types`, e.g. "notification"
:return: Event uid string
"""
e_type = event_types[type]
return "{0}:{1}".format(e_type, str(uuid.uuid1()))
def dump_events(events, db):
"""
Dump events
:param events:
:param db:
"""
#Delete all events from db that should be finished
events_table = db['events']
events_table.delete(time < time.time())
for event in events:
#Remove one time events that had functions in them
if event["type"] != "function":
events_table.upsert(event, ['uid'])
def load_key(key_type, db, load_url=False):
"""
Load an API key from the database, cycling through the available keys by usage count
:param key_type: type of the key to load
:param db: database handle
:param load_url: if True, also return the url stored with the key
:return api key (or a (key, url) tuple when load_url is True):
"""
working_keys = db.query('SELECT * FROM `keys` WHERE type="{0}" and uses <= max_uses'.format(key_type))
correct_key = sorted(working_keys, key=lambda x: x["num"])[0]
key_uses = correct_key["uses"]
key_value = correct_key["value"]
updated_uses = key_uses+1
#Assume that keys reset monthly
db['keys'].update(dict(type=key_type, num=correct_key['num'], uses=updated_uses), ['type', 'num'])
if load_url:
return (key_value, correct_key["url"])
return key_value
def initialize_session_tracking(db):
"""
Deprecated and out of use
:param db:
"""
vars = db["vars"]
session_increment = vars.find_one(name="session_incremnet")
log.debug("Found session increment {0} from server".format(session_increment))
global session_nums
session_nums = session_increment
def get_session_id(db):
"""
Incrementer for session ids
:param db:
"""
global session_nums
session_nums+=1
session_id = uuid.uuid1()
session_str = str(session_id)
log.debug("Generated session_id {0}".format(session_str))
log.debug("Updating session increment in db")
data = dict(name="session_id", value=session_nums)
db['vars'].update(data, ['name'])
return session_str
def get_command_id(session_id):
"""
Incrementing command ids based on the session id
:param session_id:
:return command_id:
"""
global command_nums
command_nums+=1
command_id = "{0}_{1}".format(
session_id, gen_command_uid()
)
log.debug("Generated command id {0}".format(command_id))
return command_id
def get_user_token(username):
"""
Get a customized user token to store encrypted in the cookies
:param username:
:return user_token:
"""
user_uid = uuid.uuid3(uuid.NAMESPACE_DNS, str(username))
gen_uid = uuid.uuid1()
return str(gen_uid)+":u:"+str(user_uid)
def return_json(response):
"""
Render a response object as json, assert that it has all the correct keys, and return it
:param response:
:return json string:
"""
#Make sure the needed keys are in the response data
try:
assert type(response) == dict
assert "type" in response.keys()
assert "data" in response.keys()
assert "text" in response.keys()
log.debug("Returning response {0}".format(response))
return json.dumps(response)
except AssertionError as e:
log.error("AssertionError {0}, {1} when trying to render response {2}".format(
e.message, e.args, response
))
return {
"type": "error",
"data": None,
"text": "Server returned malformed response {0}".format(response)
}
def fold(string, line_length=120, indent=0, indent_first_line=False, _runs=0):
"""Fold a string into multiple Lines.
Fold function by Max Ertl (https://github.com/Sirs0ri)
:param string: The string you want to fold.
:param line_length: The desired max line length (int)
:param indent: if you want lines to be indented, you can specify the number of
spaces here
:param indent_first_line: if this is True, the first line won't be indented.
:return formatted string:
"""
if indent > line_length:
log.debug("The indentation is higher than the desired line-length and will "
"therefore be ignored.")
# Set up the actual line length
if indent_first_line is False and _runs == 0:
length = line_length
else:
length = line_length - indent
# The actual folding:
if len(string) < length:
# no need to fold
return (string)
else:
s = ""
i = 0
# Find the last space that would be in the last 12 chars of the new line
# The text will be folded here, 12 proved to be a good value in my tests
for c in string[length:length - 12:-1]:
if c == " ":
# Space found, fold here and remove the space
s += string[0:length - i]
string = string[length + 1 - i:]
# Fold the rest of the string recursively
return "{}\n{}{}".format(s, " " * indent,
fold(string, line_length, indent,
indent_first_line, _runs + 1))
else:
# Character is not a space, move to the previous one
i += 1
# No space found in the last 12 chars of the new line. Use full length
s += string[0:length]
string = string[length:]
return "{}\n{}{}".format(s, " " * indent,
fold(string, line_length, indent,
indent_first_line, _runs + 1))
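# A minimal usage sketch (hypothetical text):
#   msg = "the quick brown fox jumps over the lazy dog " * 4
#   print(fold(msg, line_length=40, indent=4))
# Each line is broken at a space found within the last 12 characters of the
# 40-character limit, and every line after the first is indented by 4 spaces
# (with indent_first_line=True the first line is also limited to
# line_length - indent).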
def check_string(in_str):
"""
Sanitize data
:param in_str: a list of strings or a single string to be validated
:return boolean:
"""
if type(in_str) == list:
return all([check_string(x) for x in in_str])
else:
filters = (
in_str.strip() and
all([x in valid_chars for x in in_str])
)
return filters | {
"content_hash": "2f6f38f2dedcd2608ba7b02ac324b0f3",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 106,
"avg_line_length": 29.574468085106382,
"alnum_prop": 0.5936450839328538,
"repo_name": "ironman5366/W.I.L.L",
"id": "b0e7fe9d62734f95c1b249ebb00366e423ee399f",
"size": "8357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "46315"
},
{
"name": "Python",
"bytes": "118534"
}
],
"symlink_target": ""
} |
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
If you want to have a more secure implementation, use the functions from the
``rsa.pkcs1`` module.
"""
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2014-02-22"
__version__ = '3.1.4'
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError']
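# A minimal usage sketch of the API re-exported above (key size and message
# are arbitrary example values):
#
#   (pub_key, priv_key) = newkeys(512)
#   ciphertext = encrypt(b'hello', pub_key)
#   assert decrypt(ciphertext, priv_key) == b'hello'
#   signature = sign(b'hello', priv_key, 'SHA-256')
#   verify(b'hello', signature, pub_key)  # raises VerificationError on failure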
| {
"content_hash": "0c372d2844062afd988bed1fb8505239",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 30.322580645161292,
"alnum_prop": 0.723404255319149,
"repo_name": "itielshwartz/BackendApi",
"id": "b76399f744cbac397fa589ae8699ecd3cc1a1ad4",
"size": "1575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rsa/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5759"
},
{
"name": "JavaScript",
"bytes": "5960"
},
{
"name": "Python",
"bytes": "1061709"
}
],
"symlink_target": ""
} |
import sqlite3
from vtfunc import TableFunction
class GenerateSeries(TableFunction):
params = ['start', 'stop', 'step']
columns = ['output']
name = 'series'
def initialize(self, start=0, stop=None, step=1):
self.start = start
self.stop = stop or float('inf')
self.step = step
self.curr = self.start
def iterate(self, idx):
if self.curr > self.stop:
raise StopIteration
ret = self.curr
self.curr += self.step
return (ret,)
conn = sqlite3.connect(':memory:')
GenerateSeries.register(conn)
cursor = conn.execute('SELECT * FROM series(0, 10, 2)')
print(cursor.fetchall())
cursor = conn.execute('SELECT * FROM series(5, NULL, 20) LIMIT 10')
print(cursor.fetchall())
| {
"content_hash": "78f648b4e107eab7a643087b111e13ce",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 67,
"avg_line_length": 23.242424242424242,
"alnum_prop": 0.6232073011734028,
"repo_name": "coleifer/sqlite-vtfunc",
"id": "47ed15a01fe25f6f2b1a33eaeb71626ab142cf58",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/generate_series.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9175"
},
{
"name": "Cython",
"bytes": "20702"
},
{
"name": "Python",
"bytes": "9537"
}
],
"symlink_target": ""
} |
"""
Python-Rightscale
A stupid wrapper around rightscale's HTTP API
"""
import time
import types
from .actions import RS_DEFAULT_ACTIONS, COLLECTIONS
from .httpclient import HTTPClient
from .util import get_rc_creds, HookList
# magic strings from the 1.5 api
DEFAULT_API_PREPATH = '/api'
# authenticate here
OAUTH2_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'oauth2'))
# start hypermedia searches here
ROOT_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'sessions'))
# these *should* be discoverable from the '/api/sessions' route above, but they
# are not. there is an open ticket filed with rightscale. until it gets
# addressed, it's just more magic:
ACCOUNT_INFO_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'sessions/accounts'))
HEALTH_CHECK_RES_PATH = '/'.join((DEFAULT_API_PREPATH, 'health-check'))
COLLECTION_TYPE = 'type=collection'
def get_resource_method(name, template):
"""
Creates a function that is suitable as a method for ResourceCollection.
"""
def rsr_meth(self, **kwargs):
http_method = template['http_method']
extra_path = template.get('extra_path')
if extra_path:
fills = {'res_id': kwargs.pop('res_id', '')}
path = self.path + (extra_path % fills)
else:
path = self.path
response = self.client.request(http_method, path, **kwargs)
loc = response.headers.get('location', None)
if loc:
# If the returned code is a 201, then there should be a location
# header in the response that we can use to re-get the newly created
# resource.
loc = response.headers.get('location')
response = self.client.request('get', loc, **kwargs)
# At this point, we better have a valid JSON response object
try:
obj = response.json()
except:
# The response had no JSON ... not a resource object
return
if COLLECTION_TYPE in response.content_type:
ret = HookList(
[Resource(r, path, response, self.client) for r in obj],
response=response
)
else:
ret = Resource(obj, path, response, self.client)
return ret
rsr_meth.__name__ = name
return rsr_meth
class Resource(object):
"""
A single resource.
:param dict soul: The essence of the resource as returned by the RightScale
API. This is the dictionary of attributes originally returned as the
JSON body of the HTTP response from RightScale.
:param str path: The path portion of the URL. E.g. ``/api/clouds/1``.
:param rightscale.httpclient.HTTPResponse response: The raw response object
returned by :meth:`HTTPClient.request`.
"""
def __init__(self, soul=None, path='', response=None, client=None):
if soul is None:
soul = {}
self.soul = soul
self.path = path
self.collection_actions = {}
self.response = response
self.client = client
self._links = None
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.soul)
def __str__(self):
return str(self.soul)
def __cmp__(self, other):
return cmp(self.soul, other.soul)
@property
def content_type(self):
if self.response:
return self.response.content_type[0]
return ''
def _get_rel_hrefs(self):
rel_hrefs = self.soul.get('links', [])
return dict((raw['rel'], raw['href']) for raw in rel_hrefs)
@property
def href(self):
return self._get_rel_hrefs().get('self', '')
@property
def links(self):
# only initialize once, not if empty
if self._links is None:
_links = self._get_rel_hrefs()
collection_actions = COLLECTIONS.get(self.content_type, {})
self.collection_actions = collection_actions
for name, action in collection_actions.iteritems():
if action is None and name in _links:
del _links[name]
continue
if name not in _links:
_links[unicode(name)] = unicode(
'%s/%s' % (self.path, name)
)
self._links = _links
return self._links
def __dir__(self):
return self.links.keys()
def __getattr__(self, name):
path = self.links.get(name)
if not path:
raise AttributeError('%s object has no attribute %s' % (
self.__class__.__name__,
name,
))
actions = RS_DEFAULT_ACTIONS.copy()
tpl = self.collection_actions.get(name)
if tpl:
actions.update(tpl)
return ResourceCollection(path, self.client, actions)
class ResourceCollection(object):
def __init__(self, path, client, actions):
self.path = path
self.client = client
for name, template in actions.items():
if not template:
continue
method = get_resource_method(name, template)
setattr(self, name, types.MethodType(method, self, self.__class__))
class RightScale(Resource):
def __init__(
self,
path=DEFAULT_API_PREPATH,
refresh_token=None,
api_endpoint=None,
):
"""
Creates and configures the API object.
:param str refresh_token: The refresh token provided by Rightscale when
API access is enabled.
:param api_endpoint: The rightscale subdomain to be hit with API
requests.
:param str path: The path portion of the URL.
E.g. ``/api``.
"""
super(RightScale, self).__init__({}, path)
self.auth_token = None
self.auth_expires_at = 0
rc_creds = get_rc_creds()
# prevent dumb leakage from the environment by only grabbing creds from
# rc file if they are not specified to the constructor.
if api_endpoint is None:
api_endpoint = rc_creds[0]
if not api_endpoint:
raise ValueError("Can't login with no api endpoint.")
self.api_endpoint = api_endpoint
if refresh_token is None:
refresh_token = rc_creds[1]
if not refresh_token:
raise ValueError("Can't login. Need refresh token!")
self.refresh_token = refresh_token
self._client = HTTPClient(
api_endpoint,
{'X-API-Version': '1.5'},
)
@property
def client(self):
# Validate that the auth_expires_at time hasn't been reached. If it is,
# the token is invalid and needs to be re-generated.
if time.time() > self.auth_expires_at:
self.auth_token = None
# lazy login so you can create instances without triggering a net hit
if not self.auth_token:
self.login()
return self._client
@client.setter
def client(self, value):
self._client = value
def login(self):
"""
Gets and stores an OAUTH token from Rightscale.
"""
login_data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token,
}
client = self._client
response = client.post(OAUTH2_RES_PATH, data=login_data)
raw_token = response.json()
self.auth_token = "Bearer %s" % raw_token['access_token']
client.s.headers['Authorization'] = self.auth_token
# Generate an expiration time for our token of 60-seconds before the
# standard time returned by RightScale. This will be used in the
# self.client property to validate that our token is still usable on
# every API call.
self.auth_expires_at = time.time() + int(raw_token['expires_in']) - 60
def health_check(self):
# only in 1.5 api docs, not discoverable via href
return self.client.get(HEALTH_CHECK_RES_PATH).json()
@property
def links(self):
if not self.soul:
try:
response = self.client.get(ROOT_RES_PATH)
self.response = response
self.soul = response.json()
except:
self.soul = {}
return super(RightScale, self).links
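# A minimal usage sketch (hypothetical credentials; resource collections such
# as `clouds` below are discovered at runtime from the /api/sessions links and
# the default actions in RS_DEFAULT_ACTIONS):
#
#   api = RightScale(refresh_token='<refresh token>',
#                    api_endpoint='https://us-3.rightscale.com')
#   for cloud in api.clouds.index():
#       print(cloud.soul.get('name'))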
| {
"content_hash": "7609d0a20025c89e53fd3ac88fe2f8e4",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 80,
"avg_line_length": 32.16030534351145,
"alnum_prop": 0.5747685734630904,
"repo_name": "diranged/python-rightscale-1",
"id": "b274ac5d791aa9b1b8904b872759ece6a188b4d0",
"size": "8426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rightscale/rightscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44565"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('origin_app', models.CharField(help_text=b'Record on which app the user signed up', max_length=255, choices=[(b'Pollination', b'Pollination'), (b'Beekeepers', b'Beekeepers')])),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
)
]
| {
"content_hash": "342c68f50fd6520f6a269d9f0ab2055d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 194,
"avg_line_length": 35.34782608695652,
"alnum_prop": 0.6408364083640836,
"repo_name": "project-icp/bee-pollinator-app",
"id": "3ad6c479b7dbd34e4029cc9957cfc306068e7d6c",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/icp/apps/user/migrations/0001_create_user_profile_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7443"
},
{
"name": "HTML",
"bytes": "70570"
},
{
"name": "JavaScript",
"bytes": "1120839"
},
{
"name": "Python",
"bytes": "367148"
},
{
"name": "SCSS",
"bytes": "165023"
},
{
"name": "Shell",
"bytes": "24001"
}
],
"symlink_target": ""
} |
import os
if os.name == 'nt':
# `isabs('/path/to/thing')` returns true on windows, which I think is wrong, so use `splitdrive` instead
def _fixed_nt_isabs(path):
return os.path.splitdrive(path)[0] != ''
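# e.g. on Windows, _fixed_nt_isabs(r'C:\Users\me') -> True, while
# _fixed_nt_isabs('/path/to/thing') -> False because it has no drive letter
# (the stock ntpath.isabs returns True for the latter).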
# maintainers apparently don't care about https://bugs.python.org/issue9949, so here is @ncdave4life's patched version
def _fixed_nt_realpath(path):
"""Return the absolute version of a path with symlinks resolved."""
from nt import _getfinalpathname # pylint: disable=import-error
from ntpath import normpath
if path: # Empty path must return current working directory.
try:
path = _getfinalpathname(path)
if str(path[:4]) == '\\\\?\\':
path = path[4:] # remove the \\?\
except WindowsError: # pylint: disable=undefined-variable
pass # Bad path - return unchanged.
elif isinstance(path, bytes):
path = os.getcwdb()
else:
path = os.getcwd()
return normpath(path)
# install overrides
os.path.isabs = _fixed_nt_isabs
os.path.realpath = _fixed_nt_realpath
| {
"content_hash": "6a9257bb802a9997689472b5859dae81",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 122,
"avg_line_length": 37.74193548387097,
"alnum_prop": 0.5974358974358974,
"repo_name": "scottbilas/dotfiles",
"id": "f4e74a928189847fe22e3dd2b5b4a99f1847f735",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "updot/updot/_pyhacks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AutoHotkey",
"bytes": "865"
},
{
"name": "Batchfile",
"bytes": "430"
},
{
"name": "JavaScript",
"bytes": "7086"
},
{
"name": "Lua",
"bytes": "53818"
},
{
"name": "Perl",
"bytes": "95482"
},
{
"name": "PowerShell",
"bytes": "37956"
},
{
"name": "Python",
"bytes": "172383"
},
{
"name": "Shell",
"bytes": "80975"
},
{
"name": "Vim script",
"bytes": "9611"
}
],
"symlink_target": ""
} |
__revision__ = "test/File.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that:
-- the File() global function and environment method work correctly;
-- the former does not try to expand construction variables;
-- calling File() as a method of a File() object works correctly.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print File('ddd')
print File('$FOO')
print File('${BAR}_$BAR')
print env.File('eee')
print env.File('$FOO')
print env.File('${BAR}_$BAR')
f1 = env.File('f1')
print f1
f2 = f1.File('f2')
print f2
""")
expect = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
f1
f2
""", build_str = """\
scons: `.' is up to date.
""")
test.run(stdout = expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "00ecaef5f8d16706404d10c2c0bf3dfd",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 86,
"avg_line_length": 18.72,
"alnum_prop": 0.6527777777777778,
"repo_name": "EmanueleCannizzaro/scons",
"id": "ff0829048353934af2b03b3d274c2a3785a0792d",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/File.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import jwt
import re
from tornado.gen import coroutine
from tornado.httpclient import AsyncHTTPClient
from os import path, getcwd
from api.model.models import User, UserActivation
from api.Crypto import hash_password
import functools
def authenticated(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
response = {'Error': "Token is invalid."}
self.set_status(401, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
return method(self, *args, **kwargs)
return wrapper
# Decode a JWT token and return the results.
def validate_token(jwt_token, secret, algorithm):
try:
if jwt_token is None:
return None
payload = jwt.decode(jwt_token, secret, algorithms=[algorithm])
return payload
except (jwt.DecodeError, jwt.ExpiredSignatureError):
return None
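# A minimal round-trip sketch (hypothetical secret; HS256 as supported by PyJWT):
#   token = jwt.encode({'user_id': 42}, 'my-secret', algorithm='HS256')
#   validate_token(token, 'my-secret', 'HS256')     # -> {'user_id': 42}
#   validate_token(token, 'wrong-secret', 'HS256')  # -> None (DecodeError is caught)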
@coroutine
def fetch_coroutine(url):
http_client = AsyncHTTPClient()
response = yield http_client.fetch(url)
return response.body
# TODO is the extension always jpg?
@coroutine
def download_avatar(url, username):
data = yield fetch_coroutine(url)
current_dir = getcwd()
output_file_name = path.join(current_dir, "static/avatars/") + username + ".jpg"
save_file(output_file_name, data)
return username + ".jpg"
def save_file(path, data):
with open(path, "bw") as f:
f.write(data)
def uglify_username(username):
# Remove all non-word characters (everything except numbers and letters)
username = re.sub(r"[^\w\s]", '', username)
# Replace all runs of whitespace with a single dash
username = re.sub(r"\s+", '-', username)
return username
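# e.g. uglify_username('John Smith Jr.') -> 'John-Smith-Jr'
# (punctuation is removed first, then whitespace runs become single dashes)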
def get_oauth_settings(settings):
settings = {
"facebook": {
"key": settings["facebook_api_key"],
"secret": settings["facebook_api_secret"]
},
"google": {
"key": settings["google_oauth_key"],
"secret": settings["google_oauth_secret"]
}
}
return settings
def do_save_user(user_to_save, session):
"""
Create a User from the given dict, hash the password, and persist it.
The new user is marked invalid until the email address is verified.
:param user_to_save: dict with 'username', 'password', 'name' and 'email' keys
:param session: database session used to persist the user
:return: the newly created User
"""
user = User()
user.username = user_to_save["username"]
user.password = hash_password(user_to_save["password"])
user.fullname = user_to_save["name"]
user.email = user_to_save['email']
user.valid = False  # A user is not valid until his/her email has been verified.
user.avatar = "_default_avatar.png"
session.add(user)
session.commit()
return user
def save_activation_info(activation_code, user, session):
# Save activation info.
user_activation = UserActivation()
user_activation.code = activation_code
user_activation.user_id = user.id
session.add(user_activation)
session.commit()
| {
"content_hash": "ebf13d1ae03e332362b92b65e7ac7dcd",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 84,
"avg_line_length": 25.236842105263158,
"alnum_prop": 0.6426833507125478,
"repo_name": "fdemian/Morpheus",
"id": "24a9f72cc13f6725d309f68f5832e2e1a996387c",
"size": "2877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/Utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "4869"
},
{
"name": "HTML",
"bytes": "4518"
},
{
"name": "JavaScript",
"bytes": "139555"
},
{
"name": "Mako",
"bytes": "493"
},
{
"name": "Python",
"bytes": "109884"
}
],
"symlink_target": ""
} |
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def _safe_shape_div(x, y):
"""Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
return x // math_ops.maximum(y, 1)
@ops.RegisterGradient("ArgMax")
def _ArgMaxGrad(op, grad):
del op, grad
return [None, None]
@ops.RegisterGradient("ArgMin")
def _ArgMinGrad(op, grad):
del op, grad
return [None, None]
# TODO(rmlarsen): Implement gradient.
ops.NotDifferentiable("EuclideanNorm")
_empty_tuple = ()
def _IsScalar(x):
return x._shape_tuple() is _empty_tuple # pylint: disable=protected-access
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
"""Gradient for Sum."""
# Fast path for when reducing to a scalar and ndims is known: adds only
# Reshape and Tile ops (and possibly a Shape).
input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
if input_0_shape is not None:
axes = tensor_util.constant_value(op.inputs[1])
if axes is not None:
rank = len(input_0_shape)
if np.array_equal(axes, np.arange(rank)): # Reduce all dims.
if context.executing_eagerly():
ctx = context.context()
new_shape = ctx.ones_rank_cache().get(rank)
if new_shape is None:
new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32)
ctx.ones_rank_cache().put(rank, new_shape)
else:
new_shape = [1] * rank
grad = array_ops.reshape(grad, new_shape)
# If shape is not fully defined (but rank is), we use Shape.
if None not in input_0_shape:
input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32)
else:
input_shape = array_ops.shape(op.inputs[0])
return [array_ops.tile(grad, input_shape), None]
input_shape = array_ops.shape(op.inputs[0])
# TODO(apassos) remove this once device placement for eager ops makes more
# sense.
with ops.colocate_with(input_shape):
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
def _MinOrMaxGrad(op, grad):
"""Gradient for Min or Max. Amazingly it's precisely the same code."""
input_shape = array_ops.shape(op.inputs[0])
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
y = op.outputs[0]
y = array_ops.reshape(y, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
# Compute the number of selected (maximum or minimum) elements in each
# reduction dimension. If there are multiple minimum or maximum elements
# then the gradient will be divided between them.
indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
num_selected = array_ops.reshape(
math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)
return [math_ops.divide(indicators, num_selected) * grad, None]
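# A small worked example of the tie-splitting above (hypothetical values):
# for reduce_max over x = [3., 5., 5.] the output is 5, two entries are
# selected, and an incoming gradient g is distributed as [0, g/2, g/2].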
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
"""Gradient for Max."""
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
"""Gradient for Mean."""
sum_grad = _SumGrad(op, grad)[0]
input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access
output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access
if (input_shape is not None and output_shape is not None and
None not in input_shape and None not in output_shape):
input_size = np.prod(input_shape)
output_size = np.prod(output_shape)
factor = input_size // max(output_size, 1)
factor = constant_op.constant(factor, dtype=sum_grad.dtype)
else:
input_shape = array_ops.shape(op.inputs[0])
output_shape = array_ops.shape(op.outputs[0])
factor = _safe_shape_div(
math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
"""Gradient for Prod."""
# The gradient can be expressed by dividing the product by each entry of the
# input tensor, but this approach can't deal with zeros in the input.
# Here, we avoid this problem by composing the output as a product of two
# cumprod operations.
input_shape = array_ops.shape(op.inputs[0])
# Reshape reduction indices for the case where the parameter is a scalar
reduction_indices = array_ops.reshape(op.inputs[1], [-1])
# Expand grad to full input shape
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
grad = array_ops.tile(grad, tile_scaling)
# Pack all reduced dimensions into a single one, so we can perform the
# cumprod ops. If the reduction dims list is empty, it defaults to float32,
# so we need to cast here. We put all the shape-related ops on CPU to avoid
# copying back and forth, and since listdiff is CPU only.
with ops.device("/cpu:0"):
rank = array_ops.rank(op.inputs[0])
reduction_indices = (reduction_indices + rank) % rank
reduced = math_ops.cast(reduction_indices, dtypes.int32)
idx = math_ops.range(0, rank)
other, _ = array_ops.setdiff1d(idx, reduced)
perm = array_ops.concat([reduced, other], 0)
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
permuted = array_ops.transpose(op.inputs[0], perm)
permuted_shape = array_ops.shape(permuted)
reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
# Calculate product, leaving out the current entry
left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
# For complex inputs, the gradient is in the conjugate direction.
y = array_ops.reshape(
math_ops.conj(left) * math_ops.conj(right), permuted_shape)
# Invert the transpose and reshape operations.
# Make sure to set the statically known shape information through a reshape.
out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
return array_ops.reshape(out, input_shape), None
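# A small worked example of the two-cumprod trick above (hypothetical values):
# for x = [a, b, c] reduced over its single axis, left = [1, a, a*b] and
# right = [b*c, c, 1], so left * right = [b*c, a*c, a*b] -- the product of all
# other entries -- which equals d(a*b*c)/dx even when some entries are zero.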
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
ones_shape = array_ops.concat([
array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
], 0)
ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype))
scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))
return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
"""Gradient for SparseSegmentSum."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None)
@ops.RegisterGradient("SparseSegmentSumWithNumSegments")
def _SparseSegmentSumWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSumWithNumSegments."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
"""Gradient for SparseSegmentMean."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentMeanWithNumSegments")
def _SparseSegmentMeanWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentMeanWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
"""Gradient for SparseSegmentSqrtN."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentSqrtNWithNumSegments")
def _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad):
"""Gradient for SparseSegmentSqrtNWithNumSegments."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None, None)
def _SegmentMinOrMaxGrad(op, grad):
""" Gradient for SegmentMin and SegmentMax. """
zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
num_selected = math_ops.segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
return array_ops.where(is_selected, gathered_grads, zeros), None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
"""Gradient for SegmentMin."""
return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
"""Gradient for SegmentMax."""
return _SegmentMinOrMaxGrad(op, grad)
def _GatherDropNegatives(params,
ids,
zero_clipped_indices=None,
is_positive=None):
""" Helper function for unsorted segment ops.
Gathers params for
positive segment ids and gathers 0 for inputs with negative segment id.
Also returns the clipped indices and a boolean mask with the same shape
as ids where a positive id is masked as true. With this, the latter two
can be passed as arguments to this function to reuse them.
"""
if zero_clipped_indices is None:
zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))
gathered = array_ops.gather(params, zero_clipped_indices)
if is_positive is None:
is_positive = math_ops.greater_equal(ids, 0)
# tf.where(condition, x, y) requires condition to have the same shape as x
# and y.
# todo(philjd): remove this if tf.where supports broadcasting (#9284)
for _ in range(gathered.shape.ndims - is_positive.shape.ndims):
is_positive = array_ops.expand_dims(is_positive, -1)
is_positive = (
is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool))
# replace gathered params of negative indices with 0
zero_slice = array_ops.zeros_like(gathered)
return (array_ops.where(is_positive, gathered, zero_slice),
zero_clipped_indices, is_positive)
def _UnsortedSegmentMinOrMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs, zero_clipped_indices, is_positive = \
_GatherDropNegatives(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
is_selected = math_ops.logical_and(is_selected, is_positive)
num_selected = math_ops.unsorted_segment_sum(
math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.divide(grad, num_selected)
gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
zero_clipped_indices, is_positive)
zeros = array_ops.zeros_like(gathered_grads)
return array_ops.where(is_selected, gathered_grads, zeros), None, None
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
"""Gradient for UnsortedSegmentSum."""
return _GatherDropNegatives(grad, op.inputs[1])[0], None, None
@ops.RegisterGradient("UnsortedSegmentMax")
def _UnsortedSegmentMaxGrad(op, grad):
""" Gradient for UnsortedSegmentMax. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentMin")
def _UnsortedSegmentMinGrad(op, grad):
""" Gradient for UnsortedSegmentMin. """
return _UnsortedSegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentProd")
def _UnsortedSegmentProdGrad(op, grad):
""" Gradient for UnsortedSegmentProd.
The gradient can be expressed for each segment by dividing the segment's
product by each element of the segment input tensor, but this approach can't
deal with zeros in the input.
Unlike reduce_prod we can't use cumsum here as individual segments may have
a different number of elements. Therefore we consider three cases:
1) A segment input contains no zeros and we can safely divide by the input
tensor.
2) A segment contains exactly one zero. Then the gradient of each input of
the segment is zero except for the 0-input, there the gradient is
the product of the remaining segment entries.
3) A segment contains at least two zeros. The gradient is zero for all
segment inputs.
"""
# Note that unsorted_segment_sum will filter out the negative indices,
# so we don't need to do a logical_and with is_positive here
is_zero = math_ops.equal(op.inputs[0], 0)
num_zeros = gen_math_ops.unsorted_segment_sum(
math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])
# handle case 3 and set the gradient to 0 for segments with more than one
# 0 as input
grad = array_ops.where(
math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)
# replace all zeros with ones and compute the unsorted_segment_prod
non_zero_data = array_ops.where(is_zero, array_ops.ones_like(op.inputs[0]),
op.inputs[0])
non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data,
op.inputs[1], op.inputs[2])
# clip the indices for gather to be positive
zero_clipped_indices = math_ops.maximum(op.inputs[1],
array_ops.zeros_like(op.inputs[1]))
gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)
gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)
prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf.
# Now fetch the individual results for segments containing 0 and those that
# don't. is_zero will also fetch results for entries with negative index
# but the following gather_drop_negatives sets the corresponding entry in
# grad to 0 for these
partial_derivative = array_ops.where(is_zero, gathered_non_zero_prod,
prod_divided_by_el)
gathered_grad = _GatherDropNegatives(grad, op.inputs[1],
zero_clipped_indices)[0]
return gathered_grad * partial_derivative, None, None
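# A small worked example of case 2 above (hypothetical values): for segment
# data [2., 3., 0.] the segment product is 0, and for a unit incoming gradient
# the gradient w.r.t. the inputs is [0, 0, 6] -- zero everywhere except at the
# zero entry, where it equals the product of the remaining entries (2 * 3).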
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
x = op.inputs[0]
return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
"""Returns -grad."""
return -grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("Reciprocal")
def _ReciprocalGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return gen_math_ops.reciprocal_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("ReciprocalGrad")
def _ReciprocalGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
x = op.inputs[0]
# Added control dependencies to prevent 2*x from being computed too early.
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = constant_op.constant(2.0, dtype=x.dtype)
return math_ops.multiply(grad, math_ops.multiply(x, y))
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
y = op.outputs[0] # y = x^(1/2)
return gen_math_ops.sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
a = op.inputs[0]
y = op.outputs[0] # y = 0.5 * b / conj(a)
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
ga = gen_math_ops.xdivy(grad, a)
return -gen_math_ops.mul_no_nan(y, math_ops.conj(ga)), 0.5 * ga
else:
ga = grad / a
return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
"""Returns -0.5 * grad * conj(y)^3."""
y = op.outputs[0] # y = x^(-1/2)
return gen_math_ops.rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
"""Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
a = op.inputs[0] # a = x^{-1/2}
b = op.inputs[1] # backprop gradient for a
with ops.control_dependencies([grad]):
ca = math_ops.conj(a)
cg = math_ops.conj(grad)
grad_a = -1.5 * cg * b * math_ops.square(ca)
grad_b = gen_math_ops.rsqrt_grad(ca, grad)
return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
"""Returns grad * exp(x)."""
y = op.outputs[0] # y = e^x
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Expm1")
def _Expm1Grad(op, grad):
"""Returns grad * exp(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
y = math_ops.exp(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(y, grad)
else:
return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
"""Returns grad * (1/x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, x)
else:
return grad * math_ops.reciprocal(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
"""Returns grad * (1/(1 + x))."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return gen_math_ops.xdivy(grad, 1 + x)
else:
return grad * math_ops.reciprocal(1 + x)
@ops.RegisterGradient("Xlogy")
def _XLogyGrad(op, grad):
"""Returns gradient of xlogy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xlogy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(x, y)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Xdivy")
def _XDivyGrad(op, grad):
"""Returns gradient of xdivy(x, y) with respect to x and y."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
not_zero_x = math_ops.cast(
math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
partial_x = gen_math_ops.xdivy(not_zero_x, y)
partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)
return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
@ops.RegisterGradient("Sinh")
def _SinhGrad(op, grad):
"""Returns grad * cosh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cosh(x)
@ops.RegisterGradient("Cosh")
def _CoshGrad(op, grad):
"""Returns grad * sinh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.sinh(x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
y = op.outputs[0] # y = tanh(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.tanh_grad(y, grad)
@ops.RegisterGradient("Asinh")
def _AsinhGrad(op, grad):
"""Returns grad * 1/cosh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return grad / math_ops.cosh(y)
@ops.RegisterGradient("Acosh")
def _AcoshGrad(op, grad):
"""Returns grad * 1/sinh(y)."""
y = op.outputs[0]
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, math_ops.sinh(y))
else:
return grad / math_ops.sinh(y)
@ops.RegisterGradient("Atanh")
def _AtanhGrad(op, grad):
"""Returns grad * 1/ (1 - x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.subtract(one, x2))
return grad * inv
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
"""Returns grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
"""Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
minus_two_over_root_pi = constant_op.constant(
-2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
"""Returns grad * digamma(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(math_ops.digamma(x), grad)
else:
return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
"""Compute gradient of the digamma function with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI0e")
def _BesselI0eGrad(op, grad):
"""Compute gradient of bessel_i0e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
partial_x = (math_ops.bessel_i1e(x) - math_ops.sign(x) * y)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(partial_x, grad)
else:
return grad * partial_x
@ops.RegisterGradient("BesselI1e")
def _BesselI1eGrad(op, grad):
"""Compute gradient of bessel_i1e(x) with respect to its argument."""
x = op.inputs[0]
y = op.outputs[0]
with ops.control_dependencies([grad]):
# For x = 0, the correct gradient is 0.5.
# However, the main branch gives NaN because of the division by x, so
# we impute the gradient manually.
# An alternative solution is to express the gradient via bessel_i0e and
# bessel_i2e, but the latter is not yet implemented in Eigen.
eps = np.finfo(x.dtype.as_numpy_dtype).eps
zeros = array_ops.zeros_like(x)
x_is_not_tiny = math_ops.abs(x) > eps
safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros)
dy_dx = math_ops.bessel_i0e(safe_x) - y * (
math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))
dy_dx = array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(dy_dx, grad)
else:
return grad * dy_dx
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
"""Returns gradient of igamma(a, x) with respect to a and x."""
a = op.inputs[0]
x = op.inputs[1]
sa = array_ops.shape(a)
sx = array_ops.shape(x)
ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
with ops.control_dependencies([grad]):
partial_a = gen_math_ops.igamma_grad_a(a, x)
# Perform operations in log space before summing, because Gamma(a)
# and Gamma'(a) can grow large.
partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) -
math_ops.lgamma(a))
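    # In closed form, d/dx igamma(a, x) = x^(a - 1) * exp(-x) / Gamma(a), i.e.
    # exp((a - 1) * log(x) - x - lgamma(a)), which is the expression above.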
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_a, grad), ra), sa),
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
"""Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x."""
igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)
return (-igamma_grad_a, -igamma_grad_x)
@ops.RegisterGradient("Betainc")
def _BetaincGrad(op, grad):
"""Returns gradient of betainc(a, b, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
a, b, x = op.inputs
# two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
sa = array_ops.shape(a)
sx = array_ops.shape(x)
_, rx = gen_array_ops.broadcast_gradient_args(sa, sx)
# Perform operations in log space before summing, because terms
# can grow large.
log_beta = (
gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
gen_math_ops.lgamma(a + b))
partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
(a - 1) * math_ops.log(x) - log_beta)
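  # In closed form, d/dx betainc(a, b, x) = x^(a-1) * (1 - x)^(b-1) / B(a, b)
  # (the Beta(a, b) density), evaluated in log space above for stability.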
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (
None, # da
None, # db
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx))
else:
return (
None, # da
None, # db
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
"""Returns gradient of zeta(x, q) with respect to x and q."""
# TODO(tillahoffmann): Add derivative with respect to x
x = op.inputs[0]
q = op.inputs[1]
# Broadcast gradients
sx = array_ops.shape(x)
sq = array_ops.shape(q)
unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)
# Evaluate gradient
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
q = math_ops.conj(q)
partial_q = -x * math_ops.zeta(x + 1, q)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_q, grad), rq),
sq))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
"""Returns gradient of psi(n, x) with respect to n and x."""
# TODO(tillahoffmann): Add derivative with respect to n
n = op.inputs[0]
x = op.inputs[1]
# Broadcast gradients
sn = array_ops.shape(n)
sx = array_ops.shape(x)
unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)
# Evaluate gradient
with ops.control_dependencies([grad]):
n = math_ops.conj(n)
x = math_ops.conj(x)
partial_x = math_ops.polygamma(n + 1, x)
# TODO(b/36815900): Mark None return values as NotImplemented
if compat.forward_compatible(2019, 9, 14):
return (None,
array_ops.reshape(
math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),
sx))
else:
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
"""Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
y = op.outputs[0] # y = sigmoid(x)
with ops.control_dependencies([grad]):
y = math_ops.conj(y)
return gen_math_ops.sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
with ops.control_dependencies([grad]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
gb = grad * b
return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad)
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
"""Returns 0."""
x = op.inputs[0]
return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
"""Returns grad * cos(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
"""Returns grad * -sin(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
"""Returns grad * 1/sec^2(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
secx = math_ops.reciprocal(math_ops.cos(x))
secx2 = math_ops.square(secx)
if compat.forward_compatible(2019, 9, 14):
return math_ops.mul_no_nan(secx2, grad)
else:
return secx2 * grad
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
"""Returns grad * 1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
"""Returns grad * -1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
if compat.forward_compatible(2019, 9, 14):
return -math_ops.xdivy(grad, den)
else:
inv = math_ops.reciprocal(den)
return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
"""Returns grad * 1/ (1 + x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.add(one, x2))
return grad * inv
@ops.RegisterGradient("Atan2")
def _Atan2Grad(op, grad):
"""Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2)."""
y = op.inputs[0]
x = op.inputs[1]
with ops.control_dependencies([grad]):
if compat.forward_compatible(2019, 9, 14):
grad_inv = math_ops.xdivy(grad, (math_ops.square(x) + math_ops.square(y)))
else:
grad_inv = grad / (math_ops.square(x) + math_ops.square(y))
return x * grad_inv, -y * grad_inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
def _ShapesFullySpecifiedAndEqual(x, y, grad):
# pylint: disable=protected-access
x_shape = x._shape_tuple()
y_shape = y._shape_tuple()
grad_shape = grad._shape_tuple()
# pylint: enable=protected-access
return (x_shape == y_shape and x_shape == grad_shape and
x_shape is not None and None not in x_shape)
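# The helper above lets the elementwise binary gradients below (Add, Sub, Mul,
# MulNoNan) skip the broadcast_gradient_args / reduce_sum / reshape path when
# the static shapes already prove that x, y and grad are identical.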
@ops.RegisterGradient("Add")
@ops.RegisterGradient("AddV2")
def _AddGrad(op, grad):
"""Gradient for Add."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
return grad, None
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, grad
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy)
return (gx, gy)
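# Illustrative example of the broadcast reduction above (not from this file):
# for x of shape (2, 3) and y of shape (3,), broadcast_gradient_args returns
# rx = [] and ry = [0], so gx is simply grad reshaped back to (2, 3), while
# gy = reshape(reduce_sum(grad, [0]), (3,)) sums over the broadcast dimension.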
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
"""Gradient for Sub."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return grad, -grad
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
"""The gradient of scalar multiplication."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad) and
grad.dtype in (dtypes.int32, dtypes.float32)):
return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy))
@ops.RegisterGradient("MulNoNan")
def _MulNoNanGrad(op, grad):
"""The gradient of scalar multiplication with NaN-suppression."""
x = op.inputs[0]
y = op.inputs[1]
if (isinstance(grad, ops.Tensor) and
_ShapesFullySpecifiedAndEqual(x, y, grad)):
return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad)
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
"""The gradient for the Div operator."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.divide(math_ops.divide(-x, y), y), grad), ry),
sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy))
@ops.RegisterGradient("FloorDiv")
def _FloorDivGrad(_, unused_grad):
"""The gradient for the FloorDiv operator."""
return None, None
@ops.RegisterGradient("FloorMod")
def _FloorModGrad(op, grad):
"""Returns grad * (1, -floor(x/y))."""
x = math_ops.conj(op.inputs[0])
y = math_ops.conj(op.inputs[1])
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
floor_xy = math_ops.floor_div(x, y)
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
gy = array_ops.reshape(
math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
return gx, gy
@ops.RegisterGradient("TruncateDiv")
def _TruncateDivGrad(_, unused_grad):
return None, None
@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
"""RealDiv op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.realdiv(math_ops.realdiv(-x, y), y), grad),
ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry),
sy))
@ops.RegisterGradient("DivNoNan")
def _DivNoNanGrad(op, grad):
"""DivNoNan op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
if compat.forward_compatible(2019, 9, 14):
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
math_ops.mul_no_nan(
math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
grad), ry), sy))
else:
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(
grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
"""Returns grad * (y*x^(y-1), z*log(x))."""
x = op.inputs[0]
y = op.inputs[1]
z = op.outputs[0]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
z = math_ops.conj(z)
if compat.forward_compatible(2019, 9, 14):
gx = array_ops.reshape(
math_ops.reduce_sum(
gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad), rx), sx)
else:
gx = array_ops.reshape(
math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
# Avoid false singularity at x = 0
if x.dtype.is_complex:
# real(x) < 0 is fine for the complex case
mask = math_ops.not_equal(x, 0)
else:
# There's no sensible real value to return if x < 0, so return 0
mask = x > 0
safe_x = array_ops.where(mask, x, array_ops.ones_like(x))
log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x))
if compat.forward_compatible(2019, 9, 14):
gy = array_ops.reshape(
math_ops.reduce_sum(gen_math_ops.mul_no_nan(z * log_x, grad), ry), sy)
else:
gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
return gx, gy
def _MaximumMinimumGradInputOnly(op, grad, selector_op):
x = op.inputs[0]
y = op.inputs[1]
zeros = array_ops.zeros_like(grad)
xmask = selector_op(x, y)
xgrad = array_ops.where(xmask, grad, zeros)
ygrad = None # Return None for ygrad since the config allows that.
return (xgrad, ygrad)
def _MaximumMinimumGrad(op, grad, selector_op):
"""Factor out the code for the gradient of Maximum or Minimum."""
y = op.inputs[1]
skip_input_indices = None
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(
y):
# When we want to get gradients for the first input only, and the second
# input tensor is a scalar, we can do a much simpler calculation
return _MaximumMinimumGradInputOnly(op, grad, selector_op)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
x = op.inputs[0]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xmask = selector_op(x, y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
if skip_input_indices is not None and 0 in skip_input_indices:
gx = None
else:
xgrad = array_ops.where(xmask, grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
if skip_input_indices is not None and 1 in skip_input_indices:
gy = None
else:
ygrad = array_ops.where(xmask, zeros, grad)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
"""Returns grad*(x > y, x <= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
"""Returns grad*(x < y, x >= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
"""Returns the gradient for (x-y)^2."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
with ops.control_dependencies([grad]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # a Tensor (not a number like 2.0), which causes it to be converted to a
    # Tensor.
x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
-array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
# Logical operations have no gradients.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("ApproximateEqual")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
c = op.inputs[0]
x = op.inputs[1]
zeros = array_ops.zeros_like(x)
return (None, array_ops.where(c, grad, zeros), array_ops.where(
c, zeros, grad))
@ops.RegisterGradient("SelectV2")
def _SelectGradV2(op, grad):
c = op.inputs[0]
x = op.inputs[1]
y = op.inputs[2]
zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype)
gx = array_ops.where_v2(c, grad, zeros)
x_shape = array_ops.shape(x)
output_shape = array_ops.shape(op.outputs[0])
# Reduce away broadcasted leading dims.
reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape)
gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x)
gx = array_ops.reshape(gx, x_shape)
gy = array_ops.where_v2(c, zeros, grad)
y_shape = array_ops.shape(y)
# Reduce away broadcasted leading dims.
reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape)
gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y)
gy = array_ops.reshape(gy, y_shape)
return (None, gx, gy)
def _MatMulGradAgainstFirstOnly(op, grad):
"""Gradient for MatMul, only for the first input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
return grad_a, None
def _MatMulGradAgainstSecondOnly(op, grad):
"""Gradient for MatMul, only for the second input."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
if not t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return None, grad_b
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
"""Gradient for MatMul."""
try:
skip_input_indices = op.skip_input_indices
if skip_input_indices is not None:
if 1 in skip_input_indices:
return _MatMulGradAgainstFirstOnly(op, grad)
elif 0 in skip_input_indices:
return _MatMulGradAgainstSecondOnly(op, grad)
except AttributeError:
# No gradient skipping, so do the full gradient computation
pass
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_a = gen_math_ops.mat_mul(grad, b)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)
grad_b = gen_math_ops.mat_mul(a, grad)
elif t_a and t_b:
grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)
grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)
return grad_a, grad_b
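# For the no-transpose case this is the standard matrix-calculus identity:
# with C = A @ B and upstream gradient dC,
#   dA = dC @ B^T    and    dB = A^T @ dC;
# the other branches just move the transposes around, and conj() handles
# complex dtypes.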
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
"""Gradient for SparseMatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
is_sparse = {
op.inputs[0]: op.get_attr("a_is_sparse"),
op.inputs[1]: op.get_attr("b_is_sparse"),
# Use heuristic to figure out if grad might be sparse
grad: not context.executing_eagerly() and (grad.op.type == "ReluGrad")
}
def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1 in is_sparse and t2 in is_sparse
t1_sparse = is_sparse[t1]
t2_sparse = is_sparse[t2]
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
prod = math_ops.matmul(
t1,
t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if prod.dtype != out_dtype:
prod = math_ops.cast(prod, out_dtype)
return prod
dtype_a = op.inputs[0].dtype
dtype_b = op.inputs[1].dtype
if not t_a and not t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))
elif not t_a and t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a),
_SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))
elif t_a and not t_b:
return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b))
elif t_a and t_b:
return (_SparseMatMul(
op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True),
_SparseMatMul(
grad, op.inputs[0], dtype_b, transpose_a=True,
transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Ceil")
def _CeilGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Round")
def _RoundGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Rint")
def _RintGrad(_, unused_grad):
# the gradient of Rint is zero
return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
return grad_x, grad_y
@ops.RegisterGradient("BatchMatMulV2")
def _BatchMatMulV2(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
# Reduce along the broadcasted batch dimensions, if broadcasting is required.
shape_x_static = x.get_shape()
shape_y_static = y.get_shape()
if not (shape_x_static.is_fully_defined() and
shape_y_static.is_fully_defined() and
shape_x_static == shape_y_static):
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2])
grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx)
grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy)
return grad_x, grad_y
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
"""Returns the real and imaginary components of 'grad', respectively."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
"""Returns 'grad' as the real part and set the imaginary part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
"""Returns 'grad' as the imaginary part and set the real part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(zero, grad)
@ops.RegisterGradient("Angle")
def _AngleGrad(op, grad):
"""Returns -grad / (Im(x) + iRe(x))"""
x = op.inputs[0]
with ops.control_dependencies([grad]):
re = math_ops.real(x)
im = math_ops.imag(x)
z = math_ops.reciprocal(math_ops.complex(im, re))
zero = constant_op.constant(0, dtype=grad.dtype)
complex_grad = math_ops.complex(grad, zero)
return -complex_grad * z
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
"""Returns the complex conjugate of grad."""
return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
"""Returns the gradient of ComplexAbs."""
return math_ops.div_no_nan(
math_ops.complex(
grad, array_ops.zeros_like(grad)) * op.inputs[0],
math_ops.complex(
op.outputs[0], array_ops.zeros_like(op.outputs[0])))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
t = [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
dtypes.complex64, dtypes.complex128
]
src_type = op.inputs[0].dtype.base_dtype
dst_type = grad.dtype.base_dtype
if src_type in t and dst_type in t:
return math_ops.cast(grad, src_type)
else:
return None
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
u = op.inputs[0]
v = op.inputs[1]
return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
return [
math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse),
None
]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
x = op.inputs[0]
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
# TODO This fails when x contains 0 and should be fixed
prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
out = math_ops.cumsum(
prod * grad, axis, exclusive=exclusive, reverse=not reverse)
return [out / x, None]
@ops.RegisterGradient("NextAfter")
def _NextAfterGrad(op, grad):
"""Returns gradient of nextafter(x1, x2) with respect to x1 and x2."""
x1 = op.inputs[0]
x2 = op.inputs[1]
s_x1 = array_ops.shape(x1)
s_x2 = array_ops.shape(x2)
r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)
with ops.control_dependencies([grad]):
partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)
partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)
return (array_ops.reshape(
math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),
array_ops.reshape(
math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))
| {
"content_hash": "ca14a7a4643a2a970027a2bed59183cd",
"timestamp": "",
"source": "github",
"line_count": 1644,
"max_line_length": 81,
"avg_line_length": 34.41727493917275,
"alnum_prop": 0.6536177582976919,
"repo_name": "alsrgv/tensorflow",
"id": "0db8953b6964bf8f0a42c9bc66a0b88f10f2613d",
"size": "57271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/math_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
} |
from django.db import models
from django import forms
from django.contrib.auth.models import User, Group
from inbox.models import Message
from django.core.exceptions import ValidationError
# # Allow multiple Groups with the same name
# Group._meta.get_field('name')._unique = False
def get_received_messages(self):
return Message.objects.filter(recipient=self)
def get_unread_messages(self):
return Message.objects.filter(recipient=self, read=False)
User.add_to_class('get_received_messages', get_received_messages)
User.add_to_class('get_unread_messages', get_unread_messages)
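# With the two add_to_class calls above, every User instance gains these
# helpers; e.g. (illustrative) some_user.get_unread_messages().count() gives
# the number of unread messages addressed to some_user.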
class UserProfile(models.Model):
# Django documentation for built-in User model:
# https://docs.djangoproject.com/en/1.7/ref/contrib/auth/#django.contrib.auth.models.User
# This line is required. Links UserProfile to a User model instance.
# related_name helps with accessing UserProfile when you have corresponding User.
user = models.OneToOneField(User, related_name='user_profile')
# The additional attributes we wish to include.
website = models.URLField(blank=True)
picture = models.ImageField(upload_to='profile_images', blank=True, default="")
favorite_groups = models.ManyToManyField(Group,blank=True)
    # Override the __unicode__() method to return something meaningful!
def __unicode__(self):
return self.user.username
class Meta:
permissions = (
('site_manager', 'Has site manager privileges'),
)
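# Because of the OneToOneField/related_name above, the profile of a given User
# instance is reachable as user.user_profile, and the wrapped user as
# profile.user ('user' and 'profile' here stand for any saved instances).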
class UserForm(forms.ModelForm):
username = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'form-control','required':None}))
password = forms.CharField(required=True, widget=forms.PasswordInput(attrs={'class': 'form-control','required':None}))
first_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control'}))
last_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control'}))
email = forms.CharField(required=False, widget=forms.EmailInput(attrs={'class': 'form-control'}))
def clean_username(self):
username = self.cleaned_data.get('username', '')
if User.objects.filter(username=username).exists():
            raise ValidationError('Username "' + username + '" taken.')
return username
class Meta:
model = User
fields = ('username', 'password', 'first_name', 'last_name', 'email')
# username/password fields not required
class EditUserForm(forms.ModelForm):
username = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
password = forms.CharField(required=True, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
first_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control'}))
last_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control'}))
email = forms.CharField(required=False, widget=forms.EmailInput(attrs={'class': 'form-control'}))
def clean_username(self):
username = self.cleaned_data.get('username', '')
if User.objects.filter(username=username).exists():
            raise ValidationError('Username "' + username + '" taken.')
return username
class Meta:
model = User
fields = ('username', 'password', 'first_name', 'last_name', 'email')
class UserProfileForm(forms.ModelForm):
    website = forms.URLField(required=False, widget=forms.URLInput(attrs={'class': 'form-control'}))
    # Must explicitly say that the field is not required in order for ClearableFileInput to render with a clear checkbox
picture = forms.ImageField(required=False, widget=forms.ClearableFileInput(attrs={}))
class Meta:
model = UserProfile
fields = ('website', 'picture')
class LoginForm(forms.ModelForm):
username = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
password = forms.CharField(required=True, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
class Meta:
model = User
fields = ('username', 'password')
| {
"content_hash": "6859659a669f63c135c545a8986f8556",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 119,
"avg_line_length": 41.8936170212766,
"alnum_prop": 0.7437785678009141,
"repo_name": "mas2tg/cs3240-f16-team07",
"id": "94af8b36bf78d5ae59e86521eb0ea87f6214cd3a",
"size": "3938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "423"
},
{
"name": "Python",
"bytes": "4958"
}
],
"symlink_target": ""
} |
"""
Modules for creating and accessing Data Store Units
"""
from __future__ import absolute_import
from datacube.storage.tiling import tile_datasets_with_storage_type
| {
"content_hash": "4e67dfceb56459843b412526f6eb3c79",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 67,
"avg_line_length": 28,
"alnum_prop": 0.7857142857142857,
"repo_name": "ceos-seo/Data_Cube_v2",
"id": "f11d50703c286a9020cc13a3af7a4b450356aeff",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agdc-v2/datacube/storage/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1959"
},
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "CSS",
"bytes": "772075"
},
{
"name": "GLSL",
"bytes": "165400"
},
{
"name": "HTML",
"bytes": "1457619"
},
{
"name": "JavaScript",
"bytes": "50036576"
},
{
"name": "Jupyter Notebook",
"bytes": "16917211"
},
{
"name": "Makefile",
"bytes": "6773"
},
{
"name": "Python",
"bytes": "1174107"
},
{
"name": "Shell",
"bytes": "7641"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(1, "../../")
# Output checks common for multiple algorithms
def compare_output(original, generic, strip_part, algo_name, generic_algo_name):
original = original[original.find(strip_part):].replace(algo_name, '').strip()
generic = generic[generic.find(strip_part):].replace(generic_algo_name, '').strip()
assert generic == original, "expected:\n%s\n\nbut got instead generic:\n%s" % (original, generic)
# print("expected:\n%s\n\nand got generic:\n%s" % (original, generic))
def compare_params(original, generic):
original_params = original.params
generic_params = generic.params
assert original is not None
assert generic_params is not None
assert len(original_params) == len(generic_params) - 2 # Two more in Generic: _model_key and model_path
for param_name in original_params:
if param_name == "model_id":
continue
generic_param = generic_params[param_name]
original_param = original_params[param_name]
if param_name == "ignored_columns":
assert generic_param == original_param
assert generic_param is not None
assert original_param is not None
| {
"content_hash": "07fd45915aea29287bae4849467720e4",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 108,
"avg_line_length": 36.54545454545455,
"alnum_prop": 0.675787728026534,
"repo_name": "h2oai/h2o-3",
"id": "5f3fc7f0fcae20641e10c6e5c18dc9f72970e078",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_generic_model/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
} |
"""
PyCOMPSs Testbench KMeans
========================
"""
# Imports
from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce
import random
import numpy as np  # used at module level by init_board_gauss, has_converged and init_random
USER_EVENTS = 70000100
CLUSTER_MAIN_LOOP = 400
PARTIAL_SUM = 401
REDUCE_KEY = 402
REDUCE_NO_KEY = 403
GENERATE = 404
def init_board_gauss(numV, dim, K):
n = int(float(numV) / K)
data = []
random.seed(5)
for k in range(K):
c = [random.uniform(-1, 1) for i in range(dim)]
s = random.uniform(0.05, 0.5)
for i in range(n):
d = np.array([np.random.normal(c[j], s) for j in range(dim)])
data.append(d)
Data = np.array(data)[:numV]
return Data
def init_board_random(numV, dim):
from numpy import random
random.seed(5)
return [random.random(dim) for _ in range(numV)]
@task(returns=dict, tracing_hook=True)
def cluster_points_partial(XP, mu, ind):
import pyextrae.multiprocessing as pyextrae
import numpy as np
dic = {}
XP = np.array(XP)
pyextrae.eventandcounters(USER_EVENTS, CLUSTER_MAIN_LOOP)
for x in enumerate(XP):
bestmukey = min([(i[0], np.linalg.norm(x[1] - mu[i[0]]))
for i in enumerate(mu)], key=lambda t: t[1])[0]
if bestmukey not in dic:
dic[bestmukey] = [x[0] + ind]
else:
dic[bestmukey].append(x[0] + ind)
pyextrae.eventandcounters(USER_EVENTS, 0)
return dic
@task(returns=dict, tracing_hook=True)
def partial_sum(XP, clusters, ind):
import pyextrae.multiprocessing as pyextrae
import numpy as np
XP = np.array(XP)
pyextrae.eventandcounters(USER_EVENTS, PARTIAL_SUM)
p = [(i, [(XP[j - ind]) for j in clusters[i]]) for i in clusters]
pyextrae.eventandcounters(USER_EVENTS, 0)
dic = {}
for i, l in p:
dic[i] = (len(l), np.sum(l, axis=0))
return dic
@task(returns=dict, priority=True, tracing_hook=True)
def reduceCentersTask(a, b):
import pyextrae.multiprocessing as pyextrae
for key in b:
if key not in a:
pyextrae.eventandcounters(USER_EVENTS, REDUCE_NO_KEY)
a[key] = b[key]
pyextrae.eventandcounters(USER_EVENTS, 0)
else:
pyextrae.eventandcounters(USER_EVENTS, REDUCE_KEY)
a[key] = (a[key][0] + b[key][0], a[key][1] + b[key][1])
pyextrae.eventandcounters(USER_EVENTS, 0)
return a
def has_converged(mu, oldmu, epsilon, iter, maxIterations):
print("iter: " + str(iter))
print("maxIterations: " + str(maxIterations))
if oldmu != []:
if iter < maxIterations:
aux = [np.linalg.norm(oldmu[i] - mu[i]) for i in range(len(mu))]
distancia = sum(aux)
if distancia < epsilon * epsilon:
print("Distancia_T: " + str(distancia))
return True
else:
print("Distancia_F: " + str(distancia))
return False
else:
            # stop because the maximum number of iterations has been reached
return True
def init_random(dim, k):
from numpy import random
random.seed(5)
# ind = random.randint(0, len(X) - 1)
m = np.array([random.random(dim) for _ in range(k)])
# return random.sample(X[ind], k)
return m
@task(returns=list, disableUserEvents=True)
def genFragment(numv, dim):
import pyextrae.multiprocessing as pyextrae
# if mode == "gauss":
# return init_board_gauss(numv, dim, k)
# else:
pyextrae.eventandcounters(USER_EVENTS, GENERATE)
frag = init_board_random(numv, dim)
pyextrae.eventandcounters(USER_EVENTS, 0)
return frag
def kmeans_frag(numV, k, dim, epsilon, maxIterations, numFrag):
from pycompss.api.api import compss_wait_on
import time
size = int(numV / numFrag)
startTime = time.time()
X = [genFragment(size, dim) for _ in range(numFrag)]
print("Points generation Time {} (s)".format(time.time() - startTime))
mu = init_random(dim, k)
oldmu = []
n = 0
startTime = time.time()
while not has_converged(mu, oldmu, epsilon, n, maxIterations):
oldmu = mu
clusters = [cluster_points_partial(
X[f], mu, f * size) for f in range(numFrag)]
partialResult = [partial_sum(
X[f], clusters[f], f * size) for f in range(numFrag)]
mu = merge_reduce(reduceCentersTask, partialResult)
mu = compss_wait_on(mu)
mu = [mu[c][1] / mu[c][0] for c in mu]
print(mu)
n += 1
print("Kmeans Time {} (s)".format(time.time() - startTime))
return (n, mu)
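# Rough shape of one kmeans_frag iteration, as implemented above:
#   1. cluster_points_partial: per fragment, assign each point to its nearest
#      centroid and collect the point indices per cluster.
#   2. partial_sum: per fragment, reduce those assignments to
#      {cluster: (count, vector sum)}.
#   3. merge_reduce(reduceCentersTask, ...): combine the per-fragment partial
#      results pairwise into global (count, sum) pairs per cluster.
#   4. mu[c] = sum / count then yields the new centroid of every cluster c.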
if __name__ == "__main__":
import time
import numpy as np
numV = 100
dim = 2
k = 2
numFrag = 2
startTime = time.time()
result = kmeans_frag(numV, k, dim, 1e-4, 10, numFrag)
print("Elapsed Time {} (s)".format(time.time() - startTime))
| {
"content_hash": "a7fa7304c51c6ccb43f0468ad66e41a8",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 76,
"avg_line_length": 29.11377245508982,
"alnum_prop": 0.5950226244343891,
"repo_name": "mF2C/COMPSs",
"id": "7b6c0f8081cce7fb60a1bbfb90097f5a19640963",
"size": "4906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sources/tools/1_tracing_userevents_hook_python/src/task_tracing_userEvents_tracingHookEnabled.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "1595"
},
{
"name": "C",
"bytes": "222477"
},
{
"name": "C++",
"bytes": "200186"
},
{
"name": "Dockerfile",
"bytes": "901"
},
{
"name": "Gnuplot",
"bytes": "4195"
},
{
"name": "Java",
"bytes": "4213323"
},
{
"name": "JavaScript",
"bytes": "16906"
},
{
"name": "Jupyter Notebook",
"bytes": "10514"
},
{
"name": "Lex",
"bytes": "1356"
},
{
"name": "M4",
"bytes": "5538"
},
{
"name": "Makefile",
"bytes": "14740"
},
{
"name": "Python",
"bytes": "635267"
},
{
"name": "Shell",
"bytes": "1241476"
},
{
"name": "XSLT",
"bytes": "177323"
},
{
"name": "Yacc",
"bytes": "3655"
}
],
"symlink_target": ""
} |
from functools import partial
from inspect import signature
from itertools import product
from itertools import chain
from itertools import permutations
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_less
from sklearn.utils._testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import det_curve
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import max_error
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_tweedie_deviance
from sklearn.metrics import mean_poisson_deviance
from sklearn.metrics import mean_gamma_deviance
from sklearn.metrics import median_absolute_error
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import zero_one_loss
from sklearn.metrics import ndcg_score
from sklearn.metrics import dcg_score
from sklearn.metrics import top_k_accuracy_score
from sklearn.metrics._base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
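# As a sketch of how these dictionaries are meant to be consumed (hypothetical,
# not one of the tests defined in this module), a sample-order invariance check
# could iterate over them like this:
#
#     rng = check_random_state(0)
#     y_true = rng.randint(0, 2, size=20)
#     y_pred = rng.randint(0, 2, size=20)
#     perm = rng.permutation(20)
#     for name, metric in CLASSIFICATION_METRICS.items():
#         if name in METRIC_UNDEFINED_BINARY:
#             continue
#         assert_allclose(metric(y_true, y_pred),
#                         metric(y_true[perm], y_pred[perm]))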
REGRESSION_METRICS = {
"max_error": max_error,
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"mean_absolute_percentage_error": mean_absolute_percentage_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
"mean_normal_deviance": partial(mean_tweedie_deviance, power=0),
"mean_poisson_deviance": mean_poisson_deviance,
"mean_gamma_deviance": mean_gamma_deviance,
"mean_compound_poisson_deviance":
partial(mean_tweedie_deviance, power=1.4),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"balanced_accuracy_score": balanced_accuracy_score,
"adjusted_balanced_accuracy_score": partial(balanced_accuracy_score,
adjusted=True),
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
    # `confusion_matrix` returns absolute values and hence behaves unnormalized.
    # Naming it with an unnormalized_ prefix is necessary for this module to
# skip sample_weight scaling checks which will fail for unnormalized
# metrics.
"unnormalized_confusion_matrix": confusion_matrix,
"normalized_confusion_matrix": lambda *args, **kwargs: (
confusion_matrix(*args, **kwargs).astype('float') / confusion_matrix(
*args, **kwargs).sum(axis=1)[:, np.newaxis]
),
"unnormalized_multilabel_confusion_matrix": multilabel_confusion_matrix,
"unnormalized_multilabel_confusion_matrix_sample":
partial(multilabel_confusion_matrix, samplewise=True),
"hamming_loss": hamming_loss,
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"jaccard_score": jaccard_score,
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"weighted_jaccard_score": partial(jaccard_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"micro_jaccard_score": partial(jaccard_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"macro_jaccard_score": partial(jaccard_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"samples_jaccard_score": partial(jaccard_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
def precision_recall_curve_padded_thresholds(*args, **kwargs):
"""
The dimensions of precision-recall pairs and the threshold array as
returned by the precision_recall_curve do not match. See
    :func:`sklearn.metrics.precision_recall_curve`.
    This prevents implicit conversion of the return value triple to a higher
    dimensional np.array of dtype('float64') (it would be of dtype('object')
    instead). This again is needed for assert_array_equal to work correctly.
As a workaround we pad the threshold array with NaN values to match
the dimension of precision and recall arrays respectively.
"""
precision, recall, thresholds = precision_recall_curve(*args, **kwargs)
    pad_thresholds = len(precision) - len(thresholds)
    return np.array([
        precision,
        recall,
        np.pad(thresholds,
               pad_width=(0, pad_thresholds),
               mode='constant',
               constant_values=[np.nan])
    ])
CURVE_METRICS = {
"roc_curve": roc_curve,
"precision_recall_curve": precision_recall_curve_padded_thresholds,
"det_curve": det_curve,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score, # default: average="macro"
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"ovr_roc_auc": partial(roc_auc_score, average="macro", multi_class='ovr'),
"weighted_ovr_roc_auc": partial(roc_auc_score, average="weighted",
multi_class='ovr'),
"ovo_roc_auc": partial(roc_auc_score, average="macro", multi_class='ovo'),
"weighted_ovo_roc_auc": partial(roc_auc_score, average="weighted",
multi_class='ovo'),
"partial_roc_auc": partial(roc_auc_score, max_fpr=0.5),
"average_precision_score":
average_precision_score, # default: average="macro"
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
"ndcg_score": ndcg_score,
"dcg_score": dcg_score,
"top_k_accuracy_score": top_k_accuracy_score
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
ALL_METRICS.update(CURVE_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = {
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"samples_jaccard_score",
"coverage_error",
"unnormalized_multilabel_confusion_matrix_sample",
"label_ranking_loss",
"label_ranking_average_precision_score",
"dcg_score",
"ndcg_score"
}
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = {
"brier_score_loss",
"micro_roc_auc",
"samples_roc_auc",
"partial_roc_auc",
"roc_auc_score",
"weighted_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
"jaccard_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
# curves
"roc_curve",
"precision_recall_curve",
"det_curve",
}
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union(
METRIC_UNDEFINED_MULTICLASS)
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = {
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"jaccard_score"
}
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = {
"roc_auc_score", "average_precision_score", "partial_roc_auc",
}
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = {
"roc_curve",
"precision_recall_curve",
"det_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"jaccard_score",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"samples_average_precision_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
}
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = {
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"det_curve",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"jaccard_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"weighted_jaccard_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"micro_jaccard_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"macro_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"unnormalized_multilabel_confusion_matrix_sample",
"cohen_kappa_score",
}
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = {
"accuracy_score",
"top_k_accuracy_score",
"zero_one_loss",
}
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = {
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "partial_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"coverage_error", "label_ranking_loss",
"ndcg_score",
"dcg_score",
"label_ranking_average_precision_score",
}
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = {
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"weighted_jaccard_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"macro_jaccard_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"micro_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
"samples_jaccard_score",
}
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = {
"mean_absolute_error", "median_absolute_error", "mean_squared_error",
"r2_score", "explained_variance_score", "mean_absolute_percentage_error"
}
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = {
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"zero_one_loss", "unnormalized_zero_one_loss",
"micro_jaccard_score", "macro_jaccard_score",
"jaccard_score",
"samples_jaccard_score",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error", "max_error",
"cohen_kappa_score", "mean_normal_deviance"
}
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = {
"balanced_accuracy_score",
"adjusted_balanced_accuracy_score",
"explained_variance_score",
"r2_score",
"unnormalized_confusion_matrix",
"normalized_confusion_matrix",
"roc_curve",
"precision_recall_curve",
"det_curve",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_jaccard_score",
"unnormalized_multilabel_confusion_matrix",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss",
"mean_gamma_deviance", "mean_poisson_deviance",
"mean_compound_poisson_deviance", "mean_absolute_percentage_error"
}
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = {
"median_absolute_error",
"max_error",
"ovo_roc_auc",
"weighted_ovo_roc_auc"
}
METRICS_REQUIRE_POSITIVE_Y = {
"mean_poisson_deviance",
"mean_gamma_deviance",
"mean_compound_poisson_deviance",
}
def _require_positive_targets(y1, y2):
"""Make targets strictly positive"""
offset = abs(min(y1.min(), y2.min())) + 1
y1 += offset
y2 += offset
return y1, y2
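# Worked example for the helper above (illustration only): if y1.min() == -3
# and y2.min() == 0, then offset == abs(-3) + 1 == 4 and the shifted targets
# start at 1, i.e. both arrays become strictly positive as required by the
# deviance-based metrics in METRICS_REQUIRE_POSITIVE_Y.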
def test_symmetry_consistency():
# We shouldn't forget any metrics
assert (SYMMETRIC_METRICS.union(
NOT_SYMMETRIC_METRICS, set(THRESHOLDED_METRICS),
METRIC_UNDEFINED_BINARY_MULTICLASS) ==
set(ALL_METRICS))
assert (
SYMMETRIC_METRICS.intersection(NOT_SYMMETRIC_METRICS) ==
set())
@pytest.mark.parametrize("name", sorted(SYMMETRIC_METRICS))
def test_symmetric_metric(name):
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
y_true_bin = random_state.randint(0, 2, size=(20, 25))
y_pred_bin = random_state.randint(0, 2, size=(20, 25))
metric = ALL_METRICS[name]
if name in METRIC_UNDEFINED_BINARY:
if name in MULTILABELS_METRICS:
assert_allclose(metric(y_true_bin, y_pred_bin),
metric(y_pred_bin, y_true_bin),
err_msg="%s is not symmetric" % name)
else:
assert False, "This case is currently unhandled"
else:
assert_allclose(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
@pytest.mark.parametrize("name", sorted(NOT_SYMMETRIC_METRICS))
def test_not_symmetric_metric(name):
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
metric = ALL_METRICS[name]
# use context manager to supply custom error message
with pytest.raises(AssertionError):
assert_array_equal(metric(y_true, y_pred), metric(y_pred, y_true))
raise ValueError("%s seems to be symmetric" % name)
@pytest.mark.parametrize(
'name',
sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_sample_order_invariance(name):
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y_true, y_pred = _require_positive_targets(y_true, y_pred)
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
with ignore_warnings():
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_allclose(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant" % name)
assert_allclose(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant" % name)
@pytest.mark.parametrize(
'name',
sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_format_invariance_with_1d_vectors(name):
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
if name in METRICS_REQUIRE_POSITIVE_Y:
y1, y2 = _require_positive_targets(y1, y2)
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_array_equal(y1_1d.ndim, 1)
assert_array_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
with ignore_warnings():
metric = ALL_METRICS[name]
measure = metric(y1, y2)
assert_allclose(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant with list"
"" % name)
assert_allclose(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant with "
"np-array-1d" % name)
assert_allclose(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant with "
"np-array-column" % name)
# Mix format support
assert_allclose(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and list" % name)
assert_allclose(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and list" % name)
assert_allclose(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and np-array-column" % name)
assert_allclose(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant with mix "
"np-array-1d and np-array-column" % name)
assert_allclose(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant with mix "
"list and np-array-column" % name)
assert_allclose(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant with mix "
"list and np-array-column" % name)
# These mix representations aren't allowed
with pytest.raises(ValueError):
metric(y1_1d, y2_row)
with pytest.raises(ValueError):
metric(y1_row, y2_1d)
with pytest.raises(ValueError):
metric(y1_list, y2_row)
with pytest.raises(ValueError):
metric(y1_row, y2_list)
with pytest.raises(ValueError):
metric(y1_column, y2_row)
with pytest.raises(ValueError):
metric(y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS | THRESHOLDED_MULTILABEL_METRICS |
MULTILABELS_METRICS)):
with pytest.raises(ValueError):
metric(y1_row, y2_row)
@pytest.mark.parametrize(
'name',
sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_classification_invariance_string_vs_numbers_labels(name):
# Ensure that classification metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
with ignore_warnings():
metric = CLASSIFICATION_METRICS[name]
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
@pytest.mark.parametrize('name', THRESHOLDED_METRICS)
def test_thresholded_invariance_string_vs_numbers_labels(name):
# Ensure that thresholded metrics with string labels are invariant
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
pos_label_str = "spam"
with ignore_warnings():
metric = THRESHOLDED_METRICS[name]
if name not in METRIC_UNDEFINED_BINARY:
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: those metrics don't support string labels yet
with pytest.raises(ValueError):
metric(y1_str, y2)
with pytest.raises(ValueError):
metric(y1_str.astype('O'), y2)
invalids_nan_inf = [
([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf]),
([0, 1], [np.inf, 1]),
([0, 1], [np.nan, 1]),
]
@pytest.mark.parametrize(
'metric',
chain(THRESHOLDED_METRICS.values(), REGRESSION_METRICS.values())
)
@pytest.mark.parametrize("y_true, y_score", invalids_nan_inf)
def test_regression_thresholded_inf_nan_input(metric, y_true, y_score):
with pytest.raises(ValueError, match="contains NaN, infinity"):
metric(y_true, y_score)
@pytest.mark.parametrize('metric', CLASSIFICATION_METRICS.values())
@pytest.mark.parametrize(
"y_true, y_score",
invalids_nan_inf +
# Add an additional case for classification only
# non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/6809
[([np.nan, 1, 2], [1, 2, 3])]
)
def test_classification_inf_nan_input(metric, y_true, y_score):
"""check that classification metrics raise a message mentioning the
occurrence of non-finite values in the target vectors."""
err_msg = "Input contains NaN, infinity or a value too large"
with pytest.raises(ValueError, match=err_msg):
metric(y_true, y_score)
@pytest.mark.parametrize('metric', CLASSIFICATION_METRICS.values())
def test_classification_binary_continuous_input(metric):
"""check that classification metrics raise a message of mixed type data
with continuous/binary target vectors."""
y_true, y_score = ['a', 'b', 'a'], [0.1, 0.2, 0.3]
err_msg = (
"Classification metrics can't handle a mix of binary and continuous "
"targets"
)
with pytest.raises(ValueError, match=err_msg):
metric(y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
if name in METRICS_REQUIRE_POSITIVE_Y:
values = [1, 2]
else:
values = [0, 1]
for i, j in product(values, repeat=2):
metric([i], [j])
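# Background sketch for the comment above (not part of the original tests):
# np.squeeze collapses a length-1 array into a 0-d array, which is why single
# sample support needs an explicit non-regression check, e.g.
#
#     squeezed = np.squeeze(np.array([3.0]))
#     # squeezed.ndim == 0 and squeezed.shape == ()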
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS)
# Those metrics are not always defined with one sample
# or in multiclass classification
- METRIC_UNDEFINED_BINARY_MULTICLASS - set(THRESHOLDED_METRICS)))
def test_single_sample(name):
check_single_sample(name)
@pytest.mark.parametrize('name',
sorted(MULTIOUTPUT_METRICS | MULTILABELS_METRICS))
def test_single_sample_multioutput(name):
check_single_sample_multioutput(name)
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS))
def test_multioutput_number_of_output_differ(name):
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
metric = ALL_METRICS[name]
with pytest.raises(ValueError):
metric(y_true, y_pred)
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS))
def test_multioutput_regression_invariance_to_dimension_shuffling(name):
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_allclose(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling invariant" % (
name))
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
y1_list_array_indicator = list(y1)
y2_list_array_indicator = list(y2)
y1_list_list_indicator = [list(a) for a in y1_list_array_indicator]
y2_list_list_indicator = [list(a) for a in y2_list_array_indicator]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_allclose(metric(y1_sparse_indicator, y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance between "
"dense and sparse indicator formats." % name)
assert_almost_equal(metric(y1_list_list_indicator,
y2_list_list_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense array and list of list "
"indicator formats." % name)
assert_almost_equal(metric(y1_list_array_indicator,
y2_list_array_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and list of array "
"indicator formats." % name)
@pytest.mark.parametrize('name', sorted(MULTILABELS_METRICS))
def test_raise_value_error_multilabel_sequences(name):
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
with pytest.raises(ValueError):
metric(seq, seq)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_binary_classification(name):
# Test in the binary case
n_classes = 2
n_samples = 20
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.normal(size=y_true.shape)
metrics = ALL_METRICS[name]
pred = y_score if name in THRESHOLDED_METRICS else y_pred
measure_normalized = metrics(y_true, pred, normalize=True)
measure_not_normalized = metrics(y_true, pred, normalize=False)
assert_array_less(-1.0 * measure_normalized, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(measure_normalized, measure_not_normalized / n_samples,
err_msg=f"Failed with {name}")
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_multiclass_classification(name):
# Test in the multiclass case
n_classes = 4
n_samples = 20
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
metrics = ALL_METRICS[name]
pred = y_score if name in THRESHOLDED_METRICS else y_pred
measure_normalized = metrics(y_true, pred, normalize=True)
measure_not_normalized = metrics(y_true, pred, normalize=False)
assert_array_less(-1.0 * measure_normalized, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(measure_normalized, measure_not_normalized / n_samples,
err_msg=f"Failed with {name}")
@pytest.mark.parametrize('name', sorted(
METRICS_WITH_NORMALIZE_OPTION.intersection(MULTILABELS_METRICS)
))
def test_normalize_option_multilabel_classification(name):
# Test in the multilabel case
n_classes = 4
n_samples = 100
random_state = check_random_state(0)
    # for both random_state 0 and 1, y_true and y_pred have at least one
    # unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
y_score = random_state.uniform(size=y_true.shape)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
metrics = ALL_METRICS[name]
pred = y_score if name in THRESHOLDED_METRICS else y_pred
measure_normalized = metrics(y_true, pred, normalize=True)
measure_not_normalized = metrics(y_true, pred, normalize=False)
assert_array_less(-1.0 * measure_normalized, 0,
err_msg="We failed to test correctly the normalize "
"option")
assert_allclose(measure_normalized, measure_not_normalized / n_samples,
err_msg=f"Failed with {name}")
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_allclose(label_measure,
[metric(y_true_binarize[:, i], y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_allclose(micro_measure,
metric(y_true_binarize.ravel(), y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_allclose(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure,
np.average(label_measure, weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_allclose(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_allclose(sample_measure,
np.mean([metric(y_true_binarize[i], y_pred_binarize[i])
for i in range(n_samples)]))
with pytest.raises(ValueError):
metric(y_true, y_pred, average="unknown")
with pytest.raises(ValueError):
metric(y_true, y_pred, average="garbage")
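# Worked numeric sketch of the relationships checked above (illustration only):
# with per-label scores [1.0, 0.5, 0.0] and label supports [2, 1, 1],
#     macro    = mean([1.0, 0.5, 0.0])                 = 0.5
#     weighted = (2*1.0 + 1*0.5 + 1*0.0) / (2 + 1 + 1) = 0.625
# while "micro" recomputes the metric once on the raveled binarized arrays
# instead of averaging per-label scores.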
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multiclass(name):
n_samples, n_classes = 50, 3
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize(
'name',
sorted(METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING))
def test_averaging_multilabel(name):
n_samples, n_classes = 40, 5
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_zeroes(name):
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
def test_averaging_binary_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_ones(name):
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
check_averaging(name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
    # top_k_accuracy_score always leads to a perfect score for k > 1 in the
    # binary case
metric = partial(metric, k=1) if name == "top_k_accuracy_score" else metric
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_allclose(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
# use context manager to supply custom error message
with pytest.raises(AssertionError):
assert_allclose(unweighted_score, weighted_score)
raise ValueError("Unweighted and weighted scores are unexpectedly "
"almost equal (%s) and (%s) "
"for %s" % (unweighted_score, weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_allclose(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%s != %s) for %s") % (
weighted_score, weighted_score_list, name))
    # check that integer weights are equivalent to repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_allclose(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_allclose(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%s != %s) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_allclose(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that a meaningful error is raised when the numbers of samples in
    # y_true and sample_weight do not match.
error_message = (r"Found input variables with inconsistent numbers of "
r"samples: \[{}, {}, {}\]".format(
_num_samples(y1), _num_samples(y2),
_num_samples(sample_weight) * 2))
with pytest.raises(ValueError, match=error_message):
metric(y1, y2, sample_weight=np.hstack([sample_weight,
sample_weight]))
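# Minimal sketch of the "integer weights equal repeated samples" property
# checked above (illustration only, using accuracy_score as an example of a
# sample_weight-aware metric):
#
#     y_true, y_pred = np.array([0, 1, 1]), np.array([0, 0, 1])
#     w = np.array([1, 2, 3])
#     # accuracy_score(y_true, y_pred, sample_weight=w)
#     # == accuracy_score(np.repeat(y_true, w), np.repeat(y_pred, w))
#     # == 4 / 6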
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS).intersection(set(REGRESSION_METRICS)) -
METRICS_WITHOUT_SAMPLE_WEIGHT))
def test_regression_sample_weight_invariance(name):
n_samples = 50
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS) - set(REGRESSION_METRICS) -
METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY))
def test_binary_sample_weight_invariance(name):
# binary
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
sorted(
set(ALL_METRICS) - set(REGRESSION_METRICS) -
METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_multiclass_sample_weight_invariance(name):
# multiclass
n_samples = 50
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
        # map the raw scores onto a probability simplex (softmax of -y_score)
temp = np.exp(-y_score)
y_score_norm = temp / temp.sum(axis=-1).reshape(-1, 1)
check_sample_weight_invariance(name, metric, y_true, y_score_norm)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
'name',
sorted((MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS
| MULTIOUTPUT_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT))
def test_multilabel_sample_weight_invariance(name):
# multilabel indicator
random_state = check_random_state(0)
_, ya = make_multilabel_classification(n_features=1, n_classes=10,
random_state=0, n_samples=50,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=10,
random_state=1, n_samples=50,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
check_sample_weight_invariance(name, metric, y_true, y_score)
else:
check_sample_weight_invariance(name, metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
@pytest.mark.parametrize(
'name',
sorted(MULTILABELS_METRICS - {"unnormalized_multilabel_confusion_matrix"}))
def test_multilabel_label_permutations_invariance(name):
random_state = check_random_state(0)
n_samples, n_classes = 20, 4
y_true = random_state.randint(0, 2, size=(n_samples, n_classes))
y_score = random_state.randint(0, 2, size=(n_samples, n_classes))
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
y_score_perm = y_score[:, perm]
y_true_perm = y_true[:, perm]
current_score = metric(y_true_perm, y_score_perm)
assert_almost_equal(score, current_score)
@pytest.mark.parametrize(
'name', sorted(THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS))
def test_thresholded_multilabel_multioutput_permutations_invariance(name):
random_state = check_random_state(0)
n_samples, n_classes = 20, 4
y_true = random_state.randint(0, 2, size=(n_samples, n_classes))
y_score = random_state.normal(size=y_true.shape)
    # Make sure all samples have at least one label. This works around errors
    # when running metrics with average="samples"
y_true[y_true.sum(1) == 4, 0] = 0
y_true[y_true.sum(1) == 0, 0] = 1
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
y_score_perm = y_score[:, perm]
y_true_perm = y_true[:, perm]
current_score = metric(y_true_perm, y_score_perm)
if metric == mean_absolute_percentage_error:
assert np.isfinite(current_score)
assert current_score > 1e6
            # We do not compare the values for MAPE because whenever a y_true
            # value is exactly zero the MAPE is not meaningful; in that case we
            # only expect a very large finite value.
else:
assert_almost_equal(score, current_score)
@pytest.mark.parametrize(
'name',
sorted(set(THRESHOLDED_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_thresholded_metric_permutation_invariance(name):
n_samples, n_classes = 100, 3
random_state = check_random_state(0)
y_score = random_state.rand(n_samples, n_classes)
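    # map the random scores onto a probability simplex (softmax of -y_score)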
temp = np.exp(-y_score)
y_score = temp / temp.sum(axis=-1).reshape(-1, 1)
y_true = random_state.randint(0, n_classes, size=n_samples)
metric = ALL_METRICS[name]
score = metric(y_true, y_score)
for perm in permutations(range(n_classes), n_classes):
inverse_perm = np.zeros(n_classes, dtype=int)
inverse_perm[list(perm)] = np.arange(n_classes)
y_score_perm = y_score[:, inverse_perm]
y_true_perm = np.take(perm, y_true)
current_score = metric(y_true_perm, y_score_perm)
assert_almost_equal(score, current_score)
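# Sketch of the relabeling logic above (illustration only): with
# perm == (2, 0, 1), a true label i is renamed perm[i] and
# inverse_perm == [1, 2, 0] reorders the score columns so that
# y_score_perm[:, perm[i]] == y_score[:, i]; each renamed class therefore keeps
# its original score column and a thresholded metric is unchanged.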
@pytest.mark.parametrize("metric_name", CLASSIFICATION_METRICS)
def test_metrics_consistent_type_error(metric_name):
    # check that an understandable message is raised when the types of y_true
    # and y_pred mismatch
rng = np.random.RandomState(42)
y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=object)
y2 = rng.randint(0, 2, size=y1.size)
err_msg = "Labels in y_true and y_pred should be of the same type."
with pytest.raises(TypeError, match=err_msg):
CLASSIFICATION_METRICS[metric_name](y1, y2)
@pytest.mark.parametrize(
"metric, y_pred_threshold",
[
(average_precision_score, True),
(brier_score_loss, True),
(f1_score, False),
(partial(fbeta_score, beta=1), False),
(jaccard_score, False),
(precision_recall_curve, True),
(precision_score, False),
(recall_score, False),
(roc_curve, True),
],
)
@pytest.mark.parametrize("dtype_y_str", [str, object])
def test_metrics_pos_label_error_str(metric, y_pred_threshold, dtype_y_str):
    # check the error message raised when `pos_label` is not specified and the
    # targets are made of strings.
rng = np.random.RandomState(42)
y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=dtype_y_str)
y2 = rng.randint(0, 2, size=y1.size)
if not y_pred_threshold:
y2 = np.array(["spam", "eggs"], dtype=dtype_y_str)[y2]
err_msg_pos_label_None = (
"y_true takes value in {'eggs', 'spam'} and pos_label is not "
"specified: either make y_true take value in {0, 1} or {-1, 1} or "
"pass pos_label explicit"
)
err_msg_pos_label_1 = (
r"pos_label=1 is not a valid label. It should be one of "
r"\['eggs', 'spam'\]"
)
pos_label_default = signature(metric).parameters["pos_label"].default
err_msg = (
err_msg_pos_label_1
if pos_label_default == 1
else err_msg_pos_label_None
)
with pytest.raises(ValueError, match=err_msg):
metric(y1, y2)
| {
"content_hash": "ae68514cc683de5f88b835e568247e27",
"timestamp": "",
"source": "github",
"line_count": 1530,
"max_line_length": 79,
"avg_line_length": 37.9562091503268,
"alnum_prop": 0.6301379298469169,
"repo_name": "ndingwall/scikit-learn",
"id": "767228d4607890e899b1944e1e0d8b8dd68dab5b",
"size": "58074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/metrics/tests/test_common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6794973"
},
{
"name": "Shell",
"bytes": "13442"
}
],
"symlink_target": ""
} |
import os
import sys
import django.core.handlers.wsgi
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.wsgi
_HERE = os.path.dirname(os.path.abspath(__file__))
os.environ['DJANGO_SETTINGS_MODULE'] = "booksite.settings"
def main(port):
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
tornado_app = tornado.web.Application(
[('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_app)),
])
server = tornado.httpserver.HTTPServer(tornado_app)
server.listen(port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main(int(sys.argv[1]))
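# Usage sketch (assumption, not part of the original script): serve the Django
# site through Tornado's HTTP server on a chosen port, e.g.
#
#     python tornado_wsgi.py 8000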
| {
"content_hash": "5ed5b920370fb8c797b377083725aee7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 70,
"avg_line_length": 27.791666666666668,
"alnum_prop": 0.697151424287856,
"repo_name": "tkliuxing/bookspider",
"id": "4b184743e25e141d697ddf66e4d5e8a7d03d17c5",
"size": "689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booksite/booksite/tornado_wsgi.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "606163"
},
{
"name": "HTML",
"bytes": "468471"
},
{
"name": "JavaScript",
"bytes": "275579"
},
{
"name": "Python",
"bytes": "169957"
},
{
"name": "Shell",
"bytes": "361"
}
],
"symlink_target": ""
} |
import pickle as pkl
from mxnet.ndarray import NDArray
import mxnet as mx
from mxnet.test_utils import *
from common import random_seed
from mxnet.base import mx_real_t
from numpy.testing import assert_allclose
import numpy.random as rnd
import numpy as np
import scipy.sparse as spsp
from common import assertRaises, xfail_when_nonstandard_decimal_separator
from mxnet.ndarray.sparse import RowSparseNDArray, CSRNDArray
import pytest
mx.npx.reset_np()
def sparse_nd_ones(shape, stype):
return mx.nd.ones(shape).tostype(stype)
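# Quick illustration of the helper above (not part of the original tests):
# tostype converts a dense NDArray into the requested sparse storage type, e.g.
#
#     csr = sparse_nd_ones((2, 3), 'csr')
#     # csr.stype == 'csr' and (csr.asnumpy() == np.ones((2, 3))).all()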
def test_sparse_nd_elemwise_add():
def check_sparse_nd_elemwise_binary(shapes, stypes, f, g):
# generate inputs
nds = []
for i, stype in enumerate(stypes):
if stype == 'row_sparse':
nd, _ = rand_sparse_ndarray(shapes[i], stype)
elif stype == 'default':
nd = mx.nd.array(random_arrays(shapes[i]), dtype = np.float32)
else:
assert(False)
nds.append(nd)
# check result
test = f(nds[0], nds[1])
assert_almost_equal(test.asnumpy(), g(nds[0].asnumpy(), nds[1].asnumpy()))
num_repeats = 3
g = lambda x,y: x + y
op = mx.nd.elemwise_add
for _ in range(num_repeats):
shape = [rand_shape_2d()] * 2
check_sparse_nd_elemwise_binary(shape, ['default'] * 2, op, g)
check_sparse_nd_elemwise_binary(shape, ['row_sparse', 'row_sparse'], op, g)
def test_sparse_nd_copy():
def check_sparse_nd_copy(from_stype, to_stype, shape):
from_nd = rand_ndarray(shape, from_stype)
# copy to ctx
to_ctx = from_nd.copyto(default_device())
# copy to stype
to_nd = rand_ndarray(shape, to_stype)
to_nd = from_nd.copyto(to_nd)
assert np.sum(np.abs(from_nd.asnumpy() != to_ctx.asnumpy())) == 0.0
assert np.sum(np.abs(from_nd.asnumpy() != to_nd.asnumpy())) == 0.0
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
stypes = ['row_sparse', 'csr']
for stype in stypes:
check_sparse_nd_copy(stype, 'default', shape)
check_sparse_nd_copy('default', stype, shape)
check_sparse_nd_copy('row_sparse', 'row_sparse', shape_3d)
check_sparse_nd_copy('row_sparse', 'default', shape_3d)
check_sparse_nd_copy('default', 'row_sparse', shape_3d)
def test_sparse_nd_basic():
def check_sparse_nd_basic_rsp():
storage_type = 'row_sparse'
shape = rand_shape_2d()
nd, (v, idx) = rand_sparse_ndarray(shape, storage_type)
assert(nd._num_aux == 1)
assert(nd.indices.dtype == np.int64)
assert(nd.stype == 'row_sparse')
check_sparse_nd_basic_rsp()
def test_sparse_nd_setitem():
def check_sparse_nd_setitem(stype, shape, dst):
x = mx.nd.zeros(shape=shape, stype=stype)
x[:] = dst
dst_nd = mx.nd.array(dst) if isinstance(dst, (np.ndarray, np.generic)) else dst
assert np.all(x.asnumpy() == dst_nd.asnumpy() if isinstance(dst_nd, NDArray) else dst)
shape = rand_shape_2d()
for stype in ['row_sparse', 'csr']:
# ndarray assignment
check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default'))
check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, stype))
# numpy assignment
check_sparse_nd_setitem(stype, shape, np.ones(shape))
# scalar assigned to row_sparse NDArray
check_sparse_nd_setitem('row_sparse', shape, 2)
def test_sparse_nd_slice():
shape = (rnd.randint(2, 10), rnd.randint(2, 10))
stype = 'csr'
A, _ = rand_sparse_ndarray(shape, stype)
A2 = A.asnumpy()
start = rnd.randint(0, shape[0] - 1)
end = rnd.randint(start + 1, shape[0])
assert same(A[start:end].asnumpy(), A2[start:end])
assert same(A[start - shape[0]:end].asnumpy(), A2[start:end])
assert same(A[start:].asnumpy(), A2[start:])
assert same(A[:end].asnumpy(), A2[:end])
ind = rnd.randint(-shape[0], shape[0] - 1)
assert same(A[ind].asnumpy(), A2[ind][np.newaxis, :])
start_col = rnd.randint(0, shape[1] - 1)
end_col = rnd.randint(start_col + 1, shape[1])
result = mx.nd.slice(A, begin=(start, start_col), end=(end, end_col))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start, start_col), end=(end, end_col))
assert same(result_dense.asnumpy(), result.asnumpy())
A = mx.nd.sparse.zeros('csr', shape)
A2 = A.asnumpy()
assert same(A[start:end].asnumpy(), A2[start:end])
result = mx.nd.slice(A, begin=(start, start_col), end=(end, end_col))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start, start_col), end=(end, end_col))
assert same(result_dense.asnumpy(), result.asnumpy())
def check_slice_nd_csr_fallback(shape):
stype = 'csr'
A, _ = rand_sparse_ndarray(shape, stype)
A2 = A.asnumpy()
start = rnd.randint(0, shape[0] - 1)
end = rnd.randint(start + 1, shape[0])
# non-trivial step should fallback to dense slice op
result = mx.nd.sparse.slice(A, begin=(start,), end=(end + 1,), step=(2,))
result_dense = mx.nd.slice(mx.nd.array(A2), begin=(start,), end=(end + 1,), step=(2,))
assert same(result_dense.asnumpy(), result.asnumpy())
shape = (rnd.randint(2, 10), rnd.randint(1, 10))
check_slice_nd_csr_fallback(shape)
def test_sparse_nd_concat():
def check_concat(arrays):
ret = np.concatenate([arr.asnumpy() for arr in arrays], axis=0)
same(mx.nd.concat(*arrays, dim=0).asnumpy(), ret)
nds = []
zero_nds = []
ncols = rnd.randint(2, 10)
for _ in range(3):
shape = (rnd.randint(2, 10), ncols)
A, _ = rand_sparse_ndarray(shape, 'csr')
nds.append(A)
zero_nds.append(mx.nd.zeros(shape).tostype('csr'))
check_concat(nds)
check_concat(zero_nds)
def test_sparse_nd_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x == y
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 == y
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == 'default'
z = 1 == y
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == stype
def test_sparse_nd_not_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x != y
assert (z.asnumpy() == np.ones(shape)).all()
z = 0 != y
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == stype
z = 1 != y
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == 'default'
def test_sparse_nd_greater():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x > y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y > 0
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == stype
z = 0 > y
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == stype
z = y > 1
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == stype
def test_sparse_nd_greater_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = x >= y
assert (z.asnumpy() == np.zeros(shape)).all()
z = y >= 0
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == 'default'
z = 0 >= y
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == 'default'
z = y >= 1
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == stype
def test_sparse_nd_lesser():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = y < x
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 < y
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == stype
z = y < 0
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == stype
z = y < 1
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == 'default'
def test_sparse_nd_lesser_equal():
for stype in ['row_sparse', 'csr']:
shape = rand_shape_2d()
x = mx.nd.zeros(shape=shape, stype=stype)
y = sparse_nd_ones(shape, stype)
z = y <= x
assert (z.asnumpy() == np.zeros(shape)).all()
z = 0 <= y
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == 'default'
z = y <= 0
assert (z.asnumpy() == np.zeros(shape)).all()
assert z.stype == 'default'
z = 1 <= y
assert (z.asnumpy() == np.ones(shape)).all()
assert z.stype == stype
def test_sparse_nd_binary():
N = 3
def check_binary(fn, stype):
for _ in range(N):
ndim = 2
oshape = np.random.randint(1, 6, size=(ndim,))
bdim = 2
lshape = list(oshape)
# one for broadcast op, another for elemwise op
rshape = list(oshape[ndim-bdim:])
for i in range(bdim):
sep = np.random.uniform(0, 1)
if sep < 0.33:
lshape[ndim-i-1] = 1
elif sep < 0.66:
rshape[bdim-i-1] = 1
lhs = np.random.uniform(0, 1, size=lshape)
rhs = np.random.uniform(0, 1, size=rshape)
lhs_nd = mx.nd.array(lhs).tostype(stype)
rhs_nd = mx.nd.array(rhs).tostype(stype)
assert_allclose(fn(lhs, rhs), fn(lhs_nd, rhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
assert_allclose(fn(lhs, lhs), fn(lhs_nd, lhs_nd).asnumpy(), rtol=1e-4, atol=1e-4)
stypes = ['row_sparse', 'csr']
for stype in stypes:
check_binary(lambda x, y: x + y, stype)
check_binary(lambda x, y: x - y, stype)
check_binary(lambda x, y: x * y, stype)
check_binary(lambda x, y: x / y, stype)
check_binary(lambda x, y: x ** y, stype)
check_binary(lambda x, y: x > y, stype)
check_binary(lambda x, y: x < y, stype)
check_binary(lambda x, y: x >= y, stype)
check_binary(lambda x, y: x <= y, stype)
check_binary(lambda x, y: x == y, stype)
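# Shape-construction sketch for check_binary above (illustration only): with
# oshape == [3, 4] the loop may set lshape == [3, 1] and rshape == [1, 4], so
# lhs (3, 1) broadcasts against rhs (1, 4) to the common shape (3, 4),
# exercising both the broadcast and the elementwise code paths of each operator.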
@xfail_when_nonstandard_decimal_separator
def test_sparse_nd_binary_scalar_op():
N = 3
def check(fn, stype, out_stype=None):
for _ in range(N):
ndim = 2
shape = np.random.randint(1, 6, size=(ndim,))
npy = np.random.normal(0, 1, size=shape)
nd = mx.nd.array(npy).tostype(stype)
if out_stype is not None:
assert(nd.stype == out_stype)
assert_allclose(fn(npy), fn(nd).asnumpy(), rtol=1e-4, atol=1e-4)
stypes = ['row_sparse', 'csr']
for stype in stypes:
check(lambda x: 1 + x, stype)
check(lambda x: 1 - x, stype)
check(lambda x: 1 * x, stype)
check(lambda x: 1 / x, stype)
check(lambda x: 2 ** x, stype)
check(lambda x: 1 > x, stype)
check(lambda x: 0.5 > x, stype)
check(lambda x: 0.5 < x, stype)
check(lambda x: 0.5 >= x, stype)
check(lambda x: 0.5 <= x, stype)
check(lambda x: 0.5 == x, stype)
check(lambda x: x / 2, stype, out_stype=stype)
check(lambda x: x + 0, stype, out_stype=stype)
check(lambda x: x - 0, stype, out_stype=stype)
def test_sparse_nd_binary_iop():
N = 3
def check_binary(fn, stype):
for _ in range(N):
ndim = 2
oshape = np.random.randint(1, 6, size=(ndim,))
lshape = list(oshape)
rshape = list(oshape)
lhs = np.random.uniform(0, 1, size=lshape)
rhs = np.random.uniform(0, 1, size=rshape)
lhs_nd = mx.nd.array(lhs).tostype(stype)
rhs_nd = mx.nd.array(rhs).tostype(stype)
assert_allclose(fn(lhs, rhs),
fn(lhs_nd, rhs_nd).asnumpy(),
rtol=1e-4, atol=1e-4)
def inplace_add(x, y):
x += y
return x
def inplace_mul(x, y):
x *= y
return x
stypes = ['csr', 'row_sparse']
fns = [inplace_add, inplace_mul]
for stype in stypes:
for fn in fns:
check_binary(fn, stype)
def test_sparse_nd_negate():
def check_sparse_nd_negate(shape, stype):
npy = np.random.uniform(-10, 10, rand_shape_2d())
arr = mx.nd.array(npy).tostype(stype)
assert_almost_equal(npy, arr.asnumpy())
assert_almost_equal(-npy, (-arr).asnumpy())
        # a final check to make sure the negation (-) is not implemented as an
        # inplace operation, so the contents of arr do not change after we
        # compute (-arr)
assert_almost_equal(npy, arr.asnumpy())
shape = rand_shape_2d()
stypes = ['csr', 'row_sparse']
for stype in stypes:
check_sparse_nd_negate(shape, stype)
def test_sparse_nd_broadcast():
sample_num = 1000
# TODO(haibin) test with more than 2 dimensions
def test_broadcast_to(stype):
for _ in range(sample_num):
ndim = 2
target_shape = np.random.randint(1, 11, size=ndim)
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray = mx.nd.array(dat).tostype(stype)
ndarray_ret = ndarray.broadcast_to(shape=target_shape)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
def test_broadcast_like(stype):
for _ in range(sample_num):
ndim = 2
target_shape = np.random.randint(1, 11, size=ndim)
target = mx.nd.ones(shape=tuple(target_shape))
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray = mx.nd.array(dat).tostype(stype)
ndarray_ret = ndarray.broadcast_like(target)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
stypes = ['csr', 'row_sparse']
for stype in stypes:
test_broadcast_to(stype)
test_broadcast_like(stype)
def test_sparse_nd_transpose():
npy = np.random.uniform(-10, 10, rand_shape_2d())
stypes = ['csr', 'row_sparse']
for stype in stypes:
nd = mx.nd.array(npy).tostype(stype)
assert_almost_equal(npy.T, (nd.T).asnumpy())
def test_sparse_nd_storage_fallback():
def check_output_fallback(shape):
ones = mx.nd.ones(shape)
out = mx.nd.zeros(shape=shape, stype='csr')
mx.nd.broadcast_add(ones, ones * 2, out=out)
assert(np.sum(out.asnumpy() - 3) == 0)
def check_input_fallback(shape):
ones = mx.nd.ones(shape)
out = mx.nd.broadcast_add(ones.tostype('csr'), ones.tostype('row_sparse'))
assert(np.sum(out.asnumpy() - 2) == 0)
def check_fallback_with_temp_resource(shape):
ones = mx.nd.ones(shape)
out = mx.nd.sum(ones)
assert(out.asscalar() == np.prod(shape))
shape = rand_shape_2d()
check_output_fallback(shape)
check_input_fallback(shape)
check_fallback_with_temp_resource(shape)
def test_sparse_nd_random():
""" test sparse random operator on cpu """
# gpu random operator doesn't use fixed seed
    if default_device().device_type == 'gpu':
return
shape = (100, 100)
fns = [mx.nd.random.uniform, mx.nd.random.normal, mx.nd.random.gamma]
for fn in fns:
rsp_out = mx.nd.zeros(shape=shape, stype='row_sparse')
dns_out = mx.nd.zeros(shape=shape, stype='default')
with random_seed(0):
fn(shape=shape, out=dns_out)
with random_seed(0):
fn(shape=shape, out=rsp_out)
assert_almost_equal(dns_out.asnumpy(), rsp_out.asnumpy())
def test_sparse_nd_astype():
stypes = ['row_sparse', 'csr']
for stype in stypes:
x = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='float32')
y = x.astype('int32')
assert(y.dtype == np.int32), y.dtype
def test_sparse_nd_astype_copy():
stypes = ['row_sparse', 'csr']
for stype in stypes:
x = mx.nd.zeros(shape=rand_shape_2d(), stype=stype, dtype='int32')
y = x.astype('float32')
assert (y.dtype == np.float32)
# Test that a new ndarray has been allocated
assert (id(x) != id(y))
y = x.astype('float32', copy=False)
assert (y.dtype == np.float32)
# Test that a new ndarray has been allocated
assert (id(x) != id(y))
y = x.astype('int32')
assert (y.dtype == np.int32)
# Test that a new ndarray has been allocated
# even though they have same dtype
assert (id(x) != id(y))
# Test that a new ndarray has not been allocated
y = x.astype('int32', copy=False)
assert (id(x) == id(y))
# Test the string version 'int32'
# has the same behaviour as the np.int32
y = x.astype(np.int32, copy=False)
assert (id(x) == id(y))
def test_sparse_nd_pickle():
dim0 = 40
dim1 = 40
stypes = ['row_sparse', 'csr']
densities = [0, 0.5]
stype_dict = {'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
shape = rand_shape_2d(dim0, dim1)
for stype in stypes:
for density in densities:
a, _ = rand_sparse_ndarray(shape, stype, density)
assert isinstance(a, stype_dict[stype])
data = pkl.dumps(a)
b = pkl.loads(data)
assert isinstance(b, stype_dict[stype])
assert same(a.asnumpy(), b.asnumpy())
@pytest.mark.parametrize('save_fn', [mx.nd.save, mx.npx.savez])
def test_sparse_nd_save_load(save_fn):
stypes = ['default', 'row_sparse', 'csr']
stype_dict = {'default': NDArray, 'row_sparse': RowSparseNDArray, 'csr': CSRNDArray}
num_data = 20
densities = [0, 0.5]
fname = 'tmp_list.npz'
data_list1 = []
for _ in range(num_data):
stype = stypes[np.random.randint(0, len(stypes))]
shape = rand_shape_2d(dim0=40, dim1=40)
density = densities[np.random.randint(0, len(densities))]
data_list1.append(rand_ndarray(shape, stype, density))
assert isinstance(data_list1[-1], stype_dict[stype])
if save_fn is mx.nd.save:
save_fn(fname, data_list1)
else:
save_fn(fname, *data_list1)
data_list2 = mx.nd.load(fname)
if save_fn is mx.npx.savez:
data_list2 = [data_list2['arr_' + str(i)] for i in range(num_data)]
assert len(data_list1) == len(data_list2)
for x, y in zip(data_list1, data_list2):
assert same(x.asnumpy(), y.asnumpy())
data_map1 = {f'ndarray xx {i}': x for i, x in enumerate(data_list1)}
if save_fn is mx.nd.save:
save_fn(fname, data_map1)
else:
save_fn(fname, **data_map1)
data_map2 = mx.nd.load(fname)
assert len(data_map1) == len(data_map2)
for k, x in data_map1.items():
y = data_map2[k]
assert same(x.asnumpy(), y.asnumpy())
os.remove(fname)
@pytest.mark.parametrize('save_fn', [mx.nd.save, mx.npx.savez])
def test_sparse_ndarray_load_csr_npz_scipy(tmp_path, save_fn):
csr_sp = spsp.rand(50, 100, density=0.5, format="csr")
spsp.save_npz(tmp_path / "csr.npz", csr_sp)
csr_mx = mx.nd.load(str(tmp_path / "csr.npz"))['']
assert np.sum(csr_mx.data.asnumpy() != csr_sp.data) == 0
assert np.sum(csr_mx.indices.asnumpy() != csr_sp.indices) == 0
assert np.sum(csr_mx.indptr.asnumpy() != csr_sp.indptr) == 0
csr_mx = save_fn(str(tmp_path / "csr_mx.npz"), csr_mx)
csr_mx_loaded = mx.nd.load(str(tmp_path / "csr_mx.npz"))
csr_mx_loaded = csr_mx_loaded[0] if save_fn is mx.nd.save else csr_mx_loaded['arr_0']
assert np.sum(csr_mx_loaded.data.asnumpy() != csr_sp.data) == 0
assert np.sum(csr_mx_loaded.indices.asnumpy() != csr_sp.indices) == 0
assert np.sum(csr_mx_loaded.indptr.asnumpy() != csr_sp.indptr) == 0
def test_sparse_nd_unsupported():
nd = mx.nd.zeros((2,2), stype='row_sparse')
fn_slice = lambda x: x._slice(None, None)
fn_at = lambda x: x._at(None)
fn_reshape = lambda x: x.reshape(None)
fns = [fn_slice, fn_at, fn_reshape]
for fn in fns:
try:
fn(nd)
assert(False)
except:
pass
def test_create_csr():
def check_create_csr_from_nd(shape, density, dtype):
matrix = rand_ndarray(shape, 'csr', density)
# create data array with provided dtype and ctx
data = mx.nd.array(matrix.data.asnumpy(), dtype=dtype)
indptr = matrix.indptr
indices = matrix.indices
csr_created = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=shape)
assert csr_created.stype == 'csr'
assert same(csr_created.data.asnumpy(), data.asnumpy())
assert same(csr_created.indptr.asnumpy(), indptr.asnumpy())
assert same(csr_created.indices.asnumpy(), indices.asnumpy())
        # verify the csr matrix dtype and ctx are consistent with the ones provided
assert csr_created.dtype == dtype, (csr_created, dtype)
assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
assert csr_created.context == mx.context.current_context(), (csr_created.context, mx.context.current_context())
csr_copy = mx.nd.array(csr_created)
assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
def check_create_csr_from_coo(shape, density, dtype):
matrix = rand_ndarray(shape, 'csr', density)
sp_csr = matrix.asscipy()
sp_coo = sp_csr.tocoo()
csr_created = mx.nd.sparse.csr_matrix((sp_coo.data, (sp_coo.row, sp_coo.col)), shape=shape, dtype=dtype)
assert csr_created.stype == 'csr'
assert same(csr_created.data.asnumpy(), sp_csr.data)
assert same(csr_created.indptr.asnumpy(), sp_csr.indptr)
assert same(csr_created.indices.asnumpy(), sp_csr.indices)
csr_copy = mx.nd.array(csr_created)
assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
        # verify the csr matrix dtype and ctx are consistent
assert csr_created.dtype == dtype, (csr_created.dtype, dtype)
assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
assert csr_created.context == mx.context.current_context(), (csr_created.context, mx.context.current_context())
def check_create_csr_from_scipy(shape, density, f):
def assert_csr_almost_equal(nd, sp):
assert_almost_equal(nd.data.asnumpy(), sp.data)
assert_almost_equal(nd.indptr.asnumpy(), sp.indptr)
assert_almost_equal(nd.indices.asnumpy(), sp.indices)
sp_csr = nd.asscipy()
assert_almost_equal(sp_csr.data, sp.data)
assert_almost_equal(sp_csr.indptr, sp.indptr)
assert_almost_equal(sp_csr.indices, sp.indices)
assert(sp.dtype == sp_csr.dtype), (sp.dtype, sp_csr.dtype)
# random canonical csr
csr_sp = spsp.rand(shape[0], shape[1], density, format="csr")
csr_nd = f(csr_sp)
assert_csr_almost_equal(csr_nd, csr_sp)
# non-canonical csr which contains duplicates and unsorted indices
indptr = np.array([0, 2, 3, 7])
indices = np.array([0, 2, 2, 0, 1, 2, 1])
data = np.array([1, 2, 3, 4, 5, 6, 1])
non_canonical_csr = spsp.csr_matrix((data, indices, indptr), shape=(3, 3), dtype=csr_nd.dtype)
canonical_csr_nd = f(non_canonical_csr, dtype=csr_nd.dtype)
canonical_csr_sp = non_canonical_csr.copy()
canonical_csr_sp.sum_duplicates()
canonical_csr_sp.sort_indices()
assert_csr_almost_equal(canonical_csr_nd, canonical_csr_sp)
dim0 = 20
dim1 = 20
densities = [0, 0.5]
dtype = np.float64
for density in densities:
shape = rand_shape_2d(dim0, dim1)
check_create_csr_from_nd(shape, density, dtype)
check_create_csr_from_coo(shape, density, dtype)
check_create_csr_from_scipy(shape, density, mx.nd.sparse.array)
check_create_csr_from_scipy(shape, density, mx.nd.array)
def test_create_row_sparse():
dim0 = 50
dim1 = 50
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape)
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
# add this test since we added np.int32 and np.int64 to integer_types
if len(shape) == 2:
for np_int_type in (np.int32, np.int64):
shape = list(shape)
shape = [np_int_type(x) for x in shape]
arg1 = tuple(shape)
mx.nd.sparse.row_sparse_array(arg1, tuple(shape))
shape[0] += 1
assert_exception(mx.nd.sparse.row_sparse_array, ValueError, arg1, tuple(shape))
def test_create_sparse_nd_infer_shape():
def check_create_csr_infer_shape(shape, density, dtype):
try:
matrix = rand_ndarray(shape, 'csr', density=density)
data = matrix.data
indptr = matrix.indptr
indices = matrix.indices
nd = mx.nd.sparse.csr_matrix((data, indices, indptr), dtype=dtype)
num_rows, num_cols = nd.shape
assert(num_rows == len(indptr) - 1)
assert(indices.shape[0] > 0), indices
assert(np.sum((num_cols <= indices).asnumpy()) == 0)
assert(nd.dtype == dtype), (nd.dtype, dtype)
# cannot infer on invalid shape
except ValueError:
pass
def check_create_rsp_infer_shape(shape, density, dtype):
try:
array = rand_ndarray(shape, 'row_sparse', density=density)
data = array.data
indices = array.indices
nd = mx.nd.sparse.row_sparse_array((data, indices), dtype=dtype)
inferred_shape = nd.shape
assert(inferred_shape[1:] == data.shape[1:])
assert(indices.ndim > 0)
assert(nd.dtype == dtype)
if indices.shape[0] > 0:
assert(np.sum((inferred_shape[0] <= indices).asnumpy()) == 0)
# cannot infer on invalid shape
except ValueError:
pass
dtype = np.int32
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
densities = [0, 0.5, 1]
for density in densities:
check_create_csr_infer_shape(shape, density, dtype)
check_create_rsp_infer_shape(shape, density, dtype)
check_create_rsp_infer_shape(shape_3d, density, dtype)
def test_create_sparse_nd_from_dense():
def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
arr = f(dense_arr, dtype=dtype, ctx=ctx)
assert(same(arr.asnumpy(), np.ones(shape)))
assert(arr.dtype == dtype)
assert(arr.context == ctx)
        # verify the default dtype is inferred from the dense array
arr2 = f(dense_arr)
assert(arr2.dtype == default_dtype)
assert(arr2.context == mx.context.current_context())
shape = rand_shape_2d()
dtype = np.int32
src_dtype = np.float64
ctx = mx.cpu(1)
dense_arrs = [mx.nd.ones(shape, dtype=src_dtype), np.ones(shape, dtype=src_dtype), \
np.ones(shape, dtype=src_dtype).tolist()]
for f in [mx.nd.sparse.csr_matrix, mx.nd.sparse.row_sparse_array]:
for dense_arr in dense_arrs:
default_dtype = dense_arr.dtype if isinstance(dense_arr, (NDArray, np.ndarray)) \
else np.float32
check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx)
def test_create_sparse_nd_from_sparse():
def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx):
arr = f(sp_arr, dtype=dtype, ctx=ctx)
assert(same(arr.asnumpy(), np.ones(shape)))
assert(arr.dtype == dtype)
assert(arr.context == ctx)
        # verify the default dtype is inferred from the source sparse array
arr2 = f(sp_arr)
assert(arr2.dtype == src_dtype)
assert(arr2.context == mx.context.current_context())
shape = rand_shape_2d()
src_dtype = np.float64
dtype = np.int32
ctx = mx.cpu(1)
ones = mx.nd.ones(shape, dtype=src_dtype)
csr_arrs = [ones.tostype('csr')]
rsp_arrs = [ones.tostype('row_sparse')]
csr_sp = spsp.csr_matrix(np.ones(shape, dtype=src_dtype))
csr_arrs.append(csr_sp)
f_csr = mx.nd.sparse.csr_matrix
f_rsp = mx.nd.sparse.row_sparse_array
for sp_arr in csr_arrs:
check_create_from_sp(shape, f_csr, sp_arr, dtype, src_dtype, ctx)
for sp_arr in rsp_arrs:
check_create_from_sp(shape, f_rsp, sp_arr, dtype, src_dtype, ctx)
def test_create_sparse_nd_empty():
def check_empty(shape, stype):
arr = mx.nd.empty(shape, stype=stype)
assert(arr.stype == stype)
assert same(arr.asnumpy(), np.zeros(shape))
def check_csr_empty(shape, dtype, ctx):
arr = mx.nd.sparse.csr_matrix(shape, dtype=dtype, ctx=ctx)
assert(arr.stype == 'csr')
assert(arr.dtype == dtype)
assert(arr.context == ctx)
assert same(arr.asnumpy(), np.zeros(shape))
# check the default value for dtype and ctx
arr = mx.nd.sparse.csr_matrix(shape)
assert(arr.dtype == np.float32)
assert(arr.context == mx.context.current_context())
def check_rsp_empty(shape, dtype, ctx):
arr = mx.nd.sparse.row_sparse_array(shape, dtype=dtype, ctx=ctx)
assert(arr.stype == 'row_sparse')
assert(arr.dtype == dtype)
assert(arr.context == ctx)
assert same(arr.asnumpy(), np.zeros(shape))
# check the default value for dtype and ctx
arr = mx.nd.sparse.row_sparse_array(shape)
assert(arr.dtype == np.float32)
assert(arr.context == mx.context.current_context())
stypes = ['csr', 'row_sparse']
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
dtype = np.int32
ctx = mx.cpu(1)
for stype in stypes:
check_empty(shape, stype)
check_csr_empty(shape, dtype, ctx)
check_rsp_empty(shape, dtype, ctx)
check_rsp_empty(shape_3d, dtype, ctx)
def test_synthetic_dataset_generator():
def test_powerlaw_generator(csr_arr, final_row=1):
"""Test power law distribution
Total Elements: 32000, Number of zeros: 3200
Every row has 2 * non zero elements of the previous row.
Also since (2047 < 3200 < 4095) this will be true till 10th row"""
indices = csr_arr.indices.asnumpy()
indptr = csr_arr.indptr.asnumpy()
for row in range(1, final_row + 1):
nextrow = row + 1
current_row_nnz = indices[indptr[row] - 1] + 1
next_row_nnz = indices[indptr[nextrow] - 1] + 1
assert next_row_nnz == 2 * current_row_nnz
# Test if density is preserved
csr_arr_cols, _ = rand_sparse_ndarray(shape=(32, 10000), stype="csr",
density=0.01, distribution="powerlaw")
csr_arr_small, _ = rand_sparse_ndarray(shape=(5, 5), stype="csr",
density=0.5, distribution="powerlaw")
csr_arr_big, _ = rand_sparse_ndarray(shape=(32, 1000000), stype="csr",
density=0.4, distribution="powerlaw")
csr_arr_square, _ = rand_sparse_ndarray(shape=(1600, 1600), stype="csr",
density=0.5, distribution="powerlaw")
assert len(csr_arr_cols.data) == 3200
test_powerlaw_generator(csr_arr_cols, final_row=9)
test_powerlaw_generator(csr_arr_small, final_row=1)
test_powerlaw_generator(csr_arr_big, final_row=4)
test_powerlaw_generator(csr_arr_square, final_row=6)
def test_sparse_nd_fluent():
def check_fluent_regular(stype, func, kwargs, shape=(5, 17), equal_nan=False):
with mx.name.NameManager():
data = mx.nd.random_uniform(shape=shape, ctx=default_device()).tostype(stype)
regular = getattr(mx.ndarray, func)(data, **kwargs)
fluent = getattr(data, func)(**kwargs)
if isinstance(regular, list):
for r, f in zip(regular, fluent):
assert almost_equal(r.asnumpy(), f.asnumpy(), equal_nan=equal_nan)
else:
assert almost_equal(regular.asnumpy(), fluent.asnumpy(), equal_nan=equal_nan)
all_funcs = ['zeros_like', 'square', 'round', 'rint', 'fix', 'floor', 'ceil', 'trunc',
'abs', 'sign', 'sin', 'degrees', 'radians', 'expm1']
for func in all_funcs:
check_fluent_regular('csr', func, {})
check_fluent_regular('row_sparse', func, {})
all_funcs = ['arcsin', 'arctan', 'tan', 'sinh', 'tanh',
'arcsinh', 'arctanh', 'log1p', 'sqrt', 'relu']
for func in all_funcs:
check_fluent_regular('csr', func, {}, equal_nan=True)
check_fluent_regular('row_sparse', func, {}, equal_nan=True)
check_fluent_regular('csr', 'slice', {'begin': (2, 5), 'end': (4, 7)}, shape=(5, 17))
check_fluent_regular('row_sparse', 'clip', {'a_min': -0.25, 'a_max': 0.75})
check_fluent_regular('csr', 'clip', {'a_min': -0.25, 'a_max': 0.75})
for func in ['sum', 'mean', 'norm']:
check_fluent_regular('csr', func, {'axis': 0})
def test_sparse_nd_exception():
""" test invalid sparse operator will throw a exception """
a = mx.nd.ones((2,2))
assertRaises(mx.base.MXNetError, mx.nd.sparse.retain, a, invalid_arg="garbage_value")
assertRaises(ValueError, mx.nd.sparse.csr_matrix, a, shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.csr_matrix, (2,2), shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.row_sparse_array, (2,2), shape=(3,2))
assertRaises(ValueError, mx.nd.sparse.zeros, "invalid_stype", (2,2))
def test_sparse_nd_check_format():
""" test check_format for sparse ndarray """
shape = rand_shape_2d()
stypes = ["csr", "row_sparse"]
for stype in stypes:
arr, _ = rand_sparse_ndarray(shape, stype)
arr.check_format()
arr = mx.nd.sparse.zeros(stype, shape)
arr.check_format()
    # CSR indptr values must be non-decreasing and must not exceed the number of stored values
shape = (3, 4)
data_list = [7, 8, 9]
indices_list = [0, 2, 1]
indptr_list = [0, 5, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# CSR format indices should be in ascending order per row
indices_list = [2, 1, 1]
indptr_list = [0, 2, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
    # CSR format indptr should end with a value equal to the size of indices
indices_list = [1, 2, 1]
indptr_list = [0, 2, 2, 4]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# CSR format indices should not be negative
indices_list = [0, 2, 1]
indptr_list = [0, -2, 2, 3]
a = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
    # CSR format arrays must be 2-dimensional.
a = mx.nd.array([1, 2, 3])
assertRaises(ValueError, a.tostype, 'csr')
a = mx.nd.array([[[1, 2, 3]]])
assertRaises(ValueError, a.tostype, 'csr')
# Row Sparse format indices should be less than the number of rows
shape = (3, 2)
data_list = [[1, 2], [3, 4]]
indices_list = [1, 4]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should be in ascending order
indices_list = [1, 0]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
# Row Sparse format indices should not be negative
indices_list = [1, -2]
a = mx.nd.sparse.row_sparse_array((data_list, indices_list), shape=shape)
assertRaises(mx.base.MXNetError, a.check_format)
def test_sparse_nd_norm():
def check_sparse_nd_norm(stype, shape, density, **kwargs):
data, _ = rand_sparse_ndarray(shape, stype, density)
norm = data.norm(**kwargs)
expected_norm = data.tostype('default').norm(**kwargs)
assert_almost_equal(norm.asnumpy(), expected_norm.asnumpy())
shape = (5, 5)
stypes = ['row_sparse', 'csr']
densities = [0, 0.5, 1]
for stype in stypes:
for density in densities:
check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=False, ord=2)
# test fallback
check_sparse_nd_norm(stype, shape, density, axis=0, keepdims=False, ord=2)
check_sparse_nd_norm(stype, shape, density, axis=None, keepdims=True, ord=2)
def test_sparse_fc():
def check_sparse_fc(batch_size, dim_in, dim_out, stype):
data = rand_ndarray((batch_size, dim_in), stype, density=0.5)
weight = rand_ndarray((dim_out, dim_in), 'row_sparse', density=1)
bias = rand_ndarray((dim_out, 1), 'row_sparse', density=1)
out = mx.nd.sparse.FullyConnected(data, weight, num_hidden=dim_out, bias=bias)
data_dns = data.tostype('default')
weight_dns = weight.tostype('default')
out_dns = mx.nd.FullyConnected(data_dns, weight_dns, num_hidden=dim_out, bias=bias)
assert_almost_equal(out.asnumpy(), out_dns.asnumpy())
# test FC with row_sparse weight w/ density=1, dense data
check_sparse_fc(5, 10, 8, 'default')
# test FC with row_sparse weight w/ density=1, csr data (fallback)
check_sparse_fc(5, 10, 8, 'csr')
def test_sparse_take():
def check_sparse_take(density, mode):
data_shape = rand_shape_2d()
idx_shape = (np.random.randint(low=1, high=10),)
data = rand_ndarray(data_shape, 'csr', density=density).astype('int32')
idx = mx.nd.array(np.random.randint(low=-5, high=15, size=idx_shape))
data_np = data.asnumpy()
idx_np = idx.asnumpy().astype('int32')
expected_result = np.take(data_np, idx_np, mode=mode, axis=0)
result = mx.nd.take(data, idx, mode=mode)
assert_almost_equal(result.asnumpy(), expected_result)
assert result.indptr[0].asscalar() == 0
densities = [0, 0.5, 1]
modes = ['clip', 'wrap']
for d in densities:
for m in modes:
check_sparse_take(d, m)
def test_sparse_getnnz():
    if default_device().device_type == 'gpu':
return
def check_sparse_getnnz(density, axis):
shape = rand_shape_2d()
data = rand_ndarray(shape, 'csr', density=density)
data_sp = data.asscipy()
result = mx.nd.contrib.getnnz(data, axis=axis)
expected_result = data_sp.getnnz(axis=axis)
assert_almost_equal(result.asnumpy(), expected_result)
densities = [0, 0.5, 1]
axis = [1, None]
for d in densities:
for a in axis:
check_sparse_getnnz(d, a)
| {
"content_hash": "092b208ae46b4e3b2edda4f28800b3ee",
"timestamp": "",
"source": "github",
"line_count": 1024,
"max_line_length": 119,
"avg_line_length": 39.5703125,
"alnum_prop": 0.5875370187561698,
"repo_name": "apache/incubator-mxnet",
"id": "750505c1eb4590cd3ab79473f7c25c3a94688f25",
"size": "41306",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/unittest/test_sparse_ndarray.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151356"
},
{
"name": "C++",
"bytes": "12059300"
},
{
"name": "CMake",
"bytes": "213440"
},
{
"name": "Cuda",
"bytes": "1528224"
},
{
"name": "Cython",
"bytes": "26285"
},
{
"name": "Dockerfile",
"bytes": "54893"
},
{
"name": "Groovy",
"bytes": "132682"
},
{
"name": "Jupyter Notebook",
"bytes": "1889643"
},
{
"name": "Makefile",
"bytes": "8991"
},
{
"name": "PowerShell",
"bytes": "6699"
},
{
"name": "Python",
"bytes": "8626713"
},
{
"name": "Shell",
"bytes": "172547"
}
],
"symlink_target": ""
} |
import numpy, scipy
import scipy.stats
# BCES fitting
# ===============
def bces(y1,y1err,y2,y2err,cerr):
"""
Does the entire regression calculation for 4 slopes:
OLS(Y|X), OLS(X|Y), bisector, orthogonal.
Fitting form: Y=AX+B.
Usage:
>>> a,b,aerr,berr,covab=bces(x,xerr,y,yerr,cov)
Output:
- a,b : best-fit parameters a,b of the linear regression
- aerr,berr : the standard deviations in a,b
- covab : the covariance between a and b (e.g. for plotting confidence bands)
Arguments:
- x,y : data
- xerr,yerr: measurement errors affecting x and y
- cov : covariance between the measurement errors
(all are arrays)
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
# Arrays holding the code main results for each method:
# Elements: 0-Y|X, 1-X|Y, 2-bisector, 3-orthogonal
a,b,avar,bvar,covarxiz,covar_ba=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4),numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
# Lists holding the xi and zeta arrays for each method above
xi,zeta=[],[]
# Calculate sigma's for datapoints using length of conf. intervals
sig11var = numpy.mean( y1err**2 )
sig22var = numpy.mean( y2err**2 )
sig12var = numpy.mean( cerr )
# Covariance of Y1 (X) and Y2 (Y)
covar_y1y2 = numpy.mean( (y1-y1.mean())*(y2-y2.mean()) )
# Compute the regression slopes
a[0] = (covar_y1y2 - sig12var)/(y1.var() - sig11var) # Y|X
a[1] = (y2.var() - sig22var)/(covar_y1y2 - sig12var) # X|Y
a[2] = ( a[0]*a[1] - 1.0 + numpy.sqrt((1.0 + a[0]**2)*(1.0 + a[1]**2)) ) / (a[0]+a[1]) # bisector
if covar_y1y2<0:
sign = -1.
else:
sign = 1.
a[3] = 0.5*((a[1]-(1./a[0])) + sign*numpy.sqrt(4.+(a[1]-(1./a[0]))**2)) # orthogonal
# Compute intercepts
for i in range(4):
b[i]=y2.mean()-a[i]*y1.mean()
# Set up variables to calculate standard deviations of slope/intercept
xi.append( ( (y1-y1.mean()) * (y2-a[0]*y1-b[0]) + a[0]*y1err**2 ) / (y1.var()-sig11var) ) # Y|X
xi.append( ( (y2-y2.mean()) * (y2-a[1]*y1-b[1]) - y2err**2 ) / covar_y1y2 ) # X|Y
xi.append( xi[0] * (1.+a[1]**2)*a[2] / ((a[0]+a[1])*numpy.sqrt((1.+a[0]**2)*(1.+a[1]**2))) + xi[1] * (1.+a[0]**2)*a[2] / ((a[0]+a[1])*numpy.sqrt((1.+a[0]**2)*(1.+a[1]**2))) ) # bisector
xi.append( xi[0] * a[3]/(a[0]**2*numpy.sqrt(4.+(a[1]-1./a[0])**2)) + xi[1]*a[3]/numpy.sqrt(4.+(a[1]-1./a[0])**2) ) # orthogonal
for i in range(4):
zeta.append( y2 - a[i]*y1 - y1.mean()*xi[i] )
for i in range(4):
# Calculate variance for all a and b
avar[i]=xi[i].var()/xi[i].size
bvar[i]=zeta[i].var()/zeta[i].size
# Sample covariance obtained from xi and zeta (paragraph after equation 15 in AB96)
covarxiz[i]=numpy.mean( (xi[i]-xi[i].mean()) * (zeta[i]-zeta[i].mean()) )
# Covariance between a and b (equation after eq. 15 in AB96)
covar_ab=covarxiz/y1.size
return a,b,numpy.sqrt(avar),numpy.sqrt(bvar),covar_ab
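# Hedged usage sketch (editor's addition, not part of the original module):
# fit synthetic data with a known slope and intercept. The noise levels and
# sample size below are arbitrary illustration values.
def _bces_example():
    rng = numpy.random.RandomState(42)
    x = numpy.linspace(1.0, 10.0, 50)
    y = 2.0*x + 1.0 + rng.normal(0.0, 0.5, 50)
    xerr = 0.3*numpy.ones(50)
    yerr = 0.5*numpy.ones(50)
    cov = numpy.zeros(50)  # assume uncorrelated x/y errors
    a, b, aerr, berr, covab = bces(x, xerr, y, yerr, cov)
    # a[0], b[0] are the OLS(Y|X) estimates; indices 1-3 hold OLS(X|Y),
    # bisector and orthogonal, in the order documented above.
    return a, b, aerr, berr, covab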
def bootstrap(v):
"""
Constructs Monte Carlo simulated data set using the
Bootstrap algorithm.
Usage:
>>> bootstrap(x)
where x is either an array or a list of arrays. If it is a
list, the code returns the corresponding list of bootstrapped
	arrays assuming that the same position in these arrays maps the
same "physical" object.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
if type(v)==list:
vboot=[] # list of boostrapped arrays
n=v[0].size
iran=scipy.random.randint(0,n,n) # Array of random indexes
for x in v: vboot.append(x[iran])
else: # if v is an array, not a list of arrays
n=v.size
iran=scipy.random.randint(0,n,n) # Array of random indexes
vboot=v[iran]
return vboot
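# Hedged illustration (editor's addition): passing a list keeps the pairing
# between arrays, so resampled x and y values stay matched element-wise.
def _bootstrap_example():
    x = numpy.arange(5.0)
    y = 10.0*x
    xb, yb = bootstrap([x, y])
    return xb, yb  # still satisfies yb == 10*xb for every element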
def bcesboot(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Does the BCES with bootstrapping.
Usage:
>>> a,b,aerr,berr,covab=bcesboot(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b -- best-fit parameters a,b of the linear regression
:returns: aerr,berr -- the standard deviations in a,b
:returns: covab -- the covariance between a and b (e.g. for plotting confidence bands)
	.. note:: this method is considerably slower than bces_regress.f and could be optimized, e.g. by wrapping the Fortran routine with f2py.
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
# Progress bar initialization
"""
	My convention for storing the results of the BCES code below as
	matrices for later processing is as follows:
simulation\method y|x x|y bisector orthogonal
sim0 ...
Am = sim1 ...
sim2 ...
sim3 ...
"""
for i in range(nsim):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=numpy.vstack((am,asim))
bm=numpy.vstack((bm,bsim))
# Progress bar
# Bootstrapping results
a=numpy.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=numpy.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
for i in range(4):
erra[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( numpy.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
return a,b,erra,errb,covab
def bcesboot_backup(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Does the BCES with bootstrapping.
Usage:
>>> a,b,aerr,berr,covab=bcesboot(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b -- best-fit parameters a,b of the linear regression
:returns: aerr,berr -- the standard deviations in a,b
:returns: covab -- the covariance between a and b (e.g. for plotting confidence bands)
	.. note:: this method is considerably slower than bces_regress.f and could be optimized, e.g. by wrapping the Fortran routine with f2py.
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
import fish
# Progress bar initialization
peixe = fish.ProgressFish(total=nsim)
print "Bootstrapping progress:"
"""
	My convention for storing the results of the BCES code below as
	matrices for later processing is as follows:
simulation\method y|x x|y bisector orthogonal
sim0 ...
Am = sim1 ...
sim2 ...
sim3 ...
"""
for i in range(nsim):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=numpy.vstack((am,asim))
bm=numpy.vstack((bm,bsim))
# Progress bar
peixe.animate(amount=i)
# Bootstrapping results
a=numpy.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=numpy.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
for i in range(4):
erra[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( numpy.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
return a,b,erra,errb,covab
# Methods which make use of parallelization
# ===========================================
def ab(x):
"""
This method is the big bottleneck of the parallel BCES code. That's the
reason why I put these calculations in a separate method, in order to
distribute this among the cores. In the original BCES method, this is
inside the main routine.
Argument:
[y1,y1err,y2,y2err,cerr,nsim]
where nsim is the number of bootstrapping trials sent to each core.
	:returns: am,bm : the matrices of slopes and intercepts, where each row corresponds to a bootstrap trial and each column to a different BCES method (orthogonal, y|x etc.).
Be very careful and do not use lambda functions when calling this
method and passing it to multiprocessing or ipython.parallel!
I spent >2 hours figuring out why the code was not working until I
realized the reason was the use of lambda functions.
"""
y1,y1err,y2,y2err,cerr,nsim=x[0],x[1],x[2],x[3],x[4],x[5]
for i in range(nsim):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=numpy.vstack((am,asim))
bm=numpy.vstack((bm,bsim))
return am,bm
def bcesp(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Parallel implementation of the BCES with bootstrapping.
Divide the bootstraps equally among the threads (cores) of
the machine. It will automatically detect the number of
cores available.
Usage:
>>> a,b,aerr,berr,covab=bcesp(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b - best-fit parameters a,b of the linear regression
:returns: aerr,berr - the standard deviations in a,b
:returns: covab - the covariance between a and b (e.g. for plotting confidence bands)
	.. seealso:: Check out ~/work/projects/playground/parallel python/bcesp.py for the original testing code. I deleted some lines from there to make the "production" version.
* v1 Mar 2012: serial version ported from bces_regress.f. Added covariance output.
* v2 May 3rd 2012: parallel version ported from nemmen.bcesboot.
.. codeauthor: Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
import time # for benchmarking
import multiprocessing
print "BCES,", nsim,"trials... ",
tic=time.time()
# Find out number of cores available
ncores=multiprocessing.cpu_count()
# We will divide the processing into how many parts?
n=2*ncores
"""
Must create lists that will be distributed among the many
cores with structure
core1 <- [y1,y1err,y2,y2err,cerr,nsim/n]
core2 <- [y1,y1err,y2,y2err,cerr,nsim/n]
etc...
"""
pargs=[] # this is a list of lists!
for i in range(n):
pargs.append([y1,y1err,y2,y2err,cerr,nsim/n])
# Initializes the parallel engine
pool = multiprocessing.Pool(processes=ncores) # multiprocessing package
"""
Each core processes ab(input)
return matrixes Am,Bm with the results of nsim/n
presult[i][0] = Am with nsim/n lines
presult[i][1] = Bm with nsim/n lines
"""
presult=pool.map(ab, pargs) # multiprocessing
pool.close() # close the parallel engine
# vstack the matrixes processed from all cores
i=0
for m in presult:
if i==0:
# Initialize the matrixes
am,bm=m[0].copy(),m[1].copy()
else:
am=numpy.vstack((am,m[0]))
bm=numpy.vstack((bm,m[1]))
i=i+1
# Computes the bootstrapping results on the stacked matrixes
a=numpy.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=numpy.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
for i in range(4):
erra[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( numpy.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
print "%f s" % (time.time() - tic)
return a,b,erra,errb,covab
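# Hedged usage sketch (editor's addition): bcesp() is a drop-in replacement
# for bcesboot() with the same arguments and return values; only the
# bootstrap trials are split across the available cores.
def _bcesp_example(x, xerr, y, yerr, cov):
    return bcesp(x, xerr, y, yerr, cov, nsim=4000)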
| {
"content_hash": "f26fc4cf3609a2679c5facbf4a159ed1",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 186,
"avg_line_length": 33.63636363636363,
"alnum_prop": 0.6572481572481572,
"repo_name": "Delosari/dazer",
"id": "317df809e4e37c78a11c2a22dbad3757fec7e4cb",
"size": "12210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/lib/Math_Libraries/bces_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4544790"
}
],
"symlink_target": ""
} |
"""Utilities for querying Microsoft Visual Studio settings.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 Lewis Baker, Stuart McMahon.
@license: Licensed under the MIT license.
"""
import json
import os
import subprocess
import _winreg as winreg
import codecs
import cake.path
import cake.system
from cake.registry import queryString, KEY_WOW64_32KEY
def getMsvsInstallDir(version=r'VisualStudio\8.0'):
"""Returns the MSVS install directory.
Typically: 'C:\Program Files\Microsoft Visual Studio 8\Common7\IDE'.
@param version: The registry path used to search for MSVS.
@type version: string
@return: The path to the MSVS install directory.
@rtype: string
@raise WindowsError: If MSVS is not installed.
"""
subKey = r"SOFTWARE\Microsoft\%s" % version
return queryString(winreg.HKEY_LOCAL_MACHINE, subKey, "InstallDir")
def getMsvsProductDir(version=r'VisualStudio\8.0'):
"""Returns the MSVS product directory.
Typically: 'C:\Program Files\Microsoft Visual Studio 8\'.
@param version: The registry path used to search for MSVS.
@type version: string
@return: The path to the MSVS product directory.
@rtype: string
@raise WindowsError: If MSVS is not installed.
"""
subKey = r"SOFTWARE\Microsoft\%s\Setup\VS" % version
return queryString(winreg.HKEY_LOCAL_MACHINE, subKey, "ProductDir")
def getMsvcProductDir(version=r'VisualStudio\8.0'):
"""Returns the MSVC product directory as obtained from the registry.
Typically: 'C:\Program Files\Microsoft Visual Studio 8\VC'.
@param version: The registry path used to search for MSVS.
@type version: string
@return: The path to the MSVC product directory.
@rtype: string
@raise WindowsError: If MSVC is not installed.
"""
subKey = r"SOFTWARE\Microsoft\%s\Setup\VC" % version
return queryString(winreg.HKEY_LOCAL_MACHINE, subKey, "ProductDir")
def getDefaultPlatformSdkDir():
"""Returns the Microsoft Platform SDK directory.
@return: The path to the Platform SDK directory.
@rtype: string
@raise WindowsError: If the Platform SDK is not installed.
"""
subKey = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows"
return queryString(winreg.HKEY_LOCAL_MACHINE, subKey, "CurrentInstallFolder")
def getPlatformSdkVersions():
"""Returns a list of the installed Microsoft Platform SDK versions.
@return: A list of (key, productVersion, path) tuples sorted in reverse
order of product version.
@rtype: list of (str, tuple of int, string) tuples.
"""
key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows"
  # Only bother looking in the 32-bit registry, as all Platform SDKs register
  # there, whereas only some are registered in the 64-bit registry.
if cake.system.isWindows64():
sam = winreg.KEY_READ | KEY_WOW64_32KEY
else:
sam = winreg.KEY_READ
try:
keyHandle = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key, 0, sam)
except WindowsError:
return []
results = []
try:
subKeyCount, valueCount, timestamp = winreg.QueryInfoKey(keyHandle)
for i in xrange(subKeyCount):
name = winreg.EnumKey(keyHandle, i)
subKeyHandle = winreg.OpenKey(keyHandle, name, 0, sam)
try:
try:
installDir = str(winreg.QueryValueEx(subKeyHandle, "InstallationFolder")[0])
productVersion = str(winreg.QueryValueEx(subKeyHandle, "ProductVersion")[0])
except WindowsError:
continue
finally:
winreg.CloseKey(subKeyHandle)
productVersionTuple = tuple(int(s) if s.isdigit() else None for s in productVersion.split("."))
results.append((name, productVersionTuple, installDir))
finally:
winreg.CloseKey(keyHandle)
results.sort(key=(lambda x: x[1]), reverse=True)
return results
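# Hedged usage sketch (editor's addition, not part of the original module):
# pick the install path of the newest registered Platform SDK, if any.
def _newestPlatformSdkPath():
  versions = getPlatformSdkVersions()
  return versions[0][2] if versions else None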
def getPlatformSdkDir(version=None):
"""Returns the directory of the specified Microsoft Platform SDK version.
@param version: The Platform SDK version to search for.
@type version: string
@raise WindowsError: If this version of the Platform SDK is not installed.
"""
if version:
subKey = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\%s" % version
valueName = "InstallationFolder"
else:
subKey = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows"
valueName = "CurrentInstallFolder"
return queryString(winreg.HKEY_LOCAL_MACHINE, subKey, valueName)
def getWindowsKitsDir(version='80'):
"""Returns the Microsoft Windows Kit directory.
@param version: The version of the SDK to look-up.
@type version: string
@return: The path to the Windows Kit directory.
@rtype: string
@raise WindowsError: If this version of the Platform SDK is not installed.
"""
subKey = r"SOFTWARE\Microsoft\Windows Kits\Installed Roots"
valueName = 'KitsRoot' if version == '80' else 'KitsRoot' + version
return queryString(winreg.HKEY_LOCAL_MACHINE, subKey, valueName)
def getDotNetFrameworkSdkDir(version='2.0'):
"""Looks up the path of the Microsoft .NET Framework SDK directory.
@param version: The .NET Framework version to search for.
@type version: string
@return: The path to the .NET Framework SDK root directory.
@rtype: string
@raise WindowsError: If the .NET Framework SDK is not installed.
"""
subKey = r"SOFTWARE\Microsoft\.NETFramework"
valueName = "sdkInstallRootv" + version
return queryString(winreg.HKEY_LOCAL_MACHINE, subKey, valueName)
try:
import ctypes
import ctypes.wintypes
_GetConsoleOutputCP = ctypes.windll.kernel32.GetConsoleOutputCP
_GetConsoleOutputCP.argtypes = []
_GetConsoleOutputCP.restype = ctypes.wintypes.UINT
# Constructed from translating between:
# https://docs.microsoft.com/en-gb/windows/desktop/Intl/code-page-identifiers
# and
# https://docs.python.org/2.4/lib/standard-encodings.html
_codepageToCodec = {
950 : "big5",
1200 : "utf_16_le",
1201 : "utf_16_be",
12000 : "utf_32_le",
12001 : "utf_32_be",
20127 : "us-ascii",
28591 : "latin_1",
28592 : "iso8859_2",
28593 : "iso8859_3",
28594 : "iso8859_4",
28595 : "iso8859_5",
28596 : "iso8859_6",
28597 : "iso8859_7",
28598 : "iso8859_8",
28599 : "iso8859_9",
28603 : "iso8859_13",
28605 : "iso8859_15",
65000 : "utf_7",
65001 : "utf_8",
}
def _getCodecFromCodepage():
codepage = _GetConsoleOutputCP()
codecName = _codepageToCodec.get(codepage, None)
if codecName is None:
codecName = "cp{0:03}".format(codepage)
try:
return codecs.lookup(codecName)
except LookupError:
return None
except Exception:
def _getCodecFromCodepage():
return None
def vswhere(args=[]):
"""Helper function for running vswhere helper utility and parsing the output.
The vswhere utility can be used to find the installation locations of Visual Studio 2017 or later.
It can also be used to find older install locations by passing "-legacy" as an argument.
@return: An array of dictionaries containing information about each installation.
@raise EnvironmentError:
If there was a problem running vswhere with the provided arguments.
"""
if cake.system.isWindows64():
programFiles = os.environ.get('ProgramFiles(x86)', r'C:\Program Files (x86)')
else:
programFiles = os.environ.get('ProgramFiles', r'C:\Program Files')
vsInstaller = cake.path.join(programFiles, 'Microsoft Visual Studio', 'Installer')
vsWherePath = cake.path.join(vsInstaller, 'vswhere.exe')
if not os.path.isfile(vsWherePath):
raise EnvironmentError("vswhere not found at " + vsWherePath)
p = subprocess.Popen(
args=["vswhere", "-format", "json", "-utf8"] + args,
executable=vsWherePath,
cwd=vsInstaller,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
)
out, err = p.communicate(input=b"")
codec = codecs.lookup("utf_8")
if p.returncode != 0:
# Probably failed because it's an old version of vswhere that doesn't support
# -utf8 flag. Let's try using it without -utf8 and then use whatever the current
# Windows codepage is to decode it.
p = subprocess.Popen(
args=["vswhere", "-format", "json"] + args,
executable=vsWherePath,
cwd=vsInstaller,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
)
out, err = p.communicate(input=b"")
codec = _getCodecFromCodepage()
if codec is None:
# Fall back to ASCII if we couldn't figure out the codec for
# the current Windows codepage.
codec = codecs.lookup("ascii")
if p.returncode != 0:
raise EnvironmentError("vswhere: returned with exit code " + str(p.returncode) + "\n" + out)
return json.loads(codec.decode(out, 'replace')[0])
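# Hedged usage sketch (editor's addition, not part of the original module):
# list every Visual Studio install vswhere knows about, including pre-2017
# ones via the "-legacy" flag mentioned in the docstring above, and collect
# the reported install paths ("installationPath" is the field vswhere
# normally includes for each entry).
def _listVisualStudioInstallPaths():
  installs = vswhere(["-legacy"])
  return [i.get("installationPath") for i in installs]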
| {
"content_hash": "61171c7ad8ed7a042394d691d4a01bf7",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 101,
"avg_line_length": 33.226765799256505,
"alnum_prop": 0.6882971582009398,
"repo_name": "lewissbaker/cake",
"id": "890448746f205d91babcb7928b2a7a8ad71b1a6d",
"size": "8938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cake/msvs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "228"
},
{
"name": "C#",
"bytes": "2856"
},
{
"name": "C++",
"bytes": "1787"
},
{
"name": "CoffeeScript",
"bytes": "2315"
},
{
"name": "Python",
"bytes": "552156"
}
],
"symlink_target": ""
} |
import asyncio
import json
import logging
import os
import aiohttp
import questionary
from aiohttp import ClientTimeout
from prompt_toolkit.styles import Style
from typing import Any, Generator
from typing import Text, Optional, Dict, List
import rasa.shared.utils.cli
import rasa.shared.utils.io
from rasa.cli import utils as cli_utils
from rasa.core import utils
from rasa.core.channels.rest import RestInput
from rasa.core.constants import DEFAULT_SERVER_URL
from rasa.shared.constants import INTENT_MESSAGE_PREFIX
from rasa.shared.utils.io import DEFAULT_ENCODING
logger = logging.getLogger(__name__)
STREAM_READING_TIMEOUT_ENV = "RASA_SHELL_STREAM_READING_TIMEOUT_IN_SECONDS"
DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS = 10
def print_buttons(
message: Dict[Text, Any],
is_latest_message: bool = False,
color: Text = rasa.shared.utils.io.bcolors.OKBLUE,
) -> Optional[questionary.Question]:
if is_latest_message:
choices = cli_utils.button_choices_from_message_data(
message, allow_free_text_input=True
)
question = questionary.select(
message.get("text"),
choices,
style=Style([("qmark", "#6d91d3"), ("", "#6d91d3"), ("answer", "#b373d6")]),
)
return question
else:
rasa.shared.utils.cli.print_color("Buttons:", color=color)
for idx, button in enumerate(message.get("buttons")):
rasa.shared.utils.cli.print_color(
cli_utils.button_to_string(button, idx), color=color
)
return None
def _print_bot_output(
message: Dict[Text, Any],
is_latest_message: bool = False,
color: Text = rasa.shared.utils.io.bcolors.OKBLUE,
) -> Optional[questionary.Question]:
if "buttons" in message:
question = print_buttons(message, is_latest_message, color)
if question:
return question
if "text" in message:
rasa.shared.utils.cli.print_color(message["text"], color=color)
if "image" in message:
rasa.shared.utils.cli.print_color("Image: " + message["image"], color=color)
if "attachment" in message:
rasa.shared.utils.cli.print_color(
"Attachment: " + message["attachment"], color=color
)
if "elements" in message:
rasa.shared.utils.cli.print_color("Elements:", color=color)
for idx, element in enumerate(message["elements"]):
rasa.shared.utils.cli.print_color(
cli_utils.element_to_string(element, idx), color=color
)
if "quick_replies" in message:
rasa.shared.utils.cli.print_color("Quick Replies:", color=color)
for idx, element in enumerate(message["quick_replies"]):
rasa.shared.utils.cli.print_color(
cli_utils.button_to_string(element, idx), color=color
)
if "custom" in message:
rasa.shared.utils.cli.print_color("Custom json:", color=color)
rasa.shared.utils.cli.print_color(
json.dumps(message["custom"], indent=2), color=color
)
return None
def _get_user_input(previous_response: Optional[Dict[str, Any]]) -> Optional[Text]:
button_response = None
if previous_response is not None:
button_response = _print_bot_output(previous_response, is_latest_message=True)
if button_response is not None:
response = cli_utils.payload_from_button_question(button_response)
if response == cli_utils.FREE_TEXT_INPUT_PROMPT:
# Re-prompt user with a free text input
response = _get_user_input({})
else:
response = questionary.text(
"",
qmark="Your input ->",
style=Style([("qmark", "#b373d6"), ("", "#b373d6")]),
).ask()
return response.strip() if response is not None else None
async def send_message_receive_block(
server_url: Text, auth_token: Text, sender_id: Text, message: Text
) -> List[Dict[Text, Any]]:
payload = {"sender": sender_id, "message": message}
url = f"{server_url}/webhooks/rest/webhook?token={auth_token}"
async with aiohttp.ClientSession() as session:
async with session.post(url, json=payload, raise_for_status=True) as resp:
return await resp.json()
async def _send_message_receive_stream(
server_url: Text, auth_token: Text, sender_id: Text, message: Text
) -> Generator[Dict[Text, Any], None, None]:
payload = {"sender": sender_id, "message": message}
url = f"{server_url}/webhooks/rest/webhook?stream=true&token={auth_token}"
# Define timeout to not keep reading in case the server crashed in between
timeout = _get_stream_reading_timeout()
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(url, json=payload, raise_for_status=True) as resp:
async for line in resp.content:
if line:
yield json.loads(line.decode(DEFAULT_ENCODING))
def _get_stream_reading_timeout() -> ClientTimeout:
timeout_in_seconds = int(
os.environ.get(
STREAM_READING_TIMEOUT_ENV, DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS
)
)
return ClientTimeout(timeout_in_seconds)
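# Editor's note (hedged example, not part of the original module): the read
# timeout honours STREAM_READING_TIMEOUT_ENV, so a slow server can be given
# more time without code changes, e.g.:
#
#   os.environ[STREAM_READING_TIMEOUT_ENV] = "30"
#   assert _get_stream_reading_timeout().total == 30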
async def record_messages(
sender_id: Text,
server_url: Text = DEFAULT_SERVER_URL,
auth_token: Text = "",
max_message_limit: Optional[int] = None,
use_response_stream: bool = True,
) -> int:
"""Read messages from the command line and print bot responses."""
exit_text = INTENT_MESSAGE_PREFIX + "stop"
rasa.shared.utils.cli.print_success(
"Bot loaded. Type a message and press enter "
"(use '{}' to exit): ".format(exit_text)
)
num_messages = 0
previous_response = None
await asyncio.sleep(0.5) # Wait for server to start
while not utils.is_limit_reached(num_messages, max_message_limit):
text = _get_user_input(previous_response)
if text == exit_text or text is None:
break
if use_response_stream:
bot_responses = _send_message_receive_stream(
server_url, auth_token, sender_id, text
)
previous_response = None
async for response in bot_responses:
if previous_response is not None:
_print_bot_output(previous_response)
previous_response = response
else:
bot_responses = await send_message_receive_block(
server_url, auth_token, sender_id, text
)
previous_response = None
for response in bot_responses:
if previous_response is not None:
_print_bot_output(previous_response)
previous_response = response
num_messages += 1
        await asyncio.sleep(0)  # Yield the event loop to other coroutines
return num_messages
class CmdlineInput(RestInput):
@classmethod
def name(cls) -> Text:
return "cmdline"
def url_prefix(self) -> Text:
return RestInput.name()
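# Hedged usage sketch (editor's addition, not part of the channel module):
# drive the console loop against a locally running Rasa server; the sender
# id and server URL below are illustrative values.
def _run_console_example():
    import uuid
    return asyncio.run(
        record_messages(
            sender_id=uuid.uuid4().hex,
            server_url="http://localhost:5005",
            use_response_stream=False,
        )
    )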
| {
"content_hash": "0aa52004dd13a8a8784043b896f6bae4",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 88,
"avg_line_length": 33.822966507177036,
"alnum_prop": 0.6343188569811855,
"repo_name": "RasaHQ/rasa_nlu",
"id": "f1f00576b2e2a118a7d2506e1c60bc0b0edda237",
"size": "7122",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/core/channels/console.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
} |
import argparse
import codecs
import sys
from collections import defaultdict
from itertools import izip
def utf8read(f):
return codecs.open(f, 'r', 'utf-8')
class Ngram:
"""
Simple ngram model
"""
def __init__(self, formFile, lemmaFile, ngram):
self.gramStats(formFile, lemmaFile, ngram)
def gramStats(self, formFile, lemmaFile, ngram):
        # One dictionary per n-gram order, mapping lemma n-grams to counts of
        # surface-form n-grams. A list comprehension is used so each order gets
        # its own defaultdict (list multiplication would alias one shared dict).
        self.lemmaStats = [defaultdict(lambda: defaultdict(float)) for _ in range(ngram)]
for words, lemmas in izip(utf8read(formFile),utf8read(lemmaFile)):
            # Collect n-gram counts keyed by the lemma n-gram
words = words.rstrip().lower().split()
lemmas = lemmas.rstrip().lower().split()
# Collect stats for n-grams
for i in xrange(1, ngram + 1):
for n in range(len(lemmas) + 1 - i):
self.lemmaStats[i-1][tuple(lemmas[n:n+i])][tuple(words[n:n+i])] += 1
return self.lemmaStats
def backoffLM(self, lemma):
if tuple(lemma) not in self.lemmaStats[len(lemma)-1]:
if len(lemma) == 1:
return lemma
return self.backoffLM(lemma[0:len(lemma)-1])
return sorted(self.lemmaStats[len(lemma)-1][tuple(lemma)].iteritems(), key=lambda (k,v): v,reverse=True)[0][0]
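# Hedged illustration (editor's addition, not in the original script):
# lemmaStats[n-1] maps an n-gram of lemmas (a tuple) to a counter of
# surface-form n-grams, so backoffLM simply retries with one fewer lemma
# until a key is found, and finally returns the lemma itself.
def _inspect_stats(model, lemma_tuple):
    # 'model' is an Ngram instance; 'lemma_tuple' is e.g. ('be', 'happy').
    level = model.lemmaStats[len(lemma_tuple) - 1]
    return level.get(lemma_tuple, {})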
def main():
PARSER = argparse.ArgumentParser(description="Inflect a lemmatized corpus with n-gram model")
PARSER.add_argument("-t", type=str, default="data/train", help="training data prefix")
PARSER.add_argument("-l", type=str, default="lemma", help="lemma file suffix")
PARSER.add_argument("-w", type=str, default="form", help="word file suffix")
PARSER.add_argument("-n", type=int, default=2, help="n-gram model(default 2 to be bigram)")
PARSER.add_argument("-d", type=str, default="data/dtest", help="test file")
args = PARSER.parse_args()
# Python sucks at UTF-8
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
ng = Ngram(args.t + "." + args.w, args.t + "." + args.l, args.n)
for line in utf8read(args.d + "." + args.l):
line = line.rstrip().lower().split()
sen = ""
i = 0
while i < len(line):
lem = ng.backoffLM(line[i:min(i+args.n, len(line))])
i += len(lem)
for le in lem:
sen += le + " "
print sen.rstrip()
if __name__ == '__main__':
main() | {
"content_hash": "976a45400a280b85f7d8ee64ce13f78d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 118,
"avg_line_length": 38.328125,
"alnum_prop": 0.5923359152058704,
"repo_name": "Nero-Hu/mt",
"id": "999e94d3c38122f098352c80f56cba185a76aff9",
"size": "2500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inflect/ngram.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Eiffel",
"bytes": "9638684"
},
{
"name": "FORTRAN",
"bytes": "11607801"
},
{
"name": "Forth",
"bytes": "133"
},
{
"name": "Python",
"bytes": "118389"
},
{
"name": "TeX",
"bytes": "75173"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import tempfile
import shutil
import utils
import logging
import fcntl
from urlparse import urlparse
from zips import UnzipUtil
from hashes import HashUtil
from cache import DirectoryCacheManager
from downloads import Downloader
from downloads import CurlDownloader
from utils import safe_makedirs
from utils import find_git_url
from utils import wrap
_log = logging.getLogger('cloudfoundry')
class CloudFoundryUtil(object):
@staticmethod
def initialize():
# set stdout as non-buffered
if hasattr(sys.stdout, 'fileno'):
            fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
            fl |= os.O_DSYNC
            fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
ctx = utils.FormattedDict()
# Add environment variables
for key, val in os.environ.iteritems():
ctx[key] = wrap(val)
# Convert JSON env variables
ctx['VCAP_APPLICATION'] = json.loads(ctx.get('VCAP_APPLICATION',
wrap('{}')))
ctx['VCAP_SERVICES'] = json.loads(ctx.get('VCAP_SERVICES', wrap('{}')))
# Build Pack Location
ctx['BP_DIR'] = os.path.dirname(os.path.dirname(sys.argv[0]))
# User's Application Files, build droplet here
ctx['BUILD_DIR'] = sys.argv[1]
# Cache space for the build pack
ctx['CACHE_DIR'] = (len(sys.argv) == 3) and sys.argv[2] or None
# Temp space
if 'TMPDIR' not in ctx.keys():
ctx['TMPDIR'] = tempfile.gettempdir()
# Make sure cache & build directories exist
if not os.path.exists(ctx['BUILD_DIR']):
os.makedirs(ctx['BUILD_DIR'])
if ctx['CACHE_DIR'] and not os.path.exists(ctx['CACHE_DIR']):
os.makedirs(ctx['CACHE_DIR'])
# Add place holder for extensions
ctx['EXTENSIONS'] = []
# Init Logging
CloudFoundryUtil.init_logging(ctx)
_log.info('CloudFoundry Initialized.')
_log.debug("CloudFoundry Context Setup [%s]", ctx)
# Git URL, if one exists
ctx['BP_GIT_URL'] = find_git_url(ctx['BP_DIR'])
_log.info('Build Pack Version: %s', ctx['BP_GIT_URL'])
return ctx
@staticmethod
def init_logging(ctx):
logFmt = '%(asctime)s [%(levelname)s] %(name)s - %(message)s'
if ctx.get('BP_DEBUG', False):
logging.basicConfig(level=logging.DEBUG, format=logFmt)
else:
logLevelStr = ctx.get('BP_LOG_LEVEL', 'INFO')
logLevel = getattr(logging, logLevelStr, logging.INFO)
logDir = os.path.join(ctx['BUILD_DIR'], '.bp', 'logs')
safe_makedirs(logDir)
logging.basicConfig(level=logLevel, format=logFmt,
filename=os.path.join(logDir, 'bp.log'))
@staticmethod
def load_json_config_file_from(folder, cfgFile):
return CloudFoundryUtil.load_json_config_file(os.path.join(folder,
cfgFile))
@staticmethod
def load_json_config_file(cfgPath):
if os.path.exists(cfgPath):
_log.debug("Loading config from [%s]", cfgPath)
with open(cfgPath, 'rt') as cfgFile:
try:
return json.load(cfgFile)
except ValueError, e:
_log.warn("Error reading [%s]", cfgPath)
_log.debug("Error reading [%s]", cfgPath, exc_info=e)
return {}
class CloudFoundryInstaller(object):
def __init__(self, ctx):
self._log = _log
self._ctx = ctx
self._unzipUtil = UnzipUtil(ctx)
self._hashUtil = HashUtil(ctx)
self._dcm = DirectoryCacheManager(ctx)
self._dwn = self._get_downloader(ctx)(ctx)
def _get_downloader(self, ctx):
method = ctx.get('DOWNLOAD_METHOD', 'python')
if method == 'python':
self._log.debug('Using python downloader.')
return Downloader
elif method == 'curl':
self._log.debug('Using cURL downloader.')
return CurlDownloader
elif method == 'custom':
fullClsName = ctx['DOWNLOAD_CLASS']
self._log.debug('Using custom downloader [%s].', fullClsName)
dotLoc = fullClsName.rfind('.')
if dotLoc >= 0:
clsName = fullClsName[dotLoc + 1: len(fullClsName)]
modName = fullClsName[0:dotLoc]
m = __import__(modName, globals(), locals(), [clsName])
try:
return getattr(m, clsName)
except AttributeError:
self._log.exception(
'WARNING: DOWNLOAD_CLASS not found!')
else:
self._log.error(
'WARNING: DOWNLOAD_CLASS invalid, must include '
'package name!')
return Downloader
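    # Hedged configuration sketch (editor's addition): the downloader is
    # selected purely from context keys, so a custom implementation could be
    # wired in like this (module and class names below are hypothetical):
    #
    #   ctx['DOWNLOAD_METHOD'] = 'custom'
    #   ctx['DOWNLOAD_CLASS'] = 'my_package.downloaders.S3Downloader'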
def _is_url(self, val):
return urlparse(val).scheme != ''
def install_binary_direct(self, url, hsh, installDir,
fileName=None, strip=False,
extract=True):
self._log.debug("Installing direct [%s]", url)
if not fileName:
fileName = urlparse(url).path.split('/')[-1]
if self._is_url(hsh):
digest = self._dwn.download_direct(hsh)
else:
digest = hsh
self._log.debug(
"Installing [%s] with digest [%s] into [%s] with "
"name [%s] stripping [%s]",
url, digest, installDir, fileName, strip)
fileToInstall = self._dcm.get(fileName, digest)
if fileToInstall is None:
self._log.debug('File [%s] not in cache.', fileName)
fileToInstall = os.path.join(self._ctx['TMPDIR'], fileName)
self._dwn.download(url, fileToInstall)
digest = self._hashUtil.calculate_hash(fileToInstall)
fileToInstall = self._dcm.put(fileName, fileToInstall, digest)
if extract:
return self._unzipUtil.extract(fileToInstall,
installDir,
strip)
else:
shutil.copy(fileToInstall, installDir)
return installDir
def install_binary(self, installKey):
self._log.debug('Installing [%s]', installKey)
url = self._ctx['%s_DOWNLOAD_URL' % installKey]
hashUrl = self._ctx.get(
'%s_HASH_DOWNLOAD_URL' % installKey,
"%s.%s" % (url, self._ctx['CACHE_HASH_ALGORITHM']))
installDir = os.path.join(self._ctx['BUILD_DIR'],
self._ctx.get(
'%s_PACKAGE_INSTALL_DIR' % installKey,
installKey.lower()))
strip = self._ctx.get('%s_STRIP' % installKey, False)
return self.install_binary_direct(url, hashUrl, installDir,
strip=strip)
def _install_from(self, fromPath, fromLoc, toLocation=None, ignore=None):
"""Copy file or directory from a location to the droplet
Copies a file or directory from a location to the application
droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore parameter.
fromPath -> file to copy, relative build pack
fromLoc -> root of the from path. Full path to file or
directory to be copied is fromLoc + fromPath
toLocation -> optional location where to copy the file
relative to app droplet. If not specified
uses fromPath.
ignore -> an optional callable that is passed to
the ignore argument of shutil.copytree.
"""
self._log.debug("Install file [%s] from [%s]", fromPath, fromLoc)
fullPathFrom = os.path.join(fromLoc, fromPath)
if os.path.exists(fullPathFrom):
fullPathTo = os.path.join(
self._ctx['BUILD_DIR'],
((toLocation is None) and fromPath or toLocation))
safe_makedirs(os.path.dirname(fullPathTo))
self._log.debug("Copying [%s] to [%s]", fullPathFrom, fullPathTo)
if os.path.isfile(fullPathFrom):
shutil.copy(fullPathFrom, fullPathTo)
else:
utils.copytree(fullPathFrom, fullPathTo, ignore=ignore)
def install_from_build_pack(self, fromPath, toLocation=None, ignore=None):
"""Copy file or directory from the build pack to the droplet
Copies a file or directory from the build pack to the application
droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore parameter.
fromPath -> file to copy, relative build pack
toLocation -> optional location where to copy the file
relative to app droplet. If not specified
uses fromPath.
ignore -> an optional callable that is passed to
the ignore argument of shutil.copytree.
"""
self._install_from(
fromPath,
self._ctx['BP_DIR'],
toLocation,
ignore)
def install_from_application(self, fromPath, toLocation, ignore=None):
"""Copy file or directory from one place to another in the application
Copies a file or directory from one place to another place within the
application droplet.
fromPath -> file or directory to copy, relative
to application droplet.
toLocation -> location where to copy the file,
relative to app droplet.
ignore -> optional callable that is passed to the
ignore argument of shutil.copytree
"""
self._install_from(
fromPath,
self._ctx['BUILD_DIR'],
toLocation,
ignore)
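# Hedged usage sketch (editor's addition, not part of the build pack): the
# typical call an extension makes to download and unpack a binary. The URL,
# hash URL and target directory are hypothetical placeholders.
def _example_install(ctx):
    installer = CloudFoundryInstaller(ctx)
    return installer.install_binary_direct(
        url='https://example.com/tool-1.0.tar.gz',        # hypothetical
        hsh='https://example.com/tool-1.0.tar.gz.sha1',   # hypothetical
        installDir=os.path.join(ctx['BUILD_DIR'], 'tool'),
        strip=True)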
| {
"content_hash": "6b661f550891e6769078d3e977a4aa0d",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 79,
"avg_line_length": 41.983539094650205,
"alnum_prop": 0.5591060576357577,
"repo_name": "dmikusa-pivotal/cf-download-build-pack",
"id": "39e5983b38961e0038245e00175ca43c829c89f4",
"size": "10202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/build_pack_utils/cloudfoundry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114213"
},
{
"name": "Shell",
"bytes": "3709"
}
],
"symlink_target": ""
} |
"""Tests for trainer_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
# Dependency imports
from tensor2tensor.data_generators import algorithmic
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.models import transformer
from tensor2tensor.utils import model_builder
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_utils
import tensorflow as tf
flags = tf.flags
FLAGS = tf.flags.FLAGS
flags.DEFINE_string("schedule", "train_and_evaluate", "")
flags.DEFINE_integer("eval_steps", 10, "Number of steps in evaluation.")
flags.DEFINE_string("master", "", "Address of TensorFlow master.")
flags.DEFINE_string("output_dir", "", "Base output directory for run.")
@registry.register_problem
class TinyAlgo(algorithmic.AlgorithmicIdentityBinary40):
def generate_data(self, data_dir, _):
identity_problem = algorithmic.AlgorithmicIdentityBinary40()
generator_utils.generate_files(
identity_problem.generator(self.num_symbols, 40, 100000),
self.training_filepaths(data_dir, 1, shuffled=True), 100)
generator_utils.generate_files(
identity_problem.generator(self.num_symbols, 400, 10000),
self.dev_filepaths(data_dir, 1, shuffled=True), 100)
@registry.register_hparams
def transformer_test():
hparams = transformer.transformer_base()
hparams.batch_size = 10
hparams.hidden_size = 10
hparams.num_hidden_layers = 1
hparams.num_heads = 2
hparams.max_length = 16
return hparams
class TrainerUtilsTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
tmp_dir = tf.test.get_temp_dir()
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
# Generate a small test dataset
FLAGS.problems = "tiny_algo"
TrainerUtilsTest.data_dir = tmp_dir
registry.problem(FLAGS.problems).generate_data(TrainerUtilsTest.data_dir,
None)
def testModelsImported(self):
models = registry.list_models()
self.assertTrue("lstm_seq2seq" in models)
def testHParamsImported(self):
hparams = registry.list_hparams()
self.assertTrue("transformer_base" in hparams)
def testSingleStep(self):
model_name = "transformer"
data_dir = TrainerUtilsTest.data_dir
hparams = trainer_utils.create_hparams("transformer_test", data_dir)
hparams = trainer_utils.add_problem_hparams(hparams, FLAGS.problems)
exp = trainer_utils.create_experiment(
data_dir=data_dir,
model_name=model_name,
train_steps=1,
eval_steps=1,
hparams=hparams,
run_config=trainer_utils.create_run_config(
output_dir=tf.test.get_temp_dir()))
exp.test()
def testSingleEvalStepRawSession(self):
"""Illustrate how to run a T2T model in a raw session."""
# Set model name, hparams, problems as would be set on command line.
model_name = "transformer"
FLAGS.hparams_set = "transformer_test"
FLAGS.problems = "tiny_algo"
data_dir = "/tmp" # Used only when a vocab file or such like is needed.
# Create the problem object, hparams, placeholders, features dict.
encoders = registry.problem(FLAGS.problems).feature_encoders(data_dir)
hparams = trainer_utils.create_hparams(FLAGS.hparams_set, data_dir)
hparams = trainer_utils.add_problem_hparams(hparams, FLAGS.problems)
inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D.
# In INFER mode targets can be None.
targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D.
features = {
"inputs": batch_inputs,
"targets": batch_targets,
"problem_choice": 0, # We run on the first problem here.
"input_space_id": hparams.problems[0].input_space_id,
"target_space_id": hparams.problems[0].target_space_id
}
# Now set a mode and create the graph by invoking model_fn.
mode = tf.estimator.ModeKeys.EVAL
estimator_spec = model_builder.model_fn(
model_name, features, mode, hparams, problem_names=[FLAGS.problems])
predictions_dict = estimator_spec.predictions
predictions = tf.squeeze( # These are not images, axis=2,3 are not needed.
predictions_dict["predictions"],
axis=[2, 3])
# Having the graph, let's run it on some data.
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
inputs = "0 1 0"
targets = "0 1 0"
# Encode from raw string to numpy input array using problem encoders.
inputs_numpy = encoders["inputs"].encode(inputs)
targets_numpy = encoders["targets"].encode(targets)
# Feed the encoded inputs and targets and run session.
feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
np_predictions = sess.run(predictions, feed)
# Check that the result has the correct shape: batch x length x vocab_size
# where, for us, batch = 1, length = 3, vocab_size = 4.
self.assertEqual(np_predictions.shape, (1, 3, 4))
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "e723ef7f5db5e2844236d3bad745cbc7",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 36.76223776223776,
"alnum_prop": 0.6891763363134867,
"repo_name": "waterblue13/tensor2tensor",
"id": "16a8149f4bf1a20468aeee0116b233903b5c6b14",
"size": "5863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/utils/trainer_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10699"
},
{
"name": "Jupyter Notebook",
"bytes": "14442"
},
{
"name": "Python",
"bytes": "1070492"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
} |
"""Status/config and exposure windows for NIC-FPS.
History:
2004-10-19 ROwen
2008-02-11 ROwen Modified to use new TUI.Inst.StatusConfigWdg.
2008-02-12 ROwen Misfeature fix: was using instName=Expose for the expose window.
2011-08-11 ROwen Modified to save state.
2014-02-03 ROwen Updated to use modernized TestData.
"""
import RO.Alg
import TUI.Inst.ExposeWdg
import TUI.Inst.StatusConfigWdg
import StatusConfigInputWdg
InstName = StatusConfigInputWdg.StatusConfigInputWdg.InstName
def addWindow(tlSet):
tlSet.createToplevel (
name = "None.%s Expose" % (InstName,),
defGeom = "+452+280",
resizable = False,
wdgFunc = RO.Alg.GenericCallback (
TUI.Inst.ExposeWdg.ExposeWdg,
instName = InstName,
),
visible = False,
)
tlSet.createToplevel (
name = "Inst.%s" % (InstName,),
defGeom = "+676+280",
resizable = False,
wdgFunc = StatusConfigWdg,
visible = False,
doSaveState = True,
)
class StatusConfigWdg(TUI.Inst.StatusConfigWdg.StatusConfigWdg):
def __init__(self, master):
TUI.Inst.StatusConfigWdg.StatusConfigWdg.__init__(self,
master = master,
statusConfigInputClass = StatusConfigInputWdg.StatusConfigInputWdg,
)
if __name__ == "__main__":
import RO.Wdg
import TestData
root = TestData.tuiModel.tkRoot
root.resizable(width=0, height=0)
tlSet = TestData.tuiModel.tlSet
addWindow(tlSet)
tlSet.makeVisible("Inst.%s" % (InstName,))
TestData.start()
root.mainloop()
| {
"content_hash": "d0bb25ab6af72ac7e2e127db5fcffab1",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 84,
"avg_line_length": 27.322033898305083,
"alnum_prop": 0.6488833746898263,
"repo_name": "r-owen/TUI",
"id": "23da1b07a435d4a6f87e721d2a968e1b1963b9ad",
"size": "1634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TUI/Inst/NICFPS/NICFPSWindow.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "412255"
},
{
"name": "Python",
"bytes": "1443987"
}
],
"symlink_target": ""
} |
"""Classes to generate plain text from a message object tree.
"""
import re
import sys
import time
import locale
import random
from types import ListType, StringType
from cStringIO import StringIO
from email.Header import Header
from email.Parser import NLCRE
try:
from email._compat22 import _isstring
except SyntaxError:
from email._compat21 import _isstring
try:
True, False
except NameError:
True = 1
False = 0
EMPTYSTRING = ''
SEMISPACE = '; '
BAR = '|'
UNDERSCORE = '_'
NL = '\n'
NLTAB = '\n\t'
SEMINLTAB = ';\n\t'
SPACE8 = ' ' * 8
fcre = re.compile(r'^From ', re.MULTILINE)
def _is8bitstring(s):
if isinstance(s, StringType):
try:
unicode(s, 'us-ascii')
except UnicodeError:
return True
return False
class Generator:
"""Generates output from a Message object tree.
This basic generator writes the message to the given file object as plain
text.
"""
#
# Public interface
#
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
"""Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default), escapes
From_ lines in the body of the message by putting a `>' in front of
them.
Optional maxheaderlen specifies the longest length for a non-continued
header. When a header line is longer (in characters, with tabs
        expanded to 8 spaces) than maxheaderlen, the header will be broken on
semicolons and continued as per RFC 2822. If no semicolon is found,
then the header is left alone. Set to zero to disable wrapping
        headers. Default is 78, as recommended (but not required) by RFC
2822.
"""
self._fp = outfp
self._mangle_from_ = mangle_from_
self.__maxheaderlen = maxheaderlen
def write(self, s):
# Just delegate to the file object
self._fp.write(s)
def flatten(self, msg, unixfrom=False):
"""Print the message object tree rooted at msg to the output file
specified when the Generator instance was created.
unixfrom is a flag that forces the printing of a Unix From_ delimiter
before the first object in the message tree. If the original message
has no From_ delimiter, a `standard' one is crafted. By default, this
is False to inhibit the printing of any From_ delimiter.
Note that for subobjects, no From_ line is printed.
"""
if unixfrom:
ufrom = msg.get_unixfrom()
if not ufrom:
ufrom = 'From nobody ' + time.ctime(time.time())
print >> self._fp, ufrom
self._write(msg)
# For backwards compatibility, but this is slower
__call__ = flatten
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp, self._mangle_from_, self.__maxheaderlen)
#
# Protected interface - undocumented ;/
#
def _write(self, msg):
# We can't write the headers yet because of the following scenario:
# say a multipart message includes the boundary string somewhere in
# its body. We'd have to calculate the new boundary /before/ we write
# the headers so that we can write the correct Content-Type:
# parameter.
#
# The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a StringIO. Then we write the
# headers and the StringIO contents. That way, subpart handlers can
# Do The Right Thing, and can still modify the Content-Type: header if
# necessary.
oldfp = self._fp
try:
self._fp = sfp = StringIO()
self._dispatch(msg)
finally:
self._fp = oldfp
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
meth = getattr(msg, '_write_headers', None)
if meth is None:
self._write_headers(msg)
else:
meth(self)
self._fp.write(sfp.getvalue())
def _dispatch(self, msg):
# Get the Content-Type: for the message, then try to dispatch to
# self._handle_<maintype>_<subtype>(). If there's no handler for the
# full MIME type, then dispatch to self._handle_<maintype>(). If
# that's missing too, then dispatch to self._writeBody().
main = msg.get_content_maintype()
sub = msg.get_content_subtype()
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
meth = getattr(self, '_handle_' + specific, None)
if meth is None:
generic = main.replace('-', '_')
meth = getattr(self, '_handle_' + generic, None)
if meth is None:
meth = self._writeBody
meth(msg)
#
# Default handlers
#
def _write_headers(self, msg):
for h, v in msg.items():
print >> self._fp, '%s:' % h,
if self.__maxheaderlen == 0:
# Explicit no-wrapping
print >> self._fp, v
elif isinstance(v, Header):
# Header instances know what to do
print >> self._fp, v.encode()
elif _is8bitstring(v):
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
# string. If it's ascii-subset, then we could do a normal
# ascii split, but if it's multibyte then we could break the
# string. There's no way to know so the least harm seems to
# be to not split the string and risk it being too long.
print >> self._fp, v
else:
# Header's got lots of smarts, so use it.
print >> self._fp, Header(
v, maxlinelen=self.__maxheaderlen,
header_name=h, continuation_ws='\t').encode()
# A blank line always separates headers from body
print >> self._fp
#
# Handlers for writing types and subtypes
#
def _handle_text(self, msg):
payload = msg.get_payload()
if payload is None:
return
cset = msg.get_charset()
if cset is not None:
payload = cset.body_encode(payload)
if not _isstring(payload):
raise TypeError, 'string payload expected: %s' % type(payload)
if self._mangle_from_:
payload = fcre.sub('>From ', payload)
self._fp.write(payload)
# Default body handler
_writeBody = _handle_text
def _handle_multipart(self, msg):
# The trick here is to write out each part separately, merge them all
# together, and then make sure that the boundary we've chosen isn't
# present in the payload.
msgtexts = []
subparts = msg.get_payload()
if subparts is None:
# Nothing has ever been attached
boundary = msg.get_boundary(failobj=_make_boundary())
print >> self._fp, '--' + boundary
print >> self._fp, '\n'
print >> self._fp, '--' + boundary + '--'
return
elif _isstring(subparts):
# e.g. a non-strict parse of a message with no starting boundary.
self._fp.write(subparts)
return
elif not isinstance(subparts, ListType):
# Scalar payload
subparts = [subparts]
for part in subparts:
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
msgtexts.append(s.getvalue())
# Now make sure the boundary we've selected doesn't appear in any of
# the message texts.
alltext = NL.join(msgtexts)
# BAW: What about boundaries that are wrapped in double-quotes?
boundary = msg.get_boundary(failobj=_make_boundary(alltext))
# If we had to calculate a new boundary because the body text
# contained that string, set the new boundary. We don't do it
# unconditionally because, while set_boundary() preserves order, it
# doesn't preserve newlines/continuations in headers. This is no big
# deal in practice, but turns out to be inconvenient for the unittest
# suite.
if msg.get_boundary() <> boundary:
msg.set_boundary(boundary)
# Write out any preamble
if msg.preamble is not None:
self._fp.write(msg.preamble)
# If preamble is the empty string, the length of the split will be
# 1, but the last element will be the empty string. If it's
# anything else but does not end in a line separator, the length
# will be > 1 and not end in an empty string. We need to
# guarantee a newline after the preamble, but don't add too many.
plines = NLCRE.split(msg.preamble)
if plines <> [''] and plines[-1] <> '':
self._fp.write('\n')
# First boundary is a bit different; it doesn't have a leading extra
# newline.
print >> self._fp, '--' + boundary
# Join and write the individual parts
joiner = '\n--' + boundary + '\n'
self._fp.write(joiner.join(msgtexts))
print >> self._fp, '\n--' + boundary + '--',
# Write out any epilogue
if msg.epilogue is not None:
if not msg.epilogue.startswith('\n'):
print >> self._fp
self._fp.write(msg.epilogue)
def _handle_message_delivery_status(self, msg):
# We can't just write the headers directly to self's file object
# because this will leave an extra newline between the last header
# block and the boundary. Sigh.
blocks = []
for part in msg.get_payload():
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
text = s.getvalue()
lines = text.split('\n')
# Strip off the unnecessary trailing empty line
if lines and lines[-1] == '':
blocks.append(NL.join(lines[:-1]))
else:
blocks.append(text)
# Now join all the blocks with an empty line. This has the lovely
# effect of separating each block with an empty line, but not adding
# an extra one after the last one.
self._fp.write(NL.join(blocks))
def _handle_message(self, msg):
s = StringIO()
g = self.clone(s)
# The payload of a message/rfc822 part should be a multipart sequence
# of length 1. The zeroth element of the list should be the Message
# object for the subpart. Extract that object, stringify it, and
# write it out.
g.flatten(msg.get_payload(0), unixfrom=False)
self._fp.write(s.getvalue())
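# Usage sketch (not part of the original module): a minimal, hedged example of
# flattening a parsed message back to text with the Generator above.  The
# message built here is an assumption for illustration only.
def _example_flatten():  # pragma: no cover
    from email.MIMEText import MIMEText
    msg = MIMEText('hello world')
    msg['Subject'] = 'demo'
    fp = StringIO()
    g = Generator(fp, mangle_from_=True, maxheaderlen=60)
    g.flatten(msg, unixfrom=True)
    return fp.getvalue()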
class DecodedGenerator(Generator):
"""Generator a text representation of a message.
Like the Generator base class, except that non-text parts are substituted
with a format string representing the part.
"""
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
"""Like Generator.__init__() except that an additional optional
argument is allowed.
Walks through all subparts of a message. If the subpart is of main
type `text', then it prints the decoded payload of the subpart.
Otherwise, fmt is a format string that is used instead of the message
payload. fmt is expanded with the following keywords (in
%(keyword)s format):
type : Full MIME type of the non-text part
maintype : Main MIME type of the non-text part
subtype : Sub-MIME type of the non-text part
filename : Filename of the non-text part
description: Description associated with the non-text part
encoding : Content transfer encoding of the non-text part
The default value for fmt is None, meaning
[Non-text (%(type)s) part of message omitted, filename %(filename)s]
"""
Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
if fmt is None:
fmt = ('[Non-text (%(type)s) part of message omitted, '
'filename %(filename)s]')
self._fmt = fmt
def _dispatch(self, msg):
for part in msg.walk():
maintype = part.get_main_type('text')
if maintype == 'text':
print >> self, part.get_payload(decode=True)
elif maintype == 'multipart':
# Just skip this
pass
else:
print >> self, self._fmt % {
'type' : part.get_type('[no MIME type]'),
'maintype' : part.get_main_type('[no main MIME type]'),
'subtype' : part.get_subtype('[no sub-MIME type]'),
'filename' : part.get_filename('[no filename]'),
'description': part.get('Content-Description',
'[no description]'),
'encoding' : part.get('Content-Transfer-Encoding',
'[no encoding]'),
}
# Helper
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
def _make_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxint)
boundary = ('=' * 15) + (_fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
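# Usage sketch (not part of the original module): DecodedGenerator replaces
# non-text parts with the fmt placeholder documented above.  The multipart
# message assembled here is a hypothetical illustration.
def _example_decoded():  # pragma: no cover
    from email.MIMEMultipart import MIMEMultipart
    from email.MIMEText import MIMEText
    from email.MIMEImage import MIMEImage
    msg = MIMEMultipart()
    msg.attach(MIMEText('readable body'))
    msg.attach(MIMEImage('not-really-image-bytes', 'png'))
    fp = StringIO()
    # the image part is rendered as the fmt string, the text part is decoded
    DecodedGenerator(fp, fmt='[skipped %(type)s attachment]').flatten(msg)
    return fp.getvalue()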
| {
"content_hash": "da932f542f82672b6ec5c94921d7b44a",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 78,
"avg_line_length": 37.47883597883598,
"alnum_prop": 0.5777511117385473,
"repo_name": "MalloyPower/parsing-python",
"id": "3e578a2f745b6a57c9ff9a9e75c51ca4b251290e",
"size": "14261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/email/Generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
from pygccxml import utils
from pygccxml import declarations
class default_argument_patcher_t( object ):
def __init__( self, enums ):
object.__init__( self )
self.__enums = enums
def __call__(self, decl):
for arg in decl.arguments:
if not arg.default_value:
continue
fixer = self.__find_fixer( decl, arg )
if fixer:
arg.default_value = fixer( decl, arg )
def __find_fixer(self, func, arg):
if not arg.default_value:
return False
elif self.__is_unqualified_enum( func, arg ):
return self.__fix_unqualified_enum
elif self.__is_double_call( func, arg ):
return self.__fix_double_call
elif self.__is_invalid_integral( func, arg ):
return self.__fix_invalid_integral
elif self.__is_constructor_call( func, arg ):
return self.__fix_constructor_call
else:
return None
def __join_names( self, prefix, suffix ):
if prefix == '::':
return '::' + suffix
else:
return prefix + '::' + suffix
def __is_unqualified_enum(self, func, arg):
type_ = declarations.remove_reference( declarations.remove_cv( arg.type ) )
if not declarations.is_enum( type_ ):
return False
enum_type = declarations.enum_declaration( type_ )
return enum_type.has_value_name( arg.default_value )
def __fix_unqualified_enum( self, func, arg):
type_ = declarations.remove_reference( declarations.remove_cv( arg.type ) )
enum_type = declarations.enum_declaration( type_ )
return self.__join_names( enum_type.parent.decl_string, arg.default_value )
def __is_invalid_integral(self, func, arg):
type_ = declarations.remove_reference( declarations.remove_cv( arg.type ) )
if not declarations.is_integral( type_ ):
return False
try:
int( arg.default_value )
return False
except:
return True
def __fix_invalid_integral(self, func, arg):
try:
int( arg.default_value )
return arg.default_value
except:
pass
try:
int( arg.default_value, 16 )
if 64 == utils.get_architecture():
                #on 64 bit architecture, gccxml reports 0fffff, which is a valid number
                #the problem is that in this case it is so buggy that pygccxml cannot fix it
#users will have to fix the default value manually
return arg.default_value
default_value = arg.default_value.lower()
found_hex = filter( lambda ch: ch in 'abcdef', default_value )
if found_hex and not default_value.startswith( '0x' ):
int( '0x' + default_value, 16 )
return '0x' + default_value
except:
pass
        #maybe we are dealing with an enum
parent = func.parent
while parent:
found = self.__find_enum( parent, arg.default_value )
if found:
if declarations.is_fundamental( arg.type ) and ' ' in arg.type.decl_string:
template = '(%s)(%s)'
else:
template = '%s(%s)'
return template % ( arg.type.decl_string
, self.__join_names( found.parent.decl_string, arg.default_value ) )
else:
parent = parent.parent
return arg.default_value
def __find_enum( self, scope, default_value ):
        #this algorithm could be improved: it could take into account
        #1. unnamed namespaces
        #2. location within files
for enum in self.__enums:
if enum.parent is scope and enum.has_value_name( default_value ):
return enum
return None
def __is_double_call( self, func, arg ):
call_invocation = declarations.call_invocation
dv = arg.default_value
found1 = call_invocation.find_args( dv )
if found1 == call_invocation.NOT_FOUND:
return False
found2 = call_invocation.find_args( dv, found1[1] + 1 )
if found2 == call_invocation.NOT_FOUND:
return False
args1 = call_invocation.args( dv[ found1[0] : found1[1] + 1 ] )
args2 = call_invocation.args( dv[ found2[0] : found2[1] + 1 ] )
return len(args1) == len(args2)
def __fix_double_call( self, func, arg ):
call_invocation = declarations.call_invocation
dv = arg.default_value
found1 = call_invocation.find_args( dv )
found2 = call_invocation.find_args( dv, found1[1] + 1 )
#args1 = call_invocation.args( dv[ found1[0] : found1[1] + 1 ] )
args2 = call_invocation.args( dv[ found2[0] : found2[1] + 1 ] )
return call_invocation.join( dv[:found1[0]], args2 )
def __is_constructor_call( self, func, arg ):
#if '0.9' in func.compiler:
# return False
call_invocation = declarations.call_invocation
dv = arg.default_value
if not call_invocation.is_call_invocation( dv ):
return False
name = call_invocation.name( dv )
base_type = declarations.base_type( arg.type )
if not isinstance( base_type, declarations.declarated_t ):
return False
decl = base_type.declaration
return decl.name == name \
or ( isinstance( decl, declarations.class_t ) \
and name in map( lambda typedef: typedef.name, decl.aliases ) )
def __fix_constructor_call( self, func, arg ):
call_invocation = declarations.call_invocation
dv = arg.default_value
if not call_invocation.is_call_invocation( dv ):
return False
base_type = declarations.base_type( arg.type )
decl = base_type.declaration
name, args = call_invocation.split( dv )
if decl.name != name:
#we have some alias to the class
relevant_typedefs = filter( lambda typedef: typedef.name == name
, decl.aliases )
if 1 == len( relevant_typedefs ):
f_q_name = self.__join_names( declarations.full_name( relevant_typedefs[0].parent )
, name )
            else: #in this case we cannot say which typedef the user uses:
f_q_name = self.__join_names( declarations.full_name( decl.parent )
, decl.name )
else:
f_q_name = self.__join_names( declarations.full_name( decl.parent ), name )
return call_invocation.join( f_q_name, args )
class casting_operator_patcher_t( object ):
def __init__( self ):
object.__init__( self )
def __call__(self, decl):
decl.name = 'operator ' + decl.return_type.decl_string
_casting_oper_patcher_ = casting_operator_patcher_t()
def fix_calldef_decls(decls, enums):
default_arg_patcher = default_argument_patcher_t(enums)
    #decls should be a flat list of all the declarations you want to apply the patch on
for decl in decls:
default_arg_patcher( decl )
if isinstance( decl, declarations.casting_operator_t):
_casting_oper_patcher_( decl )
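# Usage sketch (not part of the original module, illustrative only): the
# patchers above are normally driven through fix_calldef_decls.  `global_ns`
# is an assumed pygccxml namespace obtained from a prior parse, and the
# decls() queries are an assumption about the surrounding scopedef API; only
# fix_calldef_decls itself is defined in this module.
def _example_patch(global_ns):
    calldefs = global_ns.decls(decl_type=declarations.calldef_t)
    enums = global_ns.decls(decl_type=declarations.enumeration_t)
    fix_calldef_decls(calldefs, enums)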
| {
"content_hash": "f4abf8376296a3ca54900a1dda144f47",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 104,
"avg_line_length": 41.34594594594594,
"alnum_prop": 0.5451693031768858,
"repo_name": "avaitla/Haskell-to-C---Bridge",
"id": "c3b752444584eeb149fda79f7817212712e1f2ce",
"size": "7845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygccxml-1.0.0/pygccxml/parser/patcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1194590"
},
{
"name": "C",
"bytes": "40073785"
},
{
"name": "C++",
"bytes": "2198628"
},
{
"name": "Haskell",
"bytes": "22377"
},
{
"name": "JavaScript",
"bytes": "10874"
},
{
"name": "Perl",
"bytes": "1373"
},
{
"name": "Python",
"bytes": "696243"
},
{
"name": "Shell",
"bytes": "1623468"
}
],
"symlink_target": ""
} |
import datetime
import decimal
import uuid
import sqlalchemy as sa
from sqlalchemy import any_
from sqlalchemy import ARRAY
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import column
from sqlalchemy import DateTime
from sqlalchemy import Enum
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import Float
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import null
from sqlalchemy import Numeric
from sqlalchemy import REAL
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import TypeDecorator
from sqlalchemy import types
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.dialects.postgresql import base
from sqlalchemy.dialects.postgresql import DATERANGE
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.dialects.postgresql import hstore
from sqlalchemy.dialects.postgresql import INT4RANGE
from sqlalchemy.dialects.postgresql import INT8RANGE
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import NUMRANGE
from sqlalchemy.dialects.postgresql import TSRANGE
from sqlalchemy.dialects.postgresql import TSTZRANGE
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy.sql import operators
from sqlalchemy.sql import sqltypes
from sqlalchemy.testing import engines
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertions import assert_raises
from sqlalchemy.testing.assertions import assert_raises_message
from sqlalchemy.testing.assertions import AssertsCompiledSQL
from sqlalchemy.testing.assertions import AssertsExecutionResults
from sqlalchemy.testing.assertions import ComparesTables
from sqlalchemy.testing.assertions import eq_
from sqlalchemy.testing.assertions import is_
from sqlalchemy.testing.suite import test_types as suite
from sqlalchemy.testing.util import round_decimal
tztable = notztable = metadata = table = None
class FloatCoercionTest(fixtures.TablesTest, AssertsExecutionResults):
__only_on__ = "postgresql"
__dialect__ = postgresql.dialect()
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
)
@classmethod
def insert_data(cls):
data_table = cls.tables.data_table
with testing.db.begin() as connection:
connection.execute(
data_table.insert().values(
[
{"data": 3},
{"data": 5},
{"data": 7},
{"data": 2},
{"data": 15},
{"data": 12},
{"data": 6},
{"data": 478},
{"data": 52},
{"data": 9},
]
)
)
def test_float_coercion(self, connection):
data_table = self.tables.data_table
for type_, result in [
(Numeric, decimal.Decimal("140.381230939")),
(Float, 140.381230939),
(Float(asdecimal=True), decimal.Decimal("140.381230939")),
(Numeric(asdecimal=False), 140.381230939),
]:
ret = connection.execute(
select([func.stddev_pop(data_table.c.data, type_=type_)])
).scalar()
eq_(round_decimal(ret, 9), result)
ret = connection.execute(
select([cast(func.stddev_pop(data_table.c.data), type_)])
).scalar()
eq_(round_decimal(ret, 9), result)
@testing.provide_metadata
def test_arrays_pg(self, connection):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x", postgresql.ARRAY(Float)),
Column("y", postgresql.ARRAY(REAL)),
Column("z", postgresql.ARRAY(postgresql.DOUBLE_PRECISION)),
Column("q", postgresql.ARRAY(Numeric)),
)
metadata.create_all()
connection.execute(
t1.insert(), x=[5], y=[5], z=[6], q=[decimal.Decimal("6.4")]
)
row = connection.execute(t1.select()).first()
eq_(row, ([5], [5], [6], [decimal.Decimal("6.4")]))
@testing.provide_metadata
def test_arrays_base(self, connection):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x", sqltypes.ARRAY(Float)),
Column("y", sqltypes.ARRAY(REAL)),
Column("z", sqltypes.ARRAY(postgresql.DOUBLE_PRECISION)),
Column("q", sqltypes.ARRAY(Numeric)),
)
metadata.create_all()
connection.execute(
t1.insert(), x=[5], y=[5], z=[6], q=[decimal.Decimal("6.4")]
)
row = connection.execute(t1.select()).first()
eq_(row, ([5], [5], [6], [decimal.Decimal("6.4")]))
class EnumTest(fixtures.TestBase, AssertsExecutionResults):
__backend__ = True
__only_on__ = "postgresql > 8.3"
@testing.provide_metadata
def test_create_table(self, connection):
metadata = self.metadata
t1 = Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"value", Enum("one", "two", "three", name="onetwothreetype")
),
)
t1.create(connection)
t1.create(connection, checkfirst=True) # check the create
connection.execute(t1.insert(), value="two")
connection.execute(t1.insert(), value="three")
connection.execute(t1.insert(), value="three")
eq_(
connection.execute(t1.select().order_by(t1.c.id)).fetchall(),
[(1, "two"), (2, "three"), (3, "three")],
)
@testing.combinations(None, "foo")
def test_create_table_schema_translate_map(self, symbol_name):
# note we can't use the fixture here because it will not drop
# from the correct schema
metadata = MetaData()
t1 = Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"value",
Enum(
"one",
"two",
"three",
name="schema_enum",
schema=symbol_name,
),
),
schema=symbol_name,
)
with testing.db.connect() as conn:
conn = conn.execution_options(
schema_translate_map={symbol_name: testing.config.test_schema}
)
t1.create(conn)
assert "schema_enum" in [
e["name"]
for e in inspect(conn).get_enums(
schema=testing.config.test_schema
)
]
t1.create(conn, checkfirst=True)
conn.execute(t1.insert(), value="two")
conn.execute(t1.insert(), value="three")
conn.execute(t1.insert(), value="three")
eq_(
conn.execute(t1.select().order_by(t1.c.id)).fetchall(),
[(1, "two"), (2, "three"), (3, "three")],
)
t1.drop(conn)
assert "schema_enum" not in [
e["name"]
for e in inspect(conn).get_enums(
schema=testing.config.test_schema
)
]
t1.drop(conn, checkfirst=True)
def test_name_required(self):
metadata = MetaData(testing.db)
etype = Enum("four", "five", "six", metadata=metadata)
assert_raises(exc.CompileError, etype.create)
assert_raises(
exc.CompileError, etype.compile, dialect=postgresql.dialect()
)
@testing.provide_metadata
def test_unicode_labels(self, connection):
metadata = self.metadata
t1 = Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"value",
Enum(
util.u("réveillé"),
util.u("drôle"),
util.u("S’il"),
name="onetwothreetype",
),
),
)
metadata.create_all()
connection.execute(t1.insert(), value=util.u("drôle"))
connection.execute(t1.insert(), value=util.u("réveillé"))
connection.execute(t1.insert(), value=util.u("S’il"))
eq_(
connection.execute(t1.select().order_by(t1.c.id)).fetchall(),
[
(1, util.u("drôle")),
(2, util.u("réveillé")),
(3, util.u("S’il")),
],
)
m2 = MetaData(testing.db)
t2 = Table("table", m2, autoload=True)
eq_(
t2.c.value.type.enums,
[util.u("réveillé"), util.u("drôle"), util.u("S’il")],
)
@testing.provide_metadata
def test_non_native_enum(self, connection):
metadata = self.metadata
t1 = Table(
"foo",
metadata,
Column(
"bar",
Enum("one", "two", "three", name="myenum", native_enum=False),
),
)
def go():
t1.create(testing.db)
self.assert_sql(
testing.db,
go,
[
(
"CREATE TABLE foo (\tbar "
"VARCHAR(5), \tCONSTRAINT myenum CHECK "
"(bar IN ('one', 'two', 'three')))",
{},
)
],
)
connection.execute(t1.insert(), {"bar": "two"})
eq_(connection.scalar(select([t1.c.bar])), "two")
@testing.provide_metadata
def test_non_native_enum_w_unicode(self, connection):
metadata = self.metadata
t1 = Table(
"foo",
metadata,
Column(
"bar", Enum("B", util.u("Ü"), name="myenum", native_enum=False)
),
)
def go():
t1.create(testing.db)
self.assert_sql(
testing.db,
go,
[
(
util.u(
"CREATE TABLE foo (\tbar "
"VARCHAR(1), \tCONSTRAINT myenum CHECK "
"(bar IN ('B', 'Ü')))"
),
{},
)
],
)
connection.execute(t1.insert(), {"bar": util.u("Ü")})
eq_(connection.scalar(select([t1.c.bar])), util.u("Ü"))
@testing.provide_metadata
def test_disable_create(self):
metadata = self.metadata
e1 = postgresql.ENUM(
"one", "two", "three", name="myenum", create_type=False
)
t1 = Table("e1", metadata, Column("c1", e1))
# table can be created separately
# without conflict
e1.create(bind=testing.db)
t1.create(testing.db)
t1.drop(testing.db)
e1.drop(bind=testing.db)
@testing.provide_metadata
def test_generate_multiple(self):
"""Test that the same enum twice only generates once
for the create_all() call, without using checkfirst.
A 'memo' collection held by the DDL runner
now handles this.
"""
metadata = self.metadata
e1 = Enum("one", "two", "three", name="myenum")
Table("e1", metadata, Column("c1", e1))
Table("e2", metadata, Column("c1", e1))
metadata.create_all(checkfirst=False)
metadata.drop_all(checkfirst=False)
assert "myenum" not in [
e["name"] for e in inspect(testing.db).get_enums()
]
@testing.provide_metadata
def test_generate_alone_on_metadata(self):
"""Test that the same enum twice only generates once
for the create_all() call, without using checkfirst.
A 'memo' collection held by the DDL runner
now handles this.
"""
metadata = self.metadata
Enum("one", "two", "three", name="myenum", metadata=self.metadata)
metadata.create_all(checkfirst=False)
assert "myenum" in [e["name"] for e in inspect(testing.db).get_enums()]
metadata.drop_all(checkfirst=False)
assert "myenum" not in [
e["name"] for e in inspect(testing.db).get_enums()
]
@testing.provide_metadata
def test_generate_multiple_on_metadata(self):
metadata = self.metadata
e1 = Enum("one", "two", "three", name="myenum", metadata=metadata)
t1 = Table("e1", metadata, Column("c1", e1))
t2 = Table("e2", metadata, Column("c1", e1))
metadata.create_all(checkfirst=False)
assert "myenum" in [e["name"] for e in inspect(testing.db).get_enums()]
metadata.drop_all(checkfirst=False)
assert "myenum" not in [
e["name"] for e in inspect(testing.db).get_enums()
]
e1.create() # creates ENUM
t1.create() # does not create ENUM
t2.create() # does not create ENUM
@testing.provide_metadata
def test_generate_multiple_schemaname_on_metadata(self):
metadata = self.metadata
Enum("one", "two", "three", name="myenum", metadata=metadata)
Enum(
"one",
"two",
"three",
name="myenum",
metadata=metadata,
schema="test_schema",
)
metadata.create_all(checkfirst=False)
assert "myenum" in [e["name"] for e in inspect(testing.db).get_enums()]
assert "myenum" in [
e["name"]
for e in inspect(testing.db).get_enums(schema="test_schema")
]
metadata.drop_all(checkfirst=False)
assert "myenum" not in [
e["name"] for e in inspect(testing.db).get_enums()
]
assert "myenum" not in [
e["name"]
for e in inspect(testing.db).get_enums(schema="test_schema")
]
@testing.provide_metadata
def test_drops_on_table(self):
metadata = self.metadata
e1 = Enum("one", "two", "three", name="myenum")
table = Table("e1", metadata, Column("c1", e1))
table.create()
table.drop()
assert "myenum" not in [
e["name"] for e in inspect(testing.db).get_enums()
]
table.create()
assert "myenum" in [e["name"] for e in inspect(testing.db).get_enums()]
table.drop()
assert "myenum" not in [
e["name"] for e in inspect(testing.db).get_enums()
]
@testing.provide_metadata
def test_remain_on_table_metadata_wide(self):
metadata = self.metadata
e1 = Enum("one", "two", "three", name="myenum", metadata=metadata)
table = Table("e1", metadata, Column("c1", e1))
# need checkfirst here, otherwise enum will not be created
assert_raises_message(
sa.exc.ProgrammingError,
'.*type "myenum" does not exist',
table.create,
)
table.create(checkfirst=True)
table.drop()
table.create(checkfirst=True)
table.drop()
assert "myenum" in [e["name"] for e in inspect(testing.db).get_enums()]
metadata.drop_all()
assert "myenum" not in [
e["name"] for e in inspect(testing.db).get_enums()
]
def test_non_native_dialect(self):
engine = engines.testing_engine()
engine.connect()
engine.dialect.supports_native_enum = False
metadata = MetaData()
t1 = Table(
"foo",
metadata,
Column("bar", Enum("one", "two", "three", name="myenum")),
)
def go():
t1.create(engine)
try:
self.assert_sql(
engine,
go,
[
(
"CREATE TABLE foo (bar "
"VARCHAR(5), CONSTRAINT myenum CHECK "
"(bar IN ('one', 'two', 'three')))",
{},
)
],
)
finally:
metadata.drop_all(engine)
def test_standalone_enum(self):
metadata = MetaData(testing.db)
etype = Enum(
"four", "five", "six", name="fourfivesixtype", metadata=metadata
)
etype.create()
try:
assert testing.db.dialect.has_type(testing.db, "fourfivesixtype")
finally:
etype.drop()
assert not testing.db.dialect.has_type(
testing.db, "fourfivesixtype"
)
metadata.create_all()
try:
assert testing.db.dialect.has_type(testing.db, "fourfivesixtype")
finally:
metadata.drop_all()
assert not testing.db.dialect.has_type(
testing.db, "fourfivesixtype"
)
def test_no_support(self):
def server_version_info(self):
return (8, 2)
e = engines.testing_engine()
dialect = e.dialect
dialect._get_server_version_info = server_version_info
assert dialect.supports_native_enum
e.connect()
assert not dialect.supports_native_enum
# initialize is called again on new pool
e.dispose()
e.connect()
assert not dialect.supports_native_enum
@testing.provide_metadata
def test_reflection(self):
metadata = self.metadata
etype = Enum(
"four", "five", "six", name="fourfivesixtype", metadata=metadata
)
Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"value", Enum("one", "two", "three", name="onetwothreetype")
),
Column("value2", etype),
)
metadata.create_all()
m2 = MetaData(testing.db)
t2 = Table("table", m2, autoload=True)
eq_(t2.c.value.type.enums, ["one", "two", "three"])
eq_(t2.c.value.type.name, "onetwothreetype")
eq_(t2.c.value2.type.enums, ["four", "five", "six"])
eq_(t2.c.value2.type.name, "fourfivesixtype")
@testing.provide_metadata
def test_schema_reflection(self):
metadata = self.metadata
etype = Enum(
"four",
"five",
"six",
name="fourfivesixtype",
schema="test_schema",
metadata=metadata,
)
Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"value",
Enum(
"one",
"two",
"three",
name="onetwothreetype",
schema="test_schema",
),
),
Column("value2", etype),
)
metadata.create_all()
m2 = MetaData(testing.db)
t2 = Table("table", m2, autoload=True)
eq_(t2.c.value.type.enums, ["one", "two", "three"])
eq_(t2.c.value.type.name, "onetwothreetype")
eq_(t2.c.value2.type.enums, ["four", "five", "six"])
eq_(t2.c.value2.type.name, "fourfivesixtype")
eq_(t2.c.value2.type.schema, "test_schema")
@testing.provide_metadata
def test_custom_subclass(self, connection):
class MyEnum(TypeDecorator):
impl = Enum("oneHI", "twoHI", "threeHI", name="myenum")
def process_bind_param(self, value, dialect):
if value is not None:
value += "HI"
return value
def process_result_value(self, value, dialect):
if value is not None:
value += "THERE"
return value
t1 = Table("table1", self.metadata, Column("data", MyEnum()))
self.metadata.create_all(testing.db)
connection.execute(t1.insert(), {"data": "two"})
eq_(connection.scalar(select([t1.c.data])), "twoHITHERE")
@testing.provide_metadata
def test_generic_w_pg_variant(self, connection):
some_table = Table(
"some_table",
self.metadata,
Column(
"data",
Enum(
"one",
"two",
"three",
native_enum=True # make sure this is True because
# it should *not* take effect due to
# the variant
).with_variant(
postgresql.ENUM("four", "five", "six", name="my_enum"),
"postgresql",
),
),
)
assert "my_enum" not in [
e["name"] for e in inspect(connection).get_enums()
]
self.metadata.create_all(connection)
assert "my_enum" in [
e["name"] for e in inspect(connection).get_enums()
]
connection.execute(some_table.insert(), {"data": "five"})
self.metadata.drop_all(connection)
assert "my_enum" not in [
e["name"] for e in inspect(connection).get_enums()
]
@testing.provide_metadata
def test_generic_w_some_other_variant(self, connection):
some_table = Table(
"some_table",
self.metadata,
Column(
"data",
Enum(
"one", "two", "three", name="my_enum", native_enum=True
).with_variant(Enum("four", "five", "six"), "mysql"),
),
)
assert "my_enum" not in [
e["name"] for e in inspect(connection).get_enums()
]
self.metadata.create_all(connection)
assert "my_enum" in [
e["name"] for e in inspect(connection).get_enums()
]
connection.execute(some_table.insert(), {"data": "two"})
self.metadata.drop_all(connection)
assert "my_enum" not in [
e["name"] for e in inspect(connection).get_enums()
]
class OIDTest(fixtures.TestBase):
__only_on__ = "postgresql"
__backend__ = True
@testing.provide_metadata
def test_reflection(self):
metadata = self.metadata
Table(
"table",
metadata,
Column("x", Integer),
Column("y", postgresql.OID),
)
metadata.create_all()
m2 = MetaData()
t2 = Table("table", m2, autoload_with=testing.db, autoload=True)
assert isinstance(t2.c.y.type, postgresql.OID)
class RegClassTest(fixtures.TestBase):
__only_on__ = "postgresql"
__backend__ = True
@staticmethod
def _scalar(expression):
with testing.db.connect() as conn:
return conn.scalar(select([expression]))
def test_cast_name(self):
eq_(self._scalar(cast("pg_class", postgresql.REGCLASS)), "pg_class")
def test_cast_path(self):
eq_(
self._scalar(cast("pg_catalog.pg_class", postgresql.REGCLASS)),
"pg_class",
)
def test_cast_oid(self):
regclass = cast("pg_class", postgresql.REGCLASS)
oid = self._scalar(cast(regclass, postgresql.OID))
assert isinstance(oid, int)
eq_(self._scalar(cast(oid, postgresql.REGCLASS)), "pg_class")
def test_cast_whereclause(self):
pga = Table(
"pg_attribute",
MetaData(testing.db),
Column("attrelid", postgresql.OID),
Column("attname", String(64)),
)
with testing.db.connect() as conn:
oid = conn.scalar(
select([pga.c.attrelid]).where(
pga.c.attrelid == cast("pg_class", postgresql.REGCLASS)
)
)
assert isinstance(oid, int)
class NumericInterpretationTest(fixtures.TestBase):
__only_on__ = "postgresql"
__backend__ = True
def test_numeric_codes(self):
from sqlalchemy.dialects.postgresql import (
pg8000,
pygresql,
psycopg2,
psycopg2cffi,
base,
)
dialects = (
pg8000.dialect(),
pygresql.dialect(),
psycopg2.dialect(),
psycopg2cffi.dialect(),
)
for dialect in dialects:
typ = Numeric().dialect_impl(dialect)
for code in (
base._INT_TYPES + base._FLOAT_TYPES + base._DECIMAL_TYPES
):
proc = typ.result_processor(dialect, code)
val = 23.7
if proc is not None:
val = proc(val)
assert val in (23.7, decimal.Decimal("23.7"))
@testing.provide_metadata
def test_numeric_default(self, connection):
metadata = self.metadata
# pg8000 appears to fail when the value is 0,
# returns an int instead of decimal.
t = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("nd", Numeric(asdecimal=True), default=1),
Column("nf", Numeric(asdecimal=False), default=1),
Column("fd", Float(asdecimal=True), default=1),
Column("ff", Float(asdecimal=False), default=1),
)
metadata.create_all()
connection.execute(t.insert())
row = connection.execute(t.select()).first()
assert isinstance(row[1], decimal.Decimal)
assert isinstance(row[2], float)
assert isinstance(row[3], decimal.Decimal)
assert isinstance(row[4], float)
eq_(row, (1, decimal.Decimal("1"), 1, decimal.Decimal("1"), 1))
class PythonTypeTest(fixtures.TestBase):
def test_interval(self):
is_(postgresql.INTERVAL().python_type, datetime.timedelta)
class TimezoneTest(fixtures.TestBase):
__backend__ = True
"""Test timezone-aware datetimes.
    psycopg will return a datetime with a tzinfo attached to it if
    PostgreSQL returns one. Python will then not let you compare a
    datetime with a tzinfo to a datetime that doesn't have one. This
    test illustrates two ways to have datetime types with and without
    timezone info. """
__only_on__ = "postgresql"
@classmethod
def setup_class(cls):
global tztable, notztable, metadata
metadata = MetaData(testing.db)
# current_timestamp() in postgresql is assumed to return
# TIMESTAMP WITH TIMEZONE
tztable = Table(
"tztable",
metadata,
Column("id", Integer, primary_key=True),
Column(
"date",
DateTime(timezone=True),
onupdate=func.current_timestamp(),
),
Column("name", String(20)),
)
notztable = Table(
"notztable",
metadata,
Column("id", Integer, primary_key=True),
Column(
"date",
DateTime(timezone=False),
onupdate=cast(
func.current_timestamp(), DateTime(timezone=False)
),
),
Column("name", String(20)),
)
metadata.create_all()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_with_timezone(self, connection):
# get a date with a tzinfo
somedate = testing.db.connect().scalar(
func.current_timestamp().select()
)
assert somedate.tzinfo
connection.execute(tztable.insert(), id=1, name="row1", date=somedate)
row = connection.execute(
select([tztable.c.date], tztable.c.id == 1)
).first()
eq_(row[0], somedate)
eq_(
somedate.tzinfo.utcoffset(somedate),
row[0].tzinfo.utcoffset(row[0]),
)
result = connection.execute(
tztable.update(tztable.c.id == 1).returning(tztable.c.date),
name="newname",
)
row = result.first()
assert row[0] >= somedate
def test_without_timezone(self, connection):
# get a date without a tzinfo
somedate = datetime.datetime(2005, 10, 20, 11, 52, 0)
assert not somedate.tzinfo
connection.execute(
notztable.insert(), id=1, name="row1", date=somedate
)
row = connection.execute(
select([notztable.c.date], notztable.c.id == 1)
).first()
eq_(row[0], somedate)
eq_(row[0].tzinfo, None)
result = connection.execute(
notztable.update(notztable.c.id == 1).returning(notztable.c.date),
name="newname",
)
row = result.first()
assert row[0] >= somedate
class TimePrecisionCompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = postgresql.dialect()
@testing.combinations(
(postgresql.TIME(), "TIME WITHOUT TIME ZONE"),
(postgresql.TIME(precision=5), "TIME(5) WITHOUT TIME ZONE"),
(
postgresql.TIME(timezone=True, precision=5),
"TIME(5) WITH TIME ZONE",
),
(postgresql.TIMESTAMP(), "TIMESTAMP WITHOUT TIME ZONE"),
(postgresql.TIMESTAMP(precision=5), "TIMESTAMP(5) WITHOUT TIME ZONE"),
(
postgresql.TIMESTAMP(timezone=True, precision=5),
"TIMESTAMP(5) WITH TIME ZONE",
),
(postgresql.TIME(precision=0), "TIME(0) WITHOUT TIME ZONE"),
(postgresql.TIMESTAMP(precision=0), "TIMESTAMP(0) WITHOUT TIME ZONE"),
)
def test_compile(self, type_, expected):
self.assert_compile(type_, expected)
class TimePrecisionTest(fixtures.TestBase):
__dialect__ = postgresql.dialect()
__prefer__ = "postgresql"
__backend__ = True
@testing.only_on("postgresql", "DB specific feature")
@testing.provide_metadata
def test_reflection(self):
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("c1", postgresql.TIME()),
Column("c2", postgresql.TIME(precision=5)),
Column("c3", postgresql.TIME(timezone=True, precision=5)),
Column("c4", postgresql.TIMESTAMP()),
Column("c5", postgresql.TIMESTAMP(precision=5)),
Column("c6", postgresql.TIMESTAMP(timezone=True, precision=5)),
)
t1.create()
m2 = MetaData(testing.db)
t2 = Table("t1", m2, autoload=True)
eq_(t2.c.c1.type.precision, None)
eq_(t2.c.c2.type.precision, 5)
eq_(t2.c.c3.type.precision, 5)
eq_(t2.c.c4.type.precision, None)
eq_(t2.c.c5.type.precision, 5)
eq_(t2.c.c6.type.precision, 5)
eq_(t2.c.c1.type.timezone, False)
eq_(t2.c.c2.type.timezone, False)
eq_(t2.c.c3.type.timezone, True)
eq_(t2.c.c4.type.timezone, False)
eq_(t2.c.c5.type.timezone, False)
eq_(t2.c.c6.type.timezone, True)
class ArrayTest(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "postgresql"
def test_array_literal(self):
obj = postgresql.array([1, 2]) + postgresql.array([3, 4, 5])
self.assert_compile(
obj,
"ARRAY[%(param_1)s, %(param_2)s] || "
"ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]",
params={
"param_1": 1,
"param_2": 2,
"param_3": 3,
"param_4": 4,
"param_5": 5,
},
)
self.assert_compile(
obj[1],
"(ARRAY[%(param_1)s, %(param_2)s] || ARRAY[%(param_3)s, "
"%(param_4)s, %(param_5)s])[%(param_6)s]",
params={
"param_1": 1,
"param_2": 2,
"param_3": 3,
"param_4": 4,
"param_5": 5,
},
)
def test_array_literal_getitem_multidim(self):
obj = postgresql.array(
[postgresql.array([1, 2]), postgresql.array([3, 4])]
)
self.assert_compile(
obj,
"ARRAY[ARRAY[%(param_1)s, %(param_2)s], "
"ARRAY[%(param_3)s, %(param_4)s]]",
)
self.assert_compile(
obj[1],
"(ARRAY[ARRAY[%(param_1)s, %(param_2)s], "
"ARRAY[%(param_3)s, %(param_4)s]])[%(param_5)s]",
)
self.assert_compile(
obj[1][0],
"(ARRAY[ARRAY[%(param_1)s, %(param_2)s], "
"ARRAY[%(param_3)s, %(param_4)s]])[%(param_5)s][%(param_6)s]",
)
def test_array_type_render_str(self):
self.assert_compile(postgresql.ARRAY(Unicode(30)), "VARCHAR(30)[]")
def test_array_type_render_str_collate(self):
self.assert_compile(
postgresql.ARRAY(Unicode(30, collation="en_US")),
'VARCHAR(30)[] COLLATE "en_US"',
)
def test_array_type_render_str_multidim(self):
self.assert_compile(
postgresql.ARRAY(Unicode(30), dimensions=2), "VARCHAR(30)[][]"
)
self.assert_compile(
postgresql.ARRAY(Unicode(30), dimensions=3), "VARCHAR(30)[][][]"
)
def test_array_type_render_str_collate_multidim(self):
self.assert_compile(
postgresql.ARRAY(Unicode(30, collation="en_US"), dimensions=2),
'VARCHAR(30)[][] COLLATE "en_US"',
)
self.assert_compile(
postgresql.ARRAY(Unicode(30, collation="en_US"), dimensions=3),
'VARCHAR(30)[][][] COLLATE "en_US"',
)
def test_array_int_index(self):
col = column("x", postgresql.ARRAY(Integer))
self.assert_compile(
select([col[3]]),
"SELECT x[%(x_1)s] AS anon_1",
checkparams={"x_1": 3},
)
def test_array_any(self):
col = column("x", postgresql.ARRAY(Integer))
self.assert_compile(
select([col.any(7, operator=operators.lt)]),
"SELECT %(param_1)s < ANY (x) AS anon_1",
checkparams={"param_1": 7},
)
def test_array_all(self):
col = column("x", postgresql.ARRAY(Integer))
self.assert_compile(
select([col.all(7, operator=operators.lt)]),
"SELECT %(param_1)s < ALL (x) AS anon_1",
checkparams={"param_1": 7},
)
def test_array_contains(self):
col = column("x", postgresql.ARRAY(Integer))
self.assert_compile(
select([col.contains(array([4, 5, 6]))]),
"SELECT x @> ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] "
"AS anon_1",
checkparams={"param_1": 4, "param_3": 6, "param_2": 5},
)
def test_contains_override_raises(self):
col = column("x", postgresql.ARRAY(Integer))
assert_raises_message(
NotImplementedError,
"Operator 'contains' is not supported on this expression",
lambda: "foo" in col,
)
def test_array_contained_by(self):
col = column("x", postgresql.ARRAY(Integer))
self.assert_compile(
select([col.contained_by(array([4, 5, 6]))]),
"SELECT x <@ ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] "
"AS anon_1",
checkparams={"param_1": 4, "param_3": 6, "param_2": 5},
)
def test_array_overlap(self):
col = column("x", postgresql.ARRAY(Integer))
self.assert_compile(
select([col.overlap(array([4, 5, 6]))]),
"SELECT x && ARRAY[%(param_1)s, %(param_2)s, %(param_3)s] "
"AS anon_1",
checkparams={"param_1": 4, "param_3": 6, "param_2": 5},
)
def test_array_slice_index(self):
col = column("x", postgresql.ARRAY(Integer))
self.assert_compile(
select([col[5:10]]),
"SELECT x[%(x_1)s:%(x_2)s] AS anon_1",
checkparams={"x_2": 10, "x_1": 5},
)
def test_array_dim_index(self):
col = column("x", postgresql.ARRAY(Integer, dimensions=2))
self.assert_compile(
select([col[3][5]]),
"SELECT x[%(x_1)s][%(param_1)s] AS anon_1",
checkparams={"x_1": 3, "param_1": 5},
)
def test_array_concat(self):
col = column("x", postgresql.ARRAY(Integer))
literal = array([4, 5])
self.assert_compile(
select([col + literal]),
"SELECT x || ARRAY[%(param_1)s, %(param_2)s] AS anon_1",
checkparams={"param_1": 4, "param_2": 5},
)
def test_array_index_map_dimensions(self):
col = column("x", postgresql.ARRAY(Integer, dimensions=3))
is_(col[5].type._type_affinity, ARRAY)
assert isinstance(col[5].type, postgresql.ARRAY)
eq_(col[5].type.dimensions, 2)
is_(col[5][6].type._type_affinity, ARRAY)
assert isinstance(col[5][6].type, postgresql.ARRAY)
eq_(col[5][6].type.dimensions, 1)
is_(col[5][6][7].type._type_affinity, Integer)
def test_array_getitem_single_type(self):
m = MetaData()
arrtable = Table(
"arrtable",
m,
Column("intarr", postgresql.ARRAY(Integer)),
Column("strarr", postgresql.ARRAY(String)),
)
is_(arrtable.c.intarr[1].type._type_affinity, Integer)
is_(arrtable.c.strarr[1].type._type_affinity, String)
def test_array_getitem_slice_type(self):
m = MetaData()
arrtable = Table(
"arrtable",
m,
Column("intarr", postgresql.ARRAY(Integer)),
Column("strarr", postgresql.ARRAY(String)),
)
# type affinity is Array...
is_(arrtable.c.intarr[1:3].type._type_affinity, ARRAY)
is_(arrtable.c.strarr[1:3].type._type_affinity, ARRAY)
# but the slice returns the actual type
assert isinstance(arrtable.c.intarr[1:3].type, postgresql.ARRAY)
assert isinstance(arrtable.c.strarr[1:3].type, postgresql.ARRAY)
def test_array_functions_plus_getitem(self):
"""test parenthesizing of functions plus indexing, which seems
to be required by PostgreSQL.
"""
stmt = select(
[
func.array_cat(
array([1, 2, 3]),
array([4, 5, 6]),
type_=postgresql.ARRAY(Integer),
)[2:5]
]
)
self.assert_compile(
stmt,
"SELECT (array_cat(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s], "
"ARRAY[%(param_4)s, %(param_5)s, %(param_6)s]))"
"[%(param_7)s:%(param_8)s] AS anon_1",
)
self.assert_compile(
func.array_cat(
array([1, 2, 3]),
array([4, 5, 6]),
type_=postgresql.ARRAY(Integer),
)[3],
"(array_cat(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s], "
"ARRAY[%(param_4)s, %(param_5)s, %(param_6)s]))[%(array_cat_1)s]",
)
def test_array_agg_generic(self):
expr = func.array_agg(column("q", Integer))
is_(expr.type.__class__, types.ARRAY)
is_(expr.type.item_type.__class__, Integer)
def test_array_agg_specific(self):
from sqlalchemy.dialects.postgresql import array_agg
expr = array_agg(column("q", Integer))
is_(expr.type.__class__, postgresql.ARRAY)
is_(expr.type.item_type.__class__, Integer)
class ArrayRoundTripTest(object):
__only_on__ = "postgresql"
__backend__ = True
__unsupported_on__ = ("postgresql+pg8000",)
ARRAY = postgresql.ARRAY
@classmethod
def define_tables(cls, metadata):
class ProcValue(TypeDecorator):
impl = cls.ARRAY(Integer, dimensions=2)
def process_bind_param(self, value, dialect):
if value is None:
return None
return [[x + 5 for x in v] for v in value]
def process_result_value(self, value, dialect):
if value is None:
return None
return [[x - 7 for x in v] for v in value]
Table(
"arrtable",
metadata,
Column("id", Integer, primary_key=True),
Column("intarr", cls.ARRAY(Integer)),
Column("strarr", cls.ARRAY(Unicode())),
Column("dimarr", ProcValue),
)
Table(
"dim_arrtable",
metadata,
Column("id", Integer, primary_key=True),
Column("intarr", cls.ARRAY(Integer, dimensions=1)),
Column("strarr", cls.ARRAY(Unicode(), dimensions=1)),
Column("dimarr", ProcValue),
)
def _fixture_456(self, table):
with testing.db.begin() as conn:
conn.execute(table.insert(), intarr=[4, 5, 6])
def test_reflect_array_column(self):
metadata2 = MetaData(testing.db)
tbl = Table("arrtable", metadata2, autoload=True)
assert isinstance(tbl.c.intarr.type, self.ARRAY)
assert isinstance(tbl.c.strarr.type, self.ARRAY)
assert isinstance(tbl.c.intarr.type.item_type, Integer)
assert isinstance(tbl.c.strarr.type.item_type, String)
@testing.provide_metadata
def test_array_str_collation(self):
m = self.metadata
t = Table(
"t",
m,
Column("data", sqltypes.ARRAY(String(50, collation="en_US"))),
)
t.create()
@testing.provide_metadata
def test_array_agg(self, connection):
values_table = Table("values", self.metadata, Column("value", Integer))
self.metadata.create_all(testing.db)
connection.execute(
values_table.insert(), [{"value": i} for i in range(1, 10)]
)
stmt = select([func.array_agg(values_table.c.value)])
eq_(connection.execute(stmt).scalar(), list(range(1, 10)))
stmt = select([func.array_agg(values_table.c.value)[3]])
eq_(connection.execute(stmt).scalar(), 3)
stmt = select([func.array_agg(values_table.c.value)[2:4]])
eq_(connection.execute(stmt).scalar(), [2, 3, 4])
def test_array_index_slice_exprs(self, connection):
"""test a variety of expressions that sometimes need parenthesizing"""
stmt = select([array([1, 2, 3, 4])[2:3]])
eq_(connection.execute(stmt).scalar(), [2, 3])
stmt = select([array([1, 2, 3, 4])[2]])
eq_(connection.execute(stmt).scalar(), 2)
stmt = select([(array([1, 2]) + array([3, 4]))[2:3]])
eq_(connection.execute(stmt).scalar(), [2, 3])
stmt = select([array([1, 2]) + array([3, 4])[2:3]])
eq_(connection.execute(stmt).scalar(), [1, 2, 4])
stmt = select([array([1, 2])[2:3] + array([3, 4])])
eq_(connection.execute(stmt).scalar(), [2, 3, 4])
stmt = select(
[
func.array_cat(
array([1, 2, 3]),
array([4, 5, 6]),
type_=self.ARRAY(Integer),
)[2:5]
]
)
eq_(connection.execute(stmt).scalar(), [2, 3, 4, 5])
def test_any_all_exprs_array(self, connection):
stmt = select(
[
3
== any_(
func.array_cat(
array([1, 2, 3]),
array([4, 5, 6]),
type_=self.ARRAY(Integer),
)
)
]
)
eq_(connection.execute(stmt).scalar(), True)
def test_insert_array(self, connection):
arrtable = self.tables.arrtable
connection.execute(
arrtable.insert(),
intarr=[1, 2, 3],
strarr=[util.u("abc"), util.u("def")],
)
results = connection.execute(arrtable.select()).fetchall()
eq_(len(results), 1)
eq_(results[0].intarr, [1, 2, 3])
eq_(results[0].strarr, [util.u("abc"), util.u("def")])
def test_insert_array_w_null(self, connection):
arrtable = self.tables.arrtable
connection.execute(
arrtable.insert(),
intarr=[1, None, 3],
strarr=[util.u("abc"), None],
)
results = connection.execute(arrtable.select()).fetchall()
eq_(len(results), 1)
eq_(results[0].intarr, [1, None, 3])
eq_(results[0].strarr, [util.u("abc"), None])
def test_array_where(self, connection):
arrtable = self.tables.arrtable
connection.execute(
arrtable.insert(),
intarr=[1, 2, 3],
strarr=[util.u("abc"), util.u("def")],
)
connection.execute(
arrtable.insert(), intarr=[4, 5, 6], strarr=util.u("ABC")
)
results = connection.execute(
arrtable.select().where(arrtable.c.intarr == [1, 2, 3])
).fetchall()
eq_(len(results), 1)
eq_(results[0].intarr, [1, 2, 3])
def test_array_concat(self, connection):
arrtable = self.tables.arrtable
connection.execute(
arrtable.insert(),
intarr=[1, 2, 3],
strarr=[util.u("abc"), util.u("def")],
)
results = connection.execute(
select([arrtable.c.intarr + [4, 5, 6]])
).fetchall()
eq_(len(results), 1)
eq_(results[0][0], [1, 2, 3, 4, 5, 6])
def test_array_comparison(self, connection):
arrtable = self.tables.arrtable
connection.execute(
arrtable.insert(),
id=5,
intarr=[1, 2, 3],
strarr=[util.u("abc"), util.u("def")],
)
results = connection.execute(
select([arrtable.c.id]).where(arrtable.c.intarr < [4, 5, 6])
).fetchall()
eq_(len(results), 1)
eq_(results[0][0], 5)
def test_array_subtype_resultprocessor(self, connection):
arrtable = self.tables.arrtable
connection.execute(
arrtable.insert(),
intarr=[4, 5, 6],
strarr=[[util.ue("m\xe4\xe4")], [util.ue("m\xf6\xf6")]],
)
connection.execute(
arrtable.insert(),
intarr=[1, 2, 3],
strarr=[util.ue("m\xe4\xe4"), util.ue("m\xf6\xf6")],
)
results = connection.execute(
arrtable.select(order_by=[arrtable.c.intarr])
).fetchall()
eq_(len(results), 2)
eq_(results[0].strarr, [util.ue("m\xe4\xe4"), util.ue("m\xf6\xf6")])
eq_(
results[1].strarr,
[[util.ue("m\xe4\xe4")], [util.ue("m\xf6\xf6")]],
)
def test_array_literal_roundtrip(self, connection):
eq_(
connection.scalar(
select(
[postgresql.array([1, 2]) + postgresql.array([3, 4, 5])]
)
),
[1, 2, 3, 4, 5],
)
eq_(
connection.scalar(
select(
[
(
postgresql.array([1, 2])
+ postgresql.array([3, 4, 5])
)[3]
]
)
),
3,
)
eq_(
connection.scalar(
select(
[
(
postgresql.array([1, 2])
+ postgresql.array([3, 4, 5])
)[2:4]
]
)
),
[2, 3, 4],
)
def test_array_literal_multidimensional_roundtrip(self, connection):
eq_(
connection.scalar(
select(
[
postgresql.array(
[
postgresql.array([1, 2]),
postgresql.array([3, 4]),
]
)
]
)
),
[[1, 2], [3, 4]],
)
eq_(
connection.scalar(
select(
[
postgresql.array(
[
postgresql.array([1, 2]),
postgresql.array([3, 4]),
]
)[2][1]
]
)
),
3,
)
def test_array_literal_compare(self, connection):
eq_(
connection.scalar(select([postgresql.array([1, 2]) < [3, 4, 5]])),
True,
)
def test_array_getitem_single_exec(self, connection):
arrtable = self.tables.arrtable
self._fixture_456(arrtable)
eq_(connection.scalar(select([arrtable.c.intarr[2]])), 5)
connection.execute(arrtable.update().values({arrtable.c.intarr[2]: 7}))
eq_(connection.scalar(select([arrtable.c.intarr[2]])), 7)
def test_array_getitem_slice_exec(self, connection):
arrtable = self.tables.arrtable
connection.execute(
arrtable.insert(),
intarr=[4, 5, 6],
strarr=[util.u("abc"), util.u("def")],
)
eq_(connection.scalar(select([arrtable.c.intarr[2:3]])), [5, 6])
connection.execute(
arrtable.update().values({arrtable.c.intarr[2:3]: [7, 8]})
)
eq_(connection.scalar(select([arrtable.c.intarr[2:3]])), [7, 8])
def test_multi_dim_roundtrip(self, connection):
arrtable = self.tables.arrtable
connection.execute(arrtable.insert(), dimarr=[[1, 2, 3], [4, 5, 6]])
eq_(
connection.scalar(select([arrtable.c.dimarr])),
[[-1, 0, 1], [2, 3, 4]],
)
def test_array_any_exec(self, connection):
arrtable = self.tables.arrtable
connection.execute(arrtable.insert(), intarr=[4, 5, 6])
eq_(
connection.scalar(
select([arrtable.c.intarr]).where(
postgresql.Any(5, arrtable.c.intarr)
)
),
[4, 5, 6],
)
def test_array_all_exec(self, connection):
arrtable = self.tables.arrtable
connection.execute(arrtable.insert(), intarr=[4, 5, 6])
eq_(
connection.scalar(
select([arrtable.c.intarr]).where(
arrtable.c.intarr.all(4, operator=operators.le)
)
),
[4, 5, 6],
)
@testing.provide_metadata
def test_tuple_flag(self, connection):
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
Column("data", self.ARRAY(String(5), as_tuple=True)),
Column(
"data2", self.ARRAY(Numeric(asdecimal=False), as_tuple=True)
),
)
metadata.create_all()
connection.execute(
t1.insert(), id=1, data=["1", "2", "3"], data2=[5.4, 5.6]
)
connection.execute(
t1.insert(), id=2, data=["4", "5", "6"], data2=[1.0]
)
connection.execute(
t1.insert(),
id=3,
data=[["4", "5"], ["6", "7"]],
data2=[[5.4, 5.6], [1.0, 1.1]],
)
r = connection.execute(t1.select().order_by(t1.c.id)).fetchall()
eq_(
r,
[
(1, ("1", "2", "3"), (5.4, 5.6)),
(2, ("4", "5", "6"), (1.0,)),
(3, (("4", "5"), ("6", "7")), ((5.4, 5.6), (1.0, 1.1))),
],
)
# hashable
eq_(
set(row[1] for row in r),
set([("1", "2", "3"), ("4", "5", "6"), (("4", "5"), ("6", "7"))]),
)
def test_array_plus_native_enum_create(self):
m = MetaData()
t = Table(
"t",
m,
Column(
"data_1",
self.ARRAY(postgresql.ENUM("a", "b", "c", name="my_enum_1")),
),
Column(
"data_2",
self.ARRAY(types.Enum("a", "b", "c", name="my_enum_2")),
),
)
t.create(testing.db)
eq_(
set(e["name"] for e in inspect(testing.db).get_enums()),
set(["my_enum_1", "my_enum_2"]),
)
t.drop(testing.db)
eq_(inspect(testing.db).get_enums(), [])
class CoreArrayRoundTripTest(
ArrayRoundTripTest, fixtures.TablesTest, AssertsExecutionResults
):
ARRAY = sqltypes.ARRAY
class PGArrayRoundTripTest(
ArrayRoundTripTest, fixtures.TablesTest, AssertsExecutionResults
):
ARRAY = postgresql.ARRAY
@testing.combinations((set,), (list,), (lambda elem: (x for x in elem),))
def test_undim_array_contains_typed_exec(self, struct):
arrtable = self.tables.arrtable
self._fixture_456(arrtable)
with testing.db.begin() as conn:
eq_(
conn.scalar(
select([arrtable.c.intarr]).where(
arrtable.c.intarr.contains(struct([4, 5]))
)
),
[4, 5, 6],
)
@testing.combinations((set,), (list,), (lambda elem: (x for x in elem),))
def test_dim_array_contains_typed_exec(self, struct):
dim_arrtable = self.tables.dim_arrtable
self._fixture_456(dim_arrtable)
with testing.db.begin() as conn:
eq_(
conn.scalar(
select([dim_arrtable.c.intarr]).where(
dim_arrtable.c.intarr.contains(struct([4, 5]))
)
),
[4, 5, 6],
)
def test_array_contained_by_exec(self, connection):
arrtable = self.tables.arrtable
connection.execute(arrtable.insert(), intarr=[6, 5, 4])
eq_(
connection.scalar(
select([arrtable.c.intarr.contained_by([4, 5, 6, 7])])
),
True,
)
def test_undim_array_empty(self, connection):
arrtable = self.tables.arrtable
self._fixture_456(arrtable)
eq_(
connection.scalar(
select([arrtable.c.intarr]).where(
arrtable.c.intarr.contains([])
)
),
[4, 5, 6],
)
def test_array_overlap_exec(self, connection):
arrtable = self.tables.arrtable
connection.execute(arrtable.insert(), intarr=[4, 5, 6])
eq_(
connection.scalar(
select([arrtable.c.intarr]).where(
arrtable.c.intarr.overlap([7, 6])
)
),
[4, 5, 6],
)
class HashableFlagORMTest(fixtures.TestBase):
"""test the various 'collection' types that they flip the 'hashable' flag
appropriately. [ticket:3499]"""
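# Background note (summary, not authoritative): the 'hashable' flag on a type
# tells the ORM whether row values of that type can be hashed for result
# de-duplication; list/dict-like results such as ARRAY, JSON and HSTORE are
# expected to opt out, so query(A, A.data) below must not try to hash them.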
__only_on__ = "postgresql"
@testing.combinations(
(
"ARRAY",
postgresql.ARRAY(Text()),
[["a", "b", "c"], ["d", "e", "f"]],
),
(
"JSON",
postgresql.JSON(),
[
{"a": "1", "b": "2", "c": "3"},
{
"d": "4",
"e": {"e1": "5", "e2": "6"},
"f": {"f1": [9, 10, 11]},
},
],
),
id_="iaa",
)
@testing.provide_metadata
def test_hashable_flag(self, type_, data):
Base = declarative_base(metadata=self.metadata)
class A(Base):
__tablename__ = "a1"
id = Column(Integer, primary_key=True)
data = Column(type_)
Base.metadata.create_all(testing.db)
s = Session(testing.db)
s.add_all([A(data=elem) for elem in data])
s.commit()
eq_(
[
(obj.A.id, obj.data)
for obj in s.query(A, A.data).order_by(A.id)
],
list(enumerate(data, 1)),
)
@testing.requires.hstore
def test_hstore(self):
self.test_hashable_flag(
postgresql.HSTORE(),
[{"a": "1", "b": "2", "c": "3"}, {"d": "4", "e": "5", "f": "6"}],
)
@testing.requires.postgresql_jsonb
def test_jsonb(self):
self.test_hashable_flag(
postgresql.JSONB(),
[
{"a": "1", "b": "2", "c": "3"},
{
"d": "4",
"e": {"e1": "5", "e2": "6"},
"f": {"f1": [9, 10, 11]},
},
],
)
class TimestampTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = "postgresql"
__backend__ = True
def test_timestamp(self, connection):
s = select([text("timestamp '2007-12-25'")])
result = connection.execute(s).first()
eq_(result[0], datetime.datetime(2007, 12, 25, 0, 0))
def test_interval_arithmetic(self, connection):
# basically testing that we get timedelta back for an INTERVAL
# result. more of a driver assertion.
s = select([text("timestamp '2007-12-25' - timestamp '2007-11-15'")])
result = connection.execute(s).first()
eq_(result[0], datetime.timedelta(40))
def test_interval_coercion(self):
expr = column("bar", postgresql.INTERVAL) + column("foo", types.Date)
eq_(expr.type._type_affinity, types.DateTime)
expr = column("bar", postgresql.INTERVAL) * column(
"foo", types.Numeric
)
eq_(expr.type._type_affinity, types.Interval)
assert isinstance(expr.type, postgresql.INTERVAL)
class SpecialTypesCompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "postgresql"
@testing.combinations(
(postgresql.BIT(), "BIT(1)"),
(postgresql.BIT(5), "BIT(5)"),
(postgresql.BIT(varying=True), "BIT VARYING"),
(postgresql.BIT(5, varying=True), "BIT VARYING(5)"),
)
def test_bit_compile(self, type_, expected):
self.assert_compile(type_, expected)
class SpecialTypesTest(fixtures.TablesTest, ComparesTables):
"""test DDL and reflection of PG-specific types """
__only_on__ = ("postgresql >= 8.3.0",)
__backend__ = True
@testing.metadata_fixture()
def special_types_table(self, metadata):
# create these types so that we can issue
# special SQL92 INTERVAL syntax
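# (y2m / d2s below only override get_col_spec so the emitted DDL uses the
# SQL92 forms; on reflection PostgreSQL reports them as plain INTERVAL,
# which is presumably why test_reflection swaps the types back before comparing.)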
class y2m(types.UserDefinedType, postgresql.INTERVAL):
def get_col_spec(self):
return "INTERVAL YEAR TO MONTH"
class d2s(types.UserDefinedType, postgresql.INTERVAL):
def get_col_spec(self):
return "INTERVAL DAY TO SECOND"
table = Table(
"sometable",
metadata,
Column("id", postgresql.UUID, primary_key=True),
Column("flag", postgresql.BIT),
Column("bitstring", postgresql.BIT(4)),
Column("addr", postgresql.INET),
Column("addr2", postgresql.MACADDR),
Column("price", postgresql.MONEY),
Column("addr3", postgresql.CIDR),
Column("doubleprec", postgresql.DOUBLE_PRECISION),
Column("plain_interval", postgresql.INTERVAL),
Column("year_interval", y2m()),
Column("month_interval", d2s()),
Column("precision_interval", postgresql.INTERVAL(precision=3)),
Column("tsvector_document", postgresql.TSVECTOR),
)
return table
def test_reflection(self, special_types_table):
# cheat so that the "strict type check"
# works
special_types_table.c.year_interval.type = postgresql.INTERVAL()
special_types_table.c.month_interval.type = postgresql.INTERVAL()
m = MetaData(testing.db)
t = Table("sometable", m, autoload=True)
self.assert_tables_equal(special_types_table, t, strict_types=True)
assert t.c.plain_interval.type.precision is None
assert t.c.precision_interval.type.precision == 3
assert t.c.bitstring.type.length == 4
@testing.provide_metadata
def test_tsvector_round_trip(self, connection):
t = Table("t1", self.metadata, Column("data", postgresql.TSVECTOR))
t.create()
connection.execute(t.insert(), data="a fat cat sat")
eq_(connection.scalar(select([t.c.data])), "'a' 'cat' 'fat' 'sat'")
connection.execute(t.update(), data="'a' 'cat' 'fat' 'mat' 'sat'")
eq_(
connection.scalar(select([t.c.data])),
"'a' 'cat' 'fat' 'mat' 'sat'",
)
@testing.provide_metadata
def test_bit_reflection(self):
metadata = self.metadata
t1 = Table(
"t1",
metadata,
Column("bit1", postgresql.BIT()),
Column("bit5", postgresql.BIT(5)),
Column("bitvarying", postgresql.BIT(varying=True)),
Column("bitvarying5", postgresql.BIT(5, varying=True)),
)
t1.create()
m2 = MetaData(testing.db)
t2 = Table("t1", m2, autoload=True)
eq_(t2.c.bit1.type.length, 1)
eq_(t2.c.bit1.type.varying, False)
eq_(t2.c.bit5.type.length, 5)
eq_(t2.c.bit5.type.varying, False)
eq_(t2.c.bitvarying.type.length, None)
eq_(t2.c.bitvarying.type.varying, True)
eq_(t2.c.bitvarying5.type.length, 5)
eq_(t2.c.bitvarying5.type.varying, True)
class UUIDTest(fixtures.TestBase):
"""Test the bind/return values of the UUID type."""
__only_on__ = "postgresql >= 8.3"
__backend__ = True
@testing.combinations(
(
"not_as_uuid",
postgresql.UUID(as_uuid=False),
str(uuid.uuid4()),
str(uuid.uuid4()),
),
("as_uuid", postgresql.UUID(as_uuid=True), uuid.uuid4(), uuid.uuid4()),
id_="iaaa",
argnames="datatype, value1, value2",
)
def test_round_trip(self, datatype, value1, value2, connection):
utable = Table("utable", MetaData(), Column("data", datatype))
utable.create(connection)
connection.execute(utable.insert(), {"data": value1})
connection.execute(utable.insert(), {"data": value2})
r = connection.execute(
select([utable.c.data]).where(utable.c.data != value1)
)
eq_(r.fetchone()[0], value2)
eq_(r.fetchone(), None)
@testing.combinations(
(
"as_uuid",
postgresql.ARRAY(postgresql.UUID(as_uuid=True)),
[uuid.uuid4(), uuid.uuid4()],
[uuid.uuid4(), uuid.uuid4()],
),
(
"not_as_uuid",
postgresql.ARRAY(postgresql.UUID(as_uuid=False)),
[str(uuid.uuid4()), str(uuid.uuid4())],
[str(uuid.uuid4()), str(uuid.uuid4())],
),
id_="iaaa",
argnames="datatype, value1, value2",
)
@testing.fails_on("postgresql+pg8000", "No support for UUID with ARRAY")
def test_uuid_array(self, datatype, value1, value2, connection):
self.test_round_trip(datatype, value1, value2, connection)
def test_no_uuid_available(self):
uuid_type = base._python_UUID
base._python_UUID = None
try:
assert_raises(NotImplementedError, postgresql.UUID, as_uuid=True)
finally:
base._python_UUID = uuid_type
class HStoreTest(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "postgresql"
def setup(self):
metadata = MetaData()
self.test_table = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("hash", HSTORE),
)
self.hashcol = self.test_table.c.hash
def _test_where(self, whereclause, expected):
stmt = select([self.test_table]).where(whereclause)
self.assert_compile(
stmt,
"SELECT test_table.id, test_table.hash FROM test_table "
"WHERE %s" % expected,
)
def test_bind_serialize_default(self):
dialect = postgresql.dialect()
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
eq_(
proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])),
'"key1"=>"value1", "key2"=>"value2"',
)
def test_bind_serialize_with_slashes_and_quotes(self):
dialect = postgresql.dialect()
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
eq_(proc({'\\"a': '\\"1'}), '"\\\\\\"a"=>"\\\\\\"1"')
def test_parse_error(self):
dialect = postgresql.dialect()
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
assert_raises_message(
ValueError,
r"""After u?'\[\.\.\.\], "key1"=>"value1", ', could not parse """
r"""residual at position 36: u?'crapcrapcrap, "key3"\[\.\.\.\]""",
proc,
'"key2"=>"value2", "key1"=>"value1", '
'crapcrapcrap, "key3"=>"value3"',
)
def test_result_deserialize_default(self):
dialect = postgresql.dialect()
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
eq_(
proc('"key2"=>"value2", "key1"=>"value1"'),
{"key1": "value1", "key2": "value2"},
)
def test_result_deserialize_with_slashes_and_quotes(self):
dialect = postgresql.dialect()
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
eq_(proc('"\\\\\\"a"=>"\\\\\\"1"'), {'\\"a': '\\"1'})
def test_bind_serialize_psycopg2(self):
from sqlalchemy.dialects.postgresql import psycopg2
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = True
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
is_(proc, None)
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = False
proc = self.test_table.c.hash.type._cached_bind_processor(dialect)
eq_(
proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])),
'"key1"=>"value1", "key2"=>"value2"',
)
def test_result_deserialize_psycopg2(self):
from sqlalchemy.dialects.postgresql import psycopg2
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = True
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
is_(proc, None)
dialect = psycopg2.PGDialect_psycopg2()
dialect._has_native_hstore = False
proc = self.test_table.c.hash.type._cached_result_processor(
dialect, None
)
eq_(
proc('"key2"=>"value2", "key1"=>"value1"'),
{"key1": "value1", "key2": "value2"},
)
def test_ret_type_text(self):
col = column("x", HSTORE())
is_(col["foo"].type.__class__, Text)
def test_ret_type_custom(self):
class MyType(types.UserDefinedType):
pass
col = column("x", HSTORE(text_type=MyType))
is_(col["foo"].type.__class__, MyType)
def test_where_has_key(self):
self._test_where(
# hide from 2to3
getattr(self.hashcol, "has_key")("foo"),
"test_table.hash ? %(hash_1)s",
)
def test_where_has_all(self):
self._test_where(
self.hashcol.has_all(postgresql.array(["1", "2"])),
"test_table.hash ?& ARRAY[%(param_1)s, %(param_2)s]",
)
def test_where_has_any(self):
self._test_where(
self.hashcol.has_any(postgresql.array(["1", "2"])),
"test_table.hash ?| ARRAY[%(param_1)s, %(param_2)s]",
)
def test_where_defined(self):
self._test_where(
self.hashcol.defined("foo"),
"defined(test_table.hash, %(defined_1)s)",
)
def test_where_contains(self):
self._test_where(
self.hashcol.contains({"foo": "1"}),
"test_table.hash @> %(hash_1)s",
)
def test_where_contained_by(self):
self._test_where(
self.hashcol.contained_by({"foo": "1", "bar": None}),
"test_table.hash <@ %(hash_1)s",
)
def test_where_getitem(self):
self._test_where(
self.hashcol["bar"] == None, # noqa
"(test_table.hash -> %(hash_1)s) IS NULL",
)
@testing.combinations(
(
lambda self: self.hashcol["foo"],
"test_table.hash -> %(hash_1)s AS anon_1",
True,
),
(
lambda self: self.hashcol.delete("foo"),
"delete(test_table.hash, %(delete_2)s) AS delete_1",
True,
),
(
lambda self: self.hashcol.delete(postgresql.array(["foo", "bar"])),
(
"delete(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
"AS delete_1"
),
True,
),
(
lambda self: self.hashcol.delete(hstore("1", "2")),
(
"delete(test_table.hash, hstore(%(hstore_1)s, %(hstore_2)s)) "
"AS delete_1"
),
True,
),
(
lambda self: self.hashcol.slice(postgresql.array(["1", "2"])),
(
"slice(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) "
"AS slice_1"
),
True,
),
(
lambda self: hstore("foo", "3")["foo"],
"hstore(%(hstore_1)s, %(hstore_2)s) -> %(hstore_3)s AS anon_1",
False,
),
(
lambda self: hstore(
postgresql.array(["1", "2"]), postgresql.array(["3", None])
)["1"],
(
"hstore(ARRAY[%(param_1)s, %(param_2)s], "
"ARRAY[%(param_3)s, NULL]) -> %(hstore_1)s AS anon_1"
),
False,
),
(
lambda self: hstore(postgresql.array(["1", "2", "3", None]))["3"],
(
"hstore(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s, NULL]) "
"-> %(hstore_1)s AS anon_1"
),
False,
),
(
lambda self: self.hashcol.concat(
hstore(cast(self.test_table.c.id, Text), "3")
),
(
"test_table.hash || hstore(CAST(test_table.id AS TEXT), "
"%(hstore_1)s) AS anon_1"
),
True,
),
(
lambda self: hstore("foo", "bar") + self.hashcol,
"hstore(%(hstore_1)s, %(hstore_2)s) || test_table.hash AS anon_1",
True,
),
(
lambda self: (self.hashcol + self.hashcol)["foo"],
"(test_table.hash || test_table.hash) -> %(param_1)s AS anon_1",
True,
),
(
lambda self: self.hashcol["foo"] != None, # noqa
"(test_table.hash -> %(hash_1)s) IS NOT NULL AS anon_1",
True,
),
(
# hide from 2to3
lambda self: getattr(self.hashcol, "keys")(),
"akeys(test_table.hash) AS akeys_1",
True,
),
(
lambda self: self.hashcol.vals(),
"avals(test_table.hash) AS avals_1",
True,
),
(
lambda self: self.hashcol.array(),
"hstore_to_array(test_table.hash) AS hstore_to_array_1",
True,
),
(
lambda self: self.hashcol.matrix(),
"hstore_to_matrix(test_table.hash) AS hstore_to_matrix_1",
True,
),
)
def test_cols(self, colclause_fn, expected, from_):
colclause = colclause_fn(self)
stmt = select([colclause])
self.assert_compile(
stmt,
("SELECT %s" + (" FROM test_table" if from_ else "")) % expected,
)
class HStoreRoundTripTest(fixtures.TablesTest):
__requires__ = ("hstore",)
__dialect__ = "postgresql"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30), nullable=False),
Column("data", HSTORE),
)
def _fixture_data(self, engine):
data_table = self.tables.data_table
with engine.begin() as conn:
conn.execute(
data_table.insert(),
{"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}},
{"name": "r2", "data": {"k1": "r2v1", "k2": "r2v2"}},
{"name": "r3", "data": {"k1": "r3v1", "k2": "r3v2"}},
{"name": "r4", "data": {"k1": "r4v1", "k2": "r4v2"}},
{"name": "r5", "data": {"k1": "r5v1", "k2": "r5v2"}},
)
def _assert_data(self, compare, conn):
data = conn.execute(
select([self.tables.data_table.c.data]).order_by(
self.tables.data_table.c.name
)
).fetchall()
eq_([d for d, in data], compare)
def _test_insert(self, engine):
with engine.begin() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}},
)
self._assert_data([{"k1": "r1v1", "k2": "r1v2"}], conn)
def _non_native_engine(self):
if testing.requires.psycopg2_native_hstore.enabled:
engine = engines.testing_engine(
options=dict(use_native_hstore=False)
)
else:
engine = testing.db
engine.connect().close()
return engine
def test_reflect(self):
insp = inspect(testing.db)
cols = insp.get_columns("data_table")
assert isinstance(cols[2]["type"], HSTORE)
def test_literal_round_trip(self, connection):
# in particular, this tests that the array index
# operator against the function is handled by PG; with some
# array functions it requires outer parenthesization on the left and
# we may not be doing that here
expr = hstore(
postgresql.array(["1", "2"]), postgresql.array(["3", None])
)["1"]
eq_(connection.scalar(select([expr])), "3")
@testing.requires.psycopg2_native_hstore
def test_insert_native(self):
engine = testing.db
self._test_insert(engine)
def test_insert_python(self):
engine = self._non_native_engine()
self._test_insert(engine)
@testing.requires.psycopg2_native_hstore
def test_criterion_native(self):
engine = testing.db
self._fixture_data(engine)
self._test_criterion(engine)
def test_criterion_python(self):
engine = self._non_native_engine()
self._fixture_data(engine)
self._test_criterion(engine)
def _test_criterion(self, engine):
data_table = self.tables.data_table
with engine.begin() as conn:
result = conn.execute(
select([data_table.c.data]).where(
data_table.c.data["k1"] == "r3v1"
)
).first()
eq_(result, ({"k1": "r3v1", "k2": "r3v2"},))
def _test_fixed_round_trip(self, engine):
with engine.begin() as conn:
s = select(
[
hstore(
array(["key1", "key2", "key3"]),
array(["value1", "value2", "value3"]),
)
]
)
eq_(
conn.scalar(s),
{"key1": "value1", "key2": "value2", "key3": "value3"},
)
def test_fixed_round_trip_python(self):
engine = self._non_native_engine()
self._test_fixed_round_trip(engine)
@testing.requires.psycopg2_native_hstore
def test_fixed_round_trip_native(self):
engine = testing.db
self._test_fixed_round_trip(engine)
def _test_unicode_round_trip(self, engine):
with engine.begin() as conn:
s = select(
[
hstore(
array(
[
util.u("réveillé"),
util.u("drôle"),
util.u("S’il"),
]
),
array(
[
util.u("réveillé"),
util.u("drôle"),
util.u("S’il"),
]
),
)
]
)
eq_(
conn.scalar(s),
{
util.u("réveillé"): util.u("réveillé"),
util.u("drôle"): util.u("drôle"),
util.u("S’il"): util.u("S’il"),
},
)
@testing.requires.psycopg2_native_hstore
def test_unicode_round_trip_python(self):
engine = self._non_native_engine()
self._test_unicode_round_trip(engine)
@testing.requires.psycopg2_native_hstore
def test_unicode_round_trip_native(self):
engine = testing.db
self._test_unicode_round_trip(engine)
def test_escaped_quotes_round_trip_python(self):
engine = self._non_native_engine()
self._test_escaped_quotes_round_trip(engine)
@testing.requires.psycopg2_native_hstore
def test_escaped_quotes_round_trip_native(self):
engine = testing.db
self._test_escaped_quotes_round_trip(engine)
def _test_escaped_quotes_round_trip(self, engine):
with engine.begin() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "data": {r"key \"foo\"": r'value \"bar"\ xyz'}},
)
self._assert_data([{r"key \"foo\"": r'value \"bar"\ xyz'}], conn)
def test_orm_round_trip(self):
from sqlalchemy import orm
class Data(object):
def __init__(self, name, data):
self.name = name
self.data = data
orm.mapper(Data, self.tables.data_table)
s = orm.Session(testing.db)
d = Data(
name="r1",
data={"key1": "value1", "key2": "value2", "key3": "value3"},
)
s.add(d)
eq_(s.query(Data.data, Data).all(), [(d.data, d)])
class _RangeTypeCompilation(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "postgresql"
# operator tests
@classmethod
def setup_class(cls):
table = Table(
"data_table",
MetaData(),
Column("range", cls._col_type, primary_key=True),
)
cls.col = table.c.range
def _test_clause(self, colclause, expected):
self.assert_compile(colclause, expected)
def test_where_equal(self):
self._test_clause(
self.col == self._data_str, "data_table.range = %(range_1)s"
)
def test_where_not_equal(self):
self._test_clause(
self.col != self._data_str, "data_table.range <> %(range_1)s"
)
def test_where_is_null(self):
self._test_clause(self.col == None, "data_table.range IS NULL")
def test_where_is_not_null(self):
self._test_clause(self.col != None, "data_table.range IS NOT NULL")
def test_where_less_than(self):
self._test_clause(
self.col < self._data_str, "data_table.range < %(range_1)s"
)
def test_where_greater_than(self):
self._test_clause(
self.col > self._data_str, "data_table.range > %(range_1)s"
)
def test_where_less_than_or_equal(self):
self._test_clause(
self.col <= self._data_str, "data_table.range <= %(range_1)s"
)
def test_where_greater_than_or_equal(self):
self._test_clause(
self.col >= self._data_str, "data_table.range >= %(range_1)s"
)
def test_contains(self):
self._test_clause(
self.col.contains(self._data_str),
"data_table.range @> %(range_1)s",
)
def test_contained_by(self):
self._test_clause(
self.col.contained_by(self._data_str),
"data_table.range <@ %(range_1)s",
)
def test_overlaps(self):
self._test_clause(
self.col.overlaps(self._data_str),
"data_table.range && %(range_1)s",
)
def test_strictly_left_of(self):
self._test_clause(
self.col << self._data_str, "data_table.range << %(range_1)s"
)
self._test_clause(
self.col.strictly_left_of(self._data_str),
"data_table.range << %(range_1)s",
)
def test_strictly_right_of(self):
self._test_clause(
self.col >> self._data_str, "data_table.range >> %(range_1)s"
)
self._test_clause(
self.col.strictly_right_of(self._data_str),
"data_table.range >> %(range_1)s",
)
def test_not_extend_right_of(self):
self._test_clause(
self.col.not_extend_right_of(self._data_str),
"data_table.range &< %(range_1)s",
)
def test_not_extend_left_of(self):
self._test_clause(
self.col.not_extend_left_of(self._data_str),
"data_table.range &> %(range_1)s",
)
def test_adjacent_to(self):
self._test_clause(
self.col.adjacent_to(self._data_str),
"data_table.range -|- %(range_1)s",
)
def test_union(self):
self._test_clause(
self.col + self.col, "data_table.range + data_table.range"
)
def test_intersection(self):
self._test_clause(
self.col * self.col, "data_table.range * data_table.range"
)
def test_different(self):
self._test_clause(
self.col - self.col, "data_table.range - data_table.range"
)
class _RangeTypeRoundTrip(fixtures.TablesTest):
__requires__ = "range_types", "psycopg2_compatibility"
__backend__ = True
def extras(self):
# done this way so we don't get ImportErrors with
# older psycopg2 versions.
if testing.against("postgresql+psycopg2cffi"):
from psycopg2cffi import extras
else:
from psycopg2 import extras
return extras
@classmethod
def define_tables(cls, metadata):
# no reason ranges shouldn't be primary keys,
# so let's just use them as such
table = Table(
"data_table",
metadata,
Column("range", cls._col_type, primary_key=True),
)
cls.col = table.c.range
def test_actual_type(self):
eq_(str(self._col_type()), self._col_str)
def test_reflect(self):
from sqlalchemy import inspect
insp = inspect(testing.db)
cols = insp.get_columns("data_table")
assert isinstance(cols[0]["type"], self._col_type)
def _assert_data(self, conn):
data = conn.execute(
select([self.tables.data_table.c.range])
).fetchall()
eq_(data, [(self._data_obj(),)])
def test_insert_obj(self, connection):
connection.execute(
self.tables.data_table.insert(), {"range": self._data_obj()}
)
self._assert_data(connection)
def test_insert_text(self, connection):
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str}
)
self._assert_data(connection)
def test_union_result(self, connection):
# insert
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str}
)
# select
range_ = self.tables.data_table.c.range
data = connection.execute(select([range_ + range_])).fetchall()
eq_(data, [(self._data_obj(),)])
def test_intersection_result(self, connection):
# insert
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str}
)
# select
range_ = self.tables.data_table.c.range
data = connection.execute(select([range_ * range_])).fetchall()
eq_(data, [(self._data_obj(),)])
def test_difference_result(self, connection):
# insert
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str}
)
# select
range_ = self.tables.data_table.c.range
data = connection.execute(select([range_ - range_])).fetchall()
eq_(data, [(self._data_obj().__class__(empty=True),)])
class _Int4RangeTests(object):
_col_type = INT4RANGE
_col_str = "INT4RANGE"
_data_str = "[1,2)"
def _data_obj(self):
return self.extras().NumericRange(1, 2)
class _Int8RangeTests(object):
_col_type = INT8RANGE
_col_str = "INT8RANGE"
_data_str = "[9223372036854775806,9223372036854775807)"
def _data_obj(self):
return self.extras().NumericRange(
9223372036854775806, 9223372036854775807
)
class _NumRangeTests(object):
_col_type = NUMRANGE
_col_str = "NUMRANGE"
_data_str = "[1.0,2.0)"
def _data_obj(self):
return self.extras().NumericRange(
decimal.Decimal("1.0"), decimal.Decimal("2.0")
)
class _DateRangeTests(object):
_col_type = DATERANGE
_col_str = "DATERANGE"
_data_str = "[2013-03-23,2013-03-24)"
def _data_obj(self):
return self.extras().DateRange(
datetime.date(2013, 3, 23), datetime.date(2013, 3, 24)
)
class _DateTimeRangeTests(object):
_col_type = TSRANGE
_col_str = "TSRANGE"
_data_str = "[2013-03-23 14:30,2013-03-23 23:30)"
def _data_obj(self):
return self.extras().DateTimeRange(
datetime.datetime(2013, 3, 23, 14, 30),
datetime.datetime(2013, 3, 23, 23, 30),
)
class _DateTimeTZRangeTests(object):
_col_type = TSTZRANGE
_col_str = "TSTZRANGE"
# make sure we use a single, stable timestamp-with-timezone pair
# for all parts of all these tests
_tstzs = None
def tstzs(self):
if self._tstzs is None:
with testing.db.begin() as conn:
lower = conn.scalar(func.current_timestamp().select())
upper = lower + datetime.timedelta(1)
self._tstzs = (lower, upper)
return self._tstzs
@property
def _data_str(self):
return "[%s,%s)" % self.tstzs()
def _data_obj(self):
return self.extras().DateTimeTZRange(*self.tstzs())
class Int4RangeCompilationTest(_Int4RangeTests, _RangeTypeCompilation):
pass
class Int4RangeRoundTripTest(_Int4RangeTests, _RangeTypeRoundTrip):
pass
class Int8RangeCompilationTest(_Int8RangeTests, _RangeTypeCompilation):
pass
class Int8RangeRoundTripTest(_Int8RangeTests, _RangeTypeRoundTrip):
pass
class NumRangeCompilationTest(_NumRangeTests, _RangeTypeCompilation):
pass
class NumRangeRoundTripTest(_NumRangeTests, _RangeTypeRoundTrip):
pass
class DateRangeCompilationTest(_DateRangeTests, _RangeTypeCompilation):
pass
class DateRangeRoundTripTest(_DateRangeTests, _RangeTypeRoundTrip):
pass
class DateTimeRangeCompilationTest(_DateTimeRangeTests, _RangeTypeCompilation):
pass
class DateTimeRangeRoundTripTest(_DateTimeRangeTests, _RangeTypeRoundTrip):
pass
class DateTimeTZRangeCompilationTest(
_DateTimeTZRangeTests, _RangeTypeCompilation
):
pass
class DateTimeTZRangeRoundTripTest(_DateTimeTZRangeTests, _RangeTypeRoundTrip):
pass
class JSONTest(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "postgresql"
def setup(self):
metadata = MetaData()
self.test_table = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("test_column", JSON),
)
self.jsoncol = self.test_table.c.test_column
@testing.combinations(
(
lambda self: self.jsoncol["bar"] == None, # noqa
"(test_table.test_column -> %(test_column_1)s) IS NULL",
),
(
lambda self: self.jsoncol[("foo", 1)] == None, # noqa
"(test_table.test_column #> %(test_column_1)s) IS NULL",
),
(
lambda self: self.jsoncol["bar"].astext == None, # noqa
"(test_table.test_column ->> %(test_column_1)s) IS NULL",
),
(
lambda self: self.jsoncol["bar"].astext.cast(Integer) == 5,
"CAST((test_table.test_column ->> %(test_column_1)s) AS INTEGER) "
"= %(param_1)s",
),
(
lambda self: self.jsoncol["bar"].cast(Integer) == 5,
"CAST((test_table.test_column -> %(test_column_1)s) AS INTEGER) "
"= %(param_1)s",
),
(
lambda self: self.jsoncol[("foo", 1)].astext == None, # noqa
"(test_table.test_column #>> %(test_column_1)s) IS NULL",
),
)
def test_where(self, whereclause_fn, expected):
whereclause = whereclause_fn(self)
stmt = select([self.test_table]).where(whereclause)
self.assert_compile(
stmt,
"SELECT test_table.id, test_table.test_column FROM test_table "
"WHERE %s" % expected,
)
def test_path_typing(self):
col = column("x", JSON())
is_(col["q"].type._type_affinity, types.JSON)
is_(col[("q",)].type._type_affinity, types.JSON)
is_(col["q"]["p"].type._type_affinity, types.JSON)
is_(col[("q", "p")].type._type_affinity, types.JSON)
def test_custom_astext_type(self):
class MyType(types.UserDefinedType):
pass
col = column("x", JSON(astext_type=MyType))
is_(col["q"].astext.type.__class__, MyType)
is_(col[("q", "p")].astext.type.__class__, MyType)
is_(col["q"]["p"].astext.type.__class__, MyType)
@testing.combinations(
(
lambda self: self.jsoncol["foo"],
"test_table.test_column -> %(test_column_1)s AS anon_1",
True,
)
)
def test_cols(self, colclause_fn, expected, from_):
colclause = colclause_fn(self)
stmt = select([colclause])
self.assert_compile(
stmt,
("SELECT %s" + (" FROM test_table" if from_ else "")) % expected,
)
class JSONRoundTripTest(fixtures.TablesTest):
__only_on__ = ("postgresql >= 9.3",)
__backend__ = True
test_type = JSON
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30), nullable=False),
Column("data", cls.test_type),
Column("nulldata", cls.test_type(none_as_null=True)),
)
def _fixture_data(self, engine):
data_table = self.tables.data_table
with engine.begin() as conn:
conn.execute(
data_table.insert(),
{"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}},
{"name": "r2", "data": {"k1": "r2v1", "k2": "r2v2"}},
{"name": "r3", "data": {"k1": "r3v1", "k2": "r3v2"}},
{"name": "r4", "data": {"k1": "r4v1", "k2": "r4v2"}},
{"name": "r5", "data": {"k1": "r5v1", "k2": "r5v2", "k3": 5}},
{"name": "r6", "data": {"k1": {"r6v1": {"subr": [1, 2, 3]}}}},
)
def _assert_data(self, compare, conn, column="data"):
col = self.tables.data_table.c[column]
data = conn.execute(
select([col]).order_by(self.tables.data_table.c.name)
).fetchall()
eq_([d for d, in data], compare)
def _assert_column_is_NULL(self, conn, column="data"):
col = self.tables.data_table.c[column]
data = conn.execute(select([col]).where(col.is_(null()))).fetchall()
eq_([d for d, in data], [None])
def _assert_column_is_JSON_NULL(self, conn, column="data"):
col = self.tables.data_table.c[column]
data = conn.execute(
select([col]).where(cast(col, String) == "null")
).fetchall()
eq_([d for d, in data], [None])
def _test_insert(self, engine):
with engine.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}},
)
self._assert_data([{"k1": "r1v1", "k2": "r1v2"}], conn)
def _test_insert_nulls(self, engine):
with engine.connect() as conn:
conn.execute(
self.tables.data_table.insert(), {"name": "r1", "data": null()}
)
self._assert_data([None], conn)
def _test_insert_none_as_null(self, engine):
with engine.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "nulldata": None},
)
self._assert_column_is_NULL(conn, column="nulldata")
def _test_insert_nulljson_into_none_as_null(self, engine):
with engine.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "nulldata": JSON.NULL},
)
self._assert_column_is_JSON_NULL(conn, column="nulldata")
def _non_native_engine(self, json_serializer=None, json_deserializer=None):
if json_serializer is not None or json_deserializer is not None:
options = {
"json_serializer": json_serializer,
"json_deserializer": json_deserializer,
}
else:
options = {}
if testing.against(
"postgresql+psycopg2"
) and testing.db.dialect.psycopg2_version >= (2, 5):
from psycopg2.extras import register_default_json
engine = engines.testing_engine(options=options)
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
engine.dialect._has_native_json = False
def pass_(value):
return value
register_default_json(dbapi_connection, loads=pass_)
elif options:
engine = engines.testing_engine(options=options)
else:
engine = testing.db
engine.connect().close()
return engine
def test_reflect(self):
insp = inspect(testing.db)
cols = insp.get_columns("data_table")
assert isinstance(cols[2]["type"], self.test_type)
@testing.requires.psycopg2_native_json
def test_insert_native(self, connection):
self._test_insert(connection)
@testing.requires.psycopg2_native_json
def test_insert_native_nulls(self, connection):
self._test_insert_nulls(connection)
@testing.requires.psycopg2_native_json
def test_insert_native_none_as_null(self, connection):
self._test_insert_none_as_null(connection)
@testing.requires.psycopg2_native_json
def test_insert_native_nulljson_into_none_as_null(self, connection):
self._test_insert_nulljson_into_none_as_null(connection)
def test_insert_python(self):
engine = self._non_native_engine()
self._test_insert(engine)
def test_insert_python_nulls(self):
engine = self._non_native_engine()
self._test_insert_nulls(engine)
def test_insert_python_none_as_null(self):
engine = self._non_native_engine()
self._test_insert_none_as_null(engine)
def test_insert_python_nulljson_into_none_as_null(self):
engine = self._non_native_engine()
self._test_insert_nulljson_into_none_as_null(engine)
def _test_custom_serialize_deserialize(self, native):
import json
def loads(value):
value = json.loads(value)
value["x"] = value["x"] + "_loads"
return value
def dumps(value):
value = dict(value)
value["x"] = "dumps_y"
return json.dumps(value)
if native:
engine = engines.testing_engine(
options=dict(json_serializer=dumps, json_deserializer=loads)
)
else:
engine = self._non_native_engine(
json_serializer=dumps, json_deserializer=loads
)
s = select([cast({"key": "value", "x": "q"}, self.test_type)])
with engine.begin() as conn:
eq_(conn.scalar(s), {"key": "value", "x": "dumps_y_loads"})
@testing.requires.psycopg2_native_json
def test_custom_native(self):
self._test_custom_serialize_deserialize(True)
@testing.requires.psycopg2_native_json
def test_custom_python(self):
self._test_custom_serialize_deserialize(False)
@testing.requires.psycopg2_native_json
def test_criterion_native(self):
engine = testing.db
self._fixture_data(engine)
self._test_criterion(engine)
def test_criterion_python(self):
engine = self._non_native_engine()
self._fixture_data(engine)
self._test_criterion(engine)
def test_path_query(self, connection):
engine = testing.db
self._fixture_data(engine)
data_table = self.tables.data_table
result = connection.execute(
select([data_table.c.name]).where(
data_table.c.data[("k1", "r6v1", "subr")].astext == "[1, 2, 3]"
)
)
eq_(result.scalar(), "r6")
@testing.fails_on(
"postgresql < 9.4", "Improvement in PostgreSQL behavior?"
)
def test_multi_index_query(self, connection):
engine = testing.db
self._fixture_data(engine)
data_table = self.tables.data_table
result = connection.execute(
select([data_table.c.name]).where(
data_table.c.data["k1"]["r6v1"]["subr"].astext == "[1, 2, 3]"
)
)
eq_(result.scalar(), "r6")
def test_query_returned_as_text(self, connection):
engine = testing.db
self._fixture_data(engine)
data_table = self.tables.data_table
result = connection.execute(
select([data_table.c.data["k1"].astext])
).first()
if engine.dialect.returns_unicode_strings:
assert isinstance(result[0], util.text_type)
else:
assert isinstance(result[0], util.string_types)
def test_query_returned_as_int(self, connection):
engine = testing.db
self._fixture_data(engine)
data_table = self.tables.data_table
result = connection.execute(
select([data_table.c.data["k3"].astext.cast(Integer)]).where(
data_table.c.name == "r5"
)
).first()
assert isinstance(result[0], int)
def _test_criterion(self, engine):
data_table = self.tables.data_table
with engine.begin() as conn:
result = conn.execute(
select([data_table.c.data]).where(
data_table.c.data["k1"].astext == "r3v1"
)
).first()
eq_(result, ({"k1": "r3v1", "k2": "r3v2"},))
result = conn.execute(
select([data_table.c.data]).where(
data_table.c.data["k1"].astext.cast(String) == "r3v1"
)
).first()
eq_(result, ({"k1": "r3v1", "k2": "r3v2"},))
def _test_fixed_round_trip(self, engine):
with engine.begin() as conn:
s = select(
[
cast(
{"key": "value", "key2": {"k1": "v1", "k2": "v2"}},
self.test_type,
)
]
)
eq_(
conn.scalar(s),
{"key": "value", "key2": {"k1": "v1", "k2": "v2"}},
)
def test_fixed_round_trip_python(self):
engine = self._non_native_engine()
self._test_fixed_round_trip(engine)
@testing.requires.psycopg2_native_json
def test_fixed_round_trip_native(self):
engine = testing.db
self._test_fixed_round_trip(engine)
def _test_unicode_round_trip(self, engine):
with engine.begin() as conn:
s = select(
[
cast(
{
util.u("réveillé"): util.u("réveillé"),
"data": {"k1": util.u("drôle")},
},
self.test_type,
)
]
)
eq_(
conn.scalar(s),
{
util.u("réveillé"): util.u("réveillé"),
"data": {"k1": util.u("drôle")},
},
)
def test_unicode_round_trip_python(self):
engine = self._non_native_engine()
self._test_unicode_round_trip(engine)
@testing.requires.psycopg2_native_json
def test_unicode_round_trip_native(self):
engine = testing.db
self._test_unicode_round_trip(engine)
def test_eval_none_flag_orm(self):
Base = declarative_base()
class Data(Base):
__table__ = self.tables.data_table
s = Session(testing.db)
d1 = Data(name="d1", data=None, nulldata=None)
s.add(d1)
s.commit()
s.bulk_insert_mappings(
Data, [{"name": "d2", "data": None, "nulldata": None}]
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String),
cast(self.tables.data_table.c.nulldata, String),
)
.filter(self.tables.data_table.c.name == "d1")
.first(),
("null", None),
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String),
cast(self.tables.data_table.c.nulldata, String),
)
.filter(self.tables.data_table.c.name == "d2")
.first(),
("null", None),
)
class JSONBTest(JSONTest):
def setup(self):
metadata = MetaData()
self.test_table = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("test_column", JSONB),
)
self.jsoncol = self.test_table.c.test_column
@testing.combinations(
(
# hide from 2to3
lambda self: getattr(self.jsoncol, "has_key")("data"),
"test_table.test_column ? %(test_column_1)s",
),
(
lambda self: self.jsoncol.has_all(
{"name": "r1", "data": {"k1": "r1v1", "k2": "r1v2"}}
),
"test_table.test_column ?& %(test_column_1)s",
),
(
lambda self: self.jsoncol.has_any(
postgresql.array(["name", "data"])
),
"test_table.test_column ?| ARRAY[%(param_1)s, %(param_2)s]",
),
(
lambda self: self.jsoncol.contains({"k1": "r1v1"}),
"test_table.test_column @> %(test_column_1)s",
),
(
lambda self: self.jsoncol.contained_by({"foo": "1", "bar": None}),
"test_table.test_column <@ %(test_column_1)s",
),
)
def test_where(self, whereclause_fn, expected):
super(JSONBTest, self).test_where(whereclause_fn, expected)
class JSONBRoundTripTest(JSONRoundTripTest):
__requires__ = ("postgresql_jsonb",)
test_type = JSONB
@testing.requires.postgresql_utf8_server_encoding
def test_unicode_round_trip_python(self):
super(JSONBRoundTripTest, self).test_unicode_round_trip_python()
@testing.requires.postgresql_utf8_server_encoding
def test_unicode_round_trip_native(self):
super(JSONBRoundTripTest, self).test_unicode_round_trip_native()
class JSONBSuiteTest(suite.JSONTest):
__requires__ = ("postgresql_jsonb",)
datatype = JSONB
class JSONBCastSuiteTest(suite.JSONStringCastIndexTest):
__requires__ = ("postgresql_jsonb",)
datatype = JSONB
| {
"content_hash": "a8f58c752cd6486d0fde4cb74077904a",
"timestamp": "",
"source": "github",
"line_count": 3293,
"max_line_length": 79,
"avg_line_length": 32.03249316732463,
"alnum_prop": 0.5176568736194458,
"repo_name": "graingert/sqlalchemy",
"id": "1903e47d48e99205cc70ee957e57934d564dce4c",
"size": "105553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/dialect/postgresql/test_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49149"
},
{
"name": "Python",
"bytes": "11845913"
}
],
"symlink_target": ""
} |
""" guilloches, following. observe how detail grows and your cpu melts.
move mouse horizontally and vertically to change parameters
http://ministryoftype.co.uk/words/article/guilloches/
TODO - this is now brokeh, need to find how to get back canvas-like behavior
which wouldn't repaint at each frame
"""
from gi.repository import Gtk as gtk
from lib import graphics
import colorsys
import math
import cairo
class Scene(graphics.Scene):
def __init__(self):
graphics.Scene.__init__(self)
self.set_double_buffered(False)
self.a = 1.4191403
self.b = -2.2841523
self.c = 2.4275403
self.d = -2.177196
self.points = 2000
self.x, self.y = 0, 0
self.image = None
self.prev_width, self.prev_height = 0, 0
self.connect("on-enter-frame", self.on_enter_frame)
def on_enter_frame(self, scene, context):
g = graphics.Graphics(context)
if self.prev_width != self.width or self.prev_height != self.height:
self.x, self.y = 0, 0
if self.x == 0 and self.y == 0:
g.fill_area(0, 0, self.width, self.height, "#fff")
for i in range(1000):
self.x = math.sin(self.a * self.y) - math.cos(self.b * self.x)
self.y = math.sin(self.c * self.x) - math.cos(self.d * self.y)
x = int(self.x * self.width * 0.2 + self.width / 2)
y = int(self.y * self.height * 0.2 + self.height / 2)
g.rectangle(x, y, 1, 1)
g.fill("#000", 0.08)
self.prev_width, self.prev_height = self.width, self.height
self.redraw()
class BasicWindow:
def __init__(self):
window = gtk.Window()
window.set_default_size(800, 500)
window.connect("delete_event", lambda *args: gtk.main_quit())
window.add(Scene())
window.show_all()
if __name__ == "__main__":
example = BasicWindow()
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c
gtk.main()
| {
"content_hash": "43d5e615d2227c8bbce67eba36066e72",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 28.690140845070424,
"alnum_prop": 0.5930289641629848,
"repo_name": "projecthamster/experiments",
"id": "dd3dc69734b7df4d4911ffb5c1e0452ae7bebb6c",
"size": "2140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "strange_attractor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "734313"
}
],
"symlink_target": ""
} |
""" Temp file to generate mock data. """
import argparse
import pathlib
import random
from collections import defaultdict
from typing import DefaultDict, Dict, List, Optional
from graph_structures_pb2 import (
SLI,
Client,
Dependency,
Node,
NodeType,
SLIType,
UserJourney,
)
from . import server_utils
# define the mock data in a convenient format to generate protobufs
# service and endpoint names correspond 1:1
SERVICE_ENDPOINT_NAME_MAP: Dict[str, List[str]] = {
"APIServer": [
"StartGame",
"UpdateGameState",
],
"WebServer": ["GetProfilePage", "GetLeaderboardPage", "BuyCurrency"],
"GameService": [
"GetPlayerLocation",
"GetScore",
],
"LeaderboardService": [
"GetLeaderboard",
"SetUserHighScore",
"GetUserHighScore",
],
"ProfileService": [
"Authenticate",
"GetUserInfo",
],
"StoreService": ["VerifyPayment"],
"GameDB": [
"ReadHighScore",
"WriteHighScore",
],
"ProfileDB": [
"ReadFriendsList",
"WriteFriendsList",
],
"ExternalAuthProvider": [],
"ExternalPaymentProvider": [],
}
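# For example, generate_nodes() below turns these into fully qualified
# endpoint names such as "APIServer.StartGame", which are also the keys
# used in NODE_DEPENDENCY_MAP.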
# define a dependency map from each endpoint to its dependencies
# each entry is a fully qualified name of the form "TargetService.TargetEndpoint"
# (or a bare service name for external dependencies that expose no endpoints)
# note we use empty string instead of None to match the protobuf convention for "unset" fields
NODE_DEPENDENCY_MAP: DefaultDict[str, List[str]] = defaultdict(list)
NODE_DEPENDENCY_MAP.update(
{
"APIServer.StartGame": ["APIServer.UpdateGameState"],
"APIServer.UpdateGameState": [
"GameService.GetPlayerLocation",
"GameService.GetScore",
"LeaderboardService.SetUserHighScore",
],
"WebServer.GetLeaderboardPage": ["LeaderboardService.GetLeaderboard"],
"WebServer.GetProfilePage": [
"ProfileService.Authenticate",
"ProfileService.GetUserInfo",
],
"WebServer.BuyCurrency": [("StoreService.VerifyPayment")],
"LeaderboardService.GetLeaderboard": ["GameDB.ReadHighScore"],
"LeaderboardService.SetUserHighScore": ["GameDB.WriteHighScore"],
"LeaderboardService.GetUserHighScore": ["GameDB.ReadHighScore"],
"ProfileService.Authenticate": ["ExternalAuthProvider"],
"ProfileService.GetUserInfo": [
"LeaderboardService.GetUserHighScore",
"ProfileDB.ReadFriendsList",
],
# StoreService
"StoreService.VerifyPayment": ["ExternalPaymentProvider"],
}
)
# client names and user journeys correspond 1:1
CLIENT_USER_JOURNEY_NAME_MAP: Dict[str, List[str]] = {
"MobileClient": ["PlayGame"],
"WebBrowser": [
"ViewLeaderboard",
"ViewProfile",
"ConductMicrotransaction",
],
}
USER_JOURNEY_DEPENDENCY_MAP: Dict[str, List[str]] = {
"MobileClient.PlayGame": ["APIServer.StartGame"],
"WebBrowser.ViewLeaderboard": ["WebServer.GetLeaderboardPage"],
"WebBrowser.ViewProfile": ["WebServer.GetProfilePage"],
"WebBrowser.ConductMicrotransaction": ["WebServer.BuyCurrency"],
}
SLO_BOUNDS: Dict[str, float] = {
"slo_error_lower_bound": 0.1,
"slo_warn_lower_bound": 0.2,
"slo_warn_upper_bound": 0.8,
"slo_error_upper_bound": 0.9,
}
SLO_TARGET = 0.5
INTRA_STATUS_CHANGE_THRESHOLD = 0.03
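# Rough interpretation (assumption, not defined in this file): an SLI value
# outside the error bounds is unhealthy, a value between the warn and error
# bounds is a warning, and values inside the warn bounds count as healthy;
# the threshold above presumably controls when a change within one status
# band is still reported.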
def generate_nodes():
"""Generates mock service data used to test the UJT.
Returns: a list of Service protobufs.
"""
services = []
endpoints = []
for service_name, relative_endpoint_names in SERVICE_ENDPOINT_NAME_MAP.items():
service = Node(node_type=NodeType.NODETYPE_SERVICE, name=service_name)
fully_qualified_endpoint_names = [
f"{service.name}.{relative_endpoint_name}"
for relative_endpoint_name in relative_endpoint_names
]
service.child_names.extend(fully_qualified_endpoint_names)
service.slis.extend(
[
SLI(
node_name=service_name,
sli_value=random.random(),
slo_target=SLO_TARGET,
sli_type=SLIType.SLITYPE_AVAILABILITY,
intra_status_change_threshold=INTRA_STATUS_CHANGE_THRESHOLD,
**SLO_BOUNDS,
)
]
)
services.append(service)
for endpoint_name in fully_qualified_endpoint_names:
endpoint = Node(
node_type=NodeType.NODETYPE_ENDPOINT,
name=endpoint_name,
parent_name=service.name,
)
endpoint.dependencies.extend(
[
Dependency(target_name=target_name, source_name=endpoint_name)
for target_name in NODE_DEPENDENCY_MAP[endpoint_name]
]
)
endpoint.slis.extend(
[
SLI(
node_name=endpoint_name,
sli_value=random.random(),
slo_target=SLO_TARGET,
sli_type=SLIType.SLITYPE_LATENCY,
intra_status_change_threshold=INTRA_STATUS_CHANGE_THRESHOLD,
**SLO_BOUNDS,
)
]
)
endpoints.append(endpoint)
return services + endpoints
def generate_clients():
"""Generates the mock client data used to test the UJT.
Returns: A list of Client protobufs.
"""
clients = []
for (
client_name,
relative_user_journey_names,
) in CLIENT_USER_JOURNEY_NAME_MAP.items():
client = Client(name=client_name)
fully_qualified_user_journey_names = [
f"{client.name}.{relative_user_journey_name}"
for relative_user_journey_name in relative_user_journey_names
]
for user_journey_name in fully_qualified_user_journey_names:
user_journey = UserJourney(name=user_journey_name, client_name=client_name)
user_journey.dependencies.extend(
[
Dependency(
target_name=target_name,
source_name=user_journey_name,
toplevel=True,
)
for target_name in USER_JOURNEY_DEPENDENCY_MAP[user_journey_name]
]
)
client.user_journeys.extend([user_journey])
clients.append(client)
return clients
def save_mock_data(data_path_str: Optional[str] = None):
"""Saves the mock data used to test the UJT to disk.
Args:
data_path_str: path to directory where mock data should be saved.
"""
proto_type_message_map = {
Node: generate_nodes(),
Client: generate_clients(),
}
if data_path_str is None:
data_path = pathlib.Path(__file__).parent / "data"
else:
data_path = pathlib.Path(data_path_str)
for proto_type, messages in proto_type_message_map.items():
for message in messages:
path = data_path / server_utils.named_proto_file_name(
message.name, proto_type
)
server_utils.write_proto_to_file(path, message)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"-o",
"--output-directory",
help="Path to directory to store mock data",
)
args = arg_parser.parse_args()
save_mock_data(data_path_str=args.output_directory)
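# Example invocation (assumed; the module uses relative imports, so run it
# as a package module):  python -m ujt.server.generate_data -o ./mock_data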
| {
"content_hash": "9b3179489ac8ffbf3be0c6705a2be370",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 94,
"avg_line_length": 31.781512605042018,
"alnum_prop": 0.5896351136964569,
"repo_name": "googleinterns/userjourneytool",
"id": "87f621087c21ceaa8d4cb06fb17d21697a7319cf",
"size": "7564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ujt/server/generate_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "201"
},
{
"name": "Python",
"bytes": "257132"
},
{
"name": "Shell",
"bytes": "442"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from arenas import views
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>\d+)/$',
views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/rate/$', views.RatingView.as_view(), name='rate'),
url(r'^(?P<pk>\d+)/add_player/$', 'arenas.views.add_player', name='add_player'),
url(r'^(?P<pk>\d+)/freeagents/$', 'arenas.views.freeagents', name='freeagents'),
url(r'^search/$', 'arenas.views.search', name='search')
)
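# These patterns are expected to be mounted from the project URLconf with
# something along the lines of (illustrative only, not from this repo):
#   url(r'^arenas/', include('arenas.urls', namespace='arenas')),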
| {
"content_hash": "62408ecfc5b0377e601c6189dc7197e0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 103,
"avg_line_length": 53.46153846153846,
"alnum_prop": 0.4719424460431655,
"repo_name": "ThrowsException/CLeagueHero",
"id": "d179e9ff32a77eeb83308340c1df7abcb1ae8b68",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arenas/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9297"
},
{
"name": "JavaScript",
"bytes": "1773"
},
{
"name": "Python",
"bytes": "34926"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
} |
"""
Author: Peter Zujko (@zujko)
Description: Handles views and endpoints for all profile-related operations.
Date Created: Nov 7 2016
Updated: Feb 16 2018
"""
import logging
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models import Q
from django.http import HttpResponse, HttpResponseForbidden, JsonResponse
from django.shortcuts import redirect, render
from django.views.decorators.http import require_POST
from .models import Profile
from profile.models import GlobalAlert
logger = logging.getLogger("pawprints." + __name__)
CONFIG = settings.CONFIG
@login_required
def profile(request):
""" Handles displaying information about the user
    and the option to update their settings.
"""
profile = Profile.objects.get(user=request.user)
data_object = {
'first_name': profile.user.first_name,
'last_name': profile.user.last_name,
'email': profile.user.email,
'uid': profile.user.id,
'notification_settings': profile.notifications,
'petitions_created': profile.petitions_created.filter(~Q(status=2)),
'main_logo': CONFIG['main_logo'],
'analytics_id': settings.ANALYTICS,
'name': CONFIG['name'],
'generate_top_nav': CONFIG['generate_top_nav']
}
return render(request, 'profile.html', data_object)
@login_required
@user_passes_test(lambda u: u.is_superuser)
def admin(request):
"""
Handles displaying the staff managing panel.
User must be logged in and a superuser.
"""
profile = Profile.objects.get(user=request.user)
superusers = User.objects.filter(is_superuser=True)
superusers_id = superusers.values("id")
alert, created = GlobalAlert.objects.get_or_create(id=1, defaults={'active': 'False', 'content': 'Placeholder alert content.'})
report_user_profiles = Profile.objects.filter(notifications__reported=True).distinct("id")
report_users = [prof.user for prof in report_user_profiles]
threshold_user_profiles = Profile.objects.filter(notifications__threshold=True).distinct("id")
threshold_users = [prof.user for prof in threshold_user_profiles]
data_object = {
'superusers': superusers,
'staff': User.objects.filter(is_staff=True).exclude(id__in=superusers_id),
'all_users': User.objects.all(),
'main_logo': CONFIG['main_logo'],
'generate_top_nav': CONFIG['generate_top_nav'],
'analytics_id': settings.ANALYTICS,
'name': CONFIG['name'],
'alert': alert,
'reportUsers': report_users,
'thresholdUsers': threshold_users,
}
return render(request, 'admin.html', data_object)
# ENDPOINTS #
@require_POST
@login_required
def add_superuser(request, user_id):
if request.user.is_superuser:
if user_id is not None:
user = User.objects.get(id=int(user_id))
user.is_superuser = True
user.is_staff = True
user.save()
return HttpResponse(True)
return HttpResponseForbidden(False)
@require_POST
@login_required
def add_staff_member(request, user_id):
if request.user.is_superuser:
if user_id is not None:
user = User.objects.get(id=int(user_id))
user.is_staff = True
user.save()
return HttpResponse(True)
return HttpResponseForbidden(False)
@require_POST
@login_required
def remove_superuser(request, user_id):
if request.user.is_superuser:
if user_id is not None:
user = User.objects.get(id=int(user_id))
user.is_superuser = False
user.save()
return HttpResponse(True)
return HttpResponseForbidden(False)
@require_POST
@login_required
def remove_staff_member(request, user_id):
if request.user.is_superuser:
if user_id is not None:
user = User.objects.get(id=int(user_id))
user.is_staff = False
user.save()
return HttpResponse(True)
return HttpResponseForbidden(False)
@require_POST
@login_required
def update_alert(request):
if request.user.is_superuser:
post = request.POST
active = True if post.get('alert-active') == 'on' else False
content = post.get('alert-content')
alert, created = GlobalAlert.objects.get_or_create(id=1, defaults={'active': active, 'content': content})
alert.active = active
alert.content = content
alert.save()
return HttpResponse(True)
return HttpResponseForbidden(False)
@require_POST
@login_required
def update_notifications(request, user_id):
    """ Handles updating a user's
    notification settings.
"""
if request.user.id != int(user_id):
return HttpResponse(False)
user = request.user
user.profile.notifications.update = True if "updates" in request.POST else False
user.profile.notifications.response = True if "response" in request.POST else False
user.save()
return HttpResponse(True)
@require_POST
@login_required
def update_staff_emailing(request, username):
    """ Handles updating a staff member's
    report/threshold emailing settings.
"""
if request.user.is_superuser:
user = User.objects.get(username=username)
if user:
emailing_setting = request.POST.get('email-setting')
emailing_value = request.POST.get('email-value')
if emailing_setting == 'report':
user.profile.notifications.reported = True if emailing_value == 'true' else False
elif emailing_setting == 'threshold':
user.profile.notifications.threshold = True if emailing_value == 'true' else False
else:
return HttpResponse(False)
user.save()
return HttpResponse(True)
return HttpResponse(False)
@login_required
def user_logout(request):
""" Handles logging a user out
"""
logout(request)
url_next = request.GET.get('next', '/')
return redirect(url_next)
| {
"content_hash": "63a8ee3ca0c881429d5fa4c4ebf077bb",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 131,
"avg_line_length": 31.984455958549223,
"alnum_prop": 0.66337275230844,
"repo_name": "ritstudentgovernment/PawPrints",
"id": "abf94a4b68678940489ebf920896f69d5dea7ad1",
"size": "6173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profile/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "51771"
},
{
"name": "Dockerfile",
"bytes": "369"
},
{
"name": "HTML",
"bytes": "142080"
},
{
"name": "JavaScript",
"bytes": "118459"
},
{
"name": "Python",
"bytes": "140963"
},
{
"name": "Shell",
"bytes": "951"
}
],
"symlink_target": ""
} |
"""
This is a master vasp running script to perform various combinations of VASP
runs.
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__version__ = "0.5"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "12/31/13"
import logging
import sys
import yaml
from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob
from pymatgen.io.vasp import VaspInput, Incar, Kpoints
def load_class(mod, name):
toks = name.split("?")
params = {}
if len(toks) == 2:
for p in toks[-1].split(","):
ptoks = p.split("=")
params[ptoks[0]] = yaml.load(ptoks[1])
elif len(toks) > 2:
print("Bad handler specification")
sys.exit(-1)
mod = __import__(mod, globals(), locals(), [toks[0]], 0)
return getattr(mod, toks[0])(**params)
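# Example of the specification format parsed above, taken from the --handlers
# help text further down: load_class("custodian.vasp.handlers",
# "VaspErrorHandler?output_filename=myfile.out") imports VaspErrorHandler and
# instantiates it as VaspErrorHandler(output_filename="myfile.out"); multiple
# arguments are comma separated ("MyHandler?myfile=a,data=1") and each value is
# deserialized with yaml.load.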
def get_jobs(args):
    # Returns a generator of jobs. Allows for "infinite" jobs.
vasp_command = args.command.split()
#save initial INCAR for rampU runs
n_ramp_u = args.jobs.count('rampU')
ramps = 0
if n_ramp_u:
incar = Incar.from_file('INCAR')
ldauu = incar['LDAUU']
ldauj = incar['LDAUJ']
njobs = len(args.jobs)
post_settings = [] # append to this list to have settings applied on next job
for i, job in enumerate(args.jobs):
final = False if i != njobs - 1 else True
if any(c.isdigit() for c in job):
suffix = "." + job
else:
suffix = ".{}{}".format(job, i + 1)
settings = post_settings
post_settings = []
backup = True if i == 0 else False
copy_magmom = False
vinput = VaspInput.from_directory(".")
if i > 0:
settings.append(
{"file": "CONTCAR",
"action": {"_file_copy": {"dest": "POSCAR"}}})
job_type = job.lower()
auto_npar = True
if job_type.startswith("static_derived"):
from pymatgen.io.vasp.sets import MPStaticSet
vis = MPStaticSet.from_prev_calc(
".", user_incar_settings={"LWAVE": True, "EDIFF": 1e-6},
ediff_per_atom=False)
settings.extend([
{"dict" : "INCAR",
"action": {"_set": dict(vis.incar)}},
{'dict': 'KPOINTS',
'action': {'_set': vis.kpoints.as_dict()}}])
if job_type.startswith("static_dielectric_derived"):
from pymatgen.io.vasp.sets import MPStaticSet, MPStaticDielectricDFPTVaspInputSet
# vis = MPStaticSet.from_prev_calc(
# ".", user_incar_settings={"EDIFF": 1e-6, "IBRION": 8,
# "LEPSILON": True, 'LREAL':False,
# "LPEAD": True, "ISMEAR": 0,
# "SIGMA": 0.01},
# ediff_per_atom=False)
vis = MPStaticDielectricDFPTVaspInputSet()
incar = vis.get_incar(vinput["POSCAR"].structure)
unset = {}
for k in ["NPAR", "KPOINT_BSE", "LAECHG", "LCHARG", "LVHAR",
"NSW"]:
incar.pop(k, None)
if k in vinput["INCAR"]:
unset[k] = 1
kpoints = vis.get_kpoints(vinput["POSCAR"].structure)
settings.extend([
{"dict": "INCAR",
"action": {"_set": dict(incar),
"_unset": unset}},
{'dict': 'KPOINTS',
'action': {'_set': kpoints.as_dict()}}])
auto_npar = False
elif job_type.startswith("static"):
m = [i * args.static_kpoint for i in vinput["KPOINTS"].kpts[0]]
settings.extend([
{"dict": "INCAR",
"action": {"_set": {"NSW": 0}}},
{'dict': 'KPOINTS',
'action': {'_set': {'kpoints': [m]}}}])
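            # e.g. with --kpoint-static 2 and a relaxation k-grid of 2x3x3,
            # the static run here uses a 4x6x6 grid (see the -ks help text below)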
elif job_type.startswith("nonscf_derived"):
from pymatgen.io.vasp.sets import MPNonSCFSet
vis = MPNonSCFSet.from_prev_calc(".", copy_chgcar=False,
user_incar_settings={"LWAVE": True})
settings.extend([
{"dict": "INCAR",
"action": {"_set": dict(vis.incar)}},
{'dict': 'KPOINTS',
'action': {'_set': vis.kpoints.as_dict()}}])
elif job_type.startswith("optics_derived"):
from pymatgen.io.vasp.sets import MPNonSCFSet
vis = MPNonSCFSet.from_prev_calc(
".", optics=True, copy_chgcar=False,
nedos=2001, mode="uniform", nbands_factor=5,
user_incar_settings={"LWAVE": True, "ALGO": "Exact", "SIGMA": 0.01, "EDIFF": 1e-6},
ediff_per_atom=False)
settings.extend([
{"dict": "INCAR",
"action": {"_set": dict(vis.incar)}},
{'dict': 'KPOINTS',
'action': {'_set': vis.kpoints.as_dict()}}])
elif job_type.startswith("rampu"):
f = ramps / (n_ramp_u - 1)
settings.append(
{"dict": "INCAR",
"action": {"_set": {"LDAUJ": [j * f for j in ldauj],
"LDAUU": [u * f for u in ldauu]}}})
copy_magmom = True
ramps += 1
elif job_type.startswith("quick_relax") or job_type.startswith(\
"quickrelax"):
kpoints = vinput["KPOINTS"]
incar = vinput["INCAR"]
structure = vinput["POSCAR"].structure
if "ISMEAR" in incar:
post_settings.append(
{"dict": "INCAR",
"action": {"_set": {"ISMEAR": incar["ISMEAR"]}}})
else:
post_settings.append(
{"dict": "INCAR",
"action": {"_unset": {"ISMEAR": 1}}})
post_settings.append({"dict": "KPOINTS",
"action": {"_set": kpoints.as_dict()}})
# lattice vectors with length < 9 will get >1 KPOINT
low_kpoints = Kpoints.gamma_automatic(
[max(int(18/l), 1) for l in structure.lattice.abc])
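            # Illustrative example: lattice lengths of (4.0, 4.0, 12.0) Angstrom
            # give a coarse gamma-centered 4x4x1 grid here.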
settings.extend([
{"dict": "INCAR",
"action": {"_set": {"ISMEAR": 0}}},
{'dict': 'KPOINTS',
'action': {'_set': low_kpoints.as_dict()}}])
# let vasp determine encut (will be lower than
# needed for compatibility with other runs)
if "ENCUT" in incar:
post_settings.append(
{"dict": "INCAR",
"action": {"_set": {"ENCUT": incar["ENCUT"]}}})
settings.append(
{"dict": "INCAR",
"action": {"_unset": {"ENCUT": 1}}})
elif job_type.startswith("relax"):
pass
elif job_type.startswith("full_relax"):
for j in VaspJob.full_opt_run(
vasp_command):
yield j
else:
print("Unsupported job type: {}".format(job))
sys.exit(-1)
if not job_type.startswith("full_relax"):
yield VaspJob(vasp_command, final=final, suffix=suffix,
backup=backup, settings_override=settings,
copy_magmom=copy_magmom, auto_npar=auto_npar)
def do_run(args):
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO, filename="run.log")
logging.info("Handlers used are %s" % args.handlers)
handlers = [load_class("custodian.vasp.handlers", n) for n in
args.handlers]
validators = [load_class("custodian.vasp.validators", n) for n in
args.validators]
c = Custodian(handlers, get_jobs(args), validators,
max_errors=args.max_errors, scratch_dir=args.scratch,
gzipped_output=args.gzip,
checkpoint=True)
c.run()
def main():
import argparse
parser = argparse.ArgumentParser(description="""
run_vasp is a master script to perform various kinds of VASP runs.
""",
epilog="""
Author: Shyue Ping Ong
Version: {}
Last updated: {}""".format(__version__, __date__))
parser.add_argument(
"-c", "--command", dest="command", nargs="?",
default="pvasp", type=str,
help="VASP command. Defaults to pvasp. If you are using mpirun, "
"set this to something like \"mpirun pvasp\".")
parser.add_argument(
"-z", "--gzip", dest="gzip", action="store_true",
help="Add this option to gzip the final output. Do not gzip if you "
"are going to perform an additional static run."
)
parser.add_argument(
"-s", "--scratch", dest="scratch", nargs="?",
default=None, type=str,
help="Scratch directory to perform run in. Specify the root scratch "
"directory as the code will automatically create a temporary "
"subdirectory to run the job.")
parser.add_argument(
"-ks", "--kpoint-static", dest="static_kpoint", nargs="?",
default=1, type=int,
help="The multiplier to use for the KPOINTS of a static run (if "
"any). For example, setting this to 2 means that if your "
"original run was done using a k-point grid of 2x3x3, "
"the static run will be done with a k-point grid of 4x6x6. This "
"defaults to 1, i.e., static runs are performed with the same "
"k-point grid as relaxation runs."
)
parser.add_argument(
"-me", "--max-errors", dest="max_errors", nargs="?",
default=10, type=int,
help="Maximum number of errors to allow before quitting")
parser.add_argument(
"-hd", "--handlers", dest="handlers", nargs="+",
default=["VaspErrorHandler", "MeshSymmetryErrorHandler",
"UnconvergedErrorHandler", "NonConvergingErrorHandler",
"PotimErrorHandler"], type=str,
help="The ErrorHandlers to use specified as string class names, "
"with optional arguments specified as a url-like string. For "
"example, VaspErrorHandler?output_filename=myfile.out specifies a "
"VaspErrorHandler with output_name set to myfile.out. Multiple "
"arguments are joined by a comma. E.g., MyHandler?myfile=a,"
"data=1. The arguments are deserialized using yaml."
)
parser.add_argument(
"-vd", "--validators", dest="validators", nargs="+",
default=["VasprunXMLValidator"], type=str,
help="The Validators to use specified as string class names, "
"with optional arguments specified as a url-like string. For "
"example, VaspErrorHandler?output_filename=myfile.out specifies a "
"VaspErrorHandler with output_name set to myfile.out. Multiple "
"arguments are joined by a comma. E.g., MyHandler?myfile=a,"
"data=1. The arguments are deserialized using yaml."
)
parser.add_argument(
"jobs", metavar="jobs", type=str, nargs='+',
default=["relax", "relax"],
        help="Jobs to execute. Only sequences of relax, "
             "quickrelax, static, rampU, full_relax, static_derived, "
             "nonscf_derived, optics_derived are "
             "supported at the moment. For example, \"relax relax static\" "
             "will run a double relaxation followed by a static "
             "run. By default, suffixes are given sequential numbering, "
             "but this can be overridden by adding a number to the job "
             "type, e.g. relax5 relax6 relax7")
args = parser.parse_args()
do_run(args)
| {
"content_hash": "204d2b08ce91dba2d20e8a8aa9846959",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 99,
"avg_line_length": 40.605442176870746,
"alnum_prop": 0.5230356843692411,
"repo_name": "davidwaroquiers/custodian",
"id": "157bbc692ef0cbbb53d2c5f484ed6808d3773002",
"size": "11961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custodian/cli/run_vasp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "234770"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
import json
def init():
languages = ['en']
degreepath = 'C:/Users/Elias/Dropbox/__WikiNetworks/Master_Projekt/Degrees/results/'
for language in languages:
print language
totaldegpath = degreepath + language + '_deg_titles.sorted'
degrees = []
average = False
old_degree = 0
last_rank = 1
start_position = 0
end_position = 0
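        # The loop below implements fractional ranking for ties: titles sharing
        # a degree keep the rank of the first tied title in the second tuple
        # field, while the third field is overwritten with the average of their
        # 1-based positions (e.g. ties at positions 3 and 4 both get 3.5).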
with open(totaldegpath, 'r') as deg:
for degrank, line in enumerate(deg):
line = line.decode('utf-8').split('\t')
title = line[0]
degree = int(line[1])
if old_degree == degree:
if average is False:
if degrank == 0:
start_position = 0
else:
start_position = degrank - 1
degrees.append((title, last_rank, 0))
average = True
else:
if average is True:
end_position = degrank - 1
average_rank = (start_position + 1.0 + end_position + 1.0) / 2.0
for i in range(start_position, end_position + 1):
degrees[i] = (degrees[i][0], degrees[i][1], average_rank)
average = False
degrees.append((title, degrank + 1, degrank + 1)) #we count from 1 to x (for difference)
last_rank = degrank + 1
old_degree = degree
if average is True:
end_position = len(degrees) - 1
average_rank = (start_position + 1.0 + end_position + 1.0) / 2.0
for i in range(start_position, end_position + 1):
degrees[i] = (degrees[i][0], degrees[i][1], average_rank)
print 'degrees loaded'
with open(degreepath + language + '_ranking.json', 'w') as w:
json.dump(degrees, w)
print 'degrees dumped'
if __name__ == '__main__':
init() | {
"content_hash": "c745dfda6db40b564300389e6c4a2e03",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 109,
"avg_line_length": 38.280701754385966,
"alnum_prop": 0.45187901008249315,
"repo_name": "CMThF/wikipedia-analysis-toolkit",
"id": "b828a8f6341206b62a93af43a565fe6bc10399ab",
"size": "2184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ngrams/3_setup/ranking/degreeRankingEN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "60173"
},
{
"name": "M",
"bytes": "16294"
},
{
"name": "Matlab",
"bytes": "68943"
},
{
"name": "Objective-C",
"bytes": "616"
},
{
"name": "Python",
"bytes": "60956"
}
],
"symlink_target": ""
} |
from bokeh.core.properties import Instance
from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource, Tool
from bokeh.plotting import figure
from bokeh.util.compiler import TypeScript
output_file('custom_tool.html')
JS_CODE = """
import {GestureTool, GestureToolView} from "models/tools/gestures/gesture_tool"
import {ColumnDataSource} from "models/sources/column_data_source"
import {PanEvent} from "core/ui_events"
import * as p from "core/properties"
export class DrawToolView extends GestureToolView {
model: DrawTool
// this is executed when the pan/drag event starts
_pan_start(_e: PanEvent): void {
this.model.source.data = {x: [], y: []}
}
// this is executed on subsequent mouse/touch moves
_pan(e: PanEvent): void {
const {frame} = this.plot_view
const {sx, sy} = e
if (!frame.bbox.contains(sx, sy))
return
const x = frame.xscales.default.invert(sx)
const y = frame.yscales.default.invert(sy)
const {source} = this.model
source.get_array("x").push(x)
source.get_array("y").push(y)
source.change.emit()
}
  // this is executed when the pan/drag ends
_pan_end(_e: PanEvent): void {}
}
export namespace DrawTool {
export type Attrs = p.AttrsOf<Props>
export type Props = GestureTool.Props & {
source: p.Property<ColumnDataSource>
}
}
export interface DrawTool extends DrawTool.Attrs {}
export class DrawTool extends GestureTool {
properties: DrawTool.Props
constructor(attrs?: Partial<DrawTool.Attrs>) {
super(attrs)
}
tool_name = "Drag Span"
icon = "bk-tool-icon-lasso-select"
event_type = "pan" as "pan"
default_order = 12
static init_DrawTool(): void {
this.prototype.default_view = DrawToolView
this.define<DrawTool.Props>({
source: [ p.Instance ],
})
}
}
"""
class DrawTool(Tool):
__implementation__ = TypeScript(JS_CODE)
source = Instance(ColumnDataSource)
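# Behaviour of the custom tool, as implemented in the TypeScript above: starting
# a drag clears the shared ColumnDataSource, every pan event inside the plot
# frame appends the cursor position (converted to data space) to the "x"/"y"
# columns and emits a change event, so the line glyph defined below redraws the
# freehand path while you drag.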
source = ColumnDataSource(data=dict(x=[], y=[]))
plot = figure(x_range=(0,10), y_range=(0,10), tools=[DrawTool(source=source)])
plot.title.text ="Drag to draw on the plot"
plot.line('x', 'y', source=source)
show(plot)
| {
"content_hash": "6c99b5d7bc46c37af36785be0f47a8a9",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 25.023255813953487,
"alnum_prop": 0.6909851301115242,
"repo_name": "ericmjl/bokeh",
"id": "9d846007ae3dfbcea3f1bcef77c36e9729148e5c",
"size": "2152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/custom/custom_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.views.generic import TemplateView
urlpatterns = [
url(r'^about/$', TemplateView.as_view(template_name="pages/about_us.html"), name='about'),
url(r'^downloads/$', TemplateView.as_view(template_name="pages/downloads.html"), name='downloads'),
]
| {
"content_hash": "244606ae420f40d9ce5eb8f745db8908",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 103,
"avg_line_length": 42.42857142857143,
"alnum_prop": 0.7272727272727273,
"repo_name": "geometalab/osmaxx-frontend",
"id": "78886d5fa6f9209d5b1d5eada01c4b0219b71051",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osmaxx/core/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26077"
},
{
"name": "HTML",
"bytes": "22722"
},
{
"name": "JavaScript",
"bytes": "271988"
},
{
"name": "Python",
"bytes": "194135"
},
{
"name": "Shell",
"bytes": "823"
}
],
"symlink_target": ""
} |
from __future__ import print_function, absolute_import
import time
import numpy
from oskar._bda_utils import expand
def run(num_antennas, vis_compressed, input_name, vis_original, output_name):
print('- Expanding compressed data...')
t0 = time.time()
expand(num_antennas, vis_compressed, input_name, vis_original, output_name)
# num_baselines = num_antennas * (num_antennas - 1) / 2
# num_input_vis = len(vis_compressed[input_name])
# print(' - No. input visibilities : %i' % num_input_vis)
# out_time_idx = numpy.zeros((num_baselines,), dtype=numpy.int32)
# for row in range(num_input_vis):
# a1 = vis_compressed['antenna1'][row]
# a2 = vis_compressed['antenna2'][row]
# weight = int(round(vis_compressed['weight'][row]))
# data = vis_compressed[input_name][row]
# b = a1 * (num_antennas - 1) - (a1 - 1) * a1 / 2 + a2 - a1 - 1
# for t in range(out_time_idx[b], out_time_idx[b] + weight):
# out_row = t * num_baselines + b
# vis_original[output_name][out_row] = data
# out_time_idx[b] += weight
print(' - Visibilities expanded in %.2f s' % (time.time() - t0))
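    # Baseline-index sanity check for the commented reference implementation
    # above: with num_antennas=3 the formula b = a1*(N-1) - (a1-1)*a1/2 + a2 - a1 - 1
    # maps antenna pairs (0,1)->0, (0,2)->1 and (1,2)->2, i.e. upper-triangle order.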
| {
"content_hash": "ebcd961f757227fadcee4522e475261c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 43.81481481481482,
"alnum_prop": 0.6086221470836856,
"repo_name": "OxfordSKA/bda",
"id": "1e8b0ac45f249835cb7d3c81ccbcecfd5a3b8e36",
"size": "1208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybda/expand_bda.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "32495"
},
{
"name": "Python",
"bytes": "227005"
},
{
"name": "Shell",
"bytes": "444"
}
],
"symlink_target": ""
} |
import app.db as db
class Application():
def __init__(self, project_name, student_name, application_date, status='pending', is_new_application=True):
self.project_name = project_name
self.student_name = student_name
self.application_date = application_date
self.status = status
self.is_new_application = is_new_application
def accept(self):
if self.is_new_application:
raise ValueError('cannot approve a new application, save first')
self._update_status('accepted')
def reject(self):
if self.is_new_application:
raise ValueError('cannot reject a new application, save first')
self._update_status('rejected')
def _update_status(self, new_status):
update_query = (
"UPDATE application SET status=%(status)s "
"WHERE "
"project_name=%(project_name)s and "
"student_name=%(student_name)s")
self.status = new_status
cnx = db.get_connection()
with cnx.cursor() as cursor:
cursor.execute(update_query, vars(self))
cnx.commit()
def save(self):
insert_application = (
"INSERT INTO application "
"(project_name,"
"student_name,"
"application_date,"
"status) "
"VALUES "
"(%(project_name)s,"
"%(student_name)s,"
"%(application_date)s,"
"%(status)s)")
cnx = db.get_connection()
with cnx.cursor() as cursor:
if self.is_new_application:
cursor.execute(insert_application, vars(self))
else:
                raise NotImplementedError('applications cannot be modified')
cnx.commit()
self.is_new_application = False
@staticmethod
def find(student_name='%%', project_name='%%'):
query = (
"SELECT "
"project_name,"
"student_name,"
"application_date,"
"status "
"FROM application "
"WHERE "
"project_name LIKE %(project_name)s and "
"student_name LIKE %(student_name)s")
multi = student_name == '%%' or project_name == '%%'
results = list() if multi else None
cnx = db.get_connection()
with cnx.cursor() as cursor:
cursor.execute(query, {
'project_name': project_name,
'student_name': student_name,
})
for result in cursor:
if multi:
results.append(Application(is_new_application=False, **result))
else:
results = Application(is_new_application=False, **result)
return results
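# Usage sketch (values are hypothetical): Application.find(student_name='alice')
# leaves project_name at its '%%' wildcard, so a list of matching Application
# objects is returned; passing both student_name and project_name explicitly
# returns a single Application (or None when nothing matches).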
| {
"content_hash": "f1a02f78c1b3beaa0a23256e66164cf7",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 112,
"avg_line_length": 34.135802469135804,
"alnum_prop": 0.538878842676311,
"repo_name": "BunsenMcDubbs/cs4400-project",
"id": "d8855d21307d7cca257e8065f52003bbf13d12b1",
"size": "2765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models/Application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14373"
},
{
"name": "JavaScript",
"bytes": "553"
},
{
"name": "Python",
"bytes": "41099"
}
],
"symlink_target": ""
} |
from ....testing import assert_equal
from ..preprocess import SpaceTimeRealigner
def test_SpaceTimeRealigner_inputs():
input_map = dict(ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(mandatory=True,
min_ver='0.4.0.dev',
),
slice_info=dict(requires=[u'slice_times'],
),
slice_times=dict(),
tr=dict(requires=[u'slice_times'],
),
)
inputs = SpaceTimeRealigner.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SpaceTimeRealigner_outputs():
output_map = dict(out_file=dict(),
par_file=dict(),
)
outputs = SpaceTimeRealigner.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "6a875dc7b92268463158f925150c583a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 29.303030303030305,
"alnum_prop": 0.6504653567735263,
"repo_name": "carolFrohlich/nipype",
"id": "961756a80098b1dfd2a02e5a46ccd45594a4c209",
"size": "1021",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2320"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5451077"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
"""
pyClanSphere.tests.testModels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Make sure our core models have consistent behaviour
:copyright: (c) 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import unittest
from pyClanSphere import models, privileges
from pyClanSphere.tests import pyClanSphereTestCase
class testUserModel(pyClanSphereTestCase):
def setUp(self):
pyClanSphereTestCase.setUp(self)
models.User(u'TestUser', u'TestPass', u'[email protected]', u'TestBenutzer')
self.db.commit()
def testAnonymousUser(self):
"""Make sure anonymous user is properly recognized"""
user = models.User.query.get(1)
self.assertTrue(isinstance(user, models.User))
self.assertFalse(isinstance(user, models.AnonymousUser))
self.assertTrue(user.is_somebody)
user = models.AnonymousUser()
self.assertTrue(isinstance(user, models.User))
self.assertTrue(isinstance(user, models.AnonymousUser))
self.assertFalse(user.is_somebody)
def testPermissions(self):
"""Setting different permissions"""
admin = models.User.query.get(1)
self.assertTrue(admin.is_somebody)
self.assertTrue(admin.is_manager)
self.assertTrue(admin.has_profile_access)
self.assertTrue(admin.is_admin)
user = models.User.query.get(2)
self.assertTrue(user.is_somebody)
self.assertFalse(user.is_manager)
self.assertFalse(user.has_profile_access)
self.assertFalse(user.is_admin)
priv = privileges.ENTER_ACCOUNT_PANEL
user.own_privileges.add(priv)
self.assertFalse(user.is_manager)
self.assertTrue(user.has_profile_access)
self.assertFalse(user.is_admin)
user.own_privileges.remove(priv)
self.assertFalse(user.is_manager)
self.assertFalse(user.has_profile_access)
self.assertFalse(user.is_admin)
priv = privileges.ENTER_ADMIN_PANEL
user.own_privileges.add(priv)
self.assertTrue(user.is_manager)
self.assertFalse(user.has_profile_access)
self.assertFalse(user.is_admin)
user.own_privileges.remove(priv)
self.assertFalse(user.is_manager)
self.assertFalse(user.has_profile_access)
self.assertFalse(user.is_admin)
def testUserPassword(self):
"""User Password setting and account disabling"""
user = models.User.query.get(2)
self.assertFalse(user.disabled)
self.assertTrue(user.check_password('TestPass'))
user.disable()
self.assertFalse(user.check_password('TestPass'))
self.assertFalse(user.check_password('!'))
self.assertTrue(user.disabled)
user.set_password('!')
self.assertFalse(user.disabled)
self.assertTrue(user.check_password('!'))
def testUserQuery(self):
"""User querying"""
self.assertTrue(models.User.query.count() > 0)
anon = models.User.query.get_nobody()
self.assertTrue(isinstance(anon, models.User))
self.assertTrue(isinstance(anon, models.AnonymousUser))
userlist = models.User.query.namesort().all()
self.assertEqual(userlist, [models.User.query.get(1),models.User.query.get(2)])
def testUserNaming(self):
"""Realname or Username"""
user = models.User.query.get(2)
self.assertEqual(user.display_name, 'TestUser')
user.display_name = '$real_name'
self.assertEqual(user.display_name, 'TestBenutzer')
user.display_name = '$username'
self.assertNotEqual(user.display_name, 'TestBenutzer')
self.assertEqual(user.display_name, 'TestUser')
def tearDown(self):
self.db.delete(models.User.query.get(2))
self.db.commit()
pyClanSphereTestCase.tearDown(self)
| {
"content_hash": "d37fcf41aa3f0c602f458696cfc40319",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 87,
"avg_line_length": 34.73451327433628,
"alnum_prop": 0.6578343949044586,
"repo_name": "jokey2k/pyClanSphere",
"id": "6f3862535e974064bff1c3e41689b4450e4382d0",
"size": "3949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyClanSphere/tests/testModels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "188174"
},
{
"name": "Python",
"bytes": "891594"
}
],
"symlink_target": ""
} |
from utils import ptb_iterator, sample
train_data = [i for i in range(1024)]
# num_steps is how many items of the sequence we grab from the data per batch.
# If you want to grab 10 words at a time, then num_steps is 10.
for batch in ptb_iterator(train_data, batch_size=2, num_steps=1):
print("Batch")
x, y = batch
print(x, y)
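# With the usual PTB iterator implementation (an assumption about utils.py, not
# shown here), the 1024 integers are reshaped into batch_size=2 rows of 512 and
# cut into num_steps-wide windows, with y being x shifted one step ahead; the
# first batch would then be x=[[0], [512]] and y=[[1], [513]].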
| {
"content_hash": "8887eeb2fe84ede15c336caf2584cf83",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 65,
"avg_line_length": 34.77777777777778,
"alnum_prop": 0.6964856230031949,
"repo_name": "dmitrinesterenko/cs224d",
"id": "1a677ab33e2a9f3524741a618d195d508dc13403",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignment2/test_iteration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135342"
},
{
"name": "Shell",
"bytes": "2598"
}
],
"symlink_target": ""
} |
"""
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert an
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid) . One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
from __future__ import print_function
__docformat__ = 'restructedtext en'
import six.moves.cPickle as pickle
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from loregTut import LogisticRegression, load_data
# start-snippet-1
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
        # the output of uniform is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
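        # Concretely, for the MLP built below (n_in=28*28=784, n_out=n_hidden=500),
        # the tanh bound is sqrt(6/1284) ~= 0.068; the sigmoid branch below
        # scales it by 4 to ~= 0.27.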
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
# start-snippet-2
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(
rng=rng,
input=input,
n_in=n_in,
n_out=n_hidden,
activation=T.tanh
)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out
)
# end-snippet-2 start-snippet-3
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = (
abs(self.hiddenLayer.W).sum()
+ abs(self.logRegressionLayer.W).sum()
)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (
(self.hiddenLayer.W ** 2).sum()
+ (self.logRegressionLayer.W ** 2).sum()
)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
# end-snippet-3
# keep track of model input
self.input = input
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
    gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(
rng=rng,
input=x,
n_in=28 * 28,
n_hidden=n_hidden,
n_out=10
)
# start-snippet-4
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = (
classifier.negative_log_likelihood(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# end-snippet-4
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
# start-snippet-5
# compute the gradient of cost with respect to theta (sorted in params)
# the resulting gradients will be stored in a list gparams
gparams = [T.grad(cost, param) for param in classifier.params]
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
# given two lists of the same length, A = [a1, a2, a3, a4] and
# B = [b1, b2, b3, b4], zip generates a list C of same size, where each
# element is a pair formed from the two lists :
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-5
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
    patience = 10000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
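    # With the default MNIST split (50000 training images) and batch_size=20,
    # n_train_batches is 2500, so validation_frequency = min(2500, 10000 // 2)
    # = 2500 and the model is indeed validated once per epoch, as noted above.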
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in range(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
#added by garbers
# save the best model
# with open('best_MLP_model.pkl', 'w') as fBest:
# pickle.dump(classifier, fBest)
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
if __name__ == '__main__':
test_mlp()
| {
"content_hash": "bbb9fcebd76f0295affecc9457873814",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 80,
"avg_line_length": 34.91566265060241,
"alnum_prop": 0.5780538302277433,
"repo_name": "garbersc/keras-galaxies",
"id": "70f6a8993def9e7817b81250fab01c97ac9e0e79",
"size": "14490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "255"
},
{
"name": "Jupyter Notebook",
"bytes": "5124"
},
{
"name": "Python",
"bytes": "1537024"
}
],
"symlink_target": ""
} |
import unittest
from yapyg import math_collision
class TestMathCollisionRectCircle(unittest.TestCase):
def test_contact_mid_left_side(self):
circ = ("circle", 0.0, 0.0, 1.0)
rect = ("rectangle", 1.0, -1.0, 1.0, 2.0, 0.0)
contact_points = []
self.assertEqual(math_collision.is_rect_circle_collision(circ, rect, contact_points), 1)
self.assertEqual(contact_points, [(1.0, 0.0)])
def test_contact_mid_right_side(self):
circ = ("circle", 0.0, 0.0, 1.0)
rect = ("rectangle", -2.0, -1.0, 1.0, 2.0, 0.0)
contact_points = []
self.assertEqual(math_collision.is_rect_circle_collision(circ, rect, contact_points), 1)
self.assertEqual(contact_points, [(-1.0, 0.0)])
def test_contact_mid_top_side(self):
circ = ("circle", 0.0, 0.0, 1.0)
rect = ("rectangle", -1.0, -2.0, 2.0, 1.0, 0.0)
contact_points = []
self.assertEqual(math_collision.is_rect_circle_collision(circ, rect, contact_points), 1)
self.assertEqual(contact_points, [(0.0, -1.0)])
def test_contact_mid_bottom_side(self):
circ = ("circle", 0.0, 0.0, 1.0)
rect = ("rectangle", -1.0, 1.0, 2.0, 1.0, 0.0)
contact_points = []
self.assertEqual(math_collision.is_rect_circle_collision(circ, rect, contact_points), 1)
self.assertEqual(contact_points, [(0.0, 1.0)])
class TestMathCollisionRectRect(unittest.TestCase):
def test_contact_top(self):
rect_1 = ("rectangle", 0.0, 0.0, 1.0, 1.0, 0.0)
rect_2 = ("rectangle", 0.0, 1.0, 1.0, 1.0, 0.0)
contact_points = []
result = math_collision.is_rect_rect_collision(rect_1, rect_2, contact_points)
self.assertEqual(result, True)
self.assertEqual(contact_points, [(0.0, 1.0), (1.0, 1.0)])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestMathCollisionRectCircle)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(TestMathCollisionRectRect)
unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "7ee64204a6bdaefe6ef29d878e54c110",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 104,
"avg_line_length": 49.95744680851064,
"alnum_prop": 0.5570698466780238,
"repo_name": "rkibria/yapyg",
"id": "6dbfc4f2b3ecd0a6b7b22b15a50c5e52e98f0d8e",
"size": "3441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_math_collision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "422117"
},
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
class BadPlateauInit(Exception):
def __init__(self):
Exception.__init__(
self,
"Wrong initial Plateau parameters"
)
| {
"content_hash": "ac7611cd523d6cb0b649b29f1c8c4cc8",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 23,
"alnum_prop": 0.5279503105590062,
"repo_name": "tkanoff/therover",
"id": "d49f8bd866029634b9442a3447528199337641a3",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom_exceptions/bad_plateau_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7667"
}
],
"symlink_target": ""
} |
import pytest
import json, sys
import pexpect
def pytest_collect_file(path, parent):
if path.ext == ".q" and path.basename.endswith("_test.q"):
return QuarkFile(path, parent)
class QuarkFile(pytest.File):
def collect(self):
for lang in ["python", "java", "javascript", "ruby"]:
yield QuarkItem(self, lang)
class QuarkItem(pytest.Item):
def __init__(self, parent, lang):
super(QuarkItem, self).__init__("%s:%s" % (lang, parent.fspath.basename), parent)
self.lang = lang
def runtest(self):
child = pexpect.spawn("quark", ["install", "-v"] +
["--%s" % self.lang,
self.parent.fspath.strpath])
child.logfile = sys.stdout
child.expect(pexpect.EOF, timeout=300)
assert child.before.splitlines()[-1].strip() == "Done"
child = pexpect.spawn(
"quark", ["run", "--%s" % self.lang,
self.parent.fspath.strpath, "--", "--json"])
child.logfile = sys.stdout
child.expect_exact(
"=============================== json report ===============================",
timeout=300
)
child.expect(pexpect.EOF)
report = json.loads(child.before)
for item in report:
if item["failures"]:
raise QuarkException(report)
def repr_failure(self, excinfo):
if isinstance(excinfo.value, QuarkException):
ret = "\n".join([
"%s\n%s" % (item["name"],
"\n".join(" %s" % i
for i in item["failures"]))
for item in excinfo.value.report
if item["failures"]])
return ret
def reportinfo(self):
return self.fspath, 0, "lang: %s" % self.lang
class QuarkException(Exception):
def __init__(self, report):
super(QuarkException,self).__init__("Quark harness failure")
self.report = report
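# The JSON report parsed in QuarkItem.runtest is expected to be a list of
# entries shaped roughly like {"name": "<test name>", "failures": [...]};
# any entry with a non-empty "failures" list raises QuarkException, which
# repr_failure above pretty-prints per failing test.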
| {
"content_hash": "e83b851ee51e3f76a12d4fdc5cb43595",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 90,
"avg_line_length": 35.45614035087719,
"alnum_prop": 0.5096486887679367,
"repo_name": "datawire/quark",
"id": "60e9bf089fe029bfe1382276cc7484a499f5d844",
"size": "2021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quarkc/test/lib/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1802"
},
{
"name": "HTML",
"bytes": "8346"
},
{
"name": "Java",
"bytes": "381125"
},
{
"name": "JavaScript",
"bytes": "501785"
},
{
"name": "Python",
"bytes": "643417"
},
{
"name": "Ruby",
"bytes": "370423"
},
{
"name": "Shell",
"bytes": "21479"
}
],
"symlink_target": ""
} |
from selenium.webdriver.support.ui import Select
from models.contact import Contact
import re
from random import randrange
class ContactHelper:
def __init__(self, app):
self.app = app
def open_home_page(self):
wd = self.app.wd
wd.get("http://localhost/addressbook/")
def init_contact_creation(self):
wd = self.app.wd
self.open_home_page()
# init contact creating
wd.find_element_by_link_text("add new").click()
def select_group(self):
wd = self.app.wd
list_of_options = wd.find_elements_by_xpath("//select[@name='new_group']//option")
index = randrange(len(list_of_options))
Select(wd.find_element_by_xpath("//select[@name='new_group']")).select_by_index(index)
def create(self, contact):
wd = self.app.wd
self.init_contact_creation()
# create new contact
self.fill_contact_form_with_extended_data(contact)
wd.find_element_by_xpath("//div[@id='content']/form/input[@type='submit']").click()
self.return_to_homepage()
self.contact_cache = None
def create_contact_with_adding_to_group(self, contact):
wd = self.app.wd
self.init_contact_creation()
# create new contact
self.fill_contact_form(contact)
self.select_group()
wd.find_element_by_xpath("//input[@type='submit']").click()
self.return_to_homepage()
self.contact_cache = None
def fill_contact_form_with_extended_data(self, contact):
wd = self.app.wd
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home_number)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mobile_number)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(contact.work_number)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(contact.email)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(contact.email)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(contact.secondary_number)
def return_to_homepage(self):
wd = self.app.wd
# return to the homepage
wd.find_element_by_link_text("home").click()
def select_first_contact(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//input[@id='%s']" % id).click()
def type(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_contact_form(self, contact):
wd = self.app.wd
self.type('firstname', contact.firstname)
self.type('lastname', contact.lastname)
self.type('address', contact.address)
self.type('home', contact.home_number)
self.type('mobile', contact.mobile_number)
def delete_contact(self):
wd = self.app.wd
# select contact
self.select_first_contact()
# init deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
# submit deletion
wd.switch_to_alert().accept()
self.return_to_homepage()
self.contact_cache = None
def delete_some_contact(self, index):
wd = self.app.wd
# select contact
self.select_contact_by_index(index)
# init deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
# submit deletion
wd.switch_to_alert().accept()
self.return_to_homepage()
self.contact_cache = None
def delete_some_contact_by_id(self, id):
wd = self.app.wd
# select contact
self.select_contact_by_id(id)
# init deletion
wd.find_element_by_xpath("//input[@value='Delete']").click()
# submit deletion
wd.switch_to_alert().accept()
self.return_to_homepage()
self.contact_cache = None
def remove_contact_from_group(self, id, group_id):
wd = self.app.wd
self.open_home_page()
wd.find_element_by_xpath("//select[@name='group']").click()
wd.find_element_by_xpath("//select[@name='group']/option[@value='%s']" % group_id).click()
self.select_contact_by_id(id)
wd.find_element_by_xpath("//input[@name='remove']").click()
self.return_to_homepage()
self.contact_cache = None
def edit_some_contact(self, new_group_data, index):
wd = self.app.wd
self.select_contact_by_index(index)
# init edition
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
# modify contact data
self.fill_contact_form(new_group_data)
# submit modifying
wd.find_element_by_name('update').click()
# return to homepage
self.return_to_homepage()
self.contact_cache = None
def edit_some_contact_by_id(self, new_group_data, id):
wd = self.app.wd
self.select_contact_by_id(id)
# init edition
wd.find_element_by_xpath("//tr[td/input[@id='%s']]//img[@alt='Edit']" % id).click()
# modify contact data
self.fill_contact_form(new_group_data)
# submit modifying
wd.find_element_by_name('update').click()
# return to homepage
self.return_to_homepage()
self.contact_cache = None
# def test_f(self, id):
# wd = self.app.wd
# self.select_contact_by_id(id)
# # init edition
# parent_node = wd.find_element_by_xpath("//tr[td/input[@id='%s']]//img[@alt='Edit']" % id).click()
# parent_node.find_element_by_xpath("//img[@alt='Edit']").click()
# return parent_node
def count(self):
wd = self.app.wd
return len(wd.find_elements_by_name('selected[]'))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.contact_cache = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
lastname = cells[1].text
firstname = cells[2].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
id = cells[0].find_element_by_tag_name('input').get_attribute('value')
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname,
all_phones_from_homepage = all_phones,
all_emails_from_homepage = all_emails,
address=address, id=id))
return list(self.contact_cache)
def open_contact_to_edit_by_index(self, c_index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name('entry')[c_index]
cell = row.find_elements_by_tag_name('td')[7]
cell.find_element_by_tag_name('a').click()
def open_contact_view_by_index(self, c_index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name('entry')[c_index]
cell = row.find_elements_by_tag_name('td')[6]
cell.find_element_by_tag_name('a').click()
def get_contact_info_from_edit_page(self, c_index):
wd = self.app.wd
self.open_contact_to_edit_by_index(c_index)
firstname = wd.find_element_by_name('firstname').get_attribute('value')
lastname = wd.find_element_by_name('lastname').get_attribute('value')
address = wd.find_element_by_name('address').get_attribute('value')
home_number = wd.find_element_by_name('home').get_attribute('value')
mobile_number = wd.find_element_by_name('mobile').get_attribute('value')
work_number = wd.find_element_by_name('work').get_attribute('value')
email = wd.find_element_by_name('email').get_attribute('value')
email2 = wd.find_element_by_name('email2').get_attribute('value')
email3 = wd.find_element_by_name('email3').get_attribute('value')
secondary_number = wd.find_element_by_name('phone2').get_attribute('value')
id = wd.find_element_by_name('id').get_attribute('value')
return Contact(firstname=firstname, lastname=lastname, address=address, home_number=home_number,
mobile_number=mobile_number, work_number=work_number,
email=email, email2=email2, email3=email3, secondary_number=secondary_number, id=id)
def get_contact_info_from_view_page(self, c_index):
wd = self.app.wd
self.open_contact_view_by_index(c_index)
text = wd.find_element_by_id('content').text
home_number = re.search('H: (.+)', text).group(1)
mobile_number = re.search('M: (.+)', text).group(1)
work_number = re.search('W: (.+)', text).group(1)
secondary_number = re.search('P: (.+)', text).group(1)
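# note: the view page is assumed to render each phone on its own line prefixed
# by its type, e.g. a hypothetical "M: 555-1234" line yields "555-1234" via
# group(1); the same pattern applies to the H:, W: and P: lines above.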
return Contact(home_number=home_number,
mobile_number=mobile_number, work_number=work_number,
secondary_number=secondary_number) | {
"content_hash": "120cd8bbcfd534d197dd74cf794ff648",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 107,
"avg_line_length": 42.02755905511811,
"alnum_prop": 0.595503512880562,
"repo_name": "s34rching/python_classes",
"id": "cf08c45cd48629f339831e08944c815326804498",
"size": "10675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "394"
},
{
"name": "Gherkin",
"bytes": "1889"
},
{
"name": "HTML",
"bytes": "795"
},
{
"name": "JavaScript",
"bytes": "6636"
},
{
"name": "Python",
"bytes": "51444"
}
],
"symlink_target": ""
} |
import sys, json, os, tempfile
from solver.commonSolver import CommonSolver
from logic.smbool import SMBool
from logic.smboolmanager import SMBoolManagerPlando as SMBoolManager
from logic.helpers import Pickup
from rom.rompatcher import RomPatcher
from rom.rom_patches import RomPatches
from graph.graph import AccessGraphSolver as AccessGraph
from graph.graph_utils import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, GraphUtils
from graph.location import define_location
from utils.utils import removeChars
from solver.conf import Conf
from utils.parameters import hard, infinity
from solver.solverState import SolverState
from solver.comeback import ComeBack
from rando.ItemLocContainer import ItemLocation
from utils.doorsmanager import DoorsManager
from logic.logic import Logic
from utils.objectives import Objectives
import utils.log
class InteractiveSolver(CommonSolver):
def __init__(self, shm, logic):
self.interactive = True
self.errorMsg = ""
self.checkDuplicateMajor = False
self.vcr = None
self.log = utils.log.get('Solver')
# only available since python 3.8, so import it here to keep >= 3.6 compatibility for CLI
from utils.shm import SHM
self.shm = SHM(shm)
self.firstLogFile = None
Logic.factory(logic)
self.locations = Logic.locations
(self.locsAddressName, self.locsWeb2Internal) = self.initLocsAddressName()
self.transWeb2Internal = self.initTransitionsName()
Conf.difficultyTarget = infinity
self.objectives = Objectives()
# no time limitation
self.runtimeLimit_s = 0
# used by auto tracker to know how many locs have changed
self.locDelta = 0
def initLocsAddressName(self):
addressName = {}
web2Internal = {}
for loc in Logic.locations:
webName = self.locNameInternal2Web(loc.Name)
addressName[loc.Address % 0x10000] = webName
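# only the low 16 bits of the location address are kept as the key, e.g. a
# hypothetical address 0x786DE is stored under 0x86DE (0x786DE % 0x10000).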
web2Internal[webName] = loc.Name
return (addressName, web2Internal)
def initTransitionsName(self):
web2Internal = {}
for (startPoint, endPoint) in vanillaTransitions + vanillaBossesTransitions + vanillaEscapeTransitions:
for point in [startPoint, endPoint]:
web2Internal[self.apNameInternal2Web(point)] = point
return web2Internal
def dumpState(self):
state = SolverState(self.debug)
state.fromSolver(self)
self.shm.writeMsgJson(state.get())
self.shm.finish(False)
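# the solver state is serialized to shared memory as JSON; the caller
# (presumably the web tracker/plando frontend) reads it back from there.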
def initialize(self, mode, rom, presetFileName, magic, fill, startLocation):
# load rom and preset, return first state
self.debug = mode == "debug"
self.mode = mode
if self.mode != "seedless":
self.seed = os.path.basename(os.path.splitext(rom)[0])+'.sfc'
else:
self.seed = "seedless"
self.smbm = SMBoolManager()
self.presetFileName = presetFileName
self.loadPreset(self.presetFileName)
self.loadRom(rom, interactive=True, magic=magic, startLocation=startLocation)
# in plando/tracker always consider that we're doing full
self.majorsSplit = 'Full'
# hide doors
if self.doorsRando and mode in ['standard', 'race']:
DoorsManager.initTracker()
self.clearItems()
# in debug mode don't load plando locs/transitions
if self.mode == 'plando' and self.debug == False:
if fill == True:
# load the source seed transitions and items/locations
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.buildGraph()
self.fillPlandoLocs()
else:
if self.areaRando == True or self.bossRando == True:
plandoTrans = self.loadPlandoTransitions()
if len(plandoTrans) > 0:
self.curGraphTransitions = plandoTrans
self.buildGraph()
self.loadPlandoLocs()
# if tourian is disabled remove mother brain location
if self.tourian == 'Disabled':
mbLoc = self.getLoc('Mother Brain')
self.locations.remove(mbLoc)
# compute new available locations
self.computeLocationsDifficulty(self.majorLocations)
self.checkGoals()
self.dumpState()
def iterate(self, scope, action, params):
self.debug = params["debug"]
self.smbm = SMBoolManager()
state = SolverState()
state.set(self.shm.readMsgJson())
state.toSolver(self)
self.objectives.setSolverMode(self)
# save current AP
previousAP = self.lastAP
self.loadPreset(self.presetFileName)
# add already collected items to smbm
self.smbm.addItems(self.collectedItems)
if scope == 'item':
if action == 'clear':
self.clearItems(True)
else:
if action == 'add':
if self.mode in ['plando', 'seedless', 'race', 'debug']:
if params['loc'] != None:
if self.mode == 'plando':
self.setItemAt(params['loc'], params['item'], params['hide'])
else:
itemName = params.get('item', 'Nothing')
if itemName is None:
itemName = 'Nothing'
self.setItemAt(params['loc'], itemName, False)
else:
self.increaseItem(params['item'])
else:
# pickup item at locName
self.pickItemAt(params['loc'])
elif action == 'remove':
if 'loc' in params:
self.removeItemAt(params['loc'])
elif 'count' in params:
# remove last collected item
self.cancelLastItems(params['count'])
else:
self.decreaseItem(params['item'])
elif action == 'replace':
self.replaceItemAt(params['loc'], params['item'], params['hide'])
elif action == 'toggle':
self.toggleItem(params['item'])
elif action == 'upload_scav':
self.updatePlandoScavengerOrder(params['plandoScavengerOrder'])
elif scope == 'area':
if action == 'clear':
self.clearTransitions()
else:
if action == 'add':
startPoint = params['startPoint']
endPoint = params['endPoint']
self.addTransition(self.transWeb2Internal[startPoint], self.transWeb2Internal[endPoint])
elif action == 'remove':
if 'startPoint' in params:
self.cancelTransition(self.transWeb2Internal[params['startPoint']])
else:
# remove last transition
self.cancelLastTransition()
elif scope == 'door':
if action == 'replace':
doorName = params['doorName']
newColor = params['newColor']
DoorsManager.setColor(doorName, newColor)
elif action == 'toggle':
doorName = params['doorName']
DoorsManager.switchVisibility(doorName)
elif action == 'clear':
DoorsManager.initTracker()
elif scope == 'dump':
if action == 'import':
self.importDump(params["dump"])
self.buildGraph()
if scope == 'common':
if action == 'save':
return self.savePlando(params['lock'], params['escapeTimer'])
elif action == 'randomize':
self.randoPlando(params)
rewindLimit = self.locDelta if scope == 'dump' and self.locDelta > 0 else 1
lastVisitedLocs = []
# if last loc added was a sequence break, recompute its difficulty,
# as it may be available with the newly placed item.
# this is generalized for the auto-tracker, which can add more than one loc at once.
if len(self.visitedLocations) > 0:
for i in range(1, rewindLimit+1):
if i > len(self.visitedLocations):
break
else:
loc = self.visitedLocations[-i]
# check that the ap of the loc is available from the previous ap,
# else it may set loc diff to easy
if (loc.difficulty.difficulty == -1 and
loc.accessPoint is not None and
self.areaGraph.canAccess(self.smbm, previousAP, loc.accessPoint, Conf.difficultyTarget)):
lastVisitedLocs.append(loc)
for loc in lastVisitedLocs:
self.visitedLocations.remove(loc)
self.majorLocations.append(loc)
# compute new available locations
self.clearLocs(self.majorLocations)
self.computeLocationsDifficulty(self.majorLocations)
while True:
remainLocs = []
okLocs = []
for loc in lastVisitedLocs:
if loc.difficulty == False:
remainLocs.append(loc)
else:
okLocs.append(loc)
if len(remainLocs) == len(lastVisitedLocs):
# all remaining locs are seq break
for loc in lastVisitedLocs:
self.majorLocations.remove(loc)
self.visitedLocations.append(loc)
if loc.difficulty == False:
# if the loc is still sequence break, put it back as sequence break
loc.difficulty = SMBool(True, -1)
break
else:
# add available locs
for loc in okLocs:
lastVisitedLocs.remove(loc)
self.majorLocations.remove(loc)
self.visitedLocations.append(loc)
# compute again
self.clearLocs(self.majorLocations)
self.computeLocationsDifficulty(self.majorLocations)
# autotracker handles objectives
if not (scope == 'dump' and action == 'import'):
self.checkGoals()
# return them
self.dumpState()
def checkGoals(self):
# check if objectives can be completed
self.newlyCompletedObjectives = []
goals = self.objectives.checkGoals(self.smbm, self.lastAP)
for goalName, canClear in goals.items():
if canClear:
self.objectives.setGoalCompleted(goalName, True)
self.newlyCompletedObjectives.append("Completed objective: {}".format(goalName))
def getLocNameFromAddress(self, address):
return self.locsAddressName[address]
def loadPlandoTransitions(self):
# add escape transition
transitionsAddr = self.romLoader.getPlandoTransitions(len(vanillaBossesTransitions) + len(vanillaTransitions) + 1)
return GraphUtils.getTransitions(transitionsAddr)
def loadPlandoLocs(self):
# get the addresses of the already filled locs, with the correct order
addresses = self.romLoader.getPlandoAddresses()
# create a copy of the locations to avoid removing locs from self.locations
self.majorLocations = self.locations[:]
for address in addresses:
# TODO::compute only the difficulty of the current loc
self.computeLocationsDifficulty(self.majorLocations)
locName = self.getLocNameFromAddress(address)
self.pickItemAt(locName)
def fillPlandoLocs(self):
self.pickup = Pickup("all")
self.comeBack = ComeBack(self)
# backup
locationsBck = self.locations[:]
self.lastAP = self.startLocation
self.lastArea = self.startArea
(self.difficulty, self.itemsOk) = self.computeDifficulty()
if self.itemsOk == False:
# add remaining locs as sequence break
for loc in self.majorLocations[:]:
loc.difficulty = SMBool(True, -1)
if loc.accessPoint is not None:
# take first ap of the loc
loc.accessPoint = list(loc.AccessFrom)[0]
self.collectMajor(loc)
self.locations = locationsBck
def fillGraph(self):
# add self-looping transitions on unused access points
usedAPs = {}
for (src, dst) in self.curGraphTransitions:
usedAPs[src] = True
usedAPs[dst] = True
singleAPs = []
for ap in Logic.accessPoints:
if ap.isInternal() == True:
continue
if ap.Name not in usedAPs:
singleAPs.append(ap.Name)
transitions = self.curGraphTransitions[:]
for apName in singleAPs:
transitions.append((apName, apName))
return transitions
def randoPlando(self, parameters):
# if all the locations are visited, do nothing
if len(self.majorLocations) == 0:
return
plandoLocsItems = {}
for loc in self.visitedLocations:
plandoLocsItems[loc.Name] = loc.itemName
plandoCurrent = {
"locsItems": plandoLocsItems,
"transitions": self.fillGraph(),
"patches": RomPatches.ActivePatches,
"doors": DoorsManager.serialize(),
"forbiddenItems": parameters["forbiddenItems"],
"objectives": self.objectives.getGoalsList(),
"tourian": self.tourian
}
plandoCurrentJson = json.dumps(plandoCurrent)
(fd, jsonOutFileName) = tempfile.mkstemp()
os.close(fd)
from utils.utils import getPythonExec
params = [
getPythonExec(), os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
'--runtime', '10',
'--param', self.presetFileName,
'--output', jsonOutFileName,
'--plandoRando', plandoCurrentJson,
'--progressionSpeed', 'speedrun',
'--minorQty', parameters["minorQty"],
'--maxDifficulty', 'hardcore',
'--energyQty', parameters["energyQty"],
'--startLocation', self.startLocation
]
import subprocess
subprocess.call(params)
with open(jsonOutFileName, 'r') as jsonFile:
data = json.load(jsonFile)
os.remove(jsonOutFileName)
self.errorMsg = data["errorMsg"]
# load the locations
if "itemLocs" in data:
self.clearItems(reload=True)
itemsLocs = data["itemLocs"]
# create a copy because we need self.locations to be full, else the state will be empty
self.majorLocations = self.locations[:]
# if tourian is disabled remove mother brain from itemsLocs if the rando added it
if self.tourian == 'Disabled':
if itemsLocs and itemsLocs[-1]["Location"]["Name"] == "Mother Brain":
itemsLocs.pop()
for itemLoc in itemsLocs:
locName = itemLoc["Location"]["Name"]
loc = self.getLoc(locName)
# we can have locations from non connected areas
if "difficulty" in itemLoc["Location"]:
difficulty = itemLoc["Location"]["difficulty"]
smbool = SMBool(difficulty["bool"], difficulty["difficulty"], difficulty["knows"], difficulty["items"])
loc.difficulty = smbool
itemName = itemLoc["Item"]["Type"]
loc.itemName = itemName
loc.accessPoint = itemLoc["Location"]["accessPoint"]
self.collectMajor(loc)
def savePlando(self, lock, escapeTimer):
# store filled locations addresses in the ROM for next creating session
errorMsg = ""
from rando.Items import ItemManager
locsItems = {}
itemLocs = []
for loc in self.visitedLocations:
locsItems[loc.Name] = loc.itemName
for loc in self.locations:
if loc.Name in locsItems:
itemLocs.append(ItemLocation(ItemManager.getItem(loc.itemName), loc))
else:
# put nothing items in unused locations
itemLocs.append(ItemLocation(ItemManager.getItem("Nothing"), loc))
# patch the ROM
if lock == True:
import random
magic = random.randint(1, sys.maxsize)
else:
magic = None
# plando is considered Full
majorsSplit = self.masterMajorsSplit if self.masterMajorsSplit in ["FullWithHUD", "Scavenger"] else "Full"
class FakeRandoSettings:
def __init__(self):
self.qty = {'energy': 'plando'}
self.progSpeed = 'plando'
self.progDiff = 'plando'
self.restrictions = {'Suits': False, 'Morph': 'plando'}
self.superFun = {}
randoSettings = FakeRandoSettings()
escapeAttr = None
if self.escapeRando == True and escapeTimer != None:
# convert from '03:00' to number of seconds
escapeTimer = int(escapeTimer[0:2]) * 60 + int(escapeTimer[3:5])
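# e.g. a '03:25' timer becomes 3*60 + 25 = 205 seconds.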
escapeAttr = {'Timer': escapeTimer, 'Animals': None, 'patches': []}
progItemLocs = []
if majorsSplit == "Scavenger":
def getLoc(locName):
for loc in self.locations:
if loc.Name == locName:
return loc
for locName in self.plandoScavengerOrder:
loc = getLoc(locName)
if locName in locsItems:
item = ItemManager.getItem(loc.itemName)
else:
item = ItemManager.getItem("Nothing")
errorMsg = "Nothing at a Scavenger location, seed is unfinishable"
progItemLocs.append(ItemLocation(Location=loc, Item=item))
if RomPatches.ProgressiveSuits in RomPatches.ActivePatches:
suitsMode = "Progressive"
elif RomPatches.NoGravityEnvProtection in RomPatches.ActivePatches:
suitsMode = "Balanced"
else:
suitsMode = "Vanilla"
patches = ["Escape_Animals_Disable"]
doors = GraphUtils.getDoorConnections(AccessGraph(Logic.accessPoints, self.fillGraph()), self.areaRando,
self.bossRando, self.escapeRando, False)
from utils.version import displayedVersion
patcherSettings = {
"isPlando": True,
"majorsSplit": majorsSplit,
"startLocation": self.startLocation,
"optionalPatches": patches,
"layout": RomPatches.MoatShotBlock in RomPatches.ActivePatches,
"suitsMode": suitsMode,
"area": self.areaRando,
"boss": self.bossRando,
"areaLayout": RomPatches.AreaRandoGatesOther in RomPatches.ActivePatches,
"variaTweaks": False,
"nerfedCharge": False,
"nerfedRainbowBeam": False,
"escapeAttr": escapeAttr,
"escapeRandoRemoveEnemies": False,
"minimizerN": None,
"tourian": self.tourian,
"doorsColorsRando": DoorsManager.isRandom(),
"vanillaObjectives": self.objectives.isVanilla(),
"ctrlDict": None,
"moonWalk": False,
"seed": None,
"randoSettings": randoSettings,
"doors": doors,
"displayedVersion": displayedVersion,
"itemLocs": itemLocs,
"progItemLocs": progItemLocs,
"plando": {
"graphTrans": self.curGraphTransitions,
"maxTransitions": len(vanillaBossesTransitions) + len(vanillaTransitions),
"visitedLocations": self.visitedLocations,
"additionalETanks": self.additionalETanks
}
}
romPatcher = RomPatcher(settings=patcherSettings, magic=magic)
romPatcher.patchRom()
data = romPatcher.romFile.data
preset = os.path.splitext(os.path.basename(self.presetFileName))[0]
seedCode = 'FX'
if self.bossRando == True:
seedCode = 'B'+seedCode
if DoorsManager.isRandom():
seedCode = 'D'+seedCode
if self.areaRando == True:
seedCode = 'A'+seedCode
from time import gmtime, strftime
fileName = 'VARIA_Plandomizer_{}{}_{}.sfc'.format(seedCode, strftime("%Y%m%d%H%M%S", gmtime()), preset)
data["fileName"] = fileName
# error msg in json to be displayed by the web site
data["errorMsg"] = errorMsg
self.shm.writeMsgJson(data)
self.shm.finish(False)
def locNameInternal2Web(self, locName):
return removeChars(locName, " ,()-")
def locNameWeb2Internal(self, locNameWeb):
return self.locsWeb2Internal[locNameWeb]
def apNameInternal2Web(self, apName):
return apName[0].lower() + removeChars(apName[1:], " ")
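# illustration of the two web-name conversions above (values for example only):
#   locNameInternal2Web("Energy Tank, Gauntlet") -> "EnergyTankGauntlet"
#   apNameInternal2Web("Landing Site")           -> "landingSite"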
def getWebLoc(self, locNameWeb):
locName = self.locNameWeb2Internal(locNameWeb)
for loc in self.locations:
if loc.Name == locName:
return loc
raise Exception("Location '{}' not found".format(locName))
def pickItemAt(self, locName):
# collect new item at newLoc
loc = self.getWebLoc(locName)
# check that location has not already been visited
if loc in self.visitedLocations:
self.errorMsg = "Location '{}' has already been visited".format(loc.Name)
return
if loc.difficulty is None or loc.difficulty == False:
# sequence break
loc.difficulty = SMBool(True, -1)
if loc.accessPoint is None:
# take first ap of the loc
loc.accessPoint = list(loc.AccessFrom)[0]
self.collectMajor(loc)
def setItemAt(self, locName, itemName, hide):
# set itemName at locName
loc = self.getWebLoc(locName)
# check if location has not already been visited
if loc in self.visitedLocations:
self.errorMsg = "Location {} has already been visited".format(loc.Name)
return
# plando mode
loc.itemName = itemName
if loc.difficulty is None:
# sequence break
loc.difficulty = SMBool(True, -1)
if loc.accessPoint is None:
# take first ap of the loc
loc.accessPoint = list(loc.AccessFrom)[0]
if hide == True:
loc.Visibility = 'Hidden'
if loc in self.majorLocations:
self.collectMajor(loc, itemName)
def replaceItemAt(self, locName, itemName, hide):
# replace itemName at locName
loc = self.getWebLoc(locName)
oldItemName = loc.itemName
# replace item at the old item spot in collectedItems
try:
index = next(i for i, vloc in enumerate(self.visitedLocations) if vloc.Name == loc.Name)
except Exception as e:
self.errorMsg = "Empty location {}".format(locName)
return
# major item can be set multiple times in plando mode
count = self.collectedItems.count(oldItemName)
isCount = self.smbm.isCountItem(oldItemName)
# update item in collected items after we check the count
self.collectedItems[index] = itemName
loc.itemName = itemName
# update smbm if count item or major was only there once
if isCount == True or count == 1:
self.smbm.removeItem(oldItemName)
if hide == True:
loc.Visibility = 'Hidden'
elif loc.CanHidden == True and loc.Visibility == 'Hidden':
# the loc was previously hidden, set it back to visible
loc.Visibility = 'Visible'
self.smbm.addItem(itemName)
def increaseItem(self, item):
# add item at the beginning of collectedItems so as not to mess with item removal when cancelling a location
self.collectedItems.insert(0, item)
self.smbm.addItem(item)
def decreaseItem(self, item):
if item in self.collectedItems:
self.collectedItems.remove(item)
self.smbm.removeItem(item)
def toggleItem(self, item):
# add or remove a major item
if item in self.collectedItems:
self.collectedItems.remove(item)
self.smbm.removeItem(item)
else:
self.collectedItems.insert(0, item)
self.smbm.addItem(item)
def clearItems(self, reload=False):
self.collectedItems = []
self.visitedLocations = []
self.lastAP = self.startLocation
self.lastArea = self.startArea
self.majorLocations = self.locations
if reload == True:
for loc in self.majorLocations:
loc.difficulty = None
self.smbm.resetItems()
self.objectives.resetGoals()
def updatePlandoScavengerOrder(self, plandoScavengerOrder):
self.plandoScavengerOrder = plandoScavengerOrder
def addTransition(self, startPoint, endPoint):
# the controller has already checked that the transition is valid for the seed
self.curGraphTransitions.append((startPoint, endPoint))
def cancelLastTransition(self):
if self.areaRando == True and self.bossRando == True:
if len(self.curGraphTransitions) > 0:
self.curGraphTransitions.pop()
elif self.areaRando == True:
if len(self.curGraphTransitions) > len(self.bossTransitions) + (1 if self.escapeRando == False else 0):
self.curGraphTransitions.pop()
elif self.bossRando == True:
print("len cur graph: {} len area: {} len escape: {} len sum: {}".format(len(self.curGraphTransitions), len(self.areaTransitions), 1 if self.escapeRando == False else 0, len(self.areaTransitions) + (1 if self.escapeRando == False else 0)))
if len(self.curGraphTransitions) > len(self.areaTransitions) + (1 if self.escapeRando == False else 0):
self.curGraphTransitions.pop()
elif self.escapeRando == True:
if len(self.curGraphTransitions) > len(self.areaTransitions) + len(self.bossTransitions):
self.curGraphTransitions.pop()
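# note: curGraphTransitions always starts with the static transitions for the
# current rando settings (e.g. the boss transitions when only areas are
# randomized, plus the escape transition when escape is not randomized); the
# length checks above only allow popping transitions added on top of that
# baseline.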
def cancelTransition(self, startPoint):
# get end point
endPoint = None
for (i, (start, end)) in enumerate(self.curGraphTransitions):
if start == startPoint:
endPoint = end
break
elif end == startPoint:
endPoint = start
break
if endPoint == None:
# shouldn't happen
return
# check that transition is cancelable
if self.areaRando == True and self.bossRando == True and self.escapeRando == True:
if len(self.curGraphTransitions) == 0:
return
elif self.areaRando == True and self.escapeRando == False:
if len(self.curGraphTransitions) == len(self.bossTransitions) + len(self.escapeTransition):
return
elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
return
elif [startPoint, endPoint] in self.escapeTransition or [endPoint, startPoint] in self.escapeTransition:
return
elif self.bossRando == True and self.escapeRando == False:
if len(self.curGraphTransitions) == len(self.areaTransitions) + len(self.escapeTransition):
return
elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
return
elif [startPoint, endPoint] in self.escapeTransition or [endPoint, startPoint] in self.escapeTransition:
return
elif self.areaRando == True and self.escapeRando == True:
if len(self.curGraphTransitions) == len(self.bossTransitions):
return
elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
return
elif self.bossRando == True and self.escapeRando == True:
if len(self.curGraphTransitions) == len(self.areaTransitions):
return
elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
return
elif self.escapeRando == True and self.areaRando == False and self.bossRando == False:
if len(self.curGraphTransitions) == len(self.areaTransitions) + len(self.bossTransitions):
return
elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
return
elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
return
# remove transition
self.curGraphTransitions.pop(i)
def clearTransitions(self):
if self.areaRando == True and self.bossRando == True:
self.curGraphTransitions = []
elif self.areaRando == True:
self.curGraphTransitions = self.bossTransitions[:]
elif self.bossRando == True:
self.curGraphTransitions = self.areaTransitions[:]
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions
if self.escapeRando == False:
self.curGraphTransitions += self.escapeTransition
def clearLocs(self, locs):
for loc in locs:
loc.difficulty = None
def getDiffThreshold(self):
# in interactive solver we don't have the max difficulty parameter
epsilon = 0.001
return hard - epsilon
# byteIndex is area index
bossBitMasks = {
"Kraid": {"byteIndex": 0x01, "bitMask": 0x01},
"Ridley": {"byteIndex": 0x02, "bitMask": 0x01},
"Phantoon": {"byteIndex": 0x03, "bitMask": 0x01},
"Draygon": {"byteIndex": 0x04, "bitMask": 0x01},
"Mother Brain": {"byteIndex": 0x05, "bitMask": 0x02},
"Spore Spawn": {"byteIndex": 0x01, "bitMask": 0x02},
"Crocomire": {"byteIndex": 0x02, "bitMask": 0x02},
"Botwoon": {"byteIndex": 0x04, "bitMask": 0x02},
"Golden Torizo": {"byteIndex": 0x02, "bitMask": 0x04}
}
eventsBitMasks = {}
areaAccessPoints = {
"Lower Mushrooms Left": {"byteIndex": 36, "bitMask": 1, "room": 0x9969, "area": "Crateria"},
"Green Pirates Shaft Bottom Right": {"byteIndex": 37, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
"Moat Right": {"byteIndex": 148, "bitMask": 4, "room": 0x95ff, "area": "Crateria"},
"Keyhunter Room Bottom": {"byteIndex": 156, "bitMask": 32, "room": 0x948c, "area": "Crateria"},
"Morph Ball Room Left": {"byteIndex": 46, "bitMask": 4, "room": 0x9e9f, "area": "Brinstar"},
"Green Brinstar Elevator": {"byteIndex": 36, "bitMask": 2, "room": 0x9938, "area": "Crateria"},
"Green Hill Zone Top Right": {"byteIndex": 46, "bitMask": 8, "room": 0x9e52, "area": "Brinstar"},
"Noob Bridge Right": {"byteIndex": 184, "bitMask": 128, "room": 0x9fba, "area": "Brinstar"},
"West Ocean Left": {"byteIndex": 148, "bitMask": 2, "room": 0x93fe, "area": "Crateria"},
"Crab Maze Left": {"byteIndex": 170, "bitMask": 4, "room": 0x957d, "area": "Crateria"},
"Lava Dive Right": {"byteIndex": 47, "bitMask": 64, "room": 0xaf14, "area": "Norfair"},
"Three Muskateers Room Left": {"byteIndex": 19, "bitMask": 2, "room": 0xb656, "area": "Norfair"},
"Warehouse Zeela Room Left": {"byteIndex": 205, "bitMask": 8, "room": 0xa471, "area": "Brinstar"},
"Warehouse Entrance Left": {"byteIndex": 205, "bitMask": 64, "room": 0xa6a1, "area": "Brinstar"},
"Warehouse Entrance Right": {"byteIndex": 205, "bitMask": 16, "room": 0xa6a1, "area": "Brinstar"},
"Single Chamber Top Right": {"byteIndex": 19, "bitMask": 4, "room": 0xad5e, "area": "Norfair"},
"Kronic Boost Room Bottom Left": {"byteIndex": 47, "bitMask": 32, "room": 0xae74, "area": "Norfair"},
"Crocomire Speedway Bottom": {"byteIndex": 41, "bitMask": 1, "room": 0xa923, "area": "Norfair"},
"Crocomire Room Top": {"byteIndex": 45, "bitMask": 1, "room": 0xa98d, "area": "Norfair"},
"Main Street Bottom": {"byteIndex": 69, "bitMask": 16, "room": 0xcfc9, "area": "Maridia"},
"Crab Hole Bottom Left": {"byteIndex": 74, "bitMask": 128, "room": 0xd21c, "area": "Maridia"},
"Red Fish Room Left": {"byteIndex": 33, "bitMask": 8, "room": 0xd104, "area": "Maridia"},
"Crab Shaft Right": {"byteIndex": 46, "bitMask": 16, "room": 0xd1a3, "area": "Maridia"},
"Aqueduct Top Left": {"byteIndex": 46, "bitMask": 8, "room": 0xd5a7, "area": "Maridia"},
"Le Coude Right": {"byteIndex": 170, "bitMask": 8, "room": 0x95a8, "area": "Crateria"},
"Red Tower Top Left": {"byteIndex": 184, "bitMask": 64, "room": 0xa253, "area": "Brinstar"},
"Caterpillar Room Top Right": {"byteIndex": 160, "bitMask": 1, "room": 0xa322, "area": "Brinstar"},
"Red Brinstar Elevator": {"byteIndex": 160, "bitMask": 32, "room": 0x962a, "area": "Crateria"},
"East Tunnel Right": {"byteIndex": 77, "bitMask": 8, "room": 0xcf80, "area": "Maridia"},
"East Tunnel Top Right": {"byteIndex": 73, "bitMask": 1, "room": 0xcf80, "area": "Maridia"},
"Glass Tunnel Top": {"byteIndex": 73, "bitMask": 16, "room": 0xcefb, "area": "Maridia"},
"Golden Four": {"byteIndex": 37, "bitMask": 8, "room": 0xa5ed, "area": "Crateria"}
}
bossAccessPoints = {
"PhantoonRoomOut": {"byteIndex": 82, "bitMask": 32, "room": 0xcc6f, "area": "WreckedShip"},
"PhantoonRoomIn": {"byteIndex": 82, "bitMask": 16, "room": 0xcd13, "area": "WreckedShip"},
"RidleyRoomOut": {"byteIndex": 71, "bitMask": 128, "room": 0xb37a, "area": "Norfair"},
"RidleyRoomIn": {"byteIndex": 70, "bitMask": 1, "room": 0xb32e, "area": "Norfair"},
"KraidRoomOut": {"byteIndex": 210, "bitMask": 2, "room": 0xa56b, "area": "Brinstar"},
"KraidRoomIn": {"byteIndex": 210, "bitMask": 1, "room": 0xa59f, "area": "Brinstar"},
"DraygonRoomOut": {"byteIndex": 169, "bitMask": 64, "room": 0xd78f, "area": "Maridia"},
"DraygonRoomIn": {"byteIndex": 169, "bitMask": 128, "room": 0xda60, "area": "Maridia"}
}
escapeAccessPoints = {
'Tourian Escape Room 4 Top Right': {"byteIndex": 74, "bitMask": 8, "room": 0xdede, "area": "Tourian"},
'Climb Bottom Left': {"byteIndex": 74, "bitMask": 32, "room": 0x96ba, "area": "Crateria"},
'Green Brinstar Main Shaft Top Left': {"byteIndex": 21, "bitMask": 64, "room": 0x9ad9, "area": "Brinstar"},
'Basement Left': {"byteIndex": 81, "bitMask": 2, "room": 0xcc6f, "area": "WreckedShip"},
'Business Center Mid Left': {"byteIndex": 21, "bitMask": 32, "room": 0xa7de, "area": "Norfair"},
'Crab Hole Bottom Right': {"byteIndex": 74, "bitMask": 128, "room": 0xd21c, "area": "Maridia"}
}
nothingScreens = {
"Energy Tank, Gauntlet": {"byteIndex": 14, "bitMask": 64, "room": 0x965b, "area": "Crateria"},
"Bomb": {"byteIndex": 31, "bitMask": 64, "room": 0x9804, "area": "Crateria"},
"Energy Tank, Terminator": {"byteIndex": 29, "bitMask": 8, "room": 0x990d, "area": "Crateria"},
"Reserve Tank, Brinstar": {"byteIndex": 21, "bitMask": 4, "room": 0x9c07, "area": "Brinstar"},
"Charge Beam": {"byteIndex": 50, "bitMask": 64, "room": 0x9d19, "area": "Brinstar"},
"Morphing Ball": {"byteIndex": 47, "bitMask": 64, "room": 0x9e9f, "area": "Brinstar"},
"Energy Tank, Brinstar Ceiling": {"byteIndex": 47, "bitMask": 1, "room": 0x9f64, "area": "Brinstar"},
"Energy Tank, Etecoons": {"byteIndex": 44, "bitMask": 2, "room": 0xa011, "area": "Brinstar"},
"Energy Tank, Waterway": {"byteIndex": 57, "bitMask": 128, "room": 0xa0d2, "area": "Brinstar"},
"Energy Tank, Brinstar Gate": {"byteIndex": 38, "bitMask": 4, "room": 0xa15b, "area": "Brinstar"},
"X-Ray Scope": {"byteIndex": 66, "bitMask": 1, "room": 0xa2ce, "area": "Brinstar"},
"Spazer": {"byteIndex": 200, "bitMask": 2, "room": 0xa447, "area": "Brinstar"},
"Energy Tank, Kraid": {"byteIndex": 209, "bitMask": 16, "room": 0xa4b1, "area": "Brinstar"},
"Varia Suit": {"byteIndex": 211, "bitMask": 64, "room": 0xa6e2, "area": "Brinstar"},
"Ice Beam": {"byteIndex": 12, "bitMask": 4, "room": 0xa890, "area": "Norfair"},
"Energy Tank, Crocomire": {"byteIndex": 46, "bitMask": 16, "room": 0xa98d, "area": "Norfair"},
"Hi-Jump Boots": {"byteIndex": 28, "bitMask": 1, "room": 0xa9e5, "area": "Norfair"},
"Grapple Beam": {"byteIndex": 68, "bitMask": 16, "room": 0xac2b, "area": "Norfair"},
"Reserve Tank, Norfair": {"byteIndex": 14, "bitMask": 32, "room": 0xac5a, "area": "Norfair"},
"Speed Booster": {"byteIndex": 140, "bitMask": 4, "room": 0xad1b, "area": "Norfair"},
"Wave Beam": {"byteIndex": 23, "bitMask": 4, "room": 0xadde, "area": "Norfair"},
"Energy Tank, Ridley": {"byteIndex": 74, "bitMask": 2, "room": 0xb698, "area": "Norfair"},
"Screw Attack": {"byteIndex": 70, "bitMask": 8, "room": 0xb6c1, "area": "Norfair"},
"Energy Tank, Firefleas": {"byteIndex": 176, "bitMask": 4, "room": 0xb6ee, "area": "Norfair"},
"Reserve Tank, Wrecked Ship": {"byteIndex": 49, "bitMask": 1, "room": 0xc98e, "area": "WreckedShip"},
"Energy Tank, Wrecked Ship": {"byteIndex": 58, "bitMask": 32, "room": 0xcc27, "area": "WreckedShip"},
"Right Super, Wrecked Ship": {"byteIndex": 74, "bitMask": 4, "room": 0xcdf1, "area": "WreckedShip"},
"Gravity Suit": {"byteIndex": 57, "bitMask": 32, "room": 0xce40, "area": "WreckedShip"},
"Energy Tank, Mama turtle": {"byteIndex": 54, "bitMask": 16, "room": 0xd055, "area": "Maridia"},
"Plasma Beam": {"byteIndex": 15, "bitMask": 8, "room": 0xd2aa, "area": "Maridia"},
"Reserve Tank, Maridia": {"byteIndex": 62, "bitMask": 8, "room": 0xd4ef, "area": "Maridia"},
"Spring Ball": {"byteIndex": 196, "bitMask": 64, "room": 0xd6d0, "area": "Maridia"},
"Energy Tank, Botwoon": {"byteIndex": 39, "bitMask": 4, "room": 0xd7e4, "area": "Maridia"},
"Space Jump": {"byteIndex": 172, "bitMask": 2, "room": 0xd9aa, "area": "Maridia"},
"Power Bomb (Crateria surface)": {"byteIndex": 136, "bitMask": 64, "room": 0x93aa, "area": "Crateria"},
"Missile (outside Wrecked Ship bottom)": {"byteIndex": 152, "bitMask": 2, "room": 0x93fe, "area": "Crateria"},
"Missile (outside Wrecked Ship top)": {"byteIndex": 132, "bitMask": 1, "room": 0x93fe, "area": "Crateria"},
"Missile (outside Wrecked Ship middle)": {"byteIndex": 140, "bitMask": 2, "room": 0x93fe, "area": "Crateria"},
"Missile (Crateria moat)": {"byteIndex": 148, "bitMask": 8, "room": 0x95ff, "area": "Crateria"},
"Missile (Crateria bottom)": {"byteIndex": 78, "bitMask": 8, "room": 0x975c, "area": "Crateria"},
"Missile (Crateria gauntlet right)": {"byteIndex": 17, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
"Missile (Crateria gauntlet left)": {"byteIndex": 17, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
"Super Missile (Crateria)": {"byteIndex": 43, "bitMask": 128, "room": 0x99f9, "area": "Crateria"},
"Missile (Crateria middle)": {"byteIndex": 34, "bitMask": 128, "room": 0x9a90, "area": "Crateria"},
"Power Bomb (green Brinstar bottom)": {"byteIndex": 33, "bitMask": 8, "room": 0x9ad9, "area": "Brinstar"},
"Super Missile (pink Brinstar)": {"byteIndex": 43, "bitMask": 128, "room": 0x9b5b, "area": "Brinstar"},
"Missile (green Brinstar below super missile)": {"byteIndex": 21, "bitMask": 16, "room": 0x9bc8, "area": "Brinstar"},
"Super Missile (green Brinstar top)": {"byteIndex": 17, "bitMask": 32, "room": 0x9bc8, "area": "Brinstar"},
"Missile (green Brinstar behind missile)": {"byteIndex": 21, "bitMask": 2, "room": 0x9c07, "area": "Brinstar"},
"Missile (green Brinstar behind reserve tank)": {"byteIndex": 21, "bitMask": 2, "room": 0x9c07, "area": "Brinstar"},
"Missile (pink Brinstar top)": {"byteIndex": 34, "bitMask": 64, "room": 0x9d19, "area": "Brinstar"},
"Missile (pink Brinstar bottom)": {"byteIndex": 46, "bitMask": 64, "room": 0x9d19, "area": "Brinstar"},
"Power Bomb (pink Brinstar)": {"byteIndex": 37, "bitMask": 1, "room": 0x9e11, "area": "Brinstar"},
"Missile (green Brinstar pipe)": {"byteIndex": 50, "bitMask": 2, "room": 0x9e52, "area": "Brinstar"},
"Power Bomb (blue Brinstar)": {"byteIndex": 46, "bitMask": 1, "room": 0x9e9f, "area": "Brinstar"},
"Missile (blue Brinstar middle)": {"byteIndex": 172, "bitMask": 128, "room": 0x9f64, "area": "Brinstar"},
"Super Missile (green Brinstar bottom)": {"byteIndex": 44, "bitMask": 4, "room": 0xa051, "area": "Brinstar"},
"Missile (blue Brinstar bottom)": {"byteIndex": 51, "bitMask": 8, "room": 0xa107, "area": "Brinstar"},
"Missile (blue Brinstar top)": {"byteIndex": 39, "bitMask": 4, "room": 0xa1d8, "area": "Brinstar"},
"Missile (blue Brinstar behind missile)": {"byteIndex": 39, "bitMask": 4, "room": 0xa1d8, "area": "Brinstar"},
"Power Bomb (red Brinstar sidehopper room)": {"byteIndex": 164, "bitMask": 16, "room": 0xa37c, "area": "Brinstar"},
"Power Bomb (red Brinstar spike room)": {"byteIndex": 176, "bitMask": 16, "room": 0xa3ae, "area": "Brinstar"},
"Missile (red Brinstar spike room)": {"byteIndex": 176, "bitMask": 32, "room": 0xa3ae, "area": "Brinstar"},
"Missile (Kraid)": {"byteIndex": 205, "bitMask": 1, "room": 0xa4da, "area": "Brinstar"},
"Missile (lava room)": {"byteIndex": 22, "bitMask": 128, "room": 0xa788, "area": "Norfair"},
"Missile (below Ice Beam)": {"byteIndex": 20, "bitMask": 32, "room": 0xa8f8, "area": "Norfair"},
"Missile (above Crocomire)": {"byteIndex": 29, "bitMask": 16, "room": 0xaa0e, "area": "Norfair"},
"Missile (Hi-Jump Boots)": {"byteIndex": 25, "bitMask": 128, "room": 0xaa41, "area": "Norfair"},
"Energy Tank (Hi-Jump Boots)": {"byteIndex": 25, "bitMask": 64, "room": 0xaa41, "area": "Norfair"},
"Power Bomb (Crocomire)": {"byteIndex": 45, "bitMask": 64, "room": 0xaade, "area": "Norfair"},
"Missile (below Crocomire)": {"byteIndex": 65, "bitMask": 2, "room": 0xab3b, "area": "Norfair"},
"Missile (Grapple Beam)": {"byteIndex": 65, "bitMask": 128, "room": 0xab8f, "area": "Norfair"},
"Missile (Norfair Reserve Tank)": {"byteIndex": 14, "bitMask": 32, "room": 0xac5a, "area": "Norfair"},
"Missile (bubble Norfair green door)": {"byteIndex": 14, "bitMask": 4, "room": 0xac83, "area": "Norfair"},
"Missile (bubble Norfair)": {"byteIndex": 26, "bitMask": 1, "room": 0xacb3, "area": "Norfair"},
"Missile (Speed Booster)": {"byteIndex": 140, "bitMask": 8, "room": 0xacf0, "area": "Norfair"},
"Missile (Wave Beam)": {"byteIndex": 23, "bitMask": 32, "room": 0xadad, "area": "Norfair"},
"Missile (Gold Torizo)": {"byteIndex": 66, "bitMask": 32, "room": 0xb283, "area": "Norfair"},
"Super Missile (Gold Torizo)": {"byteIndex": 66, "bitMask": 16, "room": 0xb283, "area": "Norfair"},
"Missile (Mickey Mouse room)": {"byteIndex": 47, "bitMask": 8, "room": 0xb40a, "area": "Norfair"},
"Missile (lower Norfair above fire flea room)": {"byteIndex": 152, "bitMask": 16, "room": 0xb510, "area": "Norfair"},
"Power Bomb (lower Norfair above fire flea room)": {"byteIndex": 156, "bitMask": 4, "room": 0xb55a, "area": "Norfair"},
"Power Bomb (Power Bombs of shame)": {"byteIndex": 188, "bitMask": 128, "room": 0xb5d5, "area": "Norfair"},
"Missile (lower Norfair near Wave Beam)": {"byteIndex": 27, "bitMask": 4, "room": 0xb656, "area": "Norfair"},
"Missile (Wrecked Ship middle)": {"byteIndex": 69, "bitMask": 8, "room": 0xcaf6, "area": "WreckedShip"},
"Missile (Gravity Suit)": {"byteIndex": 57, "bitMask": 4, "room": 0xc98e, "area": "WreckedShip"},
"Missile (Wrecked Ship top)": {"byteIndex": 46, "bitMask": 4, "room": 0xcaae, "area": "WreckedShip"},
"Super Missile (Wrecked Ship left)": {"byteIndex": 73, "bitMask": 1, "room": 0xcda8, "area": "WreckedShip"},
"Missile (green Maridia shinespark)": {"byteIndex": 53, "bitMask": 32, "room": 0xcfc9, "area": "Maridia"},
"Super Missile (green Maridia)": {"byteIndex": 49, "bitMask": 16, "room": 0xcfc9, "area": "Maridia"},
"Missile (green Maridia tatori)": {"byteIndex": 58, "bitMask": 16, "room": 0xd055, "area": "Maridia"},
# TODO::check whether these two are swapped on the map
"Super Missile (yellow Maridia)": {"byteIndex": 29, "bitMask": 8, "room": 0xd13b, "area": "Maridia"},
"Missile (yellow Maridia super missile)": {"byteIndex": 29, "bitMask": 8, "room": 0xd13b, "area": "Maridia"},
"Missile (yellow Maridia false wall)": {"byteIndex": 30, "bitMask": 8, "room": 0xd1dd, "area": "Maridia"},
"Missile (left Maridia sand pit room)": {"byteIndex": 62, "bitMask": 8, "room": 0xd4ef, "area": "Maridia"},
"Missile (right Maridia sand pit room)": {"byteIndex": 62, "bitMask": 1, "room": 0xd51e, "area": "Maridia"},
"Power Bomb (right Maridia sand pit room)": {"byteIndex": 67, "bitMask": 128, "room": 0xd51e, "area": "Maridia"},
"Missile (pink Maridia)": {"byteIndex": 43, "bitMask": 128, "room": 0xd5a7, "area": "Maridia"},
"Super Missile (pink Maridia)": {"byteIndex": 43, "bitMask": 64, "room": 0xd5a7, "area": "Maridia"},
"Missile (Draygon)": {"byteIndex": 161, "bitMask": 32, "room": 0xd78f, "area": "Maridia"}
}
doorsScreen = {
# crateria
'LandingSiteRight': {"byteIndex": 23, "bitMask": 1, "room": 0x91f8, "area": "Crateria"},
'LandingSiteTopRight': {"byteIndex": 11, "bitMask": 1, "room": 0x91f8, "area": "Crateria"},
'KihunterBottom': {"byteIndex": 156, "bitMask": 32, "room": 0x948c, "area": "Crateria"},
'KihunterRight': {"byteIndex": 148, "bitMask": 16, "room": 0x948c, "area": "Crateria"},
'FlywayRight': {"byteIndex": 31, "bitMask": 128, "room": 0x9879, "area": "Crateria"},
'GreenPiratesShaftBottomRight': {"byteIndex": 37, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
'RedBrinstarElevatorTop': {"byteIndex": 160, "bitMask": 32, "room": 0x962a, "area": "Crateria"},
'ClimbRight': {"byteIndex": 70, "bitMask": 8, "room": 0x96ba, "area": "Crateria"},
# blue brinstar
'ConstructionZoneRight': {"byteIndex": 47, "bitMask": 4, "room": 0x9f11, "area": "Brinstar"},
# green brinstar
'GreenHillZoneTopRight': {"byteIndex": 46, "bitMask": 8, "room": 0x9e52, "area": "Brinstar"},
'NoobBridgeRight': {"byteIndex": 184, "bitMask": 128, "room": 0x9fba, "area": "Brinstar"},
'MainShaftRight': {"byteIndex": 21, "bitMask": 64, "room": 0x9ad9, "area": "Brinstar"},
'MainShaftBottomRight': {"byteIndex": 29, "bitMask": 64, "room": 0x9ad9, "area": "Brinstar"},
'EarlySupersRight': {"byteIndex": 21, "bitMask": 8, "room": 0x9bc8, "area": "Brinstar"},
'EtecoonEnergyTankLeft': {"byteIndex": 44, "bitMask": 2, "room": 0xa011, "area": "Brinstar"},
# pink brinstar
'BigPinkTopRight': {"byteIndex": 22, "bitMask": 32, "room": 0x9d19, "area": "Brinstar"},
'BigPinkRight': {"byteIndex": 38, "bitMask": 32, "room": 0x9d19, "area": "Brinstar"},
'BigPinkBottomRight': {"byteIndex": 46, "bitMask": 32, "room": 0x9d19, "area": "Brinstar"},
'BigPinkBottomLeft': {"byteIndex": 57, "bitMask": 1, "room": 0x9d19, "area": "Brinstar"},
# red brinstar
'RedTowerLeft': {"byteIndex": 192, "bitMask": 64, "room": 0xa253, "area": "Brinstar"},
'RedBrinstarFirefleaLeft': {"byteIndex": 67, "bitMask": 64, "room": 0xa293, "area": "Brinstar"},
'RedTowerElevatorTopLeft': {"byteIndex": 160, "bitMask": 4, "room": 0xa322, "area": "Brinstar"},
'RedTowerElevatorLeft': {"byteIndex": 168, "bitMask": 4, "room": 0xa322, "area": "Brinstar"},
'RedTowerElevatorBottomLeft': {"byteIndex": 176, "bitMask": 4, "room": 0xa322, "area": "Brinstar"},
'BelowSpazerTopRight': {"byteIndex": 200, "bitMask": 4, "room": 0xa408, "area": "Brinstar"},
# Wrecked ship
'WestOceanRight': {"byteIndex": 149, "bitMask": 4, "room": 0x93fe, "area": "Crateria"},
'LeCoudeBottom': {"byteIndex": 170, "bitMask": 8, "room": 0x95a8, "area": "Crateria"},
'WreckedShipMainShaftBottom': {"byteIndex": 78, "bitMask": 128, "room": 0xcaf6, "area": "WreckedShip"},
'ElectricDeathRoomTopLeft': {"byteIndex": 58, "bitMask": 4, "room": 0xcbd5, "area": "WreckedShip"},
# Upper Norfair
'BusinessCenterTopLeft': {"byteIndex": 17, "bitMask": 32, "room": 0xa7de, "area": "Norfair"},
'BusinessCenterBottomLeft': {"byteIndex": 25, "bitMask": 32, "room": 0xa7de, "area": "Norfair"},
'CathedralEntranceRight': {"byteIndex": 17, "bitMask": 4, "room": 0xa7b3, "area": "Norfair"},
'CathedralRight': {"byteIndex": 22, "bitMask": 128, "room": 0xa788, "area": "Norfair"},
'BubbleMountainTopRight': {"byteIndex": 14, "bitMask": 1, "room": 0xacb3, "area": "Norfair"},
'BubbleMountainTopLeft': {"byteIndex": 14, "bitMask": 2, "room": 0xacb3, "area": "Norfair"},
'SpeedBoosterHallRight': {"byteIndex": 140, "bitMask": 8, "room": 0xacf0, "area": "Norfair"},
'SingleChamberRight': {"byteIndex": 23, "bitMask": 128, "room": 0xad5e, "area": "Norfair"},
'DoubleChamberRight': {"byteIndex": 23, "bitMask": 8, "room": 0xadad, "area": "Norfair"},
'KronicBoostBottomLeft': {"byteIndex": 47, "bitMask": 32, "room": 0xae74, "area": "Norfair"},
'CrocomireSpeedwayBottom': {"byteIndex": 41, "bitMask": 1, "room": 0xa923, "area": "Norfair"},
# Crocomire
'PostCrocomireUpperLeft': {"byteIndex": 45, "bitMask": 32, "room": 0xaa82, "area": "Norfair"},
'PostCrocomireShaftRight': {"byteIndex": 65, "bitMask": 32, "room": 0xab07, "area": "Norfair"},
# Lower Norfair
'RedKihunterShaftBottom': {"byteIndex": 184, "bitMask": 4, "room": 0xb585, "area": "Norfair"},
'WastelandLeft': {"byteIndex": 196, "bitMask": 64, "room": 0xb5d5, "area": "Norfair"},
# Maridia
'MainStreetBottomRight': {"byteIndex": 69, "bitMask": 16, "room": 0xcfc9, "area": "Maridia"},
'FishTankRight': {"byteIndex": 66, "bitMask": 128, "room": 0xd017, "area": "Maridia"},
'CrabShaftRight': {"byteIndex": 46, "bitMask": 16, "room": 0xd1a3, "area": "Maridia"},
'ColosseumBottomRight': {"byteIndex": 161, "bitMask": 128, "room": 0xd72a, "area": "Maridia"},
'PlasmaSparkBottom': {"byteIndex": 22, "bitMask": 2, "room": 0xd340, "area": "Maridia"},
'OasisTop': {"byteIndex": 66, "bitMask": 2, "room": 0xd48e, "area": "Maridia"}
}
mapOffsetEnum = {
"Crateria": 0,
"Brinstar": 0x100,
"Norfair": 0x200,
"WreckedShip": 0x300,
"Maridia": 0x400,
"Tourian": 0x500
}
def importDump(self, shmName):
from utils.shm import SHM
shm = SHM(shmName)
dumpData = shm.readMsgJson()
shm.finish(False)
# first update current access point
self.lastAP = dumpData["newAP"]
dataEnum = {
"state": '1',
"map": '2',
"curMap": '3',
"samus": '4',
"items": '5',
"boss": '6',
"events": '7'
}
currentState = dumpData["currentState"]
self.locDelta = 0
for dataType, offset in dumpData["stateDataOffsets"].items():
if dataType == dataEnum["items"]:
# get item data, loop on all locations to check if they have been visited
for loc in self.locations:
# loc Id is used to index into the items data; boss locations don't have an Id.
# for the scav hunt the Ridley loc now has an Id, so also check whether the loc is a boss loc.
if loc.Id is None or loc.isBoss():
continue
# nothing locs are handled later
if loc.itemName == 'Nothing':
continue
byteIndex = loc.Id >> 3
bitMask = 0x01 << (loc.Id & 7)
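# e.g. a loc with Id 13 maps to byteIndex 1 (13 >> 3) and bitMask 0x20
# (1 << (13 & 7)), i.e. bit 5 of the second byte of the items data.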
if currentState[offset + byteIndex] & bitMask != 0:
if loc not in self.visitedLocations:
self.pickItemAt(self.locNameInternal2Web(loc.Name))
self.locDelta += 1
else:
if loc in self.visitedLocations:
self.removeItemAt(self.locNameInternal2Web(loc.Name))
elif dataType == dataEnum["boss"]:
for boss, bossData in self.bossBitMasks.items():
byteIndex = bossData["byteIndex"]
bitMask = bossData["bitMask"]
loc = self.getLoc(boss)
if currentState[offset + byteIndex] & bitMask != 0:
# when Tourian is disabled Mother Brain is not available, but it gets auto-killed during the escape
if loc not in self.visitedLocations and loc in self.majorLocations:
self.pickItemAt(self.locNameInternal2Web(loc.Name))
self.locDelta += 1
else:
if loc in self.visitedLocations:
self.removeItemAt(self.locNameInternal2Web(loc.Name))
elif dataType == dataEnum["map"]:
if self.areaRando or self.bossRando or self.escapeRando:
availAPs = set()
for apName, apData in self.areaAccessPoints.items():
if self.isElemAvailable(currentState, offset, apData):
availAPs.add(apName)
for apName, apData in self.bossAccessPoints.items():
if self.isElemAvailable(currentState, offset, apData):
availAPs.add(apName)
for apName, apData in self.escapeAccessPoints.items():
if self.isElemAvailable(currentState, offset, apData):
availAPs.add(apName)
# static transitions
if self.areaRando == True and self.bossRando == True:
staticTransitions = []
possibleTransitions = self.bossTransitions + self.areaTransitions
elif self.areaRando == True:
staticTransitions = self.bossTransitions[:]
possibleTransitions = self.areaTransitions[:]
elif self.bossRando == True:
staticTransitions = self.areaTransitions[:]
possibleTransitions = self.bossTransitions[:]
else:
staticTransitions = self.bossTransitions + self.areaTransitions
possibleTransitions = []
if self.escapeRando == False:
staticTransitions += self.escapeTransition
else:
possibleTransitions += self.escapeTransition
# remove static transitions from current transitions
dynamicTransitions = self.curGraphTransitions[:]
for transition in self.curGraphTransitions:
if transition in staticTransitions:
dynamicTransitions.remove(transition)
# remove dynamic transitions not visited
for transition in dynamicTransitions:
if transition[0] not in availAPs and transition[1] not in availAPs:
self.curGraphTransitions.remove(transition)
# for fast check of current transitions
fastTransCheck = {}
for transition in self.curGraphTransitions:
fastTransCheck[transition[0]] = transition[1]
fastTransCheck[transition[1]] = transition[0]
# add new transitions
for transition in possibleTransitions:
start = transition[0]
end = transition[1]
# available transition
if start in availAPs and end in availAPs:
# transition not already in current transitions
if start not in fastTransCheck and end not in fastTransCheck:
self.curGraphTransitions.append(transition)
if self.hasNothing:
# get locs with nothing
locsNothing = [loc for loc in self.locations if loc.itemName == 'Nothing']
for loc in locsNothing:
locData = self.nothingScreens[loc.Name]
if self.isElemAvailable(currentState, offset, locData):
# nothing has been seen, check if loc is already visited
if not loc in self.visitedLocations:
# visit it
self.pickItemAt(self.locNameInternal2Web(loc.Name))
self.locDelta += 1
else:
# nothing not yet seen, check if loc is already visited
if loc in self.visitedLocations:
# unvisit it
self.removeItemAt(self.locNameInternal2Web(loc.Name))
if self.doorsRando:
# get currently hidden / revealed doors names in sets
(hiddenDoors, revealedDoor) = DoorsManager.getDoorsState()
for doorName in hiddenDoors:
# check if door is still hidden
doorData = self.doorsScreen[doorName]
if self.isElemAvailable(currentState, offset, doorData):
DoorsManager.switchVisibility(doorName)
for doorName in revealedDoor:
# check if door is still visible
doorData = self.doorsScreen[doorName]
if not self.isElemAvailable(currentState, offset, doorData):
DoorsManager.switchVisibility(doorName)
elif dataType == dataEnum["events"]:
self.newlyCompletedObjectives = []
goalsList = self.objectives.getGoalsList()
goalsCompleted = self.objectives.getState()
goalsCompleted = list(goalsCompleted.values())
for i, (event, eventData) in enumerate(self.eventsBitMasks.items()):
assert str(i) == event, "{}th event has code {} instead of {}".format(i, event, i)
if i >= len(goalsList):
continue
byteIndex = eventData["byteIndex"]
bitMask = eventData["bitMask"]
goalName = goalsList[i]
goalCompleted = goalsCompleted[i]
if currentState[offset + byteIndex] & bitMask != 0:
# set goal completed
if not goalCompleted:
self.objectives.setGoalCompleted(goalName, True)
self.newlyCompletedObjectives.append("Completed objective: {}".format(goalName))
else:
# set goal uncompleted
if goalCompleted:
self.objectives.setGoalCompleted(goalName, False)
def isElemAvailable(self, currentState, offset, apData):
byteIndex = apData["byteIndex"]
bitMask = apData["bitMask"]
return currentState[offset + byteIndex + self.mapOffsetEnum[apData["area"]]] & bitMask != 0
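# e.g. for "Lava Dive Right" (byteIndex 47, bitMask 64, area "Norfair") the
# test reads currentState[offset + 47 + 0x200] & 0x40, the Norfair map block
# starting at mapOffsetEnum["Norfair"].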
| {
"content_hash": "0518e6952478c9ad079c76dba9a00914",
"timestamp": "",
"source": "github",
"line_count": 1180,
"max_line_length": 251,
"avg_line_length": 51.78220338983051,
"alnum_prop": 0.5738834427114872,
"repo_name": "theonlydude/RandomMetroidSolver",
"id": "c6d6cacc94f3961038d597e57d0d7166c43375d0",
"size": "61103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solver/interactiveSolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "323310"
},
{
"name": "CSS",
"bytes": "70414"
},
{
"name": "Dockerfile",
"bytes": "3915"
},
{
"name": "HTML",
"bytes": "916237"
},
{
"name": "JavaScript",
"bytes": "975366"
},
{
"name": "Lua",
"bytes": "2247"
},
{
"name": "Makefile",
"bytes": "3217"
},
{
"name": "Perl",
"bytes": "1680"
},
{
"name": "Python",
"bytes": "2240472"
},
{
"name": "Shell",
"bytes": "227851"
}
],
"symlink_target": ""
} |
import constants_v2 as const
import netaddr
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip.network_helper import \
NetworkHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper \
import BigIPResourceHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper \
import ResourceType
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import get_filter
from requests import HTTPError
LOG = logging.getLogger(__name__)
class BigipSelfIpManager(object):
def __init__(self, driver, l2_service, l3_binding):
self.driver = driver
self.l2_service = l2_service
self.l3_binding = l3_binding
self.selfip_manager = BigIPResourceHelper(ResourceType.selfip)
self.network_helper = NetworkHelper()
def _create_bigip_selfip(self, bigip, model):
created = False
if self.selfip_manager.exists(bigip, name=model['name'],
partition=model['partition']):
created = True
else:
try:
self.selfip_manager.create(bigip, model)
created = True
except HTTPError as err:
if (err.response.status_code == 400 and
err.response.text.find(
"must be one of the vlans "
"in the associated route domain") > 0):
try:
self.network_helper.add_vlan_to_domain(
bigip,
name=model['vlan'],
partition=model['partition'])
self.selfip_manager.create(bigip, model)
created = True
except HTTPError as err:
LOG.exception("Error creating selfip %s. "
"Repsponse status code: %s. "
"Response message: %s." % (
model["name"],
err.response.status_code,
err.message))
raise f5_ex.SelfIPCreationException("selfip")
else:
LOG.exception("selfip creation error: %s(%s)" %
(err.message, err.response.status_code))
raise
except Exception as err:
LOG.error("Failed to create selfip")
LOG.exception(err.message)
raise f5_ex.SelfIPCreationException("selfip creation")
return created
def assure_bigip_selfip(self, bigip, service, subnetinfo):
u"""Ensure the BigIP has a selfip address on the tenant subnet."""
network = None
subnet = None
if 'network' in subnetinfo:
network = subnetinfo['network']
if 'subnet' in subnetinfo:
subnet = subnetinfo['subnet']
if not network or not subnet:
LOG.error('Attempted to create selfip and snats '
'for network with no id...')
raise KeyError("network and subnet need to be specified")
tenant_id = service['loadbalancer']['tenant_id']
lb_id = service['loadbalancer']['id']
# If we have already assured this subnet, return.
# Note this cache is periodically cleared in order to
# force assurance that the configuration is present.
if tenant_id in bigip.assured_tenant_snat_subnets and \
subnet['id'] in bigip.assured_tenant_snat_subnets[tenant_id]:
return True
selfip_address = self._get_bigip_selfip_address(bigip, subnet, lb_id)
if 'route_domain_id' not in network:
LOG.error("network route domain is not set")
raise KeyError()
selfip_address += '%' + str(network['route_domain_id'])
if self.l2_service.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.driver.service_adapter.\
get_folder_name(service['loadbalancer']['tenant_id'])
# Get the name of the vlan.
(network_name, preserve_network_name) = \
self.l2_service.get_network_name(bigip, network)
netmask = netaddr.IPNetwork(subnet['cidr']).prefixlen
address = selfip_address + ("/%d" % netmask)
model = {
"name": "local-" + bigip.device_name + "-" + subnet['id'],
"address": address,
"vlan": network_name,
"floating": "disabled",
"partition": network_folder
}
self._create_bigip_selfip(bigip, model)
if self.l3_binding:
self.l3_binding.bind_address(subnet_id=subnet['id'],
ip_address=selfip_address)
def _get_bigip_selfip_address(self, bigip, subnet, device_id):
u"""Ensure a selfip address is allocated on Neutron network."""
# Get ip address for selfip to use on BIG-IP.
if self.driver.conf.unlegacy_setting_placeholder:
LOG.debug('setting vnic_type to normal instead of baremetal')
vnic_type = "normal"
else:
vnic_type = "baremetal"
selfip_address = ""
selfip_name = "local-" + bigip.device_name + "-" + subnet['id']
ports = self.driver.plugin_rpc.get_port_by_name(port_name=selfip_name)
if len(ports) > 0:
port = ports[0]
else:
port = self.driver.plugin_rpc.create_port_on_subnet(
subnet_id=subnet['id'],
mac_address=None,
name=selfip_name,
fixed_address_count=1,
device_id=device_id,
vnic_type=vnic_type
)
if port and 'fixed_ips' in port:
fixed_ip = port['fixed_ips'][0]
selfip_address = fixed_ip['ip_address']
return selfip_address
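# the Neutron port backing the selfip is found (or created) by name using the
# "local-<device_name>-<subnet_id>" convention, e.g. a hypothetical
# "local-bigip1-<subnet-uuid>"; re-running against the same subnet therefore
# reuses the already allocated address.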
def assure_gateway_on_subnet(self, bigip, subnetinfo, traffic_group):
"""Ensure """
network = None
subnet = None
if 'network' in subnetinfo:
network = subnetinfo['network']
if 'subnet' in subnetinfo:
subnet = subnetinfo['subnet']
if not network or not subnet:
raise KeyError("network and subnet must be specified to create "
"gateway on subnet.")
if not subnet['gateway_ip']:
raise KeyError("attempting to create gateway on subnet without "
"gateway ip address specified.")
if subnet['id'] in bigip.assured_gateway_subnets:
return True
(network_name, preserve_network_name) = \
self.l2_service.get_network_name(bigip, network)
if self.l2_service.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.driver.service_adapter.\
get_folder_name(subnet['tenant_id'])
# Create a floating SelfIP for the given traffic-group.
floating_selfip_name = "gw-" + subnet['id']
netmask = netaddr.IPNetwork(subnet['cidr']).prefixlen
address = subnet['gateway_ip'] + "%" + str(network['route_domain_id'])
address += ("/%d" % (netmask))
model = {
'name': floating_selfip_name,
'address': address,
'vlan': network_name,
'floating': True,
'traffic-group': traffic_group,
'partition': network_folder
}
if not self._create_bigip_selfip(bigip, model):
LOG.error("failed to create gateway selfip")
if self.l3_binding:
self.l3_binding.bind_address(subnet_id=subnet['id'],
ip_address=subnet['gateway_ip'])
# Setup a wild card ip forwarding virtual service for this subnet
gw_name = "gw-" + subnet['id']
vs = bigip.tm.ltm.virtuals.virtual
if not vs.exists(name=gw_name, partition=network_folder):
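            # A wildcard destination (0.0.0.0:0, mask 0.0.0.0) forwards any
            # traffic arriving on the tenant VLAN, with automap SNAT.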
try:
vs.create(
name=gw_name,
partition=network_folder,
destination='0.0.0.0:0',
mask='0.0.0.0',
vlansEnabled=True,
vlans=[network_name],
sourceAddressTranslation={'type': 'automap'},
ipForward=True
)
except Exception as err:
LOG.exception(err)
raise f5_ex.VirtualServerCreationException(
"Failed to create gateway virtual service on subnet %s",
subnet['id']
)
# Put the virtual server address in the specified traffic group
virtual_address = bigip.tm.ltm.virtual_address_s.virtual_address
try:
obj = virtual_address.load(
name='0.0.0.0', partition=network_folder)
obj.modify(trafficGroup=traffic_group)
except Exception as err:
LOG.exception(err)
raise f5_ex.VirtualServerCreationException(
"Failed to add virtual address to traffic group %s",
traffic_group)
bigip.assured_gateway_subnets.append(subnet['id'])
    def delete_gateway_on_subnet(self, bigip, subnetinfo):
        """Remove the gateway selfip and forwarding virtual."""
network = None
subnet = None
if 'network' in subnetinfo:
network = subnetinfo['network']
if 'subnet' in subnetinfo:
subnet = subnetinfo['subnet']
if not network or not subnet:
            LOG.error('Attempted to delete gateway '
                      'for network with no id...')
raise KeyError("network and subnet must be specified")
if not subnet['gateway_ip']:
raise KeyError("attempting to create gateway on subnet without "
"gateway ip address specified.")
if self.l2_service.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.driver.service_adapter.\
get_folder_name(subnet['tenant_id'])
if self.driver.conf.f5_populate_static_arp:
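            # Purge any static ARP entries recorded for this subnet first.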
self.network_helper.arp_delete_by_subnet(
bigip,
partition=network_folder,
subnet=subnetinfo['subnet']['cidr'],
mask=None
)
floating_selfip_name = "gw-" + subnet['id']
self.delete_selfip(
bigip, floating_selfip_name, network_folder)
if self.l3_binding:
self.l3_binding.unbind_address(subnet_id=subnet['id'],
ip_address=subnet['gateway_ip'])
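        # Remove the wildcard forwarding virtual that served as the gateway.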
gw_name = "gw-" + subnet['id']
vs = bigip.tm.ltm.virtuals.virtual
try:
if vs.exists(name=gw_name, partition=network_folder):
obj = vs.load(name=gw_name, partition=network_folder)
obj.delete()
except Exception as err:
LOG.exception(err)
raise f5_ex.VirtualServerDeleteException(
"Failed to delete gateway service on subnet %s", subnet['id'])
if subnet['id'] in bigip.assured_gateway_subnets:
bigip.assured_gateway_subnets.remove(subnet['id'])
return gw_name
def get_selfip_addr(self, bigip, name, partition=const.DEFAULT_PARTITION):
selfip_addr = ""
try:
s = bigip.tm.net.selfips.selfip
if s.exists(name=name, partition=partition):
obj = s.load(name=name, partition=partition)
# The selfip address on BigIP is actually a network,
# parse out the address portion.
if obj.address:
(selfip_addr, netbits) = obj.address.split("/")
except HTTPError as err:
LOG.exception("Error getting selfip address for %s. "
"Repsponse status code: %s. Response "
"message: %s." % (name,
err.response.status_code,
err.message))
except Exception as err:
LOG.exception("Error getting selfip address for %s.",
name)
return selfip_addr
def get_selfips(self, bigip, partition=const.DEFAULT_PARTITION,
vlan_name=None):
selfips_list = []
if vlan_name:
if not vlan_name.startswith('/'):
vlan_name = "/%s/%s" % (partition, vlan_name)
params = {'params': get_filter(bigip, 'partition', 'eq', partition)}
try:
selfips_list = [selfip for selfip in
bigip.tm.net.selfips.get_collection(
requests_params=params
)
if vlan_name == selfip.vlan or not vlan_name]
except HTTPError as err:
LOG.exception("Error getting selfips for vlan(%s). "
"Response status code: %s. "
"Response message: %s." % (
vlan_name,
err.response.status_code,
err.message))
raise f5_ex.SelfIPQueryException(
"Failed to get selfips assigned to vlan")
return selfips_list
def delete_selfip(self, bigip, name, partition=const.DEFAULT_PARTITION):
"""Delete the selfip if it exists."""
try:
s = bigip.tm.net.selfips.selfip
if s.exists(name=name, partition=partition):
obj = s.load(name=name, partition=partition)
obj.delete()
except HTTPError as err:
LOG.exception("Error deleting selfip %s. "
"Response status code: %s. Response "
"message: %s." % (name,
err.response.status_code,
err.message))
raise f5_ex.SelfIPDeleteException(
"Failed to delete selfip %s." % name)
except Exception as err:
raise f5_ex.SelfIPDeleteException(
"Failed to delete selfip %s." % name)
| {
"content_hash": "767a4c1929f8e31e4c72947059363174",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 78,
"avg_line_length": 39.46739130434783,
"alnum_prop": 0.5309143486642798,
"repo_name": "F5Networks/f5-openstack-agent",
"id": "0af6185879017fe984b6b632386d06bf33a06722",
"size": "15132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2220"
},
{
"name": "Makefile",
"bytes": "853"
},
{
"name": "Python",
"bytes": "1395055"
},
{
"name": "Ruby",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "15836"
}
],
"symlink_target": ""
} |