#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
sync_blocks,
)
def create_transactions(node, address, amt, fees):
# Create and sign raw transactions from node to address for amt.
# Creates a transaction for each fee and returns an array
# of the raw transactions.
utxos = [u for u in node.listunspent(0) if u['spendable']]
# Create transactions
inputs = []
ins_total = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
ins_total += utxo['amount']
if ins_total >= amt + max(fees):
break
# make sure there were enough utxos
assert ins_total >= amt + max(fees)
txs = []
for fee in fees:
outputs = {address: amt}
# prevent 0 change output
if ins_total > amt + fee:
outputs[node.getrawchangeaddress()] = ins_total - amt - fee
raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
raw_tx = node.signrawtransactionwithwallet(raw_tx)
assert_equal(raw_tx['complete'], True)
txs.append(raw_tx)
return txs
class WalletTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
['-limitdescendantcount=3'], # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
[],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].importaddress(ADDRESS_WATCHONLY)
# Check that nodes don't own any UTXOs
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
self.log.info("Check that only node 0 is watching an address")
assert 'watchonly' in self.nodes[0].getbalances()
assert 'watchonly' not in self.nodes[1].getbalances()
self.log.info("Mining blocks ...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
assert 'watchonly' not in self.nodes[1].getbalances()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
self.log.info("Test getbalance with different arguments")
assert_equal(self.nodes[0].getbalance("*"), 50)
assert_equal(self.nodes[0].getbalance("*", 1), 50)
assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
assert_equal(self.nodes[0].getbalance(minconf=1), 50)
assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
# Send 40 DGB from 0 to 1 and 60 DGB from 1 to 0.
txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
self.nodes[0].sendrawtransaction(txs[0]['hex'])
self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
# First argument of getbalance must be set to "*"
assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
def test_balances(*, fee_node_1=0):
# getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('30') - fee_node_1) # change from node 1's send
# Same with minconf=0
assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('30') - fee_node_1)
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
# getunconfirmedbalance
assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend
assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('60'))
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('0')) # Doesn't include output of node 0's send since it was spent
assert_equal(self.nodes[1].getbalances()['mine']['untrusted_pending'], Decimal('0'))
assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('0'))
test_balances(fee_node_1=Decimal('0.01'))
# Node 1 bumps the transaction fee and resends
self.nodes[1].sendrawtransaction(txs[1]['hex'])
self.nodes[0].sendrawtransaction(txs[1]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
test_balances(fee_node_1=Decimal('0.02'))
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
# balances are correct after the transactions are confirmed
assert_equal(self.nodes[0].getbalance(), Decimal('69.99')) # node 1's send plus change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('29.98')) # change from node 0's send
# Send total balance away from node 1
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
self.sync_all()
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
# getbalance with minconf=3 should still show the old balance
assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
# getbalance with minconf=2 will show the new balance.
assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
# check mempool transactions count for wallet unconfirmed balance after
# dynamically loading the wallet.
before = self.nodes[1].getunconfirmedbalance()
dst = self.nodes[1].getnewaddress()
self.nodes[1].unloadwallet('')
self.nodes[0].sendtoaddress(dst, 0.1)
self.sync_all()
self.nodes[1].loadwallet('')
after = self.nodes[1].getunconfirmedbalance()
assert_equal(before + Decimal('0.1'), after)
# Create 3 more wallet txs, where the last is not accepted to the
# mempool because it is the third descendant of the tx above
for _ in range(3):
# Set amount high enough such that all coins are spent by each tx
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
self.log.info('Check that wallet txs not in the mempool are untrusted')
assert txid not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
assert_equal(self.nodes[0].getbalance(minconf=0), 0)
self.log.info("Test replacement and reorg of non-mempool tx")
tx_orig = self.nodes[0].gettransaction(txid)['hex']
# Increase fee by 1 coin
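# (Illustrative note: output amounts are serialized as 8-byte little-endian
# integers of base units, so swapping the encoded 99-coin value for the
# 98-coin encoding leaves one extra coin to the miner as fee.)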
tx_replace = tx_orig.replace(
struct.pack("<q", 99 * 10**8).hex(),
struct.pack("<q", 98 * 10**8).hex(),
)
tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
# Total balance is given by the sum of outputs of the tx
total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
self.sync_all()
self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
# Now confirm tx_replace
block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
self.log.info('Put txs back into mempool of node 1 (not node 0)')
self.nodes[0].invalidateblock(block_reorg)
self.nodes[1].invalidateblock(block_reorg)
self.sync_blocks()
self.nodes[0].syncwithvalidationinterfacequeue()
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
# Now confirm tx_orig
self.restart_node(1, ['-persistmempool=0'])
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
self.nodes[1].sendrawtransaction(tx_orig)
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
if __name__ == '__main__':
WalletTest().main()
|
|
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from django.utils.functional import cached_property
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
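# (For example, (1, 2, 1, 'gamma') > (1, 2, 1, 'final', 2) because
# 'gamma' > 'final' as strings, even though the gamma pre-release is older
# than 1.2.1p2.)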
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
warnings.filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value, conv):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("MySQL received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
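# Illustrative example (assuming USE_TZ = True): an aware value such as
# 2013-05-01 12:00:00+02:00 is adapted to the naive UTC string
# '2013-05-01 10:00:00' before being sent to MySQL.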
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the lists of versions
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
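# For example (illustrative only): server_version_re.match('5.0.24a').groups()
# gives ('5', '0', '24'), so '5.0.24' and '5.0.24a' compare as the same version.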
# MySQLdb-1.2.1 and newer automatically make use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warnings module; it is set up when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_long_model_names = False
supports_microsecond_precision = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_primary_key_0 = False
uses_savepoints = True
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
# This command is MySQL specific; the second column
# will tell you the default table type of the created
# table. Since all Django's test tables will have the same
# table type, that's enough to evaluate the feature.
cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
result = cursor.fetchone()
cursor.execute('DROP TABLE INTROSPECT_TEST')
return result[1]
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != 'MyISAM'
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
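# Illustrative example: date_trunc_sql('month', 'created') returns
#     "CAST(DATE_FORMAT(created, '%%Y-%%m-01 00:00:00') AS DATETIME)"
# The doubled percents collapse to single ones when the query string is later
# interpolated with its parameters.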
def date_interval_sql(self, sql, connector, timedelta):
return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
timedelta.days, timedelta.seconds, timedelta.microseconds)
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return ["NULL"]
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return cursor._last_executed.decode('utf-8')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
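# Illustrative example (plain no-color style, no sequences):
# sql_flush(style, ['auth_user'], []) would return
#     ['SET FOREIGN_KEY_CHECKS = 0;',
#      'TRUNCATE `auth_user`;',
#      'SET FOREIGN_KEY_CHECKS = 1;']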
def sequence_reset_by_name_sql(self, style, sequences):
# Truncate already resets the AUTO_INCREMENT field from
# MySQL version 5.0.13 onwards. Refs #16961.
if self.connection.mysql_version < (5, 0, 13):
return ["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(self.quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences]
else:
return []
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
# MySQL doesn't support microseconds
return six.text_type(value.replace(microsecond=0))
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
# MySQL doesn't support microseconds
return six.text_type(value.replace(microsecond=0))
def year_lookup_bounds(self, value):
# Again, no microseconds
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.99'
return [first % value, second % value]
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def savepoint_create_sql(self, sid):
return "SAVEPOINT %s" % sid
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT %s" % sid
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT %s" % sid
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.server_version = None
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def _valid_connection(self):
if self.connection is not None:
try:
self.connection.ping()
return True
except DatabaseError:
self.close()
return False
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
'use_unicode': True,
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.encoders[SafeText] = conn.encoders[six.text_type]
conn.encoders[SafeBytes] = conn.encoders[bytes]
return conn
def init_connection_state(self):
cursor = self.connection.cursor()
# SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
# on a recently-inserted row will return when the field is tested for
# NULL. Disabling this value brings this aspect of MySQL in line with
# SQL standards.
cursor.execute('SET SQL_AUTO_IS_NULL = 0')
cursor.close()
def _cursor(self):
if not self._valid_connection():
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
@cached_property
def mysql_version(self):
if not self.server_version:
new_connection = False
if not self._valid_connection():
# Ensure we have a connection with the DB by using a temporary
# cursor
new_connection = True
self.cursor().close()
server_info = self.connection.get_server_info()
if new_connection:
# Make sure we close the connection
self.close()
m = server_version_re.match(server_info)
if not m:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
self.server_version = tuple([int(x) for x in m.groups()])
return self.server_version
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
self.cursor().execute('SET foreign_key_checks=1')
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0],
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
|
|
import socket
import string
from driver import driver
class AirHidDriver(driver):
SHIFT = "_SHIFT"
ALT = "_ALT"
CTRL = "_CTRL"
WIN = "_WIN"
VK_CAPS_LOCK = 0x14
VK_ESCAPE = 0x1B
VK_PAGE_UP = 0x21
VK_PAGE_DOWN = 0x22
VK_END = 0x23
VK_HOME = 0x24
VK_LEFT = 0x25
VK_UP = 0x26
VK_RIGHT = 0x27
VK_DOWN = 0x28
VK_BACK_SPACE = ord('\b')
VK_F1 = 0x70
VK_F2 = 0x71
VK_F3 = 0x72
VK_F4 = 0x73
VK_F5 = 0x74
VK_F6 = 0x75
VK_F7 = 0x76
VK_F8 = 0x77
VK_F9 = 0x78
VK_F10 = 0x79
VK_F11 = 0x7A
VK_F12 = 0x7B
VK_ENTER = ord('\n')
VK_TAB = ord('\t')
VK_SPACE = 0x20
VK_COMMA = 0x2C
VK_MINUS = 0x2D
VK_PERIOD = 0x2E
VK_SLASH = 0x2F
VK_SEMICOLON = 0x3B
VK_EQUALS = 0x3D
VK_OPEN_BRACKET = 0x5B
VK_BACK_SLASH = 0x5C
VK_CLOSE_BRACKET = 0x5D
VK_MULTIPLY = 0x6A
VK_ADD = 0x6B
VK_QUOTE = 0xDE
VK_AMPERSAND = 0x96
VK_LESS = 0x99
VK_GREATER = 0xA0
VK_AT = 0x0200
VK_COLON = 0x0201
VK_CIRCUMFLEX = 0x0202
VK_DOLLAR = 0x0203
VK_EXCLAMATION_MARK = 0x0205
VK_LEFT_PARENTHESIS = 0x0207
VK_NUMBER_SIGN = 0x0208
VK_RIGHT_PARENTHESIS = 0x020A
VK_UNDERSCORE = 0x020B
VK_QUOTE = 0xDE
# translation table, because AirHID's codes sometimes differ from Java's
TRANSLATION_TABLE = {
10: 13,
154: 42,
155: 45,
127: 46,
156: 47,
61440: 124,
61441: 125,
61442: 126,
61443: 127,
513: 186,
59: 187,
44: 188,
45: 189,
46: 190,
47: 191,
512: 192,
91: 219,
92: 220,
93: 221
}
ACTION_KEYS = {
'TAB': VK_TAB,
'ENTER': VK_ENTER,
'CAPS_LOCK': VK_CAPS_LOCK,
'ESCAPE': VK_ESCAPE,
'PAGE_UP': VK_PAGE_UP,
'PAGE_DOWN': VK_PAGE_DOWN,
'END': VK_END,
'HOME': VK_HOME,
'LEFT': VK_LEFT,
'UP': VK_UP,
'RIGHT': VK_RIGHT,
'DOWN': VK_DOWN,
'BACK_SPACE': VK_BACK_SPACE,
'F1': VK_F1,
'F2': VK_F2,
'F3': VK_F3,
'F4': VK_F4,
'F5': VK_F5,
'F6': VK_F6,
'F7': VK_F7,
'F8': VK_F8,
'F9': VK_F9,
'F10': VK_F10,
'F11': VK_F11,
'F12': VK_F12
}
KEY_MAPPINGS = {
'\n': VK_ENTER,
'\b': VK_BACK_SPACE,
'\t': VK_TAB,
' ' : VK_SPACE,
',' : VK_COMMA,
'-' : VK_MINUS,
'.' : VK_PERIOD,
'/' : VK_SLASH,
';' : VK_SEMICOLON,
'=' : VK_EQUALS,
'[' : VK_OPEN_BRACKET,
'\\': VK_BACK_SLASH,
']' : VK_CLOSE_BRACKET,
'*' : VK_MULTIPLY,
'+' : VK_ADD,
'&' : VK_AMPERSAND,
'<' : VK_LESS,
'>' : VK_GREATER,
'@' : VK_AT,
'^' : VK_CIRCUMFLEX,
'!' : VK_EXCLAMATION_MARK,
'#' : VK_NUMBER_SIGN,
'_' : VK_UNDERSCORE,
"'" : VK_QUOTE
}
SHIFT_KEY_MAPPINGS = {
'"': VK_QUOTE,
'$': ord('4'),
'(': ord('9'),
')': ord('0'),
':': VK_SEMICOLON
}
INPUT_PORT = 13246
SERVER_PORT = 13246
def __init__(self, ip):
self._ip = ip
self._output_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._input_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._input_socket.settimeout(2)
self._input_socket.bind(('', AirHidDriver.INPUT_PORT))
def close(self):
self._output_socket.close()
self._input_socket.close()
def _listen_for_replies(self):
try:
data, addr = self._input_socket.recvfrom(1024)
print "%s replied: %s" % (addr, data)
return True
except:
return False
def _send(self, data):
print "sending: " + data
self._output_socket.sendto(data, (self._ip, AirHidDriver.SERVER_PORT))
def _vkey_to_airhid_button_code(self, vkey):
if vkey in AirHidDriver.TRANSLATION_TABLE:
return str(AirHidDriver.TRANSLATION_TABLE[vkey])
else:
return str(vkey)
def _get_char_code(self, c):
if c in string.ascii_lowercase:
c = c.upper()
return self._vkey_to_airhid_button_code(ord(c))
elif c in string.ascii_uppercase:
return self._vkey_to_airhid_button_code(ord(c)) + AirHidDriver.SHIFT
elif c in AirHidDriver.KEY_MAPPINGS:
return self._vkey_to_airhid_button_code(AirHidDriver.KEY_MAPPINGS[c])
elif c in AirHidDriver.SHIFT_KEY_MAPPINGS:
return self._vkey_to_airhid_button_code(AirHidDriver.SHIFT_KEY_MAPPINGS[c]) + AirHidDriver.SHIFT
else:
raise ValueError('Cannot encode character: %s' % c)
def left_click(self):
self._send("clk.1_DOWN")
self._send("clk.1_UP")
return self
def right_click(self):
self._send("clk.2_DOWN")
self._send("clk.2_UP")
return self
def scroll_up(self):
self._send("clk.3_UP")
return self
def scroll_down(self):
self._send("clk.3_DOWN")
return self
def move_mouse(self, deltaX, deltaY):
self._send("pos." + str(deltaX) + "," + str(deltaY))
return self
def open_url(self, url):
self._send("url." + url)
return self
def typeText(self, text):
format = "cmd."
code = None
for c in text:
code = self._get_char_code(c)
self._send(format + code)
self._send(format + code + "_UP")
return self
def press_action_key(self, name, shift=False, ctrl=False, alt=False):
if name not in AirHidDriver.ACTION_KEYS:
raise ValueError('Unknown action key name: %s' % name)
format = "cmd."
command = str(AirHidDriver.ACTION_KEYS[name])
if shift:
command += AirHidDriver.SHIFT
if ctrl:
command += AirHidDriver.CTRL
if alt:
command += AirHidDriver.ALT
self._send(format + command)
return self
def ping(self):
format = "from:airhid"
self._send(format)
return self._listen_for_replies()
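# Example usage (illustrative; assumes an AirHID server is listening at the
# given address):
#
#     driver = AirHidDriver('192.168.1.50')
#     if driver.ping():
#         driver.move_mouse(10, -5).left_click()
#         driver.typeText('hello')
#     driver.close()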
|
|
# -*- coding: utf-8 -*-
import json
import logging
from bson import ObjectId
from flask import Blueprint, Response, flash, jsonify, redirect, render_template, request, url_for
from flask_login import current_user
from werkzeug.datastructures import Headers
from scout.constants import ACMG_COMPLETE_MAP, ACMG_MAP, CASEDATA_HEADER, CLINVAR_HEADER
from scout.server.blueprints.variants.controllers import update_form_hgnc_symbols
from scout.server.extensions import loqusdb, store
from scout.server.utils import institute_and_case, jsonconverter, templated
from . import controllers
from .forms import GeneVariantFiltersForm, InstituteForm, PhenoModelForm, PhenoSubPanelForm
LOG = logging.getLogger(__name__)
blueprint = Blueprint(
"overview",
__name__,
template_folder="templates",
static_folder="static",
static_url_path="/overview/static",
)
@blueprint.route("/api/v1/institutes", methods=["GET"])
def api_institutes():
"""API endpoint that returns institutes data"""
data = dict(institutes=controllers.institutes())
return jsonify(data)
@blueprint.route("/overview")
def institutes():
"""Display a list of all user institutes."""
data = dict(institutes=controllers.institutes())
return render_template("overview/institutes.html", **data)
@blueprint.route("/api/v1/institutes/<institute_id>/cases", methods=["GET", "POST"])
def api_cases(institute_id):
"""API endpoint that returns all cases for a given institute"""
case_data = controllers.cases(store, request, institute_id)
json_cases = json.dumps({"cases": case_data}, default=jsonconverter)
return json_cases
@blueprint.route("/<institute_id>/cases", methods=["GET", "POST"])
@templated("overview/cases.html")
def cases(institute_id):
"""Display a list of cases for an institute."""
return controllers.cases(store, request, institute_id)
@blueprint.route("/<institute_id>/causatives")
@templated("overview/causatives.html")
def causatives(institute_id):
institute_obj = institute_and_case(store, institute_id)
query = request.args.get("query", "")
hgnc_id = None
if "|" in query:
# filter accepts an array of IDs. Provide an array with one ID element
try:
hgnc_id = [int(query.split(" | ", 1)[0])]
except ValueError:
flash("Provided gene info could not be parsed!", "warning")
variants = list(store.check_causatives(institute_obj=institute_obj, limit_genes=hgnc_id))
if variants:
variants = sorted(
variants,
key=lambda k: k.get("hgnc_symbols", [None])[0] or k.get("str_repid") or "",
)
all_variants = {}
all_cases = {}
for variant_obj in variants:
if variant_obj["case_id"] not in all_cases:
case_obj = store.case(variant_obj["case_id"])
all_cases[variant_obj["case_id"]] = case_obj
else:
case_obj = all_cases[variant_obj["case_id"]]
if variant_obj["variant_id"] not in all_variants:
all_variants[variant_obj["variant_id"]] = []
all_variants[variant_obj["variant_id"]].append((case_obj, variant_obj))
acmg_map = {key: ACMG_COMPLETE_MAP[value] for key, value in ACMG_MAP.items()}
return dict(institute=institute_obj, variant_groups=all_variants, acmg_map=acmg_map)
@blueprint.route("/<institute_id>/filters", methods=["GET"])
@templated("overview/filters.html")
def filters(institute_id):
form = request.form
institute_obj = institute_and_case(store, institute_id)
filters = controllers.filters(store, institute_id)
return dict(institute=institute_obj, form=form, filters=filters)
@blueprint.route("/<institute_id>/lock_filter/<filter_id>", methods=["POST"])
def lock_filter(institute_id, filter_id):
filter_lock = request.form.get("filter_lock", "False")
LOG.debug(
"Attempting to toggle lock %s for %s with status %s",
filter_id,
institute_id,
filter_lock,
)
if filter_lock == "True":
filter_obj = controllers.unlock_filter(store, current_user, filter_id)
if filter_lock == "False" or not filter_lock:
filter_obj = controllers.lock_filter(store, current_user, filter_id)
return redirect(request.referrer)
@blueprint.route("/<institute_id>/gene_variants", methods=["GET", "POST"])
@templated("overview/gene_variants.html")
def gene_variants(institute_id):
"""Display a list of SNV variants."""
page = int(request.form.get("page", 1))
institute_obj = institute_and_case(store, institute_id)
data = {}
if request.method == "GET":
form = GeneVariantFiltersForm(request.args)
else: # POST
form = GeneVariantFiltersForm(request.form)
if form.variant_type.data == []:
form.variant_type.data = ["clinical"]
variant_type = form.data.get("variant_type")
update_form_hgnc_symbols(store=store, case_obj=None, form=form)
# If no valid gene is provided, redirect to form
if not form.hgnc_symbols.data:
flash("Provided gene symbols could not be used in variants' search", "warning")
return redirect(request.referrer)
variants_query = store.gene_variants(
query=form.data,
institute_id=institute_id,
category="snv",
variant_type=variant_type,
)
result_size = store.count_gene_variants(
query=form.data,
institute_id=institute_id,
category="snv",
variant_type=variant_type,
)
data = controllers.gene_variants(store, variants_query, result_size, page)
return dict(institute=institute_obj, form=form, page=page, **data)
@blueprint.route("/overview/<institute_id>/settings", methods=["GET", "POST"])
def institute_settings(institute_id):
"""Show institute settings page"""
if institute_id not in current_user.institutes and current_user.is_admin is False:
flash(
"Current user doesn't have the permission to modify this institute",
"warning",
)
return redirect(request.referrer)
institute_obj = store.institute(institute_id)
form = InstituteForm(request.form)
# if institute is to be updated
if request.method == "POST" and form.validate_on_submit():
institute_obj = controllers.update_institute_settings(store, institute_obj, request.form)
if isinstance(institute_obj, dict):
flash("institute was updated ", "success")
else: # an error message was returned
flash(institute_obj, "warning")
return redirect(request.referrer)
data = controllers.institute(store, institute_id)
loqus_instances = loqusdb.loqus_ids if hasattr(loqusdb, "loqus_ids") else []
default_phenotypes = controllers.populate_institute_form(form, institute_obj)
return render_template(
"/overview/institute_settings.html",
form=form,
default_phenotypes=default_phenotypes,
loqus_instances=loqus_instances,
panel=1,
**data,
)
@blueprint.route("/overview/<institute_id>/users", methods=["GET"])
def institute_users(institute_id):
"""Should institute users list"""
if institute_id not in current_user.institutes and current_user.is_admin is False:
flash(
"Current user doesn't have the permission to modify this institute",
"warning",
)
return redirect(request.referrer)
data = controllers.institute(store, institute_id)
return render_template("/overview/users.html", panel=2, **data)
@blueprint.route("/<submission>/<case>/rename/<old_name>", methods=["POST"])
def clinvar_rename_casedata(submission, case, old_name):
"""Rename one or more casedata individuals belonging to the same clinvar submission, same case"""
new_name = request.form.get("new_name")
controllers.update_clinvar_sample_names(store, submission, case, old_name, new_name)
return redirect(request.referrer + f"#cdata_{submission}")
@blueprint.route("/<institute_id>/<submission>/update_status", methods=["POST"])
def clinvar_update_submission(institute_id, submission):
"""Update a submission status to open/closed, register an official SUB number or delete the entire submission"""
controllers.update_clinvar_submission_status(store, request, institute_id, submission)
return redirect(request.referrer)
@blueprint.route("/<submission>/<object_type>", methods=["POST"])
def clinvar_delete_object(submission, object_type):
"""Delete a single object (variant_data or case_data) associated with a clinvar submission"""
store.delete_clinvar_object(
object_id=request.form.get("delete_object"),
object_type=object_type,
submission_id=submission,
)
return redirect(request.referrer)
@blueprint.route("/<submission>/download/<csv_type>/<clinvar_id>", methods=["GET"])
def clinvar_download_csv(submission, csv_type, clinvar_id):
"""Download a csv (Variant file or CaseData file) for a clinVar submission"""
def generate_csv(header, lines):
"""Return downloaded header and lines with quoted fields"""
yield header + "\n"
for line in lines:
yield line + "\n"
clinvar_file_data = controllers.clinvar_submission_file(store, submission, csv_type, clinvar_id)
if clinvar_file_data is None:
return redirect(request.referrer)
headers = Headers()
headers.add("Content-Disposition", "attachment", filename=clinvar_file_data[0])
return Response(
generate_csv(",".join(clinvar_file_data[1]), clinvar_file_data[2]),
mimetype="text/csv",
headers=headers,
)
@blueprint.route("/<institute_id>/clinvar_submissions", methods=["GET"])
@templated("overview/clinvar_submissions.html")
def clinvar_submissions(institute_id):
"""Handle clinVar submission objects and files"""
institute_obj = institute_and_case(store, institute_id)
data = {
"submissions": controllers.clinvar_submissions(store, institute_id),
"institute": institute_obj,
"variant_header_fields": CLINVAR_HEADER,
"casedata_header_fields": CASEDATA_HEADER,
}
return data
@blueprint.route("/<institute_id>/advanced_phenotypes", methods=["GET", "POST"])
@templated("overview/phenomodels.html")
def advanced_phenotypes(institute_id):
"""Show institute-level advanced phenotypes"""
institute_obj = institute_and_case(store, institute_id)
# Get a list of all users which are registered to this institute or collaborate with it
collaborators = set()
for inst_id in [institute_id] + institute_obj.get("collaborators", []):
for user in store.users(institute=inst_id):
if (
user["email"] == current_user.email
): # Do not include current user among collaborators
continue
collaborators.add((user["email"], user["name"], inst_id))
if request.form.get("create_model"): # creating a new phenomodel
store.create_phenomodel(
institute_id, request.form.get("model_name"), request.form.get("model_desc")
)
pheno_form = PhenoModelForm(request.form or None)
phenomodels = list(store.phenomodels(institute_id=institute_id))
data = {
"institute": institute_obj,
"collaborators": collaborators,
"pheno_form": pheno_form,
"phenomodels": phenomodels,
}
return data
@blueprint.route("/advanced_phenotypes/lock", methods=["POST"])
def lock_phenomodel():
"""Lock or unlock a specific phenomodel for editing"""
form = request.form
model_id = form.get("model_id")
phenomodel_obj = store.phenomodel(model_id)
if phenomodel_obj is None:
return redirect(request.referrer)
phenomodel_obj["admins"] = []
if (
"lock" in form
): # lock phenomodel for all users except current user and specified collaborators
phenomodel_obj["admins"] = [current_user.email] + form.getlist("user_admins")
# update phenomodels admins:
store.update_phenomodel(model_id, phenomodel_obj)
return redirect(request.referrer)
@blueprint.route("/advanced_phenotypes/remove", methods=["POST"])
def remove_phenomodel():
"""Remove an entire phenomodel using its id"""
model_id = request.form.get("model_id")
model_obj = store.phenomodel_collection.find_one_and_delete({"_id": ObjectId(model_id)})
if model_obj is None:
flash(f"An error occurred while deleting phenotype model", "warning")
return redirect(request.referrer)
@blueprint.route("/<institute_id>/phenomodel/<model_id>/edit_subpanel", methods=["POST"])
def checkbox_edit(institute_id, model_id):
"""Add or delete a single checkbox in a phenotyoe subpanel"""
controllers.edit_subpanel_checkbox(model_id, request.form)
return redirect(url_for(".phenomodel", institute_id=institute_id, model_id=model_id))
@blueprint.route("/<institute_id>/phenomodel/<model_id>", methods=["GET", "POST"])
@templated("overview/phenomodel.html")
def phenomodel(institute_id, model_id):
"""View/Edit an advanced phenotype model"""
institute_obj = institute_and_case(store, institute_id)
pheno_form = PhenoModelForm(request.form)
subpanel_form = PhenoSubPanelForm(request.form)
hide_subpanel = True
if request.method == "POST":
# update an existing phenotype model
controllers.update_phenomodel(model_id, request.form)
phenomodel_obj = store.phenomodel(model_id)
if phenomodel_obj is None:
flash(
f"Could not retrieve given phenotype model using the given key '{model_id}'",
"warning",
)
return redirect(request.referrer)
pheno_form.model_name.data = phenomodel_obj["name"]
pheno_form.model_desc.data = phenomodel_obj["description"]
return dict(
institute=institute_obj,
pheno_form=pheno_form,
phenomodel=phenomodel_obj,
subpanel_form=subpanel_form,
)
|
|
# Copyright 2008, 2009 Neil Martinsen-Burrell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Defines a file-derived class to read/write Fortran unformatted files.
The assumption is that a Fortran unformatted file is being written by
the Fortran runtime as a sequence of records. Each record consists of
an integer (of the default size [usually 32 or 64 bits]) giving the
length of the following data in bytes, then the data itself, then the
same integer as before.
Examples
--------
To use the default endian and size settings, one can just do::
>>> f = FortranFile('filename')
>>> x = f.readReals()
One can read arrays with varying precisions::
>>> f = FortranFile('filename')
>>> x = f.readInts('h')
>>> y = f.readInts('q')
>>> z = f.readReals('f')
Where the format codes are those used by Python's struct module.
One can change the default endian-ness and header precision::
>>> f = FortranFile('filename', endian='>', header_prec='l')
for a file with big-endian data whose record headers are long
integers.
"""
__docformat__ = "restructuredtext en"
import struct
import numpy
class FortranFile(file):
"""File with methods for dealing with fortran unformatted data files"""
def _get_header_length(self):
return struct.calcsize(self._header_prec)
_header_length = property(fget=_get_header_length)
def _set_endian(self,c):
"""Set endian to big (c='>') or little (c='<') or native (c='@')
:Parameters:
`c` : string
The endian-ness to use when reading from this file.
"""
if c in '<>@=':
self._endian = c
else:
raise ValueError('Cannot set endian-ness')
def _get_endian(self):
return self._endian
ENDIAN = property(fset=_set_endian,
fget=_get_endian,
doc="Possible endian values are '<', '>', '@', '='"
)
def _set_header_prec(self, prec):
if prec in 'hilq':
self._header_prec = prec
else:
raise ValueError('Cannot set header precision')
def _get_header_prec(self):
return self._header_prec
HEADER_PREC = property(fset=_set_header_prec,
fget=_get_header_prec,
doc="Possible header precisions are 'h', 'i', 'l', 'q'"
)
def __init__(self, fname, endian='@', header_prec='i', *args, **kwargs):
"""Open a Fortran unformatted file for writing.
Parameters
----------
endian : character, optional
Specify the endian-ness of the file. Possible values are
'>', '<', '@' and '='. See the documentation of Python's
struct module for their meanings. The default is '@' (native
byte order)
header_prec : character, optional
Specify the precision used for the record headers. Possible
values are 'h', 'i', 'l' and 'q' with their meanings from
Python's struct module. The default is 'i' (the system's
default integer).
"""
file.__init__(self, fname, *args, **kwargs)
self.ENDIAN = endian
self.HEADER_PREC = header_prec
def _read_exactly(self, num_bytes):
"""Read in exactly num_bytes, raising an error if it can't be done."""
data = ''
while True:
l = len(data)
if l == num_bytes:
return data
else:
read_data = self.read(num_bytes - l)
if read_data == '':
raise IOError('Could not read enough data.'
' Wanted %d bytes, got %d.' % (num_bytes, l))
data += read_data
def _read_check(self):
return struct.unpack(self.ENDIAN+self.HEADER_PREC,
self._read_exactly(self._header_length)
)[0]
def _write_check(self, number_of_bytes):
"""Write the header for the given number of bytes"""
self.write(struct.pack(self.ENDIAN+self.HEADER_PREC,
number_of_bytes))
def readRecord(self):
"""Read a single fortran record"""
l = self._read_check()
data_str = self._read_exactly(l)
check_size = self._read_check()
if check_size != l:
raise IOError('Error reading record from data file')
return data_str
def writeRecord(self,s):
"""Write a record with the given bytes.
Parameters
----------
s : the string to write
"""
length_bytes = len(s)
self._write_check(length_bytes)
self.write(s)
self._write_check(length_bytes)
def readString(self):
"""Read a string."""
return self.readRecord()
def writeString(self,s):
"""Write a string
Parameters
----------
s : the string to write
"""
self.writeRecord(s)
_real_precisions = 'df'
def readReals(self, prec='f'):
"""Read in an array of real numbers.
Parameters
----------
prec : character, optional
Specify the precision of the array using character codes from
Python's struct module. Possible values are 'd' and 'f'.
"""
_numpy_precisions = {'d': numpy.float64,
'f': numpy.float32
}
if prec not in self._real_precisions:
raise ValueError('Not an appropriate precision')
data_str = self.readRecord()
num = len(data_str)/struct.calcsize(prec)
numbers =struct.unpack(self.ENDIAN+str(num)+prec,data_str)
return numpy.array(numbers, dtype=_numpy_precisions[prec])
def writeReals(self, reals, prec='f'):
"""Write an array of floats in given precision
Parameters
----------
reals : array
Data to write
prec : string
Character code for the precision to use in writing
"""
if prec not in self._real_precisions:
raise ValueError('Not an appropriate precision')
# Don't use writeRecord to avoid having to form a
# string as large as the array of numbers
length_bytes = len(reals)*struct.calcsize(prec)
self._write_check(length_bytes)
_fmt = self.ENDIAN + prec
for r in reals:
self.write(struct.pack(_fmt,r))
self._write_check(length_bytes)
_int_precisions = 'hilq'
def readInts(self, prec='i'):
"""Read an array of integers.
Parameters
----------
prec : character, optional
Specify the precision of the data to be read using
character codes from Python's struct module. Possible
values are 'h', 'i', 'l' and 'q'
"""
if prec not in self._int_precisions:
raise ValueError('Not an appropriate precision')
data_str = self.readRecord()
num = len(data_str)/struct.calcsize(prec)
return numpy.array(struct.unpack(self.ENDIAN+str(num)+prec,data_str))
def writeInts(self, ints, prec='i'):
"""Write an array of integers in given precision
Parameters
----------
ints : array
Data to write
prec : string
Character code for the precision to use in writing
"""
if prec not in self._int_precisions:
raise ValueError('Not an appropriate precision')
# Don't use writeRecord to avoid having to form a
# string as large as the array of numbers
length_bytes = len(ints)*struct.calcsize(prec)
self._write_check(length_bytes)
_fmt = self.ENDIAN + prec
for item in ints:
self.write(struct.pack(_fmt,item))
self._write_check(length_bytes)
|
|
import unittest
import os
from os import path
import sys
import subprocess
import time
from errno import EPERM
from support import SpaghettiTestCase, randomdata
def wait_for_mount(mount_path):
for c in xrange(20):
if path.ismount(mount_path):
return True
time.sleep(.1)
else:
return False
def do_umount(mount_path):
if sys.platform == 'darwin':
cmd = ['umount', mount_path]
elif sys.platform == 'linux2':
cmd = ['fusermount', '-u', '-z', mount_path]
else:
raise ValueError("Don't know how to unmount a fuse filesystem")
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0]
class SpaghettiMountTestCase(SpaghettiTestCase):
script_tmpl = "from spaghettifs.filesystem import mount; mount(%s, %s)"
mounted = False
def mount(self):
self.mount_point = path.join(self.tmpdir, 'mnt')
os.mkdir(self.mount_point)
script = self.script_tmpl % (repr(self.repo_path),
repr(self.mount_point))
self.fsmount = subprocess.Popen([sys.executable, '-c', script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# wait for mount operation to complete
if not wait_for_mount(self.mount_point):
if self.fsmount.poll():
self._output = self.fsmount.communicate()[0]
raise AssertionError('Filesystem did not mount after 2 seconds')
self.mounted = True
def umount(self):
msg = do_umount(path.realpath(self.mount_point))
self._output = self.fsmount.communicate()[0]
self.mounted = False
class BasicFilesystemOps(SpaghettiMountTestCase):
def setUp(self):
super(BasicFilesystemOps, self).setUp()
self.mount()
def tearDown(self):
self.umount()
super(BasicFilesystemOps, self).tearDown()
def test_listing(self):
ls = os.listdir(self.mount_point)
self.assertEqual(set(ls), set(['a.txt', 'b']))
def test_read_file(self):
data = open(path.join(self.mount_point, 'a.txt')).read()
self.assertEqual(data, 'text file "a"\n')
def test_write_file(self):
new_file_path = path.join(self.mount_point, 'newfile')
self.assertFalse('newfile' in os.listdir(self.mount_point))
f = open(new_file_path, 'wb')
self.assertTrue('newfile' in os.listdir(self.mount_point))
self.assertEqual(os.stat(new_file_path).st_size, 0)
self.assertEqual(open(new_file_path).read(), '')
f.write('something here!')
f.flush()
self.assertEqual(os.stat(new_file_path).st_size, 15)
self.assertEqual(open(new_file_path).read(), 'something here!')
f.seek(10)
f.write('there!')
f.flush()
self.assertEqual(os.stat(new_file_path).st_size, 16)
self.assertEqual(open(new_file_path).read(), 'something there!')
f.truncate(9)
f.flush()
self.assertEqual(os.stat(new_file_path).st_size, 9)
self.assertEqual(open(new_file_path).read(), 'something')
f.seek(15)
f.write('else')
f.flush()
self.assertEqual(os.stat(new_file_path).st_size, 19)
self.assertEqual(open(new_file_path).read(), 'something\0\0\0\0\0\0else')
def test_large_data(self):
_64K = 64*1024
_1M = 1024*1024
test_file_path = path.join(self.mount_point, 'newfile2')
test_data = randomdata(_1M)
f = open(test_file_path, 'wb')
for c in xrange(0, _1M, _64K):
f.write(test_data[c:c+_64K])
f.close()
f2 = open(test_file_path, 'rb')
for c in xrange(0, _1M, _64K):
d = f2.read(_64K)
self.assertEqual(d, test_data[c:c+_64K])
f2.close()
def test_unlink(self):
new_file_path = path.join(self.mount_point, 'newfile')
f = open(new_file_path, 'wb')
f.write('hey')
f.close()
self.assertTrue('newfile' in os.listdir(self.mount_point))
os.unlink(new_file_path)
self.assertFalse('newfile' in os.listdir(self.mount_point))
def test_mkdir_listdir_rmdir(self):
new_dir_path = path.join(self.mount_point, 'newdir')
self.assertFalse('newdir' in os.listdir(self.mount_point))
os.mkdir(new_dir_path)
self.assertTrue('newdir' in os.listdir(self.mount_point))
self.assertEqual(os.listdir(new_dir_path), [])
os.rmdir(new_dir_path)
self.assertFalse('newdir' in os.listdir(self.mount_point))
def test_link(self):
orig_path = path.join(self.mount_point, 'orig')
linked_path = path.join(self.mount_point, 'linked')
f = open(orig_path, 'wb')
f.write('hey')
f.close()
self.assertEqual(os.stat(orig_path).st_nlink, 1)
os.link(orig_path, linked_path)
self.assertEqual(os.stat(orig_path).st_nlink, 2)
# FUSE seems to mangle st_ino
#self.assertEqual(os.stat(orig_path).st_ino,
# os.stat(linked_path).st_ino)
f = open(orig_path, 'wb')
f.write('asdf')
f.close()
f = open(linked_path, 'rb')
linked_data = f.read()
f.close()
self.assertEqual(linked_data, 'asdf')
os.unlink(orig_path)
self.assertEqual(os.stat(linked_path).st_nlink, 1)
def test_rename_file(self):
orig_path = path.join(self.mount_point, 'orig')
new_path = path.join(self.mount_point, 'linked')
f = open(orig_path, 'wb')
f.write('hey')
f.close()
self.assertEqual(os.stat(orig_path).st_nlink, 1)
self.assertTrue(path.isfile(orig_path))
self.assertFalse(path.isfile(new_path))
os.rename(orig_path, new_path)
self.assertFalse(path.isfile(orig_path))
self.assertTrue(path.isfile(new_path))
self.assertEqual(os.stat(new_path).st_nlink, 1)
f = open(new_path, 'rb')
data = f.read()
f.close()
self.assertEqual(data, 'hey')
def test_not_permitted(self):
myf_path = path.join(self.mount_point, 'myf')
myf2_path = path.join(self.mount_point, 'myf2')
os.mkdir(myf_path)
try:
os.rename(myf_path, myf2_path)
except OSError, e:
self.assertEqual(e.errno, EPERM)
else:
self.fail('OSError not raised')
try:
os.link(myf_path, myf2_path)
except OSError, e:
self.assertEqual(e.errno, EPERM)
else:
self.fail('OSError not raised')
class FilesystemLoggingTestCase(unittest.TestCase):
def test_custom_repr(self):
from spaghettifs.filesystem import LogWrap
self.assertEqual(repr(LogWrap('asdf')), repr('asdf'))
self.assertEqual(repr(LogWrap('"')), repr('"'))
self.assertEqual(repr(LogWrap('\'')), repr('\''))
self.assertEqual(repr(LogWrap(u'q')), repr(u'q'))
self.assertEqual(repr(LogWrap('qwer'*64)), "'qwerqwerqw[...(len=256)]'")
self.assertEqual(repr(LogWrap(u'asdf'*64)), "u'asdfasdfa[...(len=256)]'")
self.assertEqual(repr(LogWrap(range(3))), '[0, 1, 2]')
self.assertEqual(repr(LogWrap(range(100))), repr(range(100)))
if __name__ == '__main__':
unittest.main()
|
|
"""Support for Xiaomi binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.xiaomi import (PY_XIAOMI_GATEWAY, XiaomiDevice)
_LOGGER = logging.getLogger(__name__)
NO_CLOSE = 'no_close'
ATTR_OPEN_SINCE = 'Open since'
MOTION = 'motion'
NO_MOTION = 'no_motion'
ATTR_NO_MOTION_SINCE = 'No motion since'
DENSITY = 'density'
ATTR_DENSITY = 'Density'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Perform the setup for Xiaomi devices."""
devices = []
for (_, gateway) in hass.data[PY_XIAOMI_GATEWAY].gateways.items():
for device in gateway.devices['binary_sensor']:
model = device['model']
if model == 'motion':
devices.append(XiaomiMotionSensor(device, hass, gateway))
elif model == 'sensor_motion.aq2':
devices.append(XiaomiMotionSensor(device, hass, gateway))
elif model == 'magnet':
devices.append(XiaomiDoorSensor(device, gateway))
elif model == 'sensor_magnet.aq2':
devices.append(XiaomiDoorSensor(device, gateway))
elif model == 'smoke':
devices.append(XiaomiSmokeSensor(device, gateway))
elif model == 'natgas':
devices.append(XiaomiNatgasSensor(device, gateway))
elif model == 'switch':
devices.append(XiaomiButton(device, 'Switch', 'status',
hass, gateway))
elif model == 'sensor_switch.aq2':
devices.append(XiaomiButton(device, 'Switch', 'status',
hass, gateway))
elif model == '86sw1':
devices.append(XiaomiButton(device, 'Wall Switch', 'channel_0',
hass, gateway))
elif model == '86sw2':
devices.append(XiaomiButton(device, 'Wall Switch (Left)',
'channel_0', hass, gateway))
devices.append(XiaomiButton(device, 'Wall Switch (Right)',
'channel_1', hass, gateway))
devices.append(XiaomiButton(device, 'Wall Switch (Both)',
'dual_channel', hass, gateway))
elif model == 'cube':
devices.append(XiaomiCube(device, hass, gateway))
add_devices(devices)
class XiaomiBinarySensor(XiaomiDevice, BinarySensorDevice):
"""Representation of a base XiaomiBinarySensor."""
def __init__(self, device, name, xiaomi_hub, data_key, device_class):
"""Initialize the XiaomiSmokeSensor."""
self._data_key = data_key
self._device_class = device_class
self._should_poll = False
self._density = 0
XiaomiDevice.__init__(self, device, name, xiaomi_hub)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return self._should_poll
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of binary sensor."""
return self._device_class
def update(self):
"""Update the sensor state."""
_LOGGER.debug('Updating xiaomi sensor by polling')
self._get_from_hub(self._sid)
class XiaomiNatgasSensor(XiaomiBinarySensor):
"""Representation of a XiaomiNatgasSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiSmokeSensor."""
self._density = None
XiaomiBinarySensor.__init__(self, device, 'Natgas Sensor', xiaomi_hub,
'alarm', 'gas')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value == '1':
if self._state:
return False
self._state = True
return True
elif value == '0':
if self._state:
self._state = False
return True
return False
class XiaomiMotionSensor(XiaomiBinarySensor):
"""Representation of a XiaomiMotionSensor."""
def __init__(self, device, hass, xiaomi_hub):
"""Initialize the XiaomiMotionSensor."""
self._hass = hass
self._no_motion_since = 0
XiaomiBinarySensor.__init__(self, device, 'Motion Sensor', xiaomi_hub,
'status', 'motion')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_NO_MOTION_SINCE: self._no_motion_since}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data):
"""Parse data sent by gateway."""
self._should_poll = False
if NO_MOTION in data: # handle push from the hub
self._no_motion_since = data[NO_MOTION]
self._state = False
return True
value = data.get(self._data_key)
if value is None:
return False
if value == MOTION:
self._should_poll = True
if self.entity_id is not None:
self._hass.bus.fire('motion', {
'entity_id': self.entity_id
})
self._no_motion_since = 0
if self._state:
return False
self._state = True
return True
elif value == NO_MOTION:
if not self._state:
return False
self._state = False
return True
class XiaomiDoorSensor(XiaomiBinarySensor):
"""Representation of a XiaomiDoorSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiDoorSensor."""
self._open_since = 0
XiaomiBinarySensor.__init__(self, device, 'Door Window Sensor',
xiaomi_hub, 'status', 'opening')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_OPEN_SINCE: self._open_since}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data):
"""Parse data sent by gateway."""
self._should_poll = False
if NO_CLOSE in data: # handle push from the hub
self._open_since = data[NO_CLOSE]
return True
value = data.get(self._data_key)
if value is None:
return False
if value == 'open':
self._should_poll = True
if self._state:
return False
self._state = True
return True
elif value == 'close':
self._open_since = 0
if self._state:
self._state = False
return True
return False
class XiaomiSmokeSensor(XiaomiBinarySensor):
"""Representation of a XiaomiSmokeSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiSmokeSensor."""
self._density = 0
XiaomiBinarySensor.__init__(self, device, 'Smoke Sensor', xiaomi_hub,
'alarm', 'smoke')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value == '1':
if self._state:
return False
self._state = True
return True
elif value == '0':
if self._state:
self._state = False
return True
return False
class XiaomiButton(XiaomiBinarySensor):
"""Representation of a Xiaomi Button."""
def __init__(self, device, name, data_key, hass, xiaomi_hub):
"""Initialize the XiaomiButton."""
self._hass = hass
XiaomiBinarySensor.__init__(self, device, name, xiaomi_hub,
data_key, None)
def parse_data(self, data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value == 'long_click_press':
self._state = True
click_type = 'long_click_press'
elif value == 'long_click_release':
self._state = False
click_type = 'hold'
elif value == 'click':
click_type = 'single'
elif value == 'double_click':
click_type = 'double'
elif value == 'both_click':
click_type = 'both'
else:
return False
self._hass.bus.fire('click', {
'entity_id': self.entity_id,
'click_type': click_type
})
if value in ['long_click_press', 'long_click_release']:
return True
return False
class XiaomiCube(XiaomiBinarySensor):
"""Representation of a Xiaomi Cube."""
def __init__(self, device, hass, xiaomi_hub):
"""Initialize the Xiaomi Cube."""
self._hass = hass
self._state = False
XiaomiBinarySensor.__init__(self, device, 'Cube', xiaomi_hub,
None, None)
def parse_data(self, data):
"""Parse data sent by gateway."""
if 'status' in data:
self._hass.bus.fire('cube_action', {
'entity_id': self.entity_id,
'action_type': data['status']
})
if 'rotate' in data:
self._hass.bus.fire('cube_action', {
'entity_id': self.entity_id,
'action_type': 'rotate',
'action_value': float(data['rotate'].replace(",", "."))
})
return False
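# --- Hedged illustration (not part of the original integration) ---
# parse_data is expected to return True only when the sensor state changes.
# _FakeNatgasSensor is a made-up stand-in carrying just the attributes that
# XiaomiNatgasSensor.parse_data touches, so no gateway-backed device is needed.
class _FakeNatgasSensor:
    _data_key = 'alarm'
    _state = False
    _density = 0
def _example_parse_data():
    sensor = _FakeNatgasSensor()
    assert XiaomiNatgasSensor.parse_data(sensor, {'alarm': '1', 'density': '8'})
    assert sensor._state is True and sensor._density == 8
    assert not XiaomiNatgasSensor.parse_data(sensor, {'alarm': '1'})  # unchanged
    assert XiaomiNatgasSensor.parse_data(sensor, {'alarm': '0'})      # cleared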
|
|
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.matplot_dep.plot_definitions nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from matplotlib import pyplot as plt
from ..abstract_plotting_library import AbstractPlottingLibrary
from .. import Tango
from . import defaults
from matplotlib.colors import LinearSegmentedColormap
from .controllers import ImshowController, ImAnnotateController
import itertools
from .util import legend_ontop
class MatplotlibPlots(AbstractPlottingLibrary):
def __init__(self):
super(MatplotlibPlots, self).__init__()
self._defaults = defaults.__dict__
def figure(self, rows=1, cols=1, gridspec_kwargs={}, tight_layout=True, **kwargs):
fig = plt.figure(tight_layout=tight_layout, **kwargs)
fig.rows = rows
fig.cols = cols
fig.gridspec = plt.GridSpec(rows, cols, **gridspec_kwargs)
return fig
def new_canvas(self, figure=None, row=1, col=1, projection='2d', xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, **kwargs):
if projection == '3d':
from mpl_toolkits.mplot3d import Axes3D
elif projection == '2d':
projection = None
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
if figure is not None:
fig = figure
elif 'num' in kwargs and 'figsize' in kwargs:
fig = self.figure(num=kwargs.pop('num'), figsize=kwargs.pop('figsize'))
elif 'num' in kwargs:
fig = self.figure(num=kwargs.pop('num'))
elif 'figsize' in kwargs:
fig = self.figure(figsize=kwargs.pop('figsize'))
else:
fig = self.figure()
#if hasattr(fig, 'rows') and hasattr(fig, 'cols'):
ax = fig.add_subplot(fig.gridspec[row-1, col-1], projection=projection)
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_ylabel(ylabel)
if title is not None: ax.set_title(title)
if projection == '3d':
if zlim is not None: ax.set_zlim(zlim)
if zlabel is not None: ax.set_zlabel(zlabel)
return ax, kwargs
def add_to_canvas(self, ax, plots, legend=False, title=None, **kwargs):
#ax.autoscale_view()
fontdict=dict(family='sans-serif', weight='light', size=9)
if legend is True:
ax.legend(*ax.get_legend_handles_labels())
elif legend >= 1:
#ax.legend(prop=fontdict)
legend_ontop(ax, ncol=legend, fontdict=fontdict)
if title is not None: ax.figure.suptitle(title)
return plots
def show_canvas(self, ax, **kwargs):
ax.figure.canvas.draw()
return ax.figure
def scatter(self, ax, X, Y, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, marker='o', **kwargs):
if Z is not None:
return ax.scatter(X, Y, c=color, zs=Z, label=label, marker=marker, **kwargs)
return ax.scatter(X, Y, c=color, label=label, marker=marker, **kwargs)
def plot(self, ax, X, Y, Z=None, color=None, label=None, **kwargs):
if Z is not None:
return ax.plot(X, Y, color=color, zs=Z, label=label, **kwargs)
return ax.plot(X, Y, color=color, label=label, **kwargs)
def plot_axis_lines(self, ax, X, color=Tango.colorsHex['darkRed'], label=None, **kwargs):
from matplotlib import transforms
from matplotlib.path import Path
if 'marker' not in kwargs:
kwargs['marker'] = Path([[-.2,0.], [-.2,.5], [0.,1.], [.2,.5], [.2,0.], [-.2,0.]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
if 'transform' not in kwargs:
if X.shape[1] == 1:
kwargs['transform'] = transforms.blended_transform_factory(ax.transData, ax.transAxes)
if X.shape[1] == 2:
return ax.scatter(X[:,0], X[:,1], ax.get_zlim()[0], c=color, label=label, **kwargs)
return ax.scatter(X, np.zeros_like(X), c=color, label=label, **kwargs)
def barplot(self, ax, x, height, width=0.8, bottom=0, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
if 'align' not in kwargs:
kwargs['align'] = 'center'
return ax.bar(x=x, height=height, width=width,
bottom=bottom, label=label, color=color,
**kwargs)
def xerrorbar(self, ax, X, Y, error, color=Tango.colorsHex['darkRed'], label=None, **kwargs):
if not('linestyle' in kwargs or 'ls' in kwargs):
kwargs['ls'] = 'none'
#if Z is not None:
# return ax.errorbar(X, Y, Z, xerr=error, ecolor=color, label=label, **kwargs)
return ax.errorbar(X, Y, xerr=error, ecolor=color, label=label, **kwargs)
def yerrorbar(self, ax, X, Y, error, color=Tango.colorsHex['darkRed'], label=None, **kwargs):
if not('linestyle' in kwargs or 'ls' in kwargs):
kwargs['ls'] = 'none'
#if Z is not None:
# return ax.errorbar(X, Y, Z, yerr=error, ecolor=color, label=label, **kwargs)
return ax.errorbar(X, Y, yerr=error, ecolor=color, label=label, **kwargs)
def imshow(self, ax, X, extent=None, label=None, vmin=None, vmax=None, **imshow_kwargs):
if 'origin' not in imshow_kwargs:
imshow_kwargs['origin'] = 'lower'
#xmin, xmax, ymin, ymax = extent
#xoffset, yoffset = (xmax - xmin) / (2. * X.shape[0]), (ymax - ymin) / (2. * X.shape[1])
#xmin, xmax, ymin, ymax = extent = xmin-xoffset, xmax+xoffset, ymin-yoffset, ymax+yoffset
return ax.imshow(X, label=label, extent=extent, vmin=vmin, vmax=vmax, **imshow_kwargs)
def imshow_interact(self, ax, plot_function, extent, label=None, resolution=None, vmin=None, vmax=None, **imshow_kwargs):
if imshow_kwargs is None: imshow_kwargs = {}
if 'origin' not in imshow_kwargs:
imshow_kwargs['origin'] = 'lower'
return ImshowController(ax, plot_function, extent, resolution=resolution, vmin=vmin, vmax=vmax, **imshow_kwargs)
def annotation_heatmap(self, ax, X, annotation, extent=None, label=None, imshow_kwargs=None, **annotation_kwargs):
if imshow_kwargs is None: imshow_kwargs = {}
if 'origin' not in imshow_kwargs:
imshow_kwargs['origin'] = 'lower'
if ('ha' not in annotation_kwargs) and ('horizontalalignment' not in annotation_kwargs):
annotation_kwargs['ha'] = 'center'
if ('va' not in annotation_kwargs) and ('verticalalignment' not in annotation_kwargs):
annotation_kwargs['va'] = 'center'
imshow = self.imshow(ax, X, extent, label, **imshow_kwargs)
if extent is None:
extent = (0, X.shape[0], 0, X.shape[1])
xmin, xmax, ymin, ymax = extent
xoffset, yoffset = (xmax - xmin) / (2. * X.shape[0]), (ymax - ymin) / (2. * X.shape[1])
xlin = np.linspace(xmin, xmax, X.shape[0], endpoint=False)
ylin = np.linspace(ymin, ymax, X.shape[1], endpoint=False)
annotations = []
for [i, x], [j, y] in itertools.product(enumerate(xlin), enumerate(ylin)):
annotations.append(ax.text(x+xoffset, y+yoffset, "{}".format(annotation[j, i]), **annotation_kwargs))
return imshow, annotations
def annotation_heatmap_interact(self, ax, plot_function, extent, label=None, resolution=15, imshow_kwargs=None, **annotation_kwargs):
if imshow_kwargs is None: imshow_kwargs = {}
if 'origin' not in imshow_kwargs:
imshow_kwargs['origin'] = 'lower'
return ImAnnotateController(ax, plot_function, extent, resolution=resolution, imshow_kwargs=imshow_kwargs or {}, **annotation_kwargs)
def contour(self, ax, X, Y, C, levels=20, label=None, **kwargs):
return ax.contour(X, Y, C, levels=np.linspace(C.min(), C.max(), levels), label=label, **kwargs)
def surface(self, ax, X, Y, Z, color=None, label=None, **kwargs):
return ax.plot_surface(X, Y, Z, label=label, **kwargs)
def fill_between(self, ax, X, lower, upper, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
return ax.fill_between(X, lower, upper, facecolor=color, label=label, **kwargs)
def fill_gradient(self, canvas, X, percentiles, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
ax = canvas
plots = []
if 'edgecolors' not in kwargs:
kwargs['edgecolors'] = 'none'
if 'facecolors' in kwargs:
color = kwargs.pop('facecolors')
if 'array' in kwargs:
array = kwargs.pop('array')
else:
array = 1.-np.abs(np.linspace(-.97, .97, len(percentiles)-1))
if 'alpha' in kwargs:
alpha = kwargs.pop('alpha')
else:
alpha = .8
if 'cmap' in kwargs:
cmap = kwargs.pop('cmap')
else:
cmap = LinearSegmentedColormap.from_list('WhToColor', (color, color), N=array.size)
cmap._init()
cmap._lut[:-3, -1] = alpha*array
kwargs['facecolors'] = [cmap(i) for i in np.linspace(0,1,cmap.N)]
# pop where from kwargs
where = kwargs.pop('where') if 'where' in kwargs else None
# pop interpolate, which we actually do not do here!
if 'interpolate' in kwargs: kwargs.pop('interpolate')
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
#try:
# from itertools import izip as zip
#except ImportError:
# pass
a, b = tee(iterable)
next(b, None)
return zip(a, b)
polycol = []
for y1, y2 in pairwise(percentiles):
try:
from matplotlib.cbook import contiguous_regions
except ImportError:
from matplotlib.mlab import contiguous_regions
# Handle united data, such as dates
ax._process_unit_info(xdata=X, ydata=y1)
ax._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
from numpy import ma
x = ma.masked_invalid(ax.convert_xunits(X))
y1 = ma.masked_invalid(ax.convert_yunits(y1))
y2 = ma.masked_invalid(ax.convert_yunits(y2))
if y1.ndim == 0:
y1 = np.ones_like(x) * y1
if y2.ndim == 0:
y2 = np.ones_like(x) * y2
if where is None:
                where = np.ones(len(x), bool)
else:
                where = np.asarray(where, bool)
if not (x.shape == y1.shape == y2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
from functools import reduce
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
                p = np.zeros((2 * N + 2, 2), float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
p[0] = start
p[N + 1] = end
p[1:N + 1, 0] = xslice
p[1:N + 1, 1] = y1slice
p[N + 2:, 0] = xslice[::-1]
p[N + 2:, 1] = y2slice[::-1]
polys.append(p)
polycol.extend(polys)
from matplotlib.collections import PolyCollection
if 'zorder' not in kwargs:
kwargs['zorder'] = 0
plots.append(PolyCollection(polycol, label=label, **kwargs))
ax.add_collection(plots[-1], autolim=True)
ax.autoscale_view()
return plots
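# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the canvas workflow implied by the methods above:
# build a figure, request an axes via new_canvas, draw a curve and show it.
# The labels and data below are made up for the example.
def _example_matplotlib_plots():
    plots = MatplotlibPlots()
    fig = plots.figure(rows=1, cols=1)
    ax, _ = plots.new_canvas(figure=fig, xlabel='x', ylabel='sin(x)', title='demo')
    X = np.linspace(0, 2 * np.pi, 100)
    lines = plots.plot(ax, X, np.sin(X), label='sin')
    plots.add_to_canvas(ax, lines, legend=True)
    return plots.show_canvas(ax)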
|
|
"""Helpers for script and condition tracing."""
from __future__ import annotations
from collections import deque
from contextlib import contextmanager
from contextvars import ContextVar
from functools import wraps
from typing import Any, Callable, Deque, Generator, cast
from homeassistant.helpers.typing import TemplateVarsType
import homeassistant.util.dt as dt_util
class TraceElement:
"""Container for trace data."""
def __init__(self, variables: TemplateVarsType, path: str):
"""Container for trace data."""
self._child_key: tuple[str, str] | None = None
self._child_run_id: str | None = None
self._error: Exception | None = None
self.path: str = path
self._result: dict | None = None
self._timestamp = dt_util.utcnow()
if variables is None:
variables = {}
last_variables = variables_cv.get() or {}
variables_cv.set(dict(variables))
changed_variables = {
key: value
for key, value in variables.items()
if key not in last_variables or last_variables[key] != value
}
self._variables = changed_variables
def __repr__(self) -> str:
"""Container for trace data."""
return str(self.as_dict())
def set_child_id(self, child_key: tuple[str, str], child_run_id: str) -> None:
"""Set trace id of a nested script run."""
self._child_key = child_key
self._child_run_id = child_run_id
def set_error(self, ex: Exception) -> None:
"""Set error."""
self._error = ex
def set_result(self, **kwargs: Any) -> None:
"""Set result."""
self._result = {**kwargs}
def as_dict(self) -> dict[str, Any]:
"""Return dictionary version of this TraceElement."""
result: dict[str, Any] = {"path": self.path, "timestamp": self._timestamp}
if self._child_key is not None:
result["child_id"] = {
"domain": self._child_key[0],
"item_id": self._child_key[1],
"run_id": str(self._child_run_id),
}
if self._variables:
result["changed_variables"] = self._variables
if self._error is not None:
result["error"] = str(self._error)
if self._result is not None:
result["result"] = self._result
return result
# Context variables for tracing
# Current trace
trace_cv: ContextVar[dict[str, Deque[TraceElement]] | None] = ContextVar(
"trace_cv", default=None
)
# Stack of TraceElements
trace_stack_cv: ContextVar[list[TraceElement] | None] = ContextVar(
"trace_stack_cv", default=None
)
# Current location in config tree
trace_path_stack_cv: ContextVar[list[str] | None] = ContextVar(
"trace_path_stack_cv", default=None
)
# Copy of last variables
variables_cv: ContextVar[Any | None] = ContextVar("variables_cv", default=None)
# (domain, item_id) + Run ID
trace_id_cv: ContextVar[tuple[str, str] | None] = ContextVar(
"trace_id_cv", default=None
)
# Reason for stopped script execution
script_execution_cv: ContextVar[StopReason | None] = ContextVar(
"script_execution_cv", default=None
)
def trace_id_set(trace_id: tuple[str, str]) -> None:
"""Set id of the current trace."""
trace_id_cv.set(trace_id)
def trace_id_get() -> tuple[str, str] | None:
"""Get id if the current trace."""
return trace_id_cv.get()
def trace_stack_push(trace_stack_var: ContextVar, node: Any) -> None:
"""Push an element to the top of a trace stack."""
trace_stack = trace_stack_var.get()
if trace_stack is None:
trace_stack = []
trace_stack_var.set(trace_stack)
trace_stack.append(node)
def trace_stack_pop(trace_stack_var: ContextVar) -> None:
"""Remove the top element from a trace stack."""
trace_stack = trace_stack_var.get()
trace_stack.pop()
def trace_stack_top(trace_stack_var: ContextVar) -> Any | None:
"""Return the element at the top of a trace stack."""
trace_stack = trace_stack_var.get()
return trace_stack[-1] if trace_stack else None
def trace_path_push(suffix: str | list[str]) -> int:
"""Go deeper in the config tree."""
if isinstance(suffix, str):
suffix = [suffix]
for node in suffix:
trace_stack_push(trace_path_stack_cv, node)
return len(suffix)
def trace_path_pop(count: int) -> None:
"""Go n levels up in the config tree."""
for _ in range(count):
trace_stack_pop(trace_path_stack_cv)
def trace_path_get() -> str:
"""Return a string representing the current location in the config tree."""
path = trace_path_stack_cv.get()
if not path:
return ""
return "/".join(path)
def trace_append_element(
trace_element: TraceElement,
maxlen: int | None = None,
) -> None:
"""Append a TraceElement to trace[path]."""
path = trace_element.path
trace = trace_cv.get()
if trace is None:
trace = {}
trace_cv.set(trace)
if path not in trace:
trace[path] = deque(maxlen=maxlen)
trace[path].append(trace_element)
def trace_get(clear: bool = True) -> dict[str, Deque[TraceElement]] | None:
"""Return the current trace."""
if clear:
trace_clear()
return trace_cv.get()
def trace_clear() -> None:
"""Clear the trace."""
trace_cv.set({})
trace_stack_cv.set(None)
trace_path_stack_cv.set(None)
variables_cv.set(None)
script_execution_cv.set(StopReason())
def trace_set_child_id(child_key: tuple[str, str], child_run_id: str) -> None:
"""Set child trace_id of TraceElement at the top of the stack."""
node = cast(TraceElement, trace_stack_top(trace_stack_cv))
if node:
node.set_child_id(child_key, child_run_id)
def trace_set_result(**kwargs: Any) -> None:
"""Set the result of TraceElement at the top of the stack."""
node = cast(TraceElement, trace_stack_top(trace_stack_cv))
node.set_result(**kwargs)
class StopReason:
"""Mutable container class for script_execution."""
script_execution: str | None = None
def script_execution_set(reason: str) -> None:
"""Set stop reason."""
data = script_execution_cv.get()
if data is None:
return
data.script_execution = reason
def script_execution_get() -> str | None:
"""Return the current trace."""
data = script_execution_cv.get()
if data is None:
return None
return data.script_execution
@contextmanager
def trace_path(suffix: str | list[str]) -> Generator:
"""Go deeper in the config tree.
    Cannot be used as a decorator on coroutine functions.
"""
count = trace_path_push(suffix)
try:
yield
finally:
trace_path_pop(count)
def async_trace_path(suffix: str | list[str]) -> Callable:
"""Go deeper in the config tree.
To be used as a decorator on coroutine functions.
"""
def _trace_path_decorator(func: Callable) -> Callable:
"""Decorate a coroutine function."""
@wraps(func)
async def async_wrapper(*args: Any) -> None:
"""Catch and log exception."""
with trace_path(suffix):
await func(*args)
return async_wrapper
return _trace_path_decorator
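# --- Hedged usage sketch (not part of the original helpers) ---
# A minimal illustration, assuming a fresh context: push a config path, record
# one TraceElement under that path, then read the collected trace back.
def _example_trace_usage() -> None:
    """Record a single trace element and print the collected trace."""
    trace_clear()
    with trace_path(["action", "0"]):
        element = TraceElement({"demo": 1}, trace_path_get())
        element.set_result(ok=True)
        trace_append_element(element, maxlen=5)
    print(trace_get(clear=False))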
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles all requests to the conductor service."""
from oslo.config import cfg
from sahara.conductor import manager
from sahara.conductor import resource as r
from sahara.openstack.common import log as logging
conductor_opts = [
cfg.BoolOpt('use_local',
default=True,
help='Perform sahara-conductor operations locally.'),
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
def _get_id(obj):
"""Return object id.
    Allows usage of either an object or an object's ID as a parameter when
dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
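# Hedged illustration (not part of the original module): _get_id accepts either
# a plain ID or any object exposing an `id` attribute. The names below are made
# up for the example.
def _example_get_id():
    class _FakeCluster(object):
        id = 'abc-123'
    assert _get_id(_FakeCluster()) == 'abc-123'
    assert _get_id('abc-123') == 'abc-123'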
class LocalApi(object):
"""A local version of the conductor API.
It does database updates locally instead of via RPC.
"""
def __init__(self):
self._manager = manager.ConductorManager()
# Cluster ops
@r.wrap(r.ClusterResource)
def cluster_get(self, context, cluster):
"""Return the cluster or None if it does not exist."""
return self._manager.cluster_get(context, _get_id(cluster))
@r.wrap(r.ClusterResource)
def cluster_get_all(self, context, **kwargs):
"""Get all clusters filtered by **kwargs.
e.g. cluster_get_all(plugin_name='vanilla', hadoop_version='1.1')
"""
return self._manager.cluster_get_all(context, **kwargs)
@r.wrap(r.ClusterResource)
def cluster_create(self, context, values):
"""Create a cluster from the values dictionary.
:returns: the created cluster.
"""
return self._manager.cluster_create(context, values)
@r.wrap(r.ClusterResource)
def cluster_update(self, context, cluster, values):
"""Update the cluster with the given values dictionary.
:returns: the updated cluster.
"""
return self._manager.cluster_update(context, _get_id(cluster),
values)
def cluster_destroy(self, context, cluster):
"""Destroy the cluster or raise if it does not exist.
:returns: None.
"""
self._manager.cluster_destroy(context, _get_id(cluster))
# Node Group ops
def node_group_add(self, context, cluster, values):
"""Create a node group from the values dictionary.
:returns: ID of the created node group.
"""
return self._manager.node_group_add(context, _get_id(cluster), values)
def node_group_update(self, context, node_group, values):
"""Update the node group with the given values dictionary.
:returns: None.
"""
self._manager.node_group_update(context, _get_id(node_group), values)
def node_group_remove(self, context, node_group):
"""Destroy the node group or raise if it does not exist.
:returns: None.
"""
self._manager.node_group_remove(context, _get_id(node_group))
# Instance ops
def instance_add(self, context, node_group, values):
"""Create an instance from the values dictionary.
:returns: ID of the created instance.
"""
return self._manager.instance_add(context, _get_id(node_group), values)
def instance_update(self, context, instance, values):
"""Update the instance with the given values dictionary.
:returns: None.
"""
self._manager.instance_update(context, _get_id(instance), values)
def instance_remove(self, context, instance):
"""Destroy the instance or raise if it does not exist.
:returns: None.
"""
self._manager.instance_remove(context, _get_id(instance))
# Volumes ops
def append_volume(self, context, instance, volume_id):
"""Append volume_id to instance."""
self._manager.append_volume(context, _get_id(instance), volume_id)
def remove_volume(self, context, instance, volume_id):
"""Remove volume_id in instance."""
self._manager.remove_volume(context, _get_id(instance), volume_id)
# Cluster Template ops
@r.wrap(r.ClusterTemplateResource)
def cluster_template_get(self, context, cluster_template):
"""Return the cluster template or None if it does not exist."""
return self._manager.cluster_template_get(context,
_get_id(cluster_template))
@r.wrap(r.ClusterTemplateResource)
def cluster_template_get_all(self, context):
"""Get all cluster templates."""
return self._manager.cluster_template_get_all(context)
@r.wrap(r.ClusterTemplateResource)
def cluster_template_create(self, context, values):
"""Create a cluster template from the values dictionary.
:returns: the created cluster template
"""
return self._manager.cluster_template_create(context, values)
def cluster_template_destroy(self, context, cluster_template):
"""Destroy the cluster template or raise if it does not exist.
:returns: None
"""
self._manager.cluster_template_destroy(context,
_get_id(cluster_template))
# Node Group Template ops
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_get(self, context, node_group_template):
"""Return the node group template or None if it does not exist."""
return self._manager.node_group_template_get(
context, _get_id(node_group_template))
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_get_all(self, context):
"""Get all node group templates."""
return self._manager.node_group_template_get_all(context)
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_create(self, context, values):
"""Create a node group template from the values dictionary.
:returns: the created node group template
"""
return self._manager.node_group_template_create(context, values)
def node_group_template_destroy(self, context, node_group_template):
"""Destroy the node group template or raise if it does not exist.
:returns: None
"""
self._manager.node_group_template_destroy(context,
_get_id(node_group_template))
# Data Source ops
@r.wrap(r.DataSource)
def data_source_get(self, context, data_source):
"""Return the Data Source or None if it does not exist."""
return self._manager.data_source_get(context, _get_id(data_source))
@r.wrap(r.DataSource)
def data_source_get_all(self, context):
"""Get all Data Sources."""
return self._manager.data_source_get_all(context)
@r.wrap(r.DataSource)
def data_source_create(self, context, values):
"""Create a Data Source from the values dictionary."""
return self._manager.data_source_create(context, values)
def data_source_destroy(self, context, data_source):
"""Destroy the Data Source or raise if it does not exist."""
self._manager.data_source_destroy(context, _get_id(data_source))
# JobExecution ops
@r.wrap(r.JobExecution)
def job_execution_get(self, context, job_execution):
"""Return the JobExecution or None if it does not exist."""
return self._manager.job_execution_get(context,
_get_id(job_execution))
@r.wrap(r.JobExecution)
def job_execution_get_all(self, context, **kwargs):
"""Get all JobExecutions filtered by **kwargs.
e.g. job_execution_get_all(cluster_id=12, input_id=123)
"""
return self._manager.job_execution_get_all(context, **kwargs)
def job_execution_count(self, context, **kwargs):
"""Count number of JobExecutions filtered by **kwargs.
e.g. job_execution_count(cluster_id=12, input_id=123)
"""
return self._manager.job_execution_count(context, **kwargs)
@r.wrap(r.JobExecution)
def job_execution_create(self, context, values):
"""Create a JobExecution from the values dictionary."""
return self._manager.job_execution_create(context, values)
@r.wrap(r.JobExecution)
def job_execution_update(self, context, job_execution, values):
"""Update the JobExecution or raise if it does not exist."""
return self._manager.job_execution_update(context,
_get_id(job_execution),
values)
def job_execution_destroy(self, context, job_execution):
"""Destroy the JobExecution or raise if it does not exist."""
self._manager.job_execution_destroy(context, _get_id(job_execution))
# Job ops
@r.wrap(r.Job)
def job_get(self, context, job):
"""Return the Job or None if it does not exist."""
return self._manager.job_get(context, _get_id(job))
@r.wrap(r.Job)
def job_get_all(self, context):
"""Get all Jobs."""
return self._manager.job_get_all(context)
@r.wrap(r.Job)
def job_create(self, context, values):
"""Create a Job from the values dictionary."""
return self._manager.job_create(context, values)
def job_update(self, context, job, values):
"""Update the Job or raise if it does not exist."""
return self._manager.job_update(context, _get_id(job),
values)
def job_destroy(self, context, job):
"""Destroy the Job or raise if it does not exist."""
self._manager.job_destroy(context, _get_id(job))
def job_main_name(self, context, job):
"""Return the name of the first main JobBinary or None.
At present the 'mains' element is expected to contain a single element.
In the future if 'mains' contains more than one element we will need
a scheme or convention for retrieving a name from the list of binaries.
:param job: This is expected to be a Job object
"""
if job.mains:
binary = self.job_binary_get(context, job.mains[0])
if binary is not None:
return binary["name"]
return None
# JobBinary ops
@r.wrap(r.JobBinary)
def job_binary_get_all(self, context):
"""Get all JobBinarys."""
return self._manager.job_binary_get_all(context)
@r.wrap(r.JobBinary)
def job_binary_get(self, context, job_binary):
"""Return the JobBinary or None if it does not exist."""
return self._manager.job_binary_get(context, _get_id(job_binary))
@r.wrap(r.JobBinary)
def job_binary_create(self, context, values):
"""Create a JobBinary from the values dictionary."""
return self._manager.job_binary_create(context, values)
def job_binary_destroy(self, context, job_binary):
"""Destroy the JobBinary or raise if it does not exist."""
self._manager.job_binary_destroy(context, _get_id(job_binary))
# JobBinaryInternal ops
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_get_all(self, context):
"""Get all JobBinaryInternals."""
return self._manager.job_binary_internal_get_all(context)
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_get(self, context, job_binary_internal):
"""Return the JobBinaryInternal or None if it does not exist."""
return self._manager.job_binary_internal_get(
context,
_get_id(job_binary_internal))
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_create(self, context, values):
"""Create a JobBinaryInternal from the values dictionary."""
return self._manager.job_binary_internal_create(context, values)
def job_binary_internal_destroy(self, context, job_binary_internal_id):
"""Destroy the JobBinaryInternal or raise if it does not exist."""
self._manager.job_binary_internal_destroy(
context,
_get_id(job_binary_internal_id))
def job_binary_internal_get_raw_data(self, context,
job_binary_internal_id):
"""Return the binary data field from a JobBinaryInternal."""
return self._manager.job_binary_internal_get_raw_data(
context,
job_binary_internal_id)
class RemoteApi(LocalApi):
"""Conductor API that does updates via RPC to the ConductorManager."""
# TODO(slukjanov): it should override _manager and only necessary functions
|
|
# Python 2 compatibility.
from __future__ import print_function
import numpy
import scipy.optimize
import warnings
# Same as scipy.optimize.
_epsilon = numpy.sqrt(numpy.finfo(float).eps)
class Balance():
"""
Base class for balancing. Not intended to be used directly.
Subclasses should extend either NonSymmetricBalance or SymmetricBalance.
They also need to implement handicap_function, and optionally row_derivative and col_derivative
(Or just row_derivative for SymmetricBalance.)
"""
"""
Methods and attributes to be set by subclasses:
"""
def handicap_function(self, h_r, h_c):
"""
Mandatory to override.
Args:
h_r, h_c: The canonical row and column handicaps.
Returns:
        The canonical payoff matrix.
        Each element of the canonical payoff matrix should depend only on the corresponding canonical row and column handicap.
It is highly desirable that the function be strictly monotonically decreasing
in row_handicap and strictly monotonically increasing in col_handicap for every element.
NOTE: In the symmetric case the function should also have the property that
handicap_function(h_r, h_c) = -handicap_function(h_c, h_r) + value of the game
where the value of the game is constant.
This means that for any setting of h_r, h_c the payoff matrix is skew-symmetric plus the value of the game.
In particular, all diagonal elements should be equal to the value of the game.
"""
raise NotImplementedError("Balance subclasses must implement a handicap_function.")
def row_derivative(self, h_r, h_c):
"""
Optional to override. Defaults to a finite-difference implementation.
Returns: the derivative of the payoff matrix with respect to the row h.
"""
return self.row_derivative_fd(h_r, h_c, _epsilon)
def col_derivative(self, h_r, h_c):
"""
Optional to override. Defaults to a finite-difference implementation.
Returns: the derivative of the payoff matrix with respect to the column h.
"""
return self.col_derivative_fd(h_r, h_c, _epsilon)
rectify_mask = False
"""
Optional to override. Set to False for no rectification, True to rectify all variables, or use a mask to rectify only some.
By default the canonical handicaps have the range (-inf, inf).
However, depending on the handicap function, it may only make sense to have
strictly positive canonical handicaps in the range (0, inf).
To do this we can use a strictly monotonically increasing rectifier function
to map from the raw optimization values x to the canonical handicaps.
    The default is a piecewise combination of a reciprocal and a linear function.
"""
"""
Most handicap functions have some redundant dimension over which the payoffs are constant.
The regularizer introduces additional term(s) to the optimization in order to choose a specific solution.
For most common handicap functions, including all one-parameter handicap functions,
all the solutions have the same payoff matrix, so the regularizer effectively just selects one of them.
However, this is not necessarily the case for two-parameter handicap functions.
("One-" and "two-parameter" are defined in the paper.)
"""
regularizer_x_weight = 1.0
"""
The weight to assign to the regularizer on the optimization variables x. Set to 0.0 to disable.
"""
def regularizer_x(self, x):
"""
Optional to override.
regularizer_x uses the raw optimization variables x.
We default regularizer_x to summing x.
Should return a 1-D array.
"""
return numpy.sum(x, keepdims = True)
def regularizer_x_jacobian(self, x):
"""
Jacobian of regularizer_x().
"""
return numpy.ones((1, x.size))
def decanonicalize_handicaps(self, h):
"""
In some cases the problem may be transformed into some canonical form before solving it.
Subclasses override this method to transform the handicap back into a form corresponding to the problem statement.
"""
handicaps = h
return handicaps
def decanonicalize_payoffs(self, p):
"""
In some cases the problem may be transformed into some canonical form before solving it.
Subclasses override this method to transform payoffs back into a form corresponding to the problem statement.
This should be a linear function, since expected payoffs are a linear combination of individual payoffs.
"""
payoffs = p
return payoffs
"""
Common methods.
"""
def optimize(self, x0 = None, method = 'lm', use_jacobian = True, check_derivative = False, check_jacobian = False, *args, **kwargs):
"""
Solves the balance problem using scipy.optimize.root.
Args:
x0: Starting point of the optimization. Defaults to a zero vector.
method: Optimization method to be used by scipy.optimize.root.
We default to 'lm' since it seems to produce the best results empirically.
use_jacobian: If set to true, the Jacobian will be computed from the row and column derivatives,
instead of using scipy.optimize.root's default Jacobian.
check_derivative, check_jacobian:
Can be used to check the provided row_derivative, col_derivative
against a finite difference approximation with the provided epsilon.
A value of True uses a default value for epsilon.
*args, **kwargs: Passed to scipy.optimize.root.
In particular you may want to consider changing the solver method
if the default is not producing good results.
Returns:
The result of scipy.optimize.root, with the following additional values:
result.row_handicaps: The solved decanonicalized row handicaps.
(Canonical: result.h_r)
            result.col_handicaps: The solved decanonicalized column handicaps.
(Canonical: result.h_c)
result.payoff_matrix: The solved decanonicalized payoff matrix.
(Canonical: result.F)
"""
if x0 is None:
x0 = numpy.zeros((self.handicap_count))
if check_derivative is True:
check_derivative = _epsilon
if check_jacobian is True:
check_jacobian = _epsilon
def fun(x):
"""
The objective function in terms of the raw optimization variables.
"""
h = self.rectify_masked(x)
y = self.objective(h)
if check_derivative:
h_r, h_c = self.split_handicaps(h)
self.check_row_derivative(h_r, h_c, epsilon = check_derivative)
self.check_col_derivative(h_r, h_c, epsilon = check_derivative)
if check_jacobian:
self.check_jacobian(h, epsilon = check_jacobian)
if self.regularizer_x_weight > 0.0:
r = self.regularizer_x(x) * self.regularizer_x_weight
y = numpy.concatenate((y, r), axis = 0)
return y
if use_jacobian:
def jac(x):
"""
Jacobian of the objective function.
"""
h = self.rectify_masked(x)
J = self.jacobian(h)
J = J * self.rectify_masked_derivative(x)[None, :]
if self.regularizer_x_weight > 0.0:
Jr = self.regularizer_x_jacobian(x) * self.regularizer_x_weight
J = numpy.concatenate((J, Jr), axis = 0)
return J
else:
jac = None
result = scipy.optimize.root(fun = fun, x0 = x0, jac = jac, method = method, *args, **kwargs)
result.h = self.rectify_masked(result.x)
result.h_r, result.h_c = self.split_handicaps(result.h)
result.F = self.handicap_function(result.h_r, result.h_c)
# Decanonicalize the canonical handicaps into the final values.
result.handicaps = self.decanonicalize_handicaps(result.h)
result.payoff_matrix = self.decanonicalize_payoffs(result.F)
result.row_handicaps, result.col_handicaps = self.split_handicaps(result.h)
return result
"""
Methods for checking derivatives and Jacobians.
"""
def jacobian_fd(self, h, epsilon):
""" Computes a finite (central) difference approximation of the Jacobian. """
J = numpy.zeros((self.handicap_count, self.handicap_count))
for input_index in range(self.handicap_count):
hdp = h.copy()
hdp[input_index] += epsilon * 0.5
hdn = h.copy()
hdn[input_index] -= epsilon * 0.5
J[:, input_index] = (self.objective(hdp) - self.objective(hdn)) / epsilon
return J
def check_jacobian(self, h, epsilon):
"""
Checks the Jacobian computed from the handicap function derivatives
against a finite difference approximation.
"""
result = self.jacobian(h) - self.jacobian_fd(h, epsilon)
print('Maximum difference between evaluated Jacobian and finite difference:',
numpy.max(numpy.abs(result)))
return result
def row_derivative_fd(self, h_r, h_c, epsilon):
"""
Computes a finite (central) difference approximation of derivative of the handicap function
with respect to the corresponding row handicap.
"""
h_r_N = h_r - epsilon * 0.5
h_r_P = h_r + epsilon * 0.5
return (self.handicap_function(h_r_P, h_c) - self.handicap_function(h_r_N, h_c)) / epsilon
def col_derivative_fd(self, h_r, h_c, epsilon):
"""
Computes a finite (central) difference approximation of derivative of the handicap function
with respect to the corresponding column handicap.
"""
h_c_N = h_c - epsilon * 0.5
h_c_P = h_c + epsilon * 0.5
return (self.handicap_function(h_r, h_c_P) - self.handicap_function(h_r, h_c_N)) / epsilon
def check_row_derivative(self, h_r, h_c, epsilon):
"""
Checks the derivative of the handicap function with respect to the corresponding row handicap
against a finite difference approximation.
Also checks that all row derivatives are negative.
"""
direct = self.row_derivative(h_r, h_c)
fd = self.row_derivative_fd(h_r, h_c, epsilon)
if numpy.any(direct >= 0.0) or numpy.any(fd >= 0.0):
msg = 'Found a non-negative row derivative for\nh_r = %s\nh_c = %s.' % (h_r, h_c)
msg += '\nIt is highly desirable that the handicap function be strictly monotonically decreasing in the row handicap.'
warnings.warn(msg, DerivativeWarning)
result = direct - fd
print('Maximum difference between evaluated row_derivative and finite difference:',
numpy.max(numpy.abs(result)))
return result
def check_col_derivative(self, h_r, h_c, epsilon):
"""
Checks the derivative of the handicap function with respect to the corresponding column handicap
against a finite difference approximation.
        Also checks that all column derivatives are positive.
"""
direct = self.col_derivative(h_r, h_c)
fd = self.col_derivative_fd(h_r, h_c, epsilon)
if numpy.any(direct <= 0.0) or numpy.any(fd <= 0.0):
msg = 'Found a non-positive column derivative for\nh_r = %s\nh_c = %s.' % (h_r, h_c)
msg += '\nIt is highly desirable that the handicap function be strictly monotonically increasing in the column handicap.'
warnings.warn(msg, DerivativeWarning)
result = direct - fd
print('Maximum difference between evaluated col_derivative and finite difference:',
numpy.max(numpy.abs(result)))
return result
"""
Rectification details.
"""
def rectify_masked(self, x):
"""
Rectifies only the variables x that are flagged in self.rectify_mask.
"""
if self.rectify_mask is False: return x.copy()
elif self.rectify_mask is True: return self.rectify(x)
else:
result = x.copy()
result[self.rectify_mask] = self.rectify(x)
return result
def rectify_masked_derivative(self, x):
"""
Derivative of rectify_masked().
"""
if self.rectify_mask is False: return numpy.ones_like(x)
elif self.rectify_mask is True: return self.rectify_derivative(x)
else:
result = numpy.ones_like(x)
result[self.rectify_mask] = self.rectify_derivative(x)
return result
def rectify(self, x):
mask = x >= 0.0
result = numpy.zeros_like(x)
result[mask] = x[mask] + 1.0
result[~mask] = 1.0 / (1.0 - x[~mask])
return result
def rectify_derivative(self, x):
mask = x >= 0.0
result = numpy.ones_like(x)
result[~mask] = 1.0 / (1.0 - x[~mask]) / (1.0 - x[~mask])
return result
def rectify_derivative_fd(self, x, epsilon):
return (self.rectify(x + epsilon * 0.5) - self.rectify(x - epsilon * 0.5)) / epsilon
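# Hedged sanity-check sketch (not part of the original module): the default
# rectifier maps (-inf, inf) onto (0, inf), linear above 0 and reciprocal below.
def _example_rectifier_check():
    b = Balance()
    x = numpy.array([-2.0, 0.0, 3.0])
    print(b.rectify(x))             # expected: [1/3, 1.0, 4.0]
    print(b.rectify_derivative(x))  # expected: [1/9, 1.0, 1.0]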
class NonSymmetricBalance(Balance):
"""
This version of Balance for non-symmetric games, where each player is choosing
from an independent set of strategies.
"""
def __init__(self, row_weights, col_weights, value = 0.0):
"""
Args:
row_weights, col_weights: Defines the desired Nash equilibrium
in terms of row and column strategy probability weights.
If only an integer is specified, a uniform distribution will be used.
Weights will be normalized.
value: The desired value of the resulting game.
This is equal to the row player's payoff and the negative of the column player's payoff.
"""
self.row_count, self.row_weights, self.row_objective_weights = _process_weights(row_weights)
self.col_count, self.col_weights, self.col_objective_weights = _process_weights(col_weights)
self.handicap_count = self.row_count + self.col_count
self.value = value
self.weights = numpy.concatenate((self.row_weights, self.col_weights))
def split_handicaps(self, h):
""" Splits handicaps (canonical or not) into row and col handicaps."""
return h[:self.row_count], h[-self.col_count:]
def objective(self, h):
"""
Compute the objective vector, which is desired to be zero.
This is the expected payoff of each strategy for that player, times the weight of that strategy.
        In order to balance them at the edge of being played, zero-weighted strategies are given a small nonzero objective weight (1.0 / number of strategies).
This works since they do not affect the expected payoff of other strategies.
"""
h_r, h_c = self.split_handicaps(h)
F = self.handicap_function(h_r, h_c)
# Dot products are weighted.
row_objectives = (numpy.tensordot(F, self.col_weights, axes = ([1], [0])) - self.value) * self.row_objective_weights
col_objectives = (self.value - numpy.tensordot(F, self.row_weights, axes = ([0], [0]))) * self.col_objective_weights
return numpy.concatenate((row_objectives, col_objectives))
def jacobian(self, h):
""" Compute the Jacobian of the objective using the provided canonical handicaps h. """
h_r, h_c = self.split_handicaps(h)
# J_ij = derivative of payoff i with respect to handicap j.
dFr = self.row_derivative(h_r, h_c)
dFc = self.col_derivative(h_r, h_c)
# Derivative of row payoffs with respect to row h.
Jrr = numpy.tensordot(dFr, self.col_weights, axes = ([1], [0])) * self.row_objective_weights
Jrr = numpy.diag(Jrr)
# Derivative of col payoffs with respect to col h.
Jcc = -numpy.tensordot(dFc, self.row_weights, axes = ([0], [0])) * self.col_objective_weights
Jcc = numpy.diag(Jcc)
# Derivative of row payoffs with respect to col h.
Jrc = dFc * self.col_weights[None, :] * self.row_objective_weights[:, None]
# Derivative of col payoffs with respect to row h.
Jcr = -dFr * self.row_weights[:, None] * self.col_objective_weights[None, :]
Jcr = numpy.transpose(Jcr)
# Assemble full Jacobian.
J = numpy.block([[Jrr, Jrc],
[Jcr, Jcc]])
return J
class SymmetricBalance(Balance):
def __init__(self, strategy_weights, value = None):
"""
This version of Balance for symmetric games,
where both players are choosing from the same set of strategies.
Thus there are no independent inputs for column strategies.
Args:
strategy_weights: Defines the desired Nash equilibrium in terms of strategy probability weights.
If only an integer is specified, a uniform distribution will be used.
value: Value of the game.
If not supplied it will be set automatically based on the diagonal elements
when the payoff matrix is first evaluated.
"""
self.handicap_count, self.strategy_weights, self.strategy_objective_weights = _process_weights(strategy_weights)
self.row_count = self.handicap_count
self.col_count = self.handicap_count
self.weights = self.strategy_weights
self.row_weights = self.strategy_weights
self.col_weights = self.strategy_weights
self.value = value
def split_handicaps(self, h):
""" Splits handicaps (canonical or not) into row and col handicaps."""
return h, h
def objective(self, h):
"""
Compute the objective vector, which is desired to be zero.
        This is the expected payoff of each strategy for that player, times the weight of that strategy.
        In order to balance them at the edge of being played, zero-weighted strategies are given a small nonzero objective weight (1.0 / number of strategies).
This works since they do not affect the expected payoff of other strategies.
"""
h_r, h_c = self.split_handicaps(h)
F = self.handicap_function(h_r, h_c)
if self.value is None:
self.value = numpy.average(numpy.diag(F), weights = self.strategy_weights)
# Dot products are weighted.
objectives = (numpy.tensordot(F, self.strategy_weights, axes = ([1], [0])) - self.value) * self.strategy_objective_weights
return objectives
def jacobian(self, h):
""" Compute the Jacobian of the objective using self.row_derivative. """
h_r, h_c = self.split_handicaps(h)
dFr = self.row_derivative(h_r, h_c)
# Derivative of row payoffs with respect to row h.
Jrr = numpy.tensordot(dFr, self.strategy_weights, axes = ([1], [0])) * self.strategy_objective_weights
Jrr = numpy.diag(Jrr)
# Derivative of row payoffs with respect to col h.
dFc = -numpy.transpose(dFr)
Jrc = dFc * self.strategy_weights[None, :] * self.strategy_objective_weights[:, None]
# Variables change both row and col h at the same time, so Jacobian is the sum of their effects.
J = Jrr + Jrc
return J
def col_derivative(self, h_r, h_c):
""" Using the skew-symmetry property. """
return -self.row_derivative(h_r, h_c).transpose()
class DerivativeWarning(RuntimeWarning):
pass
class ValueWarning(RuntimeWarning):
pass
def _process_weights(arg):
"""
Helper function for processing weight arguments.
Args:
arg: Either:
An integer, in which case the weights will be uniform over that many strategies.
A weight distribution, which will be normalized to sum to 1.
Returns:
count: The number of weights/strategies.
weights: The (normalized) weights.
objective_weights: As weights, but 0-weights are replaced by 1.
Used for weighting the objective function.
Raises:
ValueError: If weights sum to 0, or any of the weights are negative.
"""
    try:
        weights = arg.copy()
        count = weights.size
    except AttributeError:
        # arg is an integer strategy count; use a uniform distribution.
        weights = numpy.ones(arg) / arg
        count = arg
weight_sum = numpy.sum(weights)
if weight_sum == 0.0:
raise ValueError('Weights sum to 0.')
if numpy.any(weights < 0.0):
raise ValueError('Received negative weight(s).')
weights = weights / weight_sum
# Replace zeros with 1.0 / weights.size for purposes of weighting the objective vector.
objective_weights = weights.copy()
objective_weights[objective_weights == 0.0] = 1.0 / weights.size
return count, weights, objective_weights
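# --- Hedged usage sketch (not part of the original module) ---
# A minimal "one-parameter" handicap function F[i, j] = h_c[j] - h_r[i]: it is
# strictly decreasing in the row handicap and strictly increasing in the column
# handicap, as required above. The class and weights below are made up for the
# example; the analytic derivatives simply override the finite-difference defaults.
class _ExampleDifferenceBalance(NonSymmetricBalance):
    def handicap_function(self, h_r, h_c):
        # Broadcast the canonical handicaps to a (row_count, col_count) matrix.
        return h_c[None, :] - h_r[:, None]
    def row_derivative(self, h_r, h_c):
        # dF / dh_r is -1 for every element.
        return -numpy.ones((h_r.size, h_c.size))
    def col_derivative(self, h_r, h_c):
        # dF / dh_c is +1 for every element.
        return numpy.ones((h_r.size, h_c.size))
def _example_nonsymmetric_balance():
    balance = _ExampleDifferenceBalance(row_weights=3, col_weights=4, value=0.0)
    result = balance.optimize()
    print('handicaps:', result.handicaps)
    print('payoff matrix:')
    print(result.payoff_matrix)
    return result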
|
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import os
import sys
import urlparse
from hooks import install
from paste import fileapp
from paste import httpserver
import webapp2
from webapp2 import Route, RedirectHandler
from dashboard_build import dashboard_dev_server_config
from tracing_build import tracing_dev_server_config
from netlog_viewer_build import netlog_viewer_dev_server_config
_MAIN_HTML = """<html><body>
<h1>Run Unit Tests</h1>
<ul>
%s
</ul>
<h1>Quick links</h1>
<ul>
%s
</ul>
</body></html>
"""
_QUICK_LINKS = [
('Trace File Viewer',
'/tracing_examples/trace_viewer.html'),
('Metrics debugger',
'/tracing_examples/metrics_debugger.html'),
]
_LINK_ITEM = '<li><a href="%s">%s</a></li>'
def _GetFilesIn(basedir):
data_files = []
for dirpath, dirnames, filenames in os.walk(basedir, followlinks=True):
new_dirnames = [d for d in dirnames if not d.startswith('.')]
del dirnames[:]
dirnames += new_dirnames
for f in filenames:
if f.startswith('.'):
continue
if f == 'README.md':
continue
full_f = os.path.join(dirpath, f)
rel_f = os.path.relpath(full_f, basedir)
data_files.append(rel_f)
data_files.sort()
return data_files
def _RelPathToUnixPath(p):
return p.replace(os.sep, '/')
class TestResultHandler(webapp2.RequestHandler):
def post(self, *args, **kwargs): # pylint: disable=unused-argument
msg = self.request.body
ostream = sys.stdout if 'PASSED' in msg else sys.stderr
ostream.write(msg + '\n')
return self.response.write('')
class TestsCompletedHandler(webapp2.RequestHandler):
def post(self, *args, **kwargs): # pylint: disable=unused-argument
msg = self.request.body
sys.stdout.write(msg + '\n')
exit_code = 0 if 'ALL_PASSED' in msg else 1
if hasattr(self.app.server, 'please_exit'):
self.app.server.please_exit(exit_code)
return self.response.write('')
class TestsErrorHandler(webapp2.RequestHandler):
def post(self, *args, **kwargs):
del args, kwargs
msg = self.request.body
sys.stderr.write(msg + '\n')
exit_code = 1
if hasattr(self.app.server, 'please_exit'):
self.app.server.please_exit(exit_code)
return self.response.write('')
class DirectoryListingHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
source_path = kwargs.pop('_source_path', None)
mapped_path = kwargs.pop('_mapped_path', None)
assert mapped_path.endswith('/')
data_files_relative_to_top = _GetFilesIn(source_path)
data_files = [mapped_path + x
for x in data_files_relative_to_top]
files_as_json = json.dumps(data_files)
self.response.content_type = 'application/json'
return self.response.write(files_as_json)
class FileAppWithGZipHandling(fileapp.FileApp):
def guess_type(self):
content_type, content_encoding = \
super(FileAppWithGZipHandling, self).guess_type()
if not self.filename.endswith('.gz'):
return content_type, content_encoding
# By default, FileApp serves gzip files as their underlying type with
# Content-Encoding of gzip. That causes them to show up on the client
# decompressed. That ends up being surprising to our xhr.html system.
return None, None
class SourcePathsHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
source_paths = kwargs.pop('_source_paths', [])
path = self.request.path
# Walk the configured source paths in order and serve the first file that
# matches the request path. This lookup scheme is odd, but it is how serving
# has worked since the project began.
for mapped_path in source_paths:
rel = os.path.relpath(path, '/')
candidate = os.path.join(mapped_path, rel)
if os.path.exists(candidate):
app = FileAppWithGZipHandling(candidate)
app.cache_control(no_cache=True)
return app
self.abort(404)
@staticmethod
def GetServingPathForAbsFilename(source_paths, filename):
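# Maps an absolute filename back to its server-relative path. For example
# (hypothetical values), with source_paths == ['/src/tracing'], the filename
# '/src/tracing/ui/view.html' maps to 'ui/view.html'; a file outside every
# source path yields None.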
if not os.path.isabs(filename):
raise Exception('filename must be an absolute path')
for mapped_path in source_paths:
if not filename.startswith(mapped_path):
continue
rel = os.path.relpath(filename, mapped_path)
unix_rel = _RelPathToUnixPath(rel)
return unix_rel
return None
class SimpleDirectoryHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
top_path = os.path.abspath(kwargs.pop('_top_path', None))
if not top_path.endswith(os.path.sep):
top_path += os.path.sep
joined_path = os.path.abspath(
os.path.join(top_path, kwargs.pop('rest_of_path')))
if not joined_path.startswith(top_path):
self.response.set_status(403)
return
app = FileAppWithGZipHandling(joined_path)
app.cache_control(no_cache=True)
return app
class TestOverviewHandler(webapp2.RequestHandler):
def get(self, *args, **kwargs): # pylint: disable=unused-argument
test_links = []
for name, path in kwargs.pop('pds').iteritems():
test_links.append(_LINK_ITEM % (path, name))
quick_links = []
for name, path in _QUICK_LINKS:
quick_links.append(_LINK_ITEM % (path, name))
self.response.out.write(_MAIN_HTML % ('\n'.join(test_links),
'\n'.join(quick_links)))
class DevServerApp(webapp2.WSGIApplication):
def __init__(self, pds, args):
super(DevServerApp, self).__init__(debug=True)
self.pds = pds
self._server = None
self._all_source_paths = []
self._all_mapped_test_data_paths = []
self._InitFromArgs(args)
@property
def server(self):
return self._server
@server.setter
def server(self, server):
self._server = server
def _InitFromArgs(self, args):
default_tests = dict((pd.GetName(), pd.GetRunUnitTestsUrl())
for pd in self.pds)
routes = [
Route('/tests.html', TestOverviewHandler,
defaults={'pds': default_tests}),
Route('', RedirectHandler, defaults={'_uri': '/tests.html'}),
Route('/', RedirectHandler, defaults={'_uri': '/tests.html'}),
]
for pd in self.pds:
routes += pd.GetRoutes(args)
routes += [
Route('/%s/notify_test_result' % pd.GetName(),
TestResultHandler),
Route('/%s/notify_tests_completed' % pd.GetName(),
TestsCompletedHandler),
Route('/%s/notify_test_error' % pd.GetName(),
TestsErrorHandler)
]
for pd in self.pds:
# Test data system.
for mapped_path, source_path in pd.GetTestDataPaths(args):
self._all_mapped_test_data_paths.append((mapped_path, source_path))
routes.append(Route('%s__file_list__' % mapped_path,
DirectoryListingHandler,
defaults={
'_source_path': source_path,
'_mapped_path': mapped_path
}))
routes.append(Route('%s<rest_of_path:.+>' % mapped_path,
SimpleDirectoryHandler,
defaults={'_top_path': source_path}))
# This must go last, because it is a catch-all.
#
# It is awkward that the root path has to be added here; the long-term fix is
# to move away from the multi-source-path scheme entirely.
for pd in self.pds:
self._all_source_paths += pd.GetSourcePaths(args)
routes.append(
Route('/<:.+>', SourcePathsHandler,
defaults={'_source_paths': self._all_source_paths}))
for route in routes:
self.router.add(route)
def GetAbsFilenameForHref(self, href):
for source_path in self._all_source_paths:
full_source_path = os.path.abspath(source_path)
expanded_href_path = os.path.abspath(os.path.join(full_source_path,
href.lstrip('/')))
if (os.path.exists(expanded_href_path) and
os.path.commonprefix([full_source_path,
expanded_href_path]) == full_source_path):
return expanded_href_path
return None
def GetURLForAbsFilename(self, filename):
assert self.server is not None
for mapped_path, source_path in self._all_mapped_test_data_paths:
if not filename.startswith(source_path):
continue
rel = os.path.relpath(filename, source_path)
unix_rel = _RelPathToUnixPath(rel)
url = urlparse.urljoin(mapped_path, unix_rel)
return url
path = SourcePathsHandler.GetServingPathForAbsFilename(
self._all_source_paths, filename)
if path is None:
return None
return urlparse.urljoin('/', path)
def _AddPleaseExitMixinToServer(server):
# Shutting down httpserver gracefully and yielding a return code requires
# a bit of mixin code.
exit_code_attempt = []
def PleaseExit(exit_code):
if len(exit_code_attempt) > 0:
return
exit_code_attempt.append(exit_code)
server.running = False
real_serve_forever = server.serve_forever
def ServeForever():
try:
real_serve_forever()
except KeyboardInterrupt:
# allow CTRL+C to shutdown
return 255
print("Exiting dev server")
if len(exit_code_attempt) == 1:
return exit_code_attempt[0]
# serve_forever returned for some reason other than a please_exit call.
return 0
server.please_exit = PleaseExit
server.serve_forever = ServeForever
def _AddCommandLineArguments(pds, argv):
parser = argparse.ArgumentParser(description='Run development server')
parser.add_argument(
'--no-install-hooks', dest='install_hooks', action='store_false')
parser.add_argument('-p', '--port', default=8003, type=int)
for pd in pds:
g = parser.add_argument_group(pd.GetName())
pd.AddOptionstToArgParseGroup(g)
args = parser.parse_args(args=argv[1:])
return args
def Main(argv):
pds = [
dashboard_dev_server_config.DashboardDevServerConfig(),
tracing_dev_server_config.TracingDevServerConfig(),
netlog_viewer_dev_server_config.NetlogViewerDevServerConfig(),
]
args = _AddCommandLineArguments(pds, argv)
if args.install_hooks:
install.InstallHooks()
app = DevServerApp(pds, args=args)
server = httpserver.serve(app, host='127.0.0.1', port=args.port,
start_loop=False, daemon_threads=True)
_AddPleaseExitMixinToServer(server)
# pylint: disable=no-member
server.urlbase = 'http://127.0.0.1:%i' % server.server_port
app.server = server
sys.stderr.write('Now running on %s\n' % server.urlbase)
return server.serve_forever()
|
|
import __builtin__
import contextlib
import os
import unittest
import shutil
import tempfile
import StringIO
from pywatchman import bser, WatchmanError
from typing import Sequence
from .buck import BuildFileProcessor, Diagnostic, add_rule, process_with_diagnostics
def foo_rule(name, srcs=[], visibility=[], build_env=None):
"""A dummy build rule."""
add_rule({
'buck.type': 'foo',
'name': name,
'srcs': srcs,
'visibility': visibility,
}, build_env)
def extract_from_results(name, results):
for result in results:
if result.keys() == [name]:
return result[name]
raise ValueError(str(results))
def get_includes_from_results(results):
return extract_from_results('__includes', results)
def get_config_from_results(results):
return extract_from_results('__configs', results)
def get_env_from_results(results):
return extract_from_results('__env', results)
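# Schematic shape of a processor result list (illustrative only): rule dicts are
# followed by single-key metadata entries, e.g.
#   [{'buck.type': 'foo', 'name': 'bar', ...},
#    {'__includes': [...]}, {'__configs': {...}}, {'__env': {...}}]
# The helpers above pull out exactly one such metadata entry by key.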
def setenv(varname, value=None):
if value is None:
os.environ.pop(varname, None)
else:
os.environ[varname] = value
@contextlib.contextmanager
def with_env(varname, value=None):
saved = os.environ.get(varname)
setenv(varname, value)
try:
yield
finally:
setenv(varname, saved)
@contextlib.contextmanager
def with_envs(envs):
with contextlib.nested(*[with_env(n, v) for n, v in envs.iteritems()]):
yield
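# Usage sketch: `with with_envs({'TEST1': 'foo', 'TEST2': None}): ...` sets TEST1
# and unsets TEST2 for the duration of the block, restoring both on exit.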
class ProjectFile(object):
def __init__(self, root, path, contents):
# type: (str, str, Sequence[str]) -> None
"""Record of a file that can be written to disk.
:param root: project root directory that the path is relative to.
:param path: path to write the file, relative to the root.
:param contents: lines of file content.
"""
self.path = path
self.name = '//{0}'.format(path)
self.root = root
self.prefix = None
if isinstance(contents, (tuple, list)):
contents = os.linesep.join(contents) + os.linesep
self.contents = contents
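# Example: ProjectFile(root, path='BUCK', contents=('foo_rule(name="x", srcs=[])',))
# records a one-line build file whose lines are joined with os.linesep when written.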
class BuckTest(unittest.TestCase):
def setUp(self):
self.project_root = tempfile.mkdtemp()
self.allow_empty_globs = False
self.build_file_name = 'BUCK'
self.watchman_client = None
self.project_import_whitelist = None
def tearDown(self):
shutil.rmtree(self.project_root, True)
def write_file(self, pfile):
# type: (ProjectFile) -> None
with open(os.path.join(self.project_root, pfile.path), 'w') as f:
f.write(pfile.contents)
def write_files(self, *pfiles):
# type: (*ProjectFile) -> None
for pfile in pfiles:
self.write_file(pfile)
def create_build_file_processor(self, cell_roots=None, includes=None, **kwargs):
return BuildFileProcessor(
self.project_root,
cell_roots or {},
self.build_file_name,
self.allow_empty_globs,
False, # ignore_buck_autodeps_files
False, # no_autodeps_signatures
self.watchman_client,
False, # watchman_glob_stat_results
False, # watchman_use_glob_generator
False, # use_mercurial_glob
self.project_import_whitelist,
includes or [],
**kwargs)
def test_sibling_includes_use_separate_globals(self):
"""
Test that consecutive includes can't see each other's globals.
If a build file includes two include defs, one after another, verify
that the first's globals don't pollute the second's (e.g. the second
cannot implicitly reference globals from the first without including
it itself).
"""
# Setup the includes defs. The first one defines a variable that the
# second one (incorrectly) implicitly references.
include_def1 = ProjectFile(self.project_root, path='inc_def1', contents=('FOO = 1',))
include_def2 = ProjectFile(self.project_root, path='inc_def2', contents=('BAR = FOO',))
self.write_files(include_def1, include_def2)
# Construct a processor using the above as default includes, and verify
# that the second one can't use the first's globals.
build_file = ProjectFile(self.project_root, path='BUCK', contents='')
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
includes=[include_def1.name, include_def2.name])
self.assertRaises(
NameError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, set())
# Construct a processor with no default includes, have a generated
# build file include the include defs one after another, and verify
# that the second one can't use the first's globals.
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def1.name),
'include_defs({0!r})'.format(include_def2.name),
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
self.assertRaises(
NameError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, set())
def test_lazy_include_defs(self):
"""
Tests bug reported in https://github.com/facebook/buck/issues/182.
If an include def references another include def via a lazy include_defs
call inside some defined function, verify that it can correctly access the
latter's globals after the import.
"""
# Setup the includes defs. The first one defines a variable that the
# second one references after a local 'include_defs' call.
include_def1 = ProjectFile(self.project_root, path='inc_def1', contents=('FOO = 1',))
include_def2 = ProjectFile(
self.project_root,
path='inc_def2',
contents=(
'def test():',
' include_defs({0!r})'.format(include_def1.name),
' FOO',
))
self.write_files(include_def1, include_def2)
# Construct a processor using the above as default includes, and verify
# that the function 'test' can use 'FOO' after including the first
# include def.
build_file = ProjectFile(self.project_root, path='BUCK', contents=('test()',))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
includes=[include_def1.name, include_def2.name])
build_file_processor.process(build_file.root, build_file.prefix, build_file.path, [])
# Construct a processor with no default includes, have a generated
# build file include the include defs one after another, and verify
# that the function 'test' can use 'FOO' after including the first
# include def.
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def1.name),
'include_defs({0!r})'.format(include_def2.name),
'test()',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
build_file_processor.process(build_file.root, build_file.prefix, build_file.path, [])
def test_private_globals_are_ignored(self):
"""
Verify globals prefixed with '_' don't get imported via 'include_defs'.
"""
include_def = ProjectFile(self.project_root, path='inc_def1', contents=('_FOO = 1',))
self.write_file(include_def)
# Test we don't get private module attributes from default includes.
build_file = ProjectFile(self.project_root, path='BUCK', contents=('_FOO',))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
includes=[include_def.name])
self.assertRaises(
NameError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
# Test we don't get private module attributes from explicit includes.
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def.name),
'_FOO',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
self.assertRaises(
NameError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
def test_implicit_includes_apply_to_explicit_includes(self):
"""
Verify that implicit includes are applied to explicit includes.
"""
# Setup an implicit include that defines a variable, another include
# that uses it, and a build file that uses the explicit include.
implicit_inc = ProjectFile(self.project_root, path='implicit', contents=('FOO = 1',))
explicit_inc = ProjectFile(self.project_root, path='explicit', contents=('FOO',))
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(explicit_inc.name),
))
self.write_files(implicit_inc, explicit_inc, build_file)
# Run the processor to verify that the explicit include can use the
# variable in the implicit include.
build_file_processor = self.create_build_file_processor(
includes=[implicit_inc.name])
build_file_processor.process(build_file.root, build_file.prefix, build_file.path, [])
def test_all_list_is_respected(self):
"""
Verify that the `__all__` list in included files can be used to narrow
what gets pulled in.
"""
include_def = ProjectFile(
self.project_root,
path='inc_def1',
contents=('__all__ = []', 'FOO = 1'))
self.write_file(include_def)
# Test we don't get non-whitelisted attributes from default includes.
build_file = ProjectFile(self.project_root, path='BUCK', contents=('FOO',))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
includes=[include_def.name])
self.assertRaises(
NameError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
# Test we don't get non-whitelisted attributes from explicit includes.
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def.name),
'FOO',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
self.assertRaises(
NameError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
def test_do_not_override_overridden_builtins(self):
"""
We want to ensure that if you override something like java_binary, and then use
include_defs to get another file, you don't end up clobbering your override.
"""
# Override java_library and have it automatically add a dep
build_defs = ProjectFile(
self.project_root,
path='BUILD_DEFS',
contents=(
# While not strictly needed for this test, we want to make sure we are overriding
# a provided method and not just defining it ourselves.
'old_get_base_path = get_base_path',
'def get_base_path(*args, **kwargs):',
' raise ValueError()',
'include_defs("//OTHER_DEFS")',
))
other_defs = ProjectFile(self.project_root, path='OTHER_DEFS', contents=())
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'get_base_path()',
))
self.write_files(build_defs, other_defs, build_file)
build_file_processor = self.create_build_file_processor(
includes=[build_defs.name])
with build_file_processor.with_builtins(__builtin__.__dict__):
self.assertRaises(
ValueError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
def test_watchman_glob_failure_raises_diagnostic_with_stack(self):
class FakeWatchmanClient:
def __init__(self):
self.query_invoked = False
def query(self, *args):
self.query_invoked = True
raise WatchmanError("Nobody watches the watchmen")
def close(self):
pass
self.watchman_client = FakeWatchmanClient()
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'foo_rule(',
' name="foo",'
' srcs=glob(["*.java"]),',
')'
))
java_file = ProjectFile(self.project_root, path='Foo.java', contents=())
self.write_files(build_file, java_file)
build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
diagnostics = []
rules = []
fake_stdout = StringIO.StringIO()
with build_file_processor.with_builtins(__builtin__.__dict__):
self.assertRaises(
WatchmanError,
process_with_diagnostics,
{
'buildFile': self.build_file_name,
'watchRoot': '',
'projectPrefix': self.project_root,
},
build_file_processor,
fake_stdout)
self.assertTrue(self.watchman_client.query_invoked)
result = fake_stdout.getvalue()
decoded_result = bser.loads(result)
self.assertEqual([], decoded_result['values'])
self.assertEqual(1, len(decoded_result['diagnostics']))
diagnostic = decoded_result['diagnostics'][0]
self.assertEqual('fatal', diagnostic['level'])
self.assertEqual('parse', diagnostic['source'])
self.assertEqual('Nobody watches the watchmen', diagnostic['message'])
exception = diagnostic['exception']
self.assertEqual('WatchmanError', exception['type'])
self.assertEqual('Nobody watches the watchmen', exception['value'])
self.assertTrue(len(exception['traceback']) > 0)
def test_watchman_glob_warning_adds_diagnostic(self):
class FakeWatchmanClient:
def query(self, *args):
return {'warning': 'This is a warning', 'files': ['Foo.java']}
def close(self):
pass
self.watchman_client = FakeWatchmanClient()
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'foo_rule(',
' name="foo",'
' srcs=glob(["*.java"]),',
')'
))
java_file = ProjectFile(self.project_root, path='Foo.java', contents=())
self.write_files(build_file, java_file)
build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
diagnostics = []
with build_file_processor.with_builtins(__builtin__.__dict__):
rules = build_file_processor.process(
build_file.root, build_file.prefix, build_file.path, diagnostics)
self.assertEqual(['Foo.java'], rules[0]['srcs'])
self.assertEqual(
[Diagnostic(
message='This is a warning',
level='warning',
source='watchman',
exception=None)],
diagnostics)
def test_multiple_watchman_glob_warning_adds_diagnostics_in_order(self):
warnings = iter(['Warning 1', 'Warning 2'])
glob_results = iter([['Foo.java'], ['Foo.c']])
class FakeWatchmanClient:
def query(self, *args):
return {'warning': warnings.next(), 'files': glob_results.next()}
def close(self):
pass
self.watchman_client = FakeWatchmanClient()
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'foo_rule(',
' name="foo",'
' srcs=glob(["*.java"]) + glob(["*.c"]),',
')'
))
java_file = ProjectFile(self.project_root, path='Foo.java', contents=())
c_file = ProjectFile(self.project_root, path='Foo.c', contents=())
self.write_files(build_file, java_file, c_file)
build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
with build_file_processor.with_builtins(__builtin__.__dict__):
diagnostics = []
rules = build_file_processor.process(
build_file.root, build_file.prefix, build_file.path,
diagnostics)
self.assertEqual(['Foo.java', 'Foo.c'], rules[0]['srcs'])
self.assertEqual(
[Diagnostic(
message='Warning 1',
level='warning',
source='watchman',
exception=None),
Diagnostic(
message='Warning 2',
level='warning',
source='watchman',
exception=None)],
diagnostics)
def test_read_config(self):
"""
Verify that the builtin `read_config()` function works.
"""
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'assert read_config("hello", "world") == "foo"',
'assert read_config("hello", "bar") is None',
'assert read_config("hello", "goo", "default") == "default"',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
configs={('hello', 'world'): 'foo'})
result = build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
[])
self.assertEquals(
get_config_from_results(result),
{'hello': {'world': 'foo', 'bar': None, 'goo': None}})
def test_add_build_file_dep(self):
"""
Test simple use of `add_build_file_dep`.
"""
# Setup the build file and dependency.
dep = ProjectFile(self.project_root, path='dep', contents=('',))
build_file = (
ProjectFile(
self.project_root,
path='BUCK',
contents=(
'add_build_file_dep("//dep")',
),
))
self.write_files(dep, build_file)
# Create a process and run it.
build_file_processor = self.create_build_file_processor()
results = build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
[])
# Verify that the dep was recorded.
self.assertTrue(
os.path.join(self.project_root, dep.path) in
get_includes_from_results(results))
def test_imports_are_blocked(self):
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'import ssl',
))
self.write_files(build_file)
build_file_processor = self.create_build_file_processor()
with build_file_processor.with_builtins(__builtin__.__dict__):
self.assertRaises(
ImportError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
def test_import_whitelist(self):
"""
Verify that modules whitelisted globally or in configs can be imported
with sandboxing enabled.
"""
self.project_import_whitelist = ['sys', 'subprocess']
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'import json',
'import functools',
'import re',
'import sys',
'import subprocess',
))
self.write_files(build_file)
build_file_processor = self.create_build_file_processor()
build_file_processor.process(build_file.root, build_file.prefix, build_file.path, [])
def test_allow_unsafe_import_allows_to_import(self):
"""
Verify that `allow_unsafe_import()` allows importing the specified modules.
"""
# Importing httplib results in `__import__()` calls for other modules, e.g. socket, sys
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'with allow_unsafe_import():',
' import math, httplib',
))
self.write_files(build_file)
build_file_processor = self.create_build_file_processor()
with build_file_processor.with_builtins(__builtin__.__dict__):
build_file_processor.process(
build_file.root,
build_file.prefix,
build_file.path,
[])
def test_modules_are_not_copied_unless_specified(self):
"""
Test that modules are not copied by 'include_defs' unless specified in '__all__'.
"""
include_def = ProjectFile(
self.project_root,
path='inc_def',
contents=(
'with allow_unsafe_import():',
' import math',
' def math_pi():',
' return math.pi',
))
self.write_files(include_def)
# Module math should not be accessible
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def.name),
'assert(round(math.pi, 2) == 3.14)',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
self.assertRaises(
NameError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
# Confirm that math_pi() works
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def.name),
'assert(round(math_pi(), 2) == 3.14)',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
build_file_processor.process(build_file.root, build_file.prefix, build_file.path, [])
# If specified in '__all__', math should be accessible
include_def = ProjectFile(
self.project_root,
path='inc_def',
contents=(
'__all__ = ["math"]',
'with allow_unsafe_import():',
' import math',
))
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def.name),
'assert(round(math.pi, 2) == 3.14)',
))
self.write_files(include_def, build_file)
build_file_processor = self.create_build_file_processor()
build_file_processor.process(build_file.root, build_file.prefix, build_file.path, [])
def test_os_getenv(self):
"""
Verify that calling `os.getenv()` records the environment variable.
"""
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'import os',
'assert os.getenv("TEST1") == "foo"',
'assert os.getenv("TEST2") is None',
'assert os.getenv("TEST3", "default") == "default"',
))
self.write_file(build_file)
with with_envs({'TEST1': 'foo', 'TEST2': None, 'TEST3': None}):
build_file_processor = self.create_build_file_processor()
with build_file_processor.with_env_interceptors():
result = build_file_processor.process(build_file.root, build_file.prefix,
build_file.path, [])
self.assertEquals(
get_env_from_results(result),
{'TEST1': "foo", 'TEST2': None, 'TEST3': None})
def test_os_environ(self):
"""
Verify that accessing environment variables via `os.environ` records
the environment variables.
"""
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'import os',
'assert os.environ["TEST1"] == "foo"',
'assert os.environ.get("TEST2") is None',
'assert os.environ.get("TEST3", "default") == "default"',
'assert "TEST4" in os.environ',
'assert "TEST5" not in os.environ',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
with with_envs({'TEST1': 'foo', 'TEST2': None, 'TEST3': None, 'TEST4': '', 'TEST5': None}):
build_file_processor = self.create_build_file_processor()
with build_file_processor.with_env_interceptors():
result = build_file_processor.process(build_file.root, build_file.prefix,
build_file.path, [])
self.assertEquals(
get_env_from_results(result),
{'TEST1': "foo", 'TEST2': None, 'TEST3': None, 'TEST4': '', 'TEST5': None})
def test_safe_modules_allow_safe_functions(self):
"""
Test that 'import os.path' allows access to safe 'os' functions, that
'import pipes' allows 'quote', and that 'from os.path import *' works.
"""
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'import os.path',
'from os.path import *',
'import pipes',
'assert(os.path.split("a/b/c") == ("a/b", "c"))',
'assert(split("a/b/c") == ("a/b", "c"))',
'assert os.environ["TEST1"] == "foo"',
'assert pipes.quote("foo; bar") == "\'foo; bar\'"'
))
self.write_files(build_file)
with with_envs({'TEST1': 'foo'}):
build_file_processor = self.create_build_file_processor()
build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
[])
def test_safe_modules_block_unsafe_functions(self):
"""
Test that, after 'import os.path', unsafe functions raise errors.
"""
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'import os.path',
'os.path.exists("a/b")',
))
self.write_files(build_file)
build_file_processor = self.create_build_file_processor()
# 'os.path.exists()' should raise AttributeError
self.assertRaises(
AttributeError,
build_file_processor.process,
build_file.root, build_file.prefix, build_file.path, [])
def test_wrap_access_prints_warnings(self):
path = os.path.normpath(os.path.join(self.project_root, 'foo.py'))
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=("open('{0}', 'r')".format(path.replace('\\', '\\\\')),))
py_file = ProjectFile(self.project_root, path='foo.py', contents=('foo',))
self.write_files(build_file, py_file)
build_file_processor = self.create_build_file_processor()
diagnostics = []
build_file_processor.process(build_file.root, build_file.prefix, build_file.path,
diagnostics)
expected_message = (
"Access to a non-tracked file detected! {0} is not a ".format(path) +
"known dependency and it should be added using 'add_build_file_dep' " +
"function before trying to access the file, e.g.\n" +
"'add_build_file_dep({0!r})'\n".format(py_file.name) +
"The 'add_build_file_dep' function is documented at " +
"https://buckbuild.com/function/add_build_file_dep.html\n"
)
self.assertEqual(
[Diagnostic(
message=expected_message,
level='warning',
source='sandboxing',
exception=None)],
diagnostics)
def test_can_resolve_cell_paths(self):
build_file_processor = self.create_build_file_processor(
cell_roots={
'foo': os.path.abspath(os.path.join(self.project_root, '../cell'))
})
self.assertEqual(
os.path.abspath(os.path.join(self.project_root, '../cell/bar/baz')),
build_file_processor._get_include_path('foo//bar/baz'))
self.assertEqual(
os.path.abspath(os.path.join(self.project_root, 'bar/baz')),
build_file_processor._get_include_path('//bar/baz'))
def test_bser_encoding_failure(self):
build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
fake_stdout = StringIO.StringIO()
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'foo_rule(',
' name="foo",'
' srcs=[object()],'
')'
))
self.write_file(build_file)
with build_file_processor.with_builtins(__builtin__.__dict__):
process_with_diagnostics(
{
'buildFile': self.build_file_name,
'watchRoot': '',
'projectPrefix': self.project_root,
},
build_file_processor,
fake_stdout)
result = fake_stdout.getvalue()
decoded_result = bser.loads(result)
self.assertEqual(
[],
decoded_result['values'])
self.assertEqual(
'fatal',
decoded_result['diagnostics'][0]['level'])
self.assertEqual(
'parse',
decoded_result['diagnostics'][0]['source'])
def test_values_from_namespaced_includes_accessible_only_via_namespace(self):
defs_file = ProjectFile(
root=self.project_root,
path='DEFS',
contents=(
'value = 2',
)
)
build_file = ProjectFile(
self.project_root,
path='BUCK',
contents=(
'include_defs("//DEFS", "defs")',
'foo_rule(name="foo" + str(defs.value), srcs=[])',
)
)
self.write_files(defs_file, build_file)
processor = self.create_build_file_processor(extra_funcs=[foo_rule])
with processor.with_builtins(__builtin__.__dict__):
result = processor.process(self.project_root, None, 'BUCK', [])
self.assertTrue(
[x for x in result if x.get('name', '') == 'foo2'],
"result should contain rule with name derived from a value in namespaced defs",
)
# should not be in global scope
self.write_file(ProjectFile(
self.project_root,
path='BUCK_fail',
contents=(
'include_defs("//DEFS", "defs")',
'foo_rule(name="foo" + str(value), srcs=[])',
)
))
with processor.with_builtins(__builtin__.__dict__):
self.assertRaises(
NameError,
lambda: processor.process(self.project_root, None, 'BUCK_fail', []))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.ssh.keys}.
"""
try:
import Crypto.Cipher.DES3
except ImportError:
# we'll have to skip these tests without PyCrypto and pyasn1
Crypto = None
try:
import pyasn1
except ImportError:
pyasn1 = None
if Crypto and pyasn1:
from twisted.conch.ssh import keys, common, sexpy
import os, base64
from hashlib import sha1
from twisted.conch.test import keydata
from twisted.python import randbytes
from twisted.trial import unittest
class HelpersTests(unittest.TestCase):
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "Cannot run without PyASN1"
def setUp(self):
self._secureRandom = randbytes.secureRandom
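# Stub out secure randomness so the DSA signing below uses a fixed nonce and the
# asserted signatures are deterministic.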
randbytes.secureRandom = lambda x: '\x55' * x
def tearDown(self):
randbytes.secureRandom = self._secureRandom
self._secureRandom = None
def test_pkcs1(self):
"""
Test Public Key Cryptographic Standard #1 functions.
"""
data = 'ABC'
messageSize = 6
self.assertEqual(keys.pkcs1Pad(data, messageSize),
'\x01\xff\x00ABC')
hash = sha1().digest()
messageSize = 40
self.assertEqual(keys.pkcs1Digest('', messageSize),
'\x01\xff\xff\xff\x00' + keys.ID_SHA1 + hash)
def _signRSA(self, data):
key = keys.Key.fromString(keydata.privateRSA_openssh)
sig = key.sign(data)
return key.keyObject, sig
def _signDSA(self, data):
key = keys.Key.fromString(keydata.privateDSA_openssh)
sig = key.sign(data)
return key.keyObject, sig
def test_signRSA(self):
"""
Test that RSA keys return appropriate signatures.
"""
data = 'data'
key, sig = self._signRSA(data)
sigData = keys.pkcs1Digest(data, keys.lenSig(key))
v = key.sign(sigData, '')[0]
self.assertEqual(sig, common.NS('ssh-rsa') + common.MP(v))
return key, sig
def test_signDSA(self):
"""
Test that DSA keys return appropriate signatures.
"""
data = 'data'
key, sig = self._signDSA(data)
sigData = sha1(data).digest()
v = key.sign(sigData, '\x55' * 19)
self.assertEqual(sig, common.NS('ssh-dss') + common.NS(
Crypto.Util.number.long_to_bytes(v[0], 20) +
Crypto.Util.number.long_to_bytes(v[1], 20)))
return key, sig
def test_objectType(self):
"""
Test that objectType returns the correct type for objects.
"""
self.assertEqual(keys.objectType(keys.Key.fromString(
keydata.privateRSA_openssh).keyObject), 'ssh-rsa')
self.assertEqual(keys.objectType(keys.Key.fromString(
keydata.privateDSA_openssh).keyObject), 'ssh-dss')
self.assertRaises(keys.BadKeyError, keys.objectType, None)
class KeyTests(unittest.TestCase):
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "Cannot run without PyASN1"
def setUp(self):
self.rsaObj = Crypto.PublicKey.RSA.construct((1L, 2L, 3L, 4L, 5L))
self.dsaObj = Crypto.PublicKey.DSA.construct((1L, 2L, 3L, 4L, 5L))
self.rsaSignature = ('\x00\x00\x00\x07ssh-rsa\x00'
'\x00\x00`N\xac\xb4@qK\xa0(\xc3\xf2h \xd3\xdd\xee6Np\x9d_'
'\xb0>\xe3\x0c(L\x9d{\txUd|!\xf6m\x9c\xd3\x93\x842\x7fU'
'\x05\xf4\xf7\xfaD\xda\xce\x81\x8ea\x7f=Y\xed*\xb7\xba\x81'
'\xf2\xad\xda\xeb(\x97\x03S\x08\x81\xc7\xb1\xb7\xe6\xe3'
'\xcd*\xd4\xbd\xc0wt\xf7y\xcd\xf0\xb7\x7f\xfb\x1e>\xf9r'
'\x8c\xba')
self.dsaSignature = ('\x00\x00\x00\x07ssh-dss\x00\x00'
'\x00(\x18z)H\x8a\x1b\xc6\r\xbbq\xa2\xd7f\x7f$\xa7\xbf'
'\xe8\x87\x8c\x88\xef\xd9k\x1a\x98\xdd{=\xdec\x18\t\xe3'
'\x87\xa9\xc72h\x95')
self.oldSecureRandom = randbytes.secureRandom
randbytes.secureRandom = lambda x: '\xff' * x
self.keyFile = self.mktemp()
file(self.keyFile, 'wb').write(keydata.privateRSA_lsh)
def tearDown(self):
randbytes.secureRandom = self.oldSecureRandom
del self.oldSecureRandom
os.unlink(self.keyFile)
def test__guessStringType(self):
"""
Test that the _guessStringType method guesses string types
correctly.
"""
self.assertEqual(keys.Key._guessStringType(keydata.publicRSA_openssh),
'public_openssh')
self.assertEqual(keys.Key._guessStringType(keydata.publicDSA_openssh),
'public_openssh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateRSA_openssh), 'private_openssh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateDSA_openssh), 'private_openssh')
self.assertEqual(keys.Key._guessStringType(keydata.publicRSA_lsh),
'public_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.publicDSA_lsh),
'public_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.privateRSA_lsh),
'private_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.privateDSA_lsh),
'private_lsh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateRSA_agentv3), 'agentv3')
self.assertEqual(keys.Key._guessStringType(
keydata.privateDSA_agentv3), 'agentv3')
self.assertEqual(keys.Key._guessStringType(
'\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(keys.Key._guessStringType(
'\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(keys.Key._guessStringType('not a key'),
None)
def _testPublicPrivateFromString(self, public, private, type, data):
self._testPublicFromString(public, type, data)
self._testPrivateFromString(private, type, data)
def _testPublicFromString(self, public, type, data):
publicKey = keys.Key.fromString(public)
self.assertTrue(publicKey.isPublic())
self.assertEqual(publicKey.type(), type)
for k, v in publicKey.data().items():
self.assertEqual(data[k], v)
def _testPrivateFromString(self, private, type, data):
privateKey = keys.Key.fromString(private)
self.assertFalse(privateKey.isPublic())
self.assertEqual(privateKey.type(), type)
for k, v in data.items():
self.assertEqual(privateKey.data()[k], v)
def test_fromOpenSSH(self):
"""
Test that keys are correctly generated from OpenSSH strings.
"""
self._testPublicPrivateFromString(keydata.publicRSA_openssh,
keydata.privateRSA_openssh, 'RSA', keydata.RSAData)
self.assertEqual(keys.Key.fromString(
keydata.privateRSA_openssh_encrypted,
passphrase='encrypted'),
keys.Key.fromString(keydata.privateRSA_openssh))
self.assertEqual(keys.Key.fromString(
keydata.privateRSA_openssh_alternate),
keys.Key.fromString(keydata.privateRSA_openssh))
self._testPublicPrivateFromString(keydata.publicDSA_openssh,
keydata.privateDSA_openssh, 'DSA', keydata.DSAData)
def test_fromOpenSSH_with_whitespace(self):
"""
If key strings have trailing whitespace, it should be ignored.
"""
# from bug #3391, since our test key data doesn't have
# an issue with appended newlines
privateDSAData = """-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
self.assertEqual(keys.Key.fromString(privateDSAData),
keys.Key.fromString(privateDSAData + '\n'))
def test_fromNewerOpenSSH(self):
"""
Newer versions of OpenSSH generate encrypted keys which have a longer
IV than the older versions. These newer keys are also loaded.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh_encrypted_aes,
passphrase='testxp')
self.assertEqual(key.type(), 'RSA')
key2 = keys.Key.fromString(
keydata.privateRSA_openssh_encrypted_aes + '\n',
passphrase='testxp')
self.assertEqual(key, key2)
def test_fromLSH(self):
"""
Test that keys are correctly generated from LSH strings.
"""
self._testPublicPrivateFromString(keydata.publicRSA_lsh,
keydata.privateRSA_lsh, 'RSA', keydata.RSAData)
self._testPublicPrivateFromString(keydata.publicDSA_lsh,
keydata.privateDSA_lsh, 'DSA', keydata.DSAData)
sexp = sexpy.pack([['public-key', ['bad-key', ['p', '2']]]])
self.assertRaises(keys.BadKeyError, keys.Key.fromString,
data='{'+base64.encodestring(sexp)+'}')
sexp = sexpy.pack([['private-key', ['bad-key', ['p', '2']]]])
self.assertRaises(keys.BadKeyError, keys.Key.fromString,
sexp)
def test_fromAgentv3(self):
"""
Test that keys are correctly generated from Agent v3 strings.
"""
self._testPrivateFromString(keydata.privateRSA_agentv3, 'RSA',
keydata.RSAData)
self._testPrivateFromString(keydata.privateDSA_agentv3, 'DSA',
keydata.DSAData)
self.assertRaises(keys.BadKeyError, keys.Key.fromString,
'\x00\x00\x00\x07ssh-foo'+'\x00\x00\x00\x01\x01'*5)
def test_fromStringErrors(self):
"""
keys.Key.fromString should raise BadKeyError when the key is invalid.
"""
self.assertRaises(keys.BadKeyError, keys.Key.fromString, '')
# no key data with a bad key type
self.assertRaises(keys.BadKeyError, keys.Key.fromString, '',
'bad_type')
# trying to decrypt a key which doesn't support encryption
self.assertRaises(keys.BadKeyError, keys.Key.fromString,
keydata.publicRSA_lsh, passphrase = 'unencrypted')
# trying to decrypt a key with the wrong passphrase
self.assertRaises(keys.EncryptedKeyError, keys.Key.fromString,
keys.Key(self.rsaObj).toString('openssh', 'encrypted'))
# key with no key data
self.assertRaises(keys.BadKeyError, keys.Key.fromString,
'-----BEGIN RSA KEY-----\nwA==\n')
# key with invalid DEK Info
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: weird type
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
# key with invalid encryption type
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: FOO-123-BAR,01234567
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
# key with bad IV (AES)
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
# key with bad IV (DES3)
self.assertRaises(
keys.BadKeyError, keys.Key.fromString,
"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
def test_fromFile(self):
"""
Test that fromFile works correctly.
"""
self.assertEqual(keys.Key.fromFile(self.keyFile),
keys.Key.fromString(keydata.privateRSA_lsh))
self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
self.keyFile, 'bad_type')
self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
self.keyFile, passphrase='unencrypted')
def test_init(self):
"""
Test that the PublicKey object is initialized correctly.
"""
obj = Crypto.PublicKey.RSA.construct((1L, 2L))
key = keys.Key(obj)
self.assertEqual(key.keyObject, obj)
def test_equal(self):
"""
Test that Key objects are compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(Crypto.PublicKey.RSA.construct((1L, 2L)))
dsa = keys.Key(self.dsaObj)
self.assertTrue(rsa1 == rsa2)
self.assertFalse(rsa1 == rsa3)
self.assertFalse(rsa1 == dsa)
self.assertFalse(rsa1 == object)
self.assertFalse(rsa1 == None)
def test_notEqual(self):
"""
Test that Key objects support inequality comparison correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(Crypto.PublicKey.RSA.construct((1L, 2L)))
dsa = keys.Key(self.dsaObj)
self.assertFalse(rsa1 != rsa2)
self.assertTrue(rsa1 != rsa3)
self.assertTrue(rsa1 != dsa)
self.assertTrue(rsa1 != object)
self.assertTrue(rsa1 != None)
def test_type(self):
"""
Test that the type method returns the correct type for an object.
"""
self.assertEqual(keys.Key(self.rsaObj).type(), 'RSA')
self.assertEqual(keys.Key(self.rsaObj).sshType(), 'ssh-rsa')
self.assertEqual(keys.Key(self.dsaObj).type(), 'DSA')
self.assertEqual(keys.Key(self.dsaObj).sshType(), 'ssh-dss')
self.assertRaises(RuntimeError, keys.Key(None).type)
self.assertRaises(RuntimeError, keys.Key(None).sshType)
self.assertRaises(RuntimeError, keys.Key(self).type)
self.assertRaises(RuntimeError, keys.Key(self).sshType)
def test_fromBlob(self):
"""
Test that a public key is correctly generated from a public key blob.
"""
rsaBlob = common.NS('ssh-rsa') + common.MP(2) + common.MP(3)
rsaKey = keys.Key.fromString(rsaBlob)
dsaBlob = (common.NS('ssh-dss') + common.MP(2) + common.MP(3) +
common.MP(4) + common.MP(5))
dsaKey = keys.Key.fromString(dsaBlob)
badBlob = common.NS('ssh-bad')
self.assertTrue(rsaKey.isPublic())
self.assertEqual(rsaKey.data(), {'e':2L, 'n':3L})
self.assertTrue(dsaKey.isPublic())
self.assertEqual(dsaKey.data(), {'p':2L, 'q':3L, 'g':4L, 'y':5L})
self.assertRaises(keys.BadKeyError,
keys.Key.fromString, badBlob)
def test_fromPrivateBlob(self):
"""
Test that a private key is correctly generated from a private key blob.
"""
rsaBlob = (common.NS('ssh-rsa') + common.MP(2) + common.MP(3) +
common.MP(4) + common.MP(5) + common.MP(6) + common.MP(7))
rsaKey = keys.Key._fromString_PRIVATE_BLOB(rsaBlob)
dsaBlob = (common.NS('ssh-dss') + common.MP(2) + common.MP(3) +
common.MP(4) + common.MP(5) + common.MP(6))
dsaKey = keys.Key._fromString_PRIVATE_BLOB(dsaBlob)
badBlob = common.NS('ssh-bad')
self.assertFalse(rsaKey.isPublic())
self.assertEqual(
rsaKey.data(), {'n':2L, 'e':3L, 'd':4L, 'u':5L, 'p':6L, 'q':7L})
self.assertFalse(dsaKey.isPublic())
self.assertEqual(dsaKey.data(), {'p':2L, 'q':3L, 'g':4L, 'y':5L, 'x':6L})
self.assertRaises(
keys.BadKeyError, keys.Key._fromString_PRIVATE_BLOB, badBlob)
def test_blob(self):
"""
Test that the Key object generates blobs correctly.
"""
self.assertEqual(keys.Key(self.rsaObj).blob(),
'\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x02'
'\x00\x00\x00\x01\x01')
self.assertEqual(keys.Key(self.dsaObj).blob(),
'\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x03'
'\x00\x00\x00\x01\x04\x00\x00\x00\x01\x02'
'\x00\x00\x00\x01\x01')
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.blob)
def test_privateBlob(self):
"""
L{Key.privateBlob} returns the SSH protocol-level format of the private
key and raises L{RuntimeError} if the underlying key object is invalid.
"""
self.assertEqual(keys.Key(self.rsaObj).privateBlob(),
'\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'
'\x00\x00\x00\x01\x02\x00\x00\x00\x01\x03\x00'
'\x00\x00\x01\x04\x00\x00\x00\x01\x04\x00\x00'
'\x00\x01\x05')
self.assertEqual(keys.Key(self.dsaObj).privateBlob(),
'\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x03'
'\x00\x00\x00\x01\x04\x00\x00\x00\x01\x02\x00'
'\x00\x00\x01\x01\x00\x00\x00\x01\x05')
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.privateBlob)
def test_toOpenSSH(self):
"""
Test that the Key object generates OpenSSH keys correctly.
"""
key = keys.Key.fromString(keydata.privateRSA_lsh)
self.assertEqual(key.toString('openssh'), keydata.privateRSA_openssh)
self.assertEqual(key.toString('openssh', 'encrypted'),
keydata.privateRSA_openssh_encrypted)
self.assertEqual(key.public().toString('openssh'),
keydata.publicRSA_openssh[:-8]) # no comment
self.assertEqual(key.public().toString('openssh', 'comment'),
keydata.publicRSA_openssh)
key = keys.Key.fromString(keydata.privateDSA_lsh)
self.assertEqual(key.toString('openssh'), keydata.privateDSA_openssh)
self.assertEqual(key.public().toString('openssh', 'comment'),
keydata.publicDSA_openssh)
self.assertEqual(key.public().toString('openssh'),
keydata.publicDSA_openssh[:-8]) # no comment
def test_toLSH(self):
"""
Test that the Key object generates LSH keys correctly.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString('lsh'), keydata.privateRSA_lsh)
self.assertEqual(key.public().toString('lsh'),
keydata.publicRSA_lsh)
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString('lsh'), keydata.privateDSA_lsh)
self.assertEqual(key.public().toString('lsh'),
keydata.publicDSA_lsh)
def test_toAgentv3(self):
"""
Test that the Key object generates Agent v3 keys correctly.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString('agentv3'), keydata.privateRSA_agentv3)
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString('agentv3'), keydata.privateDSA_agentv3)
def test_toStringErrors(self):
"""
Test that toString raises errors appropriately.
"""
self.assertRaises(keys.BadKeyError, keys.Key(self.rsaObj).toString,
'bad_type')
def test_sign(self):
"""
Test that the Key object generates correct signatures.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.sign(''), self.rsaSignature)
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.sign(''), self.dsaSignature)
def test_verify(self):
"""
Test that the Key object correctly verifies signatures.
"""
key = keys.Key.fromString(keydata.publicRSA_openssh)
self.assertTrue(key.verify(self.rsaSignature, ''))
self.assertFalse(key.verify(self.rsaSignature, 'a'))
self.assertFalse(key.verify(self.dsaSignature, ''))
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature, ''))
self.assertFalse(key.verify(self.dsaSignature, 'a'))
self.assertFalse(key.verify(self.rsaSignature, ''))
def test_verifyDSANoPrefix(self):
"""
Some commercial SSH servers send DSA signatures as two raw 20-byte
numbers; they are still verified as valid signatures.
"""
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature[-40:], ''))
def test_repr(self):
"""
Test the pretty representation of Key.
"""
self.assertEqual(repr(keys.Key(self.rsaObj)),
"""<RSA Private Key (0 bits)
attr d:
\t03
attr e:
\t02
attr n:
\t01
attr p:
\t04
attr q:
\t05
attr u:
\t04>""")
|
|
# darcs.py - darcs support for the convert extension
#
# Copyright 2007-2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from common import NoRepo, checktool, commandline, commit, converter_source
from mercurial.i18n import _
from mercurial import util
import os, shutil, tempfile, re
# The naming drift of ElementTree is fun!
try:
from xml.etree.cElementTree import ElementTree, XMLParser
except ImportError:
try:
from xml.etree.ElementTree import ElementTree, XMLParser
except ImportError:
try:
from elementtree.cElementTree import ElementTree, XMLParser
except ImportError:
try:
from elementtree.ElementTree import ElementTree, XMLParser
except ImportError:
pass
class darcs_source(converter_source, commandline):
def __init__(self, ui, path, rev=None):
converter_source.__init__(self, ui, path, rev=rev)
commandline.__init__(self, ui, 'darcs')
# check for _darcs, ElementTree so that we can easily skip
# test-convert-darcs if ElementTree is not around
if not os.path.exists(os.path.join(path, '_darcs')):
raise NoRepo(_("%s does not look like a darcs repository") % path)
checktool('darcs')
version = self.run0('--version').splitlines()[0].strip()
if version < '2.1':
raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
version)
if "ElementTree" not in globals():
raise util.Abort(_("Python ElementTree module is not available"))
self.path = os.path.realpath(path)
self.lastrev = None
self.changes = {}
self.parents = {}
self.tags = {}
# Check darcs repository format
format = self.format()
if format:
if format in ('darcs-1.0', 'hashed'):
raise NoRepo(_("%s repository format is unsupported, "
"please upgrade") % format)
else:
self.ui.warn(_('failed to detect repository format!'))
def before(self):
self.tmppath = tempfile.mkdtemp(
prefix='convert-' + os.path.basename(self.path) + '-')
output, status = self.run('init', repodir=self.tmppath)
self.checkexit(status)
tree = self.xml('changes', xml_output=True, summary=True,
repodir=self.path)
tagname = None
child = None
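        # `darcs changes` reports patches newest first, so each patch seen in
        # the loop below becomes the sole parent of the previously seen (newer)
        # patch; parents[None] therefore ends up holding the newest patch
        # (later returned by getheads) and the oldest patch gets no parents.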
for elt in tree.findall('patch'):
node = elt.get('hash')
name = elt.findtext('name', '')
if name.startswith('TAG '):
tagname = name[4:].strip()
elif tagname is not None:
self.tags[tagname] = node
tagname = None
self.changes[node] = elt
self.parents[child] = [node]
child = node
self.parents[child] = []
def after(self):
self.ui.debug('cleaning up %s\n' % self.tmppath)
shutil.rmtree(self.tmppath, ignore_errors=True)
def recode(self, s, encoding=None):
if isinstance(s, unicode):
# XMLParser returns unicode objects for anything it can't
# encode into ASCII. We convert them back to str to get
# recode's normal conversion behavior.
s = s.encode('latin-1')
return super(darcs_source, self).recode(s, encoding)
def xml(self, cmd, **kwargs):
# NOTE: darcs is currently encoding agnostic and will print
# patch metadata byte-for-byte, even in the XML changelog.
etree = ElementTree()
# While we are decoding the XML as latin-1 to be as liberal as
# possible, etree will still raise an exception if any
# non-printable characters are in the XML changelog.
parser = XMLParser(encoding='latin-1')
p = self._run(cmd, **kwargs)
etree.parse(p.stdout, parser=parser)
p.wait()
self.checkexit(p.returncode)
return etree.getroot()
def format(self):
output, status = self.run('show', 'repo', no_files=True,
repodir=self.path)
self.checkexit(status)
m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE)
if not m:
return None
return ','.join(sorted(f.strip() for f in m.group(1).split(',')))
def manifest(self):
man = []
output, status = self.run('show', 'files', no_directories=True,
repodir=self.tmppath)
self.checkexit(status)
for line in output.split('\n'):
path = line[2:]
if path:
man.append(path)
return man
def getheads(self):
return self.parents[None]
def getcommit(self, rev):
elt = self.changes[rev]
date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
# etree can return unicode objects for name, comment, and author,
# so recode() is used to ensure str objects are emitted.
return commit(author=self.recode(elt.get('author')),
date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
desc=self.recode(desc).strip(),
parents=self.parents[rev])
def pull(self, rev):
output, status = self.run('pull', self.path, all=True,
match='hash %s' % rev,
no_test=True, no_posthook=True,
external_merge='/bin/false',
repodir=self.tmppath)
if status:
if output.find('We have conflicts in') == -1:
self.checkexit(status, output)
output, status = self.run('revert', all=True, repodir=self.tmppath)
self.checkexit(status, output)
def getchanges(self, rev):
copies = {}
changes = []
man = None
for elt in self.changes[rev].find('summary').getchildren():
if elt.tag in ('add_directory', 'remove_directory'):
continue
if elt.tag == 'move':
if man is None:
man = self.manifest()
source, dest = elt.get('from'), elt.get('to')
if source in man:
# File move
changes.append((source, rev))
changes.append((dest, rev))
copies[dest] = source
else:
# Directory move, deduce file moves from manifest
source = source + '/'
for f in man:
if not f.startswith(source):
continue
fdest = dest + '/' + f[len(source):]
changes.append((f, rev))
changes.append((fdest, rev))
copies[fdest] = f
else:
changes.append((elt.text.strip(), rev))
self.pull(rev)
self.lastrev = rev
return sorted(changes), copies
def getfile(self, name, rev):
if rev != self.lastrev:
raise util.Abort(_('internal calling inconsistency'))
path = os.path.join(self.tmppath, name)
data = util.readfile(path)
mode = os.lstat(path).st_mode
mode = (mode & 0111) and 'x' or ''
return data, mode
def gettags(self):
return self.tags
|
|
#!/usr/bin/python
''' verify the cicd operation command coming in from Jenkins via SSH '''
# pylint: disable=invalid-name
# pylint: disable=bare-except
# pylint: disable=too-many-instance-attributes
#Jenkins:
#ssh use-tower1.ops.rhcloud.com -c clustername -o <operation> -e [approved_arg1] -e [approved_arg2] ...
# Current approved arguments:
# "docker-version=<rpmname-version-release>"
# This argument specifies a docker RPM NVR that should be used for openshift-ansible upgrade operations.
# An example "NVR: docker-1.12.6-30.git97ba2c0.el7"
#
# Pull requests which add new arguments must be approved by the security team.
#
#Associate the cluster to operate on with an ssh key in .ssh/authorized_keys
# command=verify-cicd-operation.py -e <environment> <really long key right here>
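#
# Illustrative flow (hypothetical cluster/operation values): if Jenkins requests
#   -c test-key -o status
# then SSH_ORIGINAL_COMMAND arrives here as "-c test-key -o status" (while this
# script itself is invoked with "-e int", matching test-key's environment), and
# once the checks below pass the script exec's:
#   cicd-control.sh -c test-key -o status -d online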
import argparse
import logging
import logging.handlers
import os
import re
import sys
import yaml
PROGRAM_TO_RUN = "/home/opsmedic/aos-cd/git/aos-cd-jobs/tower-scripts/bin/cicd-control.sh"
VALID_OPERATIONS = ['build-ci-msg',
'commit-config-loop',
'delete',
'disable-config-loop',
'disable-statuspage',
'disable-zabbix-maint',
'enable-config-loop',
'enable-statuspage',
'enable-zabbix-maint',
'generate-byo-inventory',
'install',
'legacy-upgrade',
'online-deployer',
'perf1',
'perf2',
'perf3',
'pre-check',
'run-config-loop',
'schedule-all-nodes',
'smoketest',
'status',
'unschedule-extra-nodes',
'update-inventory',
'update-jenkins-imagestream',
'update-yum-extra-repos',
'upgrade',
'upgrade-control-plane',
'upgrade-logging',
'upgrade-metrics',
'upgrade-nodes',
]
# this is a list of extra arguments that are valid and their corresponding regular expression.
VALID_EXTRA_ARGUMENTS = {'cicd_docker_version' : '^$|^[a-zA-Z0-9._-]+$',
'cicd_openshift_ansible_build' : '^$|^[a-zA-Z0-9./-]+$',
'cicd_openshift_version' : '^$|^[a-zA-Z0-9./-]+$',
}
class VerifyCICDOperation(object):
""" Verify CICD SSH Command """
def __init__(self):
""" This is the init function """
self.clustername = None
self.operation = None
self.environment = None
self.deployment_type = None
self.ssh_original_args = None
self.extra_arguments = []
self.cicd_control_args = None
# set up the logger
self.logger = logging.getLogger('verify_cicid_command_logger')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.handlers.SysLogHandler('/dev/log'))
def run(self):
""" Main function to run the script """
self.logger.info("{}: Args: {}, SSH_ORIGINAL_COMMAND: '{}'".format(os.path.basename(__file__), sys.argv,
os.environ.get("SSH_ORIGINAL_COMMAND", "")))
self.cli_parse_args()
self.ssh_original_parse_args()
self.verify_cluster()
self.verify_operation()
if self.ssh_original_args.extra_args:
self.verify_extra_arguments()
self.build_arg_list()
VerifyCICDOperation.runner(*self.cicd_control_args)
def cli_parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Verify CICD Arg Parsing')
parser.add_argument('-e', '--environment', help='Environment', default=None, required=True)
cli_args = parser.parse_args()
self.environment = cli_args.environment
def ssh_original_parse_args(self):
""" parse the args from the SSH_ORIGINAL_COMMAND env var """
parser = argparse.ArgumentParser(description='Verify CICD SSH_ORIGINAL_COMMAND Arg Parsing',
usage="ENV Var: 'SSH_ORIGINAL_COMMAND' needs to be set correctly")
parser.add_argument('-c', '--cluster', help='Ops Cluster name', default=None, required=True)
parser.add_argument('-o', '--operation', help='Operation to perform', choices=VALID_OPERATIONS,
default=None, required=True)
parser.add_argument('-d', '--deployment', help='Deployment Type', choices=['dedicated', 'online', 'pro'],
default='online', required=False)
        parser.add_argument('-e', '--extra-args', help='Extra arguments to pass on', action='append',
default=None, required=False)
# We want to parse the SSH_ORIGINAL_COMMAND ENV var which comes through SSH
ssh_cmd = os.environ.get("SSH_ORIGINAL_COMMAND", "")
ssh_cmd_args = ssh_cmd.split()
if not ssh_cmd_args:
self.exit_with_msg("Environment variable 'SSH_ORIGINAL_COMMAND' is empty. Exiting...")
self.ssh_original_args = parser.parse_args(ssh_cmd_args)
self.clustername = self.ssh_original_args.cluster
self.operation = self.ssh_original_args.operation
self.deployment_type = self.ssh_original_args.deployment
def exit_with_msg(self, message, exit_code=13):
''' Let's do all of our exiting here. With logging '''
self.logger.info("{}: Exiting on Error: {}".format(os.path.basename(__file__), message))
print message
sys.exit(exit_code)
@staticmethod
def get_clusters():
''' get the clusters from the inventory file '''
with open('/etc/ansible/multi_inventory.yaml') as f:
inventory_data = yaml.safe_load(f)
clusters = {}
for account, account_vars in inventory_data['accounts'].iteritems():
if 'cluster_vars' not in account_vars:
continue
for cluster, cluster_vars in account_vars["cluster_vars"]["clusters"].iteritems():
clusters[cluster] = {'environment': cluster_vars["oo_environment"],
'account': account
}
# Hard coding the "test-key cluster, which is int env"
clusters['test-key'] = {'environment': 'int',
'account': 'test'
}
return clusters
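    # A minimal sketch of /etc/ansible/multi_inventory.yaml (shape assumed,
    # inferred from the parsing above; names are hypothetical):
    #
    #   accounts:
    #     my-aws-account:
    #       cluster_vars:
    #         clusters:
    #           prod-cluster-1:
    #             oo_environment: prod
    #
    # which yields {'prod-cluster-1': {'environment': 'prod',
    #                                  'account': 'my-aws-account'}}
    # plus the hard-coded 'test-key' entry.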
def verify_cluster(self):
''' verify the cluster is valid '''
# Sanity check the cluster_id
if not re.match("(^[a-zA-Z0-9][a-zA-Z0-9._-]+$)", self.clustername):
print "Clustername did not match the approved Regular Expression."
sys.exit(13)
clusters = VerifyCICDOperation.get_clusters()
if self.clustername not in clusters:
self.exit_with_msg("Clustername was not found in the list of known clusters. Exiting...")
if self.environment != clusters[self.clustername]['environment']:
self.exit_with_msg("The environment passed does NOT match the cluster's env. Exiting...")
def verify_operation(self):
''' verify the operation is valid '''
# Sanity check the operation
if not re.match("(^[a-zA-Z0-9][a-zA-Z0-9._-]+$)", self.operation):
self.exit_with_msg("operation did not match the approved Regular Expression.")
def verify_extra_arguments(self):
''' verify the extra arguments are valid '''
for arg in self.ssh_original_args.extra_args:
split_arg = arg.split("=")
if len(split_arg) != 2:
self.exit_with_msg("Extra argmument: '{}' did not match the the approved var structure".format(arg))
if split_arg[0] not in VALID_EXTRA_ARGUMENTS.keys():
self.exit_with_msg("Extra argmument: '{}' is not an approved extra argument".format(arg))
if not re.match(VALID_EXTRA_ARGUMENTS[split_arg[0]], split_arg[1]):
self.exit_with_msg("Extra argmument: '{}' does not match approved regular expression: "
"'{}'".format(arg, VALID_EXTRA_ARGUMENTS[split_arg[0]]))
self.extra_arguments.append(arg)
def build_arg_list(self):
''' build a list of args '''
self.cicd_control_args = ['-c', self.clustername, '-o', self.operation,
'-d', self.deployment_type]
for arg in self.extra_arguments:
self.cicd_control_args += ['-e', arg]
@staticmethod
def runner(*args):
''' run the script that is intended '''
try:
os.execlp(PROGRAM_TO_RUN, PROGRAM_TO_RUN, *args)
except:
pass
sys.exit(11)
# runner (exec) never returns
if __name__ == "__main__":
VCO = VerifyCICDOperation()
VCO.run()
|
|
"""
Unit tests for ./yaml_parse.py
"""
import os
import numpy as np
import cPickle
import tempfile
from numpy.testing import assert_
from pylearn2.config.yaml_parse import load, load_path, initialize
from os import environ
from decimal import Decimal
import yaml
from pylearn2.models.mlp import MLP, Sigmoid
from pylearn2.models.rbm import GaussianBinaryRBM
from pylearn2.space import Conv2DSpace
from pylearn2.linear.conv2d import make_random_conv2D
from pylearn2.energy_functions.rbm_energy import grbm_type_1
def test_load_path():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
f.write("a: 23")
loaded = load_path(fname)
assert_(loaded['a'] == 23)
os.remove(fname)
def test_obj():
loaded = load("a: !obj:decimal.Decimal { value : '1.23' }")
assert_(isinstance(loaded['a'], Decimal))
def test_floats():
loaded = load("a: { a: -1.23, b: 1.23e-1 }")
assert_(isinstance(loaded['a']['a'], float))
assert_(isinstance(loaded['a']['b'], float))
assert_((loaded['a']['a'] + 1.23) < 1e-3)
assert_((loaded['a']['b'] - 1.23e-1) < 1e-3)
def test_import():
loaded = load("a: !import 'decimal.Decimal'")
assert_(loaded['a'] == Decimal)
def test_import_string():
loaded = load("a: !import decimal.Decimal")
assert_(loaded['a'] == Decimal)
def test_import_colon():
loaded = load("a: !import:decimal.Decimal")
assert_(loaded['a'] == Decimal)
def test_preproc_rhs():
environ['TEST_VAR'] = '10'
loaded = load('a: "${TEST_VAR}"')
print "loaded['a'] is %s" % loaded['a']
assert_(loaded['a'] == "10")
del environ['TEST_VAR']
def test_preproc_pkl():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
d = ('a', 1)
cPickle.dump(d, f)
environ['TEST_VAR'] = fname
loaded = load('a: !pkl: "${TEST_VAR}"')
assert_(loaded['a'] == d)
del environ['TEST_VAR']
def test_late_preproc_pkl():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
array = np.arange(10)
np.save(f, array)
environ['TEST_VAR'] = fname
loaded = load('a: !obj:pylearn2.datasets.npy_npz.NpyDataset '
'{ file: "${TEST_VAR}"}\n')
# Assert the unsubstituted TEST_VAR is in yaml_src
assert_(loaded['a'].yaml_src.find("${TEST_VAR}") != -1)
del environ['TEST_VAR']
def test_unpickle():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
d = {'a': 1, 'b': 2}
cPickle.dump(d, f)
loaded = load("{'a': !pkl: '%s'}" % fname)
assert_(loaded['a'] == d)
os.remove(fname)
def test_unpickle_key():
fd, fname = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
d = ('a', 1)
cPickle.dump(d, f)
loaded = load("{!pkl: '%s': 50}" % fname)
assert_(loaded.keys()[0] == d)
assert_(loaded.values()[0] == 50)
os.remove(fname)
def test_multi_constructor_obj():
"""
    Tests whether multi_constructor_obj throws an exception when a key
    in the mapping is not a string.
"""
try:
loaded = load("a: !obj:decimal.Decimal { 1 }")
except TypeError as e:
assert str(e) == "Received non string object (1) as key in mapping."
pass
except Exception, e:
error_msg = "Got the unexpected error: %s" % (e)
raise ValueError(error_msg)
def test_duplicate_keywords():
"""
    Tests that duplicate keywords in the YAML raise an error.
"""
initialize()
yamlfile = """{
"model": !obj:pylearn2.models.mlp.MLP {
"layers": [
!obj:pylearn2.models.mlp.Sigmoid {
"layer_name": 'h0',
"dim": 20,
"sparse_init": 15,
}],
"nvis": 784,
"nvis": 384,
}
}"""
try:
loaded = load(yamlfile)
except yaml.constructor.ConstructorError, e:
message = str(e)
assert message.endswith("found duplicate key (nvis)")
pass
except Exception, e:
error_msg = "Got the unexpected error: %s" % (e)
raise TypeError(error_msg)
def test_duplicate_keywords_2():
"""
    Tests that duplicate keywords used as independent parameters work fine.
"""
initialize()
yamlfile = """{
"model": !obj:pylearn2.models.rbm.GaussianBinaryRBM {
"vis_space" : &vis_space !obj:pylearn2.space.Conv2DSpace {
"shape" : [32,32],
"num_channels" : 3
},
"hid_space" : &hid_space !obj:pylearn2.space.Conv2DSpace {
"shape" : [27,27],
"num_channels" : 10
},
"transformer" :
!obj:pylearn2.linear.conv2d.make_random_conv2D {
"irange" : .05,
"input_space" : *vis_space,
"output_space" : *hid_space,
"kernel_shape" : [6,6],
"batch_size" : &batch_size 5
},
"energy_function_class" :
!obj:pylearn2.energy_functions.rbm_energy.grbm_type_1 {},
"learn_sigma" : True,
"init_sigma" : .3333,
"init_bias_hid" : -2.,
"mean_vis" : False,
"sigma_lr_scale" : 1e-3
}
}"""
loaded = load(yamlfile)
def test_parse_null_as_none():
"""
Tests whether None may be passed via yaml kwarg null.
"""
initialize()
yamlfile = """{
"model": !obj:pylearn2.models.autoencoder.Autoencoder {
"nvis" : 1024,
"nhid" : 64,
"act_enc" : Null,
"act_dec" : null
}
}"""
loaded = load(yamlfile)
if __name__ == "__main__":
test_multi_constructor_obj()
test_duplicate_keywords()
test_duplicate_keywords_2()
test_unpickle_key()
|
|
#!/usr/bin/env python
import os, sys, time, getopt, math, sniper_lib
def max_diff(l_notsorted):
l = sorted(filter(lambda x:x != None, l_notsorted))
try:
l = map(float,l)
except ValueError, e:
l = map(str,l)
if len(set(l)) > 1:
return (1,100,True)
else:
return (0.0,0.0,False)
except TypeError, e:
return (0.0,0.0,False)
islendiff = len(l_notsorted) != len(l)
if l[0] == 0.0:
return (l[-1] - l[0], 0.0, islendiff)
else:
return (l[-1] - l[0], 100*(l[-1] / float(l[0]) - 1.0), islendiff)
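# Illustrative behaviour of max_diff (hypothetical values); the tuple is
# (absolute diff, percent diff, force-print flag):
#   max_diff([100.0, 125.0]) -> (25.0, 25.0, False)
#   max_diff([0.0, 5.0])     -> (5.0, 0.0, False)    # percent suppressed for a zero baseline
#   max_diff(['foo', 'bar']) -> (1, 100, True)       # differing non-numeric values always print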
def get_diffs(l):
try:
l = map(float,l)
except (TypeError, ValueError), e:
return [ _ == l[0] for _ in l[1:] ]
if l[0] == 0:
return [ None for _ in l[1:] ]
else:
return [ 100 * (_ / l[0] - 1) for _ in l[1:] ]
def group(number):
s = '%d' % number
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))
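# Illustrative behaviour of group(), which inserts thousands separators:
#   group(1234567) -> '1,234,567'
#   group(42)      -> '42'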
def format_value(d):
if (type(d) is long) or (type(d) is int):
if len(group(d)) < 12:
return '%12s' % group(d)
else:
d = float(d)
if type(d) is float:
if abs(d) > 1:
e = 3 * math.floor(math.log10(abs(d) or 1.) / 3)
return '%12s' % ('%.3f' % (d / 10**e) + '%+03d' % e)
elif abs(d) > .01:
return '%12.6f' % d
else:
return '%12.3e' % d
d = str(d)
if len(d) > 12:
return '%12s' % (d[-11:]+'>')
else:
return '%12s' % d
def format_percent(d):
if d > 500:
return '%11.2fx' % (d / 100.)
else:
return '%+11.1f%%' % d
def format_diff(d):
if d is None:
return ' ----'
elif d is True:
return ' =='
elif d is False:
return ' !='
else:
return format_percent(d)
def print_diff(parmsort = None, restype = 'results', resultdirs = [], partial = None, print_alldiffs = True, print_average = False):
jobs = []
stats = {}
maxkeylen = -1
resultstoprint = []
max_cores = 0
keys = []
for resultdir in resultdirs:
res = sniper_lib.get_results(resultsdir = resultdir, partial = partial)
stats[resultdir] = res[restype]
jobs.append(resultdir)
  # Find all key names and maximum lengths
def key_map((k, v)):
return (k, len(v) if type(v) is list else 0)
allkeys = sum([ map(key_map, s.items()) for s in stats.values() ], [])
keyinfo = {}
for key, length in allkeys:
keyinfo[key] = max(keyinfo.get(key, 0), length)
def get_element(statkey, key, core):
data = stats[statkey].get(key)
if data and type(data) is list and len(data) > core:
return data[core]
else:
return None
def get_average(statkey, key):
data = stats[statkey].get(key)
if data and type(data) is list and len(data) > 0:
return long(sum(data) / float(len(data)))
else:
return None
for key, length in sorted(keyinfo.items(), key = lambda (k, v): k.lower()):
if length > 0:
for core in range(1 if print_average else length):
if print_average:
values = [ get_average(statkey, key) for statkey in jobs ]
else:
values = [ get_element(statkey, key, core) for statkey in jobs ]
if any(values):
diff, max_percent_diff, forceprint = max_diff(values)
diffs = get_diffs(values)
if forceprint or diff != 0:
maxkeylen = max(len(key), maxkeylen) # Consider this key for the maximum key character length
resultstoprint.append((key, core, values, diff, max_percent_diff, diffs))
max_cores = max(max_cores, core)
else:
diff, max_percent_diff, forceprint = max_diff(map(lambda x: x.get(key, None), stats.itervalues()))
diffs = get_diffs([ stats[statkey].get(key, None) for statkey in jobs ])
if forceprint or diff != 0:
maxkeylen = max(len(key), maxkeylen) # Consider this key for the maximum key character length
data = []
for statkey in jobs:
try:
data.append(stats[statkey][key])
except KeyError:
data.append(None)
resultstoprint.append((key, None, data, diff, max_percent_diff, diffs))
# Iterate through the collected data items and print them out
print '%*s ' % (maxkeylen+5, ''),
for statkey in jobs:
print '%12s' % (('%s'%statkey)[-12:]),
if print_alldiffs:
for statkey in jobs[1:]:
print ' '*max(0, 11 - len(str(statkey))) + u'\u0394'.encode('utf8') + str(statkey)[-11:],
else:
print '%12s' % 'max-%-err',
print '%12s' % 'max-abs-err',
print
if parmsort == 'abs':
resultstoprint = sorted(resultstoprint, key = lambda x: abs(x[3]), reverse = True)
elif parmsort == 'percent':
resultstoprint = sorted(resultstoprint, key = lambda x: abs(x[4]), reverse = True)
for (key, core, datalist, abs_diff, percent_diff, diffs) in resultstoprint:
if core != None:
if print_average:
print '%-*s[*] =' % (maxkeylen, key),
else:
print '%-*s[%*u] =' % (maxkeylen, key, len(str(max_cores)), core),
else:
print '%-*s %s =' % (maxkeylen, key, ' '*len(str(max_cores))),
for d in datalist:
if d == None:
print ' ----',
else:
print format_value(d),
if print_alldiffs:
for d in diffs:
print format_diff(d),
else:
print format_percent(percent_diff),
print '%12.3g' % abs_diff,
print
if __name__ == "__main__":
parmsort = None
restype = 'results'
resultdirs = []
partial = None
print_alldiffs = True
print_average = False
def usage():
print 'Usage:', sys.argv[0], '[-h|--help (help)] [--sort-abs] [--sort-percent] [--max-diff] [--average] [--config] [--partial=roi-begin:roi-end] [--] [<dir> [<dirN>]]'
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', [ 'help', 'sort-abs', 'sort-percent', 'max-diff', 'average', 'config', 'partial=' ])
except getopt.GetoptError, e:
print e
usage()
sys.exit(1)
for o, a in opts:
if o == '-h' or o == '--help':
usage()
sys.exit(1)
if o == '--sort-abs':
parmsort = 'abs'
if o == '--sort-percent':
parmsort = 'percent'
if o == '--max-diff':
print_alldiffs = False
if o == '--average':
print_average = True
if o == '--config':
restype = 'config'
if o == '--partial':
partial = tuple(a.split(':'))[0:2]
if args:
for arg in args:
if os.path.isdir(arg):
resultdirs.append(arg)
else:
print 'Warning: Argument [%s] is not a results directory' % arg
pass
else:
print 'At least one directory is required'
sys.exit(1)
with sniper_lib.OutputToLess():
print_diff(parmsort = parmsort, restype = restype, resultdirs = resultdirs, partial = partial, print_alldiffs = print_alldiffs, print_average = print_average)
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from dataclasses import asdict
from logging import getLogger
from third_party.utils import (
assert_all_frozen,
freeze_embeds,
freeze_params,
save_json)
from transformers import TrainerCallback
from transformers.modeling_t5 import T5LayerNorm
from seq2seq.adapters import (AdapterController, MetaAdapterController,
LayerNormHyperNet, AdapterLayersHyperNetController)
from seq2seq.data import TASK_MAPPING
logger = getLogger(__name__)
def create_dir(output_dir):
"""
    Checks whether the output_dir already exists and creates it if not.
Args:
output_dir: path to the output_dir
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
def handle_metrics(split, metrics, output_dir, gcs_bucket=None):
"""
Prints and saves metrics or a general dictionary of results.
Args:
split: one of train, val, test, or training arguments.
metrics: metrics dict
output_dir: where to save the metrics, if gcs_bucket is given
we save the results also on the given bucket.
"""
logger.info(f"***** {split} metrics *****")
for key in sorted(metrics.keys()):
logger.info(f" {key} = {metrics[key]}")
save_json_file(metrics, f"{split}_results.json", output_dir, gcs_bucket)
def save_json_file(json_dict, outfile_name, output_dir, gcs_bucket=None):
"""
Saves the given dictionary as a json file to output_dir and also
the given bucket if given.
"""
save_json(json_dict, os.path.join(output_dir, outfile_name))
if gcs_bucket is not None:
logger.info("***** Uploading results into gs-bucket *****")
upload(output_dir, gcs_bucket)
def get_training_args(arguments_list):
"""
Concatenate all training arguments except evaluation strategy which
    is not JSON serializable.
Args:
arguments_list: list of dataclasses.
Return:
arguments: concatenated arguments.
"""
all_arguments = {}
for arguments in arguments_list:
all_arguments.update(asdict(arguments))
all_arguments.pop("evaluation_strategy")
return all_arguments
def get_last_checkpoint_path(output_dir):
"""
Finds the path for the last checkpoint saved in the output_dir
Args:
output_dir: output_dir
Returns:
path to the last checkpoint saved in the output dir.
"""
paths = glob.glob(os.path.join(output_dir, "checkpoint-*"))
if len(paths) == 0:
return output_dir
else:
checkpoints = [int(checkpoint.split('-')[-1]) for checkpoint in paths]
max_checkpoint = max(checkpoints)
return os.path.join(output_dir, "checkpoint-" + str(max_checkpoint))
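# Example (hypothetical paths): with output_dir containing "checkpoint-500" and
# "checkpoint-1500", get_last_checkpoint_path returns "<output_dir>/checkpoint-1500";
# when no "checkpoint-*" directories exist, output_dir itself is returned.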
def upload(upload_dir: str, gcs_bucket: str) -> None:
"""Uploads the local upload_dir to the gs bucket."""
try:
os.system("/root/google-cloud-sdk/bin/gsutil -m rm -r {}".format(
os.path.join("gs://" + gcs_bucket, upload_dir)))
if os.system("/root/google-cloud-sdk/bin/gsutil -m cp -r {} {}".format(
upload_dir,
os.path.join("gs://" + gcs_bucket, upload_dir))) != 0:
raise Exception('gsutil path not found')
except:
os.system("gsutil -m rm -r {}".format(
os.path.join("gs://" + gcs_bucket, upload_dir)))
os.system("gsutil -m cp -r {} {}".format(
upload_dir,
os.path.join("gs://" + gcs_bucket, upload_dir)))
def use_task_specific_params(model, task):
"""Update config with task specific params during evaluation."""
task_dataset = TASK_MAPPING[task]
task_specific_config = task_dataset.task_specific_config
if task_specific_config is not None:
logger.info(f"using task specific params for {task}: {task_specific_config}")
model.config.update(task_specific_config)
def reset_config(model, config):
"""Resets the config file to the one provided."""
model.config = config
logger.info(f"config is reset to the initial values.")
def freezing_params(model, training_args, model_args, adapter_args):
"""
Freezes the model parameters based on the given setting in the arguments.
Args:
model: the given model.
training_args: defines the training arguments.
model_args: defines the model arguments.
adapter_args: defines the adapters arguments.
"""
# If we are training adapters, we freeze all parameters except the
# parameters of computing task embeddings and adapter controllers.
if training_args.train_adapters:
freeze_params(model)
for name, sub_module in model.named_modules():
if isinstance(sub_module, (MetaAdapterController, AdapterController)):
for param_name, param in sub_module.named_parameters():
param.requires_grad = True
if adapter_args.adapter_config_name == "meta-adapter":
for param in model.task_embedding_controller.parameters():
param.requires_grad = True
if adapter_args.unique_hyper_net:
for name, sub_module in model.named_modules():
if isinstance(sub_module, (AdapterLayersHyperNetController, AdapterController)):
for param_name, param in sub_module.named_parameters():
param.requires_grad = True
if model_args.freeze_model:
freeze_params(model)
    # Freezes all model parameters except the last linear layer of the decoder.
if model_args.freeze_model_but_lm_head:
freeze_params(model)
for param in model.lm_head.parameters():
param.requires_grad = True
if model_args.freeze_embeds:
freeze_embeds(model)
if model_args.freeze_encoder:
freeze_params(model.get_encoder())
assert_all_frozen(model.get_encoder())
    # In case of meta-adapters and if task-embeddings are parametric,
# freezes all parameters except task-embedding parameters.
if model_args.freeze_model_but_task_embeddings:
freeze_params(model)
if adapter_args.adapter_config_name == "meta-adapter" and \
not isinstance(model.task_embedding_controller.task_to_embeddings, dict):
for param in model.task_embedding_controller.task_to_embeddings.parameters():
param.requires_grad = True
# Unfreezes last linear layer of decoder.
if model_args.unfreeze_lm_head:
for param in model.lm_head.parameters():
param.requires_grad = True
# Unfreezes layer norms.
if model_args.unfreeze_layer_norms:
for name, sub_module in model.named_modules():
if isinstance(sub_module, T5LayerNorm):
for param_name, param in sub_module.named_parameters():
param.requires_grad = True
if adapter_args.conditional_layer_norm_for_T5:
for name, sub_module in model.named_modules():
if isinstance(sub_module, LayerNormHyperNet):
for param_name, param in sub_module.named_parameters():
param.requires_grad = True
class T5CheckpointCallback(TrainerCallback):
"""Uploads the output_dir to the gs_bucket."""
def on_save(self, args, state, control, **kwargs):
"""Event called after a checkpoint save."""
if state.is_world_process_zero and args.gcs_bucket is not None:
upload(args.output_dir, args.gcs_bucket)
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import Timeout
import swift.common.utils
class MessageTimeout(Timeout):
def __init__(self, seconds=None, msg=None):
Timeout.__init__(self, seconds=seconds)
self.msg = msg
def __str__(self):
return '%s: %s' % (Timeout.__str__(self), self.msg)
class SwiftException(Exception):
pass
class PutterConnectError(Exception):
def __init__(self, status=None):
self.status = status
class InvalidTimestamp(SwiftException):
pass
class InsufficientStorage(SwiftException):
pass
class FooterNotSupported(SwiftException):
pass
class MultiphasePUTNotSupported(SwiftException):
pass
class SuffixSyncError(SwiftException):
pass
class RangeAlreadyComplete(SwiftException):
pass
class DiskFileError(SwiftException):
pass
class DiskFileNotOpen(DiskFileError):
pass
class DiskFileQuarantined(DiskFileError):
pass
class DiskFileCollision(DiskFileError):
pass
class DiskFileNotExist(DiskFileError):
pass
class DiskFileDeleted(DiskFileNotExist):
def __init__(self, metadata=None):
self.metadata = metadata or {}
self.timestamp = swift.common.utils.Timestamp(
self.metadata.get('X-Timestamp', 0))
class DiskFileExpired(DiskFileDeleted):
pass
class DiskFileNoSpace(DiskFileError):
pass
class DiskFileDeviceUnavailable(DiskFileError):
pass
class DiskFileXattrNotSupported(DiskFileError):
pass
class DiskFileBadMetadataChecksum(DiskFileError):
pass
class DeviceUnavailable(SwiftException):
pass
class InvalidAccountInfo(SwiftException):
pass
class PathNotDir(OSError):
pass
class ChunkReadError(SwiftException):
pass
class ChunkReadTimeout(Timeout):
pass
class ChunkWriteTimeout(Timeout):
pass
class ConnectionTimeout(Timeout):
pass
class ResponseTimeout(Timeout):
pass
class DriveNotMounted(SwiftException):
pass
class LockTimeout(MessageTimeout):
pass
class RingLoadError(SwiftException):
pass
class RingBuilderError(SwiftException):
pass
class RingValidationError(RingBuilderError):
pass
class EmptyRingError(RingBuilderError):
pass
class DuplicateDeviceError(RingBuilderError):
pass
class UnPicklingError(SwiftException):
pass
class FileNotFoundError(SwiftException):
pass
class PermissionError(SwiftException):
pass
class ListingIterError(SwiftException):
pass
class ListingIterNotFound(ListingIterError):
pass
class ListingIterNotAuthorized(ListingIterError):
def __init__(self, aresp):
self.aresp = aresp
class SegmentError(SwiftException):
pass
class LinkIterError(SwiftException):
pass
class ReplicationException(Exception):
pass
class ReplicationLockTimeout(LockTimeout):
pass
class MimeInvalid(SwiftException):
pass
class APIVersionError(SwiftException):
pass
class EncryptionException(SwiftException):
pass
class ClientException(Exception):
def __init__(self, msg, http_scheme='', http_host='', http_port='',
http_path='', http_query='', http_status=None, http_reason='',
http_device='', http_response_content='', http_headers=None):
super(ClientException, self).__init__(msg)
self.msg = msg
self.http_scheme = http_scheme
self.http_host = http_host
self.http_port = http_port
self.http_path = http_path
self.http_query = http_query
self.http_status = http_status
self.http_reason = http_reason
self.http_device = http_device
self.http_response_content = http_response_content
self.http_headers = http_headers or {}
def __str__(self):
a = self.msg
b = ''
if self.http_scheme:
b += '%s://' % self.http_scheme
if self.http_host:
b += self.http_host
if self.http_port:
b += ':%s' % self.http_port
if self.http_path:
b += self.http_path
if self.http_query:
b += '?%s' % self.http_query
if self.http_status:
if b:
b = '%s %s' % (b, self.http_status)
else:
b = str(self.http_status)
if self.http_reason:
if b:
b = '%s %s' % (b, self.http_reason)
else:
b = '- %s' % self.http_reason
if self.http_device:
if b:
b = '%s: device %s' % (b, self.http_device)
else:
b = 'device %s' % self.http_device
if self.http_response_content:
if len(self.http_response_content) <= 60:
b += ' %s' % self.http_response_content
else:
b += ' [first 60 chars of response] %s' \
% self.http_response_content[:60]
return b and '%s: %s' % (a, b) or a
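# Illustrative rendering (hypothetical values):
#   str(ClientException('Object GET failed', http_scheme='http',
#                       http_host='storage.example.com', http_port=8080,
#                       http_path='/v1/AUTH_test/c/o', http_status=404,
#                       http_reason='Not Found'))
#   == 'Object GET failed: http://storage.example.com:8080/v1/AUTH_test/c/o 404 Not Found'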
class InvalidPidFileException(Exception):
pass
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for constructing vocabulary, converting the examples to integer format and building the required masks for batch computation Author: aneelakantan (Arvind Neelakantan)
"""
from __future__ import print_function
import copy
import numbers
import numpy as np
import wiki_data
def return_index(a):
for i in range(len(a)):
if (a[i] == 1.0):
return i
def construct_vocab(data, utility, add_word=False):
ans = []
for example in data:
sent = ""
for word in example.question:
if (not (isinstance(word, numbers.Number))):
sent += word + " "
example.original_nc = copy.deepcopy(example.number_columns)
example.original_wc = copy.deepcopy(example.word_columns)
example.original_nc_names = copy.deepcopy(example.number_column_names)
example.original_wc_names = copy.deepcopy(example.word_column_names)
if (add_word):
continue
number_found = 0
if (not (example.is_bad_example)):
for word in example.question:
if (isinstance(word, numbers.Number)):
number_found += 1
else:
if (not (utility.word_ids.has_key(word))):
utility.words.append(word)
utility.word_count[word] = 1
utility.word_ids[word] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[word]] = word
else:
utility.word_count[word] += 1
for col_name in example.word_column_names:
for word in col_name:
if (isinstance(word, numbers.Number)):
number_found += 1
else:
if (not (utility.word_ids.has_key(word))):
utility.words.append(word)
utility.word_count[word] = 1
utility.word_ids[word] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[word]] = word
else:
utility.word_count[word] += 1
for col_name in example.number_column_names:
for word in col_name:
if (isinstance(word, numbers.Number)):
number_found += 1
else:
if (not (utility.word_ids.has_key(word))):
utility.words.append(word)
utility.word_count[word] = 1
utility.word_ids[word] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[word]] = word
else:
utility.word_count[word] += 1
def word_lookup(word, utility):
if (utility.word_ids.has_key(word)):
return word
else:
return utility.unk_token
def convert_to_int_2d_and_pad(a, utility):
ans = []
#print a
for b in a:
temp = []
if (len(b) > utility.FLAGS.max_entry_length):
b = b[0:utility.FLAGS.max_entry_length]
for remaining in range(len(b), utility.FLAGS.max_entry_length):
b.append(utility.dummy_token)
assert len(b) == utility.FLAGS.max_entry_length
for word in b:
temp.append(utility.word_ids[word_lookup(word, utility)])
ans.append(temp)
#print ans
return ans
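# Illustrative behaviour (hypothetical utility with max_entry_length == 3):
#   convert_to_int_2d_and_pad([['new', 'york'], ['a', 'b', 'c', 'd']], utility)
#   -> [[id('new'), id('york'), id(dummy)], [id('a'), id('b'), id('c')]]
# where id(w) is utility.word_ids[word_lookup(w, utility)]; long entries are
# truncated to max_entry_length and short ones are padded with the dummy token.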
def convert_to_bool_and_pad(a, utility):
a = a.tolist()
for i in range(len(a)):
for j in range(len(a[i])):
if (a[i][j] < 1):
a[i][j] = False
else:
a[i][j] = True
a[i] = a[i] + [False] * (utility.FLAGS.max_elements - len(a[i]))
return a
seen_tables = {}
def partial_match(question, table, number):
answer = []
match = {}
for i in range(len(table)):
temp = []
for j in range(len(table[i])):
temp.append(0)
answer.append(temp)
for i in range(len(table)):
for j in range(len(table[i])):
for word in question:
if (number):
if (word == table[i][j]):
answer[i][j] = 1.0
match[i] = 1.0
else:
if (word in table[i][j]):
answer[i][j] = 1.0
match[i] = 1.0
return answer, match
def exact_match(question, table, number):
#performs exact match operation
answer = []
match = {}
matched_indices = []
for i in range(len(table)):
temp = []
for j in range(len(table[i])):
temp.append(0)
answer.append(temp)
for i in range(len(table)):
for j in range(len(table[i])):
if (number):
for word in question:
if (word == table[i][j]):
match[i] = 1.0
answer[i][j] = 1.0
else:
table_entry = table[i][j]
for k in range(len(question)):
if (k + len(table_entry) <= len(question)):
if (table_entry == question[k:(k + len(table_entry))]):
#if(len(table_entry) == 1):
#print "match: ", table_entry, question
match[i] = 1.0
answer[i][j] = 1.0
matched_indices.append((k, len(table_entry)))
return answer, match, matched_indices
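# Illustrative behaviour of exact_match on a word column (hypothetical data):
#   question = ['is', 'paris', 'big']
#   table    = [[['london'], ['paris']]]   # one column whose cells are token lists
#   exact_match(question, table, number=False)
#   -> answer == [[0, 1.0]], match == {0: 1.0}, matched_indices == [(1, 1)]
#      (cell (0, 1) matches the question tokens starting at position 1, length 1)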
def partial_column_match(question, table, number):
answer = []
for i in range(len(table)):
answer.append(0)
for i in range(len(table)):
for word in question:
if (word in table[i]):
answer[i] = 1.0
return answer
def exact_column_match(question, table, number):
#performs exact match on column names
answer = []
matched_indices = []
for i in range(len(table)):
answer.append(0)
for i in range(len(table)):
table_entry = table[i]
for k in range(len(question)):
if (k + len(table_entry) <= len(question)):
if (table_entry == question[k:(k + len(table_entry))]):
answer[i] = 1.0
matched_indices.append((k, len(table_entry)))
return answer, matched_indices
def get_max_entry(a):
e = {}
for w in a:
if (w != "UNK, "):
if (e.has_key(w)):
e[w] += 1
else:
e[w] = 1
if (len(e) > 0):
(key, val) = sorted(e.items(), key=lambda x: -1 * x[1])[0]
if (val > 1):
return key
else:
return -1.0
else:
return -1.0
def list_join(a):
ans = ""
for w in a:
ans += str(w) + ", "
return ans
def group_by_max(table, number):
#computes the most frequently occurring entry in a column
answer = []
for i in range(len(table)):
temp = []
for j in range(len(table[i])):
temp.append(0)
answer.append(temp)
for i in range(len(table)):
if (number):
curr = table[i]
else:
curr = [list_join(w) for w in table[i]]
max_entry = get_max_entry(curr)
#print i, max_entry
for j in range(len(curr)):
if (max_entry == curr[j]):
answer[i][j] = 1.0
else:
answer[i][j] = 0.0
return answer
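# Illustrative behaviour of group_by_max on a number column (hypothetical data):
#   group_by_max([[3, 5, 3, 7]], number=True) -> [[1.0, 0.0, 1.0, 0.0]]
#   (3 is the only entry occurring more than once, so only its cells are flagged)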
def pick_one(a):
for i in range(len(a)):
if (1.0 in a[i]):
return True
return False
def check_processed_cols(col, utility):
return True in [
True for y in col
if (y != utility.FLAGS.pad_int and y !=
utility.FLAGS.bad_number_pre_process)
]
def complete_wiki_processing(data, utility, train=True):
#convert to integers and padding
processed_data = []
num_bad_examples = 0
for example in data:
number_found = 0
if (example.is_bad_example):
num_bad_examples += 1
if (not (example.is_bad_example)):
example.string_question = example.question[:]
#entry match
example.processed_number_columns = example.processed_number_columns[:]
example.processed_word_columns = example.processed_word_columns[:]
example.word_exact_match, word_match, matched_indices = exact_match(
example.string_question, example.original_wc, number=False)
example.number_exact_match, number_match, _ = exact_match(
example.string_question, example.original_nc, number=True)
if (not (pick_one(example.word_exact_match)) and not (
pick_one(example.number_exact_match))):
assert len(word_match) == 0
assert len(number_match) == 0
example.word_exact_match, word_match = partial_match(
example.string_question, example.original_wc, number=False)
#group by max
example.word_group_by_max = group_by_max(example.original_wc, False)
example.number_group_by_max = group_by_max(example.original_nc, True)
#column name match
example.word_column_exact_match, wcol_matched_indices = exact_column_match(
example.string_question, example.original_wc_names, number=False)
example.number_column_exact_match, ncol_matched_indices = exact_column_match(
example.string_question, example.original_nc_names, number=False)
if (not (1.0 in example.word_column_exact_match) and not (
1.0 in example.number_column_exact_match)):
example.word_column_exact_match = partial_column_match(
example.string_question, example.original_wc_names, number=False)
example.number_column_exact_match = partial_column_match(
example.string_question, example.original_nc_names, number=False)
if (len(word_match) > 0 or len(number_match) > 0):
example.question.append(utility.entry_match_token)
if (1.0 in example.word_column_exact_match or
1.0 in example.number_column_exact_match):
example.question.append(utility.column_match_token)
example.string_question = example.question[:]
example.number_lookup_matrix = np.transpose(
example.number_lookup_matrix)[:]
example.word_lookup_matrix = np.transpose(example.word_lookup_matrix)[:]
example.columns = example.number_columns[:]
example.word_columns = example.word_columns[:]
example.len_total_cols = len(example.word_column_names) + len(
example.number_column_names)
example.column_names = example.number_column_names[:]
example.word_column_names = example.word_column_names[:]
example.string_column_names = example.number_column_names[:]
example.string_word_column_names = example.word_column_names[:]
example.sorted_number_index = []
example.sorted_word_index = []
example.column_mask = []
example.word_column_mask = []
example.processed_column_mask = []
example.processed_word_column_mask = []
example.word_column_entry_mask = []
example.question_attention_mask = []
example.question_number = example.question_number_1 = -1
example.question_attention_mask = []
example.ordinal_question = []
example.ordinal_question_one = []
new_question = []
if (len(example.number_columns) > 0):
example.len_col = len(example.number_columns[0])
else:
example.len_col = len(example.word_columns[0])
for (start, length) in matched_indices:
for j in range(length):
example.question[start + j] = utility.unk_token
#print example.question
for word in example.question:
if (isinstance(word, numbers.Number) or wiki_data.is_date(word)):
if (not (isinstance(word, numbers.Number)) and
wiki_data.is_date(word)):
word = word.replace("X", "").replace("-", "")
number_found += 1
if (number_found == 1):
example.question_number = word
if (len(example.ordinal_question) > 0):
example.ordinal_question[len(example.ordinal_question) - 1] = 1.0
else:
example.ordinal_question.append(1.0)
elif (number_found == 2):
example.question_number_1 = word
if (len(example.ordinal_question_one) > 0):
example.ordinal_question_one[len(example.ordinal_question_one) -
1] = 1.0
else:
example.ordinal_question_one.append(1.0)
else:
new_question.append(word)
example.ordinal_question.append(0.0)
example.ordinal_question_one.append(0.0)
example.question = [
utility.word_ids[word_lookup(w, utility)] for w in new_question
]
example.question_attention_mask = [0.0] * len(example.question)
#when the first question number occurs before a word
example.ordinal_question = example.ordinal_question[0:len(
example.question)]
example.ordinal_question_one = example.ordinal_question_one[0:len(
example.question)]
#question-padding
example.question = [utility.word_ids[utility.dummy_token]] * (
utility.FLAGS.question_length - len(example.question)
) + example.question
example.question_attention_mask = [-10000.0] * (
utility.FLAGS.question_length - len(example.question_attention_mask)
) + example.question_attention_mask
example.ordinal_question = [0.0] * (utility.FLAGS.question_length -
len(example.ordinal_question)
) + example.ordinal_question
example.ordinal_question_one = [0.0] * (utility.FLAGS.question_length -
len(example.ordinal_question_one)
) + example.ordinal_question_one
if (True):
#number columns and related-padding
num_cols = len(example.columns)
start = 0
for column in example.number_columns:
if (check_processed_cols(example.processed_number_columns[start],
utility)):
example.processed_column_mask.append(0.0)
sorted_index = sorted(
range(len(example.processed_number_columns[start])),
key=lambda k: example.processed_number_columns[start][k],
reverse=True)
sorted_index = sorted_index + [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements - len(sorted_index))
example.sorted_number_index.append(sorted_index)
example.columns[start] = column + [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements - len(column))
example.processed_number_columns[start] += [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements -
len(example.processed_number_columns[start]))
start += 1
example.column_mask.append(0.0)
for remaining in range(num_cols, utility.FLAGS.max_number_cols):
example.sorted_number_index.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.columns.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.processed_number_columns.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.number_exact_match.append([0.0] *
(utility.FLAGS.max_elements))
example.number_group_by_max.append([0.0] *
(utility.FLAGS.max_elements))
example.column_mask.append(-100000000.0)
example.processed_column_mask.append(-100000000.0)
example.number_column_exact_match.append(0.0)
example.column_names.append([utility.dummy_token])
#word column and related-padding
start = 0
word_num_cols = len(example.word_columns)
for column in example.word_columns:
if (check_processed_cols(example.processed_word_columns[start],
utility)):
example.processed_word_column_mask.append(0.0)
sorted_index = sorted(
range(len(example.processed_word_columns[start])),
key=lambda k: example.processed_word_columns[start][k],
reverse=True)
sorted_index = sorted_index + [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements - len(sorted_index))
example.sorted_word_index.append(sorted_index)
column = convert_to_int_2d_and_pad(column, utility)
example.word_columns[start] = column + [[
utility.word_ids[utility.dummy_token]
] * utility.FLAGS.max_entry_length] * (utility.FLAGS.max_elements -
len(column))
example.processed_word_columns[start] += [utility.FLAGS.pad_int] * (
utility.FLAGS.max_elements -
len(example.processed_word_columns[start]))
example.word_column_entry_mask.append([0] * len(column) + [
utility.word_ids[utility.dummy_token]
] * (utility.FLAGS.max_elements - len(column)))
start += 1
example.word_column_mask.append(0.0)
for remaining in range(word_num_cols, utility.FLAGS.max_word_cols):
example.sorted_word_index.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.word_columns.append([[utility.word_ids[utility.dummy_token]] *
utility.FLAGS.max_entry_length] *
(utility.FLAGS.max_elements))
example.word_column_entry_mask.append(
[utility.word_ids[utility.dummy_token]] *
(utility.FLAGS.max_elements))
example.word_exact_match.append([0.0] * (utility.FLAGS.max_elements))
example.word_group_by_max.append([0.0] * (utility.FLAGS.max_elements))
example.processed_word_columns.append([utility.FLAGS.pad_int] *
(utility.FLAGS.max_elements))
example.word_column_mask.append(-100000000.0)
example.processed_word_column_mask.append(-100000000.0)
example.word_column_exact_match.append(0.0)
example.word_column_names.append([utility.dummy_token] *
utility.FLAGS.max_entry_length)
seen_tables[example.table_key] = 1
#convert column and word column names to integers
example.column_ids = convert_to_int_2d_and_pad(example.column_names,
utility)
example.word_column_ids = convert_to_int_2d_and_pad(
example.word_column_names, utility)
for i_em in range(len(example.number_exact_match)):
example.number_exact_match[i_em] = example.number_exact_match[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.number_exact_match[i_em]))
example.number_group_by_max[i_em] = example.number_group_by_max[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.number_group_by_max[i_em]))
for i_em in range(len(example.word_exact_match)):
example.word_exact_match[i_em] = example.word_exact_match[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.word_exact_match[i_em]))
example.word_group_by_max[i_em] = example.word_group_by_max[
i_em] + [0.0] * (utility.FLAGS.max_elements -
len(example.word_group_by_max[i_em]))
example.exact_match = example.number_exact_match + example.word_exact_match
example.group_by_max = example.number_group_by_max + example.word_group_by_max
example.exact_column_match = example.number_column_exact_match + example.word_column_exact_match
#answer and related mask, padding
if (example.is_lookup):
example.answer = example.calc_answer
example.number_print_answer = example.number_lookup_matrix.tolist()
example.word_print_answer = example.word_lookup_matrix.tolist()
for i_answer in range(len(example.number_print_answer)):
example.number_print_answer[i_answer] = example.number_print_answer[
i_answer] + [0.0] * (utility.FLAGS.max_elements -
len(example.number_print_answer[i_answer]))
for i_answer in range(len(example.word_print_answer)):
example.word_print_answer[i_answer] = example.word_print_answer[
i_answer] + [0.0] * (utility.FLAGS.max_elements -
len(example.word_print_answer[i_answer]))
example.number_lookup_matrix = convert_to_bool_and_pad(
example.number_lookup_matrix, utility)
example.word_lookup_matrix = convert_to_bool_and_pad(
example.word_lookup_matrix, utility)
for remaining in range(num_cols, utility.FLAGS.max_number_cols):
example.number_lookup_matrix.append([False] *
utility.FLAGS.max_elements)
example.number_print_answer.append([0.0] * utility.FLAGS.max_elements)
for remaining in range(word_num_cols, utility.FLAGS.max_word_cols):
example.word_lookup_matrix.append([False] *
utility.FLAGS.max_elements)
example.word_print_answer.append([0.0] * utility.FLAGS.max_elements)
example.print_answer = example.number_print_answer + example.word_print_answer
else:
example.answer = example.calc_answer
example.print_answer = [[0.0] * (utility.FLAGS.max_elements)] * (
utility.FLAGS.max_number_cols + utility.FLAGS.max_word_cols)
#question_number masks
if (example.question_number == -1):
example.question_number_mask = np.zeros([utility.FLAGS.max_elements])
else:
example.question_number_mask = np.ones([utility.FLAGS.max_elements])
if (example.question_number_1 == -1):
example.question_number_one_mask = -10000.0
else:
example.question_number_one_mask = np.float64(0.0)
if (example.len_col > utility.FLAGS.max_elements):
continue
processed_data.append(example)
return processed_data
def add_special_words(utility):
utility.words.append(utility.entry_match_token)
utility.word_ids[utility.entry_match_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.entry_match_token]] = utility.entry_match_token
utility.entry_match_token_id = utility.word_ids[utility.entry_match_token]
print("entry match token: ", utility.word_ids[
utility.entry_match_token], utility.entry_match_token_id)
utility.words.append(utility.column_match_token)
utility.word_ids[utility.column_match_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.column_match_token]] = utility.column_match_token
utility.column_match_token_id = utility.word_ids[utility.column_match_token]
print("entry match token: ", utility.word_ids[
utility.column_match_token], utility.column_match_token_id)
utility.words.append(utility.dummy_token)
utility.word_ids[utility.dummy_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.dummy_token]] = utility.dummy_token
utility.dummy_token_id = utility.word_ids[utility.dummy_token]
utility.words.append(utility.unk_token)
utility.word_ids[utility.unk_token] = len(utility.word_ids)
utility.reverse_word_ids[utility.word_ids[
utility.unk_token]] = utility.unk_token
def perform_word_cutoff(utility):
if (utility.FLAGS.word_cutoff > 0):
for word in utility.word_ids.keys():
if (utility.word_count.has_key(word) and utility.word_count[word] <
utility.FLAGS.word_cutoff and word != utility.unk_token and
word != utility.dummy_token and word != utility.entry_match_token and
word != utility.column_match_token):
utility.word_ids.pop(word)
utility.words.remove(word)
def word_dropout(question, utility):
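  # Note: a token is swapped for UNK when random() exceeds word_dropout_prob,
  # so in this implementation the flag effectively behaves as a keep probability.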
if (utility.FLAGS.word_dropout_prob > 0.0):
new_question = []
for i in range(len(question)):
if (question[i] != utility.dummy_token_id and
utility.random.random() > utility.FLAGS.word_dropout_prob):
new_question.append(utility.word_ids[utility.unk_token])
else:
new_question.append(question[i])
return new_question
else:
return question
def generate_feed_dict(data, curr, batch_size, gr, train=False, utility=None):
#prepare feed dict dictionary
feed_dict = {}
feed_examples = []
for j in range(batch_size):
feed_examples.append(data[curr + j])
if (train):
feed_dict[gr.batch_question] = [
word_dropout(feed_examples[j].question, utility)
for j in range(batch_size)
]
else:
feed_dict[gr.batch_question] = [
feed_examples[j].question for j in range(batch_size)
]
feed_dict[gr.batch_question_attention_mask] = [
feed_examples[j].question_attention_mask for j in range(batch_size)
]
feed_dict[
gr.batch_answer] = [feed_examples[j].answer for j in range(batch_size)]
feed_dict[gr.batch_number_column] = [
feed_examples[j].columns for j in range(batch_size)
]
feed_dict[gr.batch_processed_number_column] = [
feed_examples[j].processed_number_columns for j in range(batch_size)
]
feed_dict[gr.batch_processed_sorted_index_number_column] = [
feed_examples[j].sorted_number_index for j in range(batch_size)
]
feed_dict[gr.batch_processed_sorted_index_word_column] = [
feed_examples[j].sorted_word_index for j in range(batch_size)
]
feed_dict[gr.batch_question_number] = np.array(
[feed_examples[j].question_number for j in range(batch_size)]).reshape(
(batch_size, 1))
feed_dict[gr.batch_question_number_one] = np.array(
[feed_examples[j].question_number_1 for j in range(batch_size)]).reshape(
(batch_size, 1))
feed_dict[gr.batch_question_number_mask] = [
feed_examples[j].question_number_mask for j in range(batch_size)
]
feed_dict[gr.batch_question_number_one_mask] = np.array(
[feed_examples[j].question_number_one_mask for j in range(batch_size)
]).reshape((batch_size, 1))
feed_dict[gr.batch_print_answer] = [
feed_examples[j].print_answer for j in range(batch_size)
]
feed_dict[gr.batch_exact_match] = [
feed_examples[j].exact_match for j in range(batch_size)
]
feed_dict[gr.batch_group_by_max] = [
feed_examples[j].group_by_max for j in range(batch_size)
]
feed_dict[gr.batch_column_exact_match] = [
feed_examples[j].exact_column_match for j in range(batch_size)
]
feed_dict[gr.batch_ordinal_question] = [
feed_examples[j].ordinal_question for j in range(batch_size)
]
feed_dict[gr.batch_ordinal_question_one] = [
feed_examples[j].ordinal_question_one for j in range(batch_size)
]
feed_dict[gr.batch_number_column_mask] = [
feed_examples[j].column_mask for j in range(batch_size)
]
feed_dict[gr.batch_number_column_names] = [
feed_examples[j].column_ids for j in range(batch_size)
]
feed_dict[gr.batch_processed_word_column] = [
feed_examples[j].processed_word_columns for j in range(batch_size)
]
feed_dict[gr.batch_word_column_mask] = [
feed_examples[j].word_column_mask for j in range(batch_size)
]
feed_dict[gr.batch_word_column_names] = [
feed_examples[j].word_column_ids for j in range(batch_size)
]
feed_dict[gr.batch_word_column_entry_mask] = [
feed_examples[j].word_column_entry_mask for j in range(batch_size)
]
return feed_dict
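# Minimal usage sketch for word_dropout (illustrative only, not part of the
# original pipeline): the "utility" object below is a stand-in for the real
# utility/FLAGS objects that the rest of this module builds.
if __name__ == "__main__":
  import random
  from types import SimpleNamespace
  _utility = SimpleNamespace(
      FLAGS=SimpleNamespace(word_dropout_prob=0.9),
      dummy_token_id=0,
      unk_token="unk",
      word_ids={"unk": 1},
      random=random.Random(0))
  _question = [0, 5, 7, 9]  # 0 is the dummy/padding token id and is never dropped
  print(word_dropout(_question, _utility))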
|
|
"""Tests for the forms of the ``task_list`` app."""
from datetime import date
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django_libs.tests.factories import UserFactory
from ..forms import (
TaskCreateForm,
TaskDoneToggleForm,
TaskListCreateForm,
TaskListUpdateForm,
TaskUpdateForm,
TemplateForm,
)
from ..models import Parent, Task, TaskList
from .factories import TaskFactory, TaskListFactory
class TaskCreateFormTestCase(TestCase):
"""Test for the ``TaskCreateForm`` form class."""
longMessage = True
def setUp(self):
self.user = UserFactory()
self.task_list = TaskListFactory()
self.task_list.users.add(self.user)
self.valid_data = {
'title': 'task title',
}
def test_form_validates_and_saves(self):
form = TaskCreateForm(data=self.valid_data, user=self.user,
task_list=self.task_list)
self.assertTrue(form.is_valid(), msg=(
'With correct data, the form should be valid.'))
instance = form.save()
        self.assertEqual(Task.objects.count(), 1, msg=(
'After save is called, there should be one task in the db.'))
self.assertEqual(instance.assigned_to.all()[0], self.user, msg=(
'After save, the user should be assigned to the task.'))
form = TaskCreateForm(data={}, user=self.user,
task_list=self.task_list)
self.assertFalse(form.is_valid(), msg=(
'Without correct data, the form should not be valid.'))
class TaskDoneToggleFormTestCase(TestCase):
"""Test for the ``TaskDoneToggleForm`` form class."""
longMessage = True
def setUp(self):
self.task = TaskFactory()
self.valid_data = {'task': self.task.pk}
def test_form(self):
form = TaskDoneToggleForm(data=self.valid_data)
self.assertTrue(form.is_valid(), msg='The form should be valid.')
form.save()
self.assertEqual(type(Task.objects.get().is_done), date, msg=(
'After save is called, is_done should be a date.'))
form.save()
self.assertEqual(Task.objects.get().is_done, None, msg=(
'After save is called again, is_done should be None again.'))
class TaskListCreateFormTestCase(TestCase):
"""Test for the ``TaskListCreateForm`` form class."""
longMessage = True
def setUp(self):
self.user = UserFactory()
self.template = TaskListFactory(is_template=True)
self.template.users.add(self.user)
self.task = TaskFactory(task_list=self.template)
self.valid_data = {
'title': 'task list title',
}
self.from_template_data = {
'title': 'task list title',
'template': self.template.pk,
}
self.content_object = UserFactory()
self.user_ctype = ContentType.objects.get_for_model(User)
def test_form_validates_and_saves(self):
form = TaskListCreateForm(data=self.valid_data, user=self.user,
ctype_pk=self.user_ctype.pk,
obj_pk=self.content_object.pk)
self.assertTrue(form.is_valid(), msg=(
'With correct data, the form should be valid.'))
instance = form.save()
self.assertEqual(
TaskList.objects.filter(is_template=False).count(), 1, msg=(
'After save is called, there should be one task list in the'
' db.'))
self.assertEqual(Parent.objects.count(), 1, msg=(
'There should be one Parent object in the database.'))
parent = Parent.objects.get()
self.assertEqual(parent.task_list, instance, msg=(
'The Parent object should have the task list instance assigned.'))
self.assertEqual(parent.content_object, self.content_object, msg=(
'The Parent object should have the designated user assigned.'))
self.assertEqual(instance.users.all()[0], self.user, msg=(
'After save, the user should be assigned to the list.'))
form = TaskListCreateForm(data={}, user=self.user)
self.assertFalse(form.is_valid(), msg=(
'Without correct data, the form should not be valid.'))
form = TaskListCreateForm(data=self.from_template_data, user=self.user)
self.assertTrue(form.is_valid(), msg=(
'With correct data, the form should be valid. Errors: {0}'.format(
form.errors)))
form.save()
self.assertEqual(
TaskList.objects.filter(is_template=False).count(), 2, msg=(
'After save is called, there should be another task list in'
' the db.'))
self.assertEqual(Task.objects.count(), 2, msg=(
'After save is called, there should be two tasks in the db.'))
class TaskListUpdateFormTestCase(TestCase):
"""Test for the ``TaskListUpdateForm`` form class."""
longMessage = True
def setUp(self):
self.user = UserFactory()
self.task = TaskFactory()
self.task_list = self.task.task_list
self.task_list.users.add(self.user)
self.other_user = UserFactory()
self.valid_data = {
'title': 'task list title',
'users': [self.other_user.pk],
}
def test_form_validates_and_saves(self):
form = TaskListUpdateForm(data=self.valid_data, user=self.user,
instance=self.task_list)
self.assertTrue(form.is_valid(), msg=(
'With correct data, the form should be valid.'))
instance = form.save()
self.assertEqual(TaskList.objects.count(), 1, msg=(
'After save is called, there should be one task list in the db.'))
self.assertEqual(self.task_list.users.count(), 2, msg=(
'There should be two users assigned.'))
self.assertEqual(instance.users.all()[0], self.user, msg=(
'After save, the user should be assigned to the list.'))
form = TaskListUpdateForm(data={}, user=self.user,
instance=self.task_list)
self.assertFalse(form.is_valid(), msg=(
'Without correct data, the form should not be valid.'))
class TaskUpdateFormTestCase(TestCase):
"""Test for the ``TaskUpdateForm`` form class."""
longMessage = True
def setUp(self):
self.user = UserFactory()
self.task = TaskFactory()
self.task.task_list.users.add(self.user)
self.task.assigned_to.add(self.user)
self.other_user = UserFactory()
self.valid_data = {
'title': 'task list title',
'assigned_to': [self.other_user.pk],
'priority': '3',
}
def test_form_validates_and_saves(self):
bad_data = self.valid_data.copy()
bad_data.update({'assigned_to': [self.other_user.pk]})
form = TaskUpdateForm(data=bad_data, user=self.user,
task_list=self.task.task_list,
instance=self.task)
self.assertFalse(form.is_valid(), msg=(
'With incorrect data, the form should not be valid.'))
self.task.task_list.users.add(self.other_user)
form = TaskUpdateForm(data=self.valid_data, user=self.user,
task_list=self.task.task_list,
instance=self.task)
self.assertTrue(form.is_valid(), msg=(
'With correct data, the form should be valid.'))
instance = form.save()
self.assertEqual(Task.objects.count(), 1, msg=(
'After save is called, there should be one task in the db.'))
self.assertEqual(self.task.assigned_to.count(), 2, msg=(
'There should be two users assigned.'))
self.assertEqual(instance.assigned_to.all()[0], self.user, msg=(
'After save, the user should be assigned to the task.'))
form = TaskUpdateForm(data={}, user=self.user,
task_list=self.task.task_list,
instance=instance)
self.assertFalse(form.is_valid(), msg=(
'Without correct data, the form should not be valid.'))
class TemplateFormTestCase(TestCase):
"""Tests for the ``TemplateForm`` form class."""
longMessage = True
def setUp(self):
self.user = UserFactory()
self.task_list = TaskListFactory(title='title')
self.task_list.users.add(self.user)
self.task = TaskFactory(task_list=self.task_list)
self.existing_template = TaskListFactory(is_template=True, title='bar')
self.existing_template.users.add(self.user)
self.valid_data = {'title': 'my title'}
def test_form(self):
form = TemplateForm(data=self.valid_data, user=self.user,
instance=self.task_list)
self.assertTrue(form.is_valid(), msg=(
'With correct data, the form should be valid.'))
instance = form.save()
self.assertEqual(TaskList.objects.count(), 3, msg=(
'After the list is saved as a template, there should be 3 task'
' lists in the db.'))
self.assertEqual(Task.objects.count(), 2, msg=(
'After the list is saved as a template, there should be 2 tasks'
' in the db.'))
bad_data = self.valid_data.copy()
bad_data.update({'title': self.existing_template.title})
form = TemplateForm(data=bad_data, user=self.user,
instance=instance)
self.assertFalse(form.is_valid(), msg=(
'With incorrect data, the form should not be valid.'))
data = self.valid_data.copy()
data.update({'title': 'changed the title'})
form = TemplateForm(data=data, user=self.user, instance=instance)
self.assertTrue(form.is_valid(), msg=(
'With correct data, the form should be valid.'))
form.save()
self.assertEqual(TaskList.objects.count(), 3, msg=(
'After template is saved again, there should still be 3 task'
' lists in the db.'))
self.assertEqual(Task.objects.count(), 2, msg=(
'After the template is saved again, there should still be 2 tasks'
' in the db.'))
|
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [["-prematurewitness", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 outputs of 0.001 btc (100,000 satoshis) each
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransaction(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
blocktools.add_witness_commitment(block)
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
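# Sketch only (not called from run_test above): an additional case would follow
# the test_<description> convention from the module docstring and reuse the
# helpers defined in this file, for example:
def test_example_bumpfee_template(rbf_node, dest_address):
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert_equal(bumped_tx["errors"], [])
    assert bumped_tx["txid"] in rbf_node.getrawmempool()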
if __name__ == "__main__":
BumpFeeTest().main()
|
|
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to <gamedir>/conf/settings.py.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
import os
import sys
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on. Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ['0.0.0.0']
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see OOB_FUNC_MODULE). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(8000, 5001)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ['0.0.0.0']
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ['127.0.0.1']
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only fall back to the ajax version if
# the browser is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient.
WEBSOCKET_CLIENT_PORT = 8001
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = '0.0.0.0'
# Actual URL for webclient component to reach the websocket.
# The WEBSOCKET_CLIENT_PORT will be automatically appended to this URL.
WEBSOCKET_CLIENT_URL = "ws://localhost"
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [8022]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ['0.0.0.0']
# Activate SSL protocol (SecureSocketLibrary)
SSL_ENABLED = False
# Ports to use for SSL
SSL_PORTS = [4001]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ['0.0.0.0']
# Activate custom websocket support. This is unrelated to the websocket client!
# This is intended to be used by optional third-party connections/applications
# or clients.
WEBSOCKET_ENABLED = False
# Ports to use for Websockets
WEBSOCKET_PORTS = [8021]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_INTERFACES = ['0.0.0.0']
# This determines whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# Path to the lib directory containing the bulk of the codebase's code.
EVENNIA_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the game directory (containing the database file if using sqlite).
if len(sys.argv) > 1 and sys.argv[1] == 'test':
# unittesting mode
GAME_DIR = os.getcwd()
else:
# Fallback location (will be replaced by the actual game dir at runtime)
GAME_DIR = os.path.join(EVENNIA_DIR, 'game_template')
# Place to put log files
LOG_DIR = os.path.join(GAME_DIR, 'server', 'logs')
SERVER_LOG_FILE = os.path.join(LOG_DIR, 'server.log')
PORTAL_LOG_FILE = os.path.join(LOG_DIR, 'portal.log')
HTTP_LOG_FILE = os.path.join(LOG_DIR, 'http_requests.log')
# Rotate log files when server and/or portal stops. This will keep log
# file sizes down. Turn off to get ever growing log files and never
# lose log info.
CYCLE_LOGFILES = True
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'UTC'
# Activate time zone in datetimes
USE_TZ = True
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = (
'evennia.web.utils.backends.CaseInsensitiveModelBackend',)
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = 'en-us'
# How long (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = 3600
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer.
IDLE_COMMAND = "idle"
# The set of encodings tried. A Player object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your players are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
# Regular expression applied to all output to a given session in order
# to strip away characters (usually various forms of decorations) for the benefit
# of users with screen readers. Note that ANSI/MXP doesn't need to
# be stripped this way, that is handled automatically.
SCREENREADER_REGEX_STRIP = r"\+-+|\+$|\+~|--+|~~+|==+"
# The game server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = 'localhost'
AMP_PORT = 5000
AMP_INTERFACE = '127.0.0.1'
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flushes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200 # (MB)
# This determines how many connections per second the Portal should
# accept, as a DoS countermeasure. If the rate exceeds this number, incoming
# connections will be queued to this rate, so none will be lost.
# Must be set to a value > 0.
MAX_CONNECTION_RATE = 5
# Determine how many commands per second a given Session is allowed
# to send to the Portal via a connected protocol. Too high rate will
# drop the command and echo a warning. Note that this will also cap
# OOB messages so don't set it too low if you expect a lot of events
# from the client! To turn the limiter off, set to <= 0.
MAX_COMMAND_RATE = 80
# The warning to echo back to users if they send commands too fast
COMMAND_RATE_WARNING = "You entered commands too fast. Wait a moment and try again."
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.postgresql_psycopg2',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - db admin password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(GAME_DIR, 'server', 'evennia.db3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}}
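# Illustrative only: following the field descriptions above, a MySQL setup
# copy-pasted into <gamedir>/conf/settings.py could look like:
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.mysql',
#             'NAME': 'evennia',
#             'USER': 'evennia_admin',
#             'PASSWORD': 'secret',
#             'HOST': 'localhost',
#             'PORT': '3306'}}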
# How long the django-database connection should be kept open, in seconds.
# If you get errors about the database having gone away after long idle
# periods, shorten this value (e.g. MySQL defaults to a timeout of 8 hrs)
CONN_MAX_AGE = 3600 * 7
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in contrib/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# The handler that outputs errors when searching
# objects using object.search().
SEARCH_AT_RESULT = "evennia.commands.cmdparser.at_search_result"
# The parser used in order to separate multiple
# object matches (so you can separate between same-named
# objects without using dbrefs).
SEARCH_AT_MULTIMATCH_INPUT = "evennia.commands.cmdparser.at_multimatch_input"
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "server.conf.connection_screens"
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = "server.conf.at_initial_setup"
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = "server.conf.at_server_startstop"
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = ["server.conf.server_services_plugins"]
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = ["server.conf.portal_services_plugins"]
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many players you have etc.
MSSP_META_MODULE = "server.conf.mssp"
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("evennia.locks.lockfuncs", "server.conf.lockfuncs",)
# Module holding OOB (Out of Band) hook objects. This allows for customization
# and expansion of which hooks OOB protocols are allowed to call on the server
# protocols for attaching tracker hooks for when various object fields change.
OOB_PLUGIN_MODULES = ["evennia.server.oob_cmds", "server.conf.oobfuncs"]
# Module holding settings/actions for the dummyrunner program (see the
# dummyrunner for more information)
DUMMYRUNNER_SETTINGS_MODULE = "evennia.server.profiling.dummyrunner_settings"
######################################################################
# Default command sets
######################################################################
# Note that with the exception of the unloggedin set (which is not
# stored anywhere in the database), changing these paths will only affect
# NEW created characters/objects, not those already in play. So if you plan to
# change this, it's recommended you do it before having created a lot of objects
# (or simply reset the database after the change for simplicity).
# Command set used on session before player has logged in
CMDSET_UNLOGGEDIN = "commands.default_cmdsets.UnloggedinCmdSet"
# Command set used on the logged-in session
CMDSET_SESSION = "commands.default_cmdsets.SessionCmdSet"
# Default set for logged in player with characters (fallback)
CMDSET_CHARACTER = "commands.default_cmdsets.CharacterCmdSet"
# Command set for players without a character (ooc)
CMDSET_PLAYER = "commands.default_cmdsets.PlayerCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["commands", "evennia", "contribs"]
# Line editor path. Points to a line editor class that commands may use to give
# users extended editing control. See the default path for a reference implementation
# and usage.
LINE_EDITOR = 'evennia.commands.default.lineeditor.LineEditor'
######################################################################
# Typeclasses and other paths
######################################################################
# Server-side session class used.
SERVER_SESSION_CLASS = "evennia.server.serversession.ServerSession"
# These are paths that will be prefixed to the paths given if the
# immediately entered path fail to find a typeclass. It allows for
# shorter input strings. They must either base off the game directory
# or start from the evennia library.
TYPECLASS_PATHS = ["typeclasses", "evennia", "evennia.contrib", "evennia.contrib.tutorial_examples"]
# Typeclass for player objects (linked to a character) (fallback)
BASE_PLAYER_TYPECLASS = "typeclasses.players.Player"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "typeclasses.objects.Object"
# Typeclass for character objects linked to a player (fallback)
BASE_CHARACTER_TYPECLASS = "typeclasses.characters.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "typeclasses.rooms.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "typeclasses.exits.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "typeclasses.channels.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "typeclasses.scripts.Script"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2,3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files).
BASE_BATCHPROCESS_PATHS = ['world', 'evennia.contrib', 'evennia.contrib.tutorial_examples']
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# evennia.utils.gametime.py and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# These measures might or might not make sense to your game world.
TIME_SEC_PER_MIN = 60
TIME_MIN_PER_HOUR = 60
TIME_HOUR_PER_DAY = 24
TIME_DAY_PER_WEEK = 7
TIME_WEEK_PER_MONTH = 4
TIME_MONTH_PER_YEAR = 12
######################################################################
# Inlinefunc
######################################################################
# Evennia supports inline function preprocessing. This allows
# users to supply {func() ... {/func in text, performing dynamic
# text formatting and manipulation on the fly. If disabled, such
# inline functions will not be parsed.
INLINEFUNC_ENABLED = False
# Only functions defined globally (and not starting with '_') in
# these modules will be considered valid inlinefuncs. The list
# is loaded from left-to-right, same-named functions will overload
INLINEFUNC_MODULES = ["evennia.utils.inlinefunc", "server.conf.inlinefunc"]
######################################################################
# Default Player setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 there is
# only one character, created with the same name as the account at first login.
# In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS
# value (below) defines how many characters the default char_create command
# allows per player.
# 0 - single session, one player, one character, when a new session is
# connected, the old one is disconnected
# 1 - multiple sessions, one player, one character, each session getting
# the same data
# 2 - multiple sessions, one player, many characters, one session per
# character (disconnects multiplets)
# 3 - like mode 2, except multiple sessions can puppet one character, each
# session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed for MULTISESSION_MODE 2,3. This is
# checked by the default ooc char-creation command. Forced to 1 for
# MULTISESSION_MODE 0 and 1.
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions.
PERMISSION_HIERARCHY = ["Guests",  # note: only used if GUEST_ENABLED=True
"Players",
"PlayerHelpers",
"Builders",
"Wizards",
"Immortals"]
# The default permission given to all new players
PERMISSION_PLAYER_DEFAULT = "Players"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
CLIENT_DEFAULT_HEIGHT = 45 # telnet standard is 24 but does anyone use such
# low-res displays anymore?
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest"
GUEST_ENABLED = False
# Typeclass for guest player objects (linked to a character)
BASE_GUEST_TYPECLASS = "typeclasses.players.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests.
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters.
GUEST_START_LOCATION = START_LOCATION
# The naming convention used for creating new guest
# players/characters. The size of this list also determines how many
# guests may be on the game at once. The default is a maximum of nine
# guests, named Guest1 through Guest9.
GUEST_LIST = ["Guest" + str(s+1) for s in range(9)]
######################################################################
# In-game Channels created from server start
######################################################################
# This is a list of global channels created by the
# initialization script the first time Evennia starts.
# The superuser (user #1) will be automatically subscribed
# to all channels in this list. Each channel is described by
# a dictionary keyed with the same keys valid as arguments
# to the evennia.create.create_channel() function.
# Note: Evennia will treat the first channel in this list as
# the general "public" channel and the second as the
# general "mud info" channel. Other channels beyond that
# are up to the admin to design and call appropriately.
DEFAULT_CHANNELS = [
# public channel
{"key": "Public",
"aliases": ('ooc', 'pub'),
"desc": "Public discussion",
"locks": "control:perm(Wizards);listen:all();send:all()"},
# connection/mud info
{"key": "MudInfo",
"aliases": "",
"desc": "Connection log",
"locks": "control:perm(Immortals);listen:perm(Wizards);send:false()"}
]
######################################################################
# External Channel connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients. IRC and IMC2
# requires that you have twisted.words installed.
# Evennia can connect to external IRC channels and
# echo what is said on the channel to IRC and vice
# versa. Obs - make sure the IRC network allows bots.
# When enabled, command @irc2chan will be available in-game
IRC_ENABLED = False
# RSS allows you to connect RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the rss feed
# updates. Use @rss2chan in game to connect if this setting is
# active. OBS: RSS support requires the python-feedparser package to
# be installed (through package manager or from the website
# http://code.google.com/p/feedparser/)
RSS_ENABLED=False
RSS_UPDATE_INTERVAL = 60*10 # 10 minutes
# IMC (Inter-MUD communication) allows you to connect an Evennia channel
# to an IMC2 server. This lets them talk to people on other MUDs also
# using IMC. Evennia's IMC2 client was developed against MudByte's
# network. You must register your MUD on the network before you can
# use it, go to http://www.mudbytes.net/imc2-intermud-join-network.
# Choose 'Other unsupported IMC2 version' from the choices and
# enter your information there. You should enter the same 'short mud
# name' as your SERVERNAME above, then choose imc network server as
# well as client/server passwords same as below. When enabled, the
# command @imc2chan becomes available in-game and allows you to
# connect Evennia channels to IMC channels on the network. The Evennia
# discussion channel 'ievennia' is on server01.mudbytes.net:5000.
# NOTE - IMC2 is currently NOT FUNCTIONAL due to lack of testing means.
IMC2_ENABLED = False
IMC2_NETWORK = "server01.mudbytes.net"
IMC2_PORT = 5000 # this is the imc2 port, not on localhost
IMC2_CLIENT_PWD = ""
IMC2_SERVER_PWD = ""
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page on the web
# stuff, email the traceback to the people in the ADMINS tuple
# below. If True, show a detailed traceback for the web
# browser to display. Note however that this will leak memory when
# active, so make sure to turn it off for a production server!
DEBUG = False
# While true, show "pretty" error messages for template syntax errors.
TEMPLATE_DEBUG = DEBUG
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather that nobody receives emails, leave this commented out or empty.
ADMINS = ()  # (('Your Name', '[email protected]'),)
# These guys get broken link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# Absolute path to the directory that holds file uploads from web apps.
# Example: "/home/media/media.lawrence.com"
MEDIA_ROOT = os.path.join(GAME_DIR, "web", "media")
# It's safe to disregard this, as it's a Django feature we only half use as a
# dependency, not actually what it's primarily meant for.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = 'sessionid'
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = [os.path.join(EVENNIA_DIR, "locale/")]
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = 'web.urls' #src.web.urls?
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = '/'
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = '/accounts/login'
# Where to redirect users who wish to logout.
LOGOUT_URL = '/accounts/login'
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Django1.4+ will look for admin files under
# STATIC_URL/admin.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(GAME_DIR, "web", "static")
# Directories from which static files will be gathered.
STATICFILES_DIRS = (
os.path.join(GAME_DIR, "web", "static_overrides"),
os.path.join(EVENNIA_DIR, "web", "static"),)
# Patterns of files in the static directories. Used here to make sure that
# the README file in those directories is kept in place but not collected.
STATICFILES_IGNORE_PATTERNS = ('README.md',)
# The name of the currently selected web template. This corresponds to the
# directory names shown in the webtemplates directory.
ACTIVE_TEMPLATE = 'prosimii'
# We setup the location of the website template as well as the admin site.
TEMPLATE_DIRS = (
os.path.join(GAME_DIR, "web", "template_overrides"),
os.path.join(EVENNIA_DIR, "web", "templates", ACTIVE_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "templates"),)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',)
# MiddleWare are semi-transparent extensions to Django's functionality.
# see http://www.djangoproject.com/documentation/middleware/ for a more detailed
# explanation.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware', # 1.4?
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',)
# Context processors define context variables, generally for the template
# system to use.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.media',
'django.core.context_processors.debug',
'evennia.web.utils.general_context.general_context',)
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'evennia.utils.idmapper',
'evennia.server',
'evennia.typeclasses',
'evennia.players',
'evennia.objects',
'evennia.comms',
'evennia.help',
'evennia.scripts',
'evennia.web.webclient')
# The user profile extends the User object with more functionality;
# This should usually not be changed.
AUTH_USER_MODEL = "players.PlayerDB"
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = 'evennia.server.tests.EvenniaTestSuiteRunner'
######################################################################
# Django extensions
######################################################################
# Django extensions are useful third-party tools that are not
# always included in the default django distro.
try:
import django_extensions
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
except ImportError:
pass
#######################################################################
# SECRET_KEY
#######################################################################
# This is the signing key for the cookies generated by Evennia's
# web interface.
#
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
SECRET_KEY = 'changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS'
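#######################################################################
# Example game settings override (illustrative only)
#######################################################################
# Per the module docstring, settings are changed by copy-pasting the
# relevant variables into <gamedir>/conf/settings.py. A minimal override
# file could look like the sketch below (the import path is the one
# normally generated for a game dir and is assumed here):
#
#     from evennia.settings_default import *
#
#     SERVERNAME = "MyGame"
#     TELNET_PORTS = [4040]
#     MULTISESSION_MODE = 2
#     MAX_NR_CHARACTERS = 3
#     SECRET_KEY = "replace-this-with-your-own-random-string"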
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Utility function to work with Atom tagged members and to automatize
preferences handling.
"""
from collections import OrderedDict
from ast import literal_eval
from textwrap import fill
from atom.api import Str, Enum, Atom, Constant
from inspect import getfullargspec
from inspect import cleandoc
# String identifying the preference tag
PREF_KEY = 'pref'
# Positions in the list for the to_pref and from_pref functions
TO_PREF_ID = 0
FROM_PREF_ID = 1
def tagged_members(obj, meta=None, meta_value=None):
""" Utility function to retrieve tagged members from an object
Parameters
----------
obj : Atom
Object from which the tagged members should be retrieved.
meta : str, optional
        The tag to look for; only members which have this tag will be returned
meta_value : optional
The value of the metadata used for filtering the members returned
Returns
-------
tagged_members : dict(str, Member)
        Dictionary of the members whose metadata corresponds to the predicate
"""
members = obj.members()
if meta is None and meta_value is None:
return members
elif meta_value is None:
return {key: member for key, member in members.items()
if member.metadata is not None and meta in member.metadata}
else:
return {key: member for key, member in members.items()
if member.metadata is not None and
meta in member.metadata and
member.metadata[meta] == meta_value}
def member_from_pref(obj, member, val):
""" Retrieve the value stored in the preferences for a member.
Parameters
----------
obj : Atom
Object who owns the member.
member : Member
Member for which the preferences should be retrieved.
val : Value
Value that is stored in the preferences, depending on the case this
might be a serialized value or simply a string.
Returns
-------
value : Value
The deserialized value that can be assigned to the member.
"""
meta_value = member.metadata[PREF_KEY]
# If 'pref=True' then we rely on the standard save mechanism
if meta_value is True:
        # If the member is a subclass of Str then we just take the raw
        # value and save it as-is; Atom will handle any casting for us.
if isinstance(member, Str):
value = val
# If it is an Enum where the first item is a (subclass of) string, then
# we assume that the whole Enum contains strings and we save it as-is
elif isinstance(member, Enum) and isinstance(member.items[0], str):
value = val
# Otherwise, we eval it, or we might throw an error
else:
try:
value = literal_eval(val)
except ValueError:
# Silently ignore failed evaluation as we can have a string
# assigned to a value.
value = val
# If the user provided a custom "from_pref" function, then we check
# that it has the correct signature and use it to obtain the value
elif (isinstance(meta_value, (tuple, list)) and
len(getfullargspec(meta_value[FROM_PREF_ID])[0]) == 3):
value = meta_value[FROM_PREF_ID](obj, member, val)
elif meta_value is False:
raise NotImplementedError(
fill(cleandoc('''you set 'pref=False' for this member. If you did
not want to save it you should simply not declare this tag.''')))
else:
raise NotImplementedError(
fill(cleandoc('''the 'pref' tag of this member was not set to true,
therefore the program expects you to declare two functions,
'member_to_pref(obj,member,val)' and 'member_from_pref(obj,member,
val)' that will handle the serialization and deserialization of
the value. Those should be passed as a list or a tuple, where
the first element is member_to and the second is member_from.
It is possible that you failed to properly declare the signature
of those two functions.''')))
return value
def member_to_pref(obj, member, val):
""" Provide the value that will be stored in the preferences for a member.
Parameters
----------
obj : Atom
Object who owns the member.
member : Member
Member for which the preferences should be retrieved
val : Value
Value of the member to be stored in the preferences
Returns
-------
pref_value : str
The serialized value/string that will be stored in the pref.
"""
meta_value = member.metadata[PREF_KEY]
# If 'pref=True' then we rely on the standard save mechanism
if meta_value is True:
# If val is string-like, then we can simply cast it and rely on
# python/Atom default methods.
if isinstance(val, str):
pref_value = val
else:
pref_value = repr(val)
# If the user provided a custom "to_pref" function, then we check
# that it has the correct signature and use it to obtain the value
elif (isinstance(meta_value, (tuple, list)) and
len(getfullargspec(meta_value[TO_PREF_ID])[0]) == 3):
pref_value = meta_value[TO_PREF_ID](obj, member, val)
elif meta_value is False:
raise NotImplementedError(
fill(cleandoc('''you set 'pref=False' for this member. If you did
not want to save it you should simply not declare this tag.''')))
else:
raise NotImplementedError(
fill(cleandoc('''the 'pref' tag of this member was not set to true,
therefore the program expects you to declare two functions,
'member_to_pref(obj,member,val)' and 'member_from_pref(obj,member,
val)' that will handle the serialization and deserialization of
the value. Those should be passed as a list or a tuple, where
the first element is member_to and the second is member_from.
It is possible that you failed to properly declare the signature
of those two functions.''')))
return pref_value
def ordered_dict_to_pref(obj, member, val):
""" Function to convert an OrderedDict to something that can
be easily stored and read back, in this case a list of tuples.
Parameters
----------
obj: Atom
The instance calling the function
member: Member
The member that must be stored
val: OrderedDict
The current value of the member
Returns
-------
value : str
        The serialized value.
"""
return repr(list(val.items()))
def ordered_dict_from_pref(obj, member, val):
"""Read back the list of tuples saved by 'ordered_dict_to_pref'.
We simply do a literal_eval of the list of tuples, and then convert it to
an OrderedDict.
Parameters
----------
obj: Atom
The instance calling the function
member: Member
The member that must be stored
val: str
The string representation of the stored value
Returns
-------
value : OrderedDict
An Ordered Dict that can be assigned to the member.
"""
return OrderedDict(literal_eval(val))
class HasPrefAtom(Atom):
""" Base class for Atom object using preferences.
This class defines the basic functions used to build a string dict from
the member value and to update the members from such a dict.
"""
pass
def preferences_from_members(self):
""" Get the members values as string to store them in .ini files.
"""
pref = OrderedDict()
for name, member in tagged_members(self, 'pref').items():
old_val = getattr(self, name)
if issubclass(type(old_val), HasPrefAtom):
pref[name] = old_val.preferences_from_members()
else:
pref[name] = member_to_pref(self, member, old_val)
return pref
def update_members_from_preferences(self, parameters):
""" Use the string values given in the parameters to update the members
This function will call itself on any tagged HasPrefAtom member.
"""
for name, member in tagged_members(self, 'pref').items():
if name not in parameters or isinstance(member, Constant):
continue
old_val = getattr(self, name)
if issubclass(type(old_val), HasPrefAtom):
old_val.update_members_from_preferences(parameters[name])
# This is meant to prevent updating fields which expect a custom
# instance
elif old_val is None:
pass
else:
value = parameters[name]
converted = member_from_pref(self, member, value)
try:
setattr(self, name, converted)
except Exception as e:
                msg = 'An exception occurred when trying to set {} to {}'
raise ValueError(msg.format(name, converted)) from e
HasPrefAtom.preferences_from_members = preferences_from_members
HasPrefAtom.update_members_from_preferences = update_members_from_preferences
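# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the class and member names below
# are made up for the example, and ``Typed`` would need to be imported from
# atom.api):
#
#     class Settings(HasPrefAtom):
#         name = Str('default').tag(pref=True)
#         mapping = Typed(OrderedDict, ()).tag(
#             pref=(ordered_dict_to_pref, ordered_dict_from_pref))
#
#     settings = Settings()
#     prefs = settings.preferences_from_members()      # dict of strings
#     settings.update_members_from_preferences(prefs)  # round-trip
# ---------------------------------------------------------------------------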
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from mongokit import *
from bson.objectid import ObjectId
import datetime
class JsonTestCase(unittest.TestCase):
def setUp(self):
self.connection = Connection()
self.col = self.connection['test']['mongokit']
def tearDown(self):
self.connection['test'].drop_collection('mongokit')
self.connection['test'].drop_collection('versionned_mongokit')
def test_simple_to_json(self):
class MyDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
"egg":datetime.datetime,
},
"spam":[],
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc["bla"]["foo"] = u"bar"
mydoc["bla"]["bar"] = 42
mydoc['bla']['egg'] = datetime.datetime(2010, 1, 1)
mydoc['spam'] = range(10)
mydoc.save()
assert mydoc.to_json() == '{"_id": "mydoc", "bla": {"egg": 1262304000000, "foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}', mydoc.to_json()
assert mydoc.to_json_type() == {'_id': 'mydoc', 'bla': {'egg': 1262304000000, 'foo': u'bar', 'bar': 42}, 'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}, mydoc.to_json_type()
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc2'
mydoc["bla"]["foo"] = u"bar"
mydoc["bla"]["bar"] = 42
mydoc['spam'] = [datetime.datetime(2000, 1, 1), datetime.datetime(2008, 8, 8)]
mydoc.save()
assert mydoc.to_json() == '{"_id": "mydoc2", "bla": {"egg": null, "foo": "bar", "bar": 42}, "spam": [946684800000, 1218153600000]}', mydoc.to_json()
assert mydoc.to_json_type() == {'_id': 'mydoc2', 'bla': {'egg': None, 'foo': u'bar', 'bar': 42}, 'spam': [946684800000, 1218153600000]}, mydoc.to_json_type()
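        # Note on the integer timestamps asserted above: to_json()/to_json_type()
        # encode datetime values as milliseconds since the Unix epoch, e.g.
        #   datetime.datetime(2010, 1, 1) -> 1262304000000
        #   datetime.datetime(2000, 1, 1) ->  946684800000
        #   datetime.datetime(2008, 8, 8) -> 1218153600000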
def test_simple_to_json_with_oid(self):
class MyDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc["bla"]["foo"] = u"bar"
mydoc["bla"]["bar"] = 42
mydoc['spam'] = range(10)
mydoc.save()
assert isinstance(mydoc.to_json_type()['_id']['$oid'], basestring), type(mydoc.to_json_type()['_id'])
assert isinstance(mydoc.to_json(), unicode)
def test_simple_to_json_with_oid_in_list(self):
class A(Document):
structure = {
"foo":unicode,
}
class B(Document):
structure = {
'bar':[ObjectId],
'egg':{
'nested':ObjectId,
}
}
self.connection.register([A, B])
a = self.col.A()
a["foo"] = u"bar"
a.save()
assert isinstance(a.to_json_type()['_id']['$oid'], basestring), type(a.to_json_type()['_id'])
a.to_json()
b = self.col.B()
b['bar'] = [a['_id']]
b['egg']['nested'] = a['_id']
b.save()
print b.to_json_type()
assert isinstance(b.to_json_type()['_id']['$oid'], basestring), b.to_json_type()
assert isinstance(b.to_json_type()['egg']['nested']['$oid'], basestring), b.to_json_type()
assert isinstance(b.to_json_type()['bar'][0]['$oid'], basestring), b.to_json_type()
assert isinstance(b.to_json_type()['egg']['nested']['$oid'], basestring), b.to_json_type()
assert "ObjectId" not in b.to_json()
def test_simple_to_json_with_no_id(self):
class MyDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc["bla"]["foo"] = u"bar"
mydoc["bla"]["bar"] = 42
mydoc['spam'] = range(10)
assert "_id" not in mydoc.to_json_type()
assert mydoc.to_json() == '{"bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}'
def test_to_json_custom_type(self):
class CustomFloat(CustomType):
mongo_type = unicode
python_type = float
def to_bson(self, value):
if value is not None:
return unicode(value)
def to_python(self, value):
if value is not None:
return float(value)
class MyDoc(Document):
structure = {
"doc":{
"foo":CustomFloat(),
},
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc['doc']['foo'] = 3.70
mydoc.save()
assert mydoc.to_json() == '{"doc": {"foo": 3.7000000000000002}, "_id": "mydoc"}', mydoc.to_json()
assert mydoc.to_json_type() == {"doc": {"foo": 3.7000000000000002}, "_id": "mydoc"}
def test_to_json_embeded_doc(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":EmbedDoc,
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
embed = self.col.EmbedDoc()
embed['_id'] = u'embed'
embed["bla"]["foo"] = u"bar"
embed["bla"]["bar"] = 42
embed['spam'] = range(10)
embed.save()
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc['doc']['embed'] = embed
mydoc.save()
assert mydoc.to_json() == '{"doc": {"embed": {"_collection": "mongokit", "_database": "test", "_id": "embed", "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}, "_id": "mydoc"}'
assert mydoc.to_json_type() == {"doc": {"embed": {"_id": "embed", "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}, "_id": "mydoc"}
def test_to_json_embeded_doc_with_oid(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":EmbedDoc,
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
embed = self.col.EmbedDoc()
embed["bla"]["foo"] = u"bar"
embed["bla"]["bar"] = 42
embed['spam'] = range(10)
embed.save()
mydoc = self.col.MyDoc()
mydoc['doc']['embed'] = embed
mydoc.save()
assert isinstance(mydoc.to_json_type()['doc']['embed']['_id']['$oid'], basestring)
assert mydoc.to_json() == '{"doc": {"embed": {"_collection": "mongokit", "_database": "test", "_id": {"$oid": "%s"}, "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}, "_id": {"$oid": "%s"}}' % (
embed['_id'], mydoc['_id']), mydoc.to_json()
def test_to_json_with_None_embeded_doc(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":EmbedDoc,
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc.save()
assert mydoc.to_json() == '{"doc": {"embed": null}, "_id": "mydoc"}'
assert mydoc.to_json_type() == {'doc': {'embed': None}, '_id': 'mydoc'}, mydoc.to_json_type()
def test_to_json_with_dict_in_list(self):
class MyDoc(Document):
structure = {
"foo":[{'bar':unicode, 'egg':int}],
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc["foo"] = [{'bar':u'bla', 'egg':3}, {'bar':u'bli', 'egg':4}]
mydoc.save()
assert mydoc.to_json() == '{"foo": [{"bar": "bla", "egg": 3}, {"bar": "bli", "egg": 4}], "_id": "mydoc"}', mydoc.to_json()
assert mydoc.to_json_type() == {'foo': [{'bar': u'bla', 'egg': 3}, {'bar': u'bli', 'egg': 4}], '_id': 'mydoc'}
def test_simple_from_json(self):
class MyDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
self.connection.register([MyDoc])
json = '{"_id": "mydoc", "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}'
mydoc = self.col.MyDoc.from_json(json)
assert mydoc == {'_id': 'mydoc', 'bla': {'foo': 'bar', 'bar': 42}, 'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}
assert mydoc.collection == self.col
def test_simple_from_json2(self):
class MyDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
"egg":datetime.datetime,
},
"spam":[datetime.datetime],
}
self.connection.register([MyDoc])
json = '{"_id": "mydoc2", "bla": {"foo": "bar", "bar": 42, "egg":946684800000}, "spam": [946684800000, 1218153600000]}'
mydoc = self.col.MyDoc.from_json(json)
assert mydoc == {'_id': 'mydoc2', 'bla': {'foo': 'bar', 'bar': 42, "egg":datetime.datetime(2000, 1, 1, 0, 0)}, 'spam': [datetime.datetime(2000, 1, 1, 0, 0), datetime.datetime(2008, 8, 8, 0, 0)]}, mydoc
assert mydoc.collection == self.col
def test_from_json_embeded_doc(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":EmbedDoc,
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
embed = self.col.EmbedDoc()
embed['_id'] = u"embed"
embed["bla"] = {"foo": u"bar", "bar": 42}
embed["spam"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
embed.save()
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc['doc']['embed'] = embed
mydoc.save()
json = mydoc.to_json()
assert json == '{"doc": {"embed": {"_collection": "mongokit", "_database": "test", "_id": "embed", "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}, "_id": "mydoc"}', json
mydoc = self.col.MyDoc.from_json(json)
assert mydoc == {'doc': {'embed': {u'_id': u'embed', u'bla': {u'foo': u'bar', u'bar': 42}, u'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}, '_id': u'mydoc'}, mydoc
assert isinstance(mydoc['doc']['embed'], EmbedDoc)
def test_from_json_embeded_doc_with_oid(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":EmbedDoc,
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
embed = self.col.EmbedDoc()
embed["bla"] = {"foo": u"bar", "bar": 42}
embed["spam"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
embed.save()
mydoc = self.col.MyDoc()
mydoc['doc']['embed'] = embed
mydoc.save()
json = mydoc.to_json()
assert json == '{"doc": {"embed": {"_collection": "mongokit", "_database": "test", "_id": {"$oid": "%s"}, "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}, "_id": {"$oid": "%s"}}' %(
embed['_id'], mydoc['_id']), json
doc = self.col.MyDoc.from_json(json)
assert doc == {'doc': {'embed': {u'_id': embed['_id'], u'bla': {u'foo': u'bar', u'bar': 42}, u'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}}, '_id': mydoc['_id']}, doc
assert isinstance(doc['doc']['embed'], EmbedDoc)
def test_from_json_with_None_embeded_doc(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":EmbedDoc,
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc.save()
        json = mydoc.to_json()
assert json == '{"doc": {"embed": null}, "_id": "mydoc"}'
doc = self.col.MyDoc.from_json(json)
assert doc == {'doc': {'embed': None}, '_id': 'mydoc'}
def test_from_json_embeded_doc_in_list(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":[EmbedDoc],
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
embed = self.col.EmbedDoc()
embed['_id'] = u"embed"
embed["bla"] = {"foo": u"bar", "bar": 42}
embed["spam"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
embed.save()
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc['doc']['embed'] = [embed]
mydoc.save()
json = mydoc.to_json()
assert json == '{"doc": {"embed": [{"_collection": "mongokit", "_database": "test", "_id": "embed", "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}]}, "_id": "mydoc"}'
mydoc = self.col.MyDoc.from_json(json)
assert mydoc == {'doc': {'embed': [{u'_id': u'embed', u'bla': {u'foo': u'bar', u'bar': 42}, u'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}]}, '_id': u'mydoc'}, mydoc
assert isinstance(mydoc['doc']['embed'][0], EmbedDoc)
def test_from_json_embeded_doc_in_list_with_oid(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":[EmbedDoc],
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
embed = self.col.EmbedDoc()
embed["bla"] = {"foo": u"bar", "bar": 42}
embed["spam"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
embed.save()
mydoc = self.col.MyDoc()
mydoc['doc']['embed'] = [embed]
mydoc.save()
json = mydoc.to_json()
assert json == '{"doc": {"embed": [{"_collection": "mongokit", "_database": "test", "_id": {"$oid": "%s"}, "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}]}, "_id": {"$oid": "%s"}}' %(
embed['_id'], mydoc['_id']), json
doc = self.col.MyDoc.from_json(json)
assert doc == {'doc': {'embed': [{u'_id': embed['_id'], u'bla': {u'foo': u'bar', u'bar': 42}, u'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}]}, '_id': mydoc['_id']}, doc
assert isinstance(doc['doc']['embed'][0], EmbedDoc)
def test_from_json_with_no_embeded_doc_in_list(self):
class EmbedDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
class MyDoc(Document):
structure = {
"doc":{
"embed":[EmbedDoc],
},
}
use_autorefs = True
self.connection.register([MyDoc, EmbedDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc.save()
json = mydoc.to_json()
assert json == '{"doc": {"embed": []}, "_id": "mydoc"}'
mydoc = self.col.MyDoc.from_json(json)
assert mydoc == {'doc': {'embed': []}, '_id': 'mydoc'}
def test_from_json_dict_in_list(self):
class MyDoc(Document):
structure = {
"doc":{
"embed":[{"foo":unicode, "bar":int}],
},
}
use_autorefs = True
self.connection.register([MyDoc])
json = '{"doc": {"embed": [{"foo": "bar", "bar": 42}]}, "_id": "mydoc"}'
mydoc = self.col.MyDoc.from_json(json)
assert mydoc == {'doc': {'embed': [{'foo': 'bar', 'bar': 42}]}, '_id': 'mydoc'}, mydoc
def test_from_json_unicode(self):
class MyDoc(Document):
structure = {
"doc":{
"name":unicode
},
"foo": unicode,
}
use_autorefs = True
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['doc']['name'] = u'bla'
mydoc['foo'] = u'bar'
json = mydoc.to_json()
mydoc2 = self.col.MyDoc.from_json(json)
assert isinstance(mydoc['doc']['name'], unicode)
assert isinstance(mydoc['foo'], unicode)
assert isinstance(mydoc2['doc']['name'], unicode)
assert isinstance(mydoc2['foo'], unicode)
def test_simple_to_json_from_cursor(self):
class MyDoc(Document):
structure = {
"bla":{
"foo":unicode,
"bar":int,
},
"spam":[],
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc["bla"]["foo"] = u"bar"
mydoc["bla"]["bar"] = 42
mydoc['spam'] = range(10)
mydoc.save()
json = mydoc.to_json()
assert json == '{"_id": "mydoc", "bla": {"foo": "bar", "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}'
mydoc2 = self.col.MyDoc()
mydoc2['_id'] = u'mydoc2'
mydoc2["bla"]["foo"] = u"bla"
mydoc2["bla"]["bar"] = 32
mydoc2['spam'] = [datetime.datetime(2000, 1, 1), datetime.datetime(2008, 8, 8)]
mydoc2.save()
json2 = mydoc2.to_json()
assert [i.to_json() for i in self.col.MyDoc.fetch()] == [json, json2]
def test_anyjson_import_error(self):
import sys
newpathlist = sys.path
sys.path = []
class MyDoc(Document):
structure = {
"foo":int,
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc["foo"] = 4
mydoc.save()
self.assertRaises(ImportError, mydoc.to_json)
self.assertRaises(ImportError, self.col.MyDoc.from_json, '{"_id":"mydoc", "foo":4}')
sys.path = newpathlist
del newpathlist
def test_to_json_with_dot_notation(self):
class MyDoc(Document):
use_dot_notation = True
structure = {
"bla":{
"foo":unicode,
"bar":int,
"egg":datetime.datetime,
},
"spam":[],
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc["bla"]["foo"] = u"bar"
mydoc["bla"]["bar"] = 42
mydoc['bla']['egg'] = datetime.datetime(2010, 1, 1)
mydoc['spam'] = range(10)
mydoc.save()
self.assertEqual(mydoc.to_json(),
'{"_id": "mydoc", "bla": {"bar": 42, "foo": "bar", "egg": 1262304000000}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}')
self.assertEqual(mydoc.to_json_type(),
{'_id': 'mydoc', 'bla': {'egg': 1262304000000, 'foo': u'bar', 'bar': 42}, 'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc.bla.foo = u"bar"
mydoc.bla.bar = 42
mydoc.bla.egg = datetime.datetime(2010, 1, 1)
mydoc.spam = range(10)
mydoc.save()
self.assertEqual(mydoc.to_json(),
'{"_id": "mydoc", "bla": {"bar": 42, "foo": "bar", "egg": 1262304000000}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}')
self.assertEqual(mydoc.to_json_type(),
{'_id': 'mydoc', 'bla': {'egg': 1262304000000, 'foo': u'bar', 'bar': 42}, 'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc2'
mydoc.bla.foo = u"bar"
mydoc.bla.bar = 42
mydoc.spam = [datetime.datetime(2000, 1, 1), datetime.datetime(2008, 8, 8)]
mydoc.save()
self.assertEqual(mydoc.to_json(),
'{"_id": "mydoc2", "bla": {"bar": 42, "foo": "bar", "egg": null}, "spam": [946684800000, 1218153600000]}')
self.assertEqual(mydoc.to_json_type(),
{'_id': 'mydoc2', 'bla': {'egg': None, 'foo': u'bar', 'bar': 42}, 'spam': [946684800000, 1218153600000]})
def test_to_json_with_i18n_and_dot_notation(self):
class MyDoc(Document):
use_dot_notation = True
structure = {
"bla":{
"foo":unicode,
"bar":int,
"egg":datetime.datetime,
},
"spam":[],
}
i18n = ['bla.foo']
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc.bla.foo = u"bar"
mydoc.bla.bar = 42
mydoc.bla.egg = datetime.datetime(2010, 1, 1)
mydoc.spam = range(10)
mydoc.set_lang('fr')
mydoc.bla.foo = u"arf"
mydoc.save()
self.assertEqual(mydoc.to_json_type(),
{'_id': 'mydoc', 'bla': {'bar': 42, 'foo': {'fr': u'arf', 'en': u'bar'}, 'egg': 1262304000000}, 'spam': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
self.assertEqual(mydoc.to_json(),
'{"_id": "mydoc", "bla": {"egg": 1262304000000, "foo": {"fr": "arf", "en": "bar"}, "bar": 42}, "spam": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}')
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc2'
mydoc.bla.foo = u"bar"
mydoc.bla.bar = 42
mydoc.spam = [datetime.datetime(2000, 1, 1), datetime.datetime(2008, 8, 8)]
mydoc.save()
self.assertEqual(mydoc.to_json_type(),
{'_id': 'mydoc2', 'bla': {'bar': 42, 'foo': {'en': u'bar'}, 'egg': None}, 'spam': [946684800000, 1218153600000]})
self.assertEqual(mydoc.to_json(),
'{"_id": "mydoc2", "bla": {"egg": null, "foo": {"en": "bar"}, "bar": 42}, "spam": [946684800000, 1218153600000]}')
def test_from_json_with_list(self):
class MyDoc(Document):
structure = {
'foo': {'bar': [unicode]}
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'mydoc'
mydoc['foo']['bar'] = [u'a', u'b', u'c']
mydoc.save()
json = u'{"_id": "mydoc", "foo":{"bar":["a", "b", "c"]}}'
doc_from_json = self.col.MyDoc.from_json(json)
doc_from_json.save()
assert doc_from_json == mydoc
def test_from_json_with_ref(self):
class A(Document):
structure = {
'foo': unicode
}
class B(Document):
structure = {
'bar': int,
'a': A,
}
use_autorefs = True
self.connection.register([A, B])
a = self.col.A()
a['_id'] = u'a'
a['foo'] = u'a'
a.save()
json = '{"_id": "b", "bar":1, "a":{"$id": "a", "$ref": "%s", "$db": "%s"}}' % (self.col.name, self.col.database.name)
print json
b = self.col.B.from_json(json)
b.save()
assert isinstance(b['a'], A), type(b['a'])
def test_from_json_with_ref_in_list(self):
class A(Document):
structure = {
'foo': unicode
}
class B(Document):
structure = {
'bar': int,
'a': [A],
}
use_autorefs = True
self.connection.register([A, B])
a = self.col.A()
a['_id'] = u'a'
a['foo'] = u'a'
a.save()
json = '{"_id": "b", "bar":1, "a":[{"$id": "a", "$ref": "%s", "$db": "%s"}]}' % (self.col.name, self.col.database.name)
b = self.col.B.from_json(json)
b.save()
assert isinstance(b['a'][0], A), type(b['a'][0])
def test_from_json_with_type_as_key(self):
class MyDoc(Document):
structure = {
'foo': {unicode:[unicode]}
}
self.connection.register([MyDoc])
mydoc = self.col.MyDoc()
mydoc['_id'] = u'a'
mydoc['foo'][u'bar'] = [u'bla', u'ble']
json = '{"_id": "a", "foo": {"bar":["bla", "ble"]}}'
mydoc_from_json = self.col.MyDoc.from_json(json)
assert mydoc == mydoc_from_json, (mydoc, mydoc_from_json)
def test_from_json_with_null_date(self):
class MyDoc(Document):
structure = {
'date': datetime.datetime,
'date_in_list': [datetime.datetime],
}
self.connection.register([MyDoc])
json = '{"_id": "a", "date": null, "date_in_list":[]}'
mydoc_from_json = self.col.MyDoc.from_json(json)
assert mydoc_from_json['_id'] == 'a'
assert mydoc_from_json['date'] is None
assert mydoc_from_json['date_in_list'] == []
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A generic bot to do data ingestion (batch uploading).
usage: data_ingestion.py -csvdir:local_dir/ -page:config_page
"""
#
# (C) Pywikibot team, 2013
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: eac28be77329999ec47a03a5c3d877027b21fc8a $'
#
import base64
import codecs
import hashlib
import io
import os
import sys
import posixpath
if sys.version_info[0] > 2:
import csv
else:
import unicodecsv as csv
import pywikibot
from pywikibot import pagegenerators
from pywikibot.tools import deprecated, deprecated_args
from scripts import upload
if sys.version_info[0] > 2:
from urllib.parse import urlparse
from urllib.request import urlopen
else:
from urlparse import urlparse
from urllib import urlopen
class Photo(pywikibot.FilePage):
"""Represents a Photo (or other file), with metadata, to be uploaded."""
def __init__(self, URL, metadata, site=None):
"""
Constructor.
@param URL: URL of photo
@type URL: str
@param metadata: metadata about the photo that can be referred to
from the title & template
@type metadata: dict
@param site: target site
@type site: APISite
"""
self.URL = URL
self.metadata = metadata
self.metadata["_url"] = URL
self.metadata["_filename"] = filename = posixpath.split(
urlparse(URL)[2])[1]
self.metadata["_ext"] = ext = filename.split(".")[-1]
if ext == filename:
self.metadata["_ext"] = ext = None
self.contents = None
if not site:
site = pywikibot.Site(u'commons', u'commons')
# default title
super(Photo, self).__init__(site,
self.getTitle('%(_filename)s.%(_ext)s'))
def downloadPhoto(self):
"""
Download the photo and store it in a io.BytesIO object.
TODO: Add exception handling
"""
if not self.contents:
imageFile = urlopen(self.URL).read()
self.contents = io.BytesIO(imageFile)
return self.contents
@deprecated_args(site=None)
def findDuplicateImages(self):
"""
Find duplicates of the photo.
Calculates the SHA1 hash and asks the MediaWiki api
for a list of duplicates.
TODO: Add exception handling, fix site thing
"""
hashObject = hashlib.sha1()
hashObject.update(self.downloadPhoto().getvalue())
return list(
page.title(withNamespace=False) for page in
self.site.allimages(sha1=base64.b16encode(hashObject.digest())))
def getTitle(self, fmt):
"""
Populate format string with %(name)s entries using metadata.
Note: this does not clean the title, so it may be unusable as
a MediaWiki page title, and cause an API exception when used.
@param fmt: format string
@type fmt: unicode
@return: formatted string
@rtype: unicode
"""
# FIXME: normalise the title so it is usable as a MediaWiki title.
return fmt % self.metadata
def getDescription(self, template, extraparams={}):
"""Generate a description for a file."""
params = {}
params.update(self.metadata)
params.update(extraparams)
description = u'{{%s\n' % template
for key in sorted(params.keys()):
value = params[key]
if not key.startswith("_"):
description = description + (
u'|%s=%s' % (key, self._safeTemplateValue(value))) + "\n"
description = description + u'}}'
return description
def _safeTemplateValue(self, value):
"""Replace pipe (|) with {{!}}."""
return value.replace("|", "{{!}}")
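# Illustrative sketch of how a Photo is typically used. The 'title' and
# 'author' metadata keys below are hypothetical CSV columns, and 'Information'
# is only an example template name; neither is mandated by this class:
#
#     photo = Photo('http://example.org/pic.jpg',
#                   {'title': 'Foo', 'author': 'Bar'})
#     page_title = photo.getTitle('%(title)s - %(author)s.%(_ext)s')
#     description = photo.getDescription('Information', {'source': photo.URL})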
def CSVReader(fileobj, urlcolumn, site=None, *args, **kwargs):
"""Yield Photo objects for each row of a CSV file."""
reader = csv.DictReader(fileobj, *args, **kwargs)
for line in reader:
yield Photo(line[urlcolumn], line, site=site)
class DataIngestionBot(pywikibot.Bot):
"""Data ingestion bot."""
def __init__(self, reader, titlefmt, pagefmt,
site=pywikibot.Site(u'commons', u'commons')):
"""Constructor."""
super(DataIngestionBot, self).__init__(generator=reader)
self.reader = reader
self.titlefmt = titlefmt
self.pagefmt = pagefmt
if site:
self.site = site
def treat(self, photo):
"""Process each page."""
duplicates = photo.findDuplicateImages()
if duplicates:
pywikibot.output(u"Skipping duplicate of %r" % duplicates)
return duplicates[0]
title = photo.getTitle(self.titlefmt)
description = photo.getDescription(self.pagefmt)
bot = upload.UploadRobot(url=photo.URL,
description=description,
useFilename=title,
keepFilename=True,
verifyDescription=False,
targetSite=self.site)
bot._contents = photo.downloadPhoto().getvalue()
bot._retrieved = True
bot.run()
return title
@deprecated("treat()")
def doSingle(self):
"""Process one page."""
return self.treat(next(self.reader))
@classmethod
def parseConfigurationPage(cls, configurationPage):
"""
Parse a Page which contains the configuration.
@param configurationPage: page with configuration
@type configurationPage: L{pywikibot.Page}
"""
configuration = {}
# Set a bunch of defaults
configuration['csvDialect'] = u'excel'
configuration['csvDelimiter'] = ';'
configuration['csvEncoding'] = u'Windows-1252' # FIXME: Encoding hell
templates = configurationPage.templatesWithParams()
for (template, params) in templates:
if template.title(withNamespace=False) == u'Data ingestion':
for param in params:
(field, sep, value) = param.partition(u'=')
# Remove leading or trailing spaces
field = field.strip()
value = value.strip()
if not value:
value = None
configuration[field] = value
return configuration
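# Illustrative configuration page content (the exact wiki markup is an
# assumption; only the field names are meaningful to this script):
#
#     {{Data ingestion
#     |csvFile=photos.csv
#     |csvDialect=excel
#     |csvDelimiter=;
#     |csvEncoding=Windows-1252
#     |titleFormat=%(title)s - %(author)s.%(_ext)s
#     |formattingTemplate=Information
#     }}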
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
csv_dir = None
for arg in local_args:
if arg.startswith('-csvdir:'):
csv_dir = arg[8:]
else:
genFactory.handleArg(arg)
config_generator = genFactory.getCombinedGenerator()
if not config_generator or not csv_dir:
pywikibot.showHelp()
return
for config_page in config_generator:
try:
config_page.get()
except pywikibot.NoPage:
pywikibot.error('%s does not exist' % config_page)
continue
configuration = DataIngestionBot.parseConfigurationPage(config_page)
filename = os.path.join(csv_dir, configuration['csvFile'])
try:
f = codecs.open(filename, 'r', configuration['csvEncoding'])
except (IOError, OSError) as e:
pywikibot.error('%s could not be opened: %s' % (filename, e))
continue
try:
files = CSVReader(f, urlcolumn='url',
site=config_page.site,
dialect=configuration['csvDialect'],
delimiter=str(configuration['csvDelimiter']))
bot = DataIngestionBot(files,
configuration['titleFormat'],
configuration['formattingTemplate'],
site=None)
bot.run()
finally:
f.close()
if __name__ == "__main__":
main()
|
|
import os
import cgi
import logging
from StringIO import StringIO
import struct
import wsgiref.handlers
from datetime import date
from google.appengine.api import images
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class Attachment(db.Model):
path = db.StringProperty()
filename = db.StringProperty()
uploaded_data = db.BlobProperty()
content_type = db.StringProperty()
height = db.IntegerProperty()
width = db.IntegerProperty()
size = db.IntegerProperty()
# FROM http://www.google.com/codesearch?hl=en&q=+getImageInfo+show:RjgT7H1iBVM:V39CptbrGJ8:XcXNaKeZR3k&sa=N&cd=2&ct=rc&cs_p=http://www.zope.org/Products/Zope3/3.0.0final/ZopeX3-3.0.0.tgz&cs_f=ZopeX3-3.0.0/Dependencies/zope.app.file-ZopeX3-3.0.0/zope.app.file/image.py#l88
def extract_image_attributes(self,data):
data = str(data)
size = len(data)
height = -1
width = -1
content_type = ''
# handle GIFs
if (size >= 10) and data[:6] in ('GIF87a', 'GIF89a'):
# Check to see if content_type is correct
content_type = 'image/gif'
w, h = struct.unpack("<HH", data[6:10])
width = int(w)
height = int(h)
# See PNG v1.2 spec (http://www.cdrom.com/pub/png/spec/)
# Bytes 0-7 are below, 4-byte chunk length, then 'IHDR'
# and finally the 4-byte width, height
elif ((size >= 24) and data.startswith('\211PNG\r\n\032\n') and (data[12:16] == 'IHDR')):
content_type = 'image/png'
w, h = struct.unpack(">LL", data[16:24])
width = int(w)
height = int(h)
# Maybe this is for an older PNG version.
elif (size >= 16) and data.startswith('\211PNG\r\n\032\n'):
# Check to see if we have the right content type
content_type = 'image/png'
w, h = struct.unpack(">LL", data[8:16])
width = int(w)
height = int(h)
# handle JPEGs
elif (size >= 2) and data.startswith('\377\330'):
content_type = 'image/jpeg'
jpeg = StringIO(data)
jpeg.read(2)
b = jpeg.read(1)
try:
while (b and ord(b) != 0xDA):
while (ord(b) != 0xFF): b = jpeg.read(1)
while (ord(b) == 0xFF): b = jpeg.read(1)
if (ord(b) >= 0xC0 and ord(b) <= 0xC3):
jpeg.read(3)
h, w = struct.unpack(">HH", jpeg.read(4))
break
else:
jpeg.read(int(struct.unpack(">H", jpeg.read(2))[0])-2)
b = jpeg.read(1)
width = int(w)
height = int(h)
except struct.error:
pass
except ValueError:
pass
return height,width
def update_uploaded_data(self, data, content_type):
if content_type.startswith('image'):
self.height, self.width = self.extract_image_attributes(data)
            if not self.height or self.height < 0:
                # If the image attributes could not be determined in the
                # original format (extract_image_attributes returns -1), try
                # converting to a PNG with a no-op rotate and probe that.
image = images.Image(data)
image.rotate(0)
self.height, self.width = self.extract_image_attributes(image.execute_transforms(output_encoding=images.PNG))
self.content_type = content_type
self.uploaded_data = data
self.size = len(data)
# I'm attempting to replicate the resize format from http://www.imagemagick.org/Usage/resize/#resize
# at least enough to be usable for avatar or photo gallery thumbnails
def resize(self, format):
preserve_aspect_ratio = True
allow_scale_up = True
if format.endswith("!"):
preserve_aspect_ratio = False
format = format.rstrip("!")
elif format.endswith(">"):
allow_scale_up = False
format = format.rstrip(">")
width,height = format.split('x')
img = images.Image(self.uploaded_data)
if not preserve_aspect_ratio:
requested_aspect = float(height)/float(width)
aspect = float(self.height)/float(self.width)
ratio = requested_aspect / aspect
if (ratio < 1):
left_x = 0.0
right_x = 1.0
top_y = 0.5 - (ratio / 2)
bottom_y = 0.5 + (ratio / 2)
else:
top_y = 0.0
bottom_y = 1.0
left_x = 0.5 - ((1/ratio) / 2)
right_x = 0.5 + ((1/ratio) / 2)
# seem to have issues with small rounding errors for larger images - request for 2000x2000 can end up at 1998x2000
# presumably rounding errors - the 0-1 scale for cropping is weird...
img.crop(left_x=left_x,top_y=top_y,right_x=right_x,bottom_y=bottom_y)
if allow_scale_up or int(width) < self.width or int(height) < self.height:
img.resize(width=int(width), height=int(height))
output_encoding, content_type = images.PNG, 'image/png'
if self.content_type == 'image/jpeg' or self.content_type == 'image/jpg':
output_encoding, content_type = images.JPEG, 'image/jpeg'
img.rotate(0) #no-op so that we don't break if we haven't done any transforms
return img.execute_transforms(output_encoding), content_type
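# Geometry strings accepted by Attachment.resize() above (sizes illustrative),
# mirroring the subset of ImageMagick syntax it emulates:
#   "200x200"   fit within 200x200; the Images API resize keeps the aspect ratio
#   "200x200!"  crop to the requested aspect ratio, then resize to exactly 200x200
#   "200x200>"  resize only when the original is larger in at least one dimension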
class UploadAttachmentPage(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'new.html')
self.response.out.write(template.render(path, {}))
class AttachmentPage(webapp.RequestHandler):
def get(self):
attachment = None
try:
id = self.request.path.split('/')[-1]
attachment = Attachment.get(db.Key(id))
        except Exception:
            # Not a valid datastore key; fall back to the path lookup below.
            pass
if not attachment:
attachment = db.Query(Attachment).filter("path =", self.request.path[1::]).get()
if not attachment:
# Either "id" wasn't provided, or there was no attachment with that ID
# in the datastore.
self.error(404)
return
today = date.today()
self.response.headers.add_header("Expires", date(year=today.year + 1,month=today.month, day=today.day).ctime())
format = self.request.get("resize")
if format:
memcache_client = memcache.Client()
cache_key = "attachment-" + str(attachment.key()) + "-" + format
result = memcache_client.get(cache_key)
if not result:
data, content_type = attachment.resize(format)
memcache_client.set(cache_key, [data, content_type])
else:
data, content_type = result[0], result[1]
self.response.headers['Content-Type'] = content_type
self.response.out.write(data)
else:
self.response.headers['Content-Type'] = str(attachment.content_type)
self.response.out.write(attachment.uploaded_data)
def post(self):
form = cgi.FieldStorage()
path = form.getvalue('path')
attachment = db.Query(Attachment).filter("path =", path).get()
if not attachment:
attachment = Attachment()
attachment.path = path
uploaded_data = form['uploaded_data']
attachment.filename = uploaded_data.filename
attachment.update_uploaded_data(uploaded_data.value, uploaded_data.type)
attachment.put()
logging.debug('Added attachment with path: ' + attachment.path + ' id: ' + str(attachment.key()))
self.redirect('/attachments/' + str(attachment.key()))
class RedirectPage(webapp.RequestHandler):
def get(self):
self.redirect('/attachments/new')
def main():
logging.getLogger().setLevel(logging.DEBUG)
application = webapp.WSGIApplication(
[('/attachments/new', UploadAttachmentPage),
('/attachments.*', AttachmentPage),
('/.*', RedirectPage)],
debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for heteroscedastic.py."""
from absl.testing import parameterized
import edward2 as ed
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
def test_cases():
return parameterized.named_parameters(
{
'testcase_name': '_MCSoftmaxDense_logit_noise_normal_10',
'logit_noise': tfp.distributions.Normal,
'num_classes': 10,
'model_type': 'MCSoftmaxDense'
}, {
'testcase_name': '_MCSoftmaxDense_logit_noise_logistic_10',
'logit_noise': tfp.distributions.Logistic,
'num_classes': 10,
'model_type': 'MCSoftmaxDense'
}, {
'testcase_name': '_MCSoftmaxDense_logit_noise_gumbel_10',
'logit_noise': tfp.distributions.Gumbel,
'num_classes': 10,
'model_type': 'MCSoftmaxDense'
},
{
'testcase_name': '_MCSoftmaxDenseFA_logit_noise_normal_10',
'logit_noise': tfp.distributions.Normal,
'num_classes': 10,
'model_type': 'MCSoftmaxDenseFA'
},
{
'testcase_name': '_MCSigmoidDenseFA_logit_noise_normal_10',
'logit_noise': tfp.distributions.Normal,
'num_classes': 10,
'model_type': 'MCSigmoidDenseFA'
},
{
'testcase_name': '_MCSoftmaxDenseFAPE_logit_noise_normal_10',
'logit_noise': tfp.distributions.Normal,
'num_classes': 10,
'model_type': 'MCSoftmaxDenseFAPE'
},
{
'testcase_name': '_MCSigmoidDenseFAPE_logit_noise_normal_10',
'logit_noise': tfp.distributions.Normal,
'num_classes': 10,
'model_type': 'MCSigmoidDenseFAPE'
},
{
'testcase_name': '_MCSigmoidDenseFA_logit_noise_normal_2',
'logit_noise': tfp.distributions.Normal,
'num_classes': 2,
'model_type': 'MCSigmoidDenseFA'
}, {
'testcase_name': '_MCSigmoidDenseFA_logit_noise_logistic_2',
'logit_noise': tfp.distributions.Logistic,
'num_classes': 2,
'model_type': 'MCSigmoidDenseFA'
}, {
'testcase_name': '_MCSigmoidDenseFA_logit_noise_gumbel_2',
'logit_noise': tfp.distributions.Gumbel,
'num_classes': 2,
'model_type': 'MCSigmoidDenseFA'
},
{
'testcase_name': '_Exact_logit_noise_normal_2',
'logit_noise': tfp.distributions.Normal,
'num_classes': 2,
'model_type': 'Exact'
}, {
'testcase_name': '_Exact_logit_noise_logistic_2',
'logit_noise': tfp.distributions.Logistic,
'num_classes': 2,
'model_type': 'Exact'
},
{
'testcase_name': '_EnsembleGibbsCE_10',
'logit_noise': tfp.distributions.Normal,
'num_classes': 10,
'model_type': 'EnsembleGibbsCE'
}, {
'testcase_name': '_EnsembleGibbsCE_2',
'logit_noise': tfp.distributions.Normal,
'num_classes': 2,
'model_type': 'EnsembleGibbsCE'
}, {
'testcase_name': '_EnsembleEnsembleCE_10',
'logit_noise': tfp.distributions.Normal,
'num_classes': 10,
'model_type': 'EnsembleEnsembleCE'
}, {
'testcase_name': '_EnsembleEnsembleCE_2',
'logit_noise': tfp.distributions.Normal,
'num_classes': 2,
'model_type': 'EnsembleEnsembleCE'
},)
class Classifier(tf.keras.Model):
"""Wrapper for classifiers defined below.
Handles different architectures and differences between eager/graph execution.
"""
def __init__(self, model_type='MCSoftmaxDense', num_classes=2,
logit_noise=tfp.distributions.Normal,
**kwargs):
super().__init__()
if model_type == 'MCSoftmaxDense':
self.classifier = DenseClassifier(num_classes, **kwargs)
elif model_type == 'MCSoftmaxDenseFA':
self.classifier = DenseFAClassifier(
num_classes, num_factors=max(num_classes//2, 2), **kwargs)
elif model_type == 'MCSigmoidDenseFA':
self.classifier = SigmoidDenseFAClassifier(
num_classes,
num_factors=max(num_classes//2, 2) if num_classes > 2 else 0,
**kwargs)
elif model_type == 'MCSoftmaxDenseFAPE':
self.classifier = DenseFAClassifier(
num_classes, num_factors=max(num_classes//2, 2),
parameter_efficient=True, **kwargs)
elif model_type == 'MCSigmoidDenseFAPE':
self.classifier = SigmoidDenseFAClassifier(
num_classes, num_factors=max(num_classes//2, 2),
parameter_efficient=True, **kwargs)
elif model_type == 'Exact':
self.classifier = ExactSigmoidDenseClassifier(num_classes, logit_noise)
elif model_type == 'EnsembleGibbsCE':
self.classifier = EnsembleClassifier(
num_classes, averaging='gibbs_cross_ent')
elif model_type == 'EnsembleEnsembleCE':
self.classifier = EnsembleClassifier(
num_classes, averaging='ensemble_cross_ent')
def call(self, inputs, **kwargs):
if tf.executing_eagerly():
return self.classifier(inputs, **kwargs)
else:
      # TODO(basilm): Find a way around the need for variable_scope - using
# tf.enable_resource_variables() doesn't seem to work.
with tf.compat.v1.variable_scope('scope', use_resource=True):
return self.classifier(inputs, **kwargs)
class DenseClassifier(tf.keras.Model):
"""Feedforward neural network with MCSoftmaxDense output layer."""
def __init__(self, num_classes, logit_noise=tfp.distributions.Normal,
temperature=1.0, train_mc_samples=1000, test_mc_samples=1000,
compute_pred_variance=False):
"""Creates an instance of DenseClassifier.
A feedforward network which computes the predictive and log predictive
distribution.
Args:
num_classes: Integer. Number of classes for classification task.
logit_noise: tfp.distributions instance. Must be a location-scale
distribution. Valid values: tfp.distributions.Normal,
tfp.distributions.Logistic, tfp.distributions.Gumbel.
temperature: Float or scalar `Tensor` representing the softmax
temperature.
train_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during training.
test_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during testing/inference.
compute_pred_variance: Boolean. Whether to estimate the predictive
variance.
Returns:
DenseClassifier instance.
"""
super(DenseClassifier, self).__init__()
self.hidden_layer = tf.keras.layers.Dense(16)
self.output_layer = ed.layers.MCSoftmaxDense(
num_classes=num_classes, logit_noise=logit_noise,
temperature=temperature, train_mc_samples=train_mc_samples,
test_mc_samples=test_mc_samples,
compute_pred_variance=compute_pred_variance)
def call(self, inputs, training=True, seed=None):
"""Computes the forward pass through the feedforward neural network.
Args:
inputs: `Tensor`. Input tensor.
training: Boolean. Whether we are training or not.
seed: Python integer for seeding the random number generator.
Returns:
A tuple of `Tensors` (probs, log_probs, predictive_variance).
"""
hidden_x = self.hidden_layer(inputs)
return self.output_layer(hidden_x, training=training, seed=seed)
class DenseFAClassifier(tf.keras.Model):
"""Feedforward neural network with MCSoftmaxDenseFA output layer."""
def __init__(self, num_classes, num_factors,
temperature=1.0, parameter_efficient=False,
train_mc_samples=1000, test_mc_samples=1000,
compute_pred_variance=False):
"""Creates an instance of DenseFAClassifier.
A feedforward network which computes the predictive and log predictive
distribution.
Args:
num_classes: Integer. Number of classes for classification task.
num_factors: Integer. Number of factors to use for factor analysis approx.
temperature: Float or scalar `Tensor` representing the softmax
temperature.
parameter_efficient: Boolean. Whether to use the parameter efficient
version of the method.
train_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during training.
test_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during testing/inference.
compute_pred_variance: Boolean. Whether to estimate the predictive
variance.
Returns:
DenseFAClassifier instance.
"""
super(DenseFAClassifier, self).__init__()
self.hidden_layer = tf.keras.layers.Dense(16)
self.output_layer = ed.layers.MCSoftmaxDenseFA(
num_classes=num_classes, num_factors=num_factors,
temperature=temperature, parameter_efficient=parameter_efficient,
train_mc_samples=train_mc_samples,
test_mc_samples=test_mc_samples,
compute_pred_variance=compute_pred_variance)
def call(self, inputs, training=True, seed=None):
"""Computes the forward pass through the feedforward neural network.
Args:
inputs: `Tensor`. Input tensor.
training: Boolean. Whether we are training or not.
seed: Python integer for seeding the random number generator.
Returns:
A tuple of `Tensors` (probs, log_probs, predictive_variance).
"""
hidden_x = self.hidden_layer(inputs)
return self.output_layer(hidden_x, training=training, seed=seed)
class SigmoidDenseFAClassifier(tf.keras.Model):
"""Feedforward neural network with MCSigmoidDenseFA output layer."""
def __init__(self, num_classes, num_factors,
temperature=1.0, parameter_efficient=False,
train_mc_samples=1000, test_mc_samples=1000,
compute_pred_variance=False):
"""Creates an instance of SigmoidDenseFAClassifier.
A feedforward network which computes the predictive and log predictive
distribution.
Args:
num_classes: Integer. Number of classes for classification task.
num_factors: Integer. Number of factors to use for factor analysis approx.
temperature: Float or scalar `Tensor` representing the softmax
temperature.
parameter_efficient: Boolean. Whether to use the parameter efficient
version of the method.
train_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during training.
test_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during testing/inference.
compute_pred_variance: Boolean. Whether to estimate the predictive
variance.
Returns:
SigmoidDenseFAClassifier instance.
"""
super(SigmoidDenseFAClassifier, self).__init__()
self.hidden_layer = tf.keras.layers.Dense(16)
self.output_layer = ed.layers.MCSigmoidDenseFA(
1 if num_classes == 2 else num_classes, num_factors=num_factors,
temperature=temperature, parameter_efficient=parameter_efficient,
train_mc_samples=train_mc_samples,
test_mc_samples=test_mc_samples,
compute_pred_variance=compute_pred_variance)
def call(self, inputs, training=True, seed=None):
"""Computes the forward pass through the feedforward neural network.
Args:
inputs: `Tensor`. Input tensor.
training: Boolean. Whether we are training or not.
seed: Python integer for seeding the random number generator.
Returns:
A tuple of `Tensors` (probs, log_probs, predictive_variance).
"""
hidden_x = self.hidden_layer(inputs)
return self.output_layer(hidden_x, training=training, seed=seed)
class ExactSigmoidDenseClassifier(tf.keras.Model):
"""Feedforward neural network with ExactSigmoidDense output layer."""
def __init__(self, num_classes, logit_noise):
"""Creates an instance of ExactSigmoidDenseClassifier.
A feedforward network which computes the predictive and log predictive
distribution.
Args:
num_classes: Integer. Number of classes for classification task.
logit_noise: tfp.distributions instance. Must be either
tfp.distributions.Normal or tfp.distributions.Logistic.
Returns:
ExactSigmoidDenseClassifier instance.
"""
super(ExactSigmoidDenseClassifier, self).__init__()
self.hidden_layer = tf.keras.layers.Dense(16)
self.output_layer = ed.layers.ExactSigmoidDense(
1 if num_classes == 2 else num_classes, logit_noise=logit_noise)
def call(self, inputs, training=True, seed=None):
"""Computes the forward pass through the feedforward neural network.
Args:
inputs: `Tensor`. Input tensor.
training: Boolean. Whether we are training or not.
seed: Python integer for seeding the random number generator.
Returns:
A tuple of `Tensors` (probs, log_probs, predictive_variance).
"""
hidden_x = self.hidden_layer(inputs)
return self.output_layer(hidden_x, training=training)
class EnsembleClassifier(tf.keras.Model):
"""Feedforward neural network with Ensemble output layer."""
def __init__(self, num_classes, averaging, ensemble_weighting=(0.8, 0.2)):
"""Creates an instance of EnsembleClassifier.
A feedforward network which computes the predictive and log predictive
distribution.
Args:
num_classes: Integer. Number of classes for classification task.
averaging: String `ensemble_cross_ent` or `gibbs_cross_ent`. For
`ensemble_cross_ent`: loss = - log (sum_i weighting[i] * p_i)
i.e. ensemble members are trained in the knowledge they will be
ensembled. For `gibbs_cross_ent`:
loss = - sum_i weighting[i] * log (p_i), this can help promote
diversity.
ensemble_weighting: Tuple of len(layers) representing a probability
distribution over layers.
Returns:
EnsembleClassifier instance.
"""
super(EnsembleClassifier, self).__init__()
self.hidden_layer = tf.keras.layers.Dense(16)
if num_classes == 2:
layer_1 = ed.layers.MCSigmoidDenseFA(1)
layer_2 = ed.layers.ExactSigmoidDense(1)
else:
layer_1 = ed.layers.MCSoftmaxDense(num_classes=num_classes)
layer_2 = ed.layers.MCSoftmaxDenseFA(num_classes=num_classes,
num_factors=num_classes//2)
self.output_layer = ed.layers.EnsembleHeteroscedasticOutputs(
num_classes, (layer_1, layer_2),
ensemble_weighting=ensemble_weighting, averaging=averaging)
def call(self, inputs, training=True):
"""Computes the forward pass through the feedforward neural network.
Args:
inputs: `Tensor`. Input tensor.
training: Boolean. Whether we are training or not.
Returns:
A tuple of `Tensors` (probs, log_probs, predictive_variance).
"""
hidden_x = self.hidden_layer(inputs)
return self.output_layer(hidden_x, training=training)
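# Concrete form of the two averaging modes documented above, for two ensemble
# members with weights (w1, w2) and probabilities (p1, p2) assigned to the
# observed class:
#   ensemble_cross_ent: loss = -log(w1 * p1 + w2 * p2)
#   gibbs_cross_ent:    loss = -(w1 * log(p1) + w2 * log(p2))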
class HeteroscedasticLibTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
if not tf.executing_eagerly():
tf.compat.v1.enable_resource_variables()
super().setUp()
# Helpers for HeteroscedasticLibTest.
def create_dataset(self, num_classes):
x = np.asarray([[1.0, 2.0], [0.5, 1.5], [0.2, 0.15], [-0.3, 0.0]])
y = np.asarray([[i % num_classes] for i in range(4)])
return tf.convert_to_tensor(x), tf.convert_to_tensor(y)
@test_cases()
def test_layer_construction(self, logit_noise, num_classes, model_type):
if model_type == 'MCSoftmaxDense':
output_layer = ed.layers.MCSoftmaxDense(num_classes=num_classes,
logit_noise=logit_noise)
self.assertIsNotNone(output_layer)
@test_cases()
def test_model_construction(self, logit_noise, num_classes, model_type):
if model_type == 'MCSoftmaxDense':
classifier = DenseClassifier(num_classes, logit_noise)
elif model_type == 'EnsembleEnsembleCE':
classifier = EnsembleClassifier(num_classes, 'ensemble_cross_ent')
elif model_type == 'EnsembleGibbsCE':
classifier = EnsembleClassifier(num_classes, 'gibbs_cross_ent')
else:
return
self.assertIsNotNone(classifier)
def test_ensemble_weighting(self):
classifier = EnsembleClassifier(
2, 'ensemble_cross_ent', ensemble_weighting=(0.5, 0.5))
self.assertIsNotNone(classifier)
classifier = EnsembleClassifier(
2, 'ensemble_cross_ent', ensemble_weighting=(0.8, 0.2))
self.assertIsNotNone(classifier)
with self.assertRaises(ValueError):
classifier = EnsembleClassifier(
2, 'ensemble_cross_ent', ensemble_weighting=(0.4, 0.5))
with self.assertRaises(ValueError):
classifier = EnsembleClassifier(
2, 'ensemble_cross_ent', ensemble_weighting=(1.5, -0.5))
@test_cases()
def test_model_outputs(self, logit_noise, num_classes, model_type):
x, _ = self.create_dataset(num_classes)
classifier = Classifier(model_type, num_classes, logit_noise)
res = classifier(x)
probs = res[2]
log_probs = res[1]
self.assertIsNotNone(probs)
self.assertIsNotNone(log_probs)
self.initialise()
if num_classes == 2 or 'Sigmoid' in model_type:
for prob in self.evaluate(probs).flatten():
self.assertAlmostEqual(prob + (1.0 - prob), 1.0, 2)
else:
total_probs = tf.reduce_sum(probs, axis=-1)
for prob in self.evaluate(total_probs).flatten():
self.assertAlmostEqual(prob, 1.0, 2)
res = classifier(x, training=False)
probs = res[2]
log_probs = res[1]
self.assertIsNotNone(probs)
self.assertIsNotNone(log_probs)
if num_classes == 2 or 'Sigmoid' in model_type:
for prob in self.evaluate(probs).flatten():
self.assertAlmostEqual(prob + (1.0 - prob), 1.0, 2)
else:
total_probs = tf.reduce_sum(probs, axis=-1)
for prob in self.evaluate(total_probs).flatten():
self.assertAlmostEqual(prob, 1.0, 2)
@test_cases()
def test_train_step(self, logit_noise, num_classes, model_type):
x, y = self.create_dataset(num_classes)
classifier = Classifier(model_type, num_classes, logit_noise)
if num_classes == 2:
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
if tf.executing_eagerly():
optimizer = tf.keras.optimizers.Adam()
def train_step(inputs, labels, model):
"""Defines a single training step: Update weights based on one batch."""
with tf.GradientTape() as tape:
log_preds = model(inputs)[1]
loss_value = loss_fn(labels, log_preds)
grads = tape.gradient(loss_value, model.trainable_weights)
grads, _ = tf.clip_by_global_norm(grads, 2.5)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
return loss_value
loss_value = train_step(x, y, classifier).numpy()
else:
optimizer = tf.compat.v1.train.AdamOptimizer()
log_preds = classifier(x)[1]
loss_value = loss_fn(y, log_preds)
train_op = optimizer.minimize(loss_value)
self.initialise()
loss_value, _ = self.evaluate([loss_value, train_op])
self.assertGreater(loss_value, 0)
@test_cases()
def test_predictive_variance(self, logit_noise, num_classes, model_type):
if model_type == 'Exact' or model_type.startswith('Ensemble'):
return
x, _ = self.create_dataset(num_classes)
classifier = Classifier(model_type, num_classes, logit_noise,
compute_pred_variance=True)
pred_variance = classifier(x)[3]
self.assertIsNotNone(pred_variance)
self.initialise()
pred_variance = self.evaluate(pred_variance)
for per_class_variance in pred_variance.flatten():
self.assertGreater(per_class_variance, 0)
def initialise(self):
if not tf.executing_eagerly():
self.evaluate([tf.compat.v1.global_variables_initializer(),
tf.compat.v1.local_variables_initializer()])
def segmentation_test_cases():
return parameterized.named_parameters(
{
'testcase_name': '_classes_10_factors_5',
'num_classes': 10,
'num_factors': 5,
}, {
'testcase_name': '_classes_10_factors_0',
'num_classes': 10,
'num_factors': 0,
},)
class SegmentationClassifier(tf.keras.Model):
"""Segmentation classifier."""
def __init__(self, num_classes, num_factors):
super().__init__()
self.hidden_layer = tf.keras.layers.Dense(16)
self.output_layer = ed.layers.MCSoftmaxDenseFASegmentation(
num_classes, num_factors)
def call(self, inputs, training=True):
if tf.executing_eagerly():
hidden = self.hidden_layer(inputs, training=training)
return self.output_layer(hidden, training=training)
else:
with tf.compat.v1.variable_scope('scope', use_resource=True):
hidden = self.hidden_layer(inputs, training=training)
return self.output_layer(hidden, training=training)
class SegLayerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
if not tf.executing_eagerly():
tf.compat.v1.enable_resource_variables()
super().setUp()
# Helpers for SegLayerTest.
def create_dataset(self, num_classes):
x = np.random.normal(size=(4, 32, 32, 16))
y = np.random.choice(num_classes, size=(4, 32, 32))
return tf.convert_to_tensor(x), tf.convert_to_tensor(y)
@segmentation_test_cases()
def test_layer_construction(self, num_classes, num_factors):
output_layer = ed.layers.MCSoftmaxDenseFASegmentation(
num_classes, num_factors)
self.assertIsNotNone(output_layer)
@segmentation_test_cases()
def test_model_construction(self, num_classes, num_factors):
classifier = SegmentationClassifier(num_classes, num_factors)
self.assertIsNotNone(classifier)
@segmentation_test_cases()
def test_model_outputs(self, num_classes, num_factors):
x, _ = self.create_dataset(num_classes)
classifier = SegmentationClassifier(num_classes, num_factors)
res = classifier(x)
probs = res[2]
log_probs = res[1]
self.assertIsNotNone(probs)
self.assertIsNotNone(log_probs)
self.initialise()
total_probs = tf.reduce_sum(probs, axis=-1)
for prob in self.evaluate(total_probs).flatten():
self.assertAlmostEqual(prob, 1.0, 2)
res = classifier(x, training=False)
probs = res[2]
log_probs = res[1]
self.assertIsNotNone(probs)
self.assertIsNotNone(log_probs)
total_probs = tf.reduce_sum(probs, axis=-1)
for prob in self.evaluate(total_probs).flatten():
self.assertAlmostEqual(prob, 1.0, 2)
@segmentation_test_cases()
def test_train_step(self, num_classes, num_factors):
x, y = self.create_dataset(num_classes)
classifier = SegmentationClassifier(num_classes, num_factors)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
if tf.executing_eagerly():
optimizer = tf.keras.optimizers.Adam()
def train_step(inputs, labels, model):
"""Defines a single training step: Update weights based on one batch."""
with tf.GradientTape() as tape:
log_preds = model(inputs)[1]
loss_value = loss_fn(labels, log_preds)
grads = tape.gradient(loss_value, model.trainable_weights)
grads, _ = tf.clip_by_global_norm(grads, 2.5)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
return loss_value
loss_value = train_step(x, y, classifier).numpy()
else:
optimizer = tf.compat.v1.train.AdamOptimizer()
log_preds = classifier(x)[1]
loss_value = loss_fn(y, log_preds)
train_op = optimizer.minimize(loss_value)
self.initialise()
loss_value, _ = self.evaluate([loss_value, train_op])
self.assertGreater(loss_value, 0)
def initialise(self):
if not tf.executing_eagerly():
self.evaluate([tf.compat.v1.global_variables_initializer(),
tf.compat.v1.local_variables_initializer()])
if __name__ == '__main__':
tf.test.main()
|
|
import colander
from daybed import schemas
from daybed.tests.support import unittest
class PointFieldTests(unittest.TestCase):
def setUp(self):
self.schema = schemas.PointField.definition()
definition = self.schema.deserialize(
{'name': 'location',
'type': 'point'})
self.validator = schemas.PointField.validation(**definition)
def test_deserialization_is_idempotent(self):
self.assertEquals([0.4, 45.0],
self.validator.deserialize([0.4, 45.0]))
def test_coordinates_are_deserialized_as_float_or_integer(self):
self.assertEquals([0.4, 45.0],
self.validator.deserialize('[0.4, 45.0]'))
self.assertEquals([0, 45],
self.validator.deserialize('[0, 45]'))
def test_coordinates_can_have_several_dimensions(self):
self.assertEquals([0.4, 45.0, 1280],
self.validator.deserialize('[0.4, 45.0, 1280]'))
self.assertEquals([0.4, 45.0, 1280, 2048],
self.validator.deserialize(
'[0.4, 45.0, 1280, 2048]'))
def test_coordinates_cannot_be_null_if_required(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, colander.null)
def test_coordinates_can_be_null_if_not_required(self):
definition = self.schema.deserialize(
{'name': 'location',
'type': 'point',
'required': 'false'})
validator = schemas.PointField.validation(**definition)
self.assertEquals(colander.null,
validator.deserialize(colander.null))
def test_coordinates_must_be_valid_json(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[0.4,,45.0]')
def test_coordinates_cannot_be_invalid_data(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[0.4]')
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[[0.4, 45.0]]')
self.assertRaises(colander.Invalid,
self.validator.deserialize, '"0.4, 45.0"')
self.assertRaises(colander.Invalid,
self.validator.deserialize, '["a", "b"]')
def test_coordinates_cannot_exceed_earth(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[181.0, 91.0]')
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[-181.0, -91.0]')
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[120.0, -91.0]')
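# Minimal sketch of the definition/validation flow exercised above; the field
# name 'location' is only an illustrative value and this helper is not run by
# the test suite.
def _example_point_validation():
    schema = schemas.PointField.definition()
    definition = schema.deserialize({'name': 'location', 'type': 'point'})
    validator = schemas.PointField.validation(**definition)
    # Coordinates may be passed as a JSON string and come back as a list.
    return validator.deserialize('[0.4, 45.0]')  # -> [0.4, 45.0]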
class EuclideanPointFieldTests(unittest.TestCase):
def test_point_euclidean(self):
schema = schemas.PointField.definition()
definition = schema.deserialize(
{'name': 'location',
'type': 'point',
'gps': False})
validator = schemas.PointField.validation(**definition)
self.assertEquals([181.0, 91.0],
validator.deserialize('[181.0, 91.0]'))
class LineFieldTests(unittest.TestCase):
def setUp(self):
self.schema = schemas.LineField.definition()
definition = self.schema.deserialize(
{'name': 'along',
'type': 'line'})
self.validator = schemas.LineField.validation(**definition)
def test_lines_have_at_least_two_points(self):
self.assertEquals([[0.4, 45.0], [0.6, 65.0]],
self.validator.deserialize(
'[[0.4, 45.0], [0.6, 65.0]]'))
self.assertEquals([[0.4, 45.0], [0.6, 65.0], [0.8, 85.0]],
self.validator.deserialize(
'[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0]]'))
def test_lines_cannot_be_null_if_required(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, colander.null)
def test_lines_can_be_null_if_not_required(self):
definition = self.schema.deserialize(
{'name': 'along',
'type': 'line',
'required': 'false'})
validator = schemas.LineField.validation(**definition)
self.assertEquals(colander.null,
validator.deserialize(colander.null))
def test_lines_must_have_at_least_two_points(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[[0.4, 45.0]]')
def test_lines_must_be_a_list_of_coordinates(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[0.4, 45.0]')
def test_lines_must_be_valid_json(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[[4,4],[4,,5]]')
class PolygonFieldTests(unittest.TestCase):
def setUp(self):
schema = schemas.PolygonField.definition()
definition = schema.deserialize(
{'name': 'area',
'type': 'polygon'})
self.validator = schemas.PolygonField.validation(**definition)
def test_polygones_are_linear_ring(self):
self.assertEquals(
[[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0], [0.4, 45.0]]],
self.validator.deserialize(
'[[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0], [0.4, 45.0]]]'))
def test_polygones_are_automatically_closed(self):
self.assertEquals(
[[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0], [0.4, 45.0]]],
self.validator.deserialize(
'[[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0]]]'))
def test_polygones_can_have_holes(self):
self.assertEquals(
[[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0], [0.4, 45.0]],
[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0], [0.4, 45.0]]],
self.validator.deserialize(
"""[[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0]],
[[0.4, 45.0], [0.6, 65.0], [0.8, 85.0]]]"""))
def test_polygones_must_have_enough_points(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize, '[[[0.4, 45.0]]]')
self.assertRaises(colander.Invalid,
self.validator.deserialize,
'[[[0.4, 45.0], [0.6, 65.0]]]')
class GeoJSONFieldTests(unittest.TestCase):
def setUp(self):
schema = schemas.GeoJSONField.definition()
definition = schema.deserialize(
{'name': 'webmap',
'type': 'geojson'})
self.validator = schemas.GeoJSONField.validation(**definition)
def test_geojson_can_be_a_point(self):
deserialized = self.validator.deserialize("""
{"type": "Point",
"coordinates": [100.0, 0.0] }""")
self.assertDictEqual({"type": "Point",
"coordinates": [100.0, 0.0]}, deserialized)
def test_geojson_can_be_a_linestring(self):
deserialized = self.validator.deserialize("""
{"type": "LineString",
"coordinates": [[1, 2], [2, 3]] }""")
self.assertDictEqual({"type": "LineString",
"coordinates": [[1, 2], [2, 3]]}, deserialized)
def test_geojson_can_be_a_polygon(self):
deserialized = self.validator.deserialize("""
{"type": "Polygon",
"coordinates": [[[1, 2], [2, 3], [1, 2]]] }""")
self.assertDictEqual({"type": "Polygon",
"coordinates": [[[1, 2], [2, 3], [1, 2]]]},
deserialized)
def test_geojson_can_be_a_collection(self):
deserialized = self.validator.deserialize("""
{"type": "GeometryCollection",
"geometries": [{"type": "Point",
"coordinates": [100.0, 0.0] }]}""")
self.assertDictEqual({"type": "Point",
"coordinates": [100.0, 0.0]},
deserialized['geometries'][0])
def test_geojson_must_have_type(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize,
'{"coordinates": [1, 2] }')
self.assertRaises(colander.Invalid,
self.validator.deserialize,
'{"type": null, "coordinates": [1, 2] }')
def test_geojson_cannot_have_unknown_type(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize,
'{"type": "Triangle", "coordinates": [1, 2] }')
def test_geojson_collection_items_cannot_have_unknown_type(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize,
"""{"type": "GeometryCollection",
"geometries": [{"type": "Triangle",
"coordinates": [1, 0] }]}""")
def test_geojson_collection_must_have_geometries(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize,
"""{"type": "GeometryCollection"}""")
self.assertRaises(colander.Invalid,
self.validator.deserialize,
"""{"type": "GeometryCollection",
"geometries": true}""")
def test_geojson_collection_can_be_empty(self):
deserialized = self.validator.deserialize("""
{"type": "GeometryCollection",
"geometries": []}""")
self.assertDictEqual({"type": "GeometryCollection",
"geometries": []},
deserialized)
def test_geojson_point_must_have_valid_coordinates(self):
self.assertRaises(colander.Invalid,
self.validator.deserialize,
"""{"type": "Point"}""")
self.assertRaises(colander.Invalid,
self.validator.deserialize,
"""{"type": "Point",
"coordinates": ["a", "b"]}""")
def test_geojson_can_be_multipoints(self):
deserialized = self.validator.deserialize("""
{"type": "MultiPoint",
"coordinates": [[1.0, 0.0], [2.0, 1.0]]}""")
self.assertDictEqual({"type": "MultiPoint",
"coordinates": [[1.0, 0.0], [2.0, 1.0]]},
deserialized)
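# Minimal sketch of GeoJSON validation, mirroring the setUp above; the field
# name 'webmap' is only an illustrative value and this helper is not run by
# the test suite.
def _example_geojson_validation():
    schema = schemas.GeoJSONField.definition()
    definition = schema.deserialize({'name': 'webmap', 'type': 'geojson'})
    validator = schemas.GeoJSONField.validation(**definition)
    return validator.deserialize('{"type": "Point", "coordinates": [100.0, 0.0]}')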
|
|
# A small helper class to house functions needed by KeplerMapper.visualize
import numpy as np
from sklearn import preprocessing
import json
from collections import defaultdict
from ast import literal_eval
colorscale_default = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
palette = [
"#0500ff",
"#0300ff",
"#0100ff",
"#0002ff",
"#0022ff",
"#0044ff",
"#0064ff",
"#0084ff",
"#00a4ff",
"#00a4ff",
"#00c4ff",
"#00e4ff",
"#00ffd0",
"#00ff83",
"#00ff36",
"#17ff00",
"#65ff00",
"#b0ff00",
"#fdff00",
"#FFf000",
"#FFdc00",
"#FFc800",
"#FFb400",
"#FFa000",
"#FF8c00",
"#FF7800",
"#FF6400",
"#FF5000",
"#FF3c00",
"#FF2800",
"#FF1400",
"#FF0000",
]
def _colors_to_rgb(colorscale):
""" Ensure that the color scale is formatted in rgb strings.
If the colorscale is a hex string, then convert to rgb.
"""
if colorscale[0][1][0] == "#":
plotly_colors = np.array(colorscale)[:, 1].tolist()
for k, hexcode in enumerate(plotly_colors):
hexcode = hexcode.lstrip("#")
hex_len = len(hexcode)
step = hex_len // 3
colorscale[k][1] = "rgb" + str(
tuple(int(hexcode[j : j + step], 16) for j in range(0, hex_len, step))
)
return colorscale
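# Minimal usage sketch (illustrative values): a two-entry hex colorscale is
# rewritten so that every color becomes an "rgb(r, g, b)" string.
def _example_colors_to_rgb():
    scale = [[0.0, "#000000"], [1.0, "#ffffff"]]
    return _colors_to_rgb(scale)  # -> [[0.0, 'rgb(0, 0, 0)'], [1.0, 'rgb(255, 255, 255)']]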
def _to_html_format(st):
return st.replace("\n", "<br>")
def _map_val2color(val, vmin, vmax, colorscale=None):
""" Maps a value val in [vmin, vmax] to the corresponding color in
the colorscale
returns the rgb color code of that color
"""
colorscale = colorscale or colorscale_default
if vmin >= vmax:
raise ValueError("vmin should be < vmax")
scale = list(map(float, np.array(colorscale)[:, 0]))
colors = np.array(colorscale)[:, 1]
colors_01 = (
np.array(list(map(literal_eval, [color[3:] for color in colors]))) / 255.0
)
v = (val - vmin) / float((vmax - vmin)) # val is mapped to v in[0,1]
idx = 0
    # sequential search for the two consecutive indices idx, idx+1 such that
    # v belongs to the interval [scale[idx], scale[idx+1]]
while v > scale[idx + 1]:
idx += 1
left_scale_val = scale[idx]
right_scale_val = scale[idx + 1]
vv = (v - left_scale_val) / (right_scale_val - left_scale_val)
# get the triplet of three values in [0,1] that represent the rgb color
# corresponding to val
val_color01 = colors_01[idx] + vv * (colors_01[idx + 1] - colors_01[idx])
val_color_0255 = list(map(np.uint8, 255 * val_color01))
return "rgb" + str(tuple(val_color_0255))
def init_color_function(graph, color_function=None):
# If no color_function provided we color by row order in data set
# Reshaping to 2-D array is required for sklearn 0.19
n_samples = np.max([i for s in graph["nodes"].values() for i in s]) + 1
if color_function is None:
color_function = np.arange(n_samples).reshape(-1, 1)
else:
color_function = color_function.reshape(-1, 1)
color_function = color_function.astype(np.float64)
# MinMax Scaling to be friendly to non-scaled input.
scaler = preprocessing.MinMaxScaler()
color_function = scaler.fit_transform(color_function).ravel()
# "Scaler might have floating point issues, 1.0000...0002". Force max and min
color_function[color_function > 1] = 1
color_function[color_function < 0] = 0
return color_function
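# Minimal usage sketch (a toy graph with hypothetical node memberships): with
# no color_function given, samples are colored by row order and scaled to [0, 1].
def _example_init_color_function():
    toy_graph = {"nodes": {"cube0": [0, 1], "cube1": [1, 2, 3]}}
    return init_color_function(toy_graph)  # -> approximately [0.0, 0.33, 0.67, 1.0]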
def format_meta(graph, custom_meta=None, color_function_name=None):
n = [l for l in graph["nodes"].values()]
n_unique = len(set([i for s in n for i in s]))
if custom_meta is None:
custom_meta = graph["meta_data"]
if "clusterer" in custom_meta.keys():
clusterer = custom_meta["clusterer"]
custom_meta["clusterer"] = _to_html_format(clusterer)
if "projection" in custom_meta.keys():
projection = custom_meta["projection"]
custom_meta["projection"] = _to_html_format(projection)
if color_function_name is not None:
custom_meta["color_function"] = color_function_name
mapper_summary = {
"custom_meta": custom_meta,
"n_nodes": len(graph["nodes"]),
"n_edges": sum([len(l) for l in graph["links"].values()]),
"n_total": sum([len(l) for l in graph["nodes"].values()]),
"n_unique": n_unique,
}
return mapper_summary
def format_mapper_data(
graph, color_function, X, X_names, lens, lens_names, custom_tooltips, env, nbins=10
):
# import pdb; pdb.set_trace()
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(graph["nodes"].items()):
node_id_to_num[node_id] = i
c = _color_function(member_ids, color_function)
t = _type_node()
s = _size_node(member_ids)
tt = _format_tooltip(
env,
member_ids,
custom_tooltips,
X,
X_names,
lens,
lens_names,
color_function,
node_id,
nbins,
)
n = {
"id": "",
"name": node_id,
"color": c,
"type": _type_node(),
"size": s,
"tooltip": tt,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(graph["links"].items()):
for linked_node_id in linked_node_ids:
json_dict["links"].append(
{
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
"width": _size_link_width(graph, node_id, linked_node_id),
}
)
return json_dict
def build_histogram(data, colorscale=None, nbins=10):
""" Build histogram of data based on values of color_function
"""
if colorscale is None:
colorscale = colorscale_default
# TODO: we should weave this method of handling colors into the normal build_histogram and combine both functions
colorscale = _colors_to_rgb(colorscale)
h_min, h_max = 0, 1
hist, bin_edges = np.histogram(data, range=(h_min, h_max), bins=nbins)
bin_mids = np.mean(np.array(list(zip(bin_edges, bin_edges[1:]))), axis=1)
histogram = []
max_bucket_value = max(hist)
sum_bucket_value = sum(hist)
for bar, mid in zip(hist, bin_mids):
height = np.floor(((bar / max_bucket_value) * 100) + 0.5)
perc = round((bar / sum_bucket_value) * 100.0, 1)
color = _map_val2color(mid, 0.0, 1.0, colorscale)
histogram.append({"height": height, "perc": perc, "color": color})
return histogram
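# Minimal usage sketch (synthetic data in [0, 1]): each returned entry carries
# a bar height, a percentage and an rgb color picked via _map_val2color().
def _example_build_histogram():
    data = np.linspace(0.0, 1.0, 10)
    return build_histogram(data, nbins=10)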
def graph_data_distribution(graph, color_function, colorscale, nbins=10):
node_averages = []
for node_id, member_ids in graph["nodes"].items():
member_colors = color_function[member_ids]
node_averages.append(np.mean(member_colors))
histogram = build_histogram(node_averages, colorscale=colorscale, nbins=nbins)
return histogram
def _format_cluster_statistics(member_ids, X, X_names):
# TODO: Cache X_mean and X_std for all clusters.
# TODO: replace long tuples with named tuples.
# TODO: Name all the single letter variables.
# TODO: remove duplication between above_stats and below_stats
# TODO: Should we only show variables that are much above or below the mean?
cluster_data = {"above": [], "below": [], "size": len(member_ids)}
cluster_stats = ""
if X is not None:
# List vs. numpy handling: cast to numpy array
if isinstance(X_names, list):
X_names = np.array(X_names)
# Defaults when providing no X_names
if X_names.shape[0] == 0:
X_names = np.array(["f_%s" % (i) for i in range(X.shape[1])])
cluster_X_mean = np.mean(X[member_ids], axis=0)
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
above_mean = cluster_X_mean > X_mean
std_m = np.sqrt((cluster_X_mean - X_mean) ** 2) / X_std
stat_zip = list(
zip(
std_m,
X_names,
np.mean(X, axis=0),
cluster_X_mean,
above_mean,
np.std(X, axis=0),
)
)
stats = sorted(stat_zip, reverse=True)
above_stats = [a for a in stats if bool(a[4]) is True]
below_stats = [a for a in stats if bool(a[4]) is False]
if len(above_stats) > 0:
for s, f, i, c, a, v in above_stats[:5]:
cluster_data["above"].append(
{"feature": f, "mean": round(c, 3), "std": round(s, 1)}
)
if len(below_stats) > 0:
for s, f, i, c, a, v in below_stats[:5]:
cluster_data["below"].append(
{"feature": f, "mean": round(c, 3), "std": round(s, 1)}
)
return cluster_data
def _format_projection_statistics(member_ids, lens, lens_names):
projection_data = []
if lens is not None:
if isinstance(lens_names, list):
lens_names = np.array(lens_names)
# Create defaults when providing no lens_names
if lens_names.shape[0] == 0:
lens_names = np.array(["p_%s" % (i) for i in range(lens.shape[1])])
means_v = np.mean(lens[member_ids], axis=0)
maxs_v = np.max(lens[member_ids], axis=0)
mins_v = np.min(lens[member_ids], axis=0)
for name, mean_v, max_v, min_v in zip(lens_names, means_v, maxs_v, mins_v):
projection_data.append(
{
"name": name,
"mean": round(mean_v, 3),
"max": round(max_v, 3),
"min": round(min_v, 3),
}
)
return projection_data
def _tooltip_components(
member_ids,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
colorscale,
nbins=10,
):
projection_stats = _format_projection_statistics(member_ids, lens, lens_names)
cluster_stats = _format_cluster_statistics(member_ids, X, X_names)
member_histogram = build_histogram(
color_function[member_ids], colorscale=colorscale, nbins=nbins
)
return projection_stats, cluster_stats, member_histogram
def _format_tooltip(
env,
member_ids,
custom_tooltips,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
nbins,
):
# TODO: Allow customization in the form of aggregate per node and per entry in node.
# TODO: Allow users to turn off tooltip completely.
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
# list will render better than numpy arrays
custom_tooltips = list(custom_tooltips)
colorscale = colorscale_default
projection_stats, cluster_stats, histogram = _tooltip_components(
member_ids,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
colorscale,
nbins,
)
tooltip = env.get_template("cluster_tooltip.html").render(
projection_stats=projection_stats,
cluster_stats=cluster_stats,
custom_tooltips=custom_tooltips,
histogram=histogram,
dist_label="Member",
node_id=node_ID,
)
return tooltip
def _color_function(member_ids, color_function):
return np.mean(color_function[member_ids])
def _size_node(member_ids):
return int(np.log(len(member_ids) + 1) + 1)
def _type_node():
return "circle"
def _size_link_width(graph, node_id, linked_node_id):
return 1
|
|
import urllib.parse
import requests
TRELLO_API_URL = 'https://trello.com/1'
class TrelloError(Exception):
def __init__(self, session, status_code, url, text, desc="API call error"):
self.session = session
self.status_code = status_code
self.url = url
self.text = text
maxlen = 255
if len(self.text) > maxlen:
display_text = self.text[:(maxlen-3)] + '...'
else:
display_text = self.text
super().__init__("{desc}: {url} -> {status_code} {display_text}"
.format(desc=desc, url=url,
status_code=status_code,
display_text=display_text))
class CustomTrelloError(TrelloError):
status_code = 0
desc = ""
def __init__(self, session, url, text):
super().__init__(session, self.status_code, url, text, self.desc)
class AuthError(CustomTrelloError):
status_code = 401
desc = "Request was denied"
class NotFoundError(CustomTrelloError):
status_code = 404
desc = "URL not found"
class RequestError(CustomTrelloError):
status_code = 400
desc = "Invalid data"
class App:
def __init__(self, key):
self.key = key
def auth_url(self):
params = {
'callback_method': 'fragment',
'return_url': 'http://example.com/',
'scope': 'read',
'expiration': 'never',
'name': 'Oxymeal Trello Bot',
'key': self.key,
}
return TRELLO_API_URL + '/authorize?' + urllib.parse.urlencode(params)
def session(self, token):
return Session(self, token)
class Session:
def __init__(self, app, token):
self.app = app
self.token = token
self.members = MembersAPI(self)
self.actions = ActionsAPI(self)
self.webhooks = WebhooksAPI(self)
self.boards = BoardsAPI(self)
self.lists = ListsAPI(self)
self.cards = CardsAPI(self)
def _api_request(self, method, url, params=None, data=None):
if params is None: params = {}
params['key'] = self.app.key
params['token'] = self.token
r = requests.request(method, TRELLO_API_URL + url, params=params, data=data)
if r.status_code == 400:
raise RequestError(self, url, r.text)
if r.status_code == 401:
raise AuthError(self, url, r.text)
if r.status_code == 404:
raise NotFoundError(self, url, r.text)
elif r.status_code != 200:
raise TrelloError(self, r.status_code, url, r.text)
return r.json()
def _api_get(self, url, *, params=None):
return self._api_request('get', url, params)
def _api_post(self, url, *, params=None, data=None):
return self._api_request('post', url, params, data)
def _api_put(self, url, *, params=None, data=None):
return self._api_request('put', url, params, data)
def _api_delete(self, url, *, params=None):
return self._api_request('delete', url, params)
class API:
def __init__(self, session, model_class):
self.session = session
self.model_class = model_class
@property
def url_base(self):
return self.model_class.url_base
def all(self):
json = self.session._api_get(self.url_base)
return [self.model_class.from_dict(self.session, m) for m in json]
def get(self, id):
json = self.session._api_get(self.url_base + '/' + id)
return self.model_class.from_dict(self.session, json)
def add(self, **kwargs):
json = self.session._api_post(self.url_base, data=kwargs)
return self.model_class.from_dict(self.session, json)
class MembersAPI(API):
def __init__(self, session):
super().__init__(session, Member)
def me(self):
json = self.session._api_get(self.url_base + '/me')
return Member.from_dict(self.session, json)
class ActionsAPI(API):
def __init__(self, session):
super().__init__(session, Action)
class WebhooksAPI(API):
def __init__(self, session):
super().__init__(session, Webhook)
class BoardsAPI(API):
def __init__(self, session):
super().__init__(session, Board)
class ListsAPI(API):
def __init__(self, session):
super().__init__(session, List)
class CardsAPI(API):
def __init__(self, session):
super().__init__(session, Card)
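# Minimal usage sketch (the key and token below are placeholders, not real
# credentials): an App wraps the API key, a Session adds the user token, and
# the per-resource helpers hang off the session.
def _example_session_usage():
    app = App('my-api-key')            # hypothetical key
    session = app.session('my-token')  # hypothetical token
    me = session.members.me()
    return me.boards(filter='open')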
class Model:
url_base = ''
def __init__(self, session, id):
self.session = session
self.id = id
@classmethod
def from_dict(cls, session, d):
return Model(session, d['id'])
def _sub_url(self, url):
return "{base}/{id}{url}".format(base=self.url_base, id=self.id, url=url)
def delete(self):
        self.session._api_delete(self.url_base + '/' + self.id)
class Member(Model):
url_base = '/members'
def __init__(self, session, id, username, fullname, url):
self.session = session
self.id = id
self.username = username
self.fullname = fullname
self.url = url
@classmethod
def from_dict(cls, session, d):
return Member(session,
d['id'],
d.get('username'),
d.get('fullName'),
d.get('url'))
def boards(self, *, filter=None):
params = {}
if filter:
if isinstance(filter, list):
filter = ','.join(filter)
params['filter'] = filter
json = self.session._api_get(self._sub_url('/boards'), params=params)
return [Board.from_dict(self.session, d) for d in json]
class Action(Model):
url_base = '/actions'
def __init__(self, session, id, id_member_creator, type,
changed_field=None, old_value=None):
super().__init__(session, id)
self.id_member_creator = id_member_creator
self.type = type
if changed_field:
self.changed_field = changed_field
self.old_value = old_value
@classmethod
def from_dict(cls, session, d):
action = Action(session, d['id'], d['idMemberCreator'], d['type'])
data = d['data']
if 'board' in data:
action.board = Board.from_dict(session, data['board'])
if 'list' in data:
action.list = List.from_dict(session, data['list'])
if 'listBefore' in data:
action.list_before = List.from_dict(session, data['listBefore'])
if 'listAfter' in data:
action.list_after = List.from_dict(session, data['listAfter'])
if 'card' in data:
action.card = Card.from_dict(session, data['card'])
if hasattr(action, 'list'):
action.card.id_list = action.list.id
if 'text' in data:
action.text = data['text']
if 'member' in d:
action.member = Member.from_dict(session, d['member'])
if 'memberCreator' in d:
action._member_creator = Member.from_dict(session, d['memberCreator'])
if 'old' in data:
action.changed_field = list(data['old'].keys())[0]
action.old_value = data['old'][action.changed_field]
return action
def member_creator(self):
if hasattr(self, '_member_creator'):
return self._member_creator
return self.session.members.get(self.id_member_creator)
class Webhook(Model):
url_base = '/webhooks'
def __init__(self, session, id, callback_url, id_model, description=""):
super().__init__(session, id)
self.callback_url = callback_url
self.id_model = id_model
self.description = description
@classmethod
def from_dict(cls, session, d):
return Webhook(session,
d['id'],
d['callbackURL'],
d['idModel'],
d.get('description'))
class Card(Model):
url_base = '/cards'
def __init__(self, session, id, name, id_list, short_link=None):
self.session = session
self.id = id
self.name = name
self.id_list = id_list
self.short_link = short_link
@classmethod
def from_dict(cls, session, d):
return Card(session,
d['id'],
d['name'],
                    d.get('idList'),
d.get('shortLink'))
@property
def url(self):
return "https://trello.com/c/{}/".format(self.short_link)
def list(self):
return self.session.lists.get(self.id_list)
class Board(Model):
url_base = '/boards'
def __init__(self, session, id, name, desc, short_link=None):
self.session = session
self.id = id
self.name = name
self.desc = desc
self.short_link = short_link
@classmethod
    def from_dict(cls, session, d):
return Board(session,
d['id'],
d['name'],
d.get('desc'),
d.get('shortLink'))
@property
def url(self):
return "https://trello.com/b/{}/".format(self.short_link)
def actions(self):
json = self.session._api_get(self._sub_url('/actions'))
return [Action.from_dict(self.session, d) for d in json]
def lists(self):
json = self.session._api_get(self._sub_url('/lists'))
return [List.from_dict(self.session, d) for d in json]
class List(Model):
url_base = '/lists'
def __init__(self, session, id, name):
self.session = session
self.id = id
self.name = name
@classmethod
    def from_dict(cls, session, d):
return List(session,
d['id'],
d['name'])
def board(self):
json = self.session._api_get(self._sub_url('/board'))
return Board.from_dict(self.session, json)
def cards(self):
json = self.session._api_get(self._sub_url('/cards'))
cs = [Card.from_dict(self.session, d) for d in json]
for c in cs:
c.id_list = self.id
return cs
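# Minimal sketch of walking the object graph exposed above (board_id is a
# placeholder): a board exposes its lists, and each list yields its cards.
def _example_walk_board(session, board_id):
    board = session.boards.get(board_id)
    for lst in board.lists():
        for card in lst.cards():
            print(board.name, lst.name, card.name, card.url)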
|
|
from js9 import j
from .SourceLoader import SourceLoader
from JumpScale9.errorhandling.ErrorConditionObject import ErrorConditionObject
import colored_traceback
import pygments.lexers
import cProfile
from contextlib import contextmanager
import asyncio
import functools
import logging
import traceback
from collections.abc import MutableMapping
import time
colored_traceback.add_hook(always=True)
def _execute_cb(job, future):
"""
    callback called after a job has finished executing
    job: the job object
    future: the future that holds the result of the job execution
"""
if job._cancelled is True:
return
elapsed = time.time() - job._started
job.logger.info("{} took {} sec to execute".format(job, elapsed))
service_action_obj = None
if job.service is not None:
action_name = job.model.dbobj.actionName
if action_name in job.service.model.actions:
service_action_obj = job.service.model.actions[action_name]
service_action_obj.lastRun = j.data.time.epoch
    # check if an exception happened during job execution
exception = None
try:
exception = future.exception()
except asyncio.CancelledError as err:
        # catch CancelledError since it's not abnormal for a job to be cancelled
exception = err
job.logger.info("{} has been cancelled".format(job))
if exception is not None:
        # set the state of the job and of the run to error
        # this state will be checked by RunStep and Run and it will raise an exception
job.state = 'error'
job.model.dbobj.state = 'error'
if service_action_obj:
service_action_obj.state = 'error'
# make sure we don't keep increasing this number forever, it could overflow.
if job.model.dbobj.runKey:
run = j.core.jobcontroller.db.runs.get(job.model.dbobj.runKey)
run = run.objectGet()
if service_action_obj.errorNr < len(run.retries) + 1:
service_action_obj.errorNr += 1
job.service.model.dbobj.state = 'error'
ex = exception if exception is not None else TimeoutError()
eco = j.errorhandler.processPythonExceptionObject(exception)
job._processError(eco)
if not isinstance(exception, asyncio.CancelledError):
tb_lines = [line.rstrip('\n') for line in traceback.format_exception(exception.__class__, exception, exception.__traceback__)]
job.logger.error("{} failed:\n{}".format(job, '\n'.join(tb_lines)))
else:
        # job executed successfully
job.state = 'ok'
job.model.dbobj.state = 'ok'
if service_action_obj:
service_action_obj.state = 'ok'
service_action_obj.errorNr = 0
if job.service:
job.service.model.dbobj.state = 'ok'
job.logger.info("{} done successfuly".format(job))
if service_action_obj and service_action_obj.period > 0: # recurring action.
job.model.save()
job.model.delete()
del job
else:
job.save()
@contextmanager
def generate_profile(job):
"""
    context manager that generates a profile of the code it wraps
"""
if job.model.dbobj.profile is False:
yield
else:
try:
pr = cProfile.Profile()
pr.enable()
yield
finally:
pr.create_stats()
# TODO: *1 this is slow, needs to be fetched differently
stat_file = j.sal.fs.getTempFileName()
pr.dump_stats(stat_file)
job.model.dbobj.profileData = j.sal.fs.fileGetBinaryContents(stat_file)
j.sal.fs.remove(stat_file)
class JobHandler(logging.Handler):
def __init__(self, job_model, level=logging.NOTSET):
super().__init__(level=level)
self._job_model = job_model
def emit(self, record):
if record.levelno <= 20:
category = 'msg'
elif 20 < record.levelno <= 30:
category = 'alert'
else:
category = 'errormsg'
self._job_model.log(msg=record.getMessage(), level=record.levelno, category=category, epoch=int(record.created), tags='')
class JobContext(MutableMapping):
def __init__(self, model):
self._dict = {}
for ctx in model.dbobj.context:
self._dict[ctx.key] = ctx.value
def __getitem__(self, key):
return self._dict.__getitem__(key)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("key can only be of type str")
if not isinstance(value, str):
raise TypeError("value can only be of type str")
self._dict.__setitem__(key, value)
def __delitem__(self, key):
return self._dict.__delitem__(key)
def __iter__(self):
return self._dict.__iter__()
def __len__(self):
return self._dict.__len__()
def __repr__(self):
return self._dict.__repr__()
def __str__(self):
return self._dict.__str__()
def keys(self):
return self._dict.keys()
class Job:
"""
    A Job is what needs to be done for one specific action of a service.
"""
def __init__(self, model):
# used to track how long the job takes to execute
self._started = None
self.model = model
self.context = JobContext(model)
self._cancelled = False
self._action = None
self._service = None
self._future = None
self.saveService = True
self._sourceLoader = None
self.logger = j.logger.get('j.core.jobcontroller.job.{}'.format(self.model.key))
self._logHandler = JobHandler(self.model)
self.logger.addHandler(self._logHandler)
def __del__(self):
self.cleanup()
@property
def _loop(self):
return j.atyourservice.server.loop
def cleanup(self):
"""
        remove the log handler from the job object so it doesn't keep the job alive in memory
"""
self.logger.removeHandler(self._logHandler)
jc_log_refs = j.logger.logging.manager.loggerDict.get('j.core.jobcontroller', {})
job_log_refs = j.logger.logging.manager.loggerDict.get('j.core.jobcontroller.job', {})
        # Properly clean logger references in the logging module to avoid memory leaks.
jc_log_refs.loggerMap.pop(self.logger, None)
job_log_refs.loggerMap.pop(self.logger, None)
j.logger.logging.manager.loggerDict.pop(self.logger.name, None)
for h in self.logger.handlers:
self.logger.removeHandler(h)
self._logHandler = None
self.logger = None
@property
def action(self):
if self._action is None:
self._action = j.core.jobcontroller.db.actions.get(self.model.dbobj.actionKey)
return self._action
def printLogs(self):
logs = list()
for log in self.model.dbobj.logs:
logs.append(("{epoch} - {category}: {log}".format(
epoch=j.data.time.epoch2HRDateTime(log.epoch),
category=log.category,
log=log.log
)))
logs = '\n'.join(logs)
print(logs)
return logs
@property
def sourceLoader(self):
if self._sourceLoader is None:
if self._service is None:
raise j.exceptions.RuntimeError("can't dynamicly load action code, no service present in job object")
self._sourceLoader = SourceLoader(self._service)
return self._sourceLoader
@property
def method(self):
return self.sourceLoader.get_method(self.model.dbobj.actionName)
@property
def service(self):
if self._service is None:
if self.model.dbobj.actorName != "":
repo = j.atyourservice.server.aysRepos.get(path=self.model.dbobj.repoKey)
try:
self._service = repo.serviceGetByKey(self.model.dbobj.serviceKey)
except j.exceptions.NotFound:
# If run contains a delete, this is perfectly acceptable
self.logger.warning("job {} tried to access a non existing service {}.".format(self, self.model.dbobj.serviceKey))
return None
return self._service
@service.setter
def service(self, value):
self._service = value
self.model.dbobj.serviceKey = value.model.key
def _processError(self, eco):
if j.data.types.string.check(eco):
# case it comes from the result of the processmanager
eco = j.data.serializer.json.loads(eco)
epoch = eco['epoch']
if eco['_traceback'] != '':
category = 'trace'
msg = eco['_traceback']
elif eco['errormessage'] != '':
category = 'errormsg'
msg = eco['errormessage']
else:
raise j.exceptions.RuntimeError("error message empty, can't process error")
level = int(eco['level'])
tags = eco['tags']
elif isinstance(eco, ErrorConditionObject):
epoch = eco.epoch
if eco._traceback != '':
category = 'trace'
msg = eco._traceback
elif eco.errormessage != '':
category = 'errormsg'
msg = eco.errormessage
else:
raise j.exceptions.RuntimeError("error message empty, can't process error")
level = eco.level
tags = eco.tags
self.model.log(
msg=msg,
level=level,
category=category,
epoch=epoch,
tags=tags)
self.save()
def error(self, errormsg, level=1, tags=""):
self.model.log(
msg=errormsg,
level=level,
category="errormsg",
tags=tags)
self.save()
raise RuntimeError(errormsg)
def save(self):
if self.saveService and self.service is not None:
if self.model.dbobj.actionName in self.service.model.actions:
service_action_obj = self.service.model.actions[self.model.dbobj.actionName]
service_action_obj.state = str(self.model.dbobj.state)
if not service_action_obj.longjob:
self.service.saveAll()
if not j.sal.fs.exists(j.sal.fs.joinPaths(self.service.aysrepo.path, "services")):
return # repo destroyed or service is deleted.
# fill the context list in capnp obj before save
self.model.dbobj.init('context', len(self.context))
i = 0
for k, v in self.context.items():
kv = self.model.dbobj.context[i]
kv.key = k
kv.value = v
i += 1
self.model.save()
def executeInProcess(self):
"""
        deprecated, all jobs are executed in-process now.
        it's now a synonym of execute()
"""
return self.execute()
def execute(self):
"""
this method returns a future
        you need to await it to schedule it on the event loop.
        the future returns a tuple containing (result, stdout, stderr)
ex: result, stdout, stderr = await job.execute()
"""
self._started = time.time()
# for now use default ThreadPoolExecutor
if self.model.dbobj.debug is False:
self.model.dbobj.debug = self.sourceLoader.source.find('ipdb') != -1 or \
self.sourceLoader.source.find('IPython') != -1
        # In case dev mode is enabled, long-running jobs are never scheduled.
        # This is to be able to unblock the AYS main thread in case the long job is buggy.
        # Once the actor has been updated in dev mode, you can disable dev mode and continue normal execution.
if j.atyourservice.server.dev_mode or self.service.model.actions[self.action.dbobj.name].longjob is False:
self._future = self._loop.run_in_executor(None, self.method, self)
else:
            # THIS FEATURE IS VERY DANGEROUS: USE WITH CARE or you END UP with a blocked AYS.
            # The code in the `inner` coroutine defined below must be fully async or the AYS main thread will block.
# typical definition:
# def longjob(job):
# async def inner(job):
# ## code here
# return inner(job)
            # self.method here is longjob; you need to call it with the job to get the coroutine object `inner` that it returns
coroutine = self.method(self)
if not asyncio.iscoroutine(coroutine):
raise RuntimeError("the method used for a the long job %s of service %s is not a courotine" % (self.action.dbobj.name, self.service))
self._future = asyncio.ensure_future(coroutine, loop=self._loop)
# register callback to deal with logs and state of the job after execution
self._future.add_done_callback(functools.partial(_execute_cb, self))
if self.service is not None and self.model.dbobj.actionName in self.service.model.actions:
service_action_obj = self.service.model.actions[self.model.dbobj.actionName]
service_action_obj.state = 'running'
self.model.dbobj.state = 'running'
self.save()
return self._future
def cancel(self):
self._cancelled = True
if self._future:
self._future.remove_done_callback(_execute_cb)
self._future.cancel()
self.logger.info("job {} cancelled".format(self))
def str_error(self, error):
out = 'Error of %s:' % str(self)
formatter = pygments.formatters.Terminal256Formatter(style=pygments.styles.get_style_by_name("vim"))
if error.__str__() != "":
out += "\n*TRACEBACK*********************************************************************************\n"
lexer = pygments.lexers.get_lexer_by_name("pytb", stripall=True)
tb_colored = pygments.highlight(error.__str__(), lexer, formatter)
out += tb_colored
out += "\n\n******************************************************************************************\n"
return out
def __repr__(self):
out = "job: %s!%s (%s)" % (
(self.model.dbobj.actorName, self.model.dbobj.serviceName, self.model.dbobj.actionName))
return out
__str__ = __repr__
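# Minimal sketch (not part of the job controller itself) of how a caller is
# expected to drive Job.execute(), following the docstring above: execute()
# returns a future that must be awaited on the AYS event loop.
async def _example_run_job(job):
    result, stdout, stderr = await job.execute()
    job.logger.info("job finished with result: {}".format(result))
    return result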
|
|
#!/usr/bin/env python
'''Functions that interact with external tools/services
'''
from __future__ import absolute_import, print_function, division
from peyotl.nexson_syntax import get_ot_study_info_from_nexml, \
DEFAULT_NEXSON_VERSION, \
BY_ID_HONEY_BADGERFISH, \
convert_nexson_format, \
sort_arbitrarily_ordered_nexson
from peyotl.nexson_syntax.helper import _simplify_all_meta_by_id_del
import logging
# Module-level logger; the exception handler in import_nexson_from_treebase() logs through it.
_LOG = logging.getLogger(__name__)
def _get_treebase_url(treebase_id):
# Use TreeBASE API to fetch NeXML, then pass it as a string
# to _import_nexson_from_nexml()
# EXAMPLE: Here's Phylografter's fetch URL for study 15515 as 'nexml' (versus 'nexus'):
# http://purl.org/phylo/treebase/phylows/study/TB2:S15515?format=nexml
# ... which redirects to:
# http://treebase.org/treebase-web/phylows/study/TB2:S15515?format=nexml
# ... which redirects to:
# http://treebase.org/treebase-web/search/downloadAStudy.html?id=15515&format=nexml
#
# Since our download follows redirects, let's respect the PhyloWS API on treebase.org
url_format = 'http://treebase.org/treebase-web/phylows/study/TB2:S{t:d}?format=nexml'
return url_format.format(t=treebase_id)
def get_ot_study_info_from_treebase_nexml(src=None,
nexml_content=None,
encoding=u'utf8',
nexson_syntax_version=DEFAULT_NEXSON_VERSION,
merge_blocks=True,
sort_arbitrary=False):
    '''Normalize TreeBASE-specific metadata into the locations where
    Open Tree of Life software expects it.
    See get_ot_study_info_from_nexml for the explanation of the src,
    nexml_content, encoding, and nexson_syntax_version arguments.
    If merge_blocks is True then peyotl.manip.merge_otus_and_trees is called on the result.
    Actions taken to "normalize" TreeBASE objects to OT NexSON:
    1. simplify (via _simplify_all_meta_by_id_del) any meta item that has only a value and an id
    2. throw away rdfs:isDefinedBy
    3. otu @label -> otu ^ot:originalLabel
    4. move ^tb:identifier.taxon, ^tb:identifier.taxonVariant and some skos:closeMatch
       fields to ^ot:taxonLink
    5. remove "@xml:base"
    6. coerce edge lengths to native types
    '''
#pylint: disable=R0915
raw = get_ot_study_info_from_nexml(src=src,
nexml_content=nexml_content,
encoding=encoding,
nexson_syntax_version=BY_ID_HONEY_BADGERFISH)
nexml = raw['nexml']
SKOS_ALT_LABEL = '^skos:altLabel'
SKOS_CLOSE_MATCH = '^skos:closeMatch'
strippable_pre = {
'http://www.ubio.org/authority/metadata.php?lsid=urn:lsid:ubio.org:namebank:': '@ubio',
'http://purl.uniprot.org/taxonomy/': '@uniprot',
}
moveable2taxon_link = {"^tb:identifier.taxon": '@tb:identifier.taxon',
"^tb:identifier.taxonVariant": '@tb:identifier.taxonVariant', }
to_del = ['^rdfs:isDefinedBy', '@xml:base']
for tag in to_del:
if tag in nexml:
del nexml[tag]
_simplify_all_meta_by_id_del(nexml)
_otu2label = {}
prefix_map = {}
# compose dataDeposit
nexid = nexml['@id']
tb_url = 'http://purl.org/phylo/treebase/phylows/study/TB2:' + nexid
nexml['^ot:dataDeposit'] = {'@href': tb_url}
    # compose the publication metadata (citation, DOI, year)
bd = nexml.get("^dcterms:bibliographicCitation")
if bd:
nexml['^ot:studyPublicationReference'] = bd
doi = nexml.get('^prism:doi')
if doi:
nexml['^ot:studyPublication'] = {'@href': doi}
year = nexml.get('^prism:publicationDate')
if year:
try:
nexml['^ot:studyYear'] = int(year)
except:
pass
#
for otus in nexml['otusById'].values():
for tag in to_del:
if tag in otus:
del otus[tag]
_simplify_all_meta_by_id_del(otus)
for oid, otu in otus['otuById'].items():
for tag in to_del:
if tag in otu:
del otu[tag]
_simplify_all_meta_by_id_del(otu)
label = otu['@label']
_otu2label[oid] = label
otu['^ot:originalLabel'] = label
del otu['@label']
al = otu.get(SKOS_ALT_LABEL)
if al is not None:
if otu.get('^ot:altLabel') is None:
otu['^ot:altLabel'] = al
del otu[SKOS_ALT_LABEL]
tl = {}
scm = otu.get(SKOS_CLOSE_MATCH)
#_LOG.debug('scm = ' + str(scm))
if scm:
if isinstance(scm, dict):
h = scm.get('@href')
if h:
try:
for p, t in strippable_pre.items():
if h.startswith(p):
ident = h[len(p):]
tl[t] = ident
del otu[SKOS_CLOSE_MATCH]
prefix_map[t] = p
except:
pass
else:
nm = []
try:
for el in scm:
h = el.get('@href')
if h:
found = False
for p, t in strippable_pre.items():
if h.startswith(p):
ident = h[len(p):]
tl[t] = ident
found = True
prefix_map[t] = p
break
if not found:
nm.append(el)
except:
pass
if len(nm) < len(scm):
if len(nm) > 1:
otu[SKOS_CLOSE_MATCH] = nm
elif len(nm) == 1:
otu[SKOS_CLOSE_MATCH] = nm[0]
else:
del otu[SKOS_CLOSE_MATCH]
#_LOG.debug('tl =' + str(tl))
for k, t in moveable2taxon_link.items():
al = otu.get(k)
if al:
tl[t] = al
del otu[k]
if tl:
otu['^ot:taxonLink'] = tl
for trees in nexml['treesById'].values():
for tag in to_del:
if tag in trees:
del trees[tag]
_simplify_all_meta_by_id_del(trees)
for tree in trees['treeById'].values():
for tag in to_del:
if tag in tree:
del tree[tag]
_simplify_all_meta_by_id_del(tree)
tt = tree.get('@xsi:type', 'nex:FloatTree')
if tt.lower() == 'nex:inttree':
e_len_coerce = int
else:
e_len_coerce = float
for edge_d in tree['edgeBySourceId'].values():
for edge in edge_d.values():
try:
x = e_len_coerce(edge['@length'])
edge['@length'] = x
except:
pass
for node in tree['nodeById'].values():
nl = node.get('@label')
if nl:
no = node.get('@otu')
if no and _otu2label[no] == nl:
del node['@label']
if prefix_map:
nexml['^ot:taxonLinkPrefixes'] = prefix_map
if merge_blocks:
from peyotl.manip import merge_otus_and_trees
merge_otus_and_trees(raw)
if nexson_syntax_version != BY_ID_HONEY_BADGERFISH:
convert_nexson_format(raw,
nexson_syntax_version,
current_format=BY_ID_HONEY_BADGERFISH,
sort_arbitrary=sort_arbitrary)
elif sort_arbitrary:
sort_arbitrarily_ordered_nexson(raw)
return raw
def import_nexson_from_treebase(treebase_id,
nexson_syntax_version=DEFAULT_NEXSON_VERSION):
url = _get_treebase_url(treebase_id)
try:
return get_ot_study_info_from_treebase_nexml(src=url,
nexson_syntax_version=nexson_syntax_version)
except Exception as x:
_LOG.exception('Error parsing NeXML from {}'.format(url))
raise
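# Minimal usage sketch: study 15515 is the example TreeBASE study referenced in
# the comments above; calling this performs a network fetch from treebase.org.
def _example_fetch_study():
    return import_nexson_from_treebase(15515)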
|
|
# Human friendly input/output in Python.
#
# Author: Peter Odding <[email protected]>
# Last Change: October 9, 2016
# URL: https://humanfriendly.readthedocs.io
"""
Interaction with UNIX terminals.
The :mod:`~humanfriendly.terminal` module makes it easy to interact with UNIX
terminals and format text for rendering on UNIX terminals. If the terms used in
the documentation of this module don't make sense to you then please refer to
the `Wikipedia article on ANSI escape sequences`_ for details about how ANSI
escape sequences work.
.. _Wikipedia article on ANSI escape sequences: http://en.wikipedia.org/wiki/ANSI_escape_code#Sequence_elements
"""
# Standard library modules.
import os
import re
import subprocess
import sys
# The `fcntl' module is platform specific so importing it may give an error. We
# hide this implementation detail from callers by handling the import error and
# setting a flag instead.
try:
import fcntl
import termios
import struct
HAVE_IOCTL = True
except ImportError:
HAVE_IOCTL = False
# Modules included in our package. We import find_meta_variables() here to
# preserve backwards compatibility with older versions of humanfriendly where
# that function was defined in this module.
from humanfriendly.compat import is_unicode
from humanfriendly.text import concatenate, format
from humanfriendly.usage import find_meta_variables, format_usage # NOQA
ANSI_CSI = '\x1b['
"""The ANSI "Control Sequence Introducer" (a string)."""
ANSI_SGR = 'm'
"""The ANSI "Select Graphic Rendition" sequence (a string)."""
ANSI_ERASE_LINE = '%sK' % ANSI_CSI
"""The ANSI escape sequence to erase the current line (a string)."""
ANSI_RESET = '%s0%s' % (ANSI_CSI, ANSI_SGR)
"""The ANSI escape sequence to reset styling (a string)."""
ANSI_COLOR_CODES = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7)
"""
A dictionary with (name, number) pairs of `portable color codes`_. Used by
:func:`ansi_style()` to generate ANSI escape sequences that change font color.
.. _portable color codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
"""
ANSI_TEXT_STYLES = dict(bold=1, faint=2, underline=4, inverse=7, strike_through=9)
"""
A dictionary with (name, number) pairs of text styles (effects). Used by
:func:`ansi_style()` to generate ANSI escape sequences that change text
styles. Only widely supported text styles are included here.
"""
CLEAN_OUTPUT_PATTERN = re.compile(u'(\r|\n|\b|%s)' % re.escape(ANSI_ERASE_LINE))
"""
A compiled regular expression used to separate significant characters from other text.
This pattern is used by :func:`clean_terminal_output()` to split terminal
output into regular text versus backspace, carriage return and line feed
characters and ANSI 'erase line' escape sequences.
"""
DEFAULT_LINES = 25
"""The default number of lines in a terminal (an integer)."""
DEFAULT_COLUMNS = 80
"""The default number of columns in a terminal (an integer)."""
HIGHLIGHT_COLOR = os.environ.get('HUMANFRIENDLY_HIGHLIGHT_COLOR', 'green')
"""
The color used to highlight important tokens in formatted text (e.g. the usage
message of the ``humanfriendly`` program). If the environment variable
``$HUMANFRIENDLY_HIGHLIGHT_COLOR`` is set it determines the value of
:data:`HIGHLIGHT_COLOR`.
"""
def message(*args, **kw):
"""
Show an informational message on the terminal.
    :param args: Any positional arguments are passed on to :func:`~humanfriendly.text.format()`.
:param kw: Any keyword arguments are passed on to :func:`~humanfriendly.text.format()`.
Renders the message using :func:`~humanfriendly.text.format()` and writes
the resulting string to :data:`sys.stderr` (followed by a newline).
"""
sys.stderr.write(format(*args, **kw) + '\n')
def warning(*args, **kw):
"""
Show a warning message on the terminal.
    :param args: Any positional arguments are passed on to :func:`~humanfriendly.text.format()`.
:param kw: Any keyword arguments are passed on to :func:`~humanfriendly.text.format()`.
Renders the message using :func:`~humanfriendly.text.format()` and writes
the resulting string to :data:`sys.stderr` (followed by a newline). If
:data:`sys.stderr` is connected to a terminal :func:`ansi_wrap()` is used
to color the message in a red font (to make the warning stand out from
surrounding text).
"""
text = format(*args, **kw)
if terminal_supports_colors(sys.stderr):
text = ansi_wrap(text, color='red')
sys.stderr.write(text + '\n')
def ansi_strip(text, readline_hints=True):
"""
Strip ANSI escape sequences from the given string.
:param text: The text from which ANSI escape sequences should be removed (a
string).
:param readline_hints: If :data:`True` then :func:`readline_strip()` is
used to remove `readline hints`_ from the string.
:returns: The text without ANSI escape sequences (a string).
"""
pattern = '%s.*?%s' % (re.escape(ANSI_CSI), re.escape(ANSI_SGR))
text = re.sub(pattern, '', text)
if readline_hints:
text = readline_strip(text)
return text
def ansi_style(**kw):
"""
Generate ANSI escape sequences for the given color and/or style(s).
:param color: The name of a color (one of the strings 'black', 'red',
'green', 'yellow', 'blue', 'magenta', 'cyan' or 'white') or
:data:`None` (the default) which means no escape sequence to
switch color will be emitted.
:param readline_hints: If :data:`True` then :func:`readline_wrap()` is
applied to the generated ANSI escape sequences (the
default is :data:`False`).
:param kw: Any additional keyword arguments are expected to match an entry
in the :data:`ANSI_TEXT_STYLES` dictionary. If the argument's
value evaluates to :data:`True` the respective style will be
enabled.
:returns: The ANSI escape sequences to enable the requested text styles or
an empty string if no styles were requested.
:raises: :py:exc:`~exceptions.ValueError` when an invalid color name is given.
"""
# Start with sequences that change text styles.
sequences = [str(ANSI_TEXT_STYLES[k]) for k, v in kw.items() if k in ANSI_TEXT_STYLES and v]
# Append the color code (if any).
color_name = kw.get('color')
if color_name:
# Validate the color name.
if color_name not in ANSI_COLOR_CODES:
msg = "Invalid color name %r! (expected one of %s)"
raise ValueError(msg % (color_name, concatenate(sorted(ANSI_COLOR_CODES))))
sequences.append('3%i' % ANSI_COLOR_CODES[color_name])
if sequences:
encoded = ANSI_CSI + ';'.join(sequences) + ANSI_SGR
return readline_wrap(encoded) if kw.get('readline_hints') else encoded
else:
return ''
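# Minimal usage sketch (illustrative values): request a bold green style and
# get back the combined ANSI escape sequence.
def _example_ansi_style():
    return ansi_style(color='green', bold=True)  # -> '\x1b[1;32m'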
def ansi_width(text):
"""
Calculate the effective width of the given text (ignoring ANSI escape sequences).
:param text: The text whose width should be calculated (a string).
:returns: The width of the text without ANSI escape sequences (an
integer).
This function uses :func:`ansi_strip()` to strip ANSI escape sequences from
the given string and returns the length of the resulting string.
"""
return len(ansi_strip(text))
def ansi_wrap(text, **kw):
"""
Wrap text in ANSI escape sequences for the given color and/or style(s).
:param text: The text to wrap (a string).
:param kw: Any keyword arguments are passed to :func:`ansi_style()`.
:returns: The result of this function depends on the keyword arguments:
- If :func:`ansi_style()` generates an ANSI escape sequence based
on the keyword arguments, the given text is prefixed with the
generated ANSI escape sequence and suffixed with
:data:`ANSI_RESET`.
- If :func:`ansi_style()` returns an empty string then the text
given by the caller is returned unchanged.
"""
start_sequence = ansi_style(**kw)
if start_sequence:
end_sequence = ANSI_RESET
if kw.get('readline_hints'):
end_sequence = readline_wrap(end_sequence)
return start_sequence + text + end_sequence
else:
return text
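# Minimal usage sketch: wrap a message in the red color sequence followed by
# ANSI_RESET, as described in the docstring above.
def _example_ansi_wrap():
    return ansi_wrap('hello world', color='red')  # -> '\x1b[31mhello world\x1b[0m'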
def readline_wrap(expr):
"""
Wrap an ANSI escape sequence in `readline hints`_.
    :param expr: The text with the escape sequence to wrap (a string).
:returns: The wrapped text.
.. _readline hints: http://superuser.com/a/301355
"""
return '\001' + expr + '\002'
def readline_strip(expr):
"""
Remove `readline hints`_ from a string.
    :param expr: The text to strip (a string).
:returns: The stripped text.
"""
return expr.replace('\001', '').replace('\002', '')
def clean_terminal_output(text):
"""
Clean up the terminal output of a command.
:param text: The raw text with special characters (a Unicode string).
:returns: A list of Unicode strings (one for each line).
This function emulates the effect of backspace (0x08), carriage return
(0x0D) and line feed (0x0A) characters and the ANSI 'erase line' escape
sequence on interactive terminals. It's intended to clean up command output
that was originally meant to be rendered on an interactive terminal and
that has been captured using e.g. the script_ program [#]_ or the
:mod:`pty` module [#]_.
.. [#] My coloredlogs_ package supports the ``coloredlogs --to-html``
command which uses script_ to fool a subprocess into thinking that
it's connected to an interactive terminal (in order to get it to
emit ANSI escape sequences).
.. [#] My capturer_ package uses the :mod:`pty` module to fool the current
process and subprocesses into thinking they are connected to an
interactive terminal (in order to get them to emit ANSI escape
sequences).
**Some caveats about the use of this function:**
- Strictly speaking the effect of carriage returns cannot be emulated
outside of an actual terminal due to the interaction between overlapping
output, terminal widths and line wrapping. The goal of this function is
to sanitize noise in terminal output while preserving useful output.
Think of it as a useful and pragmatic but possibly lossy conversion.
- The algorithm isn't smart enough to properly handle a pair of ANSI escape
sequences that open before a carriage return and close after the last
carriage return in a linefeed delimited string; the resulting string will
contain only the closing end of the ANSI escape sequence pair. Tracking
this kind of complexity requires a state machine and proper parsing.
.. _capturer: https://pypi.python.org/pypi/capturer
.. _coloredlogs: https://pypi.python.org/pypi/coloredlogs
.. _script: http://man7.org/linux/man-pages/man1/script.1.html
"""
cleaned_lines = []
current_line = ''
current_position = 0
for token in CLEAN_OUTPUT_PATTERN.split(text):
if token == '\r':
# Seek back to the start of the current line.
current_position = 0
elif token == '\b':
# Seek back one character in the current line.
current_position = max(0, current_position - 1)
else:
if token == '\n':
# Capture the current line.
cleaned_lines.append(current_line)
if token in ('\n', ANSI_ERASE_LINE):
# Clear the current line.
current_line = ''
current_position = 0
elif token:
# Merge regular output into the current line.
new_position = current_position + len(token)
prefix = current_line[:current_position]
suffix = current_line[new_position:]
current_line = prefix + token + suffix
current_position = new_position
# Capture the last line (if any).
cleaned_lines.append(current_line)
# Remove any empty trailing lines.
while cleaned_lines and not cleaned_lines[-1]:
cleaned_lines.pop(-1)
return cleaned_lines
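# Minimal usage sketch: the carriage return makes the second progress message
# overwrite the first, and the trailing newline doesn't add an empty entry.
def _example_clean_terminal_output():
    lines = clean_terminal_output('progress 1%\rprogress 100%\ndone\n')
    assert lines == ['progress 100%', 'done']
    return lines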
def connected_to_terminal(stream=None):
"""
Check if a stream is connected to a terminal.
:param stream: The stream to check (a file-like object,
defaults to :data:`sys.stdout`).
:returns: :data:`True` if the stream is connected to a terminal,
:data:`False` otherwise.
See also :func:`terminal_supports_colors()`.
"""
stream = sys.stdout if stream is None else stream
try:
return stream.isatty()
except Exception:
return False
def terminal_supports_colors(stream=None):
"""
Check if a stream is connected to a terminal that supports ANSI escape sequences.
:param stream: The stream to check (a file-like object,
defaults to :data:`sys.stdout`).
:returns: :data:`True` if the terminal supports ANSI escape sequences,
:data:`False` otherwise.
This function is inspired by the implementation of
`django.core.management.color.supports_color()
<https://github.com/django/django/blob/master/django/core/management/color.py>`_.
"""
return (sys.platform != 'Pocket PC' and
(sys.platform != 'win32' or 'ANSICON' in os.environ) and
connected_to_terminal(stream))
def find_terminal_size():
"""
Determine the number of lines and columns visible in the terminal.
:returns: A tuple of two integers with the line and column count.
The result of this function is based on the first of the following three
methods that works:
1. First :func:`find_terminal_size_using_ioctl()` is tried,
2. then :func:`find_terminal_size_using_stty()` is tried,
3. finally :data:`DEFAULT_LINES` and :data:`DEFAULT_COLUMNS` are returned.
.. note:: The :func:`find_terminal_size()` function performs the steps
above every time it is called, the result is not cached. This is
because the size of a virtual terminal can change at any time and
the result of :func:`find_terminal_size()` should be correct.
`Pre-emptive snarky comment`_: It's possible to cache the result
of this function and use :data:`signal.SIGWINCH` to refresh the
cached values!
Response: As a library I don't consider it the role of the
:py:mod:`humanfriendly.terminal` module to install a process wide
signal handler ...
.. _Pre-emptive snarky comment: http://blogs.msdn.com/b/oldnewthing/archive/2008/01/30/7315957.aspx
"""
# The first method. Any of the standard streams may have been redirected
# somewhere and there's no telling which, so we'll just try them all.
for stream in sys.stdin, sys.stdout, sys.stderr:
try:
result = find_terminal_size_using_ioctl(stream)
if min(result) >= 1:
return result
except Exception:
pass
# The second method.
try:
result = find_terminal_size_using_stty()
if min(result) >= 1:
return result
except Exception:
pass
# Fall back to conservative defaults.
return DEFAULT_LINES, DEFAULT_COLUMNS
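# Hedged usage sketch (not part of the original module):
#
#   lines, columns = find_terminal_size()
#   print("Terminal is %i columns wide and %i lines high" % (columns, lines))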
def find_terminal_size_using_ioctl(stream):
"""
Find the terminal size using :func:`fcntl.ioctl()`.
:param stream: A stream connected to the terminal (a file object with a
``fileno`` attribute).
:returns: A tuple of two integers with the line and column count.
:raises: This function can raise exceptions but I'm not going to document
them here, you should be using :func:`find_terminal_size()`.
Based on an `implementation found on StackOverflow <http://stackoverflow.com/a/3010495/788200>`_.
"""
if not HAVE_IOCTL:
raise NotImplementedError("It looks like the `fcntl' module is not available!")
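    # TIOCGWINSZ fills a `struct winsize` with four unsigned shorts: rows, columns,
    # horizontal pixels and vertical pixels (the pixel counts are often zero);
    # only the rows and columns are used here.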
h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(stream, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
return h, w
def find_terminal_size_using_stty():
"""
Find the terminal size using the external command ``stty size``.
:returns: A tuple of two integers with the line and column count.
:raises: This function can raise exceptions but I'm not going to document
them here, you should be using :func:`find_terminal_size()`.
"""
stty = subprocess.Popen(['stty', 'size'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = stty.communicate()
tokens = stdout.split()
if len(tokens) != 2:
raise Exception("Invalid output from `stty size'!")
return tuple(map(int, tokens))
def usage(usage_text):
"""
Print a human friendly usage message to the terminal.
    :param usage_text: The usage message to print (a string).
This function does two things:
    1. If :data:`sys.stdout` is connected to a terminal that supports ANSI escape
       sequences (see :func:`terminal_supports_colors()`) then the usage message
       is formatted using :func:`.format_usage()`.
2. The usage message is shown using a pager (see :func:`show_pager()`).
"""
if terminal_supports_colors(sys.stdout):
usage_text = format_usage(usage_text)
show_pager(usage_text)
def show_pager(formatted_text, encoding='UTF-8'):
"""
Print a large text to the terminal using a pager.
:param formatted_text: The text to print to the terminal (a string).
:param encoding: The name of the text encoding used to encode the formatted
text if the formatted text is a Unicode string (a string).
The use of a pager helps to avoid the wall of text effect where the user
has to scroll up to see where the output began (not very user friendly).
If :data:`sys.stdout` is not connected to a terminal (see
:func:`connected_to_terminal()`) then the text is printed directly without
invoking a pager.
If the given text contains ANSI escape sequences the command ``less
--RAW-CONTROL-CHARS`` is used, otherwise ``$PAGER`` is used (if ``$PAGER``
isn't set the command ``less`` is used).
"""
if connected_to_terminal(sys.stdout):
if ANSI_CSI in formatted_text:
pager_command = ['less', '--RAW-CONTROL-CHARS']
else:
pager_command = [os.environ.get('PAGER', 'less')]
if is_unicode(formatted_text):
formatted_text = formatted_text.encode(encoding)
pager = subprocess.Popen(pager_command, stdin=subprocess.PIPE)
pager.communicate(input=formatted_text)
else:
print(formatted_text)
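# Hedged usage sketch (not part of the original module; the usage text is made up):
#
#   usage("Usage: demo [OPTIONS]\n\nSupported options:\n\n  -h, --help\n\n    Show this message and exit.\n")
#
# On an interactive terminal this formats the message and pipes it through the
# pager selected by show_pager(); otherwise it is printed directly.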
|
|
# -*- coding: utf-8 -*-
# Author: Florian Mayer <[email protected]>
#
# This module was developed with funding provided by
# the ESA Summer of Code (2011).
# The template can be found in tools/hektemplate.py
# Unless you are editing the template, DO NOT EDIT THIS FILE.
# ALL CHANGES WILL BE LOST THE NEXT TIME IT IS GENERATED FROM THE TEMPLATE.
"""
Attributes that can be used to construct HEK queries. They differ from the
VSO ones in that many of them are wrappers that conveniently expose
comparisons by overloading Python operators. So, e.g., you can write
AR & (AR.NumSpots < 5) to find all active regions with fewer than 5 spots.
As with the VSO query, you can use the fundamental logic operators AND and OR
to construct queries of almost arbitrary complexity. Note that complex queries
result in multiple requests to the server which might make them less efficient.
"""
from __future__ import absolute_import
from datetime import datetime
from sunpy.net import attr
from sunpy.time import parse_time
class _ParamAttr(attr.Attr):
""" A _ParamAttr is used to represent equality or inequality checks
for certain parameters. It stores the attribute's name, the operator to
compare with, and the value to compare to. """
def __init__(self, name, op, value):
attr.Attr.__init__(self)
self.name = name
self.op = op
self.value = value
def collides(self, other):
if not isinstance(other, self.__class__):
return False
return self.op == other.op and self.name == other.name
# XXX: Why is this here but never used.
class _BoolParamAttr(_ParamAttr):
def __init__(self, name, value='true'):
_ParamAttr.__init__(self, name, '=', value)
def __neg__(self):
if self.value == 'true':
return _BoolParamAttr(self.name, 'false')
else:
return _BoolParamAttr(self.name)
def __pos__(self):
return _BoolParamAttr(self.name)
class _ListAttr(attr.Attr):
""" A _ListAttr is used when the server expects a list of things with
the name (GET parameter name) key. By adding the _ListAttr to the query,
item is added to that list. Please note that the server must treat items
of the list as AND in order for the query to be semantically correct. """
def __init__(self, key, item):
attr.Attr.__init__(self)
self.key = key
self.item = item
def collides(self, other):
return False
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return vars(self) == vars(other)
def __hash__(self):
return hash(tuple(vars(self).itervalues()))
class EventType(attr.Attr):
def __init__(self, item):
attr.Attr.__init__(self)
self.item = item
def collides(self, other):
return isinstance(other, EventType)
def __or__(self, other):
if isinstance(other, EventType):
return EventType(self.item + ',' + other.item)
else:
return super(EventType, self).__or__(other)
# XXX: XOR
class Time(attr.Attr):
""" Restrict query to time range between start and end. """
def __init__(self, start, end):
attr.Attr.__init__(self)
self.start = start
self.end = end
def collides(self, other):
return isinstance(other, Time)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return vars(self) == vars(other)
def __hash__(self):
return hash(tuple(vars(self).itervalues()))
@classmethod
def dt(cls, start, end):
return cls(datetime(*start), datetime(*end))
# pylint: disable=R0913
class SpatialRegion(attr.Attr):
def __init__(
self, x1=-1200, y1=-1200, x2=1200, y2=1200, sys='helioprojective'):
attr.Attr.__init__(self)
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.sys = sys
def collides(self, other):
return isinstance(other, SpatialRegion)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return vars(self) == vars(other)
def __hash__(self):
return hash(tuple(vars(self).itervalues()))
class Contains(attr.Attr):
def __init__(self, *types):
attr.Attr.__init__(self)
self.types = types
def collides(self, other):
return False
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return vars(self) == vars(other)
def __hash__(self):
return hash(tuple(vars(self).itervalues()))
class _ComparisonParamAttrWrapper(object):
def __init__(self, name):
self.name = name
def __lt__(self, other):
return _ParamAttr(self.name, '<', other)
def __le__(self, other):
return _ParamAttr(self.name, '<=', other)
def __gt__(self, other):
return _ParamAttr(self.name, '>', other)
def __ge__(self, other):
return _ParamAttr(self.name, '>=', other)
def __eq__(self, other):
return _ParamAttr(self.name, '=', other)
def __ne__(self, other):
return _ParamAttr(self.name, '!=', other)
class _StringParamAttrWrapper(_ComparisonParamAttrWrapper):
def like(self, other):
return _ParamAttr(self.name, 'like', other)
class _NumberParamAttrWrapper(_ComparisonParamAttrWrapper):
pass
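# Illustrative note (not part of the original module): a wrapper instance turns a
# Python comparison into a _ParamAttr, e.g.
#
#   _NumberParamAttrWrapper('AR_NumSpots') < 5
#
# yields _ParamAttr('AR_NumSpots', '<', 5), which the walker below serialises as
# param0/op0/value0 GET parameters.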
# The walker is what traverses the attribute tree and converts it to a format
# that is understood by the server we are querying. The HEK walker builds up
# a dictionary of GET parameters to be sent to the server.
walker = attr.AttrWalker()
@walker.add_applier(Contains)
# pylint: disable=E0102,C0103,W0613
def _a(wlk, root, state, dct):
dct['type'] = 'contains'
    if Contains not in state:
state[Contains] = 1
nid = state[Contains]
n = 0
for n, type_ in enumerate(root.types):
dct['event_type%d' % (nid + n)] = type_
state[Contains] += n
return dct
@walker.add_creator(
Time, SpatialRegion, EventType, _ParamAttr, attr.AttrAnd, Contains)
# pylint: disable=E0102,C0103,W0613
def _c(wlk, root, state):
value = {}
wlk.apply(root, state, value)
return [value]
@walker.add_applier(Time)
# pylint: disable=E0102,C0103,W0613
def _a(wlk, root, state, dct):
dct['event_starttime'] = parse_time(root.start).strftime('%Y-%m-%dT%H:%M:%S')
dct['event_endtime'] = parse_time(root.end).strftime('%Y-%m-%dT%H:%M:%S')
return dct
@walker.add_applier(SpatialRegion)
# pylint: disable=E0102,C0103,W0613
def _a(wlk, root, state, dct):
dct['x1'] = root.x1
dct['y1'] = root.y1
dct['x2'] = root.x2
dct['y2'] = root.y2
dct['event_coordsys'] = root.sys
return dct
@walker.add_applier(EventType)
# pylint: disable=E0102,C0103,W0613
def _a(wlk, root, state, dct):
if dct.get('type', None) == 'contains':
        raise ValueError("EventType cannot be combined with Contains in the same query block.")
dct['event_type'] = root.item
return dct
@walker.add_applier(_ParamAttr)
# pylint: disable=E0102,C0103,W0613
def _a(wlk, root, state, dct):
    if _ParamAttr not in state:
state[_ParamAttr] = 0
nid = state[_ParamAttr]
dct['param%d' % nid] = root.name
dct['op%d' % nid] = root.op
dct['value%d' % nid] = root.value
state[_ParamAttr] += 1
return dct
@walker.add_applier(attr.AttrAnd)
# pylint: disable=E0102,C0103,W0613
def _a(wlk, root, state, dct):
for attribute in root.attrs:
wlk.apply(attribute, state, dct)
@walker.add_creator(attr.AttrOr)
# pylint: disable=E0102,C0103,W0613
def _c(wlk, root, state):
blocks = []
for attribute in root.attrs:
blocks.extend(wlk.create(attribute, state))
return blocks
@walker.add_creator(attr.DummyAttr)
# pylint: disable=E0102,C0103,W0613
def _c(wlk, root, state):
return {}
@walker.add_applier(attr.DummyAttr)
# pylint: disable=E0102,C0103,W0613
def _a(wlk, root, state, dct):
pass
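# Illustrative trace (not part of the original module): for a query such as
# Time.dt((2011, 8, 9), (2011, 8, 10)) & EventType('ar'), the creators and appliers
# above should produce a single block of GET parameters along the lines of
#
#   [{'event_starttime': '2011-08-09T00:00:00',
#     'event_endtime': '2011-08-10T00:00:00',
#     'event_type': 'ar'}]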
@apply
class AR(EventType):
CompactnessCls = _StringParamAttrWrapper('AR_CompactnessCls')
IntensKurt = _StringParamAttrWrapper('AR_IntensKurt')
IntensMax = _StringParamAttrWrapper('AR_IntensMax')
IntensMean = _StringParamAttrWrapper('AR_IntensMean')
IntensMin = _StringParamAttrWrapper('AR_IntensMin')
IntensSkew = _StringParamAttrWrapper('AR_IntensSkew')
IntensTotal = _StringParamAttrWrapper('AR_IntensTotal')
IntensUnit = _StringParamAttrWrapper('AR_IntensUnit')
IntensVar = _StringParamAttrWrapper('AR_IntensVar')
McIntoshCls = _StringParamAttrWrapper('AR_McIntoshCls')
MtWilsonCls = _StringParamAttrWrapper('AR_MtWilsonCls')
NOAANum = _StringParamAttrWrapper('AR_NOAANum')
NOAAclass = _StringParamAttrWrapper('AR_NOAAclass')
NumSpots = _StringParamAttrWrapper('AR_NumSpots')
PenumbraCls = _StringParamAttrWrapper('AR_PenumbraCls')
Polarity = _StringParamAttrWrapper('AR_Polarity')
SpotAreaRaw = _StringParamAttrWrapper('AR_SpotAreaRaw')
SpotAreaRawUncert = _StringParamAttrWrapper('AR_SpotAreaRawUncert')
SpotAreaRawUnit = _StringParamAttrWrapper('AR_SpotAreaRawUnit')
SpotAreaRepr = _StringParamAttrWrapper('AR_SpotAreaRepr')
SpotAreaReprUncert = _StringParamAttrWrapper('AR_SpotAreaReprUncert')
SpotAreaReprUnit = _StringParamAttrWrapper('AR_SpotAreaReprUnit')
ZurichCls = _StringParamAttrWrapper('AR_ZurichCls')
def __init__(self):
EventType.__init__(self, 'ar')
@apply
class CE(EventType):
Accel = _StringParamAttrWrapper('CME_Accel')
AccelUncert = _StringParamAttrWrapper('CME_AccelUncert')
AccelUnit = _StringParamAttrWrapper('CME_AccelUnit')
AngularWidth = _StringParamAttrWrapper('CME_AngularWidth')
AngularWidthUnit = _StringParamAttrWrapper('CME_AngularWidthUnit')
Mass = _StringParamAttrWrapper('CME_Mass')
MassUncert = _StringParamAttrWrapper('CME_MassUncert')
MassUnit = _StringParamAttrWrapper('CME_MassUnit')
RadialLinVel = _StringParamAttrWrapper('CME_RadialLinVel')
RadialLinVelMax = _StringParamAttrWrapper('CME_RadialLinVelMax')
RadialLinVelMin = _StringParamAttrWrapper('CME_RadialLinVelMin')
RadialLinVelStddev = _StringParamAttrWrapper('CME_RadialLinVelStddev')
RadialLinVelUncert = _StringParamAttrWrapper('CME_RadialLinVelUncert')
RadialLinVelUnit = _StringParamAttrWrapper('CME_RadialLinVelUnit')
def __init__(self):
EventType.__init__(self, 'ce')
@apply
class CD(EventType):
Area = _StringParamAttrWrapper('CD_Area')
AreaUncert = _StringParamAttrWrapper('CD_AreaUncert')
AreaUnit = _StringParamAttrWrapper('CD_AreaUnit')
Mass = _StringParamAttrWrapper('CD_Mass')
MassUncert = _StringParamAttrWrapper('CD_MassUncert')
MassUnit = _StringParamAttrWrapper('CD_MassUnit')
Volume = _StringParamAttrWrapper('CD_Volume')
VolumeUncert = _StringParamAttrWrapper('CD_VolumeUncert')
VolumeUnit = _StringParamAttrWrapper('CD_VolumeUnit')
def __init__(self):
EventType.__init__(self, 'cd')
CH = EventType('ch')
CW = EventType('cw')
@apply
class FI(EventType):
BarbsL = _StringParamAttrWrapper('FI_BarbsL')
BarbsR = _StringParamAttrWrapper('FI_BarbsR')
BarbsTot = _StringParamAttrWrapper('FI_BarbsTot')
Chirality = _StringParamAttrWrapper('FI_Chirality')
Length = _StringParamAttrWrapper('FI_Length')
LengthUnit = _StringParamAttrWrapper('FI_LengthUnit')
Tilt = _StringParamAttrWrapper('FI_Tilt')
def __init__(self):
EventType.__init__(self, 'fi')
FE = EventType('fe')
FA = EventType('fa')
@apply
class FL(EventType):
EFoldTime = _StringParamAttrWrapper('FL_EFoldTime')
EFoldTimeUnit = _StringParamAttrWrapper('FL_EFoldTimeUnit')
Fluence = _StringParamAttrWrapper('FL_Fluence')
FluenceUnit = _StringParamAttrWrapper('FL_FluenceUnit')
GOESCls = _StringParamAttrWrapper('FL_GOESCls')
PeakEM = _StringParamAttrWrapper('FL_PeakEM')
PeakEMUnit = _StringParamAttrWrapper('FL_PeakEMUnit')
PeakFlux = _StringParamAttrWrapper('FL_PeakFlux')
PeakFluxUnit = _StringParamAttrWrapper('FL_PeakFluxUnit')
PeakTemp = _StringParamAttrWrapper('FL_PeakTemp')
PeakTempUnit = _StringParamAttrWrapper('FL_PeakTempUnit')
def __init__(self):
EventType.__init__(self, 'fl')
LP = EventType('lp')
OS = EventType('os')
@apply
class SS(EventType):
SpinRate = _StringParamAttrWrapper('SS_SpinRate')
SpinRateUnit = _StringParamAttrWrapper('SS_SpinRateUnit')
def __init__(self):
EventType.__init__(self, 'ss')
@apply
class EF(EventType):
AspectRatio = _StringParamAttrWrapper('EF_AspectRatio')
AxisLength = _StringParamAttrWrapper('EF_AxisLength')
AxisOrientation = _StringParamAttrWrapper('EF_AxisOrientation')
AxisOrientationUnit = _StringParamAttrWrapper('EF_AxisOrientationUnit')
FluxUnit = _StringParamAttrWrapper('EF_FluxUnit')
LengthUnit = _StringParamAttrWrapper('EF_LengthUnit')
NegEquivRadius = _StringParamAttrWrapper('EF_NegEquivRadius')
NegPeakFluxOnsetRate = _StringParamAttrWrapper('EF_NegPeakFluxOnsetRate')
OnsetRateUnit = _StringParamAttrWrapper('EF_OnsetRateUnit')
PosEquivRadius = _StringParamAttrWrapper('EF_PosEquivRadius')
PosPeakFluxOnsetRate = _StringParamAttrWrapper('EF_PosPeakFluxOnsetRate')
ProximityRatio = _StringParamAttrWrapper('EF_ProximityRatio')
SumNegSignedFlux = _StringParamAttrWrapper('EF_SumNegSignedFlux')
SumPosSignedFlux = _StringParamAttrWrapper('EF_SumPosSignedFlux')
def __init__(self):
EventType.__init__(self, 'ef')
CJ = EventType('cj')
PG = EventType('pg')
OT = EventType('ot')
NR = EventType('nr')
@apply
class SG(EventType):
AspectRatio = _StringParamAttrWrapper('SG_AspectRatio')
Chirality = _StringParamAttrWrapper('SG_Chirality')
MeanContrast = _StringParamAttrWrapper('SG_MeanContrast')
Orientation = _StringParamAttrWrapper('SG_Orientation')
PeakContrast = _StringParamAttrWrapper('SG_PeakContrast')
Shape = _StringParamAttrWrapper('SG_Shape')
def __init__(self):
EventType.__init__(self, 'sg')
SP = EventType('sp')
CR = EventType('cr')
@apply
class CC(EventType):
AxisUnit = _StringParamAttrWrapper('CC_AxisUnit')
MajorAxis = _StringParamAttrWrapper('CC_MajorAxis')
MinorAxis = _StringParamAttrWrapper('CC_MinorAxis')
TiltAngleMajorFromRadial = _StringParamAttrWrapper('CC_TiltAngleMajorFromRadial')
TiltAngleUnit = _StringParamAttrWrapper('CC_TiltAngleUnit')
def __init__(self):
EventType.__init__(self, 'cc')
ER = EventType('er')
@apply
class TO(EventType):
Shape = _StringParamAttrWrapper('TO_Shape')
def __init__(self):
EventType.__init__(self, 'to')
@apply
class Wave(object):
DisplMaxAmpl = _StringParamAttrWrapper('WaveDisplMaxAmpl')
DisplMinAmpl = _StringParamAttrWrapper('WaveDisplMinAmpl')
DisplUnit = _StringParamAttrWrapper('WaveDisplUnit')
lMaxPower = _StringParamAttrWrapper('WavelMaxPower')
lMaxPowerUncert = _StringParamAttrWrapper('WavelMaxPowerUncert')
lMaxRange = _StringParamAttrWrapper('WavelMaxRange')
lMinRange = _StringParamAttrWrapper('WavelMinRange')
lUnit = _StringParamAttrWrapper('WavelUnit')
@apply
class Veloc(object):
MaxAmpl = _StringParamAttrWrapper('VelocMaxAmpl')
MaxPower = _StringParamAttrWrapper('VelocMaxPower')
MaxPowerUncert = _StringParamAttrWrapper('VelocMaxPowerUncert')
MinAmpl = _StringParamAttrWrapper('VelocMinAmpl')
Unit = _StringParamAttrWrapper('VelocUnit')
@apply
class Freq(object):
MaxRange = _StringParamAttrWrapper('FreqMaxRange')
MinRange = _StringParamAttrWrapper('FreqMinRange')
PeakPower = _StringParamAttrWrapper('FreqPeakPower')
Unit = _StringParamAttrWrapper('FreqUnit')
@apply
class Intens(object):
MaxAmpl = _StringParamAttrWrapper('IntensMaxAmpl')
MinAmpl = _StringParamAttrWrapper('IntensMinAmpl')
Unit = _StringParamAttrWrapper('IntensUnit')
@apply
class Area(object):
AtDiskCenter = _StringParamAttrWrapper('Area_AtDiskCenter')
AtDiskCenterUncert = _StringParamAttrWrapper('Area_AtDiskCenterUncert')
Raw = _StringParamAttrWrapper('Area_Raw')
Uncert = _StringParamAttrWrapper('Area_Uncert')
Unit = _StringParamAttrWrapper('Area_Unit')
@apply
class BoundBox(object):
C1LL = _StringParamAttrWrapper('BoundBox_C1LL')
C1UR = _StringParamAttrWrapper('BoundBox_C1UR')
C2LL = _StringParamAttrWrapper('BoundBox_C2LL')
C2UR = _StringParamAttrWrapper('BoundBox_C2UR')
@apply
class Bound(object):
ox_C1LL = _StringParamAttrWrapper('BoundBox_C1LL')
ox_C1UR = _StringParamAttrWrapper('BoundBox_C1UR')
ox_C2LL = _StringParamAttrWrapper('BoundBox_C2LL')
ox_C2UR = _StringParamAttrWrapper('BoundBox_C2UR')
CCNsteps = _StringParamAttrWrapper('Bound_CCNsteps')
CCStartC1 = _StringParamAttrWrapper('Bound_CCStartC1')
CCStartC2 = _StringParamAttrWrapper('Bound_CCStartC2')
@apply
class OBS(object):
ChannelID = _StringParamAttrWrapper('OBS_ChannelID')
DataPrepURL = _StringParamAttrWrapper('OBS_DataPrepURL')
FirstProcessingDate = _StringParamAttrWrapper('OBS_FirstProcessingDate')
IncludesNRT = _StringParamAttrWrapper('OBS_IncludesNRT')
Instrument = _StringParamAttrWrapper('OBS_Instrument')
LastProcessingDate = _StringParamAttrWrapper('OBS_LastProcessingDate')
LevelNum = _StringParamAttrWrapper('OBS_LevelNum')
MeanWavel = _StringParamAttrWrapper('OBS_MeanWavel')
Observatory = _StringParamAttrWrapper('OBS_Observatory')
Title = _StringParamAttrWrapper('OBS_Title')
WavelUnit = _StringParamAttrWrapper('OBS_WavelUnit')
@apply
class Skel(object):
Curvature = _StringParamAttrWrapper('Skel_Curvature')
Nsteps = _StringParamAttrWrapper('Skel_Nsteps')
StartC1 = _StringParamAttrWrapper('Skel_StartC1')
StartC2 = _StringParamAttrWrapper('Skel_StartC2')
@apply
class FRM(object):
Contact = _StringParamAttrWrapper('FRM_Contact')
HumanFlag = _StringParamAttrWrapper('FRM_HumanFlag')
Identifier = _StringParamAttrWrapper('FRM_Identifier')
Institute = _StringParamAttrWrapper('FRM_Institute')
Name = _StringParamAttrWrapper('FRM_Name')
ParamSet = _StringParamAttrWrapper('FRM_ParamSet')
SpecificID = _StringParamAttrWrapper('FRM_SpecificID')
URL = _StringParamAttrWrapper('FRM_URL')
VersionNumber = _StringParamAttrWrapper('FRM_VersionNumber')
@apply
class Event(object):
C1Error = _StringParamAttrWrapper('Event_C1Error')
C2Error = _StringParamAttrWrapper('Event_C2Error')
ClippedSpatial = _StringParamAttrWrapper('Event_ClippedSpatial')
ClippedTemporal = _StringParamAttrWrapper('Event_ClippedTemporal')
Coord1 = _StringParamAttrWrapper('Event_Coord1')
Coord2 = _StringParamAttrWrapper('Event_Coord2')
Coord3 = _StringParamAttrWrapper('Event_Coord3')
CoordSys = _StringParamAttrWrapper('Event_CoordSys')
CoordUnit = _StringParamAttrWrapper('Event_CoordUnit')
MapURL = _StringParamAttrWrapper('Event_MapURL')
MaskURL = _StringParamAttrWrapper('Event_MaskURL')
Npixels = _StringParamAttrWrapper('Event_Npixels')
PixelUnit = _StringParamAttrWrapper('Event_PixelUnit')
Probability = _StringParamAttrWrapper('Event_Probability')
TestFlag = _StringParamAttrWrapper('Event_TestFlag')
Type = _StringParamAttrWrapper('Event_Type')
@apply
class Outflow(object):
Length = _StringParamAttrWrapper('Outflow_Length')
LengthUnit = _StringParamAttrWrapper('Outflow_LengthUnit')
OpeningAngle = _StringParamAttrWrapper('Outflow_OpeningAngle')
Speed = _StringParamAttrWrapper('Outflow_Speed')
SpeedUnit = _StringParamAttrWrapper('Outflow_SpeedUnit')
TransSpeed = _StringParamAttrWrapper('Outflow_TransSpeed')
Width = _StringParamAttrWrapper('Outflow_Width')
WidthUnit = _StringParamAttrWrapper('Outflow_WidthUnit')
@apply
class Misc(object):
KB_Archivist = _StringParamAttrWrapper('KB_Archivist')
MaxMagFieldStrength = _StringParamAttrWrapper('MaxMagFieldStrength')
MaxMagFieldStrengthUnit = _StringParamAttrWrapper('MaxMagFieldStrengthUnit')
OscillNPeriods = _StringParamAttrWrapper('OscillNPeriods')
OscillNPeriodsUncert = _StringParamAttrWrapper('OscillNPeriodsUncert')
PeakPower = _StringParamAttrWrapper('PeakPower')
PeakPowerUnit = _StringParamAttrWrapper('PeakPowerUnit')
RasterScanType = _StringParamAttrWrapper('RasterScanType')
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any, Dict, List
import requests
from pandas import DataFrame, concat, melt
from lib.case_line import convert_cases_to_time_series
from lib.cast import safe_int_cast
from lib.data_source import DataSource
from lib.net import download_snapshot
from lib.time import datetime_isoformat
from lib.utils import aggregate_admin_level, table_rename
class Covid19IndiaOrgL1DataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = dataframes[0]
data = table_rename(
data,
{
"Confirmed": "total_confirmed",
"Deceased": "total_deceased",
"Recovered": "total_recovered",
"Tested": "total_tested",
"Date": "date",
"State": "subregion1_name",
},
drop=True,
)
# Get rid of rows for country-level data.
data = data[data["subregion1_name"] != "India"]
for col in ("total_confirmed", "total_deceased", "total_tested", "total_recovered"):
data[col] = data[col].apply(safe_int_cast).astype("Int64")
data["subregion2_code"] = None
data["locality_code"] = None
data["country_code"] = "IN"
return data
L3_INDIA_REMOVE_SET = set(
[
"Delhi",
"CAPF Personnel",
"BSF Camp",
"Airport Quarantine",
"Evacuees",
"Foreign Evacuees",
"Italians",
"Other Region",
"Other State",
"Others",
"Railway Quarantine",
"Unknown",
]
)
# For some of these mappings both the "correct" metadata version and an incorrect version are used.
# Harmonize both here.
L3_INDIA_REPLACEMENTS = {
"Upper Dibang Valley": "Dibang",
"Dibang Valley": "Dibang",
"Kra-Daadi": "Kra Daadi",
"Kamrup Metropolitan": "Kamrup",
"Bametara": "Bemetara",
"Koriya": "Korea",
"Gariaband": "Gariyaband",
"Gaurela Pendra Marwahi": "Gaurella Pendra Marwahi",
"Janjgir Champa": "Janjgir-Champa",
"Kabeerdham": "Kabirdham",
"Uttar Bastar Kanker": "Bastar",
"Banaskantha": "Banas Kantha",
"Chhota Udaipur": "Chhotaudepur",
"Dahod": "Dohad",
"Kutch": "Kachchh",
"Mehsana": "Mahesana",
"Panchmahal": "Panch Mahals",
"Sabarkantha": "Sabar Kantha",
"Charkhi Dadri": "Charki Dadri",
"Lahaul and Spiti": "Lahul and Spiti",
"Punch": "Poonch",
"Shopiyan": "Shopian",
"Saraikela-Kharsawan": "Saraikela Kharsawan",
"Davanagere": "Davangere",
"Leh": "Leh Ladakh",
"LEH": "Leh Ladakh",
"Dakshin Bastar Dantewada": "Dantewada",
"Ribhoi": "Ri Bhoi",
"Balasore": "Baleshwar",
"Nabarangapur": "Nabarangpur",
"Viluppuram": "Villupuram",
"Sipahijala": "Sepahijala",
"Unokoti": "Unakoti",
"KARGIL": "Kargil",
"14 Mahindergarh": "Mahendragarh",
"Sahasra": "Saharsa",
"Central Delhi": "Central",
"East Delhi": "East",
"North Delhi": "North",
"North East Delhi": "North East",
"North West Delhi": "North West",
"South Delhi": "South",
"South East Delhi": "South East",
"South West Delhi": "South West",
"West Delhi": "West",
"Jagtial": "Jagitial",
"Jangaon": "Jangoan",
"Komaram Bheem": "Kumuram Bheem Asifabad",
"South Andaman": "South Andamans",
"U.S.Nagar": "Udam Singh Nagar",
}
# Data for districts in India taken from https://lgdirectory.gov.in/
# The following districts were missing a code, so @themonk911 gave them reasonable
# codes based on the name.
# LEPARADA ARUNACHAL PRADESH(State)
# PAKKE KESSANG ARUNACHAL PRADESH(State)
# SHI YOMI ARUNACHAL PRADESH(State)
# Gaurella Pendra Marwahi CHHATTISGARH(State)
# Hnahthial MIZORAM(State)
# KHAWZAWL MIZORAM(State)
# SAITUAL MIZORAM(State)
# CHENGALPATTU TAMIL NADU(State)
# KALLAKURICHI TAMIL NADU(State)
# Ranipet TAMIL NADU(State)
# TENKASI TAMIL NADU(State)
# Tirupathur TAMIL NADU(State)
# Mayiladuthurai TAMIL NADU(State)
# Thoothukkudi was missing from Tamil Nadu so was added.
class Covid19IndiaOrgL2DataSource(DataSource):
""" Add L3 data for India districts. """
def _replace_subregion(self, x):
if x in L3_INDIA_REPLACEMENTS:
return L3_INDIA_REPLACEMENTS[x]
return x
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = dataframes[0]
data = table_rename(
data,
{
"Confirmed": "total_confirmed",
"Deceased": "total_deceased",
"Recovered": "total_recovered",
"Tested": "total_tested",
"Date": "date",
"District": "match_string",
"State": "subregion1_name",
},
drop=True,
)
data.match_string = data.match_string.apply(self._replace_subregion)
# Correct the district Raigarh, Madhya Pradesh to Rajgarh.
        # Can't use the existing L3 mechanism for this, since there is a Raigarh in Chhattisgarh
# that needs to remain Raigarh.
data.loc[
(data.match_string == "Raigarh") & (data.subregion1_name == "Madhya Pradesh"),
"match_string",
] = "Rajgarh"
data = data[~data.match_string.isin(L3_INDIA_REMOVE_SET)]
data["country_code"] = "IN"
return data
class Covid19IndiaOrgCasesDataSource(DataSource):
def fetch(
self,
output_folder: Path,
cache: Dict[str, str],
fetch_opts: List[Dict[str, Any]],
skip_existing: bool = False,
) -> Dict[str, str]:
output = {}
curr_idx = 1
url_tpl = fetch_opts[0].get("url")
download_options = dict(fetch_opts[0].get("opts", {}), skip_existing=skip_existing)
while True:
try:
url = url_tpl.format(idx=curr_idx)
fname = download_snapshot(url, output_folder, **download_options)
output.update({curr_idx: fname})
curr_idx += 1
except requests.HTTPError:
break
assert len(output) > 0, "No data downloaded"
return output
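    # Hedged sketch (not part of the original source): the fetch options are expected
    # to carry a URL template with an `{idx}` placeholder, e.g. (hypothetical values)
    #
    #   fetch_opts = [{"url": "https://example.org/csv/latest/raw_data{idx}.csv"}]
    #
    # fetch() then downloads raw_data1.csv, raw_data2.csv, ... until the server
    # responds with an HTTP error, keyed by index in the returned dictionary.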
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
cases = table_rename(
concat(dataframes.values()),
{
# "Patient Number": "",
# "State Patient Number": "",
"Date Announced": "date_new_confirmed",
# "Estimated Onset Date": "",
"Age Bracket": "age",
"Gender": "sex",
# "Detected City": "",
"Detected District": "subregion2_name",
"Detected State": "subregion1_name",
# "State code": "subregion1_code",
"Current Status": "_prognosis",
# "Notes": "",
# "Contracted from which Patient (Suspected)": "",
# "Nationality": "",
# "Type of transmission": "",
"Status Change Date": "_change_date",
# "Source_1": "",
# "Source_2": "",
# "Source_3": "",
# "Backup Notes": "",
"Num Cases": "new_confirmed",
"Entry_ID": "",
},
drop=True,
)
# Convert dates to ISO format
for col in [col for col in cases.columns if "date" in col]:
cases[col] = cases[col].apply(lambda x: datetime_isoformat(x, "%d/%m/%Y"))
cases["age"] = cases["age"].astype(str)
cases["age"] = cases["age"].str.lower()
cases["age"] = cases["age"].str.replace("\.0", "")
cases["age"] = cases["age"].str.replace(r"[\d\.]+ day(s)?", "1")
cases["age"] = cases["age"].str.replace(r"[\d\.]+ month(s)?", "1")
cases.loc[cases["age"].str.contains("-"), "age"] = None
sex_adapter = lambda x: {"M": "male", "F": "female"}.get(x, "sex_unknown")
cases["sex"] = cases["sex"].str.strip()
cases["sex"] = cases["sex"].apply(sex_adapter)
cases["date_new_deceased"] = None
deceased_mask = cases["_prognosis"] == "Deceased"
cases.loc[deceased_mask, "date_new_deceased"] = cases.loc[deceased_mask, "_change_date"]
cases["date_new_hospitalized"] = None
hosp_mask = cases["_prognosis"] == "Hospitalized"
cases.loc[hosp_mask, "date_new_hospitalized"] = cases.loc[hosp_mask, "_change_date"]
data = convert_cases_to_time_series(cases, ["subregion1_name", "subregion2_name"])
data["country_code"] = "IN"
# Aggregate country level and admin level 1
country = aggregate_admin_level(data, ["date", "age", "sex"], "country")
subregion1 = aggregate_admin_level(data, ["date", "age", "sex"], "subregion1")
subregion1 = subregion1[subregion1["subregion1_name"].str.lower() != "state unassigned"]
# Data for admin level 2 is too noisy and there are many mismatches, so we only return
# the aggregated country level and admin level 1 data
return concat([country, subregion1])
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic operator strategy."""
# pylint: disable=invalid-name,unused-argument
import logging
import re
from tvm import _ffi, ir, te, topi
from tvm.target import generic_func, override_native_generic_func
from tvm.topi.utils import get_const_float, get_const_int, get_const_tuple, get_float_tuple
from .. import op as _op
logger = logging.getLogger("strategy")
def naive_schedule(_, outs, target):
"""Return the naive default schedule.
    This function acts as a placeholder for op implementations that use the auto-scheduler.
    Implementations using this function should only be used together with the auto-scheduler.
"""
if "gpu" in target.keys:
# For GPU, we at least need thread binding to make a valid schedule.
# So the naive schedule cannot be compiled.
logger.debug(
"Cannot compile for GPU targets if no tuned schedule is found. "
"Please see the warning messages above for more information about the failed workloads."
)
return te.create_schedule(outs[-1].op)
def wrap_topi_schedule(topi_schedule):
"""Wrap TOPI schedule which doesn't use attrs"""
def wrapper(attrs, outs, target):
with target:
return topi_schedule(outs)
return wrapper
def wrap_topi_compute(topi_compute):
"""Wrap TOPI compute which doesn't use attrs"""
def wrapper(attrs, inputs, out_type):
return [topi_compute(*inputs)]
return wrapper
def get_conv2d_in_channels(data_shape, data_layout):
"""Get conv2d input channels"""
data_shape = get_const_tuple(data_shape)
if len(data_shape) == 4:
idx = data_layout.find("C")
assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout)
return data_shape[idx]
if re.match(r"NCHW\d*c", data_layout):
# NCHW[8]c
return data_shape[1] * data_shape[4]
raise ValueError("Unknown conv2d data layout {}".format(data_layout))
def get_conv2d_out_channels(kernel_shape, kernel_layout):
"""Get conv2d output channels"""
kernel_shape = get_const_tuple(kernel_shape)
if len(kernel_shape) == 4:
idx = kernel_layout.find("O")
assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout)
return kernel_shape[idx]
if re.match(r"OIHW\d*i\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[5]
if re.match(r"OIHW\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[4]
raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups):
ic = get_conv2d_in_channels(data_shape, data_layout)
oc = get_conv2d_out_channels(kernel_shape, kernel_layout)
return ic == oc == groups
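# Illustrative example (not part of the original module): a 32-channel NCHW input with
# an OIHW kernel of shape (32, 1, 3, 3) and groups=32 is detected as depthwise, since
# the input channels, output channels and group count are all equal to 32:
#
#   is_depthwise_conv2d((1, 32, 56, 56), "NCHW", (32, 1, 3, 3), "OIHW", 32)  # -> True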
@generic_func
def schedule_injective(attrs, outs, target):
"""Schedule injective ops"""
with target:
return topi.generic.schedule_injective(outs)
@generic_func
def schedule_reduce(attrs, outs, target):
"""Schedule reduction ops"""
with target:
return topi.generic.schedule_reduce(outs)
_op._schedule_injective = schedule_injective
_op._schedule_reduce = schedule_reduce
# concatenate
@generic_func
def schedule_concatenate(attrs, outs, target):
"""Schedule concatenate op"""
with target:
return topi.generic.schedule_injective(outs)
# pool
@generic_func
def schedule_pool(attrs, outs, target):
"""Schedule pooling ops"""
with target:
return topi.generic.schedule_pool(outs, attrs.layout)
# pool_grad
@generic_func
def schedule_pool_grad(attrs, outs, target):
"""Schedule pooling gradient ops"""
with target:
return topi.generic.schedule_pool_grad(outs)
# adaptive pool
@generic_func
def schedule_adaptive_pool(attrs, outs, target):
"""Schedule adaptive pooling ops"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
# softmax
def wrap_compute_softmax(topi_compute):
"""Wrap softmax topi compute"""
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax
@override_native_generic_func("softmax_strategy")
def softmax_strategy(attrs, inputs, out_type, target):
"""softmax generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="softmax.generic",
)
return strategy
@override_native_generic_func("fast_softmax_strategy")
def fast_softmax_strategy(attrs, inputs, out_type, target):
"""fast softmax generic strategy"""
# NOTE: This op does not have an optimized manual schedule,
# so it should only be used together with auto-scheduler.
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.fast_softmax),
wrap_topi_schedule(topi.generic.schedule_fast_softmax),
name="fast_softmax.generic",
)
return strategy
@override_native_generic_func("log_softmax_strategy")
def log_softmax_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.log_softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="log_softmax.generic",
)
return strategy
# lrn
@generic_func
def schedule_lrn(attrs, outs, target):
"""Schedule LRN op"""
with target:
return topi.generic.schedule_lrn(outs)
# bitpack
@generic_func
def schedule_bitpack(attrs, outs, target):
"""Schedule bitpack"""
with target:
return topi.generic.schedule_bitpack(outs)
get_auto_scheduler_rewritten_layout = _ffi.get_global_func(
"relay.attrs.get_auto_scheduler_rewritten_layout"
)
# conv2d
def wrap_compute_conv2d(
topi_compute,
need_data_layout=False,
need_out_layout=False,
has_groups=False,
need_auto_scheduler_layout=False,
):
"""Wrap conv2d topi compute"""
def _compute_conv2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilation]
if has_groups:
args.append(attrs.groups)
if need_data_layout:
args.append(data_layout)
if need_out_layout:
args.append(out_layout)
args.append(out_dtype)
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
return [topi_compute(*args)]
return _compute_conv2d
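# Illustrative note (not part of the original module): with the default flags the
# compute is invoked as topi_compute(data, kernel, strides, padding, dilation, out_dtype);
# the optional flags splice the groups, the data/out layouts and the auto-scheduler
# rewritten layout into that argument list in the order shown above.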
@override_native_generic_func("conv2d_strategy")
def conv2d_strategy(attrs, inputs, out_type, target):
"""conv2d generic strategy"""
logger.warning("conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
data, kernel = inputs
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_nchw),
name="conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc),
name="conv2d_nhwc.generic",
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic",
)
else:
raise RuntimeError("Unsupported conv2d layout {}".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
name="group_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
# conv2d_NCHWc
@override_native_generic_func("conv2d_NCHWc_strategy")
def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""conv2d_NCHWc generic strategy"""
logger.warning("conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc_int8, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.generic",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.generic",
)
return strategy
# depthwise_conv2d_NCHWc
@override_native_generic_func("depthwise_conv2d_NCHWc_strategy")
def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""depthwise_conv2d generic strategy"""
logger.warning("depthwise_conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.generic",
)
return strategy
# conv2d_winograd_without_weight_transform
@override_native_generic_func("conv2d_winograd_without_weight_transform_strategy")
def conv2d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv2d_winograd_without_weight_transform")
# conv2d_gemm_without_weight_transform
@override_native_generic_func("conv2d_gemm_without_weight_transform_strategy")
def conv2d_gemm_without_weight_transform_strategy(attrs, inputs, out_type, target):
"""conv2d_gemm_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv2d_gemm_without_weight_transform")
# conv2d_winograd_weight_transform
@generic_func
def schedule_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
# conv2d_winograd_nnpack_weight_transform
@generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
# conv2d_gemm_weight_transform
@generic_func
def schedule_conv2d_gemm_weight_transform(attrs, outs, target):
"""Schedule conv2d_gemm_weight_transform"""
with target:
return topi.generic.schedule_conv2d_gemm_weight_transform(outs)
# deformable_conv2d
def wrap_compute_deformable_conv2d(topi_compute):
"""wrap deformable_conv2d topi compute"""
def _compute_deformable_conv2d(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(
inputs[0],
inputs[1],
inputs[2],
strides,
padding,
dilation,
deformable_groups,
groups,
out_dtype,
)
return [out]
return _compute_deformable_conv2d
@override_native_generic_func("deformable_conv2d_strategy")
def deformable_conv2d_strategy(attrs, inputs, out_type, target):
"""deformable_conv2d generic strategy"""
layout = attrs.data_layout
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw),
name="deformable_conv2d_nchw.generic",
)
elif layout == "NHWC":
# This implementation should never be picked by autotvm
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
naive_schedule,
name="deformable_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Layout %s is not supported in deformable conv2d" % layout)
return strategy
# conv2d_transpose
def wrap_compute_conv2d_transpose(topi_compute, has_groups=False, add_layout=False):
"""wrap conv2d_transpose topi compute"""
def compute_conv2d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
# out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
args = [inputs[0], inputs[1], strides, padding, out_dtype, output_padding]
if add_layout:
args.append(attrs.data_layout)
if has_groups:
args.append(attrs.groups)
out = topi_compute(*args)
return [out]
return compute_conv2d_transpose
@override_native_generic_func("conv2d_transpose_strategy")
def conv2d_transpose_strategy(attrs, inputs, out_type, target):
"""conv2d_transpose generic strategy"""
logger.warning("conv2d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
strategy = _op.OpStrategy()
if groups == 1:
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic",
)
else: # group_conv2d_transpose
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.group_conv2d_transpose_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_transpose_nchw),
name="group_conv2d_transpose_nchw.generic",
)
return strategy
# conv3d_transpose
def wrap_compute_conv3d_transpose(topi_compute):
"""wrap conv3d_transpose topi compute"""
def compute_conv3d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv3d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
output_padding = get_const_tuple(attrs.output_padding)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv3d_transpose
@override_native_generic_func("conv3d_transpose_strategy")
def conv3d_transpose_strategy(attrs, inputs, out_type, target):
"""conv3d_transpose generic strategy"""
logger.warning("conv3d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCDHW", "only support ncdhw for now"
assert dilation == (1, 1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.nn.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.generic",
)
return strategy
# conv3d
def wrap_compute_conv3d(topi_compute, need_layout=False, need_auto_scheduler_layout=False):
"""wrap conv3d topi compute"""
def _compute_conv3d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
(dilation_d, dilation_h, dilation_w) = dilation
if dilation_d < 1 or dilation_h < 1 or dilation_w < 1:
raise ValueError("Dilation should be positive value")
args = [inputs[0], inputs[1], strides, padding, dilation, groups]
if need_layout:
args.append(layout)
args.append(out_dtype)
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
return [topi_compute(*args)]
return _compute_conv3d
@override_native_generic_func("conv3d_strategy")
def conv3d_strategy(attrs, inputs, out_type, target):
"""conv3d generic strategy"""
logger.warning("conv3d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.generic",
)
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ndhwc),
wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.generic",
)
else:
raise ValueError("Not support this layout {} yet".format(layout))
return strategy
# conv3d_winograd_without_weight_transform
@override_native_generic_func("conv3d_winograd_without_weight_transform_strategy")
def conv3d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
"""conv3d_winograd_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv3d_winograd_without_weight_transform")
# conv3d_winograd_weight_transform
@generic_func
def schedule_conv3d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv3d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv3d_winograd_weight_transform(outs)
# conv1d
def wrap_compute_conv1d(topi_compute):
"""wrap conv1d topi compute"""
def _compute_conv1d(attrs, inputs, out_type):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)]
return _compute_conv1d
@override_native_generic_func("conv1d_strategy")
def conv1d_strategy(attrs, inputs, out_type, target):
"""conv1d generic strategy"""
logger.warning("conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_ncw),
name="conv1d_ncw.generic",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_conv1d_nwc),
name="conv1d_nwc.generic",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
def wrap_compute_group_conv1d(topi_compute):
"""wrap conv1d topi compute"""
def _compute_group_conv1d(attrs, inputs, out_type):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [
topi_compute(inputs[0], inputs[1], strides, padding, dilation, attrs.groups, out_dtype)
]
return _compute_group_conv1d
@override_native_generic_func("group_conv1d_strategy")
def group_conv1d_strategy(attrs, inputs, out_type, target):
"""group_conv1d generic strategy"""
logger.warning("group_conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.group_conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_group_conv1d_ncw),
name="group_conv1d_ncw.generic",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.group_conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_group_conv1d_nwc),
name="group_conv1d_nwc.generic",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
# conv1d_transpose
def wrap_compute_conv1d_transpose(topi_compute):
"""wrap conv1d_transpose topi compute"""
    def _compute_conv1d_transpose(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
    return _compute_conv1d_transpose
@override_native_generic_func("conv1d_transpose_strategy")
def conv1d_transpose_strategy(attrs, inputs, out_type, target):
"""conv1d_transpose generic strategy"""
logger.warning("conv1d_transpose is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
strategy.add_implementation(
wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.generic",
)
return strategy
# dilation2d
def wrap_compute_dilation2d(topi_compute, need_data_layout=False):
"""Wrap dilation2d topi compute"""
def _compute_dilation2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilations = get_const_tuple(attrs.dilations)
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilations]
if need_data_layout:
args.append(data_layout)
args.append(out_dtype)
return [topi_compute(*args)]
return _compute_dilation2d
@override_native_generic_func("dilation2d_strategy")
def dilation2d_strategy(attrs, inputs, out_type, target):
"""dilation2d_strategy generic strategy"""
logger.warning("dilation2d_strategy is not optimized for this platform.")
strategy = _op.OpStrategy()
dilations = get_const_tuple(attrs.dilations)
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
assert layout in ["NCHW", "NHWC"]
(dilation_h, dilation_w) = dilations
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if layout == "NCHW":
assert kernel_layout == "IHW"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nchw),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
name="dilation2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWI"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
name="dilation2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported dilation2d layout {}".format(layout))
return strategy
def copy_if_identical(tensor_a, tensor_b):
"""
    When two inputs to batch_matmul or dense are the same tensor, e.g. batch_matmul(x, x),
    compilation fails because TE thinks there is only one input tensor x, and doing
    cache_read(x) on the same tensor twice results in an error.
    To prevent such errors, we make the second tensor a copy of the first one
    when the two input tensors are identical.
"""
if tensor_a == tensor_b:
return te.compute(tensor_a.shape, lambda *ind: tensor_a[ind])
return tensor_b
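# Illustrative sketch (not part of the original module): for batch_matmul(x, x) the
# second argument becomes an element-wise copy of x, so TE sees two distinct tensors
# and cache_read() can be applied to each of them separately.
#
#   x = te.placeholder((8, 128, 128), name="x")
#   y = copy_if_identical(x, x)   # y is a fresh te.compute copy, not x itself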
# matmul
def wrap_compute_matmul(topi_compute, need_auto_scheduler_layout=False):
"""wrap matmul topi compute"""
def _compute_matmul(attrs, inputs, out_type):
"""Compute definition of matmul"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
args = [
inputs[0],
inputs[1],
None,
out_dtype,
attrs.transpose_a,
attrs.transpose_b,
]
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
args[1] = copy_if_identical(inputs[0], inputs[1])
return [topi_compute(*args)]
return _compute_matmul
@override_native_generic_func("matmul_strategy")
def matmul_strategy(attrs, inputs, out_type, target):
"""matmul generic strategy"""
logger.warning("matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_matmul(topi.nn.matmul),
wrap_topi_schedule(topi.generic.schedule_matmul),
name="matmul.generic",
)
return strategy
# dense
def wrap_compute_dense(topi_compute, need_auto_scheduler_layout=False):
"""wrap dense topi compute"""
def _compute_dense(attrs, inputs, out_type):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
args = [inputs[0], inputs[1], None, out_dtype]
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
args[1] = copy_if_identical(inputs[0], inputs[1])
return [topi_compute(*args)]
return _compute_dense
@override_native_generic_func("dense_strategy")
def dense_strategy(attrs, inputs, out_type, target):
"""dense generic strategy"""
logger.warning("dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic",
)
return strategy
@override_native_generic_func("dense_pack_strategy")
def dense_pack_strategy(attrs, inputs, out_type, target):
"""dense_pack generic strategy"""
logger.warning("dense_pack is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense_pack),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense_pack.generic",
)
return strategy
# batch_matmul
def wrap_compute_batch_matmul(topi_compute, need_auto_scheduler_layout=False, need_out_dtype=False):
"""wrap batch_matmul topi compute"""
def _compute_batch_matmul(attrs, inputs, out_type):
args = [inputs[0], inputs[1], out_type.shape]
args.append(out_type.dtype if need_out_dtype else None)
args.append(attrs.transpose_a)
args.append(attrs.transpose_b)
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
args[1] = copy_if_identical(inputs[0], inputs[1])
return [topi_compute(*args)]
return _compute_batch_matmul
@override_native_generic_func("batch_matmul_strategy")
def batch_matmul_strategy(attrs, inputs, out_type, target):
"""batch_matmul generic strategy"""
logger.warning("batch_matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.nn.batch_matmul),
wrap_topi_schedule(topi.generic.schedule_batch_matmul),
name="batch_matmul.generic",
)
return strategy
# batch_norm
def wrap_compute_batch_norm(topi_compute):
"""wrap batch_norm topi compute"""
def _compute_batch_norm(attrs, inputs, out_type):
return topi_compute(*inputs, attrs.axis, attrs.epsilon, attrs.center, attrs.scale)
return _compute_batch_norm
@override_native_generic_func("batch_norm_strategy")
def batch_norm_strategy(attrs, inputs, out_type, target):
"""batch_norm generic strategy"""
logger.warning("batch_norm is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_norm(topi.nn.batch_norm),
wrap_topi_schedule(topi.generic.schedule_batch_norm),
name="batch_norm.generic",
)
return strategy
# sparse dense
def wrap_compute_sparse_dense(topi_compute):
"""wrap sparse dense topi compute"""
def _compute_sparse_dense(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])]
return _compute_sparse_dense
@override_native_generic_func("sparse_dense_strategy")
def sparse_dense_strategy(attrs, inputs, out_type, target):
"""sparse dense generic strategy"""
logger.warning("sparse dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.nn.sparse_dense),
wrap_topi_schedule(topi.generic.schedule_sparse_dense),
name="sparse_dense.generic",
)
return strategy
@override_native_generic_func("sparse_dense_padded_strategy")
def sparse_dense_padded_strategy(attrs, inputs, out_type, target):
"""sparse dense padded generic strategy"""
raise NotImplementedError("sparse_dense_padded is only implemented for cuda")
# sparse_add
def wrap_compute_sparse_add(topi_compute):
"""wrap sparse add topi compute"""
def _compute_sparse_add(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3])]
return _compute_sparse_add
@override_native_generic_func("sparse_add_strategy")
def sparse_add_strategy(attrs, inputs, out_type, target):
"""sparse add generic strategy"""
logger.warning("sparse add is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_add(topi.nn.sparse_add),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sparse_add.generic",
)
return strategy
# sparse_transpose
@generic_func
def schedule_sparse_transpose(attrs, outs, target):
"""schedule sparse_transpose"""
with target:
return topi.generic.schedule_sparse_transpose(outs)
# sparse conv2d
def wrap_compute_sparse_conv2d(topi_compute):
"""wrap sparse conv2d topi compute"""
def _compute_sparse_conv2d(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["layout"])]
return _compute_sparse_conv2d
@override_native_generic_func("sparse_conv2d_strategy")
def sparse_conv2d_strategy(attrs, inputs, out_type, target):
"""sparse conv2d generic strategy"""
logger.warning("sparse conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_conv2d(topi.nn.sparse_conv2d),
wrap_topi_schedule(topi.generic.schedule_sparse_conv2d),
name="sparse_conv2d.generic",
)
return strategy
# sort
def wrap_compute_sort(topi_compute):
"""Wrap sort topi compute"""
def _compute_sort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend)]
return _compute_sort
@override_native_generic_func("sort_strategy")
def sort_strategy(attrs, inputs, out_type, target):
"""sort generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sort(topi.sort),
wrap_topi_schedule(topi.generic.schedule_sort),
name="sort.generic",
)
return strategy
# argsort
def wrap_compute_argsort(topi_compute):
"""Wrap argsort topi compute"""
def _compute_argsort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)]
return _compute_argsort
@override_native_generic_func("argsort_strategy")
def argsort_strategy(attrs, inputs, out_type, target):
"""argsort generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.argsort),
wrap_topi_schedule(topi.generic.schedule_argsort),
name="argsort.generic",
)
return strategy
# topk
def wrap_compute_topk(topi_compute):
"""Wrap topk compute"""
def _compute_topk(attrs, inputs, out_type):
if attrs.k is not None:
k = attrs.k
else:
k = inputs[1]
axis = get_const_int(attrs.axis)
ret_type = attrs.ret_type
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
out = topi_compute(inputs[0], k, axis, ret_type, is_ascend, dtype)
out = out if isinstance(out, list) else [out]
return out
return _compute_topk
@override_native_generic_func("topk_strategy")
def topk_strategy(attrs, inputs, out_type, target):
"""topk generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.topk),
wrap_topi_schedule(topi.generic.schedule_topk),
name="topk.generic",
)
return strategy
# searchsorted
def wrap_compute_searchsorted(topi_compute):
"""Wrap searchsorted compute"""
def _compute_searchsorted(attrs, inputs, out_type):
right = attrs.right
dtype = attrs.dtype
return [topi_compute(inputs[0], inputs[1], right, dtype)]
return _compute_searchsorted
# searchsorted_strategy
@override_native_generic_func("searchsorted_strategy")
def searchsorted_strategy(attrs, inputs, out_type, target):
"""searchsorted generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_searchsorted(topi.searchsorted),
wrap_topi_schedule(topi.generic.schedule_extern),
name="searchsorted.generic",
)
return strategy
# multibox_prior
def wrap_compute_multibox_prior(topi_compute):
"""Wrap multibox_prior compute"""
def _compute_multibox_prior(attrs, inputs, _):
"""Compute definition of multibox_prior"""
sizes = get_float_tuple(attrs.sizes)
ratios = get_float_tuple(attrs.ratios)
steps = get_float_tuple(attrs.steps)
offsets = get_float_tuple(attrs.offsets)
clip = bool(get_const_int(attrs.clip))
return [topi_compute(inputs[0], sizes, ratios, steps, offsets, clip)]
return _compute_multibox_prior
@override_native_generic_func("multibox_prior_strategy")
def multibox_prior_strategy(attrs, inputs, out_type, target):
"""multibox_prior generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_prior(topi.vision.ssd.multibox_prior),
wrap_topi_schedule(topi.generic.schedule_multibox_prior),
name="multibox_prior.generic",
)
return strategy
# multibox_transform_loc
def wrap_compute_multibox_transform_loc(topi_compute):
"""Wrap multibox_transform_loc compute"""
def _compute_multibox_transform_loc(attrs, inputs, _):
"""Compute definition of multibox_detection"""
clip = bool(get_const_int(attrs.clip))
threshold = get_const_float(attrs.threshold)
variances = get_float_tuple(attrs.variances)
return topi_compute(inputs[0], inputs[1], inputs[2], clip, threshold, variances)
return _compute_multibox_transform_loc
@override_native_generic_func("multibox_transform_loc_strategy")
def multibox_transform_loc_strategy(attrs, inputs, out_type, target):
"""schedule multibox_transform_loc"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.vision.ssd.multibox_transform_loc),
wrap_topi_schedule(topi.generic.schedule_multibox_transform_loc),
name="multibox_transform_loc.generic",
)
return strategy
# get_valid_counts
def wrap_compute_get_valid_counts(topi_compute):
"""wrap get_valid_counts topi compute"""
def _compute_get_valid_counts(attrs, inputs, out_type):
score_threshold = inputs[1]
id_index = get_const_int(attrs.id_index)
score_index = get_const_int(attrs.score_index)
if attrs.score_threshold is not None:
score_threshold = get_const_float(attrs.score_threshold)
return topi_compute(inputs[0], score_threshold, id_index, score_index)
return _compute_get_valid_counts
@override_native_generic_func("get_valid_counts_strategy")
def get_valid_counts_strategy(attrs, inputs, out_type, target):
"""get_valid_counts generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_get_valid_counts(topi.vision.get_valid_counts),
wrap_topi_schedule(topi.generic.schedule_get_valid_counts),
name="get_valid_counts.generic",
)
return strategy
# non-maximum suppression
def wrap_compute_nms(topi_compute):
"""wrap nms topi compute"""
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[3]
iou_threshold = inputs[4]
return_indices = bool(get_const_int(attrs.return_indices))
force_suppress = bool(get_const_int(attrs.force_suppress))
top_k = get_const_int(attrs.top_k)
coord_start = get_const_int(attrs.coord_start)
score_index = get_const_int(attrs.score_index)
id_index = get_const_int(attrs.id_index)
invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom))
if return_indices:
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
]
return _compute_nms
@override_native_generic_func("non_max_suppression_strategy")
def nms_strategy(attrs, inputs, out_type, target):
"""nms generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_nms(topi.vision.non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="nms.generic",
)
return strategy
def wrap_compute_all_class_nms(topi_compute):
"""wrap all class nms topi compute"""
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[2]
iou_threshold = inputs[3]
score_threshold = inputs[4]
output_format = attrs.output_format
return topi_compute(
inputs[0],
inputs[1],
max_output_size,
iou_threshold,
score_threshold,
output_format,
)
return _compute_nms
@override_native_generic_func("all_class_non_max_suppression_strategy")
def all_class_nms_strategy(attrs, inputs, out_type, target):
"""all class nms generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_all_class_nms(topi.vision.all_class_non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="all_class_nms.generic",
)
return strategy
# roi_align
def wrap_compute_roi_align(topi_compute):
"""wrap roi_align topi compute"""
def _compute_roi_align(attrs, inputs, out_type):
pooled_size = get_const_tuple(attrs.pooled_size)
mode = bytes(attrs.mode, "utf-8")
return [
topi_compute(
inputs[0],
inputs[1],
pooled_size=pooled_size,
spatial_scale=attrs.spatial_scale,
sample_ratio=attrs.sample_ratio,
mode=mode,
)
]
return _compute_roi_align
@override_native_generic_func("roi_align_strategy")
def roi_align_strategy(attrs, inputs, out_type, target):
"""roi_align generic strategy"""
strategy = _op.OpStrategy()
layout = attrs.layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic",
)
else:
assert layout == "NHWC", "layout must be NCHW or NHWC."
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nhwc),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic",
)
return strategy
# sparse_fill_empty_rows
@override_native_generic_func("sparse_fill_empty_rows_strategy")
def sparse_fill_empty_rows_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_fill_empty_rows(topi.sparse_fill_empty_rows),
wrap_topi_schedule(topi.generic.schedule_sparse_fill_empty_rows),
name="sparse_fill_empty_rows.generic",
)
return strategy
def wrap_compute_sparse_fill_empty_rows(topi_compute):
"""Wrap sparse_fill_empty_rows compute"""
def _compute_sparse_fill_empty_rows(attrs, inputs, output_type):
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
inputs[3],
output_type.fields[0].shape,
output_type.fields[1].shape,
output_type.fields[2].shape,
)
return _compute_sparse_fill_empty_rows
# sparse_reshape
@override_native_generic_func("sparse_reshape_strategy")
def sparse_reshape_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_reshape(topi.sparse_reshape),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sparse_reshape.generic",
)
return strategy
def wrap_compute_sparse_reshape(topi_compute):
"""Wrap sparse_reshape compute"""
def _compute_sparse_reshape(attrs, inputs, output_type):
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
output_type.fields[0].shape,
output_type.fields[1].shape,
)
return _compute_sparse_reshape
# roi_pool
@generic_func
def schedule_roi_pool(attrs, outs, target):
"""schedule roi_pool"""
with target:
return topi.generic.schedule_roi_pool(outs)
# proposal
def wrap_compute_proposal(topi_compute):
"""wrap proposal topi compute"""
def _compute_proposal(attrs, inputs, out_type):
scales = get_float_tuple(attrs.scales)
ratios = get_float_tuple(attrs.ratios)
feature_stride = attrs.feature_stride
threshold = attrs.threshold
rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n
rpn_post_nms_top_n = attrs.rpn_post_nms_top_n
rpn_min_size = attrs.rpn_min_size
iou_loss = bool(get_const_int(attrs.iou_loss))
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
)
]
return _compute_proposal
@override_native_generic_func("proposal_strategy")
def proposal_strategy(attrs, inputs, out_type, target):
"""proposal generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_proposal(topi.vision.rcnn.proposal),
wrap_topi_schedule(topi.generic.schedule_proposal),
name="proposal.generic",
)
return strategy
# scatter
@override_native_generic_func("scatter_strategy")
def scatter_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter.generic",
)
return strategy
def wrap_compute_scatter(topi_compute):
"""Wrap scatter topi compute"""
def _compute_scatter(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], inputs[2], attrs.axis)]
return _compute_scatter
@override_native_generic_func("scatter_add_strategy")
def scatter_add_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter_add),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter_add.generic",
)
return strategy
# scatter_nd
@override_native_generic_func("scatter_nd_strategy")
def scatter_nd_strategy(attrs, inputs, out_type, target):
"""scatter_nd generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.generic",
)
return strategy
def wrap_compute_scatter_nd(topi_compute):
"""Wrap scatter_nd topi compute"""
def _compute_scatter_nd(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], inputs[2], attrs.mode)]
return _compute_scatter_nd
# bitserial_conv2d
def wrap_compute_bitserial_conv2d(topi_compute):
"""wrap bitserial_conv2d topi compute"""
def compute_bitserial_conv2d(attrs, inputs, out_dtype):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0],
inputs[1],
strides,
padding,
activation_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar,
)
]
return compute_bitserial_conv2d
@override_native_generic_func("bitserial_conv2d_strategy")
def bitserial_conv2d_strategy(attrs, inputs, out_type, target):
"""bitserial_conv2d generic strategy"""
logger.warning("bitserial_conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.generic",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.generic",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
# bitserial_dense
def wrap_compute_bitserial_dense(topi_compute):
"""wrap bitserial_dense topi compute"""
def compute_bitserial_dense(attrs, inputs, out_type):
"""Compute definition of bitserial dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0], inputs[1], data_bits, weight_bits, pack_dtype, out_dtype, unipolar
)
]
return compute_bitserial_dense
@override_native_generic_func("bitserial_dense_strategy")
def bitserial_dense_strategy(attrs, inputs, out_type, target):
"""bitserial_dense generic strategy"""
logger.warning("bitserial_dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.nn.bitserial_dense),
wrap_topi_schedule(topi.generic.schedule_bitserial_dense),
name="bitserial_dense.generic",
)
return strategy
# correlation
def wrap_compute_correlation(topi_compute):
"""wrap correlation topi compute"""
def _compute_correlation(attrs, inputs, out_type):
kernel_size = attrs.kernel_size
max_displacement = attrs.max_displacement
stride1 = attrs.stride1
stride2 = attrs.stride2
padding = get_const_tuple(attrs.padding)
is_multiply = attrs.is_multiply
return [
topi_compute(
inputs[0],
inputs[1],
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
)
]
return _compute_correlation
@override_native_generic_func("correlation_strategy")
def correlation_strategy(attrs, inputs, out_type, target):
"""correlation generic strategy"""
logger.warning("correlation is not optimized for this platform.")
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.nn.correlation_nchw),
wrap_topi_schedule(topi.generic.schedule_correlation_nchw),
name="correlation.generic",
)
return strategy
# argwhere
def wrap_compute_argwhere(topi_compute):
"""wrap argwhere topi compute"""
def _compute_argwhere(attrs, inputs, out_type):
output_shape = []
for s in out_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
output_shape.append(te.var("any_dim", "int32"))
new_output_type = ir.TensorType(output_shape, "int32")
return [topi_compute(new_output_type, inputs[0])]
return _compute_argwhere
@override_native_generic_func("argwhere_strategy")
def argwhere_strategy(attrs, inputs, out_type, target):
"""argwhere generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argwhere(topi.argwhere),
wrap_topi_schedule(topi.generic.schedule_argwhere),
name="argwhere.generic",
)
return strategy
# threefry_generate
def wrap_compute_threefry_generate(topi_compute):
"""Wrap threefry_generate topi compute"""
def _compute_threefry_generate(attrs, inputs, _):
return topi_compute(inputs[0], attrs.out_shape)
return _compute_threefry_generate
@override_native_generic_func("threefry_generate_strategy")
def threefry_generate_strategy(attrs, inputs, out_type, target):
"""threefry_generate generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_threefry_generate(topi.random.threefry_generate),
wrap_topi_schedule(topi.generic.schedule_extern),
name="threefry_generate.generic",
)
return strategy
# threefry_split
def wrap_compute_threefry_split(topi_compute):
"""Wrap threefry_split topi compute"""
def _compute_threefry_split(attrs, inputs, _):
return topi_compute(inputs[0])
return _compute_threefry_split
@override_native_generic_func("threefry_split_strategy")
def threefry_split_strategy(attrs, inputs, out_type, target):
"""threefry_split generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_threefry_split(topi.random.threefry_split),
wrap_topi_schedule(topi.generic.schedule_extern),
name="threefry_split.generic",
)
return strategy
# uniform
def wrap_compute_uniform(topi_compute):
"""Wrap uniform topi compute"""
def _compute_uniform(attrs, inputs, _):
return list(topi_compute(inputs[0], inputs[1], inputs[2], attrs.out_shape, attrs.out_dtype))
return _compute_uniform
@override_native_generic_func("uniform_strategy")
def uniform_strategy(attrs, inputs, out_type, target):
"""uniform generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_uniform(topi.random.uniform),
wrap_topi_schedule(topi.generic.schedule_extern),
name="uniform.generic",
)
return strategy
# sliding_window
def wrap_compute_sliding_window():
"""Wrap sliding_window topi compute"""
def _compute_sliding_window(attrs, inputs, _):
return [topi.sliding_window(inputs[0], attrs.axis, attrs.window_shape, attrs.strides)]
return _compute_sliding_window
@override_native_generic_func("sliding_window_strategy")
def sliding_window_strategy(attrs, inputs, out_type, target):
"""sliding_window generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sliding_window(),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sliding_window.generic",
)
return strategy
@override_native_generic_func("normal_strategy")
def normal_strategy(attrs, inputs, out_type, target):
"""normal generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_uniform(topi.random.normal),
wrap_topi_schedule(topi.generic.schedule_extern),
name="normal.generic",
)
return strategy
def wrap_compute_scanop(topi_compute):
"""Wrap scanop style topi compute"""
def _compute_scanop(attrs, inputs, _):
return [topi_compute(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive)]
return _compute_scanop
@override_native_generic_func("cumsum_strategy")
def cumsum_strategy(attrs, inputs, out_type, target):
"""cumsum generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scanop(topi.cumsum),
wrap_topi_schedule(topi.generic.schedule_extern),
name="cumsum.generic",
)
return strategy
@override_native_generic_func("cumprod_strategy")
def cumprod_strategy(attrs, inputs, out_type, target):
"""cumprod generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scanop(topi.cumprod),
wrap_topi_schedule(topi.generic.schedule_extern),
name="cumprod.generic",
)
return strategy
def wrap_compute_unique(topi_compute):
"""Wrap unique topi compute"""
def _compute_unique(attrs, inputs, _):
return topi_compute(inputs[0], attrs.sorted, attrs.return_counts)
return _compute_unique
@override_native_generic_func("unique_strategy")
def unique_strategy(attrs, inputs, out_type, target):
"""unique generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_unique(topi.unique),
wrap_topi_schedule(topi.generic.schedule_unique),
name="unique.generic",
)
return strategy
@generic_func
def schedule_transpose(attrs, outs, target):
"""schedule transpose"""
with target:
return schedule_injective(attrs, outs, target)
# invert_permutation
def wrap_compute_invert_permutation(topi_compute):
"""wrap invert_permutation topi compute"""
def _compute_invert_permutation(attrs, inputs, out_type):
return [topi_compute(inputs[0])]
return _compute_invert_permutation
@override_native_generic_func("invert_permutation_strategy")
def invert_permutation_strategy(attrs, inputs, out_type, target):
"""invert_permutation generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_invert_permutation(topi.invert_permutation),
wrap_topi_schedule(topi.generic.schedule_injective),
name="invert_permutation.generic",
)
return strategy
def wrap_compute_einsum(topi_compute):
"""Wrap einsum topi compute"""
def _compute_einsum(attrs, inputs, _):
return [topi_compute(attrs.equation, *inputs)]
return _compute_einsum
@override_native_generic_func("einsum_strategy")
def einsum_strategy(attrs, inputs, out_type, target):
"""einsum generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_einsum(topi.einsum),
wrap_topi_schedule(topi.generic.schedule_einsum),
name="einsum.generic",
)
return strategy
# conv2d_backward_weight
def wrap_compute_conv2d_backward_weight(topi_compute):
"""wrap conv2d_backward_weight topi compute"""
def _compute_conv2d_backward_weight(attrs, inputs, out_dtype):
kernel_size = get_const_tuple(attrs.kernel_size)
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
out_dtype = attrs.out_dtype
layout = attrs.data_layout
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(
inputs[0],
inputs[1],
kernel_size,
padding,
strides,
dilation,
groups,
layout,
out_dtype,
)
return [out]
return _compute_conv2d_backward_weight
@override_native_generic_func("conv2d_backward_weight_strategy")
def conv2d_backward_weight_strategy(attrs, inputs, out_type, target):
"""wgrad generic strategy"""
raise RuntimeError(
"conv2d_backward_weight is currently only supported with cudnn. "
"Please run Legalize pass to decompose this op into supported ops."
)
|
|
# -*- coding: utf-8 -*-
#
#  era_to_gssha.py
# GSSHApy
#
# Created by Alan D Snow, 2016.
# License BSD 3-Clause
import logging
from datetime import timedelta
from os import mkdir, path, remove, rename
import xarray as xr
from .grid_to_gssha import GRIDtoGSSHA
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# HELPER FUNCTIONS
# ------------------------------------------------------------------------------
def download_era5_for_gssha(main_directory,
start_datetime,
end_datetime,
leftlon=-180,
rightlon=180,
toplat=90,
bottomlat=-90,
precip_only=False):
"""
Function to download ERA5 data for GSSHA
.. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets
Args:
main_directory(:obj:`str`): Location of the output for the forecast data.
start_datetime(:obj:`str`): Datetime for download start.
end_datetime(:obj:`str`): Datetime for download end.
leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180.
rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180.
toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90.
bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90.
precip_only(Optional[bool]): If True, will only download precipitation.
Example::
        from datetime import datetime
        from gsshapy.grid.era_to_gssha import download_era5_for_gssha
        era5_folder = '/era5'
        leftlon = -95
        rightlon = -75
        toplat = 35
        bottomlat = 30
        download_era5_for_gssha(era5_folder,
                                datetime(2016, 1, 2),
                                datetime(2016, 1, 4),
                                leftlon=leftlon,
                                rightlon=rightlon,
                                toplat=toplat,
                                bottomlat=bottomlat)
"""
# parameters: https://software.ecmwf.int/wiki/display/CKB/ERA5_test+data+documentation#ERA5_testdatadocumentation-Parameterlistings
# import here to make sure it is not required to run
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
try:
mkdir(main_directory)
except OSError:
pass
download_area = "{toplat}/{leftlon}/{bottomlat}/{rightlon}".format(toplat=toplat,
leftlon=leftlon,
bottomlat=bottomlat,
rightlon=rightlon)
download_datetime = start_datetime
while download_datetime <= end_datetime:
download_file = path.join(main_directory, "era5_gssha_{0}.nc".format(download_datetime.strftime("%Y%m%d")))
download_date = download_datetime.strftime("%Y-%m-%d")
if not path.exists(download_file) and not precip_only:
server.retrieve({
'dataset': "era5_test",
# 'oper' specifies the high resolution daily data, as opposed to monthly means, wave, eda edmm, etc.
'stream': "oper",
# We want instantaneous parameters, which are archived as type Analysis ('an') as opposed to forecast (fc)
'type': "an",
# Surface level, as opposed to pressure level (pl) or model level (ml)
'levtype': "sfc",
# For parameter codes see the ECMWF parameter database at http://apps.ecmwf.int/codes/grib/param-db
'param': "2t/2d/sp/10u/10v/tcc",
# The spatial resolution in ERA5 is 31 km globally on a Gaussian grid.
            # Here we use lat/long with 0.25 degrees, which is approximately the equivalent of 31 km.
'grid': "0.25/0.25",
# ERA5 provides hourly analysis
'time': "00/to/23/by/1",
# area: N/W/S/E
'area': download_area,
'date': download_date,
'target': download_file,
'format': 'netcdf',
})
era5_request = {
'dataset': "era5_test",
'stream': "oper",
'type': "fc",
'levtype': "sfc",
'param': "tp/ssrd",
'grid': "0.25/0.25",
'area': download_area,
'format': 'netcdf',
}
prec_download_file = path.join(main_directory, "era5_gssha_{0}_fc.nc".format(download_datetime.strftime("%Y%m%d")))
loc_download_file0 = path.join(main_directory, "era5_gssha_{0}_0_fc.nc".format(download_datetime.strftime("%Y%m%d")))
loc_download_file1 = path.join(main_directory, "era5_gssha_{0}_1_fc.nc".format(download_datetime.strftime("%Y%m%d")))
loc_download_file2 = path.join(main_directory, "era5_gssha_{0}_2_fc.nc".format(download_datetime.strftime("%Y%m%d")))
if download_datetime <= start_datetime and not path.exists(loc_download_file0):
loc_download_date = (download_datetime-timedelta(1)).strftime("%Y-%m-%d")
# precipitation 0000-0600
era5_request['step'] = "6/to/12/by/1"
era5_request['time'] = "18"
era5_request['target'] = loc_download_file0
era5_request['date'] = loc_download_date
server.retrieve(era5_request)
if download_datetime == end_datetime and not path.exists(loc_download_file1):
loc_download_date = download_datetime.strftime("%Y-%m-%d")
# precipitation 0600-1800
era5_request['step'] = "1/to/12/by/1"
era5_request['time'] = "06"
era5_request['target'] = loc_download_file1
era5_request['date'] = loc_download_date
server.retrieve(era5_request)
if download_datetime == end_datetime and not path.exists(loc_download_file2):
loc_download_date = download_datetime.strftime("%Y-%m-%d")
# precipitation 1800-2300
era5_request['step'] = "1/to/5/by/1"
era5_request['time'] = "18"
era5_request['target'] = loc_download_file2
era5_request['date'] = loc_download_date
server.retrieve(era5_request)
if download_datetime < end_datetime and not path.exists(prec_download_file):
# precipitation 0600-0600 (next day)
era5_request['step'] = "1/to/12/by/1"
era5_request['time'] = "06/18"
era5_request['target'] = prec_download_file
era5_request['date'] = download_date
server.retrieve(era5_request)
download_datetime += timedelta(1)
def download_interim_for_gssha(main_directory,
start_datetime,
end_datetime,
leftlon=-180,
rightlon=180,
toplat=90,
bottomlat=-90,
precip_only=False):
"""
    Function to download ERA-Interim data for GSSHA
.. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets
Args:
main_directory(:obj:`str`): Location of the output for the forecast data.
start_datetime(:obj:`str`): Datetime for download start.
end_datetime(:obj:`str`): Datetime for download end.
leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180.
rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180.
toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90.
bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90.
precip_only(Optional[bool]): If True, will only download precipitation.
Example::
        from datetime import datetime
        from gsshapy.grid.era_to_gssha import download_interim_for_gssha
        era_interim_folder = '/era_interim'
        leftlon = -95
        rightlon = -75
        toplat = 35
        bottomlat = 30
        download_interim_for_gssha(era_interim_folder,
                                   datetime(2016, 1, 2),
                                   datetime(2016, 1, 4),
                                   leftlon=leftlon,
                                   rightlon=rightlon,
                                   toplat=toplat,
                                   bottomlat=bottomlat)
"""
# parameters: https://software.ecmwf.int/wiki/display/CKB/Details+of+ERA-Interim+parameters
# import here to make sure it is not required to run
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
try:
mkdir(main_directory)
except OSError:
pass
download_area = "{toplat}/{leftlon}/{bottomlat}/{rightlon}".format(toplat=toplat,
leftlon=leftlon,
bottomlat=bottomlat,
rightlon=rightlon)
download_datetime = start_datetime
interim_request = {
'dataset': "interim",
# 'oper' specifies the high resolution daily data, as opposed to monthly means, wave, eda edmm, etc.
'stream': "oper",
# Surface level, as opposed to pressure level (pl) or model level (ml)
'levtype': "sfc",
        # The native spatial resolution of ERA-Interim is about 80 km (roughly 0.75 degrees)
        # on a Gaussian grid; here we request a regular 0.5 degree lat/long grid.
'grid': "0.5/0.5",
'area': download_area,
'format': 'netcdf',
}
while download_datetime <= end_datetime:
interim_request['date'] = download_datetime.strftime("%Y-%m-%d")
if not precip_only:
download_file = path.join(main_directory, "erai_gssha_{0}_an.nc".format(download_datetime.strftime("%Y%m%d")))
if not path.exists(download_file):
# We want instantaneous parameters, which are archived as type Analysis ('an') as opposed to forecast (fc)
interim_request['type'] = "an"
# For parameter codes see the ECMWF parameter database at http://apps.ecmwf.int/codes/grib/param-db
interim_request['param'] = "2t/2d/sp/10u/10v/tcc"
# step 0 is analysis, 3-12 is forecast
interim_request['step'] = "0"
# ERA Interim provides 6-hourly analysis
interim_request['time'] = "00/06/12/18"
interim_request['target'] = download_file
server.retrieve(interim_request)
download_file = path.join(main_directory, "erai_gssha_{0}_1_fc.nc".format(download_datetime.strftime("%Y%m%d")))
if not path.exists(download_file):
interim_request['type'] = "fc"
interim_request['param'] = "2t/2d/sp/10u/10v/tcc"
interim_request['step'] = "3"
interim_request['time'] = "00/06/12/18"
interim_request['target'] = download_file
server.retrieve(interim_request)
download_file = path.join(main_directory, "erai_gssha_{0}_fc.nc".format(download_datetime.strftime("%Y%m%d")))
if not path.exists(download_file):
interim_request['type'] = "fc"
interim_request['param'] = "tp/ssrd"
interim_request['step'] = "3/6/9/12"
interim_request['time'] = "00/12"
interim_request['target'] = download_file
server.retrieve(interim_request)
            # The retrieved totals are accumulated from the forecast base time;
            # convert them to incremental (per-step) values below.
# https://software.ecmwf.int/wiki/pages/viewpage.action?pageId=56658233
# You need total precipitation for every 6 hours.
# Daily total precipitation (tp) is only available with a forecast base time 00:00 and 12:00,
# so to get tp for every 6 hours you will need to extract (and for the second and fourth period calculate):
# tp(00-06) = (time 00, step 6)
# tp(06-12) = (time 00, step 12) minus (time 00, step 6)
# tp(12-18) = (time 12, step 6)
# tp(18-24) = (time 12, step 12) minus (time 12, step 6)
# (Note the units for total precipitation is meters.)
tmp_download_file = download_file + '_tmp'
with xr.open_dataset(download_file) as xd:
diff_xd = xd.diff('time')
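                # eight accumulated stamps (steps 3/6/9/12 from base 00, then from base 12):
                # keep the first value of each base and replace the rest with successive differences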
xd.tp[1:4] = diff_xd.tp[:3]
xd.tp[5:] = diff_xd.tp[4:]
xd.ssrd[1:4] = diff_xd.ssrd[:3]
xd.ssrd[5:] = diff_xd.ssrd[4:]
xd.to_netcdf(tmp_download_file)
remove(download_file)
rename(tmp_download_file, download_file)
download_file = path.join(main_directory, "erai_gssha_{0}_0_fc.nc".format(download_datetime.strftime("%Y%m%d")))
if download_datetime <= start_datetime and not path.exists(download_file):
loc_download_date = (download_datetime-timedelta(1)).strftime("%Y-%m-%d")
interim_request['type'] = "fc"
interim_request['param'] = "tp/ssrd"
interim_request['step'] = "9/12"
interim_request['time'] = "12"
interim_request['target'] = download_file
interim_request['date'] = loc_download_date
server.retrieve(interim_request)
# convert to incremental (see above)
tmp_download_file = download_file + '_tmp'
with xr.open_dataset(download_file) as xd:
inc_xd = xd.diff('time')
inc_xd.to_netcdf(tmp_download_file)
remove(download_file)
rename(tmp_download_file, download_file)
download_datetime += timedelta(1)
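# Illustrative sketch (not part of the original module): the accumulated-to-incremental
# arithmetic described in the comments above, applied to a toy dataset. The "tp" name
# matches the ECMWF total-precipitation field; the values are made up.
def _example_accumulated_to_incremental():
    import numpy as np
    accumulated = xr.Dataset(
        {"tp": ("time", np.array([1.0, 3.0, 4.0, 6.0]))},
        coords={"time": np.arange(4)},
    )
    # diff('time') turns running totals into per-step amounts: [2.0, 1.0, 2.0]
    return accumulated.diff("time")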
# ------------------------------------------------------------------------------
# MAIN CLASS
# ------------------------------------------------------------------------------
class ERAtoGSSHA(GRIDtoGSSHA):
"""This class converts the ERA5 or ERA Interim output data to GSSHA formatted input.
This class inherits from class:`GRIDtoGSSHA`.
.. note:: https://software.ecmwf.int/wiki/display/CKB/How+to+download+ERA5+test+data+via+the+ECMWF+Web+API
Attributes:
gssha_project_folder(:obj:`str`): Path to the GSSHA project folder
gssha_project_file_name(:obj:`str`): Name of the GSSHA elevation grid file.
lsm_input_folder_path(:obj:`str`): Path to the input folder for the LSM files.
lsm_search_card(:obj:`str`): Glob search pattern for LSM files. Ex. "*.grib2".
lsm_lat_var(Optional[:obj:`str`]): Name of the latitude variable in the LSM netCDF files. Defaults to 'lat'.
lsm_lon_var(Optional[:obj:`str`]): Name of the longitude variable in the LSM netCDF files. Defaults to 'lon'.
lsm_time_var(Optional[:obj:`str`]): Name of the time variable in the LSM netCDF files. Defaults to 'time'.
lsm_lat_dim(Optional[:obj:`str`]): Name of the latitude dimension in the LSM netCDF files. Defaults to 'lat'.
lsm_lon_dim(Optional[:obj:`str`]): Name of the longitude dimension in the LSM netCDF files. Defaults to 'lon'.
lsm_time_dim(Optional[:obj:`str`]): Name of the time dimension in the LSM netCDF files. Defaults to 'time'.
        output_timezone(Optional[:obj:`tzinfo`]): This is the timezone in which to output dates for the data. Default is the GSSHA model timezone. This option does NOT currently work for NetCDF output.
download_start_datetime(Optional[:obj:`datetime.datetime`]): Datetime to start download.
download_end_datetime(Optional[:obj:`datetime.datetime`]): Datetime to end download.
era_download_data(Optional[:obj:`str`]): You can choose 'era5' or 'interim'. Defaults to 'era5'.
Example::
from datetime import datetime
        from gsshapy.grid import ERAtoGSSHA
        e2g = ERAtoGSSHA(gssha_project_folder='E:\\GSSHA',
gssha_project_file_name='gssha.prj',
lsm_input_folder_path='E:\\GSSHA\\era5-data',
lsm_search_card="*.grib",
#download_start_datetime=datetime(2016,1,2),
#download_end_datetime=datetime(2016,1,4),
)
        out_gage_file = 'E:\\GSSHA\\era5_rain1.gag'
e2g.lsm_precip_to_gssha_precip_gage(out_gage_file,
lsm_data_var="tp",
precip_type="GAGES")
data_var_map_array = [
['precipitation_inc', 'tp'],
['pressure', 'sp'],
['relative_humidity_dew', ['d2m','t2m']],
['wind_speed', ['u10', 'v10']],
['direct_radiation', 'aluvp'],
['diffusive_radiation', 'aluvd'],
['temperature', 't2m'],
['cloud_cover', 'tcc'],
]
e2g.lsm_data_to_arc_ascii(data_var_map_array)
"""
def __init__(self,
gssha_project_folder,
gssha_project_file_name,
lsm_input_folder_path,
lsm_search_card="*.nc",
lsm_lat_var='latitude',
lsm_lon_var='longitude',
lsm_time_var='time',
lsm_lat_dim='latitude',
lsm_lon_dim='longitude',
lsm_time_dim='time',
output_timezone=None,
download_start_datetime=None,
download_end_datetime=None,
era_download_data='era5',
):
"""
        Initializer function for the ERAtoGSSHA class
"""
self.download_start_datetime = download_start_datetime
self.download_end_datetime = download_end_datetime
if era_download_data.lower() not in ('era5', 'interim'):
raise ValueError("Invalid option for era_download_data. "
"Only 'era5' or 'interim' are supported")
self.era_download_data = era_download_data.lower()
super(ERAtoGSSHA, self).__init__(gssha_project_folder,
gssha_project_file_name,
lsm_input_folder_path,
lsm_search_card,
lsm_lat_var,
lsm_lon_var,
lsm_time_var,
lsm_lat_dim,
lsm_lon_dim,
lsm_time_dim,
output_timezone)
def _download(self):
"""download ERA5 data for GSSHA domain"""
# reproject GSSHA grid and get bounds
min_x, max_x, min_y, max_y = self.gssha_grid.bounds(as_geographic=True)
if self.era_download_data == 'era5':
log.info("Downloading ERA5 data ...")
download_era5_for_gssha(self.lsm_input_folder_path,
self.download_start_datetime,
self.download_end_datetime,
leftlon=min_x-0.5,
rightlon=max_x+0.5,
toplat=max_y+0.5,
bottomlat=min_y-0.5)
else:
log.info("Downloading ERA Interim data ...")
download_interim_for_gssha(self.lsm_input_folder_path,
self.download_start_datetime,
self.download_end_datetime,
leftlon=min_x-1,
rightlon=max_x+1,
toplat=max_y+1,
bottomlat=min_y-1)
@property
def xd(self):
"""get xarray dataset file handle to LSM files"""
if self._xd is None:
# download files if the user requests
if None not in (self.download_start_datetime, self.download_end_datetime):
self._download()
self._xd = super(ERAtoGSSHA, self).xd
self._xd.lsm.lon_to_180 = True
return self._xd
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type, time_step=None):
"""
This function loads data from LSM and converts to GSSHA format
"""
super(ERAtoGSSHA, self).\
_load_converted_gssha_data_from_lsm(gssha_var, lsm_var, load_type, time_step)
self.data.lsm.lon_to_180 = True
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaauser_vpntrafficpolicy_binding(base_resource) :
""" Binding class showing the vpntrafficpolicy that can be bound to aaauser.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._acttype = 0
self._username = ""
self.___count = 0
@property
def priority(self) :
"""The priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policy(self) :
"""The policy Name.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
"""The policy Name.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def username(self) :
"""User account to which to bind the policy.<br/>Minimum length = 1.
"""
try :
return self._username
except Exception as e:
raise e
@username.setter
def username(self, username) :
"""User account to which to bind the policy.<br/>Minimum length = 1
"""
try :
self._username = username
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(aaauser_vpntrafficpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.aaauser_vpntrafficpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.username) :
return str(self.username)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = aaauser_vpntrafficpolicy_binding()
updateresource.username = resource.username
updateresource.policy = resource.policy
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [aaauser_vpntrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].username = resource[i].username
updateresources[i].policy = resource[i].policy
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = aaauser_vpntrafficpolicy_binding()
deleteresource.username = resource.username
deleteresource.policy = resource.policy
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [aaauser_vpntrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].username = resource[i].username
deleteresources[i].policy = resource[i].policy
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, username) :
""" Use this API to fetch aaauser_vpntrafficpolicy_binding resources.
"""
try :
obj = aaauser_vpntrafficpolicy_binding()
obj.username = username
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, username, filter_) :
""" Use this API to fetch filtered set of aaauser_vpntrafficpolicy_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = aaauser_vpntrafficpolicy_binding()
obj.username = username
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, username) :
        Use this API to count the aaauser_vpntrafficpolicy_binding resources configured on NetScaler.
"""
try :
obj = aaauser_vpntrafficpolicy_binding()
obj.username = username
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, username, filter_) :
""" Use this API to count the filtered set of aaauser_vpntrafficpolicy_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = aaauser_vpntrafficpolicy_binding()
obj.username = username
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
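# Illustrative sketch (not part of the generated NITRO bindings): listing and counting
# the VPN traffic-policy bindings for one AAA user. `client` is assumed to be an
# authenticated nitro_service session and "user1" is a placeholder account name.
def _example_list_user_trafficpolicy_bindings(client, username="user1"):
    bindings = aaauser_vpntrafficpolicy_binding.get(client, username) or []
    for binding in bindings:
        print("%s (priority %s)" % (binding.policy, binding.priority))
    return aaauser_vpntrafficpolicy_binding.count(client, username)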
class aaauser_vpntrafficpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.aaauser_vpntrafficpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.aaauser_vpntrafficpolicy_binding = [aaauser_vpntrafficpolicy_binding() for _ in range(length)]
|
|
"""
Data helper classes for constructing Transfer API documents. All classes should
extend ``dict``, so they can be passed seamlessly to
:class:`TransferClient <globus_sdk.TransferClient>` methods without
conversion.
"""
from __future__ import unicode_literals
import logging
from globus_sdk.base import safe_stringify
logger = logging.getLogger(__name__)
class TransferData(dict):
"""
Convenience class for constructing a transfer document, to use as the
`data` parameter to
:meth:`submit_transfer <globus_sdk.TransferClient.submit_transfer>`.
At least one item must be added using
:meth:`add_item <globus_sdk.TransferData.add_item>`.
    For compatibility with older code and for users familiar with the raw API,
    sync_level can be ``0``, ``1``, ``2``, or ``3``, but it can also be
    ``"exists"``, ``"size"``, ``"mtime"``, or ``"checksum"`` for greater
    clarity in client code.
If ``submission_id`` isn't passed, one will be fetched automatically. The
submission ID can be pulled out of here to inspect, but the document
can be used as-is multiple times over to retry a potential submission
failure (so there shouldn't be any need to inspect it).
See the
:meth:`submit_transfer <globus_sdk.TransferClient.submit_transfer>`
documentation for example usage.
"""
def __init__(self, transfer_client, source_endpoint, destination_endpoint,
label=None, submission_id=None, sync_level=None,
verify_checksum=False, preserve_timestamp=False,
encrypt_data=False, deadline=None,
recursive_symlinks="ignore", **kwargs):
source_endpoint = safe_stringify(source_endpoint)
destination_endpoint = safe_stringify(destination_endpoint)
logger.info("Creating a new TransferData object")
self["DATA_TYPE"] = "transfer"
self["submission_id"] = submission_id or \
transfer_client.get_submission_id()["value"]
logger.info("TransferData.submission_id = {}"
.format(self["submission_id"]))
self["source_endpoint"] = source_endpoint
logger.info("TransferData.source_endpoint = {}"
.format(source_endpoint))
self["destination_endpoint"] = destination_endpoint
logger.info("TransferData.destination_endpoint = {}"
.format(destination_endpoint))
self["verify_checksum"] = verify_checksum
logger.info("TransferData.verify_checksum = {}"
.format(verify_checksum))
self["preserve_timestamp"] = preserve_timestamp
logger.info("TransferData.preserve_timestamp = {}"
.format(preserve_timestamp))
self["encrypt_data"] = encrypt_data
logger.info("TransferData.encrypt_data = {}"
.format(encrypt_data))
self["recursive_symlinks"] = recursive_symlinks
logger.info("TransferData.recursive_symlinks = {}"
.format(recursive_symlinks))
if label is not None:
self["label"] = label
logger.debug("TransferData.label = {}".format(label))
if deadline is not None:
self["deadline"] = str(deadline)
logger.debug("TransferData.deadline = {}".format(deadline))
# map the sync_level (if it's a nice string) to one of the known int
# values
        # an unrecognized sync_level is passed through unchanged and the API
        # will reject it with an error. This matters because the mapping keeps
        # working even if more levels are added in the future.
if sync_level is not None:
sync_dict = {"exists": 0, "size": 1, "mtime": 2, "checksum": 3}
self['sync_level'] = sync_dict.get(sync_level, sync_level)
logger.info("TransferData.sync_level = {} ({})"
.format(self['sync_level'], sync_level))
self["DATA"] = []
self.update(kwargs)
for option, value in kwargs.items():
logger.info("TransferData.{} = {} (option passed in via kwargs)"
.format(option, value))
def add_item(self, source_path, destination_path, recursive=False):
"""
        Add a file or directory to be transferred. If the item is a symlink
        to a file or directory, the file or directory at the target of
        the symlink will be transferred.
Appends a transfer_item document to the DATA key of the transfer
document.
"""
source_path = safe_stringify(source_path)
destination_path = safe_stringify(destination_path)
item_data = {
"DATA_TYPE": "transfer_item",
"source_path": source_path,
"destination_path": destination_path,
"recursive": recursive,
}
logger.debug('TransferData[{}, {}].add_item: "{}"->"{}"'
.format(self["source_endpoint"],
self["destination_endpoint"],
source_path, destination_path))
self["DATA"].append(item_data)
def add_symlink_item(self, source_path, destination_path):
"""
        Add a symlink to be transferred as a symlink rather than as the
target of the symlink.
Appends a transfer_symlink_item document to the DATA key of the
transfer document.
"""
source_path = safe_stringify(source_path)
destination_path = safe_stringify(destination_path)
item_data = {
"DATA_TYPE": "transfer_symlink_item",
"source_path": source_path,
"destination_path": destination_path,
}
logger.debug('TransferData[{}, {}].add_symlink_item: "{}"->"{}"'
.format(self["source_endpoint"],
self["destination_endpoint"],
source_path, destination_path))
self["DATA"].append(item_data)
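# Illustrative sketch (not part of the SDK): building a transfer document with the class
# above. The endpoint IDs, paths, and label are placeholders, and a real submission needs
# an authenticated TransferClient.
def _example_build_transfer_document(transfer_client, src_endpoint, dst_endpoint):
    tdata = TransferData(transfer_client, src_endpoint, dst_endpoint,
                         label="example transfer",
                         sync_level="checksum")  # mapped to integer level 3 above
    tdata.add_item("/shared/dataset/", "/backup/dataset/", recursive=True)
    tdata.add_item("/shared/notes.txt", "/backup/notes.txt")
    return tdata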
class DeleteData(dict):
"""
Convenience class for constructing a delete document, to use as the
`data` parameter to
:meth:`submit_delete <globus_sdk.TransferClient.submit_delete>`.
At least one item must be added using
:meth:`add_item <globus_sdk.DeleteData.add_item>`.
If ``submission_id`` isn't passed, one will be fetched automatically. The
submission ID can be pulled out of here to inspect, but the document
can be used as-is multiple times over to retry a potential submission
failure (so there shouldn't be any need to inspect it).
See the :meth:`submit_delete <globus_sdk.TransferClient.submit_delete>`
documentation for example usage.
"""
def __init__(self, transfer_client, endpoint, label=None,
submission_id=None, recursive=False, deadline=None, **kwargs):
endpoint = safe_stringify(endpoint)
logger.info("Creating a new DeleteData object")
self["DATA_TYPE"] = "delete"
self["submission_id"] = submission_id or \
transfer_client.get_submission_id()["value"]
logger.info("DeleteData.submission_id = {}"
.format(self["submission_id"]))
self["endpoint"] = endpoint
logger.info("DeleteData.endpoint = {}"
.format(endpoint))
self["recursive"] = recursive
logger.info("DeleteData.recursive = {}"
.format(recursive))
if label is not None:
self["label"] = label
logger.debug("DeleteData.label = {}".format(label))
if deadline is not None:
self["deadline"] = str(deadline)
logger.debug("DeleteData.deadline = {}".format(deadline))
self["DATA"] = []
self.update(kwargs)
for option, value in kwargs.items():
logger.info("DeleteData.{} = {} (option passed in via kwargs)"
.format(option, value))
def add_item(self, path):
"""
        Add a file, directory, or symlink to be deleted. If any of the paths
        are directories, ``recursive`` must be set to True on the top-level
        ``DeleteData``. Symlinks will never be followed, only deleted.
Appends a delete_item document to the DATA key of the delete
document.
"""
path = safe_stringify(path)
item_data = {
"DATA_TYPE": "delete_item",
"path": path,
}
logger.debug('DeleteData[{}].add_item: "{}"'
.format(self["endpoint"], path))
self["DATA"].append(item_data)
|
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adaptive.py."""
from absl.testing import parameterized
import numpy as np
import scipy.stats
import tensorflow.compat.v2 as tf
from robust_loss import adaptive
from robust_loss import util
from robust_loss import wavelet
tf.enable_v2_behavior()
def generate_pixel_toy_image_data(image_width, num_samples, _):
"""Generates pixel data for _test_fitting_toy_image_data_is_correct().
Constructs a "mean" image in RGB pixel space (parametrized by `image_width`)
and draws `num_samples` samples from a normal distribution using that mean,
and returns those samples and their empirical mean as reference.
Args:
image_width: The width and height in pixels of the images being produced.
num_samples: The number of samples to generate.
_: Dummy argument so that this function's interface matches
generate_wavelet_toy_image_data()
Returns:
A tuple of (samples, reference, color_space, representation), where
samples = A set of sampled images of size
(`num_samples`, `image_width`, `image_width`, 3)
reference = The empirical mean of `samples` of size
(`image_width`, `image_width`, 3).
color_space = 'RGB'
representation = 'PIXEL'
"""
color_space = 'RGB'
representation = 'PIXEL'
mu = np.random.uniform(size=(image_width, image_width, 3))
samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
reference = np.mean(samples, 0)
return samples, reference, color_space, representation
def generate_wavelet_toy_image_data(image_width, num_samples,
wavelet_num_levels):
"""Generates wavelet data for testFittingImageDataIsCorrect().
Constructs a "mean" image in the YUV wavelet domain (parametrized by
`image_width`, and `wavelet_num_levels`) and draws `num_samples` samples
from a normal distribution using that mean, and returns RGB images
corresponding to those samples and to the mean (computed in the
specified latent space) of those samples.
Args:
image_width: The width and height in pixels of the images being produced.
num_samples: The number of samples to generate.
wavelet_num_levels: The number of levels in the wavelet decompositions of
the generated images.
Returns:
A tuple of (samples, reference, color_space, representation), where
samples = A set of sampled images of size
(`num_samples`, `image_width`, `image_width`, 3)
reference = The empirical mean of `samples` (computed in YUV Wavelet space
but returned as an RGB image) of size (`image_width`, `image_width`, 3).
color_space = 'YUV'
representation = 'CDF9/7'
"""
color_space = 'YUV'
representation = 'CDF9/7'
samples = []
reference = []
for level in range(wavelet_num_levels):
samples.append([])
reference.append([])
w = image_width // 2**(level + 1)
scaling = 2**level
for _ in range(3):
# Construct the ground-truth pixel band mean.
mu = scaling * np.random.uniform(size=(3, w, w))
# Draw samples from the ground-truth mean.
band_samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
# Take the empirical mean of the samples as a reference.
band_reference = np.mean(band_samples, 0)
samples[-1].append(np.reshape(band_samples, [-1, w, w]))
reference[-1].append(band_reference)
# Handle the residual band.
mu = scaling * np.random.uniform(size=(3, w, w))
band_samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
band_reference = np.mean(band_samples, 0)
samples.append(np.reshape(band_samples, [-1, w, w]))
reference.append(band_reference)
# Collapse and reshape wavelets to be ({_,} width, height, 3).
samples = wavelet.collapse(samples, representation)
reference = wavelet.collapse(reference, representation)
samples = tf.transpose(
tf.reshape(samples, [num_samples, 3, image_width, image_width]),
perm=[0, 2, 3, 1])
reference = tf.transpose(reference, perm=[1, 2, 0])
# Convert into RGB space.
samples = util.syuv_to_rgb(samples).numpy()
reference = util.syuv_to_rgb(reference).numpy()
return samples, reference, color_space, representation
def sample_cauchy_ppf(num_samples):
"""Draws ``num_samples'' samples from a Cauchy distribution.
Because actual sampling is expensive and requires many samples to converge,
here we sample by drawing `num_samples` evenly-spaced values in [0, 1]
and then interpolate into the inverse CDF (aka PPF) of a Cauchy
distribution. This produces "samples" where maximum-likelihood estimation
likely recovers the true distribution even if `num_samples` is small.
Args:
num_samples: The number of samples to draw.
Returns:
A numpy array containing `num_samples` evenly-spaced "samples" from a
zero-mean Cauchy distribution whose scale matches our distribution/loss
when our scale = 1.
"""
spacing = 1. / num_samples
p = np.arange(0., 1., spacing) + spacing / 2.
return scipy.stats.cauchy(0., np.sqrt(2.)).ppf(p)
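# For example, sample_cauchy_ppf(4) evaluates the Cauchy PPF at
# p = [0.125, 0.375, 0.625, 0.875]: evenly spaced bin centers in (0, 1), so
# the resulting values behave like an idealized, noise-free sample.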
def sample_normal_ppf(num_samples):
"""Draws ``num_samples'' samples from a Normal distribution.
Because actual sampling is expensive and requires many samples to converge,
here we sample by drawing `num_samples` evenly-spaced values in [0, 1]
and then interpolate into the inverse CDF (aka PPF) of a Normal
distribution. This produces "samples" where maximum-likelihood estimation
likely recovers the true distribution even if `num_samples` is small.
Args:
num_samples: The number of samples to draw.
Returns:
A numpy array containing `num_samples` evenly-spaced "samples" from a
zero-mean unit-scale Normal distribution.
"""
spacing = 1. / num_samples
p = np.arange(0., 1., spacing) + spacing / 2.
return scipy.stats.norm(0., 1.).ppf(p)
def sample_nd_mixed_data(n, m, float_dtype):
"""`n` Samples from `m` scaled+shifted Cauchy and Normal distributions."""
samples0 = sample_cauchy_ppf(n)
samples2 = sample_normal_ppf(n)
mu = np.random.normal(size=m)
alpha = (np.random.uniform(size=m) > 0.5) * 2
scale = np.exp(np.clip(np.random.normal(size=m), -3., 3.))
samples = (
np.tile(samples0[:, np.newaxis], [1, m]) *
(alpha[np.newaxis, :] == 0.) + np.tile(samples2[:, np.newaxis], [1, m]) *
(alpha[np.newaxis, :] == 2.)) * scale[np.newaxis, :] + mu[np.newaxis, :]
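  # Returned shapes: `samples` is (n, m); `mu`, `alpha`, `scale` are (m,).
  # Columns of `samples` with alpha == 0 are Cauchy-distributed, columns with
  # alpha == 2 are Normal-distributed, scaled by `scale` and shifted by `mu`.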
return [float_dtype(x) for x in [samples, mu, alpha, scale]]
class AdaptiveTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(AdaptiveTest, self).setUp()
np.random.seed(0)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testInitialAlphaAndScaleAreCorrect(self, float_dtype):
"""Tests that `alpha` and `scale` are initialized as expected."""
for i in range(8):
# Generate random ranges for alpha and scale.
alpha_lo = float_dtype(np.random.uniform())
alpha_hi = float_dtype(np.random.uniform() + 1.)
# Half of the time pick a random initialization for alpha, the other half
# use the default value.
if i % 2 == 0:
alpha_init = float_dtype(alpha_lo + np.random.uniform() *
(alpha_hi - alpha_lo))
true_alpha_init = alpha_init
else:
alpha_init = None
true_alpha_init = (alpha_lo + alpha_hi) / 2.
scale_init = float_dtype(np.random.uniform() + 0.5)
scale_lo = float_dtype(np.random.uniform() * 0.1)
adaptive_lossfun = adaptive.AdaptiveLossFunction(
10,
float_dtype,
alpha_lo=alpha_lo,
alpha_hi=alpha_hi,
alpha_init=alpha_init,
scale_lo=scale_lo,
scale_init=scale_init)
alpha = adaptive_lossfun.alpha()[0, :].numpy()
scale = adaptive_lossfun.scale()[0, :].numpy()
self.assertAllClose(alpha, true_alpha_init * np.ones_like(alpha))
self.assertAllClose(scale, scale_init * np.ones_like(alpha))
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testFixedAlphaAndScaleAreCorrect(self, float_dtype):
"""Tests that fixed alphas and scales do not change during optimization)."""
for _ in range(8):
alpha_lo = float_dtype(np.random.uniform() * 2.)
alpha_hi = alpha_lo
scale_init = float_dtype(np.random.uniform() + 0.5)
scale_lo = scale_init
samples = float_dtype(np.random.uniform(size=(10, 10)))
# We must construct some variable for TF to attempt to optimize.
mu = tf.Variable(
tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
adaptive_lossfun = adaptive.AdaptiveLossFunction(
mu.shape[0],
float_dtype,
alpha_lo=alpha_lo,
alpha_hi=alpha_hi,
scale_lo=scale_lo,
scale_init=scale_init)
trainable_variables = list(adaptive_lossfun.trainable_variables) + [mu]
optimizer = tf.keras.optimizers.SGD(learning_rate=1000)
# pylint: disable=cell-var-from-loop
optimizer.minimize(
lambda: tf.reduce_mean(adaptive_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
alpha = adaptive_lossfun.alpha()[0, :].numpy()
scale = adaptive_lossfun.scale()[0, :].numpy()
alpha_init = (alpha_lo + alpha_hi) / 2.
self.assertAllClose(alpha, alpha_init * np.ones_like(alpha))
self.assertAllClose(scale, scale_init * np.ones_like(alpha))
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testFittingToyNdMixedDataIsCorrect(self, float_dtype):
"""Tests that minimizing the adaptive loss recovers the true model.
Here we generate a 2D array of samples drawn from a mix of scaled and
shifted Cauchy and Normal distributions. We then minimize our loss with
respect to the mean, scale, and shape of each distribution, and check that
after minimization the shape parameter is near-zero for the Cauchy data and
near 2 for the Normal data, and that the estimated means and scales are
accurate.
Args:
float_dtype: The type (np.float32 or np.float64) of data to test.
"""
samples, mu_true, alpha_true, scale_true = sample_nd_mixed_data(
100, 8, float_dtype)
mu = tf.Variable(tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
adaptive_lossfun = adaptive.AdaptiveLossFunction(mu.shape[0], float_dtype)
trainable_variables = list(adaptive_lossfun.trainable_variables) + [mu]
init_rate = 1.
final_rate = 0.1
num_iters = 201
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
init_rate, 1, (final_rate / init_rate)**(1. / num_iters))
optimizer = tf.keras.optimizers.Adam(
learning_rate=learning_rate, beta_1=0.5, beta_2=0.9, epsilon=1e-08)
for _ in range(num_iters):
optimizer.minimize(
lambda: tf.reduce_mean(adaptive_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
mu = mu.numpy()
alpha = adaptive_lossfun.alpha()[0, :].numpy()
scale = adaptive_lossfun.scale()[0, :].numpy()
for a, b in [(alpha, alpha_true), (scale, scale_true), (mu, mu_true)]:
self.assertAllClose(a, b * np.ones_like(a), rtol=0.1, atol=0.1)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testFittingToyNdMixedDataIsCorrectStudentsT(self, float_dtype):
"""Tests that minimizing the Student's T loss recovers the true model.
Here we generate a 2D array of samples drawn from a mix of scaled and
shifted Cauchy and Normal distributions. We then minimize our loss with
respect to the mean, scale, and shape of each distribution, and check that
after minimization the log-df parameter is near-zero for the Cauchy data and
very large for the Normal data, and that the estimated means and scales are
accurate.
Args:
float_dtype: The type (np.float32 or np.float64) of data to test.
"""
samples, mu_true, alpha_true, scale_true = sample_nd_mixed_data(
100, 8, float_dtype)
mu = tf.Variable(tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
students_lossfun = adaptive.StudentsTLossFunction(mu.shape[0], float_dtype)
trainable_variables = list(students_lossfun.trainable_variables) + [mu]
init_rate = 1.
final_rate = 0.1
num_iters = 201
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
init_rate, 1, (final_rate / init_rate)**(1. / num_iters))
optimizer = tf.keras.optimizers.Adam(
learning_rate=learning_rate, beta_1=0.5, beta_2=0.9, epsilon=1e-08)
for _ in range(num_iters):
optimizer.minimize(
lambda: tf.reduce_mean(students_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
mu = mu.numpy()
df = students_lossfun.df()[0, :].numpy()
scale = students_lossfun.scale()[0, :].numpy()
for ldf, a_true in zip(np.log(df), alpha_true):
if a_true == 0:
self.assertAllClose(ldf, 0., rtol=0.1, atol=0.1)
elif a_true == 2:
self.assertAllGreater(ldf, 4)
scale /= np.sqrt(2. - (alpha_true / 2.))
for a, b in [(scale, scale_true), (mu, mu_true)]:
self.assertAllClose(a, b * np.ones_like(a), rtol=0.1, atol=0.1)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testLossfunPreservesDtype(self, float_dtype):
"""Checks the loss's outputs have the same precisions as its input."""
num_dims = 8
samples, _, _, _ = sample_nd_mixed_data(100, num_dims, float_dtype)
lossfun = adaptive.AdaptiveLossFunction(num_dims, float_dtype)
loss = lossfun(samples)
self.assertDTypeEqual(loss, float_dtype)
self.assertDTypeEqual(lossfun.alpha(), float_dtype)
self.assertDTypeEqual(lossfun.scale(), float_dtype)
@parameterized.named_parameters(('Single', np.float32),
('Double', np.float64))
def testImageLossfunPreservesDtype(self, float_dtype):
"""Tests that the image lossfun's outputs precisions match its input."""
x = float_dtype(np.random.uniform(size=(10, 64, 64, 3)))
lossfun = adaptive.AdaptiveImageLossFunction(x.shape[1:], float_dtype)
loss = lossfun(x).numpy()
alpha = lossfun.alpha().numpy()
scale = lossfun.scale().numpy()
self.assertDTypeEqual(loss, float_dtype)
self.assertDTypeEqual(alpha, float_dtype)
self.assertDTypeEqual(scale, float_dtype)
@parameterized.named_parameters(('Wavelet', generate_wavelet_toy_image_data),
('Pixel', generate_pixel_toy_image_data))
def testFittingImageDataIsCorrect(self, image_data_callback):
"""Tests that minimizing the adaptive image loss recovers the true model.
Here we generate a stack of color images drawn from a normal distribution,
and then minimize image_lossfun() with respect to the mean and scale of each
distribution, and check that after minimization the estimated means are
close to the true means.
Args:
image_data_callback: The function used to generate the training data and
parameters used during optimization.
"""
# Generate toy data.
image_width = 4
num_samples = 10
wavelet_num_levels = 2 # Ignored by generate_pixel_toy_image_data().
(samples, reference, color_space,
representation) = image_data_callback(image_width, num_samples,
wavelet_num_levels)
# Construct the loss.
mu = tf.Variable(tf.zeros(tf.shape(reference), samples.dtype))
image_lossfun = adaptive.AdaptiveImageLossFunction(
[image_width, image_width, 3],
samples.dtype,
color_space=color_space,
representation=representation,
wavelet_num_levels=wavelet_num_levels,
alpha_lo=2,
alpha_hi=2)
trainable_variables = list(image_lossfun.trainable_variables) + [mu]
init_rate = 1.
final_rate = 0.01
num_iters = 201
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
init_rate, 1, (final_rate / init_rate)**(1. / num_iters))
optimizer = tf.keras.optimizers.Adam(
learning_rate=learning_rate, beta_1=0.5, beta_2=0.9, epsilon=1e-08)
for _ in range(num_iters):
optimizer.minimize(
lambda: tf.reduce_mean(image_lossfun(samples - mu[tf.newaxis, :])),
trainable_variables)
mu = mu.numpy()
self.assertAllClose(mu, reference, rtol=0.01, atol=0.01)
def testLossfunChecksShape(self):
"""Tests that the image lossfun's checks input shapes."""
x1 = np.ones((10, 24), np.float32)
x2 = np.ones((10, 16), np.float32)
lossfun = adaptive.AdaptiveLossFunction(x1.shape[1], np.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
lossfun(x2)
def testImageLossfunChecksShape(self):
"""Tests that the image lossfun's checks input shapes."""
x1 = np.ones((10, 16, 24, 3), np.float32)
x2 = np.ones((10, 16, 16, 3), np.float32)
lossfun = adaptive.AdaptiveImageLossFunction(x1.shape[1:], np.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
lossfun(x2)
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2014, Huawei, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import context
from neutron.common import exceptions
import eventlet
from keystoneclient.v2_0 import client as kc
from keystoneclient.v3 import client as kc_v3
from oslo.config import cfg
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
logger = logging.getLogger(
'neutron.plugins.cascading_proxy_agent.keystoneclient')
class KeystoneClient(object):
"""
    Wrap keystone client so we can encapsulate logic used in resources.
    Note this is intended to be initialized from a resource on a per-session
    basis, so the session context is passed in on initialization.
    Also note that a copy of this is created for every resource as
    self.keystone() via the code in engine/client.py, so there should not be
    any need to directly instantiate instances of this class inside resources
    themselves.
    """
def __init__(self, context):
# We have to maintain two clients authenticated with keystone:
# - ec2 interface is v2.0 only
# - trusts is v3 only
# If a trust_id is specified in the context, we immediately
# authenticate so we can populate the context with a trust token
# otherwise, we delay client authentication until needed to avoid
# unnecessary calls to keystone.
#
# Note that when you obtain a token using a trust, it cannot be
# used to reauthenticate and get another token, so we have to
# get a new trust-token even if context.auth_token is set.
#
# - context.auth_url is expected to contain the v2.0 keystone endpoint
self.context = context
self._client_v2 = None
self._client_v3 = None
if self.context.trust_id:
# Create a connection to the v2 API, with the trust_id, this
# populates self.context.auth_token with a trust-scoped token
self._client_v2 = self._v2_client_init()
@property
def client_v3(self):
if not self._client_v3:
# Create connection to v3 API
self._client_v3 = self._v3_client_init()
return self._client_v3
@property
def client_v2(self):
if not self._client_v2:
self._client_v2 = self._v2_client_init()
return self._client_v2
def _v2_client_init(self):
kwargs = {
'auth_url': self.context.auth_url,
'insecure': self.context.insecure
}
auth_kwargs = {}
# Note try trust_id first, as we can't reuse auth_token in that case
if self.context.trust_id is not None:
# We got a trust_id, so we use the admin credentials
# to authenticate, then re-scope the token to the
# trust impersonating the trustor user.
# Note that this currently requires the trustor tenant_id
# to be passed to the authenticate(), unlike the v3 call
kwargs.update(self._service_admin_creds(api_version=2))
auth_kwargs['trust_id'] = self.context.trust_id
auth_kwargs['tenant_id'] = self.context.tenant_id
elif self.context.auth_token is not None:
kwargs['tenant_name'] = self.context.tenant
kwargs['token'] = self.context.auth_token
elif self.context.password is not None:
kwargs['username'] = self.context.username
kwargs['password'] = self.context.password
kwargs['tenant_name'] = self.context.tenant
kwargs['tenant_id'] = self.context.tenant_id
else:
logger.error("Keystone v2 API connection failed, no password or "
"auth_token!")
raise exceptions.NotAuthorized()
client_v2 = kc.Client(**kwargs)
client_v2.authenticate(**auth_kwargs)
# If we are authenticating with a trust auth_kwargs are set, so set
# the context auth_token with the re-scoped trust token
if auth_kwargs:
# Sanity check
if not client_v2.auth_ref.trust_scoped:
logger.error("v2 trust token re-scoping failed!")
raise exceptions.NotAuthorized()
# All OK so update the context with the token
self.context.auth_token = client_v2.auth_ref.auth_token
self.context.auth_url = kwargs.get('auth_url')
return client_v2
@staticmethod
def _service_admin_creds(api_version=2):
# Import auth_token to have keystone_authtoken settings setup.
importutils.import_module('keystoneclient.middleware.auth_token')
creds = {
'username': cfg.CONF.keystone_authtoken.admin_user,
'password': cfg.CONF.keystone_authtoken.admin_password,
}
if api_version >= 3:
creds['auth_url'] =\
cfg.CONF.keystone_authtoken.auth_uri.replace('v2.0', 'v3')
creds['project_name'] =\
cfg.CONF.keystone_authtoken.admin_tenant_name
else:
creds['auth_url'] = cfg.CONF.keystone_authtoken.auth_uri
creds['tenant_name'] =\
cfg.CONF.keystone_authtoken.admin_tenant_name
return creds
def _v3_client_init(self):
kwargs = {
'insecure': self.context.insecure
}
if self.context.auth_token is not None:
kwargs['project_name'] = self.context.tenant
kwargs['token'] = self.context.auth_token
kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3')
kwargs['endpoint'] = kwargs['auth_url']
elif self.context.trust_id is not None:
# We got a trust_id, so we use the admin credentials and get a
# Token back impersonating the trustor user
kwargs.update(self._service_admin_creds(api_version=3))
kwargs['trust_id'] = self.context.trust_id
elif self.context.password is not None:
kwargs['username'] = self.context.username
kwargs['password'] = self.context.password
kwargs['project_name'] = self.context.tenant
kwargs['project_id'] = self.context.tenant_id
kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3')
kwargs['endpoint'] = kwargs['auth_url']
else:
logger.error("Keystone v3 API connection failed, no password or "
"auth_token!")
raise exceptions.NotAuthorized()
client = kc_v3.Client(**kwargs)
# Have to explicitly authenticate() or client.auth_ref is None
client.authenticate()
return client
def create_trust_context(self):
"""
If cfg.CONF.deferred_auth_method is trusts, we create a
trust using the trustor identity in the current context, with the
trustee as the heat service user and return a context containing
        the new trust_id.
        If deferred_auth_method != trusts, or the current context already
        contains a trust_id, we do nothing and return the current context.
"""
if self.context.trust_id:
return self.context
        # We need the service admin user ID (not name), as the trustor user
        # can't look up the ID in keystoneclient unless they're admin. We work
        # around this by creating a temporary admin client connection and then
        # getting the user ID from the auth_ref.
admin_creds = self._service_admin_creds()
admin_client = kc.Client(**admin_creds)
trustee_user_id = admin_client.auth_ref.user_id
trustor_user_id = self.client_v3.auth_ref.user_id
trustor_project_id = self.client_v3.auth_ref.project_id
roles = cfg.CONF.trusts_delegated_roles
trust = self.client_v3.trusts.create(trustor_user=trustor_user_id,
trustee_user=trustee_user_id,
project=trustor_project_id,
impersonation=True,
role_names=roles)
trust_context = context.RequestContext.from_dict(
self.context.to_dict())
trust_context.trust_id = trust.id
trust_context.trustor_user_id = trustor_user_id
return trust_context
def delete_trust(self, trust_id):
"""
Delete the specified trust.
"""
self.client_v3.trusts.delete(trust_id)
def create_stack_user(self, username, password=''):
"""
Create a user defined as part of a stack, either via template
or created internally by a resource. This user will be added to
        the heat_stack_user_role as defined in the config.
        Returns the keystone ID of the resulting user.
"""
        if len(username) > 64:
logger.warning("Truncating the username %s to the last 64 "
"characters." % username)
# get the last 64 characters of the username
username = username[-64:]
user = self.client_v2.users.create(username,
password,
'%[email protected]' %
username,
tenant_id=self.context.tenant_id,
enabled=True)
# We add the new user to a special keystone role
# This role is designed to allow easier differentiation of the
# heat-generated "stack users" which will generally have credentials
# deployed on an instance (hence are implicitly untrusted)
roles = self.client_v2.roles.list()
stack_user_role = [r.id for r in roles
if r.name == cfg.CONF.heat_stack_user_role]
if len(stack_user_role) == 1:
role_id = stack_user_role[0]
logger.debug("Adding user %s to role %s" % (user.id, role_id))
self.client_v2.roles.add_user_role(user.id, role_id,
self.context.tenant_id)
else:
logger.error("Failed to add user %s to role %s, check role exists!"
% (username, cfg.CONF.heat_stack_user_role))
return user.id
def delete_stack_user(self, user_id):
user = self.client_v2.users.get(user_id)
# FIXME (shardy) : need to test, do we still need this retry logic?
# Copied from user.py, but seems like something we really shouldn't
# need to do, no bug reference in the original comment (below)...
        # temporary hack to work around an openstack bug.
        # seems you can't delete a user the first time - you have to try
        # a couple of times - go figure!
tmo = eventlet.Timeout(10)
status = 'WAITING'
reason = 'Timed out trying to delete user'
try:
while status == 'WAITING':
try:
user.delete()
status = 'DELETED'
except Exception as ce:
reason = str(ce)
logger.warning("Problem deleting user %s: %s" %
(user_id, reason))
eventlet.sleep(1)
except eventlet.Timeout as t:
if t is not tmo:
# not my timeout
raise
else:
status = 'TIMEDOUT'
finally:
tmo.cancel()
if status != 'DELETED':
            raise RuntimeError(reason)
def delete_ec2_keypair(self, user_id, accesskey):
self.client_v2.ec2.delete(user_id, accesskey)
def get_ec2_keypair(self, user_id):
        # We make the assumption that each user will only have one
        # ec2 keypair. It's not clear whether AWS allows multiple AccessKey
        # resources to be associated with a single User resource, but for
        # simplicity we assume that here for now.
cred = self.client_v2.ec2.list(user_id)
if len(cred) == 0:
return self.client_v2.ec2.create(user_id, self.context.tenant_id)
if len(cred) == 1:
return cred[0]
else:
logger.error("Unexpected number of ec2 credentials %s for %s" %
(len(cred), user_id))
def disable_stack_user(self, user_id):
# FIXME : This won't work with the v3 keystone API
self.client_v2.users.update_enabled(user_id, False)
def enable_stack_user(self, user_id):
# FIXME : This won't work with the v3 keystone API
self.client_v2.users.update_enabled(user_id, True)
def url_for(self, **kwargs):
return self.client_v2.service_catalog.url_for(**kwargs)
@property
def auth_token(self):
return self.client_v2.auth_token
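# A minimal usage sketch (illustrative only; it assumes a request context
# object carrying auth_url, auth_token or username/password, tenant and
# tenant_id, as built elsewhere in the proxy agent):
#
#     ks = KeystoneClient(req_context)
#     trust_ctx = ks.create_trust_context()  # no-op if a trust_id is set
#     user_id = ks.create_stack_user('example-stack-user', password='secret')
#     ks.delete_stack_user(user_id)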
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of the `Sequential` model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Sequential', 'keras.Sequential')
class Sequential(Model):
"""Linear stack of layers.
Arguments:
layers: list of layers to add to the model.
Example:
```python
# Optionally, the first layer can receive an `input_shape` argument:
model = Sequential()
model.add(Dense(32, input_shape=(500,)))
# Afterwards, we do automatic shape inference:
model.add(Dense(32))
# This is identical to the following:
model = Sequential()
model.add(Dense(32, input_dim=500))
# And to the following:
model = Sequential()
model.add(Dense(32, batch_input_shape=(None, 500)))
# Note that you can also omit the `input_shape` argument:
# In that case the model gets built the first time you call `fit` (or other
# training and evaluation methods).
model = Sequential()
model.add(Dense(32))
model.add(Dense(32))
model.compile(optimizer=optimizer, loss=loss)
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
# Note that when using this delayed-build pattern (no input shape specified),
# the model doesn't have any weights until the first call
# to a training/evaluation method (since it isn't yet built):
model = Sequential()
model.add(Dense(32))
model.add(Dense(32))
model.weights # returns []
# Whereas if you specify the input shape, the model gets built continuously
# as you are adding layers:
model = Sequential()
model.add(Dense(32, input_shape=(500,)))
model.add(Dense(32))
model.weights # returns list of length 4
When using the delayed-build pattern (no input shape specified), you can
choose to manually build your model by calling `build(batch_input_shape)`:
model = Sequential()
model.add(Dense(32))
model.add(Dense(32))
model.build((None, 500))
model.weights # returns list of length 4
```
"""
def __init__(self, layers=None, name=None):
super(Sequential, self).__init__(name=name)
# Add to the model any layers passed to the constructor.
if layers:
for layer in layers:
self.add(layer)
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
if self._layers and isinstance(self._layers[0], InputLayer):
return self._layers[1:]
return self._layers
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
Arguments:
layer: layer instance.
Raises:
TypeError: If `layer` is not a layer instance.
ValueError: In case the `layer` argument does not
know its input shape.
ValueError: In case the `layer` argument has
multiple output tensors, or is already connected
somewhere else (forbidden in `Sequential` models).
"""
if not isinstance(layer, base_layer.Layer):
raise TypeError('The added layer must be '
'an instance of class Layer. '
'Found: ' + str(layer))
self.built = False
if not self._layers:
set_inputs = False
# First layer in model: check that it is an input layer.
if not isinstance(layer, InputLayer):
# Create an input tensor and call `layer` on the input tensor.
# First, we need to infer the expected input shape and dtype.
first_layer = layer
if isinstance(layer, (Model, Sequential)):
# We were passed a model as first layer.
# This requires a specific way to figure out the
# input shape and dtype.
if not layer.layers:
raise ValueError('Cannot add an empty model '
'to a `Sequential` model.')
# In case of nested models: recover the first layer
# of the deepest model to infer input shape and dtype.
first_layer = layer.layers[0]
while isinstance(first_layer, (Model, Sequential)):
first_layer = first_layer.layers[0]
if hasattr(first_layer, '_batch_input_shape'):
batch_shape = first_layer._batch_input_shape
dtype = first_layer.dtype
# Instantiate the input layer.
x = Input(
batch_shape=batch_shape,
dtype=dtype,
name=layer.name + '_input')
# This will build the current layer
# and create the node connecting the current layer
# to the input layer we just created.
layer(x)
set_inputs = True
else:
# The layer doesn't know about its expected shape. We will have to
# build the model lazily on `fit`/etc.
batch_shape = None
else:
# Corner case where the user passes an InputLayer layer via `add`.
assert len(layer._inbound_nodes[-1].output_tensors) == 1
set_inputs = True
if set_inputs:
if len(layer._inbound_nodes[-1].output_tensors) != 1:
raise ValueError('All layers in a Sequential model '
'should have a single output tensor. '
'For multi-output layers, '
'use the functional API.')
self.outputs = [layer._inbound_nodes[-1].output_tensors[0]]
self.inputs = layer_utils.get_source_inputs(self.outputs[0])
elif self.outputs:
output_tensor = layer(self.outputs[0])
if isinstance(output_tensor, list):
raise TypeError('All layers in a Sequential model '
'should have a single output tensor. '
'For multi-output layers, '
'use the functional API.')
self.outputs = [output_tensor]
if self.inputs:
self.build()
else:
self._layers.append(layer)
def pop(self):
"""Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model.
"""
if not self.layers:
raise TypeError('There are no layers in the model.')
self._layers.pop()
self.built = False
if not self.layers:
self.outputs = None
self.inputs = None
elif self.outputs:
self.layers[-1]._outbound_nodes = []
self.outputs = [self.layers[-1].output]
self.build()
def build(self, input_shape=None):
if input_shape and not self.inputs:
batch_shape = tuple(input_shape)
dtype = K.floatx()
x = Input(
batch_shape=batch_shape, dtype=dtype, name=self.name + '_input')
self.inputs = [x]
for layer in self._layers:
x = layer(x)
self.outputs = [x]
# Make sure that the model's input shape will be preserved during
# serialization.
if self._layers:
self._layers[0]._batch_input_shape = batch_shape
if self.inputs:
self._init_graph_network(self.inputs, self.outputs, name=self.name)
self.built = True
if self._layers:
self._track_layers(self._layers)
def predict_proba(self, x, batch_size=32, verbose=0):
"""Generates class probability predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of probability predictions.
"""
preds = self.predict(x, batch_size, verbose)
if preds.min() < 0. or preds.max() > 1.:
logging.warning('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
def predict_classes(self, x, batch_size=32, verbose=0):
"""Generate class predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
"""
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def get_config(self):
config = []
for layer in self.layers:
config.append({
'class_name': layer.__class__.__name__,
'config': layer.get_config()
})
return copy.deepcopy(config)
@classmethod
def from_config(cls, config, custom_objects=None):
model = cls()
for conf in config:
layer = layer_module.deserialize(conf, custom_objects=custom_objects)
model.add(layer)
return model
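# A minimal round-trip sketch for get_config()/from_config() (illustrative
# only; the layer size and input shape are arbitrary):
#
#     model = Sequential([layer_module.Dense(4, input_shape=(8,))])
#     config = model.get_config()        # list of per-layer config dicts
#     clone = Sequential.from_config(config)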
|
|
# -*- coding: utf-8 -*-
import copy
import json
import sys
import traceback
from django.conf import settings
from django.core.files.storage import default_storage as storage
import mock
from nose.plugins.attrib import attr
from nose.tools import eq_
from pyquery import PyQuery as pq
import waffle
import amo
import amo.tests
from addons.models import Addon
from amo.tests import assert_no_validation_errors
from amo.tests.test_helpers import get_image_path
from amo.urlresolvers import reverse
from applications.models import AppVersion, Application
from constants.applications import FIREFOX
from devhub.tasks import compatibility_check
from devhub.views import make_validation_result
from files.helpers import copyfileobj
from files.models import File, FileUpload, FileValidation
from files.tests.test_models import UploadTest as BaseUploadTest
from files.utils import parse_addon
from users.models import UserProfile
from zadmin.models import ValidationResult
class TestUploadValidation(BaseUploadTest):
fixtures = ['base/apps', 'base/users',
'devhub/invalid-id-uploaded-xpi.json']
def setUp(self):
super(TestUploadValidation, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
def test_no_html_in_messages(self):
upload = FileUpload.objects.get(name='invalid-id-20101206.xpi')
resp = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid, 'json']))
eq_(resp.status_code, 200)
data = json.loads(resp.content)
msg = data['validation']['messages'][1]
eq_(msg['message'], 'The value of <em:id> is invalid.')
eq_(sorted(msg['context']),
[[u'<foo/>'], u'<em:description>...'])
def test_date_on_upload(self):
upload = FileUpload.objects.get(name='invalid-id-20101206.xpi')
resp = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid]))
eq_(resp.status_code, 200)
doc = pq(resp.content)
eq_(doc('td').text(), 'December 6, 2010')
class TestUploadErrors(BaseUploadTest):
fixtures = ('base/apps', 'base/addon_3615', 'base/users')
def setUp(self):
super(TestUploadErrors, self).setUp()
self.client.login(username='[email protected]',
password='password')
@mock.patch.object(waffle, 'flag_is_active')
def test_dupe_uuid(self, flag_is_active):
flag_is_active.return_value = True
addon = Addon.objects.get(pk=3615)
d = parse_addon(self.get_upload('extension.xpi'))
addon.update(guid=d['guid'])
dupe_xpi = self.get_upload('extension.xpi')
res = self.client.get(reverse('devhub.upload_detail',
args=[dupe_xpi.uuid, 'json']))
eq_(res.status_code, 400, res.content)
data = json.loads(res.content)
eq_(data['validation']['messages'],
[{'tier': 1, 'message': 'Duplicate UUID found.',
'type': 'error', 'fatal': True}])
eq_(data['validation']['ending_tier'], 1)
class TestFileValidation(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'base/platforms',
'devhub/addon-validation-1']
def setUp(self):
assert self.client.login(username='[email protected]', password='password')
self.user = UserProfile.objects.get(email='[email protected]')
self.file_validation = FileValidation.objects.get(pk=1)
self.file = self.file_validation.file
with storage.open(self.file.file_path, 'w') as f:
f.write('<pretend this is an xpi>\n')
self.addon = self.file.version.addon
args = [self.addon.slug, self.file.id]
self.url = reverse('devhub.file_validation', args=args)
self.json_url = reverse('devhub.json_file_validation', args=args)
def test_version_list(self):
r = self.client.get(self.addon.get_dev_url('versions'))
eq_(r.status_code, 200)
a = pq(r.content)('td.file-validation a')
eq_(a.text(), '0 errors, 0 warnings')
eq_(a.attr('href'), self.url)
def test_results_page(self):
r = self.client.get(self.url, follow=True)
eq_(r.status_code, 200)
eq_(r.context['addon'], self.addon)
doc = pq(r.content)
assert not doc('#site-nav').hasClass('app-nav'), (
'Expected add-ons devhub nav')
eq_(doc('header h2').text(),
u'Validation Results for searchaddon11102010-20101217.xml')
eq_(doc('#addon-validator-suite').attr('data-validateurl'),
self.json_url)
def test_only_dev_can_see_results(self):
self.client.logout()
assert self.client.login(username='[email protected]',
password='password')
eq_(self.client.head(self.url, follow=True).status_code, 403)
def test_only_dev_can_see_json_results(self):
self.client.logout()
assert self.client.login(username='[email protected]',
password='password')
eq_(self.client.head(self.json_url, follow=True).status_code, 403)
def test_editor_can_see_results(self):
self.client.logout()
assert self.client.login(username='[email protected]',
password='password')
eq_(self.client.head(self.url, follow=True).status_code, 200)
def test_editor_can_see_json_results(self):
self.client.logout()
assert self.client.login(username='[email protected]',
password='password')
eq_(self.client.head(self.json_url, follow=True).status_code, 200)
def test_no_html_in_messages(self):
r = self.client.post(self.json_url, follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
msg = data['validation']['messages'][0]
eq_(msg['message'], 'The value of <em:id> is invalid.')
eq_(sorted(msg['context']),
[[u'<foo/>'], u'<em:description>...'])
@mock.patch('files.models.File.has_been_validated')
def test_json_results_post(self, has_been_validated):
has_been_validated.__nonzero__.return_value = False
eq_(self.client.post(self.json_url).status_code, 200)
has_been_validated.__nonzero__.return_value = True
eq_(self.client.post(self.json_url).status_code, 200)
@mock.patch('files.models.File.has_been_validated')
def test_json_results_get(self, has_been_validated):
has_been_validated.__nonzero__.return_value = True
eq_(self.client.get(self.json_url).status_code, 200)
has_been_validated.__nonzero__.return_value = False
eq_(self.client.get(self.json_url).status_code, 405)
class TestValidateAddon(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestValidateAddon, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
def test_login_required(self):
self.client.logout()
r = self.client.get(reverse('devhub.validate_addon'))
eq_(r.status_code, 302)
def test_context(self):
r = self.client.get(reverse('devhub.validate_addon'))
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('#upload-addon').attr('data-upload-url'),
reverse('devhub.standalone_upload'))
class TestValidateFile(BaseUploadTest):
fixtures = ['base/apps', 'base/users', 'base/addon_3615',
'devhub/addon-file-100456', 'base/platforms']
def setUp(self):
super(TestValidateFile, self).setUp()
assert self.client.login(username='[email protected]', password='password')
self.user = UserProfile.objects.get(email='[email protected]')
self.file = File.objects.get(pk=100456)
# Move the file into place as if it were a real file
with storage.open(self.file.file_path, 'w') as dest:
copyfileobj(open(self.file_path('invalid-id-20101206.xpi')),
dest)
self.addon = self.file.version.addon
def tearDown(self):
super(TestValidateFile, self).tearDown()
if storage.exists(self.file.file_path):
storage.delete(self.file.file_path)
@attr('validator')
def test_lazy_validate(self):
r = self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
assert_no_validation_errors(data)
msg = data['validation']['messages'][0]
eq_(msg['message'], 'The value of <em:id> is invalid.')
def test_time(self):
r = self.client.post(reverse('devhub.file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
doc = pq(r.content)
assert doc('time').text()
@mock.patch.object(settings, 'EXPOSE_VALIDATOR_TRACEBACKS', False)
@mock.patch('devhub.tasks.run_validator')
def test_validator_errors(self, v):
v.side_effect = ValueError('catastrophic failure in amo-validator')
r = self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
eq_(data['validation'], '')
eq_(data['error'].strip(),
'ValueError: catastrophic failure in amo-validator')
@mock.patch('devhub.tasks.run_validator')
def test_validator_sets_binary_flag_for_extensions(self, v):
v.return_value = json.dumps({
"errors": 0,
"success": True,
"warnings": 0,
"notices": 0,
"message_tree": {},
"messages": [],
"metadata": {
"contains_binary_extension": True,
"version": "1.0",
"name": "gK0Bes Bot",
"id": "gkobes@gkobes"
}
})
eq_(self.addon.binary, False)
r = self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
assert_no_validation_errors(data)
addon = Addon.objects.get(pk=self.addon.id)
eq_(addon.binary, True)
@mock.patch('validator.validate.validate')
    def test_compat_test_disabled_for_regular_validation(self, v):
self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
assert not v.call_args[1].get('compat_test', True)
@mock.patch('devhub.tasks.run_validator')
def test_ending_tier_is_preserved(self, v):
v.return_value = json.dumps({
"errors": 0,
"success": True,
"warnings": 0,
"notices": 0,
"message_tree": {},
"messages": [],
"ending_tier": 5,
"metadata": {
"contains_binary_extension": True,
"version": "1.0",
"name": "gK0Bes Bot",
"id": "gkobes@gkobes"
}
})
r = self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
eq_(data['validation']['ending_tier'], 5)
@mock.patch('devhub.tasks.run_validator')
def test_validator_sets_binary_flag_for_content(self, v):
v.return_value = json.dumps({
"errors": 0,
"success": True,
"warnings": 0,
"notices": 0,
"message_tree": {},
"messages": [],
"metadata": {
"contains_binary_content": True,
"version": "1.0",
"name": "gK0Bes Bot",
"id": "gkobes@gkobes"
}
})
eq_(self.addon.binary, False)
r = self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
assert_no_validation_errors(data)
addon = Addon.objects.get(pk=self.addon.id)
eq_(addon.binary, True)
@mock.patch('devhub.tasks.run_validator')
def test_linkify_validation_messages(self, v):
v.return_value = json.dumps({
"errors": 0,
"success": True,
"warnings": 1,
"notices": 0,
"message_tree": {},
"messages": [{
"context": ["<code>", None],
"description": [
"Something something, see https://bugzilla.mozilla.org/"],
"column": 0,
"line": 1,
"file": "chrome/content/down.html",
"tier": 2,
"message": "Some warning",
"type": "warning",
"id": [],
"uid": "bb9948b604b111e09dfdc42c0301fe38"
}],
"metadata": {}
})
r = self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
assert_no_validation_errors(data)
doc = pq(data['validation']['messages'][0]['description'][0])
eq_(doc('a').text(), 'https://bugzilla.mozilla.org/')
@mock.patch.object(settings, 'VALIDATOR_MESSAGE_LIMIT', 10)
def test_limit_validator_warnings(self):
data = {
"error": None,
"validation": {
"errors": 0,
"success": True,
"warnings": 500,
"notices": 0,
"message_tree": {},
"messages": [{
"context": ["<code>", None],
"description": [
"Something something, see https://bugzilla.mozilla.org/"],
"column": 0,
"line": 1,
"file": "chrome/content/down.html",
"tier": 2,
"message": "Some warning",
"type": "warning",
"id": [],
"uid": "bb9948b604b111e09dfdc42c0301fe38"
}] * 12,
"metadata": {}
}
}
make_validation_result(data)
eq_(len(data['validation']['messages']), 11)
assert 'truncated' in data['validation']['messages'][-1]['message']
eq_(data['validation']['messages'][-1]['type'], 'warning')
@mock.patch.object(settings, 'VALIDATOR_MESSAGE_LIMIT', 10)
def test_limit_validator_compat_errors(self):
orig_data = {
"error": None,
"validation": {
"errors": 0,
"success": True,
"warnings": 100,
"notices": 0,
"message_tree": {},
"compatibility_summary": {"errors": 100, "warnings": 0, "notices": 0},
"messages": [{
"context": ["<code>", None],
"description": [
"Something something, see https://bugzilla.mozilla.org/"],
"column": 0,
"line": 1,
"file": "chrome/content/down.html",
"tier": 2,
"message": "Some warning",
"type": "warning",
"compatibility_type": "warning",
"id": [],
"uid": "bb9948b604b111e09dfdc42c0301fe38"
},
{
"context": ["<code>", None],
"description": [
"Something something, see https://bugzilla.mozilla.org/"],
"column": 0,
"line": 1,
"file": "chrome/content/down.html",
"tier": 2,
"message": "Some error",
"type": "warning",
"compatibility_type": "warning",
"id": [],
"uid": "bb9948b604b111e09dfdc42c0301fe38"
}] * 50,
"metadata": {}
}
}
data = copy.deepcopy(orig_data)
make_validation_result(data)
eq_(len(data['validation']['messages']), 11)
assert 'truncated' in data['validation']['messages'][-1]['message']
eq_(data['validation']['messages'][-1]['type'], 'warning')
data = copy.deepcopy(orig_data)
make_validation_result(data, is_compatibility=True)
eq_(len(data['validation']['messages']), 11)
assert 'truncated' in data['validation']['messages'][-1]['message']
eq_(data['validation']['messages'][-1]['type'], 'error')
@mock.patch.object(settings, 'VALIDATOR_MESSAGE_LIMIT', 10)
def test_limit_validator_errors(self):
data = {
"error": None,
"validation": {
"errors": 100,
"success": True,
"warnings": 100,
"notices": 0,
"message_tree": {},
"messages": [{
"context": ["<code>", None],
"description": [
"Something something, see https://bugzilla.mozilla.org/"],
"column": 0,
"line": 1,
"file": "chrome/content/down.html",
"tier": 2,
"message": "Some warning",
"type": "warning",
"id": [],
"uid": "bb9948b604b111e09dfdc42c0301fe38"
},
{
"context": ["<code>", None],
"description": [
"Something something, see https://bugzilla.mozilla.org/"],
"column": 0,
"line": 1,
"file": "chrome/content/down.html",
"tier": 2,
"message": "Some error",
"type": "error",
"id": [],
"uid": "bb9948b604b111e09dfdc42c0301fe38"
}] * 50,
"metadata": {}
}
}
make_validation_result(data)
eq_(len(data['validation']['messages']), 11)
assert 'truncated' in data['validation']['messages'][-1]['message']
eq_(data['validation']['messages'][-1]['type'], 'error')
@mock.patch.object(settings, 'EXPOSE_VALIDATOR_TRACEBACKS', False)
@mock.patch('devhub.tasks.run_validator')
def test_hide_validation_traceback(self, run_validator):
run_validator.side_effect = RuntimeError('simulated task error')
r = self.client.post(reverse('devhub.json_file_validation',
args=[self.addon.slug, self.file.id]),
follow=True)
eq_(r.status_code, 200)
data = json.loads(r.content)
eq_(data['validation'], '')
eq_(data['error'], 'RuntimeError: simulated task error')
@mock.patch.object(waffle, 'flag_is_active')
@mock.patch('devhub.tasks.run_validator')
def test_rdf_parse_errors_are_ignored(self, run_validator,
flag_is_active):
run_validator.return_value = json.dumps({
"errors": 0,
"success": True,
"warnings": 0,
"notices": 0,
"message_tree": {},
"messages": [],
"metadata": {}
})
flag_is_active.return_value = True
addon = Addon.objects.get(pk=3615)
xpi = self.get_upload('extension.xpi')
d = parse_addon(xpi.path)
# Set up a duplicate upload:
addon.update(guid=d['guid'])
res = self.client.get(reverse('devhub.validate_addon'))
doc = pq(res.content)
upload_url = doc('#upload-addon').attr('data-upload-url')
with storage.open(xpi.path, 'rb') as f:
# Simulate JS file upload
res = self.client.post(upload_url, {'upload': f}, follow=True)
data = json.loads(res.content)
# Simulate JS result polling:
res = self.client.get(data['url'])
data = json.loads(res.content)
# Make sure we don't see a dupe UUID error:
eq_(data['validation']['messages'], [])
# Simulate JS result polling on detail page:
res = self.client.get(data['full_report_url'], follow=True)
res = self.client.get(res.context['validate_url'], follow=True)
data = json.loads(res.content)
# Again, make sure we don't see a dupe UUID error:
eq_(data['validation']['messages'], [])
@mock.patch('devhub.tasks.run_validator')
def test_compatibility_check(self, run_validator):
run_validator.return_value = json.dumps({
'errors': 0,
'success': True,
'warnings': 0,
'notices': 0,
'message_tree': {},
'messages': [],
'metadata': {}
})
xpi = self.get_upload('extension.xpi')
AppVersion.objects.create(
application=Application.objects.get(guid=FIREFOX.guid),
version='10.0.*')
compatibility_check(xpi, FIREFOX.guid, '10.0.*')
eq_(run_validator.call_args[1]['compat'], True)
class TestCompatibilityResults(amo.tests.TestCase):
fixtures = ['base/users', 'devhub/addon-compat-results']
def setUp(self):
super(TestCompatibilityResults, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
self.addon = Addon.objects.get(slug='addon-compat-results')
self.result = ValidationResult.objects.get(
file__version__addon=self.addon)
self.job = self.result.validation_job
def validate(self, expected_status=200):
r = self.client.post(reverse('devhub.json_bulk_compat_result',
args=[self.addon.slug, self.result.id]),
follow=True)
eq_(r.status_code, expected_status)
return json.loads(r.content)
def test_login_protected(self):
self.client.logout()
r = self.client.get(reverse('devhub.bulk_compat_result',
args=[self.addon.slug, self.result.id]))
eq_(r.status_code, 302)
r = self.client.post(reverse('devhub.json_bulk_compat_result',
args=[self.addon.slug, self.result.id]))
eq_(r.status_code, 302)
def test_target_version(self):
r = self.client.get(reverse('devhub.bulk_compat_result',
args=[self.addon.slug, self.result.id]))
eq_(r.status_code, 200)
doc = pq(r.content)
ver = json.loads(doc('.results').attr('data-target-version'))
assert amo.FIREFOX.guid in ver, ('Unexpected: %s' % ver)
eq_(ver[amo.FIREFOX.guid], self.job.target_version.version)
def test_app_trans(self):
r = self.client.get(reverse('devhub.bulk_compat_result',
args=[self.addon.slug, self.result.id]))
eq_(r.status_code, 200)
doc = pq(r.content)
trans = json.loads(doc('.results').attr('data-app-trans'))
for app in amo.APPS.values():
eq_(trans[app.guid], app.pretty)
def test_app_version_change_links(self):
r = self.client.get(reverse('devhub.bulk_compat_result',
args=[self.addon.slug, self.result.id]))
eq_(r.status_code, 200)
doc = pq(r.content)
trans = json.loads(doc('.results').attr('data-version-change-links'))
eq_(trans['%s 4.0.*' % amo.FIREFOX.guid],
'https://developer.mozilla.org/en/Firefox_4_for_developers')
def test_validation_success(self):
data = self.validate()
eq_(data['validation']['messages'][3]['for_appversions'],
{'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}': ['4.0b3']})
def test_time(self):
r = self.client.post(reverse('devhub.bulk_compat_result',
args=[self.addon.slug, self.result.id]),
follow=True)
eq_(r.status_code, 200)
doc = pq(r.content)
assert doc('time').text()
eq_(doc('table tr td:eq(1)').text(), 'Firefox 4.0.*')
@mock.patch.object(settings, 'EXPOSE_VALIDATOR_TRACEBACKS', True)
def test_validation_error(self):
try:
raise RuntimeError('simulated task error')
except:
error = ''.join(traceback.format_exception(*sys.exc_info()))
self.result.update(validation='', task_error=error)
data = self.validate()
eq_(data['validation'], '')
eq_(data['error'], error)
@mock.patch.object(settings, 'EXPOSE_VALIDATOR_TRACEBACKS', False)
def test_hide_validation_traceback(self):
try:
raise RuntimeError('simulated task error')
except:
error = ''.join(traceback.format_exception(*sys.exc_info()))
self.result.update(validation='', task_error=error)
data = self.validate()
eq_(data['validation'], '')
eq_(data['error'], 'RuntimeError: simulated task error')
class TestUploadCompatCheck(BaseUploadTest):
fixtures = ['base/apps', 'base/appversions', 'base/addon_3615']
compatibility_result = json.dumps({
"errors": 0,
"success": True,
"warnings": 0,
"notices": 0,
"compatibility_summary": {"notices": 0,
"errors": 0,
"warnings": 1},
"message_tree": {},
"messages": [],
"metadata": {}
})
def setUp(self):
super(TestUploadCompatCheck, self).setUp()
assert self.client.login(username='[email protected]', password='password')
self.app = Application.objects.get(pk=amo.FIREFOX.id)
self.appver = AppVersion.objects.get(application=self.app,
version='3.7a1pre')
self.upload_url = reverse('devhub.standalone_upload')
def poll_upload_status_url(self, upload_uuid):
return reverse('devhub.standalone_upload_detail', args=[upload_uuid])
def fake_xpi(self, filename=None):
"""Any useless file that has a name property (for Django)."""
if not filename:
return open(get_image_path('non-animated.gif'), 'rb')
return storage.open(filename, 'rb')
def upload(self, filename=None):
with self.fake_xpi(filename=filename) as f:
# Simulate how JS posts data w/ app/version from the form.
res = self.client.post(self.upload_url,
{'upload': f,
'app_id': self.app.pk,
'version_id': self.appver.pk},
follow=True)
return json.loads(res.content)
def test_compat_form(self):
res = self.client.get(reverse('devhub.check_addon_compatibility'))
eq_(res.status_code, 200)
doc = pq(res.content)
options = doc('#id_application option')
expected = [(str(a.id), unicode(a.pretty)) for a in amo.APP_USAGE]
for idx, element in enumerate(options):
e = pq(element)
val, text = expected[idx]
eq_(e.val(), val)
eq_(e.text(), text)
eq_(doc('#upload-addon').attr('data-upload-url'), self.upload_url)
# TODO(Kumar) actually check the form here after bug 671587
@mock.patch('devhub.tasks.run_validator')
def test_js_upload_validates_compatibility(self, run_validator):
run_validator.return_value = '' # Empty to simulate unfinished task.
data = self.upload()
kw = run_validator.call_args[1]
eq_(kw['for_appversions'], {self.app.guid: [self.appver.version]})
eq_(kw['overrides'],
{'targetapp_minVersion': {self.app.guid: self.appver.version},
'targetapp_maxVersion': {self.app.guid: self.appver.version}})
eq_(data['url'], self.poll_upload_status_url(data['upload']))
@mock.patch('devhub.tasks.run_validator')
def test_js_poll_upload_status(self, run_validator):
run_validator.return_value = self.compatibility_result
data = self.upload()
url = self.poll_upload_status_url(data['upload'])
res = self.client.get(url)
data = json.loads(res.content)
if data['validation'] and data['validation']['messages']:
raise AssertionError('Unexpected validation errors: %s'
% data['validation']['messages'])
@mock.patch('devhub.tasks.run_validator')
def test_compat_result_report(self, run_validator):
run_validator.return_value = self.compatibility_result
data = self.upload()
poll_url = self.poll_upload_status_url(data['upload'])
res = self.client.get(poll_url)
data = json.loads(res.content)
res = self.client.get(data['full_report_url'])
eq_(res.status_code, 200)
eq_(res.context['result_type'], 'compat')
doc = pq(res.content)
# Shows app/version on the results page.
eq_(doc('table tr td:eq(0)').text(), 'Firefox 3.7a1pre')
eq_(res.context['validate_url'], poll_url)
def test_compat_application_versions(self):
res = self.client.get(reverse('devhub.check_addon_compatibility'))
eq_(res.status_code, 200)
doc = pq(res.content)
data = {'application_id': amo.FIREFOX.id,
'csrfmiddlewaretoken':
doc('input[name=csrfmiddlewaretoken]').val()}
r = self.client.post(doc('#id_application').attr('data-url'),
data)
eq_(r.status_code, 200)
data = json.loads(r.content)
empty = True
for id, ver in data['choices']:
empty = False
eq_(AppVersion.objects.get(pk=id).version, ver)
assert not empty, "Unexpected: %r" % data
@mock.patch.object(waffle, 'flag_is_active')
@mock.patch('devhub.tasks.run_validator')
def test_rdf_parse_errors_are_ignored(self, run_validator,
flag_is_active):
run_validator.return_value = self.compatibility_result
flag_is_active.return_value = True
addon = Addon.objects.get(pk=3615)
dupe_xpi = self.get_upload('extension.xpi')
d = parse_addon(dupe_xpi)
# Set up a duplicate upload:
addon.update(guid=d['guid'])
data = self.upload(filename=dupe_xpi.path)
# Make sure we don't see a dupe UUID error:
eq_(data['validation']['messages'], [])
@mock.patch('devhub.tasks.run_validator')
def test_compat_summary_overrides(self, run_validator):
run_validator.return_value = json.dumps({
"success": True,
"errors": 0,
"warnings": 0,
"notices": 0,
"compatibility_summary": {"notices": 1,
"errors": 2,
"warnings": 3},
"message_tree": {},
"messages": [],
"metadata": {}
})
data = self.upload()
eq_(data['validation']['notices'], 1)
eq_(data['validation']['errors'], 2)
eq_(data['validation']['warnings'], 3)
res = self.client.get(self.poll_upload_status_url(data['upload']))
data = json.loads(res.content)
eq_(data['validation']['notices'], 1)
eq_(data['validation']['errors'], 2)
eq_(data['validation']['warnings'], 3)
@mock.patch('devhub.tasks.run_validator')
def test_compat_error_type_override(self, run_validator):
run_validator.return_value = json.dumps({
"success": True,
"errors": 0,
"warnings": 0,
"notices": 0,
"compatibility_summary": {"notices": 0,
"errors": 1,
"warnings": 0},
"message_tree": {},
"messages": [{"type": "warning",
"compatibility_type": "error",
"tier": 1},
{"type": "warning",
"compatibility_type": None,
"tier": 1}],
"metadata": {}
})
data = self.upload()
eq_(data['validation']['messages'][0]['type'], 'error')
eq_(data['validation']['messages'][1]['type'], 'warning')
|
|
from django.db import models
from jamjar.base.models import BaseModel
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.conf import settings
from datetime import datetime, timedelta
from math import log
from lilo import Lilo
import logging, uuid, os, shutil
logger = logging.getLogger(__name__)
# These are needed for the "hotness" score
epoch = datetime(1970, 1, 1)
def epoch_seconds(date):
td = date - epoch
return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)
class VideoQuerySet(models.query.QuerySet):
def is_public(self):
return self.filter(is_private=False)
def is_uploaded(self):
return self.filter(uploaded=True)
class PublicVideoManager(models.Manager):
def get_queryset(self):
return VideoQuerySet(self.model, using=self._db).is_public().is_uploaded()
def for_user(self, user):
if hasattr(user, 'blocks'):
excluded = user.blocks.all().values_list('blocked_user_id', flat=True)
return self.exclude(user_id__in=excluded)
else:
return self
class Video(BaseModel):
user = models.ForeignKey('users.User', related_name='videos')
name = models.CharField(max_length=128)
uploaded = models.BooleanField(default=False)
concert = models.ForeignKey('concerts.Concert', related_name='videos')
uuid = models.UUIDField(default=uuid.uuid4,editable=False)
length = models.FloatField(null=True)
original_filename = models.CharField(max_length=256,null=True)
file_size = models.FloatField(null=True)
is_private = models.BooleanField(default=False)
is_cycle = models.BooleanField(default=False)
views = models.IntegerField(default=0)
artists = models.ManyToManyField('artists.Artist', related_name='videos',blank=True)
width = models.IntegerField(default=0)
height = models.IntegerField(default=0)
recorded_at = models.DateTimeField(null=True)
objects = PublicVideoManager()
all_objects = models.Manager()
class Meta:
ordering = ['-created_at',]
def get_video_dir(self):
" Get the local directory for the video (and other temp files) "
return '{:}/{:}'.format(settings.TMP_VIDEOS_PATH, self.uuid)
def get_video_filepath(self, extension, filename="video"):
" Get the local filepath for a file related to this video "
full_filename = '{:}.{:}'.format(filename, extension)
return os.path.join(self.get_video_dir(), full_filename)
def tmp_src(self):
return os.path.join(self.get_video_dir(), self.original_filename)
def hls_src(self):
return self.make_s3_path('video','m3u8') if self.uploaded else None
def web_src(self):
return self.make_s3_path('video','mp4') if self.uploaded else None
def thumb_src(self):
if self.uploaded:
thumbs = {}
for size in settings.THUMBNAIL_SIZES:
filename = 'thumb-{}'.format(size)
thumbs[size] = self.make_s3_path(filename,'jpg')
return thumbs
else:
return None
def do_upload(self, input_fh):
video_path = self.tmp_src()
logger.info("Writing uploaded file to {:}".format(video_path))
with open(video_path, 'wb+') as output_fh:
# Split file into chunks to handle upload
for chunk in input_fh.chunks():
output_fh.write(chunk)
# Check to make sure that this video hasn't been uploaded already
lilo = Lilo(settings.LILO_CONFIG, video_path, self.id)
already_fingerprinted = lilo.check_if_fingerprinted()
if already_fingerprinted:
logger.warn('Video re-upload attempted by user {} - Video id: {}'.format(self.user_id,self.id))
raise Exception('This video has already been uploaded.')
return video_path
def make_s3_path(self, filename, extension):
return settings.S3_URL.format(self.uuid, filename, extension)
def process_upload(self, input_fh):
"""
Get the local directory where the video will temporarily live until
uploaded to S3 and create it if it doesn't exist (it shouldn't)
"""
video_dir = self.get_video_dir()
if not os.path.exists(video_dir): os.makedirs(video_dir)
return self.do_upload(input_fh)
def hot(self, date):
"""
Hotness score based on reddit's "hot" algorithm
https://medium.com/hacking-and-gonzo/how-reddit-ranking-algorithms-work-ef111e33d0d9#.pphklly6z
Start date is based on April 12, 2016, 2:26 EST
"""
s = self.views
order = log(max(abs(s), 1), 10)
sign = 1 if s > 0 else -1 if s < 0 else 0
seconds = epoch_seconds(date) - 1460427950
return round(sign * order + seconds / 45000, 7)
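# A standalone sketch of the score computed by Video.hot() above (pure Python, reusing
# epoch_seconds(); not used by the app, shown only for illustration):
def _hot_score_example(views, date):
    # log-scaled view count plus an age term anchored at 1460427950 (April 12, 2016)
    seconds = epoch_seconds(date) - 1460427950
    order = log(max(abs(views), 1), 10)
    sign = 1 if views > 0 else -1 if views < 0 else 0
    return round(sign * order + seconds / 45000, 7)
# e.g. _hot_score_example(150, datetime(2016, 5, 1)) -> roughly 38.5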
"""
When deleting a video object, also delete the video files from the server
"""
@receiver(pre_delete, sender=Video)
def delete_file(sender, instance, **kwargs):
# Delete the local file folder
video_dir = instance.get_video_dir()
if os.path.exists(video_dir):
shutil.rmtree(video_dir)
# TODO: Delete the file from S3 if it exists
# TODO: Delete the fingerprints from lilo if there are any FOR THIS VIDEO ID
class PublicEdgeManager(models.Manager):
def get_queryset(self):
return super(PublicEdgeManager, self).get_queryset().filter(video1__is_private=False, video2__is_private=False)
class Edge(BaseModel):
video1 = models.ForeignKey(Video, related_name='video1', db_index=True)
video2 = models.ForeignKey(Video, related_name='video2', db_index=True)
offset = models.FloatField()
confidence = models.IntegerField()
objects = PublicEdgeManager()
@classmethod
def new(cls, video1_id, video2_id, offset, confidence):
edge = Edge(video1_id=video1_id, video2_id=video2_id, offset=offset, confidence=confidence)
edge.save()
return edge
class JamJarMap(models.Model):
video = models.ForeignKey(Video, related_name='jamjars')
start = models.ForeignKey(Video, related_name='startjars')
class Playlist(BaseModel):
user = models.ForeignKey('users.User',related_name='playlists')
name = models.CharField(max_length=100)
is_private = models.BooleanField(default=False)
videos = models.ManyToManyField(Video, related_name='playlists',through='PlaylistOrder')
class PlaylistOrder(models.Model):
number = models.PositiveIntegerField()
playlist = models.ForeignKey(Playlist)
video = models.ForeignKey(Video)
class Meta:
ordering = ('number',)
class VideoVote(BaseModel):
user = models.ForeignKey('users.User',related_name='votes')
video = models.ForeignKey(Video, related_name='votes')
vote = models.NullBooleanField(null=True) # True is upvote, False is downvote, null is blank (redacted vote)
FLAG_TYPES = (
('Q','Quality'),
('I','Inappropriate'),
('A','Accuracy'),
('U','Report User'),
)
class VideoFlag(BaseModel):
user = models.ForeignKey('users.User', related_name='flags_submitted')
video = models.ForeignKey(Video, related_name='flags')
flag_type = models.CharField(max_length=1, choices=FLAG_TYPES)
notes = models.CharField(max_length=500,null=True,blank=True)
class JamPick(BaseModel):
video = models.ForeignKey(Video, related_name='jampick')
|
|
# -*- coding: utf-8 -*-
"""
Communicating with docker via rancher
NOTE: to convert the output json and read it:
https://jsonformatter.curiousconcept.com/
API examples:
https://github.com/rancher/validation-tests/tree/master/tests/v2_validation/cattlevalidationtest/core
"""
import time
from seadata.apis.commons.cluster import CONTAINERS_VARS
from restapi.utilities.logs import log
# PERPAGE_LIMIT = 5
# PERPAGE_LIMIT = 50
PERPAGE_LIMIT = 1000
# Dev note:
# This object is initialized in get_or_create_handle() in
# module "b2stage/backend/apis/commons/cluster.py".
# It receives all config that starts with "RESOURCES".
class Rancher(object):
def __init__(
self, key, secret, url, project, hub, hubuser, hubpass, localpath, qclabel
):
####################
# SET URL
self._url = url
self._project = project
# why? explained in http://bit.ly/2BBDJRj
self._project_uri = "{}/projects/{}/schemas".format(url, project)
self._hub_uri = hub
self._hub_credentials = (hubuser, hubpass)
self._localpath = localpath # default /nfs/share
self._qclabel = qclabel
self._hostlabel = 'io.rancher.scheduler.affinity:host_label'
####################
self.connect(key, secret)
# self.project_handle(project)
def connect(self, key, secret):
import gdapi
self._client = gdapi.Client(
url=self._project_uri, access_key=key, secret_key=secret
)
# def project_handle(self, project):
# return self._client.by_id_project(self._project)
def hosts(self):
"""
'state':'active'
'agentIpAddress':'130.186.13.150'
'hostname':'sdc01'
'driver':'openstack',
'openstackConfig':{
'username':'pdonorio'
'info':{
'osInfo':{
'dockerVersion':'Docker version 1.13.1, build 092cba3',
'kernelVersion':'4.4.0',
'operatingSystem':'Ubuntu 16.04 LTS'
'diskInfo':{
'fileSystems':{
'/dev/vda1':{
'capacity':29715
'cpuInfo':{
'count':8,
'memoryInfo':{
'memFree':20287,
'memTotal':24111,
"physicalHostId":"1ph3",
"""
hosts = {}
for data in self._client.list_host():
host = data.get('hostname')
if not data.get('state') == 'active':
log.warning("Host {} not active", host)
continue
hosts[data.get('physicalHostId').replace('p', '')] = {
'name': host,
'ip': data.get('agentIpAddress'),
'provider': data.get('driver'),
}
return hosts
def obj_to_dict(self, obj):
import json
return json.loads(obj.__repr__().replace("'", '"'))
def all_containers_available(self):
"""
Handle paginations properly
https://rancher.com/docs/rancher/v1.5/en/api/v2-beta/
"""
is_all = False
containers = []
while not is_all:
marker = len(containers)
onepage = self._client.list_container(
limit=PERPAGE_LIMIT, marker='m{}'.format(marker)
)
log.verbose('Containers list marker: {}', marker)
pagination = onepage.get('pagination', {})
# print(pagination)
is_all = not pagination.get('partial')
for element in onepage:
containers.append(element)
return containers
def containers(self):
"""
https://github.com/rancher/gdapi-python/blob/master/gdapi.py#L68
'io.rancher.container.system': 'true'
"""
system_label = 'io.rancher.container.system'
containers = {}
for info in self.all_containers_available():
# detect system containers
try:
labels = self.obj_to_dict(info.get('labels', {}))
if labels.get(system_label) is not None:
continue
except BaseException:
pass
# labels = info.get('data', {}).get('fields', {}).get('labels', {})
# info.get('externalId')
name = info.get('name')
cid = info.get('uuid')
if cid is None:
labels = info.get('labels', {})
cid = labels.get('io.rancher.container.uuid', None)
if cid is None:
log.warning("Container {} launching", name)
cid = name
containers[cid] = {
'name': name,
'image': info.get('imageUuid'),
'command': info.get('command'),
'host': info.get('hostId'),
}
return containers
def list(self):
resources = {}
containers = self.containers()
ckey = 'containers'
for host_id, host_data in self.hosts().items():
host_name = host_data.get('name')
if ckey not in host_data:
host_data[ckey] = {}
for container_id, container_data in containers.items():
if container_data.get('host') == host_id:
container_data.pop('host')
host_data['containers'][container_id] = container_data
resources[host_name] = host_data
return resources
def recover_logs(self, container_name):
import websocket as ws
container = self.get_container_object(container_name)
logs = container.logs(follow=False, lines=100)
uri = logs.url + '?token=' + logs.token
sock = ws.create_connection(uri, timeout=15)
out = ''
useless = "/bin/stty: 'standard input': Inappropriate ioctl for device"
while True:
try:
line = sock.recv()
if useless in line:
continue
except ws.WebSocketConnectionClosedException:
break
else:
out += line + '\n'
return out
def catalog_images(self):
""" check if container image is there """
catalog_url = "https://{}/v2/_catalog".format(self._hub_uri)
# print(catalog_url)
try:
import requests
r = requests.get(catalog_url, auth=self._hub_credentials)
catalog = r.json()
# print("TEST", catalog)
except BaseException:
return None
else:
return catalog.get('repositories', {})
def internal_labels(self, pull=True):
"""
Define Rancher docker labels
"""
# to launch containers only on selected host(s)
label_key = 'host_type'
label_value = self._qclabel
obj = {self._hostlabel: "{}={}".format(label_key, label_value)}
if pull:
# force to repull the image every time
pull_label = "io.rancher.container.pull_image"
obj[pull_label] = 'always'
return obj
def run(
self,
container_name,
image_name,
wait_running=None,
private=False,
extras=None,
wait_stopped=None,
pull=True,
):
############
if private:
image_name_no_tags = image_name.split(':')[0]
images_available = self.catalog_images()
error = None
if images_available is None:
error = {'catalog': "Not reachable"}
elif image_name_no_tags not in images_available:
error = {'image': "Not found in our private catalog"}
if error is not None:
return {'error': error}
# Add the prefix for private hub if it's there
image_name = "{}/{}".format(self._hub_uri, image_name)
############
params = {
'name': container_name,
'imageUuid': 'docker:' + image_name,
'labels': self.internal_labels(pull),
# entryPoint=['/bin/sh'],
# command=['sleep', '1234567890'],
}
############
if extras is not None and isinstance(extras, dict):
for key, value in extras.items():
if key not in params:
# NOTE: this may print passwords, watch out!
params[key] = value
############
from gdapi import ApiError
try:
container = self._client.create_container(**params)
except ApiError as e:
log.error("Rancher fail: {}", e.__dict__)
return e.__dict__
else:
# Should we wait for the container?
if wait_stopped is None:
x = CONTAINERS_VARS.get('wait_stopped')
wait_stopped = not (x.lower() == 'false' or int(x) == 0)
if wait_running is None:
x = CONTAINERS_VARS.get('wait_running')
wait_running = not (x.lower() == 'false' or int(x) == 0)
if wait_stopped or wait_running:
log.info(
'Launched container {} (external id: {})!',
container_name, container.externalId
)
# Wait for container to stop...
while True:
co = self.get_container_object(container_name)
log.debug(
'Container {}: {} ({}, {}: {})',
container_name,
co.state,
co.transitioning,
co.transitioningMessage,
co.transitioningProgress,
)
# Add errors returned by rancher to the errors object:
if isinstance(co.transitioningMessage, str):
if 'error' in co.transitioningMessage.lower():
error = {'container': co.transitioningMessage}
# Simplify life of first-time deployers:
if (
self._hub_uri in co.transitioningMessage
and 'no basic auth credentials' in co.transitioningMessage
):
log.error(
'Message from Rancher: "{}". Possibly you first need to add the registry on the Rancher installation!',
co.transitioningMessage,
)
# Stop loop based on container state:
if co.state == 'error' or co.state == 'erroring':
log.error('Error in container!')
error = {'container': co.transitioningMessage}
log.info('Detailed container info {}', co)
break
elif co.state == 'stopped' and wait_stopped:
# even this does not guarantee success of operation inside container, of course!
log.info('Container has stopped!')
log.info('Detailed container info {}', co)
break
elif co.state == 'running' and wait_running:
log.info('Container is running!')
if wait_stopped:
log.info('Detailed container info {}', co)
break
else:
time.sleep(1)
# We will not wait for container to be created/running/stopped:
else:
log.info(
"Launched: {} (external id: {})!",
container_name, container.externalId
)
return None
def get_container_object(self, container_name):
containers = self.all_containers_available()
# containers = self._client.list_container(limit=PERPAGE_LIMIT)
# ####################################
# # should I clean a little bit?
# pagination = containers.get('pagination', {})
# # print(pagination)
# is_all = not pagination.get('partial')
# if not is_all:
# log.warning('More pages...')
####################################
for element in containers:
# NOTE: container name is unique in the whole cluster env
if element.name != container_name:
continue
# This patch does not work since Rancher is not able to
# execute containers with the same name, even if deployed
# on different hosts. Even if verified here, the run would fail later.
# 'host_type=qc'
# labels = element.labels
# host_label = labels.get(self._hostlabel)
# if host_label is not None:
# expected = self.internal_labels(pull=False).get(self._hostlabel)
# if host_label != expected:
# log.warning(
# "Found {} but deployed on {} (instead of {}). Skipping it",
# container_name, host_label, expected
# )
# continue
return element
return None
def remove_container_by_name(self, container_name):
obj = self.get_container_object(container_name)
if obj is not None:
self._client.delete(obj)
return True
else:
log.warning("Did not found container: {}", container_name)
return False
def test(self):
# client.list_host()
# client.list_project()
# client.list_service()
pass
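# A minimal usage sketch (all credentials, URLs and names below are placeholders,
# not taken from the source; connect() needs a reachable Rancher v2-beta API):
#
#     rancher = Rancher(
#         key='ACCESS_KEY', secret='SECRET_KEY',
#         url='https://rancher.example.org/v2-beta', project='1a5',
#         hub='hub.example.org', hubuser='user', hubpass='secret',
#         localpath='/nfs/share', qclabel='qc',
#     )
#     rancher.list()                                   # hosts with their non-system containers
#     rancher.run('qc-job-1', 'myorg/qc-image:latest', wait_stopped=True)
#     rancher.remove_container_by_name('qc-job-1')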
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud driver for the Blue Box Blocks API
This driver implements all libcloud functionality for the Blue Box Blocks API.
Blue Box home page http://bluebox.net
Blue Box API documentation https://boxpanel.bluebox.net/public/the_vault/index.php/Blocks_API
"""
import copy
import base64
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import b
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState, InvalidCredsError
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
# Current end point for Blue Box API.
BLUEBOX_API_HOST = "boxpanel.bluebox.net"
# The API doesn't currently expose all of the required values for libcloud,
# so we simply list what's available right now, along with all of the various
# attributes that are needed by libcloud.
BLUEBOX_INSTANCE_TYPES = {
'1gb': {
'id': '94fd37a7-2606-47f7-84d5-9000deda52ae',
'name': 'Block 1GB Virtual Server',
'ram': 1024,
'disk': 20,
'cpu': 0.5
},
'2gb': {
'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092',
'name': 'Block 2GB Virtual Server',
'ram': 2048,
'disk': 25,
'cpu': 1
},
'4gb': {
'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58',
'name': 'Block 4GB Virtual Server',
'ram': 4096,
'disk': 50,
'cpu': 2
},
'8gb': {
'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251',
'name': 'Block 8GB Virtual Server',
'ram': 8192,
'disk': 100,
'cpu': 4
}
}
RAM_PER_CPU = 2048
NODE_STATE_MAP = {'queued': NodeState.PENDING,
'building': NodeState.PENDING,
'running': NodeState.RUNNING,
'error': NodeState.TERMINATED,
'unknown': NodeState.UNKNOWN}
class BlueboxResponse(JsonResponse):
def parse_error(self):
if int(self.status) == 401:
if not self.body:
raise InvalidCredsError(str(self.status) + ': ' + self.error)
else:
raise InvalidCredsError(self.body)
return self.body
class BlueboxNodeSize(NodeSize):
def __init__(self, id, name, cpu, ram, disk, price, driver):
self.id = id
self.name = name
self.cpu = cpu
self.ram = ram
self.disk = disk
self.price = price
self.driver = driver
def __repr__(self):
return ((
'<NodeSize: id=%s, name=%s, cpu=%s, ram=%s, disk=%s, '
'price=%s, driver=%s ...>')
% (self.id, self.name, self.cpu, self.ram, self.disk,
self.price, self.driver.name))
class BlueboxConnection(ConnectionUserAndKey):
"""
Connection class for the Bluebox driver
"""
host = BLUEBOX_API_HOST
secure = True
responseCls = BlueboxResponse
def add_default_headers(self, headers):
user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
headers['Authorization'] = 'Basic %s' % (user_b64)
return headers
class BlueboxNodeDriver(NodeDriver):
"""
Bluebox Blocks node driver
"""
connectionCls = BlueboxConnection
type = Provider.BLUEBOX
api_name = 'bluebox'
name = 'Bluebox Blocks'
website = 'http://bluebox.net'
def list_nodes(self):
result = self.connection.request('/api/blocks.json')
return [self._to_node(i) for i in result.object]
def list_sizes(self, location=None):
sizes = []
for key, values in list(BLUEBOX_INSTANCE_TYPES.items()):
attributes = copy.deepcopy(values)
attributes.update({'price': self._get_size_price(size_id=key)})
sizes.append(BlueboxNodeSize(driver=self.connection.driver,
**attributes))
return sizes
def list_images(self, location=None):
result = self.connection.request('/api/block_templates.json')
images = []
for image in result.object:
images.extend([self._to_image(image)])
return images
def create_node(self, **kwargs):
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
size = kwargs["size"]
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
try:
auth = kwargs['auth']
except Exception:
raise Exception("SSH public key or password required.")
data = {
'hostname': name,
'product': size.id,
'template': image.id
}
ssh = None
password = None
if isinstance(auth, NodeAuthSSHKey):
ssh = auth.pubkey
data.update(ssh_public_key=ssh)
elif isinstance(auth, NodeAuthPassword):
password = auth.password
data.update(password=password)
if "ex_username" in kwargs:
data.update(username=kwargs["ex_username"])
if not ssh and not password:
raise Exception("SSH public key or password required.")
params = urlencode(data)
result = self.connection.request('/api/blocks.json', headers=headers,
data=params, method='POST')
node = self._to_node(result.object)
return node
def destroy_node(self, node):
url = '/api/blocks/%s.json' % (node.id)
result = self.connection.request(url, method='DELETE')
return result.status == 200
def list_locations(self):
return [NodeLocation(0, "Blue Box Seattle US", 'US', self)]
def reboot_node(self, node):
url = '/api/blocks/%s/reboot.json' % (node.id)
result = self.connection.request(url, method="PUT")
return result.status == 200
def _to_node(self, vm):
state = NODE_STATE_MAP.get(vm.get('status'), NodeState.UNKNOWN)
n = Node(id=vm['id'],
name=vm['hostname'],
state=state,
public_ips=[ip['address'] for ip in vm['ips']],
private_ips=[],
extra={'storage': vm['storage'], 'cpu': vm['cpu']},
driver=self.connection.driver)
return n
def _to_image(self, image):
image = NodeImage(id=image['id'],
name=image['description'],
driver=self.connection.driver)
return image
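# A minimal usage sketch (the customer id and API key are placeholders, not real
# credentials; the calls below require network access to the Blue Box API):
#
#     from libcloud.compute.providers import get_driver
#     from libcloud.compute.types import Provider
#
#     driver = get_driver(Provider.BLUEBOX)('customer_id', 'api_key')
#     sizes = driver.list_sizes()
#     images = driver.list_images()
#     node = driver.create_node(name='example', size=sizes[0], image=images[0],
#                               auth=NodeAuthPassword('s3cr3t'))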
|
|
"""Logging
"""
import sys
import os
import logging
from pip import backwardcompat
from pip._vendor import colorama, pkg_resources
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
def should_color(consumer, environ, std=(sys.stdout, sys.stderr)):
real_consumer = (consumer if not isinstance(consumer, colorama.AnsiToWin32)
else consumer.wrapped)
# If consumer isn't stdout or stderr we shouldn't colorize it
if real_consumer not in std:
return False
# If consumer is a tty we should color it
if hasattr(real_consumer, "isatty") and real_consumer.isatty():
return True
# If we have an ANSI term we should color it
if environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
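# For illustration only (behaviour of should_color, not part of pip itself):
#   should_color(sys.stdout, {"TERM": "ANSI"})  -> True (stdout is a tty, or TERM is ANSI)
#   should_color(open(os.devnull, "w"), {})     -> False (consumer is neither stdout nor stderr)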
def should_warn(current_version, removal_version):
# Our Significant digits on versions is 2, so remove everything but the
# first two places.
current_version = ".".join(current_version.split(".")[:2])
removal_version = ".".join(removal_version.split(".")[:2])
# Our warning threshold is one minor version before removal, so we
# decrement the minor version by one
major, minor = removal_version.split(".")
minor = str(int(minor) - 1)
warn_version = ".".join([major, minor])
# Test if our current_version should be a warn
return (pkg_resources.parse_version(current_version)
< pkg_resources.parse_version(warn_version))
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
VERBOSE_DEBUG = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO + logging.WARN) / 2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
COLORS = {
WARN: _color_wrap(colorama.Fore.YELLOW),
ERROR: _color_wrap(colorama.Fore.RED),
FATAL: _color_wrap(colorama.Fore.RED),
}
def __init__(self):
self.consumers = []
self.indent = 0
self.explicit_levels = False
self.in_progress = None
self.in_progress_hanging = False
def add_consumers(self, *consumers):
if sys.platform.startswith("win"):
for level, consumer in consumers:
if hasattr(consumer, "write"):
self.consumers.append(
(level, colorama.AnsiToWin32(consumer)),
)
else:
self.consumers.append((level, consumer))
else:
self.consumers.extend(consumers)
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def deprecated(self, removal_version, msg, *args, **kwargs):
"""
Logs deprecation message which is log level WARN if the
``removal_version`` is > 1 minor release away and log level ERROR
otherwise.
removal_version should be the version that the deprecated feature is
expected to be removed in, so something that will not exist in
version 1.7, but will in 1.6 would have a removal_version of 1.7.
"""
from pip import __version__
if should_warn(__version__, removal_version):
self.warn(msg, *args, **kwargs)
else:
self.error(msg, *args, **kwargs)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
# render
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' ' * self.indent + rendered
if self.explicit_levels:
## FIXME: should this be a name, not a level number?
rendered = '%02i %s' % (level, rendered)
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if hasattr(consumer, 'write'):
write_content = rendered + '\n'
if should_color(consumer, os.environ):
# We are printing to stdout or stderr and it supports
# colors so render our text colored
colorizer = self.COLORS.get(level, lambda x: x)
write_content = colorizer(write_content)
consumer.write(write_content)
if hasattr(consumer, 'flush'):
consumer.flush()
else:
consumer(rendered)
def _show_progress(self):
"""Should we display download progress?"""
return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self._show_progress():
sys.stdout.write(' ' * self.indent + msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
self.last_message = None
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self._show_progress():
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
# These erase any messages shown with show_progress (besides .'s)
logger.show_progress('')
logger.show_progress('')
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self, message=None):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
if message is None:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.last_message:
padding = ' ' * max(0, len(self.last_message) - len(message))
else:
padding = ''
sys.stdout.write('\r%s%s%s%s' %
(' ' * self.indent, self.in_progress, message, padding))
sys.stdout.flush()
self.last_message = message
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger()
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
def move_stdout_to_stderr(self):
to_remove = []
to_add = []
for consumer_level, consumer in self.consumers:
if consumer == sys.stdout:
to_remove.append((consumer_level, consumer))
to_add.append((consumer_level, sys.stderr))
for item in to_remove:
self.consumers.remove(item)
self.consumers.extend(to_add)
logger = Logger()
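# A minimal usage sketch (the level and messages are made up; pip wires consumers up
# in its command machinery, this is only for illustration):
#
#     import sys
#     logger.add_consumers((Logger.NOTIFY, sys.stdout))
#     logger.notify('Downloading example-package ...')
#     logger.indent += 2
#     logger.warn('retrying after timeout')   # rendered in yellow on a tty
#     logger.indent -= 2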
|
|
#!/usr/bin/env python
# coding=utf-8
"""Dummy functions to replace functions from operations module.
It's a separate module because adding dry-run cases to the original operations
module would be ugly and dirty.
Yes, I know that a separate module requires more time and energy to keep
backward compatibility with the original operations.
"""
# This file is part of https://github.com/Friz-zy/factory
from __future__ import with_statement
import os
import re
from main import envs
from operations import write_message_to_log, run, command_patching_for_sudo
def run(command, use_sudo=False, user='', group='', freturn=False, err_to_out=False, input=None, use_which=True, sumout='', sumerr='', status=0):
"""Dummy executing command on host via ssh or subprocess.
If use_which is not False, original run command will be executed with 'which' command,
and it returns will be used as new sumout, somerr, status if original is not exists.
Args:
command (str): command for executing
use_sudo (bool): running with sudo prefix if True and current user not root, default is False
user (str): username for sudo -u prefix
group (str): group for sudo -g prefix
freturn (bool): return tuple if True, else return str, default is False
err_to_out (bool): redirect stderr to stdout if True, default is False
input (str or tuple of str): str will be flushed to stdin after executed command, default is None
use_which (bool): tries to split the command line and run 'which' for each binary, default is True
works only for unix
sumout (str): fake string that contained all stdout messages, default is ''
sumerr (str): fake string that contained all stderr, default is ''
status (int): fake return code of command, default is 0
Return:
str if freturn is False: string that contains all stdout messages
tuple if freturn is True:
string that contains all stdout messages
string that contains all stderr
int that is the return code of the command
"""
logger = envs.connect.logger
logger.debug('executing dry-run function')
logger.debug('arguments for executing and another locals: %s', locals())
original_command = command
command = command_patching_for_sudo(command, use_sudo, user, group)
# logging
write_message_to_log(command, 'dry-in: ')
if use_which:
# separate sudo modificator
if original_command != command:
st = command.find(original_command)
command = command[:st] + '|' + command[st:]
ncommand = ''
command = re.split('\\&|\\||\\;', command)
for part in command:
ncommand += '{0} {1}; '.format(
envs.common.which_binary,
re.findall(r"[\w']+", part)[0]
)
# import current run implementation
try:
run = envs.common.functions['run']
except KeyError:
from operations import run
if not (sumout and sumerr and status):
sumout, sumerr, status = run(ncommand, freturn=True, err_to_out=err_to_out, force=True)
else:
run(ncommand, err_to_out=err_to_out, force=True)
if freturn:
logger.debug('return sumout %s, sumerr %s, status %s', sumout, sumerr, status)
return (sumout, sumerr, status)
logger.debug('return sumout %s', sumout)
return sumout
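# A standalone illustration of the 'which' rewriting performed above (pure re; the
# helper is not called anywhere and only clarifies the dry-run behaviour, with an
# extra guard against empty parts that the code above does not have):
def _which_rewrite_example(command, which_binary='which'):
    parts = re.split('\\&|\\||\\;', command)
    return ''.join('{0} {1}; '.format(which_binary, re.findall(r"[\w']+", p)[0])
                   for p in parts if p.strip())
# _which_rewrite_example('cat /etc/hosts | grep localhost; echo done')
# -> 'which cat; which grep; which echo; '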
def push(src, dst='~/', pull=False, use_test=True, status=0):
"""Dummy copying file or directory.
If use_test is not False, original run command will be executed with 'test' command,
and it returns will be used as status if original is not exists.
Args:
src (str): local file or directory
dst (str): destination path, default is '~/'
pull (bool): copy file from another host to localhost if True, default is False
use_test (bool): tries to run 'test -e' for each file, default is True
works only for unix
status (int): fake return code of command, default is 0
Return:
int that is the return code of the command:
0 if no exception, else errno on localhost, or
the status of the scp subprocess
"""
logger = envs.connect.logger
logger.debug('executing push function')
logger.debug('arguments for executing and another locals: %s', locals())
if envs.connect.host in envs.common.localhost:
logger.debug('used shutil.copy*')
for p in (src, dst):
if os.path.exists(p):
logger.debug('os.path.exists(%s) is True', p)
if os.path.isfile(p):
logger.debug('os.path.isfile(%s) is True, used shutil.copy2', p)
write_message_to_log('file \'%s\' is exists' % p, 'dry-out: ')
elif os.path.isdir(p):
logger.debug('os.path.isdir(%s) is True, used shutil.copytree', p)
write_message_to_log('directory \'%s\' is exists' % p, 'dry-out: ')
else:
logger.debug('os.path.exists(%s) is False', p)
write_message_to_log('path \'%s\' is not exists' % p, 'dry-out: ')
if not os.path.exists(src) and not status:
return 2 # errno.ENOENT
return status
else:
logger.debug('used factory.run')
# import current run implementation
try:
run = envs.common.functions['run']
except KeyError:
from operations import run
if pull:
if use_test:
command = '{0} {1}'.format(
envs.common.test_binary,
src
)
if not status:
o, e, status = run(command, freturn=True, force=True)
else:
run(command, force=True)
if os.path.isfile(dst):
logger.debug('os.path.isfile(dst) is True, used shutil.copy2')
write_message_to_log('file \'%s\' is exists' % dst, 'dry-out: ')
elif os.path.isdir(dst):
logger.debug('os.path.isdir(dst) is True, used shutil.copytree')
write_message_to_log('directory \'%s\' is exists' % dst, 'dry-out: ')
else:
logger.debug('os.path.exists(dst) is False')
write_message_to_log('path \'%s\' is not exists' % dst, 'dry-out: ')
return status
else:
if os.path.isfile(src):
logger.debug('os.path.isfile(src) is True, used shutil.copy2')
write_message_to_log('file \'%s\' is exists' % src, 'dry-out: ')
elif os.path.isdir(src):
logger.debug('os.path.isdir(src) is True, used shutil.copytree')
write_message_to_log('directory \'%s\' is exists' % src, 'dry-out: ')
else:
logger.debug('os.path.exists(src) is False')
write_message_to_log('path \'%s\' is not exists' % src, 'dry-out: ')
if use_test:
command = '{0} {1}'.format(
envs.common.test_binary,
dst
)
run(command, force=True)
if not os.path.exists(src) and not status:
return 2 # errno.ENOENT
return status
def run_script(local_file, binary=None, freturn=False, err_to_out=False, input=None, use_which=True, sumout='', sumerr='', status=0):
"""Dummy excecuting script.
If use_which is not False, original run command will be executed with 'which' command,
and it returns will be used as new sumout, somerr, status if original is not exists.
Args:
local_file (str): script on localhost for executing
binary (str): shell for executing, first line of script or 'sh -s'
freturn (bool): return tuple if True, else return str, default is False
err_to_out (bool): redirect stderr to stdout if True, default is False
input (str): str will be flushed to stdin after executed command, default is None
use_which (bool): tries to split the command line and run 'which' for each binary, default is True
works only for unix
sumout (str): fake string that contained all stdout messages, default is ''
sumerr (str): fake string that contained all stderr, default is ''
status (int): fake return code of command, default is 0
Return:
str if freturn is False: string that contains all stdout messages
tuple if freturn is True:
string that contains all stdout messages
string that contains all stderr
int that is the return code of the command
"""
logger = envs.connect.logger
host_string = ''.join((envs.connect.user,
'@',
envs.connect.host))
logger.debug('executing run_script function')
logger.debug('arguments for executing and another locals: %s', locals())
if os.path.isfile(local_file):
logger.debug('os.path.isfile(local_file) is True, used shutil.copy2')
write_message_to_log('file \'%s\' is exists' % local_file, 'dry-out: ')
else:
write_message_to_log('path \'%s\' is not exists' % local_file, 'dry-out: ')
if not status:
status = 2 # errno.ENOENT
if not binary:
logger.debug('trying get binary from script file')
try:
with open(local_file) as f:
l = f.readline()
logger.debug('first line from script file: %s', l)
if l.startswith('#!'):
binary = l.strip()[2:]
logger.debug('binary: %s', binary)
else:
binary = 'sh -s'
logger.debug('used default binary: %s', binary)
except IOError:
binary = 'sh -s'
logger.debug('used default binary: %s', binary)
command = binary + " < " + local_file
# open new connect
logger.debug('run command: %s', command)
return run(command, err_to_out=err_to_out, use_which=use_which, sumout=sumout, sumerr=sumerr, status=status)
|
|
import datetime
from .converter import WaypointStyle
class Writer:
"""
A writer for SeeYou CUP files. Supports waypoints and tasks::
with open('competition.cup', 'wb') as fp:
writer = Writer(fp)
"""
HEADER = u'name,code,country,lat,lon,elev,style,rwdir,rwlen,freq,desc'
DIVIDER = u'-----Related Tasks-----'
DISTANCE_FORMAT_FLOAT = u'%.1f%s'
DISTANCE_FORMAT_INT = u'%d%s'
DISTANCE_FORMAT_OTHER = u'%s%s'
ANGLE_FORMAT_FLOAT = u'%.1f'
ANGLE_FORMAT_INT = u'%d'
ANGLE_FORMAT_OTHER = u'%s'
def __init__(self, fp, encoding='utf-8'):
self.fp = fp
self.encoding = encoding
self.wps = set()
self.in_task_section = False
self.write_line(self.HEADER)
def escape(self, field):
if not field:
return ''
return u'"%s"' % field.replace('\\', '\\\\').replace('"', '\\"')
def format_coordinate(self, value, is_latitude=True):
if is_latitude:
if not -90 <= value <= 90:
raise ValueError(u'Invalid latitude: %s' % value)
hemisphere = u'S' if value < 0 else u'N'
format = u'%02d%06.3f%s'
else:
if not -180 <= value <= 180:
raise ValueError(u'Invalid longitude: %s' % value)
hemisphere = u'W' if value < 0 else u'E'
format = u'%03d%06.3f%s'
value = abs(value)
degrees = int(value)
minutes = (value - degrees) * 60
return format % (degrees, minutes, hemisphere)
def format_latitude(self, value):
return self.format_coordinate(value, is_latitude=True)
def format_longitude(self, value):
return self.format_coordinate(value, is_latitude=False)
def format_angle(self, angle):
if angle is None or angle == '':
return u''
if isinstance(angle, float):
return self.ANGLE_FORMAT_FLOAT % angle
elif isinstance(angle, int):
return self.ANGLE_FORMAT_INT % angle
else:
return self.ANGLE_FORMAT_OTHER % angle
def format_distance(self, distance):
if distance is None or distance == '':
return u''
if isinstance(distance, tuple):
unit = distance[1]
distance = distance[0]
else:
unit = u'm'
if isinstance(distance, float):
return self.DISTANCE_FORMAT_FLOAT % (distance, unit)
elif isinstance(distance, int):
return self.DISTANCE_FORMAT_INT % (distance, unit)
else:
return self.DISTANCE_FORMAT_OTHER % (distance, unit)
def format_time(self, time):
if isinstance(time, datetime.datetime):
time = time.time()
if isinstance(time, datetime.time):
time = time.strftime(u'%H:%M:%S')
return time
def format_timedelta(self, timedelta):
if isinstance(timedelta, datetime.timedelta):
hours, remainder = divmod(timedelta.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
timedelta = u'%02d:%02d:%02d' % (hours, minutes, seconds)
return timedelta
def write_line(self, line=u''):
self.fp.write((line + u'\r\n').encode(self.encoding))
def write_fields(self, fields):
self.write_line(u','.join(fields))
def write_waypoint(
self, name, shortname, country, latitude, longitude, elevation=u'',
style=WaypointStyle.NORMAL, runway_direction=u'', runway_length=u'',
frequency=u'', description=u''):
"""
Write a waypoint::
writer.write_waypoint(
'Meiersberg',
'MEIER',
'DE',
(51 + 7.345 / 60.),
(6 + 24.765 / 60.),
)
# -> "Meiersberg","MEIER",DE,5107.345N,00624.765E,,1,,,,
:param name: name of the waypoint (must not be empty)
:param shortname: short name for depending GPS devices
:param country: IANA top level domain country code (see
http://www.iana.org/cctld/cctld-whois.htm)
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param elevation: elevation of the waypoint in meters or as
``(elevation, unit)`` tuple
:param style: the waypoint type (see official specification for the
list of valid styles, defaults to "Normal")
:param runway_direction: heading of the runway in degrees if the
waypoint is landable
:param runway_length: length of the runway in meters or as ``(length,
unit)`` tuple if the waypoint is landable
:param frequency: radio frequency of the airport
:param description: optional description of the waypoint (no length
limit)
"""
if self.in_task_section:
raise RuntimeError(u'Waypoints must be written before any tasks')
if not name:
raise ValueError(u'Waypoint name must not be empty')
fields = [
self.escape(name),
self.escape(shortname),
country,
self.format_latitude(latitude),
self.format_longitude(longitude),
self.format_distance(elevation),
str(style),
str(runway_direction),
self.format_distance(runway_length),
self.escape(frequency),
self.escape(description),
]
self.write_fields(fields)
self.wps.add(name)
def write_task(self, description, waypoints):
"""
Write a task definition::
writer.write_task('500 km FAI', [
'MEIER',
'BRILO',
'AILER',
'MEIER',
])
# -> "500 km FAI","MEIER","BRILO","AILER","MEIER"
Make sure that the referenced waypoints have been written with
:meth:`~aerofiles.seeyou.Writer.write_waypoint` before writing the
task. The task section divider will be written to automatically when
:meth:`~aerofiles.seeyou.Writer.write_task` is called the first time.
After the first task is written
:meth:`~aerofiles.seeyou.Writer.write_waypoint` must not be called
anymore.
:param description: description of the task (may be blank)
:param waypoints: list of waypoints in the task (names must match the
long names of previously written waypoints)
"""
if not self.in_task_section:
self.write_line()
self.write_line(self.DIVIDER)
self.in_task_section = True
fields = [self.escape(description)]
for waypoint in waypoints:
if waypoint not in self.wps:
raise ValueError(u'Waypoint "%s" was not found' % waypoint)
fields.append(self.escape(waypoint))
self.write_fields(fields)
def write_task_options(self, **kw):
"""
Write an options line for a task definition::
writer.write_task_options(
start_time=time(12, 34, 56),
task_time=timedelta(hours=1, minutes=45, seconds=12),
waypoint_distance=False,
distance_tolerance=(0.7, 'km'),
altitude_tolerance=300.0,
)
# -> Options,NoStart=12:34:56,TaskTime=01:45:12,WpDis=False,NearDis=0.7km,NearAlt=300.0m
:param start_time: opening time of the start line as
:class:`datetime.time` or string
:param task_time: designated time for the task as
:class:`datetime.timedelta` or string
:param waypoint_distance: task distance calculation (``False``: use
fixes, ``True``: use waypoints)
:param distance_tolerance: distance tolerance in meters or as
``(distance, unit)`` tuple
:param altitude_tolerance: altitude tolerance in meters or as
``(distance, unit)`` tuple
:param min_distance: "uncompleted leg (``False``: calculate maximum
distance from last observation zone)"
:param random_order: if ``True``, then Random order of waypoints is
checked
:param max_points: maximum number of points
:param before_points: number of mandatory waypoints at the beginning.
``1`` means start line only, ``2`` means start line plus first
point in task sequence (Task line).
:param after_points: number of mandatory waypoints at the end. ``1``
means finish line only, ``2`` means finish line and one point
before finish in task sequence (Task line).
:param bonus: bonus for crossing the finish line
"""
if not self.in_task_section:
raise RuntimeError(
u'Task options have to be written in task section')
fields = ['Options']
if 'start_time' in kw:
fields.append(u'NoStart=' + self.format_time(kw['start_time']))
if 'task_time' in kw:
fields.append(u'TaskTime=' + self.format_timedelta(kw['task_time']))
if 'waypoint_distance' in kw:
fields.append(u'WpDis=%s' % kw['waypoint_distance'])
if 'distance_tolerance' in kw:
fields.append(
u'NearDis=' + self.format_distance(kw['distance_tolerance']))
if 'altitude_tolerance' in kw:
fields.append(
u'NearAlt=' + self.format_distance(kw['altitude_tolerance']))
if 'min_distance' in kw:
fields.append(u'MinDis=%s' % kw['min_distance'])
if 'random_order' in kw:
fields.append(u'RandomOrder=%s' % kw['random_order'])
if 'max_points' in kw:
fields.append(u'MaxPts=%d' % kw['max_points'])
if 'before_points' in kw:
fields.append(u'BeforePts=%d' % kw['before_points'])
if 'after_points' in kw:
fields.append(u'AfterPts=%d' % kw['after_points'])
if 'bonus' in kw:
fields.append(u'Bonus=%d' % kw['bonus'])
self.write_fields(fields)
def write_observation_zone(self, num, **kw):
"""
Write observation zone information for a taskpoint::
writer.write_observation_zone(
num=0,
style=2,
radius=400,
line=True,
)
# -> ObsZone=0,Style=2,R1=400m,Line=1
:param num: consecutive number of a waypoint (``0``: Start)
:param style: direction (``0``: Fixed value, ``1``: Symmetrical, ``2``:
To next point, ``3``: To previous point, ``4``: To start point)
:param radius: radius 1 in meter or as ``(radius, unit)`` tuple
:param angle: angle 1 in degrees
:param radius2: radius 2 in meter or as ``(radius, unit)`` tuple
:param angle2: angle 2 in degrees
:param angle12: angle 12 in degrees
:param line: should be ``True`` if start or finish line
"""
if not self.in_task_section:
raise RuntimeError(
u'Observation zones have to be written in task section')
fields = [u'ObsZone=%d' % num]
if 'style' in kw:
fields.append(u'Style=%d' % kw['style'])
if 'radius' in kw:
fields.append(u'R1=' + self.format_distance(kw['radius']))
if 'angle' in kw:
fields.append(u'A1=' + self.format_angle(kw['angle']))
if 'radius2' in kw:
fields.append(u'R2=' + self.format_distance(kw['radius2']))
if 'angle2' in kw:
fields.append(u'A2=' + self.format_angle(kw['angle2']))
if 'angle12' in kw:
fields.append(u'A12=' + self.format_angle(kw['angle12']))
if 'line' in kw:
fields.append(u'Line=' + ('1' if kw['line'] else '0'))
self.write_fields(fields)
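# A minimal end-to-end sketch (file name and waypoint data are made-up examples):
#
#     with open('competition.cup', 'wb') as fp:
#         writer = Writer(fp)
#         writer.write_waypoint('Meiersberg', 'MEIER', 'DE', 51.1224, 6.4127)
#         writer.write_task('Short task', ['MEIER'])
#         writer.write_task_options(waypoint_distance=True)
#         writer.write_observation_zone(0, style=2, radius=400, line=True)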
|
|
import rrdtool
import os
import shutil
import logging
from database.model import Session
from database.definition_model import RoutingDefiniton
logger = logging.getLogger('fm.rrd')
class RRD():
def __init__(self, device_id, service_name, service_number=1):
self.folder = device_id + '/'
self.folder_path = None
self.file_name = service_name + '-' + str(service_number) + '.rrd'
self.file_path = None
self._build_paths()
return
def _build_paths(self):
session = Session()
rrd_location = session.query(RoutingDefiniton.route)\
.filter_by(name="rrdtool_location").scalar()
session.close()
self.folder_path = rrd_location + self.folder
self.file_path = self.folder_path + self.file_name
if not os.path.exists(self.folder_path):
os.makedirs(self.folder_path)
def _get_DS(self):
data = rrdtool.info(self.file_path)
data_sources = set()
for x in data.keys():
if x.startswith('ds['):
index = x.find(']')
if index > 3:
data_sources.add(x[3:index])
return list(data_sources)
def _fetch(self, RRA_name='LAST', start_time='end-1day', end='now'):
path = self.file_path
data = rrdtool.fetch(str(path), RRA_name, '-s', start_time, '-e', end)
return data
def create(self, sources):
"""
creates an rrd file
sources is a list of data sources to add.
Every item in the list must have a unique 'name' key
that identifies the item
"""
# raw data for the last 31 day
# average daily data for 1 year
# min daily data for 1 year
# max daily data for 1 year
data_archives = ["RRA:LAST:0.5:1:744",
"RRA:AVERAGE:0.5:24:365",
"RRA:MIN:0.5:24:365",
"RRA:MAX:0.5:24:365"]
data_source = []
for x in sources:
if 'name' in x:
data_source.append("DS:" + x['name'] + ":GAUGE:7200:-40:80")
path = self.file_path
logger.debug("Creating RRD file {0}".format(path))
rrdtool.create(path, "--step", '3600', data_source, data_archives)
return
def remove(self, remove_folder=False):
if remove_folder:
try:
logger.debug("Removing RRD folder {0}".format(self.folder_path))
shutil.rmtree(self.folder_path)
except OSError:
pass
else:
try:
logger.debug("Removing RRD file {0}".format(self.file_path))
os.remove(self.file_path)
except OSError:
pass
return
def update(self, values, timestamp='N'):
"""
update an rrd file
values = dictionary of all data sources in the rrd file.
The format is key = name of the data source and
value = value. Unknown values can be given as 'U'
All data sources in the RRD that are not present will be 'U'
"""
data_sources = self._get_DS()
template = ""
update = str(timestamp) + ":"
for k in values:
if k in data_sources:
template += "{}:".format(k)
update += "{}:".format(values[k])
update = update[:-1]
template = template[:-1]
path = self.file_path
logger.debug("Updating RRD file {0}".format(path))
rrdtool.update(path, "--template", template, update)
return
def retrieve_lastupdate(self):
logger.debug("Retrieving last update for RRD file {0}".format(self.file_path))
last_values = rrdtool.lastupdate(self.file_path)
return last_values
def retrieve(self, source_names, time_range='1D'):
"""
gets data from an rrd file
***inputs***
source_names = the names of the sources to get data for.
Format is a list of dictionaries with at least one key
being 'name' with the name of the source in the rrd file as the
value. All sources given in 'source_names' must be a data
source in the rrd file, but they do not need to contain all of
the data sources. A key called 'index' is added to each
dictionary with the value being the order of the data
sources in the returned list
time_range = what range to fetch from the rrd file
current acceptable values are:
'1D' default
'7D'
'1M'
'3M'
'6M'
'1Y'
***outputs***
data = a list of dictionaries with a 'timestamp' key that
is the millisecond timestamp of the data point, and another
key for every source given in 'source_names'
OR
data = False if one of the sources given in 'source_names'
is not a data source of the rrd file.
"""
logger.debug("Retrieving update for {0}".format(self.file_path))
data = []
RRA_name = 'LAST'
start_time = 'end-1day'
if time_range == '1D':
RRA_name = 'LAST'
start_time = 'end-1day'
elif time_range == '7D':
RRA_name = 'LAST'
start_time = 'end-7day'
elif time_range == '1M':
RRA_name = 'AVERAGE'
start_time = 'end-1month'
elif time_range == '3M':
RRA_name = 'AVERAGE'
start_time = 'end-3month'
elif time_range == '6M':
RRA_name = 'AVERAGE'
start_time = 'end-6month'
elif time_range == '1Y':
RRA_name = 'AVERAGE'
start_time = 'end-1year'
# fetch and split raw data
raw_data = self._fetch(RRA_name=RRA_name, start_time=start_time)
time = raw_data[0]
time_start = time[0]
time_end = time[1]
time_step = time[2]
sources = raw_data[1]
raw_data = raw_data[2]
# parse input source_names
# get index of source_names values in sources
# if a source is not in the sources of rrd_file, exit
for x in source_names:
try:
x['index'] = sources.index(x['name'])
except ValueError:
logger.error('{0} is not in {1}'.format(x['name'], self.file_path))
return False
step = 0
# calculate time_current by adding time_start with time_step
# to get the end of the first time window
time_current = time_start + time_step
while time_current < time_end:
point = {}
point['timestamp'] = time_current * 1000
# print(time_current)
for x in source_names:
if(raw_data[step][x['index']]):
point[x['name']] = round(raw_data[step][x['index']], 1)
data.append(point)
time_current += time_step
step += 1
return data
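# A minimal usage sketch (device/service names are placeholders; requires the
# rrdtool bindings and the "rrdtool_location" routing definition in the database):
#
#     rrd = RRD('device-001', 'temperature')
#     rrd.create([{'name': 'temp_c'}, {'name': 'humidity'}])
#     rrd.update({'temp_c': 21.5, 'humidity': 40})
#     rrd.retrieve([{'name': 'temp_c'}], time_range='1D')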
|
|
from __future__ import unicode_literals
from datetime import date
import unittest
from django.test import TestCase
from .models import Author
from django.db import models
from django.db import connection
class Div3Lookup(models.Lookup):
lookup_name = 'div3'
def as_sql(self, qn, connection):
lhs, params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params.extend(rhs_params)
return '%s %%%% 3 = %s' % (lhs, rhs), params
def as_oracle(self, qn, connection):
lhs, params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params.extend(rhs_params)
return 'mod(%s, 3) = %s' % (lhs, rhs), params
class Div3Transform(models.Transform):
lookup_name = 'div3'
def as_sql(self, qn, connection):
lhs, lhs_params = qn.compile(self.lhs)
return '%s %%%% 3' % (lhs,), lhs_params
def as_oracle(self, qn, connection):
lhs, lhs_params = qn.compile(self.lhs)
return 'mod(%s, 3)' % lhs, lhs_params
class YearTransform(models.Transform):
lookup_name = 'year'
def as_sql(self, qn, connection):
lhs_sql, params = qn.compile(self.lhs)
return connection.ops.date_extract_sql('year', lhs_sql), params
@property
def output_type(self):
return models.IntegerField()
class YearExact(models.lookups.Lookup):
lookup_name = 'exact'
def as_sql(self, qn, connection):
# We will need to skip the extract part, and instead go
# directly with the originating field, that is self.lhs.lhs
lhs_sql, lhs_params = self.process_lhs(qn, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(qn, connection)
# Note that we must be careful so that we have params in the
# same order as we have the parts in the SQL.
params = lhs_params + rhs_params + lhs_params + rhs_params
# We use PostgreSQL specific SQL here. Note that we must do the
# conversions in SQL instead of in Python to support F() references.
return ("%(lhs)s >= (%(rhs)s || '-01-01')::date "
"AND %(lhs)s <= (%(rhs)s || '-12-31')::date" %
{'lhs': lhs_sql, 'rhs': rhs_sql}, params)
YearTransform.register_lookup(YearExact)
class YearLte(models.lookups.LessThanOrEqual):
"""
The purpose of this lookup is to efficiently compare the year of the field.
"""
def as_sql(self, qn, connection):
# Skip the YearTransform above us (no possibility for efficient
# lookup otherwise).
real_lhs = self.lhs.lhs
lhs_sql, params = self.process_lhs(qn, connection, real_lhs)
rhs_sql, rhs_params = self.process_rhs(qn, connection)
params.extend(rhs_params)
# Build SQL where the integer year is concatenated with last month
# and day, then convert that to date. (We try to have SQL like:
# WHERE somecol <= '2013-12-31')
        # but also make it work if rhs_sql is a field reference.
return "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql), params
YearTransform.register_lookup(YearLte)
# We will register this class temporarily in the test method.
class InMonth(models.lookups.Lookup):
"""
InMonth matches if the column's month is the same as value's month.
"""
lookup_name = 'inmonth'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
# We need to be careful so that we get the params in right
# places.
params = lhs_params + rhs_params + lhs_params + rhs_params
return ("%s >= date_trunc('month', %s) and "
"%s < date_trunc('month', %s) + interval '1 months'" %
(lhs, rhs, lhs, rhs), params)
class LookupTests(TestCase):
def test_basic_lookup(self):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
models.IntegerField.register_lookup(Div3Lookup)
try:
self.assertQuerysetEqual(
Author.objects.filter(age__div3=0),
[a3], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(age__div3=1).order_by('age'),
[a1, a4], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(age__div3=2),
[a2], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(age__div3=3),
[], lambda x: x
)
finally:
models.IntegerField._unregister_lookup(Div3Lookup)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
def test_birthdate_month(self):
a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))
models.DateField.register_lookup(InMonth)
try:
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)),
[a3], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)),
[a2], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)),
[a1], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)),
[a4], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)),
[], lambda x: x
)
finally:
models.DateField._unregister_lookup(InMonth)
def test_div3_extract(self):
models.IntegerField.register_lookup(Div3Transform)
try:
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertQuerysetEqual(
baseqs.filter(age__div3=2),
[a2], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__lte=3),
[a1, a2, a3, a4], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__in=[0, 2]),
[a2, a3], lambda x: x)
finally:
models.IntegerField._unregister_lookup(Div3Transform)
class YearLteTests(TestCase):
def setUp(self):
models.DateField.register_lookup(YearTransform)
self.a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
self.a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
self.a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
self.a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))
def tearDown(self):
models.DateField._unregister_lookup(YearTransform)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
def test_year_lte(self):
baseqs = Author.objects.order_by('name')
self.assertQuerysetEqual(
baseqs.filter(birthdate__year__lte=2012),
[self.a1, self.a2, self.a3, self.a4], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(birthdate__year=2012),
[self.a2, self.a3, self.a4], lambda x: x)
self.assertNotIn('BETWEEN', str(baseqs.filter(birthdate__year=2012).query))
self.assertQuerysetEqual(
baseqs.filter(birthdate__year__lte=2011),
[self.a1], lambda x: x)
# The non-optimized version works, too.
self.assertQuerysetEqual(
baseqs.filter(birthdate__year__lt=2012),
[self.a1], lambda x: x)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
def test_year_lte_fexpr(self):
self.a2.age = 2011
self.a2.save()
self.a3.age = 2012
self.a3.save()
self.a4.age = 2013
self.a4.save()
baseqs = Author.objects.order_by('name')
self.assertQuerysetEqual(
baseqs.filter(birthdate__year__lte=models.F('age')),
[self.a3, self.a4], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(birthdate__year__lt=models.F('age')),
[self.a4], lambda x: x)
def test_year_lte_sql(self):
# This test will just check the generated SQL for __lte. This
# doesn't require running on PostgreSQL and spots the most likely
# error - not running YearLte SQL at all.
baseqs = Author.objects.order_by('name')
self.assertIn(
'<= (2011 || ', str(baseqs.filter(birthdate__year__lte=2011).query))
self.assertIn(
'-12-31', str(baseqs.filter(birthdate__year__lte=2011).query))
def test_postgres_year_exact(self):
baseqs = Author.objects.order_by('name')
self.assertIn(
'= (2011 || ', str(baseqs.filter(birthdate__year=2011).query))
self.assertIn(
'-12-31', str(baseqs.filter(birthdate__year=2011).query))
def test_custom_implementation_year_exact(self):
try:
# Two ways to add a customized implementation for different backends:
            # The first is monkey-patching the class.
def as_custom_sql(self, qn, connection):
lhs_sql, lhs_params = self.process_lhs(qn, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params + lhs_params + rhs_params
return ("%(lhs)s >= str_to_date(concat(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
"AND %(lhs)s <= str_to_date(concat(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
{'lhs': lhs_sql, 'rhs': rhs_sql}, params)
setattr(YearExact, 'as_' + connection.vendor, as_custom_sql)
self.assertIn(
'concat(',
str(Author.objects.filter(birthdate__year=2012).query))
finally:
delattr(YearExact, 'as_' + connection.vendor)
try:
# The other way is to subclass the original lookup and register the subclassed
# lookup instead of the original.
class CustomYearExact(YearExact):
# This method should be named "as_mysql" for MySQL, "as_postgresql" for postgres
# and so on, but as we don't know which DB we are running on, we need to use
# setattr.
def as_custom_sql(self, qn, connection):
lhs_sql, lhs_params = self.process_lhs(qn, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params + lhs_params + rhs_params
return ("%(lhs)s >= str_to_date(CONCAT(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
"AND %(lhs)s <= str_to_date(CONCAT(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
{'lhs': lhs_sql, 'rhs': rhs_sql}, params)
setattr(CustomYearExact, 'as_' + connection.vendor, CustomYearExact.as_custom_sql)
YearTransform.register_lookup(CustomYearExact)
self.assertIn(
'CONCAT(',
str(Author.objects.filter(birthdate__year=2012).query))
finally:
YearTransform._unregister_lookup(CustomYearExact)
YearTransform.register_lookup(YearExact)
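# A minimal sketch of permanent registration (outside the try/finally pattern
# used by the tests above, e.g. in an app's models module), assuming the same
# Div3Transform defined here:
#
#   models.IntegerField.register_lookup(Div3Transform)
#   Author.objects.filter(age__div3=1)        # transform plus implicit exact
#   Author.objects.filter(age__div3__lte=2)   # transform chained with __lte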
|
|
import tornado.web
import cProfile
import pstats
import time
from tornado import gen
import logging
from mutornadomon import net
from tornado.ioloop import IOLoop
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
logger = logging.getLogger('mutornadomon_profiler')
def LOCALHOST(request):
if not net.is_local_address(request.remote_ip):
return False
xff = request.headers.get('X-Forwarded-For', None)
if not xff or net.is_local_address(xff):
return True
return False
class HTTPEndpointMuTornadoMonProfiler(object):
"""Handles external HTTP requests for Profiler"""
def __init__(self, request_filter):
if request_filter is None:
self.request_filter = LOCALHOST
else:
self.request_filter = request_filter
def start(self, monitor, server_port):
self.server_port = server_port
self.profiler_app = tornado.web.Application([
(r'/profiler', TornadoStatsHandler, {
'monitor': monitor,
'request_filter': self.request_filter
}),
])
        # If listening is started directly, the IOLoop started by the service
        # will cause an issue resulting in high CPU usage, so start listening
        # only after the IOLoop has been started by the service.
io_loop = IOLoop.current(instance=False)
if io_loop is None:
logger.error('Cannot initialize Mutornadomon without IOLoop')
else:
io_loop.add_callback(self.start_listen)
def start_listen(self):
self.profiler_app.listen(self.server_port)
logger.info('MuTornadoMon Profiler Listening on port %s',
self.server_port)
def stop(self):
pass
class TornadoStatsHandler(tornado.web.RequestHandler):
"""
Profile Tornado IOLoop.
    The profiler is started when the URL endpoint is hit and stopped after
    profiletime (or the default profile collection time) expires.
    waittime makes the profiling periodic: profiling runs for profiletime
    after waiting for a period of waittime.
Params for the url are
:param sortby: specifies how the profiling data will be sorted
(ex: tottime or cumtime)
:param profiletime: specifies how long profiling will be done (msec)
:param waittime: specifies how long to wait when profiling periodically
    ex: curl "localhost:5951/profiler?sortby=cumtime&profiletime=4000"
    ex: curl "localhost:5951/profiler?profiletime=200&waittime=10000"
"""
def initialize(self, monitor, request_filter):
self.monitor = monitor
self.request_filter = request_filter
self.monitor.stop_profiler = False
def prepare(self):
if not self.request_filter(self.request):
self.send_error(403)
def print_profile_data(self, sortby, wait_time):
ps = None
# Stats fails if there is no profile data collected
try:
strm = StringIO()
ps = pstats.Stats(self.monitor.profiler, stream=strm)
except (TypeError, ValueError):
self.write("No profiling data collected")
return
if ps is not None:
ps.sort_stats(sortby)
ps.print_stats()
if wait_time == 0.0:
self.write(strm.getvalue())
else:
logger.info(time.time())
logger.info(strm.getvalue())
self.monitor.profiler.clear()
def set_options(self):
valid_sortby = ['calls', 'cumulative', 'cumtime', 'file', 'filename',
'module', 'ncalls', 'pcalls', 'line', 'name', 'nfl',
'stdname', 'time', 'tottime']
sortby = 'time'
profile_time = 2.0
wait_time = 0.0
# Dictates how the profile data is sorted
if 'sortby' in self.request.arguments:
sortby = self.request.arguments['sortby'][0]
if sortby not in valid_sortby:
sortby = 'time'
        # profiletime (msec) indicates how long each profiling run lasts
if 'profiletime' in self.request.arguments:
profile_time = float(self.request.arguments['profiletime'][0])/1000
# waittime(msec) indicates how long to wait between profiling
if 'waittime' in self.request.arguments:
wait_time = float(self.request.arguments['waittime'][0])/1000
self.write("Profiling will be done for every " +
str(wait_time * 1000) + " msec\n")
return sortby, profile_time, wait_time
def disable_profiler(self):
self.monitor.profiler_init = False
self.monitor.profiler_running = False
self.monitor.profiler.disable()
@gen.coroutine
def get(self):
        # Dictates whether to stop any ongoing profiling
if 'stopprofiler' in self.request.arguments:
self.monitor.profiler_init = False
self.monitor.stop_profiler = True
self.write("Stopped Profiling")
return
sortby, profile_time, wait_time = self.set_options()
# If profiling is not started, start it
if self.monitor.profiler_init is False:
self.write("Profiling done for " + str(profile_time * 1000) +
" msec\n")
if self.monitor.profiler is None:
self.monitor.profiler = cProfile.Profile()
else:
self.monitor.profiler.clear()
while True:
            # enable profiler for profile_time
self.monitor.profiler_init = True
yield gen.Task(self.monitor.io_loop.add_timeout,
time.time() + profile_time)
# disable profiling
self.disable_profiler()
self.print_profile_data(sortby, wait_time)
# Stop profiling for the duration of the wait_time
yield gen.Task(self.monitor.io_loop.add_timeout,
time.time() + wait_time)
# If wait_time is specified then continue profiling
# All the profiling data will be logged using the logger
if ((wait_time == 0) or (self.monitor.stop_profiler is True)):
break
class StatusHandler(tornado.web.RequestHandler):
def initialize(self, monitor, request_filter):
self.monitor = monitor
self.request_filter = request_filter
def prepare(self):
if not self.request_filter(self.request):
self.send_error(403)
def get(self):
self.write(self.monitor.metrics)
class HTTPEndpointExternalInterface(object):
"""External interface that exposes HTTP endpoints for polling by an
external process.
"""
def __init__(self, app, host_limit=None, request_filter=None):
self.app = app
if request_filter is None:
self.request_filter = LOCALHOST
else:
self.request_filter = request_filter
if host_limit is None:
self._host_limit = r'.*'
else:
self._host_limit = host_limit
def start(self, monitor):
self.app.add_handlers(self._host_limit, [
(r'/api/status', StatusHandler, {
'monitor': monitor,
'request_filter': self.request_filter
})
])
def stop(self):
pass
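# A minimal wiring sketch for the two endpoint classes above. It assumes a
# mutornadomon 'monitor' object that exposes the attributes used by
# TornadoStatsHandler and StatusHandler (io_loop, profiler, profiler_init,
# profiler_running, stop_profiler, metrics); that object is not constructed
# in this module:
#
#   profiler_iface = HTTPEndpointMuTornadoMonProfiler(request_filter=None)
#   profiler_iface.start(monitor, server_port=5951)  # listens once the IOLoop runs
#
#   status_iface = HTTPEndpointExternalInterface(app)
#   status_iface.start(monitor)                      # adds GET /api/status to 'app'
#
#   # curl "localhost:5951/profiler?sortby=cumtime&profiletime=4000"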
|
|
# Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from distutils.command.upload import upload as UploadCommand
from setuptools import setup, find_packages, Command
from setuptools.command.build_py import build_py as _build_py
from setuptools.command.test import test as TestCommand
from distutils.spawn import find_executable
from glob import glob
import os.path
import imp
import subprocess
import sys
THRIFT = find_executable('thrift1')
if THRIFT is None:
THRIFT = find_executable('thrift')
NAME = 'sparts'
ROOT = os.path.abspath(os.path.dirname(__file__))
def read(fname):
"""Read a file relative to the repository root"""
return open(os.path.join(ROOT, fname)).read()
def exists(fname):
"""Returns True if `fname` relative to `ROOT` exists"""
return os.path.exists(os.path.join(ROOT, fname))
def version():
"""Return the version number from sparts/__version__.py"""
file, pathname, description = imp.find_module(NAME, [ROOT])
return imp.load_module(NAME, file, pathname, description).__version__
# Initialize custom command handlers
cmdclass = {}
# These files are shadowed in the source repository from
# externals. If you are developing sparts, you can use git submodule to make
# sure you have the latest/greatest fb303 from thrift.
WANT_COPY = {
'externals/thrift/contrib/fb303/if/fb303.thrift':
'thrift/fb303.thrift',
}
# Let's figure out which files exist in which submodules...
CAN_COPY = []
for src in WANT_COPY:
if exists(src):
CAN_COPY.append(src)
class submodule_copy(Command):
user_options=[]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for src in CAN_COPY:
self.copy_file(os.path.join(ROOT, src),
os.path.join(ROOT, WANT_COPY[src]))
if CAN_COPY:
cmdclass['submodule_copy'] = submodule_copy
# If we have a thrift compiler installed, let's use it to re-generate
# the .py files. If not, we'll use the pre-generated ones.
if THRIFT is not None:
class gen_thrift(Command):
user_options=[]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.mkpath(os.path.join(ROOT, 'sparts', 'gen'))
for f in glob(os.path.join(ROOT, 'thrift', '*.thrift')):
self.spawn([THRIFT, '-out', os.path.join(ROOT, 'sparts', 'gen'),
'-v', '--gen', 'py:new_style',
os.path.join(ROOT, 'thrift', f)])
cmdclass['gen_thrift'] = gen_thrift
# Custom build_py handler. Triggers submodule_copy and gen_thrift
# if the environment is right.
class build_py(_build_py):
def run(self):
if CAN_COPY:
self.run_command('submodule_copy')
if THRIFT is not None:
self.run_command('gen_thrift')
_build_py.run(self)
cmdclass['build_py'] = build_py
# Custom PyTest Test command, per https://pytest.org/latest/goodpractises.html
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests', '-rfEsx']
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
cmdclass['test'] = PyTest
class NoDirtyUpload(UploadCommand):
def run(self):
result = subprocess.check_output("git status -z", shell=True)
for fstat in result.split(b"\x00"):
# Skip empty line (potentially at the end of the output)
if not len(fstat):
continue
stat = fstat[0:2]
fn = fstat[3:]
# New files are ok for now.
if stat == b'??':
continue
raise AssertionError("Unexpected git status (%s) for %s" %
(stat, fn))
UploadCommand.run(self)
cmdclass['upload'] = NoDirtyUpload
install_requires = [
'six>=1.5', # 1.5 required for bugfix in six.moves.queue import
'daemonize',
]
if sys.version < '2.7':
install_requires.append('ordereddict')
if sys.version < '3.2':
install_requires.append('futures')
tests_require = install_requires + [
'pytest',
'tornado>=1.2',
]
if sys.version < '2.7':
tests_require.append('unittest2')
if sys.version < '3.3':
# mock added to 3.3 as unittest.mock
tests_require.append('mock')
if sys.version < '3.0':
twisted_version = 'Twisted'
# twisted > 15.1 stop supporting python 2.6
if sys.version < '2.7':
twisted_version += '<15.5.0'
tests_require.append(twisted_version)
tests_require.append('thrift')
else:
# Py3k requires Twisted >= 14.0
twisted_version = 'Twisted>=14.0.0'
if sys.version < '3.3':
# Twisted-15.2 breaks support for python3.2 in a lot of ways
twisted_version += ', <15.2'
tests_require.append(twisted_version)
# TODO: for py3k use fbthrift instead of thrift?
VERSION = version()
setup(
name=NAME,
version=VERSION,
packages=find_packages(exclude=['tests', 'tests.*']),
description="Build services in python with as little code as possible",
long_description=read("README.rst"),
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'thrift': ['thrift'],
'tornado': ['tornado'],
'twisted': ['Twisted'],
},
author='Peter Ruibal',
author_email='[email protected]',
license='BSD+',
    keywords='service bootstrap daemon thrift tornado',
url='http://github.com/facebook/sparts',
download_url='https://github.com/facebook/sparts/archive/%s.tar.gz' % VERSION,
test_suite="tests",
cmdclass=cmdclass,
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
)
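# Example invocations for the custom commands registered in cmdclass above
# (a sketch; 'submodule_copy' and 'gen_thrift' are only registered when the
# submodule files / a thrift compiler are available, as detected above):
#
#   python setup.py submodule_copy   # copy fb303.thrift out of the submodule
#   python setup.py gen_thrift       # regenerate sparts/gen from thrift/*.thrift
#   python setup.py build_py         # runs both of the above when possible
#   python setup.py test             # run the pytest-based test suite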
|
|
# Copyright 2011 Gilt Groupe, INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
mothership.validate
a library of validation methods
for various types of data
"""
# imports
import base64
import struct
import types
import re
# All of the models and sqlalchemy are brought in
# to simplify referencing
from mothership.mothership_models import *
class ValidationError(Exception):
pass
# Validates realm input data
def v_realm(cfg, realm):
"""
[description]
validates realm input data
[parameter info]
required:
cfg: the config object. useful everywhere
realm: the realm we're trying to validate
[return value]
True/False based on success of validation
"""
realm_set = set(cfg.realms)
if realm in realm_set:
return True
else:
print 'valid realms are:'
print ' '.join(cfg.realms)
return False
# Validates site_id input data
def v_site_id(cfg, site_id):
"""
[description]
validates site_id input data
[parameter info]
required:
cfg: the config object. useful everywhere
site_id: the site_id we're trying to validate
[return value]
True/False based on success of validation
"""
site_id_set = set(cfg.site_ids)
if site_id in site_id_set:
return True
else:
print 'valid site_ids are:'
print ' '.join(cfg.site_ids)
return False
# Validates domain input data
def v_domain(cfg, domain):
"""
[description]
validates domain input data (probably not necessary)
[parameter info]
required:
cfg: the config object. useful everywhere
domain: the domain we're trying to validate
[return value]
True/False based on success of validation
"""
if domain == cfg.domain:
return True
else:
return False
# Validates ssh2 pubkeys
def v_ssh2_pubkey(cfg, key):
"""
[description]
validates ssh2 public keys
[parameter info]
required:
cfg: the config object. useful everywhere
key: the ssh2 public key we're trying to validate
[return value]
True/False based on success of validation
"""
DSA_KEY_ID="ssh-dss"
RSA_KEY_ID="ssh-rsa"
if re.match(DSA_KEY_ID+'|'+RSA_KEY_ID, key):
k = key.split(' ')
else:
return False
if k:
try:
data = base64.decodestring(k[1])
except IndexError:
return False
int_len = 4
str_len = struct.unpack('>I', data[:int_len])[0] # this should return 7
if DSA_KEY_ID in key:
if data[int_len:int_len+str_len] == DSA_KEY_ID:
return True
else:
return False
else:
if data[int_len:int_len+str_len] == RSA_KEY_ID:
return True
else:
return False
else:
return False
# Validates UNIX uids
def v_uid(cfg, uid):
"""
[description]
validates UNIX UIDs
[parameter info]
required:
cfg: the config object. useful everywhere
uid: the UID we're trying to validate
[return value]
True/False based on success of validation
"""
if type(uid) == types.IntType:
if uid >= cfg.uid_start and uid <= cfg.uid_end:
return True
else:
print "UID is outside the allowed range (%s to %s)" % (cfg.uid_start, cfg.uid_end)
return False
elif uid == None:
return False
else:
print "UID must be an integer!"
return False
# Looks for a UID in the db and returns true if present, false if absent
def v_uid_in_db(cfg, uid, realm, site_id):
"""
[description]
looks for a UID in the db
[parameter info]
required:
cfg: the config object. useful everywhere
uid: the UID we're trying to find
realm: the realm we're trying to find it in
site_id: the site_id we're trying to find it in
[return value]
True/False based on success of validation
"""
uidlist = []
u = cfg.dbsess.query(Users).\
filter(Users.realm==realm).\
filter(Users.site_id==site_id).all()
for userentry in u:
uidlist.append(userentry.uid)
uid_set = set(uidlist)
if uid in uid_set:
return True
else:
return False
# Looks for a GID in the db and returns true if present, false if absent
def v_gid_in_db(cfg, gid, realm, site_id):
"""
[description]
looks for a GID in the db
[parameter info]
required:
cfg: the config object. useful everywhere
gid: the GID we're looking for
realm: the realm we're trying to find it in
site_id: the site_id we're trying to find it in
[return value]
    True/False based on whether the GID is in the db
"""
gidlist = []
g = cfg.dbsess.query(Groups).\
filter(Groups.realm==realm).\
filter(Groups.site_id==site_id).all()
for groupentry in g:
gidlist.append(groupentry.gid)
gid_set = set(gidlist)
if gid in gid_set:
return True
else:
return False
# Validates UNIX gids
def v_gid(cfg, gid):
"""
[description]
validates UNIX GIDs
[parameter info]
required:
cfg: the config object. useful everywhere
gid: the GID we're trying to validate
[return value]
True/False based on success of validation
"""
if type(gid) == types.IntType:
if gid >= cfg.gid_start and gid <= cfg.gid_end:
return True
else:
print "GID is outside the allowed range (%s to %s)" % (cfg.gid_start, cfg.gid_end)
return False
elif gid == None:
return False
else:
print "GID must be an integer!"
return False
# get an unqualified or fully-qualified name (host or user)
# for either servers or users
def v_get_fqn(cfg, name):
"""
[description]
get an unqualified or fully-qualified name (host or user) for either servers or users. depending on what we're supplied with it will either return a fqn or present the user with a menu to pick the unqn and then return a fqn
[parameter info]
required:
cfg: the config object. useful everywhere
name: the user/group name we're fetching the fqn for OR part of an unqualified name
[return value]
returns a fully-qualified user/group name if we're supplied with a name
returns an un-qualified name if we're not supplied with a name
"""
count = 1
select = {}
sub = name.split('.')
# if we got a fully-qualified name/hostname
if len(sub) == 5:
n = sub[0]
r = sub[1]
s = sub[2]
d = sub[3]+'.'+sub[4]
# check to see if the domain is valid
if not v_domain(cfg, d):
raise ValidationError("invalid domain \"%s\", aborting" % d)
# check to see if the site_id is valid
if not v_site_id(cfg, s):
raise ValidationError("invalid site_id \"%s\", aborting" % s)
# check to see if the realm is valid
if not v_realm(cfg, r):
raise ValidationError("invalid realm \"%s\", aborting" % r)
# if everything is valid, fire back name.realm.site_id.domain
return n+'.'+r+'.'+s+'.'+d
# if we got everything but the name
elif len(sub) == 4:
r = sub[0]
s = sub[1]
d = sub[2]+'.'+sub[3]
# check to see if the domain is valid
if not v_domain(cfg, d):
raise ValidationError("invalid domain \"%s\", aborting" % d)
# check to see if the site_id is valid
if not v_site_id(cfg, s):
raise ValidationError("invalid site_id \"%s\", aborting" % s)
# check to see if the realm is valid
if not v_realm(cfg, r):
raise ValidationError("invalid realm \"%s\", aborting" % r)
# if everything is valid, fire back realm.site_id.domain
return r+'.'+s+'.'+d
# 3 items could be either site_id.domain.tld or name.realm.site_id
# let's figure out which it is...
elif len(sub) == 3:
s = sub[0]
d = sub[1]+'.'+sub[2]
n = sub[0]
r = sub[1]
sid = sub[2]
# validate the domain
if not v_domain(cfg, d):
# if the domain is invalid, maybe it's a realm.site_id
if not v_realm(cfg, r) and not v_site_id(cfg, sid):
raise ValidationError("invalid domain \"%s\" or realm.site_id \"%s.%s\", aborting" % (d, r, s))
# check both again to make sure both are valid
elif v_realm(cfg, r) and v_site_id(cfg, sid):
# we only have one domain configured, tack it on and
# fire back name.realm.site_id.domain
return n+'.'+r+'.'+sid+'.'+cfg.domain
# both domain and one of either realm or site_id is bad
else:
raise ValidationError("site_id \"%s\" or realm \"%s\" info is bad, aborting" % (s, r))
# if we got site_id.domain.tld, and the domain checks out
# validate the site_id
elif not v_site_id(cfg, s):
raise ValidationError("invalid site_id \"%s\", aborting" % s)
# if the site_id and domain check out, present the user with a
# menu to pick the realm
else:
menu = ''
for realm in cfg.realms:
menu += str(count)+') '+realm+'.'+s+'.'+d+'\n'
select[count] = realm+'.'+s+'.'+d
count += 1
menu += "Please select the one you would like to use: "
ans = raw_input(menu)
            if not ans or int(ans) < 1 or int(ans) >= count:
raise ValidationError("selection aborted")
else:
# return the fqn without the name
return select[int(ans)]
# if we got two items, it could be either domain.tld or
# realm.site_id, let's find out which...
elif len(sub) == 2:
d = sub[0]+'.'+sub[1]
r = sub[0]
s = sub[1]
# validate the domain
if not v_domain(cfg, d):
# if it's not a domain, validate the realm and site_id
if not v_realm(cfg, r) and not v_site_id(cfg, s):
raise ValidationError("entry was not a realm.site_id or domain.tld, aborting")
# we only have one domain configured, tack it on
else:
return r+'.'+s+'.'+cfg.domain
# if we got a valid domain, present the user with a menu
# to pick the realm and site_id
else:
menu = "\nMultiple options found for %s:\n-----------------------\n" % name
for realm in cfg.realms:
for site_id in cfg.site_ids:
menu += str(count)+') '+realm+'.'+site_id+'.'+d+'\n'
select[count] = realm+'.'+site_id+'.'+d
count += 1
menu += "Please select the one you would like to use: "
ans = raw_input(menu)
            if not ans or int(ans) < 1 or int(ans) >= count:
raise ValidationError("selection aborted")
else:
return select[int(ans)]
# if we only got one item, it's gotta be a name/hostname.
# present the user with a menu to pick everything
elif len(sub) == 1:
menu = "\nMultiple options found for \"%s\":\n-----------------------\n" % name
for realm in cfg.realms:
for site_id in cfg.site_ids:
menu += str(count)+') '+realm+'.'+site_id+'.'+cfg.domain+'\n'
select[count] = realm+'.'+site_id+'.'+cfg.domain
count += 1
menu += "Please select the one you would like to use: "
ans = raw_input(menu)
        if not ans or int(ans) < 1 or int(ans) >= count:
raise ValidationError("selection aborted")
else:
# return the fully-qualified name, only if we were supplied
# a name to begin with
return sub[0]+'.'+select[int(ans)]
# if we got input that's too long, let the user know then bail
elif len(sub) > 5:
print sub
raise ValidationError("name.realm.site_id.domain.tld is the maximum length of a name")
    # if we got some sort of weird (zero-length, probably) input, blow up.
else:
raise ValidationError("get_fqn() called incorrectly!")
# split a fqn into realm, site_id, domain
# this assumes you've validated the fqn first
def v_split_fqn(fqn):
"""
[description]
split a fqn into realm, site_id, domain
this assumes you've validated the fqn first
[parameter info]
required:
fqn: the fully-qualified or unqualified name we're splitting
[return value]
    returns (name, realm, site_id, domain) for a fully-qualified name
    returns (realm, site_id, domain) if no name component was supplied
"""
if not fqn:
raise ValidationError("split_fqn() called with no fqn!")
else:
f = fqn.split('.')
# if we got a fully-qualified name (5 items), return all items
if len(f) == 5:
return f[0], f[1], f[2], f[3]+'.'+f[4]
# if we got just realm.site_id.domain
elif len(f) == 4:
return f[0], f[1], f[2]+'.'+f[3]
# if we get anything else, blow up
else:
raise ValidationError("v_split_fqn() called incorrectly")
# find out if a realm.site_id is in the Server table
def v_unqn_in_servers(cfg, realm, site_id):
"""
[description]
looks for realm and site_id in the Server table
[parameter info]
required:
cfg: the almighty config object. useful everywhere
realm: the realm we're looking for
site_id: the site_id we're looking for
[return value]
True/False based on success/failure
"""
# gather realm, site_id data
d = cfg.dbsess.query(Server).\
filter(Server.realm==realm).\
filter(Server.site_id==site_id).first()
if d:
return True
else:
return False
# find out if a realm.site_id is in the Users table
def v_unqn_in_users(cfg, realm, site_id):
"""
[description]
looks for realm and site_id in the Users table
[parameter info]
required:
cfg: the almighty config object. useful everywhere
realm: the realm we're looking for
site_id: the site_id we're looking for
[return value]
True/False based on success/failure
"""
# gather realm, site_id data
d = cfg.dbsess.query(Users).\
filter(Users.realm==realm).\
filter(Users.site_id==site_id).first()
if d:
return True
else:
return False
# find out if a realm.site_id is in the Groups table
def v_unqn_in_groups(cfg, realm, site_id):
"""
[description]
looks for realm and site_id in the Groups table
[parameter info]
required:
cfg: the almighty config object. useful everywhere
realm: the realm we're looking for
site_id: the site_id we're looking for
[return value]
True/False based on success/failure
"""
# gather realm, site_id data
d = cfg.dbsess.query(Groups).\
filter(Groups.realm==realm).\
filter(Groups.site_id==site_id).first()
if d:
return True
else:
return False
# find out if a realm.site_id is in the KV table
def v_unqn_in_kv(cfg, realm, site_id):
"""
[description]
looks for realm and site_id in the KV table
[parameter info]
required:
cfg: the almighty config object. useful everywhere
realm: the realm we're looking for
site_id: the site_id we're looking for
[return value]
True/False based on success/failure
"""
# gather realm, site_id data
d = cfg.dbsess.query(KV).\
filter(KV.realm==realm).\
filter(KV.site_id==site_id).first()
if d:
return True
else:
return False
# find out if a realm.site_id is in the dns_addendum table
def v_unqn_in_dns_addendum(cfg, realm, site_id):
"""
[description]
looks for realm and site_id in the DnsAddendum table
[parameter info]
required:
cfg: the almighty config object. useful everywhere
realm: the realm we're looking for
site_id: the site_id we're looking for
[return value]
True/False based on success/failure
"""
# gather realm, site_id data
d = cfg.dbsess.query(DnsAddendum).\
filter(DnsAddendum.realm==realm).\
filter(DnsAddendum.site_id==site_id).first()
if d:
return True
else:
return False
def v_get_user_obj(cfg, username):
"""
[description]
user names can be passed to functions in several ways, sometimes containing realm and/or site_id information. this function takes arbitrary input and parses it, then calls v_user_picker() to select a user object from the database and returns it.
[parameter info]
required:
cfg: the config object. useful everywhere
username: the username we want to parse
[return value]
returns a Users object
"""
# create a list of all the users with this name in the db
# we explicitly use the list function because the return behaves
# differently depending on the number of user instances in the db
# just one instance returns a user object, more than one returns a
# list of user objects so we force it to be a list in either case
f = username.split('.')
if len(f) == 1:
u = list(cfg.dbsess.query(Users).\
filter(Users.username==username))
elif len(f) > 1:
# validate/construct/get the realm.site_id.domain data
fqun = v_get_fqn(cfg, name=username)
username, realm, site_id, domain = v_split_fqn(fqun)
fqn = realm+'.'+site_id+'.'+domain
u = list(cfg.dbsess.query(Users).\
filter(Users.username==username).\
filter(Users.realm==realm).\
filter(Users.site_id==site_id))
else:
raise ValidationError("v_get_user_obj() called incorrectly")
if u:
u = v_user_picker(cfg, u)
if u:
return u
else:
raise ValidationError("something has gone terribly wrong in the v_get_user_obj() function")
else:
return False
def v_get_group_obj(cfg, groupname):
"""
[description]
group names can be passed to functions in several ways, sometimes containing realm and/or site_id information. this function takes arbitrary input and parses it, then calls v_group_picker() to select a group object from the database and returns it.
[parameter info]
required:
cfg: the config object. useful everywhere
groupname: the groupname we want to parse
[return value]
returns a Groups object
"""
# create a list of all the groups with this name in the db
# we explicitly use the list function because the return behaves
# differently depending on the number of group instances in the db
# just one instance returns a group object, more than one returns a
# list of group objects
f = groupname.split('.')
if len(f) == 1:
g = list(cfg.dbsess.query(Groups).\
filter(Groups.groupname==groupname))
elif len(f) > 1:
# validate/construct/get the realm.site_id.domain data
fqgn = v_get_fqn(cfg, name=groupname)
groupname, realm, site_id, domain = v_split_fqn(fqgn)
fqn = realm+'.'+site_id+'.'+domain
g = list(cfg.dbsess.query(Groups).\
filter(Groups.groupname==groupname).\
filter(Groups.realm==realm).\
filter(Groups.site_id==site_id))
else:
raise ValidationError('v_get_group_obj() called incorrectly')
if g:
g = v_group_picker(cfg, g)
if g:
return g
else:
raise ValidationError('something has gone terribly wrong in the v_get_group_obj() function')
else:
return False
def v_get_host_obj(cfg, hostname):
"""
[description]
host names can be passed to functions in several ways, sometimes containing realm and/or site_id information. this function takes arbitrary input and parses it, then calls v_host_picker() to select a server object from the database and returns it.
[parameter info]
required:
cfg: the config object. useful everywhere
hostname: the hostname we want to parse
[return value]
returns a Servers object
"""
# create a list of all the hosts with this name in the db
# we explicitly use the list function because the return behaves
# differently depending on the number of host instances in the db
# just one instance returns a host object, more than one returns a
# list of host objects
h = hostname.split('.')
if len(h) == 1:
h = list(cfg.dbsess.query(Server).\
filter(Server.hostname==hostname))
elif len(h) > 1:
# validate/construct/get the realm.site_id.domain data
fqdn = v_get_fqn(cfg, name=hostname)
hostname, realm, site_id, domain = v_split_fqn(fqdn)
fqn = realm+'.'+site_id+'.'+domain
h = list(cfg.dbsess.query(Server).\
filter(Server.hostname==hostname).\
filter(Server.realm==realm).\
filter(Server.site_id==site_id))
if h:
h = v_host_picker(cfg, h)
if h:
return h
else:
raise ValidationError('something has gone terribly wrong in the v_get_host_obj() function')
else:
return False
def v_parse_name(cfg, username=None, groupname=None, hostname=None):
"""
this function is deprecated. please stop using it
"""
raise ValidationError("v_parse_name() is deprecated. please use one of the following:\nv_get_user_obj()\nv_get_group_obj()\nv_get_host_obj()")
# User picker, used in the event we need to present the operator with a
# menu of multiple user entries. this takes a list of Users objects
# used mainly by parse_name() to pick user/group entries
def v_user_picker(cfg, u):
"""
[description]
used in the event we need to present the operator with a menu of multiple user entries. used mainly by parse_name() to pick user/group entries. returns a Users object
[parameter info]
required:
cfg: the config object. useful everywhere
u: list of Users objects to pick from
[return value]
returns a Users object
"""
if len(u) == 1:
# if we only get one, return it
return u[0]
elif len(u) > 1:
count = 1
menu = "\nUser found in multiple areas"
if count <= len(u):
for user in u:
menu += '\n%s) %s.%s.%s' % (count, user.username, user.realm, user.site_id)
count += 1
menu += "\nPlease pick one: "
ans = raw_input(menu)
        if not ans or int(ans) < 1 or int(ans) >= count:
raise ValidationError('invalid selection, aborting')
else:
# set the user object...this creeps me out, but works -dk
u = u[int(ans)-1]
if u:
return u
else:
raise ValidationError('oops, something went wrong in user_picker()!')
else:
raise ValidationError('user_picker() called with zero-length user list')
# Group picker, used in the event we need to present the operator with a
# menu of multiple group entries. this takes a list of Groups objects
# used mainly by parse_name() to pick user/group entries
def v_group_picker(cfg, g):
"""
[description]
used in the event we need to present the operator with a menu of multiple group entries. used mainly by parse_name() to pick user/group entries. returns a Groups object
[parameter info]
required:
cfg: the config object. useful everywhere
g: list of Groups objects to pick from
[return value]
returns a Groups object
"""
if len(g) == 1:
# if we only get one, return it
return g[0]
elif len(g) >1:
count = 1
menu = "\nGroup found in multiple areas"
if count <= len(g):
for group in g:
menu += '\n%s) %s.%s.%s' % (count, group.groupname, group.realm, group.site_id)
count += 1
menu += "\nPlease pick one: "
ans = raw_input(menu)
        if not ans or int(ans) < 1 or int(ans) >= count:
raise ValidationError('invalid selection, aborting')
else:
# set the group object...this creeps me out, but works -dk
g = g[int(ans)-1]
if g:
return g
else:
raise ValidationError('oops, something went wrong in v_group_picker()!')
else:
raise ValidationError('v_group_picker() called with zero-length group list')
# Host picker, used in the event we need to present the operator with a
# menu of multiple host entries. this takes a list of Server objects
# used mainly by parse_name() to pick host entries
def v_host_picker(cfg, h):
"""
[description]
used in the event we need to present the operator with a menu of multiple host entries. used mainly by parse_name() to pick user/host entries. returns a Server object
[parameter info]
required:
cfg: the config object. useful everywhere
    h: list of Server objects to pick from
[return value]
returns a Server object
"""
if len(h) == 1:
# if we only get one, return it
return h[0]
elif len(h) >1:
count = 1
menu = "\nHost found in multiple areas"
if count <= len(h):
for host in h:
menu += '\n%s) %s.%s.%s' % (count, host.hostname, host.realm, host.site_id)
count += 1
menu += "\nPlease pick one: "
ans = raw_input(menu)
        if not ans or int(ans) < 1 or int(ans) >= count:
raise ValidationError('invalid selection, aborting')
else:
# set the host object...this creeps me out, but works -dk
h = h[int(ans)-1]
if h:
return h
else:
raise ValidationError('oops, something went wrong in v_host_picker()!')
else:
raise ValidationError('v_host_picker() called with zero-length host list')
# VERY basic validation of user- group- or host-name input
def v_validate_name(cfg, name):
"""
[description]
VERY basic validation of user- group- or host-name input
"""
if not name:
raise ValidationError('v_validate_name() called without a name!')
if re.search("[^A-Za-z0-9_\-.]", name):
print 'name contains illegal characters! allowed characters are: A-Z a-z 0-9 _ - .'
return False
if len(name) < 4:
print 'too short! name must have more than 3 characters'
return False
return True
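# A minimal usage sketch tying the helpers above together (assumes a configured
# 'cfg' object with a live dbsess; the username is a hypothetical example):
#
#   if v_validate_name(cfg, 'jsmith'):
#       user = v_get_user_obj(cfg, 'jsmith')   # may prompt via v_user_picker()
#       if user:
#           print user.username, user.realm, user.site_id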
|
|
"""Sensor for the Open Sky Network."""
from __future__ import annotations
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
LENGTH_KILOMETERS,
LENGTH_METERS,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import distance as util_distance, location as util_location
CONF_ALTITUDE = "altitude"
ATTR_ICAO24 = "icao24"
ATTR_CALLSIGN = "callsign"
ATTR_ALTITUDE = "altitude"
ATTR_ON_GROUND = "on_ground"
ATTR_SENSOR = "sensor"
ATTR_STATES = "states"
DOMAIN = "opensky"
DEFAULT_ALTITUDE = 0
EVENT_OPENSKY_ENTRY = f"{DOMAIN}_entry"
EVENT_OPENSKY_EXIT = f"{DOMAIN}_exit"
SCAN_INTERVAL = timedelta(seconds=12) # opensky public limit is 10 seconds
OPENSKY_ATTRIBUTION = (
"Information provided by the OpenSky Network (https://opensky-network.org)"
)
OPENSKY_API_URL = "https://opensky-network.org/api/states/all"
OPENSKY_API_FIELDS = [
ATTR_ICAO24,
ATTR_CALLSIGN,
"origin_country",
"time_position",
"time_velocity",
ATTR_LONGITUDE,
ATTR_LATITUDE,
ATTR_ALTITUDE,
ATTR_ON_GROUND,
"velocity",
"heading",
"vertical_rate",
"sensors",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RADIUS): vol.Coerce(float),
vol.Optional(CONF_NAME): cv.string,
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_ALTITUDE, default=DEFAULT_ALTITUDE): vol.Coerce(float),
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Open Sky platform."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
add_entities(
[
OpenSkySensor(
hass,
config.get(CONF_NAME, DOMAIN),
latitude,
longitude,
config.get(CONF_RADIUS),
config.get(CONF_ALTITUDE),
)
],
True,
)
class OpenSkySensor(SensorEntity):
"""Open Sky Network Sensor."""
def __init__(self, hass, name, latitude, longitude, radius, altitude):
"""Initialize the sensor."""
self._session = requests.Session()
self._latitude = latitude
self._longitude = longitude
self._radius = util_distance.convert(radius, LENGTH_KILOMETERS, LENGTH_METERS)
self._altitude = altitude
self._state = 0
self._hass = hass
self._name = name
self._previously_tracked = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
def _handle_boundary(self, flights, event, metadata):
"""Handle flights crossing region boundary."""
for flight in flights:
if flight in metadata:
altitude = metadata[flight].get(ATTR_ALTITUDE)
longitude = metadata[flight].get(ATTR_LONGITUDE)
latitude = metadata[flight].get(ATTR_LATITUDE)
icao24 = metadata[flight].get(ATTR_ICAO24)
else:
                # Assume the flight has landed if its metadata is missing.
altitude = 0
longitude = None
latitude = None
icao24 = None
data = {
ATTR_CALLSIGN: flight,
ATTR_ALTITUDE: altitude,
ATTR_SENSOR: self._name,
ATTR_LONGITUDE: longitude,
ATTR_LATITUDE: latitude,
ATTR_ICAO24: icao24,
}
self._hass.bus.fire(event, data)
def update(self):
"""Update device state."""
currently_tracked = set()
flight_metadata = {}
states = self._session.get(OPENSKY_API_URL).json().get(ATTR_STATES)
for state in states:
flight = dict(zip(OPENSKY_API_FIELDS, state))
callsign = flight[ATTR_CALLSIGN].strip()
if callsign != "":
flight_metadata[callsign] = flight
else:
continue
missing_location = (
flight.get(ATTR_LONGITUDE) is None or flight.get(ATTR_LATITUDE) is None
)
if missing_location:
continue
if flight.get(ATTR_ON_GROUND):
continue
distance = util_location.distance(
self._latitude,
self._longitude,
flight.get(ATTR_LATITUDE),
flight.get(ATTR_LONGITUDE),
)
if distance is None or distance > self._radius:
continue
altitude = flight.get(ATTR_ALTITUDE)
if altitude > self._altitude and self._altitude != 0:
continue
currently_tracked.add(callsign)
if self._previously_tracked is not None:
entries = currently_tracked - self._previously_tracked
exits = self._previously_tracked - currently_tracked
self._handle_boundary(entries, EVENT_OPENSKY_ENTRY, flight_metadata)
self._handle_boundary(exits, EVENT_OPENSKY_EXIT, flight_metadata)
self._state = len(currently_tracked)
self._previously_tracked = currently_tracked
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: OPENSKY_ATTRIBUTION}
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return "flights"
@property
def icon(self):
"""Return the icon."""
return "mdi:airplane"
|
|
#!/usr/bin/python
import pygame
from pygame.sprite import DirtySprite
import random
import time
import logging
from pygame.surface import Surface
from src.Properties import Color, Size
from tts_client import invoke_tts
from utils import *
RATE_PERCENTAGE_MAX = 25
RATE_PERCENTAGE_MIN = -25
# RANGE_PERCENTAGE_MAX = 50 # RG
# RANGE_PERCENTAGE_MIN = -50 # RG
ENERGY_PERCENTAGE_MAX = 5 # RG
ENERGY_PERCENTAGE_MIN = -5 # RG
FOMEAN_PERCENTAGE_MAX = 10 # RG
FOMEAN_PERCENTAGE_MIN = -10 # RG
#mpatacchiola: include for the robot libraries
import threading
import sys
sys.path.insert(1, "./pynaoqi-python2.7-2.1.3.3-linux64") #import this module for the nao.py module
from naoqi import ALProxy
import random #randint to generate random advice
import csv #to read the configuration file with the robot IP and PORT
#mpatacchiola: move the head
def robot_move_head(direction, sleep, avatar_name, csv_path='./robot.csv'):
"""
Move the head of the robot
I moves the head of the robot in the direction of the screen or the participant.
Before talking it looks to the participant. At the end it looks back to the screen.
It requires the robot.csv file which must contain where the participant is placed.
@param direction string identifying where to look ('screen' or 'participant')
@parma sleep how many second sleep before the movement
@param avatar_name the name of the avatar to move
@param csv_path the path of the CSV file
"""
avatar_found = False
with open(csv_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
conf_avatar_name = row[0]
conf_nao_ip = row[1]
conf_nao_port = row[2]
conf_nao_movement = row[3]
conf_participant_position = row[4]
if(conf_avatar_name == avatar_name):
avatar_found = True
NAO_IP = conf_nao_ip
NAO_PORT = conf_nao_port
NAO_MOVE = conf_nao_movement
break
if(avatar_found == False):
print("ROBOT ERROR: avatar '" + str(avatar_name) + "' not found!")
return 0
try:
al_motion_proxy = ALProxy("ALMotion", NAO_IP, int(NAO_PORT))
except Exception, e:
print "ROBOT ERROR: Error creating the ALMotion proxy!"
print str(e)
#self._al_motion_proxy.setAngles("HeadPitch", angle, HEAD_SPEED)
time.sleep(sleep)
head_speed = 0.3 # change the speed if needed
if(conf_participant_position == "left" and direction=="screen"):
angle = +1.0 # 60 degrees = 1.0 radians
elif(conf_participant_position == "left" and direction=="participant"):
angle = -1.0 # 60 degrees = 1.0 radians
elif(conf_participant_position == "right" and direction=="screen"):
angle = +1.0 # 60 degrees = 1.0 radians
elif(conf_participant_position == "right" and direction=="participant"):
angle = -1.0 # 60 degrees = 1.0 radians
else:
print "ROBOT ERROR: Error the combination does not exist participant_position=" + str(conf_participant_position) + "; direction=" + str(direction)
# Move the head in the direction
if NAO_MOVE == "True" or NAO_MOVE == "true" or NAO_MOVE == "TRUE":
al_motion_proxy.setAngles("HeadYaw", angle, head_speed)
#mpatacchiola: creating the motion object
def robot_animation(advice, avatar_name, csv_path='./robot.csv', verbose=True):
"""Given the name of the avatar and an advice it animates one of the robots.
The gestures are sampled among the NAO animations.
The function will look for a file called 'robot.csv' containing:
comma separated values for [Avatar name, IP address, PORT number, MOVE True/False].
The file must be in the root folder.
@param advice the advice string
@param avatar_name the name of the avatar (Veronika, Monika, Tereza)
@param csv_path the path where the CSV file is located (default is root)
@param verbose if True it prints the steps on terminal
"""
avatar_found = False
with open(csv_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
conf_avatar_name = row[0]
conf_nao_ip = row[1]
conf_nao_port = row[2]
conf_nao_movement = row[3]
if(conf_avatar_name == avatar_name):
avatar_found = True
NAO_IP = conf_nao_ip
NAO_PORT = conf_nao_port
NAO_MOVE = conf_nao_movement
break
if(avatar_found == False):
if verbose: print("ROBOT ERROR: AVATAR '" + str(avatar_name) + "' NOT FOUND!")
return 0
if verbose: print "ROBOT init..."
if verbose: print("ROBOT IP: " + str(NAO_IP))
if verbose: print("ROBOT PORT: " + str(NAO_PORT))
if verbose: print("ROBOT MOVE: " + str(NAO_MOVE))
if verbose: print("ROBOT avatar: " + str(avatar_name))
if verbose: print("ROBOT advice: " + str(advice))
# If the movements are enabled it moves during the speech
if NAO_MOVE == "True" or NAO_MOVE == "true" or NAO_MOVE == "TRUE":
animated_speech_proxy = ALProxy("ALAnimatedSpeech", NAO_IP, int(NAO_PORT))
#set the local configuration
configuration = {"bodyLanguageMode":"contextual"}
#say the text with the local configuration
gesture_list = list()
gesture_list.append("^start(animations/Stand/Gestures/Choice_1) ")
gesture_list.append("^start(animations/Stand/Gestures/Choice_2) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_1) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_2) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_4) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_6) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_7) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_8) ")
gesture_list.append("^start(animations/Stand/Gestures/Explain_9) ")
sampled_gesture = gesture_list[random.randint(0,len(gesture_list)-1)]
full_string = sampled_gesture + advice #the gesture plus the advice
animated_speech_proxy.say(full_string, configuration)
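# A sample robot.csv row in the layout the two robot_* helpers above expect:
# [avatar name, NAO IP, NAO port, movement True/False, participant position].
# robot_animation() only reads the first four columns; the IP and port below
# are placeholders:
#
#   Veronika,192.168.1.23,9559,True,left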
class HelperUnknownSignal(Surface):
def __init__(self, names=''):
super(HelperUnknownSignal, self).__init__(Size.HELPER_UNKNOWN_SIGNAL, pygame.SRCALPHA)
self.fill(color=Color.DIRECTION)
center_x, center_y = self.get_rect().center
mytext = _('not-understood')
line1 = largeText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
mytext = _('please-repeat')
line2 = smallText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
mytext = _('and-remember-who1')
line3 = miniText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
mytext = _('and-remember-who2') % names
line4 = miniText.render(unicode(mytext.decode('utf8')), True, Color.WHITE)
#AG line1 = largeText.render(_('not-understood'), True, Color.WHITE)
#AG line2 = smallText.render(_('please-repeat'), True, Color.WHITE)
#AG line3 = miniText.render(_('and-remember-who1'), True, Color.WHITE)
#AG line4 = miniText.render(_('and-remember-who2') % names, True, Color.WHITE)
self.blit(line1, (center_x - (line1.get_width() / 2), 10))
self.blit(line2, (center_x - (line2.get_width() / 2), 50))
self.blit(line3, (center_x - (line3.get_width() / 2), 90))
self.blit(line4, (center_x - (line4.get_width() / 2), 120))
CHARACTERS = {0: ('Tereza',
pygame.image.load('resources/characters/eugenia.png'),
'+0%', # RG it used to say +5%
pygame.image.load('resources/characters/thumb_eugenia.png'),
'Tereza'),
1: ('Monika',
pygame.image.load('resources/characters/amanda.png'),
'-10%', # RG it used to say -20%
pygame.image.load('resources/characters/thumb_amanda.png'),
'Monika'),
2: ('Veronika',
pygame.image.load('resources/characters/veronica.png'),
'+0%', # RG it used to say +5%
pygame.image.load('resources/characters/thumb_veronica.png'),
'Veronika'),#####AG:FIX!
3: ('undetermined',
HelperUnknownSignal(),
None,
None,
'')}
advice_prefixes = [_('advice-prefix-1'),
_('advice-prefix-2'),
_('advice-prefix-3'),
_('advice-prefix-4'),
_('advice-prefix-5'),
_('advice-prefix-6'),
_('advice-prefix-7'),
_('advice-prefix-8'),
_('advice-prefix-9'),
_('advice-prefix-10')]
advices_suffixes = {1: _('advice-suffix-1'),
2: _('advice-suffix-2'),
3: _('advice-suffix-3'),
4: _('advice-suffix-4'),
5: _('advice-suffix-5'),
6: _('advice-suffix-6'),
7: _('advice-suffix-7'),
8: _('advice-suffix-8'),
9: _('advice-suffix-9'),
10: _('advice-suffix-10'),
11: _('advice-suffix-11'),
12: _('advice-suffix-12'),
13: _('advice-suffix-13')}
class Helper(DirtySprite):
def __init__(self, id, game, initial_score=0):
self.game = game
super(Helper, self).__init__()
self.last_advice = 0
self.score = initial_score
self.rate = 0
# self.pitch_range = 0 # RG
self.energy = 0 # RG
self.f0mean = 0 # RG
self.id = id
self.name, self.image, self.pitch, self.thumb, self.nombre = CHARACTERS[id]
logging.info('Helper=> [%(name)s] initial score: [%(score)i]',
{'name': self.name, 'score': self.score})
self.rect = self.image.get_rect()
self.hide()
# def set_configuration(self, config):
# self.conf = config
def get_possible_advices(self):
advs = {}
possibles = list(set(values_of(self.game.human_player.hand)))
for val in possibles:
if val not in values_of(self.game.comp_player.hand):
key = -15
else:
a = cant_of_same_rank(self.game.human_player.hand, val)
b = cant_of_same_rank(self.game.comp_player.hand, val)
if a + b == 4:
key = 5
else:
key = b
# If the key is not in the dict yet, create it
if key in advs:
advs[key].append(val)
else:
advs[key] = [val]
return advs
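# Illustrative note (added for clarity, not from the original author): the key
# ranks how useful it is to ask for a given card value. Roughly: -15 when the
# computer holds none of that rank (a wasted ask), 5 when the two hands together
# hold all four cards (a guaranteed book), otherwise the number of matching cards
# in the computer's hand. E.g. if the human holds two 7s and the computer one 7:
# a=2, b=1, a+b != 4, so rank 7 is filed under key 1.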
def get_an_advice(self):
advices = self.get_possible_advices()
# candidates = list(set(self.game.human_player.hand))
s = self.choose_better_advs(advices.keys())
candidates = advices[s]
self.score += s
logging.info('Helper=> [%(name)s] updated score to: [%(score)i]',
{'name': self.name, 'score': self.score})
# if not candidates:
# candidates = values_of(self.game.human_player.hand) # if no intersection between those two, random guess
return random.choice(candidates)
def speech_advice(self):
#mpatacchiola: (sharedscreen) allows the movement of the robot head (it looks at the participant)
direction = "participant"
sleep = 0.0
t = threading.Thread(target=robot_move_head, args=(direction, sleep, self.name,))
t.start()
advice = random.choice(advice_prefixes) + advices_suffixes[self.last_advice]
logging.info('Helper=> [%(nombre)s] giving advice: %(advice)s', {'nombre': self.name, 'advice': advice})
invoke_tts(filename=self.game.get_response_filename(),
rate_change=self.calculate_percentage_rate(),
# range_change=self.calculate_percentage_range(),
energy_change=self.calculate_percentage_energy(),
f0mean_change=self.calculate_percentage_f0mean(),
pitch=self.pitch,
advice=advice) # RG
#mpatacchiola: calling the robot animation function
t = threading.Thread(target=robot_animation, args=(advice, self.name,))
t.start()
#mpatacchiola: (sharedscreen) allows the movement of the robot head (it looks at the screen)
direction = "screen"
sleep = 3.0 # adjust as you want
t = threading.Thread(target=robot_move_head, args=(direction, sleep, self.name,))
t.start()
pygame.mixer.music.play()
def calculate_percentage_rate(self):
if self.rate < 0:
return str(self.rate)+'%'
else:
return '+'+str(self.rate)+'%'
# def calculate_percentage_range(self): # RG
#     if self.pitch_range < 0:
#         return str(self.pitch_range)+'%'
#     else:
#         return '+'+str(self.pitch_range)+'%'
def calculate_percentage_energy(self): # RG
if self.energy < 0:
return str(self.energy)+'%'
else:
return '+'+str(self.energy)+'%'
def calculate_percentage_f0mean(self): # RG
if self.f0mean < 0:
return str(self.f0mean)+'%'
else:
return '+'+str(self.f0mean)+'%'
def help(self):
nro = self.get_an_advice()
self.last_advice = nro
logging.info('Audio=> [%(nombre)s] giving advice: %(rank)02d', {'nombre': self.name, 'rank': nro})
self.speech_advice()
self.game.human_player.update_enabled_cards(nro)
return True
def choose_better_advs(self, keys):
score = keys[0]
for k in keys:
if abs(self.score + k) < abs(self.score + score):
score = k
return score
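# Illustrative note (added, not original): choose_better_advs picks the key that
# keeps the cumulative score closest to zero, e.g. with self.score == 3 and keys
# [-15, 1, 5]: abs(3-15)=12, abs(3+1)=4, abs(3+5)=8, so 1 is chosen. Over a game
# this keeps the overall quality of the helper's advice roughly neutral.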
def hide(self):
self.visible = False
def show(self):
self.visible = True
def is_talking(self):
return pygame.mixer.music.get_busy()
class PracticeHelper(Helper):
def __init__(self, id, game, initial_score=0):
super(PracticeHelper, self).__init__(id, game, initial_score)
def adapt_rates(self, new_ap_value, ap_feature): # RG
logging.info('Audio=> Practice helper does not adapt %s', ap_feature.upper()) # RG
if ap_feature == 'rate': # RG
self.game.historic_rate.append(new_ap_value)
# elif ap_feature == 'range':
# self.game.historic_range.append(new_ap_value)
elif ap_feature == 'energy':
self.game.historic_energy.append(new_ap_value)
elif ap_feature == 'f0mean':
self.game.historic_f0mean.append(new_ap_value)
pass
class EntrainingHelper(Helper):
def __init__(self, id, game, initial_score):
super(EntrainingHelper, self).__init__(id, game, initial_score)
self.initial_rate = game.rate_base
# self.initial_range = game.range_base # RG
self.initial_energy = game.energy_base # RG
self.initial_f0mean = game.f0mean_base # RG
def adapt_rates(self, new_ap_value, ap_feature): # RG
logging.info('Audio=> ## Adapting %s ## ', ap_feature.upper()) # RG
if ap_feature == 'rate': # RG
initial_ap = self.initial_rate
percentage_max = RATE_PERCENTAGE_MAX
percentage_min = RATE_PERCENTAGE_MIN
# elif ap_feature == 'range':
# initial_ap = self.initial_range
# percentage_max = RANGE_PERCENTAGE_MAX
# percentage_min = RANGE_PERCENTAGE_MIN
elif ap_feature == 'energy':
initial_ap = self.initial_energy
percentage_max = ENERGY_PERCENTAGE_MAX
percentage_min = ENERGY_PERCENTAGE_MIN
elif ap_feature == 'f0mean':
initial_ap = self.initial_f0mean
percentage_max = FOMEAN_PERCENTAGE_MAX
percentage_min = FOMEAN_PERCENTAGE_MIN
pt = (new_ap_value - initial_ap) / initial_ap
partial = int(round(pt, 2) * 100)
ap_change = max(min(partial, percentage_max), percentage_min) # RG
if ap_feature == 'rate': # RG
self.game.historic_rate.append(new_ap_value)
self.rate = ap_change
# elif ap_feature == 'range':
# self.game.historic_range.append(new_ap_value)
# self.pitch_range = ap_change
elif ap_feature == 'energy':
self.game.historic_energy.append(new_ap_value)
self.energy = ap_change
elif ap_feature == 'f0mean':
self.game.historic_f0mean.append(new_ap_value)
self.f0mean = ap_change
logging.info('Audio=> Measured %(ap_feature)s: [%(new_ap_value)g] - Change: [%(percent_change)g percent] - Base value: [%(base_value)g]',
{'ap_feature': ap_feature, 'new_ap_value': new_ap_value, 'percent_change': ap_change, 'base_value': initial_ap}) # RG
class DisentrainingHelper(Helper):
def __init__(self, id, game, initial_score):
super(DisentrainingHelper, self).__init__(id, game, initial_score)
self.initial_rate = game.rate_base
# self.initial_range = game.range_base # RG
self.initial_energy = game.energy_base # RG
self.initial_f0mean = game.f0mean_base # RG
def adapt_rates(self, new_ap_value, ap_feature): # RG
logging.info('Audio=> ## DE-Adapting %s ## ', ap_feature.upper()) # RG
if ap_feature == 'rate': # RG
initial_ap = self.initial_rate
percentage_max = RATE_PERCENTAGE_MAX
percentage_min = RATE_PERCENTAGE_MIN
# elif ap_feature == 'range':
# initial_ap = self.initial_range
# percentage_max = RANGE_PERCENTAGE_MAX
# percentage_min = RANGE_PERCENTAGE_MIN
elif ap_feature == 'energy':
initial_ap = self.initial_energy
percentage_max = ENERGY_PERCENTAGE_MAX
percentage_min = ENERGY_PERCENTAGE_MIN
elif ap_feature == 'f0mean':
initial_ap = self.initial_f0mean
percentage_max = FOMEAN_PERCENTAGE_MAX
percentage_min = FOMEAN_PERCENTAGE_MIN
pt = (new_ap_value - initial_ap) / initial_ap
partial = int(round(pt, 2) * -100) # RG: this must be one of the most important minus signs in science! And it's well hidden!
ap_change = max(min(partial, percentage_max), percentage_min) # RG
if ap_feature == 'rate': # RG
self.game.historic_rate.append(new_ap_value)
self.rate = ap_change
# elif ap_feature == 'range':
# self.game.historic_range.append(new_ap_value)
# self.pitch_range = ap_change
elif ap_feature == 'energy':
self.game.historic_energy.append(new_ap_value)
self.energy = ap_change
elif ap_feature == 'f0mean':
self.game.historic_f0mean.append(new_ap_value)
self.f0mean = ap_change
logging.info('Audio=> Measured %(ap_feature)s: [%(new_ap_value)g] - Change: [%(percent_change)g percent] - Base value: [%(base_value)g]',
{'ap_feature': ap_feature, 'new_ap_value': new_ap_value, 'percent_change': ap_change, 'base_value': initial_ap}) # RG
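# Worked example of the percentage computation above (added for clarity, not part
# of the original code): with initial_ap = 5.0 and new_ap_value = 5.6,
# pt = 0.12 and partial = 12, which is then clamped to [percentage_min, percentage_max].
# EntrainingHelper keeps the sign (the voice moves toward the participant's measured
# value), while DisentrainingHelper multiplies by -100 instead of 100, moving away
# from it by the same relative amount.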
class UnknownHelper(Helper):
def __init__(self, id, game):
super(UnknownHelper, self).__init__(id, game)
self.image = HelperUnknownSignal(game.helper_names)
self.clock = None
def help(self):
self.clock = time.time()
return False
def adapt_rates(self, new_ap_value, ap_feature):
logging.error('Audio=> Not adapting %(ap_feature)s because the helper is %(helper_name)s',
{'helper_name': self.name, 'ap_feature': ap_feature})
def is_talking(self):
return time.time() - self.clock < 3
|
|
"""Support for Sass's namespacing rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import logging
import six
from scss.types import Undefined
from scss.types import Value
log = logging.getLogger(__name__)
def normalize_var(name):
assert isinstance(name, six.string_types)
return name.replace('_', '-')
class Scope(object):
"""Implements Sass variable scoping.
Similar to `ChainMap`, except that assigning a new value will replace an
existing value, not mask it.
"""
def __init__(self, maps=()):
maps = list(maps)
self.maps = [dict()] + maps
def __repr__(self):
return "<%s(%s) at 0x%x>" % (type(self).__name__, ', '.join(repr(map) for map in self.maps), id(self))
def __getitem__(self, key):
for map in self.maps:
if key in map:
return map[key]
raise KeyError(key)
def __setitem__(self, key, value):
self.set(key, value)
def __contains__(self, key):
for map in self.maps:
if key in map:
return True
return False
def keys(self):
# For mapping interface
keys = set()
for map in self.maps:
keys.update(map.keys())
return list(keys)
def set(self, key, value, force_local=False):
if not force_local:
for map in self.maps:
if key in map:
if isinstance(map[key], Undefined):
break
map[key] = value
return
self.maps[0][key] = value
def new_child(self):
return type(self)(self.maps)
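# Illustrative example (added for clarity; not part of the original module) of the
# replace-not-mask behaviour described in the docstring above:
#
#     scope = Scope()
#     scope['color'] = 'red'                        # lands in the innermost map
#     child = scope.new_child()
#     child['color'] = 'blue'                       # replaces the parent's value...
#     scope['color']                                # ...so this now returns 'blue'
#     child.set('size', '10px', force_local=True)   # visible only in the child
#     'size' in scope                               # False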
class VariableScope(Scope):
pass
class FunctionScope(Scope):
def __repr__(self):
return "<%s(%s) at 0x%x>" % (type(self).__name__, ', '.join('[%s]' % ', '.join('%s:%s' % (f, n) for f, n in sorted(map.keys())) for map in self.maps), id(self))
class MixinScope(Scope):
def __repr__(self):
return "<%s(%s) at 0x%x>" % (type(self).__name__, ', '.join('[%s]' % ', '.join('%s:%s' % (f, n) for f, n in sorted(map.keys())) for map in self.maps), id(self))
class ImportScope(Scope):
pass
class Namespace(object):
"""..."""
_mutable = True
def __init__(self, variables=None, functions=None, mixins=None, mutable=True):
self._mutable = mutable
if variables is None:
self._variables = VariableScope()
else:
# TODO parse into sass values once that's a thing, or require them
# all to be
self._variables = VariableScope([variables])
if functions is None:
self._functions = FunctionScope()
else:
self._functions = FunctionScope([functions._functions])
self._mixins = MixinScope()
self._imports = ImportScope()
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This Namespace instance is immutable")
@classmethod
def derive_from(cls, *others):
self = cls()
if len(others) == 1:
self._variables = others[0]._variables.new_child()
self._functions = others[0]._functions.new_child()
self._mixins = others[0]._mixins.new_child()
self._imports = others[0]._imports.new_child()
else:
# Note that this will create a 2-dimensional scope where each of
# these scopes is checked first in order. TODO is this right?
self._variables = VariableScope(other._variables for other in others)
self._functions = FunctionScope(other._functions for other in others)
self._mixins = MixinScope(other._mixins for other in others)
self._imports = ImportScope(other._imports for other in others)
return self
def derive(self):
"""Return a new child namespace. All existing variables are still
readable and writeable, but any new variables will only exist within a
new scope.
"""
return type(self).derive_from(self)
def declare(self, function):
"""Insert a Python function into this Namespace, detecting its name and
argument count automatically.
"""
self._auto_register_function(function, function.__name__)
return function
def declare_alias(self, name):
"""Insert a Python function into this Namespace with an
explicitly-given name, but detect its argument count automatically.
"""
def decorator(f):
self._auto_register_function(f, name)
return f
return decorator
def declare_internal(self, function):
"""Like declare(), but the registered function will also receive the
current namespace as its first argument. Useful for functions that
inspect the state of the compilation, like ``variable-exists()``.
Probably not so useful for anything else.
"""
function._pyscss_needs_namespace = True
self._auto_register_function(function, function.__name__, 1)
return function
def _auto_register_function(self, function, name, ignore_args=0):
name = name.replace('_', '-').rstrip('-')
argspec = inspect.getargspec(function)
if argspec.varargs or argspec.keywords:
# Accepts some arbitrary number of arguments
arities = [None]
else:
# Accepts a fixed range of arguments
if argspec.defaults:
num_optional = len(argspec.defaults)
else:
num_optional = 0
num_args = len(argspec.args) - ignore_args
arities = range(num_args - num_optional, num_args + 1)
for arity in arities:
self.set_function(name, arity, function)
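# Illustrative example (added; names are hypothetical): registering
#     def smart_join(items, separator='-'): ...
# through declare() gives name 'smart-join', num_args 2 and num_optional 1, so
# arities = range(1, 3) and the function is stored under ('smart-join', 1) and
# ('smart-join', 2). A *args/**kwargs function is stored once under
# ('smart-join', None), the catch-all arity tried last by _get_callable().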
@property
def variables(self):
return dict((k, self._variables[k]) for k in self._variables.keys())
def variable(self, name, throw=False):
name = normalize_var(name)
return self._variables[name]
def set_variable(self, name, value, local_only=False):
self._assert_mutable()
name = normalize_var(name)
if not isinstance(value, Value):
raise TypeError("Expected a Sass type, while setting %s got %r" % (name, value,))
self._variables.set(name, value, force_local=local_only)
def has_import(self, source):
return source.path in self._imports
def add_import(self, source, parent_rule):
self._assert_mutable()
self._imports[source.path] = [
0,
parent_rule.source_file.path,
parent_rule.file_and_line,
]
def use_import(self, import_key):
self._assert_mutable()
if import_key and import_key in self._imports:
imports = self._imports[import_key]
imports[0] += 1
self.use_import(imports[1])
def unused_imports(self):
unused = []
for import_key in self._imports.keys():
imports = self._imports[import_key]
if not imports[0]:
unused.append((import_key[0], imports[2]))
return unused
def _get_callable(self, chainmap, name, arity):
name = normalize_var(name)
if arity is not None:
# With explicit arity, try the particular arity before falling back
# to the general case (None)
try:
return chainmap[name, arity]
except KeyError:
pass
return chainmap[name, None]
def _set_callable(self, chainmap, name, arity, cb):
name = normalize_var(name)
chainmap[name, arity] = cb
def mixin(self, name, arity):
return self._get_callable(self._mixins, name, arity)
def set_mixin(self, name, arity, cb):
self._assert_mutable()
self._set_callable(self._mixins, name, arity, cb)
def function(self, name, arity):
return self._get_callable(self._functions, name, arity)
def set_function(self, name, arity, cb):
self._assert_mutable()
self._set_callable(self._functions, name, arity, cb)
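# Minimal usage sketch (added; assumes scss.types.Number as shipped with pyScss):
#
#     from scss.types import Number
#     ns = Namespace()
#
#     @ns.declare
#     def double_it(n):
#         return Number(n.value * 2)      # callable as double-it(4) in Sass
#
#     ns.set_variable('$gutter', Number(8))
#     ns.function('double-it', 1)         # -> the registered Python callable
#     ns.variable('$gutter')              # -> Number(8)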
|
|
from __future__ import unicode_literals, print_function, division
import feedparser
import dataset
from twisted.internet.reactor import callLater
from threading import Thread
import twisted.internet.error
import logging
logger = logging.getLogger("module_rss")
DATABASE = None
updater = None
botref = None
config = {}
def init(bot, testing=False):
""" Initialize updater """
global DATABASE
global config
global botref
global updater
global logger
if testing:
DATABASE = dataset.connect("sqlite:///:memory:")
else:
DATABASE = dataset.connect("sqlite:///databases/rss.db")
logger.info("RSS module initialized")
botref = bot
config = bot.config.get("rss", {})
finalize()
# As there's no signal if this is a rehash or restart
# update feeds in 30 seconds
updater = callLater(30, update_feeds)
def finalize():
""" Finalize updater (rehash etc) so we don't leave an updater running """
global updater
global logger
logger.info("RSS module finalized")
if updater:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = None
def get_feeds(**kwargs):
""" Get feeds from database """
return [
Feed(f["network"], f["channel"], f["id"])
for f in list(DATABASE["feeds"].find(**kwargs))
]
def find_feed(network, channel, **kwargs):
""" Find specific feed from database """
f = DATABASE["feeds"].find_one(network=network, channel=channel, **kwargs)
if not f:
return
return Feed(f["network"], f["channel"], f["id"])
def add_feed(network, channel, url):
""" Add feed to database """
f = Feed(network=network, channel=channel, url=url)
return (f.initialized, f.read())
def remove_feed(network, channel, id):
""" Remove feed from database """
f = find_feed(network=network, channel=channel, id=int(id))
if not f:
return
DATABASE["feeds"].delete(id=f.id)
DATABASE["items_%i" % (f.id)].drop()
return f
def update_feeds(cancel=True, **kwargs):
# from time import sleep
""" Update all feeds in the DB """
global config
global updater
global logger
logger.info("Updating RSS feeds started")
for f in get_feeds(**kwargs):
Thread(target=f.update).start()
# If we get a cancel, cancel the existing updater
# and start a new one
# NOTE: Not sure if needed, as atm cancel isn't used in any command...
if cancel:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = callLater(5 * 60, update_feeds)
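# Scheduling note (added for clarity): init() arms the first run 30 seconds after
# start-up, and every run of update_feeds() re-arms itself 5 minutes later via
# callLater, so feeds keep being polled until finalize() cancels the timer.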
def command_rss(bot, user, channel, args):
commands = ["list", "add", "remove", "latest", "update"]
args = args.split()
if not args or args[0] not in commands:
return bot.say(channel, "rss: valid arguments are [%s]" % (", ".join(commands)))
command = args[0]
network = bot.network.alias
# Get latest feed item from database
# Not needed? mainly for debugging
# Possibly useful for checking if feed still exists?
if command == "latest":
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss latest <id from list>"')
feed = find_feed(network=network, channel=channel, id=int(args[1]))
if not feed:
return bot.say(channel, "feed not found, no action taken")
item = feed.get_latest()
if not item:
return bot.say(channel, "no items in feed")
return bot.say(channel, feed.get_item_str(item))
# List all feeds for current network && channel
if command == "list":
feeds = get_feeds(network=network, channel=channel)
if not feeds:
return bot.say(channel, "no feeds set up")
for f in feeds:
bot.say(channel, "%02i: %s <%s>" % (f.id, f.name, f.url))
return
# Rest of the commands are only for admins
if not bot.factory.isAdmin(user):
return bot.say(channel, 'only "latest" and "list" available for non-admins')
# Add new feed for channel
if command == "add":
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss add url"')
init, items = add_feed(network, channel, url=args[1])
if not init:
return bot.say(channel, "feed already added")
return bot.say(channel, "feed added with %i items" % len(items))
# remove feed from channel
if command == "remove":
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss remove <id from list>"')
feed = remove_feed(network, channel, id=args[1])
if not feed:
return bot.say(channel, "feed not found, no action taken")
return bot.say(channel, 'feed "%s" <%s> removed' % (feed.name, feed.url))
# If there's no args, update all feeds (even for other networks)
# If arg exists, try to update the feed...
if command == "update":
if len(args) < 2:
bot.say(channel, "feeds updating")
update_feeds()
return
feed = find_feed(network, channel, id=int(args[1]))
if not feed:
return bot.say(channel, "feed not found, no action taken")
feed.update()
return
class Feed(object):
""" Feed object to simplify feed handling """
def __init__(self, network, channel, id=None, url=None):
# Not sure if (this complex) init is needed...
self.id = id
self.network = network
self.channel = channel
self.url = url
if url:
self.url = url
self.initialized = False
# load feed details from database
self._get_feed_from_db()
def __repr__(self):
return "(%s, %s, %s)" % (self.url, self.channel, self.network)
def __unicode__(self):
return "%i - %s" % (self.id, self.url)
def __init_feed(self):
""" Initialize databases for feed """
DATABASE["feeds"].insert(
{
"network": self.network,
"channel": self.channel,
"url": self.url,
"name": "",
}
)
# Update feed to match the created
feed = self._get_feed_from_db()
# Initialize item-database for feed
self.__save_item(
{
"title": "PLACEHOLDER",
"link": "https://github.com/lepinkainen/pyfibot/",
"printed": True,
}
)
self.initialized = True
return feed
def __get_items_tbl(self):
""" Get table for feeds items """
return DATABASE[("items_%i" % (self.id))]
def __parse_feed(self):
""" Parse items from feed """
f = feedparser.parse(self.url)
if self.initialized:
self.update_feed_info({"name": f["channel"]["title"]})
items = [{"title": i["title"], "link": i["link"]} for i in f["items"]]
return (f, items)
def __save_item(self, item, table=None):
""" Save item to feeds database """
if table is None:
table = self.__get_items_tbl()
# If override is set or the item cannot be found, it's a new one
if not table.find_one(title=item["title"], link=item["link"]):
# If printed isn't set, set it to the value in self.initialized (True, if initializing, else False)
# This is to prevent flooding when adding a new feed...
if "printed" not in item:
item["printed"] = self.initialized
table.insert(item)
def __mark_printed(self, item, table=None):
""" Mark item as printed """
if table is None:
table = self.__get_items_tbl()
table.update({"id": item["id"], "printed": True}, ["id"])
def _get_feed_from_db(self):
""" Get self from database """
feed = None
if self.url and not self.id:
feed = DATABASE["feeds"].find_one(
network=self.network, channel=self.channel, url=self.url
)
if self.id:
feed = DATABASE["feeds"].find_one(
network=self.network, channel=self.channel, id=self.id
)
if not feed:
feed = self.__init_feed()
self.id = feed["id"]
self.network = feed["network"]
self.channel = feed["channel"]
self.url = feed["url"]
# TODO: Name could just be the domain part of url?
self.name = feed["name"]
return feed
def get_item_str(self, item):
return "[%s] %s <%s>" % (
"".join([c for c in self.name][0:18]),
item["title"],
item["link"],
)
def get_latest(self):
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(order_by="id"))]
if not items:
return
return items[-1]
def update_feed_info(self, data):
""" Update feed information """
data["id"] = self.id
if "url" in data:
self.url = data["url"]
DATABASE["feeds"].update(data, ["id"])
# Update self to match new...
self._get_feed_from_db()
def read(self):
""" Read new items from feed """
f, items = self.__parse_feed()
# Get table -reference to speed up stuff...
tbl = self.__get_items_tbl()
# Save items in DB, saving takes care of duplicate checks
for i in reversed(items):
self.__save_item(i, tbl)
# Set initialized to False, as we have read everything...
self.initialized = False
return items
def get_new_items(self, mark_printed=False):
""" Get all items which are not marked as printed, if mark_printed is set, update printed also. """
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(printed=False))]
if mark_printed:
for i in items:
self.__mark_printed(i, tbl)
return items
def update(self):
global logger
global botref
# If botref isn't defined, bot isn't running, no need to run
# (used for tests?)
if not botref:
return
# Read all items for feed
logger.debug('Feed "%s" updating' % (self.name))
self.read()
# Get number of unprinted items (and don't mark as printed)
items = self.get_new_items(False)
if len(items) == 0:
logger.debug(
'Feed "%s" containes no new items, doing nothing.' % (self.name)
)
return
logger.debug('Feed "%s" updated with %i new items' % (self.name, len(items)))
# If bot instance isn't found, don't print anything
bot_instance = botref.find_bot_for_network(self.network)
if not bot_instance:
logger.error('Bot instance for "%s" not found, not printing' % (self.name))
return
logger.debug('Printing new items for "%s"' % (self.name))
# Get all new (not printed) items and print them
items = self.get_new_items(True)
for i in items:
bot_instance.say(self.channel, self.get_item_str(i))
if __name__ == "__main__":
f = Feed(
"ircnet", "#pyfibot", "http://feeds.feedburner.com/ampparit-kaikki?format=xml"
)
f.read()
for i in f.get_new_items(True):
print(i)
|
|
from __future__ import absolute_import
# -*- coding: utf-8 -*-
from datetime import datetime
from xml.dom import minidom
from StringIO import StringIO
from django.conf import settings
from django.core import serializers
from django.db import transaction, connection
from django.test import TestCase, TransactionTestCase, Approximate
from django.utils import simplejson, unittest
from .models import (Category, Author, Article, AuthorProfile, Actor, Movie,
Score, Player, Team)
class SerializerRegistrationTests(unittest.TestCase):
def setUp(self):
self.old_SERIALIZATION_MODULES = getattr(settings, 'SERIALIZATION_MODULES', None)
self.old_serializers = serializers._serializers
serializers._serializers = {}
settings.SERIALIZATION_MODULES = {
"json2" : "django.core.serializers.json",
}
def tearDown(self):
serializers._serializers = self.old_serializers
if self.old_SERIALIZATION_MODULES:
settings.SERIALIZATION_MODULES = self.old_SERIALIZATION_MODULES
else:
delattr(settings, 'SERIALIZATION_MODULES')
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn('json3', public_formats)
self.assertIn('json2', public_formats)
self.assertIn('xml', public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn('xml', public_formats)
self.assertIn('json3', public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
self.assertIn('xml', all_formats)
self.assertIn('xml', public_formats)
self.assertIn('json2', all_formats)
self.assertIn('json2', public_formats)
self.assertIn('python', all_formats)
self.assertNotIn('python', public_formats)
class SerializersTestBase(object):
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = "Poker has no place on ESPN"
new_headline = "Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970,1,1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline','pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = u"Za\u017c\u00f3\u0142\u0107"
movie_title = u'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_superfluous_queries(self):
"""Ensure no superfluous queries are made when serializing ForeignKeys
#17602
"""
ac = Actor(name='Actor name')
ac.save()
mv = Movie(title='Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serial_str = serializers.serialize(self.serializer_name, [mv])
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "Spartak Moskva"
player = Player()
player.name = "Soslan Djanaev"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author = self.jane,
headline = "Nobody remembers the early years",
pub_date = datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 4)
class SerializersTransactionTestBase(object):
def test_forward_refs(self):
"""
Tests that objects ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to be contained
# within a transaction in order to test forward reference
# handling.
transaction.enter_transaction_management()
transaction.managed(True)
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
transaction.commit()
transaction.leave_transaction_management()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
</django-objects>"""
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return unicode(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[{"pk": null, "model": "serializers.category", "fields": {"name": "Reference"}}]"""
@staticmethod
def _validate_output(serial_str):
try:
simplejson.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = simplejson.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = simplejson.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
try:
import yaml
except ImportError:
pass
else:
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, basestring):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
|
|
from __future__ import absolute_import, unicode_literals
import json
import os
from django.contrib.contenttypes.models import ContentType
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.shortcuts import render
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six import text_type
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from unidecode import unidecode
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailadmin.utils import send_mail
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Orderable, Page, UserPagePermissionsProxy, get_page_models
from .forms import FormBuilder, WagtailAdminFormPageForm
FORM_FIELD_CHOICES = (
('singleline', _('Single line text')),
('multiline', _('Multi-line text')),
('email', _('Email')),
('number', _('Number')),
('url', _('URL')),
('checkbox', _('Checkbox')),
('checkboxes', _('Checkboxes')),
('dropdown', _('Drop down')),
('radio', _('Radio buttons')),
('date', _('Date')),
('datetime', _('Date/time')),
)
@python_2_unicode_compatible
class AbstractFormSubmission(models.Model):
"""
Data for a form submission.
You can create custom submission model based on this abstract model.
For example, if you need to save additional data or a reference to a user.
"""
form_data = models.TextField()
page = models.ForeignKey(Page, on_delete=models.CASCADE)
submit_time = models.DateTimeField(verbose_name=_('submit time'), auto_now_add=True)
def get_data(self):
"""
Returns dict with form data.
You can override this method to add additional data.
"""
form_data = json.loads(self.form_data)
form_data.update({
'submit_time': self.submit_time,
})
return form_data
def __str__(self):
return self.form_data
class Meta:
abstract = True
verbose_name = _('form submission')
class FormSubmission(AbstractFormSubmission):
"""Data for a Form submission."""
class AbstractFormField(Orderable):
"""
Database Fields required for building a Django Form field.
"""
label = models.CharField(
verbose_name=_('label'),
max_length=255,
help_text=_('The label of the form field')
)
field_type = models.CharField(verbose_name=_('field type'), max_length=16, choices=FORM_FIELD_CHOICES)
required = models.BooleanField(verbose_name=_('required'), default=True)
choices = models.TextField(
verbose_name=_('choices'),
blank=True,
help_text=_('Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.')
)
default_value = models.CharField(
verbose_name=_('default value'),
max_length=255,
blank=True,
help_text=_('Default value. Comma separated values supported for checkboxes.')
)
help_text = models.CharField(verbose_name=_('help text'), max_length=255, blank=True)
@property
def clean_name(self):
# unidecode will return an ASCII string while slugify wants a
# unicode string; on the other hand, slugify returns a SafeText,
# which is converted back to a plain str here
return str(slugify(text_type(unidecode(self.label))))
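# Illustrative example (added for clarity): a label of "Téléphone portable" becomes
# "Telephone portable" after unidecode and "telephone-portable" after slugify; that
# slug is the key under which this field appears in the submitted form data.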
panels = [
FieldPanel('label'),
FieldPanel('help_text'),
FieldPanel('required'),
FieldPanel('field_type', classname="formbuilder-type"),
FieldPanel('choices', classname="formbuilder-choices"),
FieldPanel('default_value', classname="formbuilder-default"),
]
class Meta:
abstract = True
ordering = ['sort_order']
_FORM_CONTENT_TYPES = None
def get_form_types():
global _FORM_CONTENT_TYPES
if _FORM_CONTENT_TYPES is None:
form_models = [
model for model in get_page_models()
if issubclass(model, AbstractForm)
]
_FORM_CONTENT_TYPES = list(
ContentType.objects.get_for_models(*form_models).values()
)
return _FORM_CONTENT_TYPES
def get_forms_for_user(user):
"""
Return a queryset of form pages that this user is allowed to access the submissions for
"""
editable_forms = UserPagePermissionsProxy(user).editable_pages()
editable_forms = editable_forms.filter(content_type__in=get_form_types())
# Apply hooks
for fn in hooks.get_hooks('filter_form_submissions_for_user'):
editable_forms = fn(user, editable_forms)
return editable_forms
class AbstractForm(Page):
"""
A Form Page. Pages implementing a form should inherit from it
"""
form_builder = FormBuilder
base_form_class = WagtailAdminFormPageForm
def __init__(self, *args, **kwargs):
super(AbstractForm, self).__init__(*args, **kwargs)
if not hasattr(self, 'landing_page_template'):
name, ext = os.path.splitext(self.template)
self.landing_page_template = name + '_landing' + ext
class Meta:
abstract = True
def get_form_fields(self):
"""
Form page expects `form_fields` to be declared.
If you want to change backwards relation name,
you need to override this method.
"""
return self.form_fields.all()
def get_data_fields(self):
"""
Returns a list of tuples with (field_name, field_label).
"""
data_fields = [
('submit_time', _('Submission date')),
]
data_fields += [
(field.clean_name, field.label)
for field in self.get_form_fields()
]
return data_fields
def get_form_class(self):
fb = self.form_builder(self.get_form_fields())
return fb.get_form_class()
def get_form_parameters(self):
return {}
def get_form(self, *args, **kwargs):
form_class = self.get_form_class()
form_params = self.get_form_parameters()
form_params.update(kwargs)
return form_class(*args, **form_params)
def get_landing_page_template(self, request, *args, **kwargs):
return self.landing_page_template
def get_submission_class(self):
"""
Returns submission class.
You can override this method to provide custom submission class.
Your class must be inherited from AbstractFormSubmission.
"""
return FormSubmission
def process_form_submission(self, form):
"""
Accepts form instance with submitted data, user and page.
Creates submission instance.
You can override this method if you want to have custom creation logic.
For example, if you want to save reference to a user.
"""
self.get_submission_class().objects.create(
form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),
page=self,
)
def serve(self, request, *args, **kwargs):
if request.method == 'POST':
form = self.get_form(request.POST, page=self, user=request.user)
if form.is_valid():
self.process_form_submission(form)
# render the landing_page
# TODO: It is much better to redirect to it
return render(
request,
self.get_landing_page_template(request),
self.get_context(request)
)
else:
form = self.get_form(page=self, user=request.user)
context = self.get_context(request)
context['form'] = form
return render(
request,
self.get_template(request),
context
)
preview_modes = [
('form', 'Form'),
('landing', 'Landing page'),
]
def serve_preview(self, request, mode):
if mode == 'landing':
return render(
request,
self.get_landing_page_template(request),
self.get_context(request)
)
else:
return super(AbstractForm, self).serve_preview(request, mode)
class AbstractEmailForm(AbstractForm):
"""
A Form Page that sends email. Pages implementing a form whose submissions should be emailed should inherit from it
"""
to_address = models.CharField(
verbose_name=_('to address'), max_length=255, blank=True,
help_text=_("Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma.")
)
from_address = models.CharField(verbose_name=_('from address'), max_length=255, blank=True)
subject = models.CharField(verbose_name=_('subject'), max_length=255, blank=True)
def process_form_submission(self, form):
submission = super(AbstractEmailForm, self).process_form_submission(form)
if self.to_address:
self.send_mail(form)
return submission
def send_mail(self, form):
addresses = [x.strip() for x in self.to_address.split(',')]
content = []
for field in form:
value = field.value()
if isinstance(value, list):
value = ', '.join(value)
content.append('{}: {}'.format(field.label, value))
content = '\n'.join(content)
send_mail(self.subject, content, addresses, self.from_address,)
class Meta:
abstract = True
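# Minimal subclassing sketch (added; InlinePanel and ParentalKey come from
# wagtail.wagtailadmin.edit_handlers and modelcluster.fields respectively, and the
# page/field names are hypothetical):
#
#     class ContactFormField(AbstractFormField):
#         page = ParentalKey('ContactPage', related_name='form_fields')
#
#     class ContactPage(AbstractEmailForm):
#         intro = models.TextField(blank=True)
#
#         content_panels = AbstractEmailForm.content_panels + [
#             FieldPanel('intro'),
#             InlinePanel('form_fields', label="Form fields"),
#             FieldPanel('to_address'),
#             FieldPanel('from_address'),
#             FieldPanel('subject'),
#         ]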
|
|
#! /usr/bin/python
'''
Cisco Spark Bot for Room Finder Application
This Bot will use a provided bot Cisco Spark Account (identified by the Developer Token)
to interact with the Roomfinder application. Users can
check currently available rooms, book them and find them on a map.
There are several pieces of information needed to run this application. It is
suggested to set them as OS Environment Variables. Here is an example on how to
set them:
# Address and key for app server
export roomfinder_data_server=http://ip_or_name:5001
# Details on the Cisco Spark Account to Use
export [email protected]
Find Cisco Spark developer token on http://developer.ciscospark.com
export spark_token=...
# Address and key for the Spark Bot itself
export roomfinder_spark_bot_url=http://public_ip_or_name:5000
'''
__author__ = '[email protected]'
from flask import Flask, request, Response
import os, sys, requests, json, re, urllib, socket
from pprint import pprint
import ntpath
import datetime
from requests_toolbelt.multipart.encoder import MultipartEncoder
import pika
import uuid
import urllib2
import lnetatmo
import time
import unicodedata
import feedparser
admin_list=["[email protected]","[email protected]","[email protected]"]
log_dir="/log/"
app = Flask(__name__)
spark_host = "https://api.ciscospark.com/"
spark_headers = {}
spark_headers["Content-type"] = "application/json; charset=utf-8"
app_headers = {}
app_headers["Content-type"] = "application/json"
google_headers = {}
google_headers["User-Agent"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.112 Safari/534.30"
def return_utf(s):
if isinstance(s, unicode):
return s.encode('utf-8')
if isinstance(s, int):
return str(s).encode('utf-8')
if isinstance(s, float):
return str(s).encode('utf-8')
if isinstance(s, complex):
return str(s).encode('utf-8')
if isinstance(s, str):
return s
def netatmoOutdoor(sonde):
authorization = lnetatmo.ClientAuth()
devList = lnetatmo.WeatherStationData(authorization)
msg= (sonde +" current temperature : %s C" % ( devList.lastData()[sonde]['Temperature']))
return msg
def netatmoIndoor(sonde):
authorization = lnetatmo.ClientAuth()
devList = lnetatmo.WeatherStationData(authorization)
msg= (sonde + " current temperature : %s C" % ( devList.lastData()[sonde]['Temperature']))
return msg
def stats(user,roomid):
logfile = open(log_dir+"ILM-RoomFinder-Bot.log", 'r+')
line = logfile.readlines()
#sys.stderr.write('line='+str(line)+'\n')
j = 1
logfile.seek(0)
for i in line:
if i != '' and i!= "\r\n" and i!= "\n" and i.split()[0].lower().startswith(user) :
j = int(i.split()[1])+1
else :
logfile.write(i)
logfile.write(user +" "+ str(j) + " " + roomid + "\r\n")
logfile.truncate()
logfile.close()
return False
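# Note on the log format (added for clarity): ILM-RoomFinder-Bot.log keeps one
# line per user, "email request_count roomId", e.g.
#     jdoe@example.com 12 Y2lzY29zcGFyazovL3VzL1JPT00vLi4u
# stats() bumps the count and rewrites the line, readstats() aggregates the
# counts, and advertise() broadcasts a message to every roomId in column three.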
#REMOVE USER FROM THE ADVERTISING
def optout(user):
f = open(log_dir+"ILM-RoomFinder-Bot.log","r+")
d = f.readlines()
f.seek(0)
for i in d:
if (i.startswith(user) == 0) :
f.write(i)
f.truncate()
f.close()
return False
def log(user, request, response):
f = open(log_dir+user +'.log', 'a+')
f.write("\r\n" + datetime.datetime.now().replace(microsecond=0).isoformat() + " - " + str(request) + " - " + str(response) + "\r\n")
f.close()
return True
def readstats():
logs=""
nbUsers = 0
nbMsg = 0
logfile = open(log_dir+'ILM-RoomFinder-Bot.log', 'r+')
for line in logfile:
if line != '' and line!= "\r\n" and line!= "\n" :
nbMsg = nbMsg + int(line.split()[1])
nbUsers = nbUsers + 1
logfile.close()
logs = "* nb Users : " + str(nbUsers) + "\r\n" + "* nb Requests : " + str(nbMsg)
return logs
def advertise(msg,message_type="text"):
logfile = open(log_dir+'ILM-RoomFinder-Bot.log', 'r+')
for line in logfile:
if line != '' and line!= "\r\n" and line!= "\n" :
roomid = line.split()[2]
send_message_to_room(roomid, msg, message_type)
logfile.close()
return True
@app.route("/demoroom/members", methods=["POST", "GET"])
def process_demoroom_members():
# Verify that the request is properly authorized
#authz = valid_request_check(request)
#if not authz[0]:
# return authz[1]
status = 200
if request.method == "POST":
data = request.form
try:
sys.stderr.write("Adding %s to demo room.\n" % (data["email"]))
reply=send_welcome_message(data["email"])
status = 201
resp = Response(reply, content_type='application/json', status=status)
except KeyError:
error = {"Error":"API Expects dictionary object with single element and key of 'email'"}
status = 400
resp = Response(json.dumps(error), content_type='application/json', status=status)
# demo_room_members = get_membership()
# resp = Response(
# json.dumps(demo_room_members, sort_keys=True, indent = 4, separators = (',', ': ')),
# content_type='application/json',
# status=status)
else:
resp = Response("OK", status=status)
return resp
@app.route('/', methods=["POST"])
# Bot functions to process the incoming messages posted by Cisco Spark
def process_webhook():
text=""
post_data = request.get_json(force=True)
sys.stderr.write("post_data="+str(post_data)+"\n")
message_type="text"
message_id = post_data["data"]["id"]
message = get_message(message_id)
sys.stderr.write("message: "+str(message)+"\n")
reply=None
removed = False
# First make sure not processing a message from the bot
if post_data['data']["personEmail"] == bot_email:
return ""
# if "markdown" in message:
# sys.stderr.write("markdown: "+str(message["markdown"].encode('utf-8'))+"\n")
# if "html" in message:
# sys.stderr.write("html: "+str(message["html"].encode('utf-8'))+"\n")
if "errors" in message or "text" not in message:
message["text"]=""
text=""
else:
text=message["text"].replace('@Roomfinder','').lstrip().rstrip().lower().encode("utf-8")
text=message["text"].replace('Roomfinder','').lstrip().rstrip().lower().encode("utf-8")
sys.stderr.write("text: "+str(message["text"].encode('utf-8'))+"\n")
if "event" in message and message["event"] == 'deleted' :
sys.stderr.write('Message deleted\n')
return ""
# If someone is mentioned, do not answer
if 'mentionedPeople' in message:
sys.stderr.write("mentionned: "+str(message["mentionedPeople"])+"\n")
sys.stderr.write("Bot id : "+str(bot_id)+"\n")
if bot_id not in message["mentionedPeople"]:
sys.stderr.write("Not the bot mentionned, do not answer !\n")
return ""
else:
sys.stderr.write("Bot mentionned, removing the bot name !\n")
text=text.replace(bot_name,"").lstrip()
if not (post_data['data']['personEmail'] in admin_list
or post_data['data']['personEmail'].endswith('@cisco.com')
or post_data['data']['personEmail'].endswith('@ciscofrance.com') ) :
reply="** This bot is reserved for Cisco Employees **"
sys.stderr.write("reply: "+str(reply)+"\n")
return send_message_to_room(post_data["data"]["roomId"], reply,message_type)
sys.stderr.write("Incoming Room Message\tmessage: "+text+"\t")
# Check if message contains word "dispo" and if so send results
if text.startswith("dispo") or text.startswith("available"):
buildings = re.findall(r' [a-zA-Z][a-zA-Z0-9\-]+', text)
sys.stderr.write('Buildings found: '+str(len(buildings))+"\n")
for b in buildings:
sys.stderr.write(' - '+str(b)+"\n")
if len(buildings) == 0 :
buildings=[" ILM-"]
building=buildings[0][1:]
try:
u = dispo_server + "/dispo?key="+str(building)
page = requests.get(u, headers = app_headers)
tally = page.json()
sys.stderr.write("Tally: "+str(tally)+"\n")
#tally = sorted(tally.items(), key = lambda (k,v): v, reverse=True)
results=(i[1] for i in tally[1] if i[0]=="Free")
start = " in building "+str(building)+" "+tally[0][2]
end = tally[0][3]
number = re.findall(r' [0-9]+', text)
print "number: "+str(number)
toto=list(results)
sys.stderr.write("result: "+str(toto)+"\n")
# Test if there is a criteria on the number of seats
if number:
if len(number) == 1:
inf = int(number[0])
filtered_results=[result for result in toto if int(result.split('(')[1].split(')')[0])>=inf]
sys.stderr.write("filtered_results: "+str(filtered_results)+"\n")
# reply = ", with more than "+str(inf)+" seats, "+start+" "+end
reply = ", with more than "+str(inf)+" seats, int the next 2 hours"
else:
inf = int(number[0])
sup = int(number[1])
filtered_results=[result for result in toto if inf <= int(result.split('(')[1].split(')')[0]) <= sup]
sys.stderr.write("filtered_results: "+str(filtered_results)+"\n")
# reply = ", with more than "+str(inf)+" and less than "+str(sup)+" seats, "+start+" "+end
reply = ", with more than "+str(inf)+" and less than "+str(sup)+" seats in the next 2 hours"
else:
# reply = " "+start+" "+end
reply = " in the next 2 hours"
filtered_results=toto
titi=list(filtered_results)
# Test if filtered result list is empty or not
if titi:
reply = "The current available rooms"+reply+" are:\n"
for result in titi:
reply += "* %s\n" % (result)
#sys.stderr.write("Salle: "+result+"\n")
reply += "\nYou can book one of the rooms with the keyword : **book ROOM-NAME [option: 30m or 1h]**"
else:
reply = "Sorry, there are currently no available rooms"+reply+"\n"
except Exception as e:
reply="Dispo server is not available !"
# Check if message contains word "options" and if so send options
elif text in ["options","help","aide","?","/help","hello","hi"] :
reply = "Here are the keywords you can use: \n"
reply += "* **dispo** or **available** keyword will display the available rooms for the next 2 hours timeslot. For other buildings than ILM, you will have to add the prefix of your building, like **available SJC14-**\n"
reply += "* **reserve** or **book** keyword will try to book, by default for the next 2 hours, the room mentionned after the keyword **book** or **reserve**. You can specify the duration of the meeting with the option 30m or 1h.\n"
reply += "* **plan** or **map** keyword will display the map of the floor in **ILM building** mentionned after the keyword **plan** or **map**.\n"
reply += "* **cherche** or **find** keyword will help you to find the floor of a room mentionned by its short name after the keyword.\n"
reply += "* **batiment** or **building** keyword will help you to find a building id based on name of the building/town/country mentionned after the keyword, like **building Toronto** or **batiment ILM**.\n"
reply += "* **in** or **inside** keyword will display a picture inside the room mentionned after the keyword in **ILM building**.\n"
reply += "* **dir** keyword will display the directory entry for the CCO id mentionned after the keyword **dir**.\n"
reply += "* [disabled] **guest** keyword will create a guest wifi account for an attendee. You should specify after the keyword **guest** the attendee first name, last name and email, like **guest** john doe [email protected].\n"
reply += "* **parking** keyword will display the available spots inside Cisco **ILM parking**.\n"
reply += "* **add** keyword followed by an email will create a room between the bot and this email.\n"
reply += "* [new] **optout** or **bye** keyword will remove you from the list of users. You will no longer receive ads until you send me a new request.\n"
reply += "* **help** or **aide** will display a helping message to the Spark room.\n"
reply += "\nAll the the bot request are documented in [EN](https://cisco.jiveon.com/docs/DOC-1766766) and [FR](https://cisco.jiveon.com/docs/DOC-1765746). \r\n"
reply += "\nDo not hesitate to help us improve RoomFinder by joining the [RoomFinder Support Space](http://incis.co/VNDI)\n"
if post_data['data']['personEmail'] in admin_list :
reply += "* **/stats/** keyword will display the statistics of Roomfinder Cisco Spark Bot.\n"
reply += "* **/advertise/** keyword, followed by a message, will display this message for all users of Roomfinder Cisco Spark Bot.\n"
message_type="text"
# Check if message contains phrase "add email" and if so add user to room
elif text.startswith("add "):
# Get the email that comes
emails = re.findall(r' [\w\.-]+@[\w\.-]+', text)
pprint(emails)
reply = "Adding users to demo room.\n"
for email in emails:
send_welcome_message(email)
reply += " - %s \n" % (email)
elif text.startswith("dir"):
if text.rstrip() == "dir" :
reply = "Usage of dir command is: \n"
reply += "\t\tdir cco_id \n"
reply += "\tor \n"
reply += "\t\tdir firstname lastname \n"
else:
# Find the cco id
cco=text.replace('dir ','')
reply = find_dir(cco)
print "find_dir: "+str(reply)
if type(reply) != str and type(reply) != unicode:
message_type="localfine"
elif text.startswith("guest"):
if post_data['data']['personEmail'] in admin_list :
if text not in ["guest"]:
# Find the
args=text.split()
if len(args) == 4:
reply = guest(args[1],args[2],args[3])
sys.stderr.write( "guest: "+str(reply)+"\n" )
else:
reply = "Usage of guest command is:\n"
reply += "\tguest firstName lastName email\n"
else:
reply = "Usage of guest command is:\n"
reply += "\tguest firstName lastName email\n"
else:
reply = "## We have been asked by Infosec to shutdown the Guest feature. We are working with them to find a way to restore this succesfull service. ##"
elif text.startswith("find ") or text.startswith("cherche "):
# Find the room
room=text.replace('find ','')
room=room.replace('cherche ','')
reply = where_room(room.upper())
print "where_room: "+str(reply)
if not reply.startswith("Sorry"):
rooms=reply.split(';')
if len(rooms)==1:
r=rooms[0].split('-')
if (len(r)>=2):
floor=r[0]+'-'+r[1]
floor_map_raw=display_map(floor)
floor_map=json.loads(floor_map_raw)
floor_map["text"]="Here is the full name of the room, and the map of the floor: \n * "+rooms[0]
# stats(post_data['data']['personEmail'],post_data['data']['roomId'])
# log(post_data['data']['personEmail']+" - " +post_data['data']['roomId'],str(text),reply)
# send_message_to_room(post_data["data"]["roomId"], reply,message_type)
# #message_type="pdf"
reply=json.dumps(floor_map)
message_type="pdf"
else:
reply="Do you mean:\n"
for r in rooms:
reply+="* "+r+"\n"
message_type="text"
else:
message_type="text"
elif text.startswith("image "):
# Find the cco id
keyword_list = re.findall(r'[\w-]+', text)
print "keyword_list= "+str(keyword_list)
keyword_list.reverse()
keyword=keyword_list.pop()
while keyword.find("image") > -1:
keyword=keyword_list.pop()
reply = find_image(keyword)
print "find_image: "+reply
if reply.startswith('http'):
message_type="image"
elif text.startswith("plan") or text.startswith("map"):
# Find the floor
if text in ["plan","map"]:
reply = "Usage of map/plan command is:\n"
reply += "\tmap/plan command followed by floor name like:\n"
reply += "\t\tmap SJC05-3\n"
reply += "\t\t\tor\n"
reply += "\t\tplan ILM-7\n"
else:
floor=text.replace('map ','')
floor=floor.replace('plan ','')
pattern = re.compile("^([0-7]+)$")
m = pattern.match(floor)
if m:
# Map and number => ILM
floor='ILM-'+m.group()
sys.stderr.write("display_map: "+floor+"\n")
reply = display_map(floor.upper())
message_type="pdf"
else:
pattern2 = re.compile("^([a-z0-9 ]+\-[0-9]+)$")
m2 = pattern2.match(floor)
if m2:
floor=m2.group()
sys.stderr.write("display_map: "+floor+"\n")
reply = display_map(floor.upper())
if reply != "Connection error to map server":
message_type="pdf"
else:
t=floor.split("-")
if len(t) == 3 :
floor=t[0]+"-"+t[1]
sys.stderr.write("display_map: "+floor+"\n")
reply = display_map(floor.upper())
if reply != "Connection error to map server":
message_type="pdf"
else:
reply = "No floor is corresponding. Try **map/plan floor_name** or **map/plan floor_name** \n<br>\n <blockquote> with floor_name like ILM-3 or SJC13-3 </blockquote>"
elif text.lower().startswith("building") or text.lower().startswith("batiment"):
# Find the floor
if text.lower() in ["building","batiment"]:
reply = "Usage of building/batiment command is:\n"
reply += "\tbuilding/batiment command followed by building/town/country name like:\n"
reply += "\t\tbuiding Toronto\n"
reply += "\t\t\tor\n"
reply += "\t\tbatiment ILM\n"
else:
building=text.lower().replace('building ','')
building=building.lower().replace('batiment ','')
reply = display_building(building.upper())
elif text.startswith("book") or text.startswith("reserve"):
if text in ["book","reserve"]:
reply = "Usage of book/reserve command is:\n"
reply += "\tbook/reserve command followed by room name like:\n"
reply += "\t\t reserve ILM-7-HUGO\n"
reply += "\t\t\tor\n"
reply += "\t\t book SJC13-3-SMILE\n"
else:
# Find the room name
end = len(text)
if text.startswith("book "):
start = len('book ')
elif text.startswith("reserve "):
start = len('reserve ')
else:
sys.stderr.write("I don't know how you arrive here ! This is a bug !\n")
room_name=text[start:end]
sys.stderr.write("room_name= "+str(room_name)+"\n")
reply = book_room(room_name.upper(),post_data['data']["personEmail"],getDisplayName(post_data['data']["personId"]))
sys.stderr.write("book_room: "+reply+"\n")
elif text.startswith('in') or text.startswith('inside') or text.startswith('interieur'):
if text in ["in","inside","interieur"]:
reply = "Usage of in/inside/interieur command is:\n"
reply += "\t in/inside/interieur command followed by room name like:\n"
reply += "\t\t in ILM-7-HUGO\n"
reply += "\t\t\tor\n"
reply += "\t\t inside SJC13-3-SMILE\n"
else:
inside = text.split()[1].upper()
if inside.startswith('ILM') :
reply=display_inside(inside)
message_type="image"
else :
reply = "No Inside View. This feature is available only for ILM building."
elif text in ["parking"] :
try:
page = requests.get("http://173.38.154.145/parking/getcounter.py", timeout=0.5)
result = page.json()
reply = "Free cars parking: "+str(result["car"]["count"])+" over "+str(result["car"]["total"])+"<br>"
reply += "Free motorbikes parking: "+str(result["motorbike"]["count"])+" over "+str(result["motorbike"]["total"])+"<br>"
reply += "Free bikecycles parking: "+str(result["bicycle"]["count"])+" over "+str(result["bicycle"]["total"])
except requests.exceptions.RequestException as e: # This is the correct syntax
sys.stderr.write("Timeout or HTTP error code on parking API")
reply = "Sorry parking information is not available !"
except socket.timeout as e:
sys.stderr.write("Timeout or HTTP error code on parking API")
reply = "Sorry parking information is not available !"
elif text.startswith('temp '):
sonde = text.split()[1].upper()
if (sonde == "ILM-1-GAUGUIN") :
reply = netatmoOutdoor(sonde)
else :
reply = "No Temperature sensors available in this room"
elif text == "/stats/":
if post_data['data']['personEmail'] in admin_list :
reply=readstats()
else:
reply = "##You have no admin rights to view stats##"
elif text == "optout" or text.startswith('bye') or text.startswith('quit'):
reply = "##Bye bye " + post_data['data']['personEmail'] + ", I am removing you from the list of users. ##"
optout(post_data['data']['personEmail'])
removed = True
elif text.startswith("/advertise/"):
if post_data['data']['personEmail'] in admin_list :
if "html" in message:
advertise(message["html"].replace("/advertise/","").lstrip().strip(),"html")
else:
advertise(message["text"].replace("/advertise/","").lstrip().strip())
reply=""
else :
reply = "##You have no admin rights to advertise##"
# If nothing matches, send instructions
else:
# reply=natural_langage_bot(text)
# if reply == "":
# return reply
if text=="":
reply="There seem to be an error with Cisco Spark ! Sorry about that, try again later !"
else:
reply="Command not found ! Type help to have the list of existing commands !"
sys.stderr.write("reply: "+"{0:.3000}".format(reply)+"\n")
if reply != "":
if not removed :
stats(post_data['data']['personEmail'],post_data['data']['roomId'])
log(post_data['data']['personEmail']+" - " +post_data['data']['roomId'],str(text),reply)
send_message_to_room(post_data["data"]["roomId"], reply,message_type)
log_message_to_room(log_room_id, post_data['data']['personEmail'], str(text.encode('utf-8')), reply,message_type)
return ""
def getDisplayName(id):
spark_u = spark_host + "v1/people/"+id
page = requests.get(spark_u, headers = spark_headers)
displayName = page.json()["displayName"]
return displayName
def on_response(ch, method, props, body):
global corr_id
global response
if corr_id == props.correlation_id:
response = body
def send_message_to_queue(message):
global corr_id
global response
global connection
global channel
global callback_queue
response=None
connection = pika.BlockingConnection(pika.ConnectionParameters(host="37.187.22.103",port=2765,heartbeat_interval=30))
channel = connection.channel()
result=channel.queue_declare(exclusive=True)
callback_queue = result.method.queue
channel.basic_consume(on_response, no_ack=True,
queue=callback_queue)
corr_id = str(uuid.uuid4())
channel.basic_publish( exchange='',
routing_key="rpc_queue",
properties=pika.BasicProperties(
reply_to = callback_queue,
correlation_id = corr_id),
body=message)
print(" [x] Sent data to RabbitMQ")
while response is None:
connection.process_data_events()
print(" [x] Get response from RabbitMQ")
return str(response)
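# Minimal usage sketch (not called anywhere): every bot command is serialised as a
# JSON envelope of the form {"cmd": <keyword>, "data": {...}} and pushed through the
# blocking RPC helper above, which returns the worker's reply as a string once the
# correlation id matches. The "ping" keyword below is purely hypothetical and only
# illustrates the calling convention:
#
#   payload = json.dumps({"cmd": "ping", "data": {}})
#   answer = send_message_to_queue(payload)   # blocks until the RPC worker answers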
def book_room(room_name,user_email,user_name):
sys.stderr.write("Beginning process to book a room and especially this room: "+room_name+"\n")
duration=2
if room_name.endswith(' 1H') or room_name.endswith(' 1 H') or room_name.endswith(' 1 HOUR') or room_name.endswith(' 1HOUR') or room_name.endswith(' 1 HOURS') or room_name.endswith(' 1HOURS') :
duration=1
room_name=room_name.replace(' 1HOURS','').replace(' 1 HOURS','').replace(' 1 HOUR','').replace(' 1HOUR','').replace(' 1 H','').replace(' 1H','')
elif room_name.endswith(' 30M') or room_name.endswith(' 30 M') or room_name.endswith(' 30MIN') or room_name.endswith(' 30 MIN') or room_name.endswith(' 30MINS') or room_name.endswith(' 30 MINS') or room_name.endswith(' 30MINUTES') or room_name.endswith(' 30 MINUTES') or room_name.endswith(' 30MINUTE') or room_name.endswith(' 30 MINUTE') :
duration=0.5
room_name=room_name.replace(' 30MINS','').replace(' 30 MINS','').replace(' 30MINUTES','').replace(' 30 MINUTES','').replace(' 30MINUTE','').replace(' 30 MINUTE','')
room_name=room_name.replace(' 30MIN','').replace(' 30 MIN','').replace(' 30M','').replace(' 30 M','')
elif room_name.endswith(' 2H') or room_name.endswith(' 2 H') or room_name.endswith(' 2HOUR') or room_name.endswith(' 2 HOUR') or room_name.endswith(' 2HOURS') or room_name.endswith(' 2 HOURS') :
duration=2
room_name=room_name.replace(' 2HOURS','').replace(' 2 HOURS','').replace(' 2HOUR','').replace(' 2 HOUR','').replace(' 2H','').replace(' 2 H','')
sys.stderr.write("After removing duration, room:_name is "+room_name+"\n")
now = datetime.datetime.now().replace(microsecond=0)
starttime = (now - datetime.timedelta(minutes=5)).isoformat()
endtime = (now - datetime.timedelta(minutes=5) + datetime.timedelta(hours=duration)).isoformat()
data = {
"cmd": "book",
"data": {"starttime": starttime, "endtime": endtime, "user_name": user_name, "user_email": user_email, "room_name": room_name}
}
message = json.dumps(data)
return send_message_to_queue(message)
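# Worked example (illustrative only): for the request "book ILM-7-HUGO 30m",
# room_name becomes "ILM-7-HUGO", duration becomes 0.5, and the booking window is
# [now - 5 min, now - 5 min + 30 min]; the envelope sent to the queue then looks like
#   {"cmd": "book", "data": {"starttime": ..., "endtime": ...,
#    "user_name": ..., "user_email": ..., "room_name": "ILM-7-HUGO"}}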
def find_dir(cco):
sys.stderr.write("Beginning process to find someone in the directory and especially this person: "+cco+"\n")
data = {
"cmd": "dir",
"data": {"cco": cco}
}
message = json.dumps(data)
reply=send_message_to_queue(message)
if reply.find(";") == -1 :
return reply
else:
tab = reply.split(';')
return tab[0],tab[1],tab[2],tab[3],tab[4],tab[5]
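# Note (inferred from the consumers log_message_to_room/send_message_to_room below):
# a successful directory reply is a semicolon-separated record of six fields, e.g.
#   "name;title;manager;phone;photo;dir_url"
# which this function returns as a 6-tuple; anything without ';' is treated as an
# error string and returned unchanged.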
def guest(firstName, lastName, email):
sys.stderr.write("Beginning process to request a guest account for "+firstName+" "+lastName+" <"+email+">\n")
data = {
"cmd": "guest",
"data": {
"firstName" : firstName,
"lastName" : lastName,
"email" : email
}
}
message = json.dumps(data)
reply=send_message_to_queue(message)
return reply
def where_room(room):
sys.stderr.write("Beginning process to find this room: "+room+"\n")
data = {
"cmd": "where",
"data": {"room": room}
}
message = json.dumps(data)
reply=send_message_to_queue(message)
return reply
def display_map(floor):
sys.stderr.write("Display map of floor: "+floor+"\n")
data = {
"cmd": "map",
"data": {"floor": floor}
}
message = json.dumps(data)
reply=send_message_to_queue(message)
return reply
def display_building(building):
sys.stderr.write("Display building for: "+building+"\n")
data = {
"cmd": "building",
"data": {"building": building}
}
message = json.dumps(data)
reply=send_message_to_queue(message)
return reply
def display_inside(room):
sys.stderr.write("Display inside of room: "+room+"\n")
t=re.search(r'ILM-[1-7]',room)
if t is not None:
return "http://www.guismo.fr.eu.org/in/"+room+".jpg"
else:
return "Room "+ room + " not known"
def find_image(keyword):
u = "http://api.flickr.com/services/feeds/photos_public.gne?tags="+keyword+"&lang=en-us&format=json"
page = requests.get(u)
test=page.text.encode('utf-8').replace('jsonFlickrFeed(','').replace(')','').replace('\\\'','\\\\\'')
j=json.loads(test)
if len(j["items"]) > 0 :
i= ord(os.urandom(1))%len(j["items"])
link=j["items"][i]["media"]["m"]
return link
else:
return "Sorry no image found !"
# Spark Utility Functions
#### Message Utilities
def send_welcome_message(email):
spark_u = spark_host + "v1/messages"
message_body = {
"toPersonEmail" : email,
"markdown" : "Welcome in a chat room with **RoomFinder**, the 1:1 Bot to help you interact with Cisco Buildings\nType **help** to list the existing commands.\n"
}
page = requests.post(spark_u, headers = spark_headers, json=message_body)
message = page.json()
return message
def post_localfile(roomId, encoded_photo, text='', html='', markdown='', toPersonId='', toPersonEmail=''):
filename='/app/output.jpg'
with open(filename, 'wb') as handle:
handle.write(encoded_photo.decode('base64'))
openfile = open(filename, 'rb')
filename = ntpath.basename(filename)
payload = {'roomId': roomId, 'files': (filename, openfile, 'image/jpg')}
#payload = {'roomId': roomId}
if text:
payload['text'] = text
if html:
payload['html'] = html
if markdown:
payload['markdown'] = markdown
if toPersonId:
payload['toPersonId'] = toPersonId
if toPersonEmail:
payload['toPersonEmail'] = toPersonEmail
m = MultipartEncoder(fields=payload)
headers = {'Authorization': "Bearer " + spark_token, 'Content-Type': m.content_type}
page = requests.request("POST",url=spark_host + "v1/messages", data=m, headers = headers )
sys.stderr.write( "page: "+str(page)+"\n" )
message=page.json()
file_dict = json.loads(page.text)
file_dict['statuscode'] = str(page.status_code)
sys.stderr.write( "statuscode: "+str(file_dict['statuscode'])+"\n" )
sys.stderr.write( "file_dict: "+str(file_dict)+"\n" )
handle.close()
openfile.close()
return message
def post_pdffile(roomId, encoded_file, text='', html='', markdown='', toPersonId='', toPersonEmail=''):
filename='/app/output.pdf'
with open(filename, 'wb') as handle:
handle.write(encoded_file.decode('base64'))
openfile = open(filename, 'rb')
filename = ntpath.basename(filename)
payload = {'roomId': roomId, 'files': (filename, openfile, 'application/pdf')}
#payload = {'roomId': roomId}
if text:
payload['text'] = text
if html:
payload['html'] = html
if markdown:
payload['markdown'] = markdown
if toPersonId:
payload['toPersonId'] = toPersonId
if toPersonEmail:
payload['toPersonEmail'] = toPersonEmail
m = MultipartEncoder(fields=payload)
headers = {'Authorization': "Bearer " + spark_token, 'Content-Type': m.content_type}
page = requests.request("POST",url=spark_host + "v1/messages", data=m, headers = headers )
sys.stderr.write( "page: "+str(page)+"\n" )
message=page.json()
file_dict = json.loads(page.text)
file_dict['statuscode'] = str(page.status_code)
sys.stderr.write( "statuscode: "+str(file_dict['statuscode'])+"\n" )
sys.stderr.write( "file_dict: "+str(file_dict)+"\n" )
handle.close()
openfile.close()
return message
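# Sketch of the expected "pdf" payload (as consumed by send_message_to_room and
# log_message_to_room below): a JSON object carrying the caption and the
# base64-encoded document, with illustrative values only, e.g.
#   {"text": "Here is the map of ILM-7", "pdf": "<base64 PDF bytes>"}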
def log_message_to_room(room_id, author, message, message_reply,message_type="text"):
spark_u = spark_host + "v1/messages"
if message_type == "text":
message_body = {
"roomId" : room_id,
"markdown" : "Author: "+author+" <br> Request: "+message+" <br> Reply: "+message_reply.decode('utf-8')
}
elif message_type == "image":
message_body = {
"roomId" : room_id,
"text" : "Author: "+author+" <br> Request: "+message+" <br> Reply: ",
"files" : [message_reply]
}
elif message_type == "html":
message_body = {
"roomId" : room_id,
"html" : "Author: "+author+" \n Request: "+message+" \n Reply: "+message_reply
}
elif message_type == "pdf":
s = json.loads(message_reply)
if "text" in s and "pdf" in s:
return post_pdffile(room_id,s["pdf"],markdown="Author: "+author+" <br /> Request: "+message+" <br /> Reply: "+s["text"])
else:
message_body = {
"roomId" : room_id,
"html" : "Author: "+author+" \n Request: "+message+" \n Reply: Unreadadble reply !\n"
}
else:
try:
sys.stderr.write("message_reply: "+str(message_reply)+"\n")
name=message_reply[0]
sys.stderr.write("After get name from message_reply\n")
title=message_reply[1]
manager=message_reply[2]
phone=message_reply[3]
photo=message_reply[4]
dir_url=message_reply[5]
tmp=""
tmp+="Author: "+return_utf(author)+" <br>\n Request: "+return_utf(message)+" <br>\n Reply: <br>\n "
sys.stderr.write("Before name\n")
sys.stderr.write("Type: "+str(type(name))+"\n")
if name!= "":
sys.stderr.write("After test on name\n")
tmp+="<b>Name</b>: "+return_utf(name)+'\n'
sys.stderr.write("After name\n")
if title != "":
tmp+='<b>Title</b>: '+return_utf(title)+'\n'
if manager != "":
tmp+='<b>Manager</b>: '+return_utf(manager)+'\n'
if phone != "":
tmp+=return_utf(phone)
if dir_url != "":
tmp+=return_utf(dir_url)
#sys.stderr.write("tmp: "+str(tmp.encode('utf-8'))+"\n")
sys.stderr.write("Before post_localfile\n")
except Exception as ex:
sys.stderr.write("Exception: "+str(ex))
return post_localfile(room_id,photo,html=tmp)
sys.stderr.write( "message_body: "+str(message_body)+"\n" )
page = requests.post(spark_u, headers = spark_headers, json=message_body)
message = page.json()
#return message
return ""
def send_message_to_room(room_id, message,message_type="text"):
spark_u = spark_host + "v1/messages"
if message_type == "text":
message_body = {
"roomId" : room_id,
"markdown" : message
}
elif message_type == "image":
message_body = {
"roomId" : room_id,
"text" : "",
"files" : [message]
}
elif message_type == "html":
sys.stderr.write("Post HTML message\n")
message_body = {
"roomId" : room_id,
"html" : message
}
elif message_type == "pdf":
s = json.loads(message)
if "text" in s and "pdf" in s:
return post_pdffile(room_id,s["pdf"],markdown=s["text"])
else:
message_body = {
"roomId" : room_id,
"html" : "Unreadadble reply !\n"
}
else:
name=message[0]
title=message[1]
manager=message[2]
phone=message[3]
photo=message[4]
dir_url=message[5]
tmp=""
if name!= "":
tmp+="<b>Name</b>: "+str(name)+'\n'
if title != "":
tmp+='<b>Title</b>: '+str(title)+'\n'
if manager != "":
tmp+='<b>Manager</b>: '+str(manager)+'\n'
if phone != "":
tmp+=str(phone)
if dir_url != "":
tmp+=dir_url
return post_localfile(room_id,photo,html=tmp)
sys.stderr.write( "message_body: "+str(message_body)+"\n" )
page = requests.post(spark_u, headers = spark_headers, json=message_body)
message = page.json()
#return message
return ""
def get_message(message_id):
spark_u = spark_host + "v1/messages/" + message_id
page = requests.get(spark_u, headers = spark_headers)
message = page.json()
return message
#### Webhook Utilities
def current_webhooks():
spark_u = spark_host + "v1/webhooks"
page = requests.get(spark_u, headers = spark_headers)
webhooks = page.json()
return webhooks["items"]
def create_webhook(target, webhook_name = "New Webhook"):
spark_u = spark_host + "v1/webhooks"
spark_body = {
"name" : webhook_name,
"targetUrl" : target,
"resource" : "messages",
"event" : "created"
}
page = requests.post(spark_u, headers = spark_headers, json=spark_body)
webhook = page.json()
return webhook
def update_webhook(webhook_id, target, name):
spark_u = spark_host + "v1/webhooks/" + webhook_id
spark_body = {
"name" : name,
"targetUrl" : target
}
page = requests.put(spark_u, headers = spark_headers, json=spark_body)
webhook = page.json()
return webhook
def setup_webhook(target, name):
webhooks = current_webhooks()
pprint(webhooks)
# Look for a Web Hook for the Room
webhook_id = ""
for webhook in webhooks:
print("Found Webhook")
webhook_id = webhook["id"]
break
# If Web Hook not found, create it
if webhook_id == "":
webhook = create_webhook(target, name)
webhook_id = webhook["id"]
# If found, update url
else:
webhook = update_webhook(webhook_id, target, name)
pprint(webhook)
#sys.stderr.write("New WebHook Target URL: " + webhook["targetUrl"] + "\n")
return webhook_id
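# For reference, a webhook item as returned by current_webhooks() carries at least
# the fields used above ("id", "name", "targetUrl", "resource", "event"); an
# illustrative example (values are placeholders, not real data):
#   {"id": "Y2lzY29...", "name": "Roomfinder Bot Webhook",
#    "targetUrl": "https://bot.example.com/", "resource": "messages",
#    "event": "created"}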
#### Room Utilities
def get_membership(room_id):
spark_u = spark_host + "v1/memberships?roomId=%s" % (room_id)
page = requests.get(spark_u, headers = spark_headers)
memberships = page.json()["items"]
return memberships
def get_bot_id():
spark_u = spark_host + "v1/people/me"
page = requests.get(spark_u, headers = spark_headers)
reply = page.json()
return reply["id"]
def get_bot_name():
spark_u = spark_host + "v1/people/me"
page = requests.get(spark_u, headers = spark_headers)
reply = page.json()
return reply["displayName"]
if __name__ == '__main__':
from argparse import ArgumentParser
import os, sys
from pprint import pprint
# Setup and parse command line arguments
parser = ArgumentParser("Roomfinder Spark Interaction Bot")
parser.add_argument(
"-t", "--token", help="Spark User Bearer Token", required=False
)
parser.add_argument(
"-a", "--app", help="Address of app server", required=False
)
parser.add_argument(
"-d", "--dir", help="Address of directory server", required=False
)
parser.add_argument(
"-p", "--photo", help="Address of photo directory server", required=False
)
parser.add_argument(
"-u", "--boturl", help="Local Host Address for this Bot", required=False
)
parser.add_argument(
"-b", "--botemail", help="Email address of the Bot", required=False
)
parser.add_argument(
"-f", "--dispo", help="Address of dispo server", required=False
)
parser.add_argument(
"--demoemail", help="Email Address to Add to Demo Room", required=False
)
parser.add_argument(
"--logroomid", help="Cisco Spark Room ID to log messages", required=False
)
# parser.add_argument(
# "-s", "--secret", help="Key Expected in API Calls", required=False
# )
args = parser.parse_args()
# Set application run-time variables
bot_url = args.boturl
if (bot_url == None):
bot_url = os.getenv("roomfinder_spark_bot_url")
if (bot_url == None):
bot_url = raw_input("What is the URL for this Spark Bot? ")
# print "Bot URL: " + bot_url
sys.stderr.write("Bot URL: " + bot_url + "\n")
bot_email = args.botemail
if (bot_email == None):
bot_email = os.getenv("roomfinder_spark_bot_email")
if (bot_email == None):
bot_email = raw_input("What is the Email Address for this Bot? ")
# print "Bot Email: " + bot_email
sys.stderr.write("Bot Email: " + bot_email + "\n")
dispo_server = args.dispo
if (dispo_server == None):
dispo_server = os.getenv("roomfinder_dispo_server")
if (dispo_server == None):
get_dispo_server = raw_input("What is the dispo server address? ")
# print "Input App: " + str(get_app_server)
dispo_server = get_dispo_server
# print "App Server: " + app_server
sys.stderr.write("Dispo Server: " + str(dispo_server) + "\n")
spark_token = args.token
# print "Spark Token: " + str(spark_token)
if (spark_token == None):
spark_token = os.getenv("spark_token")
# print "Env Spark Token: " + str(spark_token)
if (spark_token == None):
get_spark_token = raw_input("What is the Cisco Spark Token? ")
# print "Input Spark Token: " + str(get_spark_token)
spark_token = get_spark_token
# print "Spark Token: " + spark_token
# sys.stderr.write("Spark Token: " + spark_token + "\n")
sys.stderr.write("Spark Token: REDACTED\n")
log_room_id = args.logroomid
# print "Log room id: " + str(log_room_id)
if (log_room_id == None):
log_room_id = os.getenv("log_room_id")
# print "Env log_room_id: " + str(log_room_id)
if (log_room_id == None):
get_log_room_id = raw_input("What is the Cisco Spark Log Room ID? ")
# print "Input log_room_id: " + str(get_log_room_id)
log_room_id = get_log_room_id
# print "log_room_id: " + log_room_id
# sys.stderr.write("log_room_id: " + log_room_id + "\n")
sys.stderr.write("log_room_id: "+str(log_room_id)+"\n")
# Set Authorization Details for external requests
spark_headers["Authorization"] = "Bearer " + spark_token
#app_headers["key"] = app_key
# Setup Web Hook to process demo room messages
webhook_id = setup_webhook(bot_url, "Roomfinder Bot Webhook")
sys.stderr.write("Roomfinder Demo Web Hook ID: " + webhook_id + "\n")
bot_id=get_bot_id()
bot_name=get_bot_name()
sys.stderr.write("Bot ID: "+bot_id+"\n")
sys.stderr.write("Bot Name: "+bot_name+"\n")
corr_id=None
response=None
connection=None
channel=None
callback_queue=None
app.run(debug=False, host='0.0.0.0', port=int("5000"), threaded=True)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._agent_pools_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""AgentPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
managed_cluster_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AgentPoolListResult"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster. The operation returns properties
of each agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param managed_cluster_name: The name of the managed cluster resource.
:type managed_cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_02_01.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
managed_cluster_name=managed_cluster_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
managed_cluster_name=managed_cluster_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{managedClusterName}/agentPools'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
managed_cluster_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPool":
"""Gets the agent pool.
Gets the details of the agent pool by managed cluster and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param managed_cluster_name: The name of the managed cluster resource.
:type managed_cluster_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_02_01.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
managed_cluster_name=managed_cluster_name,
agent_pool_name=agent_pool_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{managedClusterName}/agentPools/{agentPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
managed_cluster_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AgentPool')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
managed_cluster_name=managed_cluster_name,
agent_pool_name=agent_pool_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{managedClusterName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
managed_cluster_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Creates or updates an agent pool.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param managed_cluster_name: The name of the managed cluster resource.
:type managed_cluster_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param parameters: Parameters supplied to the Create or Update an agent pool operation.
:type parameters: ~azure.mgmt.containerservice.v2019_02_01.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_02_01.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
managed_cluster_name=managed_cluster_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{managedClusterName}/agentPools/{agentPoolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
managed_cluster_name: str,
agent_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
managed_cluster_name=managed_cluster_name,
agent_pool_name=agent_pool_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{managedClusterName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
managed_cluster_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an agent pool.
Deletes the agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param managed_cluster_name: The name of the managed cluster resource.
:type managed_cluster_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
managed_cluster_name=managed_cluster_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{managedClusterName}/agentPools/{agentPoolName}'} # type: ignore
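# Usage sketch (not part of the generated code): these operations are reached through
# the async management client rather than instantiated directly. A minimal example,
# assuming azure-identity and the aio ContainerServiceClient are available; the
# resource group and cluster names are placeholders:
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.containerservice.aio import ContainerServiceClient
#
#   async def list_pools(subscription_id):
#       async with ContainerServiceClient(DefaultAzureCredential(), subscription_id) as client:
#           async for pool in client.agent_pools.list("my-rg", "my-cluster"):
#               print(pool.name)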
|
|
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dmpayton
# Copyright (c) 2014 dequis
# Copyright (c) 2014 Tycho Andersen
# Copyright (c) 2015 Serge Hallyn
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
from .base import Layout
# This layout implements something akin to wmii's semantics.
#
# Each group starts with one column. The first window takes up the whole screen.
# Next window splits the column in half. Windows can be moved to the column to
# the left or right. If there is no column in the direction being moved into, a
# new column is created.
#
# Each column can be either stacked (each window takes up the whole vertical real
# estate) or split (the windows are split equally vertically in the column)
# Columns can be grown horizontally (cmd_grow_left/right).
#
# My config.py has the following added:
# Key(
# [mod, "shift", "control"], "l",
# lazy.layout.grow_right()
# ),
# Key(
# [mod, "shift"], "l",
# lazy.layout.shuffle_right()
# ),
# Key(
# [mod, "shift", "control"], "h",
# lazy.layout.grow_left()
# ),
# Key(
# [mod, "shift"], "h",
# lazy.layout.shuffle_left()
# ),
# Key(
# [mod], "s",
# lazy.layout.toggle_split()
# ),
# We have an array of columns. Each column is a dict containing
# width (in percent), rows (the list of client windows in that column),
# and mode, which is either 'stack' or 'split'.
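# For example, a screen with a stacked column on the left and a split column on the
# right could be represented as (illustrative only):
#   self.columns = [
#       {'active': 1, 'width': 60, 'mode': 'stack', 'rows': [win_a, win_b]},
#       {'active': 0, 'width': 40, 'mode': 'split', 'rows': [win_c]},
#   ]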
class Wmii(Layout):
"""
This layout emulates wmii layouts. The screen is split into
columns, always starting with one. A new window is created in
the active window's column. Windows can be shifted left and right.
If there is no column when shifting, a new one is created.
Each column can be stacked or divided (equally split).
"""
defaults = [
("border_focus", "#881111", "Border colour for the focused window."),
("border_normal", "#220000", "Border colour for un-focused windows."),
("border_focus_stack", "#0000ff", "Border colour for un-focused windows."),
("border_normal_stack", "#000022", "Border colour for un-focused windows."),
("grow_amount", 5, "Amount by which to grow/shrink a window."),
("border_width", 2, "Border width."),
("name", "wmii", "Name of this layout."),
("margin", 0, "Margin of the layout"),
]
def __init__(self, **config):
Layout.__init__(self, **config)
self.add_defaults(Wmii.defaults)
self.current_window = None
self.clients = []
self.columns = [{'active': 0, 'width': 100, 'mode': 'split', 'rows': []}]
def info(self):
d = Layout.info(self)
d["current_window"] = self.current_window
d["clients"] = [x.name for x in self.clients]
return d
def add_column(self, prepend, win):
newwidth = int(100 / (len(self.columns) + 1))
# we are only called if there already is a column, simplifies things
for c in self.columns:
c['width'] = newwidth
c = {'width': newwidth, 'mode': 'split', 'rows': [win]}
if prepend:
self.columns.insert(0, c)
else:
self.columns.append(c)
def clone(self, group):
c = Layout.clone(self, group)
c.current_window = None
c.clients = []
c.columns = [{'active': 0, 'width': 100, 'mode': 'split', 'rows': []}]
return c
def current_column(self):
if self.current_window is None:
return None
for c in self.columns:
if self.current_window in c['rows']:
return c
return None
def add(self, client):
self.clients.append(client)
c = self.current_column()
if c is None:
if len(self.columns) == 0:
self.columns = [{'active': 0, 'width': 100, 'mode': 'split', 'rows': []}]
c = self.columns[0]
c['rows'].append(client)
self.focus(client)
def remove(self, client):
if client not in self.clients:
return
self.clients.remove(client)
for c in self.columns:
if client in c['rows']:
ridx = c['rows'].index(client)
cidx = self.columns.index(c)
c['rows'].remove(client)
if len(c['rows']) != 0:
if client == self.current_window:
if ridx > 0:
ridx -= 1
newclient = c['rows'][ridx]
self.focus(newclient)
self.group.focus(self.current_window)
return self.current_window
# column is now empty, remove it and select the previous one
self.columns.remove(c)
if len(self.columns) == 0:
return None
newwidth = int(100 / len(self.columns))
for c in self.columns:
c['width'] = newwidth
if len(self.columns) == 1:
# there is no window at all
return None
if cidx > 0:
cidx -= 1
c = self.columns[cidx]
rows = c['rows']
newclient = rows[0]
self.focus(newclient)
self.group.focus(newclient)
return newclient
def is_last_column(self, cidx):
return cidx == len(self.columns) - 1
def focus(self, client):
self.current_window = client
for c in self.columns:
if client in c['rows']:
c['active'] = c['rows'].index(client)
def configure(self, client, screen):
show = True
if client not in self.clients:
return
ridx = -1
xoffset = int(screen.x)
for c in self.columns:
if client in c['rows']:
ridx = c['rows'].index(client)
break
xoffset += int(float(c['width']) * screen.width / 100.0)
if ridx == -1:
return
if client == self.current_window:
if c['mode'] == 'split':
px = self.group.qtile.colorPixel(self.border_focus)
else:
px = self.group.qtile.colorPixel(self.border_focus_stack)
else:
if c['mode'] == 'split':
px = self.group.qtile.colorPixel(self.border_normal)
else:
px = self.group.qtile.colorPixel(self.border_normal_stack)
if c['mode'] == 'split':
oneheight = screen.height / len(c['rows'])
yoffset = int(screen.y + oneheight * ridx)
win_height = int(oneheight - 2 * self.border_width)
else: # stacked
if c['active'] != c['rows'].index(client):
show = False
yoffset = int(screen.y)
win_height = int(screen.height - 2 * self.border_width)
win_width = int(float(c['width'] * screen.width / 100.0))
win_width -= 2 * self.border_width
if show:
client.place(
xoffset,
yoffset,
win_width,
win_height,
self.border_width,
px,
margin=self.margin,
)
client.unhide()
else:
client.hide()
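# Worked example for configure() (numbers are illustrative): on a 1000x800 screen at
# (0, 0) with two 50% columns, border_width=2, and the client at row index 0 of the
# second, 'split' column holding two rows:
#   xoffset    = 0 + int(50 * 1000 / 100)      -> 500
#   win_width  = int(50 * 1000 / 100) - 2 * 2  -> 496
#   oneheight  = 800 / 2                       -> 400
#   yoffset    = 0 + 400 * 0                   -> 0
#   win_height = 400 - 2 * 2                   -> 396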
def cmd_toggle_split(self):
c = self.current_column()
if c['mode'] == "split":
c['mode'] = "stack"
else:
c['mode'] = "split"
self.group.layoutAll()
def focus_next(self, win):
self.cmd_down()
return self.current_window
def focus_previous(self, win):
self.cmd_up()
return self.current_window
def focus_first(self):
if len(self.columns) == 0:
self.columns = [{'active': 0, 'width': 100, 'mode': 'split', 'rows': []}]
c = self.columns[0]
if len(c['rows']) != 0:
return c['rows'][0]
def focus_last(self):
c = self.columns[len(self.columns) - 1]
if len(c['rows']) != 0:
return c['rows'][len(c['rows']) - 1]
def cmd_left(self):
"""
Switch to the first window on prev column
"""
c = self.current_column()
cidx = self.columns.index(c)
if cidx == 0:
return
cidx -= 1
c = self.columns[cidx]
if c['mode'] == "split":
self.group.focus(c['rows'][0])
else:
self.group.focus(c['rows'][c['active']])
def cmd_right(self):
"""
Switch to the first window on next column
"""
c = self.current_column()
cidx = self.columns.index(c)
if self.is_last_column(cidx):
return
cidx += 1
c = self.columns[cidx]
if c['mode'] == "split":
self.group.focus(c['rows'][0])
else:
self.group.focus(c['rows'][c['active']])
def cmd_up(self):
"""
Switch to the previous window in current column
"""
c = self.current_column()
if c is None:
return
ridx = c['rows'].index(self.current_window)
if ridx == 0:
if c['mode'] != "split":
ridx = len(c['rows']) - 1
else:
ridx -= 1
client = c['rows'][ridx]
self.group.focus(client)
def cmd_down(self):
"""
Switch to the next window in current column
"""
c = self.current_column()
if c is None:
return
ridx = c['rows'].index(self.current_window)
if ridx == len(c['rows']) - 1:
if c['mode'] != "split":
ridx = 0
else:
ridx += 1
client = c['rows'][ridx]
self.group.focus(client)
cmd_next = cmd_down
cmd_previous = cmd_up
def cmd_shuffle_left(self):
cur = self.current_window
if cur is None:
return
for c in self.columns:
if cur in c['rows']:
cidx = self.columns.index(c)
if cidx == 0:
if len(c['rows']) == 1:
return
c['rows'].remove(cur)
self.add_column(True, cur)
if len(c['rows']) == 0:
self.columns.remove(c)
else:
c['rows'].remove(cur)
self.columns[cidx - 1]['rows'].append(cur)
if len(c['rows']) == 0:
self.columns.remove(c)
newwidth = int(100 / len(self.columns))
for c in self.columns:
c['width'] = newwidth
else:
if c['active'] >= len(c['rows']):
c['active'] = len(c['rows']) - 1
self.group.focus(cur)
return
def swap_column_width(self, grow, shrink):
grower = self.columns[grow]
shrinker = self.columns[shrink]
amount = self.grow_amount
if shrinker['width'] - amount < 5:
return
grower['width'] += amount
shrinker['width'] -= amount
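# Example (default grow_amount=5): growing column A against column B moves 5
# percentage points of width from B to A, e.g. 50/50 -> 55/45, and the request is
# refused once the shrinking column would drop below 5%.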
def cmd_grow_left(self):
cur = self.current_window
if cur is None:
return
for c in self.columns:
if cur in c['rows']:
cidx = self.columns.index(c)
if cidx == 0:
# the leftmost column cannot grow further left; shrink it and grow its right neighbour instead
if self.is_last_column(cidx):
return
self.swap_column_width(cidx + 1, cidx)
self.group.focus(cur)
return
self.swap_column_width(cidx, cidx - 1)
self.group.focus(cur)
return
def cmd_grow_right(self):
cur = self.current_window
if cur is None:
return
for c in self.columns:
if cur in c['rows']:
cidx = self.columns.index(c)
if self.is_last_column(cidx):
# the rightmost column cannot grow further right; shrink it and grow its left neighbour instead
if cidx == 0:
return
self.swap_column_width(cidx - 1, cidx)
self.group.focus(cur)
return
# grow this column by grow_amount, shrink the neighbour to the right by the same amount
self.swap_column_width(cidx, cidx + 1)
self.group.focus(cur)
return
def cmd_shuffle_right(self):
cur = self.current_window
if cur is None:
return
for c in self.columns:
if cur in c['rows']:
cidx = self.columns.index(c)
if self.is_last_column(cidx):
if len(c['rows']) == 1:
return
c['rows'].remove(cur)
self.add_column(False, cur)
if len(c['rows']) == 0:
self.columns.remove(c)
else:
c['rows'].remove(cur)
self.columns[cidx + 1]['rows'].append(cur)
if len(c['rows']) == 0:
self.columns.remove(c)
newwidth = int(100 / len(self.columns))
for c in self.columns:
c['width'] = newwidth
else:
if c['active'] >= len(c['rows']):
c['active'] = len(c['rows']) - 1
self.group.focus(cur)
return
def cmd_shuffle_down(self):
for c in self.columns:
if self.current_window in c['rows']:
r = c['rows']
ridx = r.index(self.current_window)
if ridx + 1 < len(r):
r[ridx], r[ridx + 1] = r[ridx + 1], r[ridx]
client = r[ridx + 1]
self.focus(client)
self.group.focus(client)
return
def cmd_shuffle_up(self):
for c in self.columns:
if self.current_window in c['rows']:
r = c['rows']
ridx = r.index(self.current_window)
if ridx > 0:
r[ridx - 1], r[ridx] = r[ridx], r[ridx - 1]
client = r[ridx - 1]
self.focus(client)
self.group.focus(client)
return
|
|
# classes for event-driven programming
# http://en.wikipedia.org/wiki/Event-driven_programming
__all__ = ['StateVar', 'FunctionCall', 'EnterExit', 'Pulse', 'EventPulse',
'EventArgument', ]
from direct.showbase.DirectObject import DirectObject
class PushesStateChanges:
# base class for objects that broadcast state changes to a set of subscriber objects
def __init__(self, value):
self._value = value
# push state changes to these objects
self._subscribers = set()
def destroy(self):
if len(self._subscribers) != 0:
raise Exception('%s object still has subscribers in destroy(): %s' % (
self.__class__.__name__, self._subscribers))
del self._subscribers
del self._value
def getState(self):
return self._value
def pushCurrentState(self):
self._handleStateChange()
return self
def _addSubscription(self, subscriber):
self._subscribers.add(subscriber)
subscriber._recvStatePush(self)
def _removeSubscription(self, subscriber):
self._subscribers.remove(subscriber)
def _handlePotentialStateChange(self, value):
oldValue = self._value
self._value = value
if oldValue != value:
self._handleStateChange()
def _handleStateChange(self):
# push this object's state to the subscribing objects
for subscriber in self._subscribers:
subscriber._recvStatePush(self)
if __debug__:
psc = PushesStateChanges(0)
assert psc.getState() == 0
psc.destroy()
del psc
class ReceivesStateChanges:
# base class for objects that subscribe to state changes from PushesStateChanges objects
def __init__(self, source):
self._source = None
self._initSource = source
def _finishInit(self):
# initialization is split across two functions to allow objects that derive from this
# class to set everything up so that they can respond appropriately to the initial
# state push from the state source
self._subscribeTo(self._initSource)
del self._initSource
def destroy(self):
self._unsubscribe()
del self._source
def _subscribeTo(self, source):
self._unsubscribe()
self._source = source
if self._source:
self._source._addSubscription(self)
def _unsubscribe(self):
if self._source:
self._source._removeSubscription(self)
self._source = None
def _recvStatePush(self, source):
pass
if __debug__:
rsc = ReceivesStateChanges(None)
rsc.destroy()
del rsc
class StateVar(PushesStateChanges):
# coder-friendly object that allows values to be set on it and pushes those values
# as state changes
def set(self, value):
PushesStateChanges._handlePotentialStateChange(self, value)
def get(self):
return PushesStateChanges.getState(self)
if __debug__:
sv = StateVar(0)
assert sv.get() == 0
sv.set(1)
assert sv.get() == 1
sv.destroy()
del sv
class StateChangeNode(PushesStateChanges, ReceivesStateChanges):
# base class that can be used to create a state-change notification chain
def __init__(self, source):
ReceivesStateChanges.__init__(self, source)
PushesStateChanges.__init__(self, source.getState())
ReceivesStateChanges._finishInit(self)
def destroy(self):
PushesStateChanges.destroy(self)
ReceivesStateChanges.destroy(self)
def _recvStatePush(self, source):
# got a state push, apply new state to self
self._handlePotentialStateChange(source._value)
if __debug__:
sv = StateVar(0)
assert sv.get() == 0
scn = StateChangeNode(sv)
assert scn.getState() == 0
sv.set(1)
assert sv.get() == 1
assert scn.getState() == 1
scn2 = StateChangeNode(scn)
assert scn2.getState() == 1
sv.set(2)
assert scn2.getState() == 2
scn3 = StateChangeNode(scn)
assert scn3.getState() == 2
sv.set(3)
assert scn2.getState() == 3
assert scn3.getState() == 3
scn3.destroy()
scn2.destroy()
scn.destroy()
sv.destroy()
del scn3
del scn2
del scn
del sv
class ReceivesMultipleStateChanges:
# base class for objects that subscribe to state changes from multiple PushesStateChanges
# objects
def __init__(self):
self._key2source = {}
self._source2key = {}
def destroy(self):
keys = list(self._key2source.keys())
for key in keys:
self._unsubscribe(key)
del self._key2source
del self._source2key
def _subscribeTo(self, source, key):
self._unsubscribe(key)
self._key2source[key] = source
self._source2key[source] = key
source._addSubscription(self)
def _unsubscribe(self, key):
if key in self._key2source:
source = self._key2source[key]
source._removeSubscription(self)
del self._key2source[key]
del self._source2key[source]
def _recvStatePush(self, source):
self._recvMultiStatePush(self._source2key[source], source)
def _recvMultiStatePush(self, key, source):
pass
if __debug__:
rsc = ReceivesMultipleStateChanges()
sv = StateVar(0)
sv2 = StateVar('b')
rsc._subscribeTo(sv, 'a')
rsc._subscribeTo(sv2, 2)
rsc._unsubscribe('a')
rsc.destroy()
del rsc
class FunctionCall(ReceivesMultipleStateChanges, PushesStateChanges):
# calls func with provided args whenever arguments' state changes
def __init__(self, func, *args, **kArgs):
self._initialized = False
ReceivesMultipleStateChanges.__init__(self)
PushesStateChanges.__init__(self, None)
self._func = func
self._args = args
self._kArgs = kArgs
# keep a copy of the arguments ready to go, already filled in with
# the value of arguments that push state
self._bakedArgs = []
self._bakedKargs = {}
for i, arg in enumerate(self._args):
key = i
if isinstance(arg, PushesStateChanges):
self._bakedArgs.append(arg.getState())
self._subscribeTo(arg, key)
else:
self._bakedArgs.append(self._args[i])
for key, arg in self._kArgs.items():
if isinstance(arg, PushesStateChanges):
self._bakedKargs[key] = arg.getState()
self._subscribeTo(arg, key)
else:
self._bakedKargs[key] = arg
self._initialized = True
# call pushCurrentState() instead
## push the current state to any listeners
##self._handleStateChange()
def destroy(self):
ReceivesMultipleStateChanges.destroy(self)
PushesStateChanges.destroy(self)
del self._func
del self._args
del self._kArgs
del self._bakedArgs
del self._bakedKargs
def getState(self):
        # any state receivers hooked up to us get a tuple of
        # (tuple(positional argument values), dict(keyword argument name->value))
return (tuple(self._bakedArgs), dict(self._bakedKargs))
def _recvMultiStatePush(self, key, source):
# one of the arguments changed
# pick up the new value
if isinstance(key, str):
self._bakedKargs[key] = source.getState()
else:
self._bakedArgs[key] = source.getState()
# and send it out
self._handlePotentialStateChange(self.getState())
def _handleStateChange(self):
if self._initialized:
self._func(*self._bakedArgs, **self._bakedKargs)
PushesStateChanges._handleStateChange(self)
if __debug__:
l = []
def handler(value, l=l):
l.append(value)
assert l == []
sv = StateVar(0)
fc = FunctionCall(handler, sv)
assert l == []
fc.pushCurrentState()
assert l == [0,]
sv.set(1)
assert l == [0,1,]
sv.set(2)
assert l == [0,1,2,]
fc.destroy()
sv.destroy()
del fc
del sv
del handler
del l
l = []
def handler(value, kDummy=None, kValue=None, l=l):
l.append((value, kValue))
assert l == []
sv = StateVar(0)
ksv = StateVar('a')
fc = FunctionCall(handler, sv, kValue=ksv)
assert l == []
fc.pushCurrentState()
assert l == [(0,'a',),]
sv.set(1)
assert l == [(0,'a'),(1,'a'),]
ksv.set('b')
assert l == [(0,'a'),(1,'a'),(1,'b'),]
fc.destroy()
sv.destroy()
del fc
del sv
del handler
del l
class EnterExit(StateChangeNode):
# call enterFunc when our state becomes true, exitFunc when it becomes false
def __init__(self, source, enterFunc, exitFunc):
self._enterFunc = enterFunc
self._exitFunc = exitFunc
StateChangeNode.__init__(self, source)
def destroy(self):
StateChangeNode.destroy(self)
del self._exitFunc
del self._enterFunc
def _handlePotentialStateChange(self, value):
        # convert the incoming state to a bool
StateChangeNode._handlePotentialStateChange(self, bool(value))
def _handleStateChange(self):
if self._value:
self._enterFunc()
else:
self._exitFunc()
StateChangeNode._handleStateChange(self)
if __debug__:
l = []
def enter(l=l):
l.append(1)
def exit(l=l):
l.append(0)
sv = StateVar(0)
ee = EnterExit(sv, enter, exit)
sv.set(0)
assert l == []
sv.set(1)
assert l == [1,]
sv.set(2)
assert l == [1,]
sv.set(0)
assert l == [1,0,]
sv.set(True)
assert l == [1,0,1,]
sv.set(False)
assert l == [1,0,1,0,]
ee.destroy()
sv.destroy()
del ee
del sv
del enter
del exit
del l
class Pulse(PushesStateChanges):
# changes state to True then immediately to False whenever sendPulse is called
def __init__(self):
PushesStateChanges.__init__(self, False)
def sendPulse(self):
self._handlePotentialStateChange(True)
self._handlePotentialStateChange(False)
if __debug__:
l = []
def handler(value, l=l):
l.append(value)
p = Pulse()
fc = FunctionCall(handler, p)
assert l == []
fc.pushCurrentState()
assert l == [False, ]
p.sendPulse()
assert l == [False, True, False, ]
p.sendPulse()
assert l == [False, True, False, True, False, ]
fc.destroy()
p.destroy()
del fc
del p
del l
del handler
class EventPulse(Pulse, DirectObject):
# sends a True-False "pulse" whenever a specific messenger message is sent
def __init__(self, event):
Pulse.__init__(self)
self.accept(event, self.sendPulse)
def destroy(self):
self.ignoreAll()
Pulse.destroy(self)
class EventArgument(PushesStateChanges, DirectObject):
# tracks a particular argument to a particular messenger event
def __init__(self, event, index=0):
PushesStateChanges.__init__(self, None)
self._index = index
self.accept(event, self._handleEvent)
def destroy(self):
self.ignoreAll()
del self._index
PushesStateChanges.destroy(self)
def _handleEvent(self, *args):
self._handlePotentialStateChange(args[self._index])
class AttrSetter(StateChangeNode):
def __init__(self, source, object, attrName):
self._object = object
self._attrName = attrName
StateChangeNode.__init__(self, source)
self._handleStateChange()
def _handleStateChange(self):
setattr(self._object, self._attrName, self._value)
StateChangeNode._handleStateChange(self)
if __debug__:
o = ScratchPad()
svar = StateVar(0)
aset = AttrSetter(svar, o, 'testAttr')
assert hasattr(o, 'testAttr')
assert o.testAttr == 0
svar.set('red')
assert o.testAttr == 'red'
aset.destroy()
svar.destroy()
o.destroy()
del aset
del svar
del o
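if __debug__:
    # Illustrative sketch (not part of the original module): chains a StateVar
    # through a FunctionCall into a plain Python list, mirroring the self-tests
    # above. The threshold of 10 is an arbitrary example value.
    log = []
    threshold = StateVar(0)
    watcher = FunctionCall(lambda v, log=log: log.append(v >= 10), threshold)
    watcher.pushCurrentState()
    assert log == [False]
    threshold.set(12)
    assert log == [False, True]
    watcher.destroy()
    threshold.destroy()
    del watcher
    del threshold
    del log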
|
|
from JumpScale import j
import struct
TIMES = {'s': 1,
'm': 60,
'h': 3600,
'd': 3600 * 24,
'w': 3600 * 24 * 7
}
import datetime
import time
from TimeInterval import TimeInterval
class Time_:
"""
generic provider of time functions
lives at j.data.time
"""
def __init__(self):
self.__jslocation__ = "j.data.time"
self.timeinterval = TimeInterval
@property
def epoch(self):
return int(time.time())
def getTimeEpoch(self):
'''
Get epoch timestamp (number of seconds passed since January 1, 1970)
'''
timestamp = int(time.time())
return timestamp
def getSecondsInHR(self, seconds):
minute = 60.
hour = 3600.
day = hour * 24
week = day * 7
if seconds < minute:
return "%s seconds" % seconds
elif seconds < hour:
return "%s minutes" % round((seconds / minute), 1)
elif seconds < day:
return "%s hours" % round((seconds / hour), 1)
elif seconds < week:
return "%s days" % round((seconds / day), 1)
else:
return "%s weeks" % round((seconds / week), 1)
def getTimeEpochBin(self):
'''
        Get the epoch timestamp packed as a little-endian unsigned 32-bit integer
'''
return struct.pack("<I", self.getTimeEpoch())
def getLocalTimeHR(self):
'''Get the current local date and time in a human-readable form'''
#timestamp = time.asctime(time.localtime(time.time()))
timestr = self.formatTime(self.getTimeEpoch())
return timestr
def getLocalTimeHRForFilesystem(self):
# TODO: check if correct implementation
return time.strftime("%d_%b_%Y_%H_%M_%S", time.gmtime())
def formatTime(self, epoch, formatstr='%Y/%m/%d %H:%M:%S', local=True):
'''
        Returns a formatted time string representing the given epoch timestamp
        See https://docs.python.org/3/library/time.html#time.strftime for an
        overview of available formatting options.
        @param epoch: Epoch timestamp to format
        @type epoch: float
        @param formatstr: Format string
        @type formatstr: string
        @param local: If True format in local time, otherwise in UTC
        @type local: bool
        @returns: Formatted time
        @rtype: string
'''
epoch = float(epoch)
if local:
timetuple = time.localtime(epoch)
else:
timetuple = time.gmtime(epoch)
timestr = time.strftime(formatstr, timetuple)
return timestr
def epoch2HRDate(self, epoch, local=True):
return self.formatTime(epoch, '%Y/%m/%d', local)
def epoch2HRDateTime(self, epoch, local=True):
return self.formatTime(epoch, '%Y/%m/%d %H:%M:%S', local)
def pythonDateTime2HRDateTime(self, pythonDateTime, local=True):
if not isinstance(pythonDateTime, datetime.datetime):
raise j.exceptions.Input(
"needs to be python date.time obj:%s" % pythonDateTime)
epoch = pythonDateTime.timestamp()
return self.epoch2HRDateTime(epoch)
def pythonDateTime2Epoch(self, pythonDateTime, local=True):
if not isinstance(pythonDateTime, datetime.datetime):
raise j.exceptions.Input(
"needs to be python date.time obj:%s" % pythonDateTime)
epoch = pythonDateTime.timestamp()
return epoch
def epoch2pythonDateTime(self, epoch):
return datetime.datetime.fromtimestamp(epoch)
def epoch2ISODateTime(self, epoch):
dt = datetime.datetime.fromtimestamp(epoch)
return dt.isoformat()
def epoch2pythonDate(self, epoch):
return datetime.date.fromtimestamp(epoch)
def epoch2HRTime(self, epoch, local=True):
return self.formatTime(epoch, '%H:%M:%S', local)
def getMinuteId(self, epoch=None):
"""
is # min from jan 1 2010
"""
if epoch is None:
epoch = time.time()
if epoch < 1262318400.0:
raise j.exceptions.RuntimeError(
"epoch cannot be smaller than 1262318400, given epoch:%s" % epoch)
return int((epoch - 1262318400.0) / 60.0)
def getHourId(self, epoch=None):
"""
is # hour from jan 1 2010
"""
return int(self.getMinuteId(epoch) / 60)
def fiveMinuteIdToEpoch(self, fiveMinuteId):
return fiveMinuteId * 60 * 5 + 1262318400
def get5MinuteId(self, epoch=None):
"""
is # 5 min from jan 1 2010
"""
return int(self.getMinuteId(epoch) / 5)
def getDayId(self, epoch=None):
"""
is # day from jan 1 2010
"""
return int(self.getMinuteId(epoch) / (60 * 24))
def getDeltaTime(self, txt):
"""
        Convert a relative time string to a number of seconds.
        Supported suffixes are s, m, h, d and w, e.g. -3m, -3h or -3d
        (of course 3 can be any number); negative values mean "ago",
        positive values point to the future.
"""
txt = txt.strip()
unit = txt[-1]
if txt[-1] not in list(TIMES.keys()):
raise j.exceptions.RuntimeError(
"Cannot find time, needs to be in format have time indicator %s " % list(TIMES.keys()))
value = float(txt[:-1])
return value * TIMES[unit]
def getEpochAgo(self, txt):
"""
        Return the epoch at the given relative offset, e.g. -3m, -3h or -3d
        (of course 3 can be any number), meaning 3 minutes, hours or days ago.
        If txt is None or "0" the current epoch is returned.
"""
if txt is None or str(txt).strip() == "0":
return self.getTimeEpoch()
return self.getTimeEpoch() + self.getDeltaTime(txt)
def getEpochFuture(self, txt):
"""
        Return the epoch at the given relative offset, e.g. +3h or +3d
        (of course 3 can be any number), meaning 3 hours or days in the future.
        If txt is None or "0" the current epoch is returned.
"""
if txt is None or str(txt).strip() == "0":
return self.getTimeEpoch()
return self.getTimeEpoch() + self.getDeltaTime(txt)
def HRDatetoEpoch(self, datestr, local=True):
"""
convert string date to epoch
Date needs to be formatted as 16/06/1988
"""
if datestr.strip() == "":
return 0
try:
datestr = datestr.strip()
return time.mktime(time.strptime(datestr, "%d/%m/%Y"))
except:
raise ValueError(
"Date needs to be formatted as \"16/06/1981\", also check if date is valid, now format = %s" % datestr)
def HRDateTime2epoch(self, hrdatetime):
"""
convert string date/time to epoch
        Needs to be formatted as "1988/06/16 14:30:00" (%Y/%m/%d %H:%M:%S)
"""
if hrdatetime.strip() == "":
return 0
try:
hrdatetime = hrdatetime.strip()
return int(time.mktime(time.strptime(hrdatetime, "%Y/%m/%d %H:%M:%S")))
except:
raise ValueError(
"Date needs to be formatted as Needs to be formatted as 16/06/1988 %H:%M:%S, also check if date is valid, now format = %s" % hrdatetime)
def any2epoch(self, val, in_list=False):
"""
        if val is a list: items are tried in order until one yields a non-zero epoch
        if val is an int: it is taken to be an epoch already
        if val is a string: it is parsed as a human readable date/time
        if val is a datetime.datetime: it is converted directly
"""
if j.data.types.list.check(val):
for item in val:
res = self.any2epoch(item, in_list=True)
if res != 0:
return res
return 0
if val is None:
return 0
if j.data.types.int.check(val):
return val
if j.data.types.string.check(val):
try:
return self.HRDateTime2epoch(val)
except:
pass
try:
return self.HRDatetoEpoch(val)
except:
pass
if isinstance(val, datetime.datetime):
return self.pythonDateTime2Epoch(val)
if not in_list:
raise j.exceptions.Input(
"Could not define format of time value, needs to be int, human readable time, list or python datetime obj.")
else:
return 0
def any2HRDateTime(self, val):
"""
        Accepts the same inputs as any2epoch (list, int, string or
        datetime.datetime) and returns the result as a human readable
        date/time string.
"""
epoch = self.any2epoch(val)
return self.epoch2HRDateTime(epoch)
def _test(self):
now = self.getTimeEpoch()
hr = self.epoch2HRDateTime(now)
assert self.HRDateTime2epoch(hr) == now
assert self.any2epoch(hr) == now
dt = self.epoch2pythonDateTime(now)
assert self.any2epoch(dt) == now
hr = self.pythonDateTime2HRDateTime(dt)
assert self.any2epoch(hr) == now
hr = self.any2HRDateTime(now)
assert self.any2epoch(hr) == now
hr = self.any2HRDateTime(hr)
assert self.any2epoch(hr) == now
hr = self.any2HRDateTime(dt)
assert self.any2epoch(hr) == now
hr = self.any2HRDateTime(["", 0, dt])
assert self.any2epoch(hr) == now
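# Illustrative sketch (not part of the original module): exercises a few of the
# pure conversion helpers above by instantiating Time_ directly instead of going
# through j.data.time; the values below are arbitrary examples.
def _example_time_usage():
    t = Time_()
    assert t.getDeltaTime("-3m") == -180.0                 # 3 minutes ago, in seconds
    assert t.getSecondsInHR(90) == "1.5 minutes"           # human readable duration
    assert t.epoch2HRDate(0, local=False) == "1970/01/01"  # epoch 0 formatted in UTC
    return t.getLocalTimeHR()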
|
|
# -*- coding: utf-8 -*-
"""
@author: Federico Cerchiari <[email protected]>
Table Widget
"""
from copy import copy
from itertools import zip_longest
import tempy.tags as tags
from ..tools import AdjustableList
from ..exceptions import WidgetDataError
class TempyTable(tags.Table):
"""Table widget.
    Creates a simple table structure using the given data, or an empty table of the given size.
params:
data: an iterable of iterables in this form [[col1, col2, col3], [col1, col2, col3]]
rows, columns: size of the table if no data is given
head: if True adds the table header using the first data row
    foot: if True adds a table footer using the last data row
caption: adds the caption to the table
"""
__tag = tags.Table._Table__tag
def __init__(self, rows=0, cols=0, data=None, **kwargs):
caption = kwargs.pop("caption", None)
head = kwargs.pop("head", False)
foot = kwargs.pop("foot", False)
super().__init__(**kwargs)
self(body=tags.Tbody())
# Initialize empty data structure if data is not given
if not data:
data = [
[None for _ in range(cols)] for _ in range(rows + sum((head, foot)))
]
else:
rows = len(data)
cols = max(map(len, data))
table_data = copy(data)
if caption:
self.make_caption(caption)
if head and rows > 0:
self.make_header(table_data.pop(0))
if foot and rows > 0:
self.make_footer(table_data.pop())
if data:
self.populate(table_data, resize_x=True)
def _check_row_size(self, row):
try:
row_length = len(row)
except TypeError:
row_length = row
if self.body.childs and max(map(len, self.body)) < row_length:
raise WidgetDataError(self, "The given data has more columns than the table column size.")
def populate(self, data, resize_x=True, normalize=True):
"""Adds/Replace data in the table.
data: an iterable of iterables in the form [[col1, col2, col3], [col1, col2, col3]]
resize_x: if True, changes the x-size of the table according to the given data.
If False and data have dimensions different from the existing table structure a WidgetDataError is raised.
normalize: if True all the rows will have the same number of columns, if False, data structure is followed.
"""
if data is None:
raise WidgetDataError(
self,
"Parameter data should be non-None, to empty the table use TempyTable.clear() or "
"pass an empty list.",
)
data = copy(data)
if not self.body:
self(body=tags.Tbody())
self.clear()
max_data_x = max(map(len, data))
if not resize_x:
self._check_row_size(max_data_x)
for t_row, d_row in zip_longest(self.body, data):
if not d_row:
t_row.remove()
else:
if not t_row:
t_row = tags.Tr().append_to(self.body)
if normalize:
d_row = AdjustableList(d_row).ljust(max_data_x, None)
for t_cell, d_cell in zip_longest(t_row, d_row):
if not t_cell and resize_x:
t_cell = tags.Td().append_to(t_row)
t_cell.empty()
if d_cell is not None:
t_cell(d_cell)
return self
def clear(self):
return self.body.empty()
def add_row(self, row_data, resize_x=True):
"""Adds a row at the end of the table"""
if not resize_x:
self._check_row_size(row_data)
self.body(tags.Tr()(tags.Td()(cell) for cell in row_data))
return self
def pop_row(self, idr=None, tags=False):
"""Pops a row, default the last"""
idr = idr if idr is not None else len(self.body) - 1
row = self.body.pop(idr)
return row if tags else [cell.childs[0] for cell in row]
def pop_cell(self, idy=None, idx=None, tags=False):
"""Pops a cell, default the last of the last row"""
idy = idy if idy is not None else len(self.body) - 1
idx = idx if idx is not None else len(self.body[idy]) - 1
cell = self.body[idy].pop(idx)
return cell if tags else cell.childs[0]
def _make_table_part(self, part, data):
part_tag, inner_tag = {"header": (tags.Thead, tags.Th), "footer": (tags.Tfoot, tags.Td)}.get(part)
part_instance = part_tag().append_to(self)
if not hasattr(self, part):
setattr(self, part, part_instance)
return part_instance(tags.Tr()(inner_tag()(col) for col in data))
def make_header(self, head):
"""Makes the header row from the given data."""
self._make_table_part("header", head)
def make_footer(self, footer):
"""Makes the footer row from the given data."""
self._make_table_part("footer", footer)
def make_caption(self, caption):
"""Adds/Substitutes the table's caption."""
if not hasattr(self, "caption"):
self(caption=tags.Caption())
return self.caption.empty()(caption)
def _iter_rows(self, col_index):
for row in self.body.childs:
if self.is_col_within_bounds(col_index, row) and row.childs[col_index].childs:
yield row
def col_class(self, css_class, col_index=None):
# adds css_class to every cell
if col_index is None:
gen = ((row, cell) for row in self.body.childs for cell in row.childs)
for (row, cell) in gen:
cell.attr(klass=css_class)
return
for row in self._iter_rows(col_index):
row.childs[col_index].attr(klass=css_class)
def row_class(self, css_class, row_index=None):
# adds css_class to every row
if row_index is None:
for row in self.body.childs:
row.attr(klass=css_class)
elif self.is_row_within_bounds(row_index):
self.body.childs[row_index].attr(klass=css_class)
def map_col(self, col_function, col_index=None, ignore_errors=True):
# applies function to every cell
if col_index is None:
self.map_table(col_function)
return self
try:
for row in self._iter_rows(col_index):
row.childs[col_index].apply_function(col_function)
except Exception as ex:
if ignore_errors:
pass
else:
raise ex
def map_row(self, row_function, row_index=None, ignore_errors=True):
# applies function to every row
if row_index is None:
self.map_table(row_function)
return self
if self.is_row_within_bounds(row_index):
gen = (
cell
for cell in self.body.childs[row_index].childs
if len(cell.childs) > 0
)
self.apply_function_to_cells(gen, row_function, ignore_errors)
def map_table(self, format_function, ignore_errors=True):
for row in self.body.childs:
gen = (cell for cell in row.childs if len(cell.childs) > 0)
self.apply_function_to_cells(gen, format_function, ignore_errors)
@staticmethod
def apply_function_to_cells(gen, format_function, ignore_errors):
try:
for cell in gen:
cell.apply_function(format_function)
except Exception as ex:
if ignore_errors:
pass
else:
raise ex
def make_scope(self, col_scope_list=None, row_scope_list=None):
"""Makes scopes and converts Td to Th for given arguments
which represent lists of tuples (row_index, col_index)"""
for scope, itm in ((col_scope_list, "col"), (row_scope_list, "row")):
if scope is not None and len(scope) > 0:
self.apply_scope(scope, itm)
def apply_scope(self, scope_list, scope_tag):
gen = (
(row_index, col_index)
for row_index, col_index in scope_list
if self.is_row_within_bounds(row_index)
and self.is_col_within_bounds(col_index, self.body.childs[row_index])
and len(self.body.childs[row_index].childs[col_index].childs) > 0
)
for row_index, col_index in gen:
cell = self.body.childs[row_index].childs[col_index]
self.body.childs[row_index].childs[col_index] = tags.Th()(cell.childs[0])
self.body.childs[row_index].childs[col_index].attrs = copy(cell.attrs)
self.body.childs[row_index].childs[col_index].attr(scope=scope_tag)
def is_row_within_bounds(self, row_index):
if row_index >= 0 and (row_index < len(self.body.childs)):
return True
raise WidgetDataError(self, "Row index should be within table bounds")
def is_col_within_bounds(self, col_index, row):
if col_index >= 0 and (col_index < len(row.childs)):
return True
raise WidgetDataError(self, "Column index should be within table bounds")
|
|
#!/usr/bin/env python
#coding=utf-8
import urllib
import base64
import hmac
import time
from hashlib import sha1 as sha
import os
import sys
import md5
import StringIO
from threading import Thread
import Queue
import threading
import ConfigParser
import logging
from logging.handlers import RotatingFileHandler
from xml.sax.saxutils import escape
import socket
import re
try:
from oss.oss_xml_handler import *
except:
from oss_xml_handler import *
#LOG_LEVEL can be one of DEBUG INFO ERROR CRITICAL WARNING
DEBUG = False
LOG_LEVEL = "DEBUG"
PROVIDER = "OSS"
SELF_DEFINE_HEADER_PREFIX = "x-oss-"
if "AWS" == PROVIDER:
SELF_DEFINE_HEADER_PREFIX = "x-amz-"
class EmptyHandler(logging.Handler):
def __init__(self):
self.lock = None
self.level = None
def emit(self, record):
pass
def handle(self, record):
pass
def createLock(self):
self.lock = None
class Logger():
def __init__(self, debug, log_name, log_level, logger):
self.logger = logging.getLogger(logger)
if debug:
logfile = os.path.join(os.getcwd(), log_name)
max_log_size = 100*1024*1024 #Bytes
backup_count = 5
format = \
"%(asctime)s %(levelname)-8s[%(filename)s:%(lineno)d(%(funcName)s)] %(message)s"
hdlr = RotatingFileHandler(logfile,
mode='a',
maxBytes=max_log_size,
backupCount=backup_count)
formatter = logging.Formatter(format)
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
if "DEBUG" == log_level.upper():
self.logger.setLevel(logging.DEBUG)
elif "INFO" == log_level.upper():
self.logger.setLevel(logging.INFO)
elif "WARNING" == log_level.upper():
self.logger.setLevel(logging.WARNING)
elif "ERROR" == log_level.upper():
self.logger.setLevel(logging.ERROR)
elif "CRITICAL" == log_level.upper():
self.logger.setLevel(logging.CRITICAL)
else:
self.logger.setLevel(logging.ERROR)
else:
self.logger.addHandler(EmptyHandler())
def getlogger(self):
return self.logger
OSS_LOGGER_SET = None
def helper_get_host_from_resp(res, bucket):
host = helper_get_host_from_headers(res.getheaders(), bucket)
if not host:
xml = res.read()
host = RedirectXml(xml).Endpoint().strip()
host = helper_get_host_from_endpoint(host, bucket)
return host
def helper_get_host_from_headers(headers, bucket):
mp = convert_header2map(headers)
location = safe_get_element('location', mp).strip()
#https://bucket.oss.aliyuncs.com or http://oss.aliyuncs.com/bucket
location = location.replace("https://", "").replace("http://", "")
if location.startswith("%s." % bucket):
location = location[len(bucket)+1:]
index = location.find('/')
if index == -1:
return location
return location[:index]
def helper_get_host_from_endpoint(host, bucket):
index = host.find('/')
if index != -1:
host = host[:index]
index = host.find('\\')
if index != -1:
host = host[:index]
index = host.find(bucket)
if index == 0:
host = host[len(bucket)+1:]
return host
def check_bucket_valid(bucket):
alphabeta = "abcdefghijklmnopqrstuvwxyz0123456789-"
if len(bucket) < 3 or len(bucket) > 63:
return False
if bucket[-1] == "-" or bucket[-1] == "_":
return False
if not ((bucket[0] >= 'a' and bucket[0] <= 'z') or (bucket[0] >= '0' and bucket[0] <= '9')):
return False
for i in bucket:
if not i in alphabeta:
return False
return True
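# Illustrative sketch (not part of the original module): a few hypothetical
# bucket names showing what check_bucket_valid() accepts and rejects.
def _example_check_bucket_valid():
    assert check_bucket_valid("my-bucket-01")       # lowercase letters, digits, dashes
    assert not check_bucket_valid("ab")             # shorter than 3 characters
    assert not check_bucket_valid("MyBucket")       # uppercase letters are rejected
    assert not check_bucket_valid("bucket-")        # must not end with a dash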
def check_redirect(res):
is_redirect = False
try:
if res.status == 301 or res.status == 302:
is_redirect = True
except:
pass
return is_redirect
########## function for Authorization ##########
def _format_header(headers=None):
'''
    format the self-defined headers:
    convert the self-defined header names to lower case.
'''
if not headers:
headers = {}
tmp_headers = {}
for k in headers.keys():
if isinstance(headers[k], unicode):
headers[k] = convert_utf8(headers[k])
if k.lower().startswith(SELF_DEFINE_HEADER_PREFIX):
k_lower = k.lower().strip()
tmp_headers[k_lower] = headers[k]
else:
tmp_headers[k.strip()] = headers[k]
return tmp_headers
def get_assign(secret_access_key, method, headers=None, resource="/", result=None, debug=DEBUG):
'''
    Create the OSS authorization signature based on the request headers.
    Put the result into the "Authorization" header of the request.
'''
if not headers:
headers = {}
if not result:
result = []
content_md5 = ""
content_type = ""
date = ""
canonicalized_oss_headers = ""
secret_access_key = convert_utf8(secret_access_key)
global OSS_LOGGER_SET
if not OSS_LOGGER_SET:
OSS_LOGGER_SET = Logger(debug, "log.txt", LOG_LEVEL, "oss_util").getlogger()
OSS_LOGGER_SET.debug("secret_access_key: %s" % secret_access_key)
content_md5 = safe_get_element('Content-MD5', headers)
content_type = safe_get_element('Content-Type', headers)
date = safe_get_element('Date', headers)
canonicalized_resource = resource
tmp_headers = _format_header(headers)
if len(tmp_headers) > 0:
x_header_list = tmp_headers.keys()
x_header_list.sort()
for k in x_header_list:
if k.startswith(SELF_DEFINE_HEADER_PREFIX):
canonicalized_oss_headers += "%s:%s\n" % (k, tmp_headers[k])
string_to_sign = method + "\n" + content_md5.strip() + "\n" + content_type + "\n" + date + "\n" + canonicalized_oss_headers + canonicalized_resource
result.append(string_to_sign)
OSS_LOGGER_SET.debug("method:%s\n content_md5:%s\n content_type:%s\n data:%s\n canonicalized_oss_headers:%s\n canonicalized_resource:%s\n" % (method, content_md5, content_type, date, canonicalized_oss_headers, canonicalized_resource))
OSS_LOGGER_SET.debug("string_to_sign:%s\n \nlength of string_to_sign:%d\n" % (string_to_sign, len(string_to_sign)))
h = hmac.new(secret_access_key, string_to_sign, sha)
sign_result = base64.encodestring(h.digest()).strip()
OSS_LOGGER_SET.debug("sign result:%s" % sign_result)
return sign_result
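# Illustrative sketch (not part of the original module): shows how the
# string-to-sign is composed by get_assign(); the credentials, date and
# resource below are hypothetical. The returned value is the base64-encoded
# HMAC-SHA1 of this string.
def _example_get_assign():
    result = []
    get_assign("secret", "GET", {"Date": "Thu, 17 Nov 2005 18:49:58 GMT"},
               "/bucket/object", result)
    assert result[0] == "GET\n\n\nThu, 17 Nov 2005 18:49:58 GMT\n/bucket/object"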
def get_resource(params=None):
if not params:
return ""
tmp_headers = {}
for k, v in params.items():
tmp_k = k.lower().strip()
tmp_headers[tmp_k] = v
override_response_list = ['response-content-type', 'response-content-language', \
'response-cache-control', 'logging', 'response-content-encoding', \
'acl', 'uploadId', 'uploads', 'partNumber', 'group', 'link', \
'delete', 'website', 'location', 'objectInfo', \
'response-expires', 'response-content-disposition', 'cors', 'lifecycle']
override_response_list.sort()
resource = ""
separator = "?"
for i in override_response_list:
if tmp_headers.has_key(i.lower()):
resource += separator
resource += i
tmp_key = str(tmp_headers[i.lower()])
if len(tmp_key) != 0:
resource += "="
resource += tmp_key
separator = '&'
return resource
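# Illustrative sketch (not part of the original module): shows the canonical
# sub-resource string produced by get_resource(); parameter values are
# hypothetical.
def _example_get_resource():
    assert get_resource({"acl": ""}) == "?acl"
    assert get_resource({"uploadId": "0004ABC"}) == "?uploadId=0004ABC"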
def oss_quote(in_str):
if not isinstance(in_str, str):
in_str = str(in_str)
return urllib.quote(in_str, '')
def append_param(url, params):
'''
    convert the parameters to the query string of the URI.
'''
l = []
for k, v in params.items():
k = k.replace('_', '-')
if k == 'maxkeys':
k = 'max-keys'
v = convert_utf8(v)
if v is not None and v != '':
l.append('%s=%s' % (oss_quote(k), oss_quote(v)))
elif k == 'acl':
l.append('%s' % (oss_quote(k)))
elif v is None or v == '':
l.append('%s' % (oss_quote(k)))
if len(l):
url = url + '?' + '&'.join(l)
return url
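# Illustrative sketch (not part of the original module): shows how append_param()
# builds the query string; URL and parameters are hypothetical. Single-key dicts
# are used so the expected output does not depend on dict ordering.
def _example_append_param():
    assert append_param("http://host/bucket/object", {"partNumber": "1"}) == \
        "http://host/bucket/object?partNumber=1"
    assert append_param("http://host/bucket", {"acl": ""}) == "http://host/bucket?acl"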
############### Construct XML ###############
def create_object_group_msg_xml(part_msg_list=None):
'''
    get information from part_msg_list and convert it to xml.
part_msg_list has special format.
'''
if not part_msg_list:
part_msg_list = []
xml_string = r'<CreateFileGroup>'
for part in part_msg_list:
if len(part) >= 3:
if isinstance(part[1], unicode):
file_path = convert_utf8(part[1])
else:
file_path = part[1]
file_path = escape(file_path)
xml_string += r'<Part>'
xml_string += r'<PartNumber>' + str(part[0]) + r'</PartNumber>'
xml_string += r'<PartName>' + str(file_path) + r'</PartName>'
xml_string += r'<ETag>"' + str(part[2]).upper() + r'"</ETag>'
xml_string += r'</Part>'
else:
print "the ", part, " in part_msg_list is not as expected!"
return ""
xml_string += r'</CreateFileGroup>'
return xml_string
def create_object_link_msg_xml_by_name(object_list = None):
'''
    get information from object_list and convert it to xml.
'''
if not object_list:
object_list = []
xml_string = r'<CreateObjectLink>'
for i in range(len(object_list)):
part = str(object_list[i]).strip()
file_path = convert_utf8(part)
file_path = escape(file_path)
xml_string += r'<Part>'
xml_string += r'<PartNumber>' + str(i + 1) + r'</PartNumber>'
xml_string += r'<PartName>' + str(file_path) + r'</PartName>'
xml_string += r'</Part>'
xml_string += r'</CreateObjectLink>'
return xml_string
def create_object_link_msg_xml(part_msg_list = None):
'''
    get information from part_msg_list and convert it to xml.
part_msg_list has special format.
'''
if not part_msg_list:
part_msg_list = []
xml_string = r'<CreateObjectLink>'
for part in part_msg_list:
if len(part) >= 2:
file_path = convert_utf8(part[1])
file_path = escape(file_path)
xml_string += r'<Part>'
xml_string += r'<PartNumber>' + str(part[0]) + r'</PartNumber>'
xml_string += r'<PartName>' + str(file_path) + r'</PartName>'
xml_string += r'</Part>'
else:
print "the ", part, " in part_msg_list is not as expected!"
return ""
xml_string += r'</CreateObjectLink>'
return xml_string
def create_part_xml(part_msg_list=None):
'''
    get information from part_msg_list and convert it to xml.
part_msg_list has special format.
'''
if not part_msg_list:
part_msg_list = []
xml_string = r'<CompleteMultipartUpload>'
for part in part_msg_list:
if len(part) >= 3:
xml_string += r'<Part>'
xml_string += r'<PartNumber>' + str(part[0]) + r'</PartNumber>'
xml_string += r'<ETag>"' + str(part[2]).upper() + r'"</ETag>'
xml_string += r'</Part>'
else:
print "the ", part, " in part_msg_list is not as expected!"
return ""
xml_string += r'</CompleteMultipartUpload>'
return xml_string
def create_delete_object_msg_xml(object_list=None, is_quiet=False, is_defult=False):
'''
    convert the object name list to xml.
'''
if not object_list:
object_list = []
xml_string = r'<Delete>'
if not is_defult:
if is_quiet:
xml_string += r'<Quiet>true</Quiet>'
else:
xml_string += r'<Quiet>false</Quiet>'
for object in object_list:
key = convert_utf8(object)
key = escape(key)
xml_string += r'<Object><Key>%s</Key></Object>' % key
xml_string += r'</Delete>'
return xml_string
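# Illustrative sketch (not part of the original module): the XML body produced
# for a hypothetical batch-delete request, including XML escaping of key names.
def _example_create_delete_object_msg_xml():
    xml = create_delete_object_msg_xml(["a.txt", "b&c.txt"], is_quiet=True)
    assert xml == ('<Delete><Quiet>true</Quiet>'
                   '<Object><Key>a.txt</Key></Object>'
                   '<Object><Key>b&amp;c.txt</Key></Object>'
                   '</Delete>')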
############### operate OSS ###############
def clear_all_object_of_bucket(oss_instance, bucket):
'''
clean all objects in bucket, after that, it will delete this bucket.
'''
return clear_all_objects_in_bucket(oss_instance, bucket)
def clear_all_objects_in_bucket(oss_instance, bucket, delete_marker="", delete_upload_id_marker="", debug=False):
'''
it will clean all objects in bucket, after that, it will delete this bucket.
example:
from oss_api import *
host = ""
id = ""
key = ""
oss_instance = OssAPI(host, id, key)
bucket = "leopublicreadprivatewrite"
if clear_all_objects_in_bucket(oss_instance, bucket):
pass
else:
print "clean Fail"
'''
prefix = ""
delimiter = ""
maxkeys = 1000
try:
delete_all_objects(oss_instance, bucket, prefix, delimiter, delete_marker, maxkeys, debug)
delete_all_parts(oss_instance, bucket, delete_marker, delete_upload_id_marker, debug)
res = oss_instance.delete_bucket(bucket)
if (res.status / 100 != 2 and res.status != 404):
print "clear_all_objects_in_bucket: delete bucket:%s fail, ret:%s, request id:%s" % (bucket, res.status, res.getheader("x-oss-request-id"))
return False
except socket.error:
print "socket exception when clear_all_objects_in_bucket:%s from %s" % (bucket, oss_instance.host)
return False
return True
def delete_all_objects(oss_instance, bucket, prefix="", delimiter="", delete_marker="", maxkeys=1000, debug=False):
marker = delete_marker
delete_obj_num = 0
while 1:
object_list = []
res = oss_instance.get_bucket(bucket, prefix, marker, delimiter, maxkeys)
if res.status != 200:
return False
body = res.read()
(tmp_object_list, marker) = get_object_list_marker_from_xml(body)
for item in tmp_object_list:
object_list.append(item[0])
if object_list:
object_list_xml = create_delete_object_msg_xml(object_list)
res = oss_instance.batch_delete_object(bucket, object_list_xml)
if res.status/100 != 2:
if marker:
print "delete_all_objects: batch delete objects in bucket:%s fail, ret:%s, request id:%s, first object:%s, marker:%s" % (bucket, res.status, res.getheader("x-oss-request-id"), object_list[0], marker)
else:
print "delete_all_objects: batch delete objects in bucket:%s fail, ret:%s, request id:%s, first object:%s" % (bucket, res.status, res.getheader("x-oss-request-id"), object_list[0])
return False
else:
if debug:
delete_obj_num += len(object_list)
if marker:
print "delete_all_objects: Now %s objects deleted, marker:%s" % (delete_obj_num, marker)
else:
print "delete_all_objects: Now %s objects deleted" % (delete_obj_num)
if len(marker) == 0:
break
return True
def delete_all_parts(oss_instance, bucket, delete_object_marker="", delete_upload_id_marker="", debug=False):
delete_mulitipart_num = 0
marker = delete_object_marker
id_marker = delete_upload_id_marker
while 1:
res = oss_instance.get_all_multipart_uploads(bucket, key_marker=marker, upload_id_marker=id_marker)
if res.status != 200:
break
body = res.read()
hh = GetMultipartUploadsXml(body)
(fl, pl) = hh.list()
for i in fl:
object = convert_utf8(i[0])
res = oss_instance.cancel_upload(bucket, object, i[1])
if (res.status / 100 != 2 and res.status != 404):
print "delete_all_parts: cancel upload object:%s, upload_id:%s FAIL, ret:%s, request-id:%s" % (object, i[1], res.status, res.getheader("x-oss-request-id"))
else:
delete_mulitipart_num += 1
if debug:
print "delete_all_parts: cancel upload object:%s, upload_id:%s OK\nNow %s parts deleted." % (object, i[1], delete_mulitipart_num)
if hh.is_truncated:
marker = hh.next_key_marker
id_marker = hh.next_upload_id_marker
else:
break
if not marker:
break
def clean_all_bucket(oss_instance):
'''
    it will clean all buckets, including all objects in each bucket.
'''
res = oss_instance.get_service()
if (res.status / 100) == 2:
h = GetServiceXml(res.read())
for b in h.bucket_list:
if not clear_all_objects_in_bucket(oss_instance, b.name):
print "clean bucket ", b.name, " failed! in clean_all_bucket"
return False
return True
else:
print "failed! get service in clean_all_bucket return ", res.status
print res.read()
print res.getheaders()
return False
def pgfs_clear_all_objects_in_bucket(oss_instance, bucket):
'''
it will clean all objects in bucket, after that, it will delete this bucket.
'''
b = GetAllObjects()
b.get_all_object_in_bucket(oss_instance, bucket)
for i in b.object_list:
res = oss_instance.delete_object(bucket, i)
if (res.status / 100 != 2):
print "clear_all_objects_in_bucket: delete object fail, ret is:", res.status, "bucket is:", bucket, "object is: ", i
return False
else:
pass
res = oss_instance.delete_bucket(bucket)
if (res.status / 100 != 2 and res.status != 404):
print "clear_all_objects_in_bucket: delete bucket fail, ret is: %s, request id is:%s" % (res.status, res.getheader("x-oss-request-id"))
return False
return True
def pgfs_clean_all_bucket(oss_instance):
'''
    it will clean all buckets, including all objects in each bucket.
'''
res = oss_instance.get_service()
if (res.status / 100) == 2:
h = GetServiceXml(res.read())
for b in h.bucket_list:
if not pgfs_clear_all_objects_in_bucket(oss_instance, b.name):
print "clean bucket ", b.name, " failed! in clean_all_bucket"
return False
return True
else:
print "failed! get service in clean_all_bucket return ", res.status
print res.read()
print res.getheaders()
return False
def delete_all_parts_of_object_group(oss, bucket, object_group_name):
res = oss.get_object_group_index(bucket, object_group_name)
if res.status == 200:
body = res.read()
h = GetObjectGroupIndexXml(body)
object_group_index = h.list()
for i in object_group_index:
if len(i) == 4 and len(i[1]) > 0:
part_name = i[1].strip()
res = oss.delete_object(bucket, part_name)
if res.status != 204:
print "delete part ", part_name, " in bucket:", bucket, " failed!"
return False
else:
return False
return True
def delete_all_parts_of_object_link(oss, bucket, object_link_name):
res = oss.get_link_index(bucket, object_link_name)
if res.status == 200:
body = res.read()
h = GetObjectLinkIndexXml(body)
object_link_index = h.list()
for i in object_link_index:
if len(i) == 2 and len(i[1]) > 0:
part_name = i[1].strip()
res = oss.delete_object(bucket, part_name)
if res.status != 204:
print "delete part ", part_name, " in bucket:", bucket, " failed!"
return False
else:
return False
return True
class GetAllObjects:
def __init__(self):
self.object_list = []
self.dir_list = []
def get_object_in_bucket(self, oss, bucket="", marker="", prefix=""):
object_list = []
maxkeys = 1000
try:
res = oss.get_bucket(bucket, prefix, marker, maxkeys=maxkeys)
body = res.read()
hh = GetBucketXml(body)
(fl, pl) = hh.list()
if len(fl) != 0:
for i in fl:
object = convert_utf8(i[0])
object_list.append(object)
marker = hh.nextmarker
except:
pass
return (object_list, marker)
def get_object_dir_in_bucket(self, oss, bucket="", marker="", prefix="", delimiter=""):
object_list = []
dir_list = []
maxkeys = 1000
try:
res = oss.get_bucket(bucket, prefix, marker, delimiter, maxkeys=maxkeys)
body = res.read()
hh = GetBucketXml(body)
(fl, pl) = hh.list()
if len(fl) != 0:
for i in fl:
object_list.append((i[0], i[3], i[1])) #name, size, modified_time
if len(pl) != 0:
for i in pl:
dir_list.append(i)
marker = hh.nextmarker
except:
pass
return (object_list, dir_list, marker)
def get_all_object_in_bucket(self, oss, bucket="", marker="", prefix=""):
marker2 = ""
while True:
(object_list, marker) = self.get_object_in_bucket(oss, bucket, marker2, prefix)
marker2 = marker
if len(object_list) != 0:
self.object_list.extend(object_list)
if not marker:
break
def get_all_object_dir_in_bucket(self, oss, bucket="", marker="", prefix="", delimiter=""):
marker2 = ""
while True:
(object_list, dir_list, marker) = self.get_object_dir_in_bucket(oss, bucket, marker2, prefix, delimiter)
marker2 = marker
if len(object_list) != 0:
self.object_list.extend(object_list)
if len(dir_list) != 0:
self.dir_list.extend(dir_list)
if not marker:
break
return (self.object_list, self.dir_list)
def get_all_buckets(oss):
bucket_list = []
res = oss.get_service()
if res.status == 200:
h = GetServiceXml(res.read())
for b in h.bucket_list:
bucket_list.append(str(b.name).strip())
return bucket_list
def get_object_list_marker_from_xml(body):
#return ([(object_name, object_length, last_modify_time)...], marker)
object_meta_list = []
next_marker = ""
hh = GetBucketXml(body)
(fl, pl) = hh.list()
if len(fl) != 0:
for i in fl:
object = convert_utf8(i[0])
last_modify_time = i[1]
length = i[3]
etag = i[2]
object_meta_list.append((object, length, last_modify_time, etag))
if hh.is_truncated:
next_marker = hh.nextmarker
return (object_meta_list, next_marker)
def get_dir_list_marker_from_xml(body):
#return (dirname, marker)
dir_list = []
next_marker = ""
hh = GetBucketXml(body)
(fl, pl) = hh.list()
if len(pl) != 0:
for i in pl:
dir_list.append(i)
if hh.is_truncated:
next_marker = hh.nextmarker
return (dir_list, next_marker)
def get_bucket_meta_list_marker_from_xml(body):
next_marker = ""
hh = GetServiceXml(body)
if hh.is_truncated:
next_marker = hh.nextmarker
return (hh.bucket_list, next_marker)
def get_upload_id(oss, bucket, object, headers=None):
'''
get the upload id of object.
Returns:
string
'''
if not headers:
headers = {}
upload_id = ""
res = oss.init_multi_upload(bucket, object, headers)
if res.status == 200:
body = res.read()
h = GetInitUploadIdXml(body)
upload_id = h.upload_id
else:
print res.status
print res.getheaders()
print res.read()
return upload_id
def get_all_upload_id_list(oss, bucket):
'''
    get all upload ids of the bucket
Returns:
list
'''
all_upload_id_list = []
marker = ""
id_marker = ""
while True:
res = oss.get_all_multipart_uploads(bucket, key_marker=marker, upload_id_marker=id_marker)
if res.status != 200:
return all_upload_id_list
body = res.read()
hh = GetMultipartUploadsXml(body)
(fl, pl) = hh.list()
for i in fl:
all_upload_id_list.append(i)
if hh.is_truncated:
marker = hh.next_key_marker
id_marker = hh.next_upload_id_marker
else:
break
if not marker and not id_marker:
break
return all_upload_id_list
def get_upload_id_list(oss, bucket, object):
'''
    get the list of upload ids for one object.
Returns:
list
'''
upload_id_list = []
marker = ""
id_marker = ""
while True:
res = oss.get_all_multipart_uploads(bucket, prefix=object, key_marker=marker, upload_id_marker=id_marker)
if res.status != 200:
break
body = res.read()
hh = GetMultipartUploadsXml(body)
(fl, pl) = hh.list()
for i in fl:
upload_id_list.append(i[1])
if hh.is_truncated:
marker = hh.next_key_marker
id_marker = hh.next_upload_id_marker
else:
break
if not marker:
break
return upload_id_list
def get_part_list(oss, bucket, object, upload_id, max_part=""):
'''
get uploaded part list of object.
Returns:
list
'''
part_list = []
marker = ""
while True:
res = oss.get_all_parts(bucket, object, upload_id, part_number_marker = marker, max_parts=max_part)
if res.status != 200:
break
body = res.read()
h = GetPartsXml(body)
part_list.extend(h.list())
if h.is_truncated:
marker = h.next_part_number_marker
else:
break
if not marker:
break
return part_list
def get_part_xml(oss, bucket, object, upload_id):
'''
get uploaded part list of object.
Returns:
string
'''
part_list = []
part_list = get_part_list(oss, bucket, object, upload_id)
xml_string = r'<CompleteMultipartUpload>'
for part in part_list:
xml_string += r'<Part>'
xml_string += r'<PartNumber>' + str(part[0]) + r'</PartNumber>'
xml_string += r'<ETag>' + part[1] + r'</ETag>'
xml_string += r'</Part>'
xml_string += r'</CompleteMultipartUpload>'
return xml_string
def get_part_map(oss, bucket, object, upload_id):
part_list = []
part_list = get_part_list(oss, bucket, object, upload_id)
part_map = {}
for part in part_list:
part_number = str(part[0])
etag = part[1]
part_map[part_number] = etag
return part_map
########## multi-thread ##########
def multi_get(oss, bucket, object, localfile, thread_num, retry_times):
length = 0
res = oss.head_object(bucket, object)
if 200 == res.status:
length = (int)(res.getheader('content-length'))
else:
print "can not get the length of object:", object
return False
ranges = []
ranges.append(0)
size = length // thread_num
for i in xrange(thread_num - 1):
ranges.append((i + 1) * size)
ranges.append(length)
threadpool = []
for i in xrange(len(ranges) - 1):
exec("file_%s = open(localfile, 'wb+')" % i)
exec("current = MultiGetWorker(oss, bucket, object, file_%s, ranges[i], ranges[i + 1] - 1, %s)" % (i, retry_times))
threadpool.append(current)
current.start()
for item in threadpool:
item.join()
if not os.path.isfile(localfile) or length != os.path.getsize(localfile):
print "localfile:%s size:%s is not equal with object:%s size:%s " % (localfile, os.path.getsize(localfile), object, length)
return False
else:
return True
class DeleteObjectWorker(Thread):
def __init__(self, oss, bucket, part_msg_list, retry_times=5):
Thread.__init__(self)
self.oss = oss
self.bucket = bucket
self.part_msg_list = part_msg_list
self.retry_times = retry_times
def run(self):
bucket = self.bucket
object_list = self.part_msg_list
step = 1000
begin = 0
end = 0
total_length = len(object_list)
remain_length = total_length
while True:
if remain_length > step:
end = begin + step
elif remain_length > 0:
end = begin + remain_length
else:
break
is_fail = True
retry_times = self.retry_times
while True:
try:
if retry_times <= 0:
break
res = self.oss.delete_objects(bucket, object_list[begin:end])
if res.status / 100 == 2:
is_fail = False
break
except:
retry_times = retry_times - 1
time.sleep(1)
if is_fail:
print "delete object_list[%s:%s] failed!, first is %s" % (begin, end, object_list[begin])
begin = end
remain_length = remain_length - step
class PutObjectGroupWorker(Thread):
def __init__(self, oss, bucket, file_path, part_msg_list, retry_times=5):
Thread.__init__(self)
self.oss = oss
self.bucket = bucket
self.part_msg_list = part_msg_list
self.file_path = file_path
self.retry_times = retry_times
def run(self):
for part in self.part_msg_list:
if len(part) == 5:
bucket = self.bucket
file_name = convert_utf8(part[1])
object_name = file_name
retry_times = self.retry_times
is_skip = False
while True:
try:
if retry_times <= 0:
break
res = self.oss.head_object(bucket, object_name)
if res.status == 200:
header_map = convert_header2map(res.getheaders())
etag = safe_get_element("etag", header_map)
md5 = part[2]
if etag.replace('"', "").upper() == md5.upper():
is_skip = True
break
except:
retry_times = retry_times - 1
time.sleep(1)
if is_skip:
continue
partsize = part[3]
offset = part[4]
retry_times = self.retry_times
while True:
try:
if retry_times <= 0:
break
res = self.oss.put_object_from_file_given_pos(bucket, object_name, self.file_path, offset, partsize)
if res.status != 200:
print "upload ", file_name, "failed!", " ret is:", res.status
print "headers", res.getheaders()
retry_times = retry_times - 1
time.sleep(1)
else:
break
except:
retry_times = retry_times - 1
time.sleep(1)
else:
print "ERROR! part", part , " is not as expected!"
class PutObjectLinkWorker(Thread):
def __init__(self, oss, bucket, file_path, part_msg_list, retry_times=5):
Thread.__init__(self)
self.oss = oss
self.bucket = bucket
self.part_msg_list = part_msg_list
self.file_path = file_path
self.retry_times = retry_times
def run(self):
for part in self.part_msg_list:
if len(part) == 5:
bucket = self.bucket
file_name = convert_utf8(part[1])
object_name = file_name
retry_times = self.retry_times
is_skip = False
while True:
try:
if retry_times <= 0:
break
res = self.oss.head_object(bucket, object_name)
if res.status == 200:
header_map = convert_header2map(res.getheaders())
etag = safe_get_element("etag", header_map)
md5 = part[2]
if etag.replace('"', "").upper() == md5.upper():
is_skip = True
break
except:
retry_times = retry_times - 1
time.sleep(1)
if is_skip:
continue
partsize = part[3]
offset = part[4]
retry_times = self.retry_times
while True:
try:
if retry_times <= 0:
break
res = self.oss.put_object_from_file_given_pos(bucket, object_name, self.file_path, offset, partsize)
if res.status != 200:
print "upload ", file_name, "failed!", " ret is:", res.status
print "headers", res.getheaders()
retry_times = retry_times - 1
time.sleep(1)
else:
break
except:
retry_times = retry_times - 1
time.sleep(1)
else:
print "ERROR! part", part , " is not as expected!"
def multi_upload_file2(oss, bucket, object, filename, upload_id, thread_num=10, max_part_num=10000, retry_times=5, headers=None, params=None):
if not upload_id:
print "empty upload_id"
return False
filename = convert_utf8(filename)
part_msg_list = []
part_msg_list = split_large_file(filename, object, max_part_num)
queue = Queue.Queue(0)
uploaded_part_map = {}
part_msg_xml = create_part_xml(part_msg_list)
each_part_retry_times = 1
for i in range(retry_times):
tmp_uploaded_part_map = get_part_map(oss, bucket, object, upload_id)
if tmp_uploaded_part_map:
for k, v in tmp_uploaded_part_map.items():
uploaded_part_map[k] = v
thread_pool = []
for part in part_msg_list:
if len(part) == 5:
part_number = str(part[0])
md5 = part[2]
is_need_upload = True
if uploaded_part_map.has_key(part_number):
md5 = part[2]
if uploaded_part_map[part_number].replace('"', "").upper() == md5.upper():
is_need_upload = False
continue
if is_need_upload:
queue.put((upload_part, oss, bucket, object, upload_id, filename, part))
else:
print "not expected part", part
for i in xrange(thread_num):
current = UploadPartWorker2(each_part_retry_times, queue)
thread_pool.append(current)
current.start()
queue.join()
for item in thread_pool:
item.join()
res = oss.complete_upload(bucket, object, upload_id, part_msg_xml, headers, params)
if res.status == 200:
return res
raise Exception("-3, after retry %s, failed, multi upload file failed! upload_id:%s" % (retry_times, upload_id))
def upload_part(oss, bucket, object, upload_id, file_path, part, retry_times=2):
if len(part) == 5:
part_number = str(part[0])
md5 = part[2]
partsize = part[3]
offset = part[4]
for i in range(retry_times):
try:
res = oss.upload_part_from_file_given_pos(bucket, object, file_path, offset, partsize, upload_id, part_number)
if res.status != 200:
time.sleep(1)
else:
break
except:
time.sleep(1)
else:
print "not expected part for multiupload", part
class UploadPartWorker2(threading.Thread):
def __init__(self, retry_times, queue):
threading.Thread.__init__(self)
self.queue = queue
self.retry_times = retry_times
def run(self):
while 1:
try:
(upload_part, oss, bucket, object, upload_id, filename, part) = self.queue.get(block=False)
upload_part(oss, bucket, object, upload_id, filename, part, self.retry_times)
self.queue.task_done()
except Queue.Empty:
break
except:
self.queue.task_done()
class UploadPartWorker(Thread):
def __init__(self, oss, bucket, object, upload_id, file_path, part_msg_list, uploaded_part_map, retry_times=5, debug=DEBUG):
Thread.__init__(self)
self.oss = oss
self.bucket = bucket
self.object = object
self.part_msg_list = part_msg_list
self.file_path = file_path
self.upload_id = upload_id
self.uploaded_part_map = uploaded_part_map.copy()
self.retry_times = retry_times
def run(self):
for part in self.part_msg_list:
part_number = str(part[0])
if len(part) == 5:
bucket = self.bucket
object = self.object
partsize = part[3]
offset = part[4]
retry_times = self.retry_times
while True:
try:
if self.uploaded_part_map.has_key(part_number):
md5 = part[2]
if self.uploaded_part_map[part_number].replace('"', "").upper() == md5.upper():
break
if retry_times <= 0:
break
res = self.oss.upload_part_from_file_given_pos(bucket, object, self.file_path, offset, partsize, self.upload_id, part_number)
if res.status != 200:
retry_times = retry_times - 1
time.sleep(1)
else:
etag = res.getheader("etag")
if etag:
self.uploaded_part_map[part_number] = etag
break
except:
retry_times = retry_times - 1
time.sleep(1)
else:
print "not expected part for multiupload", part
pass
class MultiGetWorker(Thread):
def __init__(self, oss, bucket, object, file, start, end, retry_times=5):
Thread.__init__(self)
self.oss = oss
self.bucket = bucket
self.object = object
self.curpos = start
self.startpos = start
self.endpos = end
self.file = file
self.length = self.endpos - self.startpos + 1
self.get_buffer_size = 10*1024*1024
self.retry_times = retry_times
def run(self):
if self.startpos > self.endpos:
return
retry_times = 0
totalread = 0
while True:
headers = {}
range_info = 'bytes=%d-%d' % (self.curpos, self.endpos)
headers['Range'] = range_info
self.file.seek(self.curpos)
try:
res = self.oss.object_operation("GET", self.bucket, self.object, headers)
if res.status == 206:
while True:
content = res.read(self.get_buffer_size)
if content:
self.file.write(content)
totalread += len(content)
self.curpos += len(content)
else:
break
else:
print "range get /%s/%s [%s] ret:%s" % (self.bucket, self.object, range_info, res.status)
except:
self.file.flush()
print "range get /%s/%s [%s] exception, retry:%s" % (self.bucket, self.object, range_info, retry_times)
if totalread == self.length or self.curpos > self.endpos:
break
retry_times += 1
if retry_times > self.retry_times:
print "ERROR, reach max retry times:%s when multi get /%s/%s" % (self.retry_times, self.bucket, self.object)
break
self.file.flush()
self.file.close()
############### misc ###############
def split_large_file(file_path, object_prefix="", max_part_num=1000, part_size=10*1024*1024, buffer_size=10*1024*1024):
parts_list = []
if os.path.isfile(file_path):
file_size = os.path.getsize(file_path)
if file_size > part_size * max_part_num:
part_size = (file_size + max_part_num - file_size % max_part_num) / max_part_num
part_order = 1
fp = open(file_path, 'rb')
fp.seek(os.SEEK_SET)
part_num = (file_size + part_size - 1) / part_size
for i in xrange(0, part_num):
left_len = part_size
real_part_size = 0
m = md5.new()
offset = part_size * i
while True:
read_size = 0
if left_len <= 0:
break
elif left_len < buffer_size:
read_size = left_len
else:
read_size = buffer_size
buffer_content = fp.read(read_size)
m.update(buffer_content)
real_part_size += len(buffer_content)
left_len = left_len - read_size
md5sum = m.hexdigest()
temp_file_name = os.path.basename(file_path) + "_" + str(part_order)
object_prefix = convert_utf8(object_prefix)
if not object_prefix:
file_name = sum_string(temp_file_name) + "_" + temp_file_name
else:
file_name = object_prefix + "/" + sum_string(temp_file_name) + "_" + temp_file_name
part_msg = (part_order, file_name, md5sum, real_part_size, offset)
parts_list.append(part_msg)
part_order += 1
fp.close()
else:
print "ERROR! No file: ", file_path, ", please check."
return parts_list
def sumfile(fobj):
'''Returns an md5 hash for an object with read() method.'''
m = md5.new()
while True:
d = fobj.read(8096)
if not d:
break
m.update(d)
return m.hexdigest()
def md5sum(fname):
'''Returns an md5 hash for file fname, or stdin if fname is "-".'''
if fname == '-':
ret = sumfile(sys.stdin)
else:
try:
f = file(fname, 'rb')
except:
return 'Failed to open file'
ret = sumfile(f)
f.close()
return ret
def md5sum2(filename, offset=0, partsize=0):
m = md5.new()
fp = open(filename, 'rb')
if offset > os.path.getsize(filename):
fp.seek(os.SEEK_SET, os.SEEK_END)
else:
fp.seek(offset)
left_len = partsize
BufferSize = 8 * 1024
while True:
if left_len <= 0:
break
elif left_len < BufferSize:
buffer_content = fp.read(left_len)
else:
buffer_content = fp.read(BufferSize)
m.update(buffer_content)
left_len = left_len - len(buffer_content)
md5sum = m.hexdigest()
return md5sum
def sum_string(content):
f = StringIO.StringIO(content)
md5sum = sumfile(f)
f.close()
return md5sum
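# Illustrative sketch (not part of the original module): sum_string() returns the
# hex MD5 digest of an in-memory string; "abc" is the classic MD5 test vector.
def _example_sum_string():
    assert sum_string("abc") == "900150983cd24fb0d6963f7d28e17f72"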
def convert_header2map(header_list):
header_map = {}
for (a, b) in header_list:
header_map[a] = b
return header_map
def safe_get_element(name, container):
for k, v in container.items():
if k.strip().lower() == name.strip().lower():
return v
return ""
def get_content_type_by_filename(file_name):
mime_type = ""
mime_map = {}
mime_map["js"] = "application/javascript"
mime_map["xlsx"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
mime_map["xltx"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.template"
mime_map["potx"] = "application/vnd.openxmlformats-officedocument.presentationml.template"
mime_map["ppsx"] = "application/vnd.openxmlformats-officedocument.presentationml.slideshow"
mime_map["pptx"] = "application/vnd.openxmlformats-officedocument.presentationml.presentation"
mime_map["sldx"] = "application/vnd.openxmlformats-officedocument.presentationml.slide"
mime_map["docx"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
mime_map["dotx"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.template"
mime_map["xlam"] = "application/vnd.ms-excel.addin.macroEnabled.12"
mime_map["xlsb"] = "application/vnd.ms-excel.sheet.binary.macroEnabled.12"
try:
suffix = ""
name = os.path.basename(file_name)
suffix = name.split('.')[-1]
if suffix in mime_map.keys():
mime_type = mime_map[suffix]
else:
import mimetypes
mimetypes.init()
mime_type = mimetypes.types_map["." + suffix]
except Exception:
mime_type = 'application/octet-stream'
if not mime_type:
mime_type = 'application/octet-stream'
return mime_type
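# Illustrative sketch (not part of the original module): hypothetical file names
# showing the explicit mapping and the octet-stream fallback.
def _example_get_content_type_by_filename():
    assert get_content_type_by_filename("script.js") == "application/javascript"
    assert get_content_type_by_filename("blob.unknown123") == "application/octet-stream"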
def smart_code(input_stream):
if isinstance(input_stream, str):
try:
tmp = unicode(input_stream, 'utf-8')
except UnicodeDecodeError:
try:
tmp = unicode(input_stream, 'gbk')
except UnicodeDecodeError:
try:
tmp = unicode(input_stream, 'big5')
except UnicodeDecodeError:
try:
tmp = unicode(input_stream, 'ascii')
except:
tmp = input_stream
else:
tmp = input_stream
return tmp
def is_ip(s):
try:
tmp_list = s.split(':')
s = tmp_list[0]
if s == 'localhost':
return True
tmp_list = s.split('.')
if len(tmp_list) != 4:
return False
else:
for i in tmp_list:
if int(i) < 0 or int(i) > 255:
return False
except:
return False
return True
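# Illustrative sketch (not part of the original module): is_ip() also accepts a
# trailing ":port" and the literal "localhost"; the addresses are hypothetical.
def _example_is_ip():
    assert is_ip("10.0.0.1")
    assert is_ip("10.0.0.1:8080")
    assert is_ip("localhost")
    assert not is_ip("999.1.1.1")     # octet out of range
    assert not is_ip("not.an.ip")     # wrong number of dotted parts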
def get_host_from_list(hosts):
tmp_list = hosts.split(",")
if len(tmp_list) <= 1:
return hosts
for tmp_host in tmp_list:
tmp_host = tmp_host.strip()
host = tmp_host
port = 80
try:
host_port_list = tmp_host.split(":")
if len(host_port_list) == 1:
host = host_port_list[0].strip()
elif len(host_port_list) == 2:
host = host_port_list[0].strip()
port = int(host_port_list[1].strip())
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect((host, port))
return host
except:
pass
return tmp_list[0].strip()
def is_oss_host(host):
if host.find(".aliyuncs.com") != -1 or host.find(".aliyun-inc.com") != -1 or host.find("s3.amazonaws.com") != -1:
return True
return False
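# Illustrative sketch (not part of the original module): hypothetical endpoints
# showing which hosts is_oss_host() treats as OSS/S3 service domains.
def _example_is_oss_host():
    assert is_oss_host("oss-cn-hangzhou.aliyuncs.com")
    assert is_oss_host("bucket.s3.amazonaws.com")
    assert not is_oss_host("storage.example.com")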
def convert_utf8(input_string):
if isinstance(input_string, unicode):
input_string = input_string.encode('utf-8')
return input_string
def get_file_base64_md5(file):
m = md5.new()
fd = open(file, 'rb')
while True:
d = fd.read(8096)
if not d:
break
m.update(d)
base64md5 = base64.encodestring(m.digest())
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
fd.close()
return base64md5
def NeedDownload(o, rule):
if rule and re.match(rule, o):
return True
return False
if __name__ == '__main__':
pass
|
|
#!/usr/bin/env python
from __future__ import absolute_import
from builtins import *
#Copyright (c) 2013, Eduard Broecker
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
#WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#DAMAGE.
#
# this script exports dbc-files from a canmatrix-object
# dbc-files are the can-matrix-definitions of CANoe (Vector Informatik)
from .canmatrix import *
import codecs
import re
#dbcExportEncoding = 'iso-8859-1'
#CP1253
def normalizeName(name, whitespaceReplacement):
    name = re.sub(r'\s+', whitespaceReplacement, name)
if ' ' in name:
name = '"' + name + '"'
return name
def exportDbc(db, filename, **options):
if 'dbcExportEncoding' in options:
dbcExportEncoding=options["dbcExportEncoding"]
else:
dbcExportEncoding='iso-8859-1'
if 'dbcExportCommentEncoding' in options:
dbcExportCommentEncoding=options["dbcExportCommentEncoding"]
else:
dbcExportCommentEncoding=dbcExportEncoding
if 'whitespaceReplacement' in options:
whitespaceReplacement=options["whitespaceReplacement"]
if whitespaceReplacement in ['', None] or set([' ', '\t']).intersection(whitespaceReplacement):
print("Warning: Settings may result in whitespace in DBC variable names. This is not supported by the DBC format.")
else:
whitespaceReplacement='_'
f = open(filename,"wb")
f.write( "VERSION \"created by canmatrix\"\n\n".encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
f.write("NS_ :\n\nBS_:\n\n".encode(dbcExportEncoding))
#Boardunits
f.write( "BU_: ".encode(dbcExportEncoding))
id = 1
nodeList = {};
for bu in db._BUs._list:
f.write((bu._name + " ").encode(dbcExportEncoding))
f.write("\n\n".encode(dbcExportEncoding))
#ValueTables
for table in db._valueTables:
f.write(("VAL_TABLE_ " + table).encode(dbcExportEncoding))
for row in db._valueTables[table]:
f.write((' ' + row + ' "' + db._valueTables[table][row] + '"').encode(dbcExportEncoding))
f.write(";\n".encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
#Frames
for bo in db._fl._list:
if bo._Transmitter.__len__() == 0:
bo._Transmitter = ["Vector__XXX"]
if bo._extended == 1:
bo._Id += 0x80000000
f.write(("BO_ %d " % bo._Id + bo._name + ": %d " % bo._Size + bo._Transmitter[0] + "\n").encode(dbcExportEncoding))
for signal in bo._signals:
name = normalizeName(signal._name, whitespaceReplacement)
f.write((" SG_ " + name).encode(dbcExportEncoding))
if signal._multiplex == 'Multiplexor':
f.write(' M '.encode(dbcExportEncoding))
elif signal._multiplex is not None:
f.write((" m%d " % int(signal._multiplex)).encode(dbcExportEncoding))
startbit = signal.getMsbStartbit()
if signal._is_signed:
sign = '-'
else:
sign = '+'
f.write((" : %d|%d@%d%c" % (startbit, signal._signalsize,signal._is_little_endian, sign)).encode(dbcExportEncoding))
f.write((" (%g,%g)" % (signal._factor, signal._offset)).encode(dbcExportEncoding))
f.write((" [%g|%g]" % (signal._min, signal._max)).encode(dbcExportEncoding))
f.write(' "'.encode(dbcExportEncoding))
f.write(signal._unit.encode(dbcExportEncoding))
f.write('" '.encode(dbcExportEncoding))
if signal._receiver.__len__() == 0:
signal._receiver = ['Vector__XXX']
f.write((','.join(signal._receiver) + "\n").encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
#second Sender:
for bo in db._fl._list:
if bo._Transmitter.__len__() > 1:
f.write(("BO_TX_BU_ %d : %s;\n" % (bo._Id,','.join(bo._Transmitter))).encode(dbcExportEncoding))
#frame comments
for bo in db._fl._list:
if bo._comment is not None and bo._comment.__len__() > 0:
f.write(("CM_ BO_ " + "%d " % bo._Id + ' "').encode(dbcExportEncoding))
f.write(bo._comment.replace('"','\\"').encode(dbcExportCommentEncoding))
f.write('";\n'.encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
#signal comments
for bo in db._fl._list:
for signal in bo._signals:
if signal._comment is not None and signal._comment.__len__() > 0:
name = normalizeName(signal._name, whitespaceReplacement)
f.write(("CM_ SG_ " + "%d " % bo._Id + name + ' "').encode(dbcExportEncoding))
f.write(signal._comment.replace('"','\\"').encode(dbcExportCommentEncoding))
f.write('";\n'.encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
    #boardunit comments
for bu in db._BUs._list:
if bu._comment is not None and bu._comment.__len__() > 0:
f.write(("CM_ BU_ " + bu._name + ' "' + bu._comment.replace('"','\\"') + '";\n').encode(dbcExportCommentEncoding))
f.write("\n".encode(dbcExportEncoding))
defaults = {}
for (type,define) in sorted(list(db._frameDefines.items())):
f.write(('BA_DEF_ BO_ "' + type + '" ').encode(dbcExportEncoding) + define._definition.encode(dbcExportEncoding,'replace') + ';\n'.encode(dbcExportEncoding))
if type not in defaults and define._defaultValue is not None:
defaults[type] = define._defaultValue
for (type,define) in sorted(list(db._signalDefines.items())):
f.write(('BA_DEF_ SG_ "' + type + '" ').encode(dbcExportEncoding) + define._definition.encode(dbcExportEncoding,'replace') + ';\n'.encode(dbcExportEncoding))
if type not in defaults and define._defaultValue is not None:
defaults[type] = define._defaultValue
for (type,define) in sorted(list(db._buDefines.items())):
f.write(('BA_DEF_ BU_ "' + type + '" ').encode(dbcExportEncoding) + define._definition.encode(dbcExportEncoding,'replace') + ';\n'.encode(dbcExportEncoding))
if type not in defaults and define._defaultValue is not None:
defaults[type] = define._defaultValue
for (type,define) in sorted(list(db._globalDefines.items())):
f.write(('BA_DEF_ "' + type + '" ').encode(dbcExportEncoding) + define._definition.encode(dbcExportEncoding,'replace') + ';\n'.encode(dbcExportEncoding))
if type not in defaults and define._defaultValue is not None:
defaults[type] = define._defaultValue
for define in sorted(defaults):
f.write(('BA_DEF_DEF_ "' + define + '" ').encode(dbcExportEncoding) + defaults[define].encode(dbcExportEncoding,'replace') + ';\n'.encode(dbcExportEncoding))
#boardunit-attributes:
for bu in db._BUs._list:
for attrib,val in sorted(bu._attributes.items()):
f.write(('BA_ "' + attrib + '" BU_ ' + bu._name + ' ' + str(val) + ';\n').encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
#global-attributes:
for attrib,val in sorted(db._attributes.items()):
f.write(('BA_ "' + attrib + '" ' + val + ';\n').encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
#messages-attributes:
for bo in db._fl._list:
for attrib,val in sorted(bo._attributes.items()):
f.write(('BA_ "' + attrib + '" BO_ %d ' % bo._Id + val + ';\n').encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
#signal-attributes:
for bo in db._fl._list:
for signal in bo._signals:
for attrib,val in sorted(signal._attributes.items()):
name = normalizeName(signal._name, whitespaceReplacement)
f.write(('BA_ "' + attrib + '" SG_ %d ' % bo._Id + name + ' ' + val + ';\n').encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
#signal-values:
for bo in db._fl._list:
for signal in bo._signals:
if len(signal._values) > 0:
f.write(('VAL_ %d ' % bo._Id + signal._name).encode(dbcExportEncoding))
for attrib,val in sorted(signal._values.items(), key=lambda x: int(x[0])):
f.write((' ' + str(attrib) + ' "' + val + '"').encode(dbcExportEncoding))
f.write(";\n".encode(dbcExportEncoding));
#signal-groups:
for bo in db._fl._list:
for sigGroup in bo._SignalGroups:
f.write(("SIG_GROUP_ " + str(bo._Id) + " " + sigGroup._name + " " + str(sigGroup._Id)+ " :").encode(dbcExportEncoding))
for signal in sigGroup._members:
f.write((" " + signal._name).encode(dbcExportEncoding))
f.write(";\n".encode(dbcExportEncoding))
|
|
from __future__ import with_statement
import os
import sys
from StringIO import StringIO
from nose.tools import eq_, ok_
from fabric.state import env, output
from fabric.context_managers import (cd, settings, lcd, hide, shell_env, quiet,
warn_only, prefix, path)
from fabric.operations import run, local, _prefix_commands
from utils import mock_streams, FabricTest
from server import server
#
# cd()
#
def test_error_handling():
"""
cd cleans up after itself even in case of an exception
"""
class TestException(Exception):
pass
try:
with cd('somewhere'):
raise TestException('Houston, we have a problem.')
except TestException:
pass
finally:
with cd('else'):
eq_(env.cwd, 'else')
def test_cwd_with_absolute_paths():
"""
cd() should append arg if non-absolute or overwrite otherwise
"""
existing = '/some/existing/path'
additional = 'another'
absolute = '/absolute/path'
with settings(cwd=existing):
with cd(absolute):
eq_(env.cwd, absolute)
with cd(additional):
eq_(env.cwd, existing + '/' + additional)
def test_cd_home_dir():
"""
cd() should work with home directories
"""
homepath = "~/somepath"
with cd(homepath):
eq_(env.cwd, homepath)
def test_cd_nested_home_abs_dirs():
"""
cd() should work with nested user homedir (starting with ~) paths.
It should always take the last path if the new path begins with `/` or `~`
"""
home_path = "~/somepath"
abs_path = "/some/random/path"
relative_path = "some/random/path"
# 2 nested homedir paths
with cd(home_path):
eq_(env.cwd, home_path)
another_path = home_path + "/another/path"
with cd(another_path):
eq_(env.cwd, another_path)
# first absolute path, then a homedir path
with cd(abs_path):
eq_(env.cwd, abs_path)
with cd(home_path):
eq_(env.cwd, home_path)
# first relative path, then a homedir path
with cd(relative_path):
eq_(env.cwd, relative_path)
with cd(home_path):
eq_(env.cwd, home_path)
    # first home path, then a relative path
with cd(home_path):
eq_(env.cwd, home_path)
with cd(relative_path):
eq_(env.cwd, home_path + "/" + relative_path)
#
# prefix
#
def test_nested_prefix():
"""
prefix context managers can be created outside of the with block and nested
"""
cm1 = prefix('1')
cm2 = prefix('2')
with cm1:
with cm2:
eq_(env.command_prefixes, ['1', '2'])
#
# cd prefix with dev/null
#
def test_cd_prefix():
"""
    cd prefix should redirect cd's output to /dev/null, in case CDPATH is set
"""
some_path = "~/somepath"
with cd(some_path):
command_out = _prefix_commands('foo', "remote")
eq_(command_out, 'cd %s >/dev/null && foo' % some_path)
# def test_cd_prefix_on_win32():
# """
# cd prefix should NOT direct output to /dev/null on win32
# """
# some_path = "~/somepath"
# import fabric
# try:
# fabric.state.win32 = True
# with cd(some_path):
# command_out = _prefix_commands('foo', "remote")
# eq_(command_out, 'cd %s && foo' % some_path)
# finally:
# fabric.state.win32 = False
#
# hide/show
#
def test_hide_show_exception_handling():
"""
hide()/show() should clean up OK if exceptions are raised
"""
try:
with hide('stderr'):
# now it's False, while the default is True
eq_(output.stderr, False)
raise Exception
except Exception:
# Here it should be True again.
# If it's False, this means hide() didn't clean up OK.
eq_(output.stderr, True)
#
# settings()
#
def test_setting_new_env_dict_key_should_work():
"""
Using settings() with a previously nonexistent key should work correctly
"""
key = 'thisshouldnevereverexistseriouslynow'
value = 'a winner is you'
with settings(**{key: value}):
ok_(key in env)
ok_(key not in env)
def test_settings():
"""
settings() should temporarily override env dict with given key/value pair
"""
env.testval = "outer value"
with settings(testval="inner value"):
eq_(env.testval, "inner value")
eq_(env.testval, "outer value")
def test_settings_with_multiple_kwargs():
"""
settings() should temporarily override env dict with given key/value pairS
"""
env.testval1 = "outer 1"
env.testval2 = "outer 2"
with settings(testval1="inner 1", testval2="inner 2"):
eq_(env.testval1, "inner 1")
eq_(env.testval2, "inner 2")
eq_(env.testval1, "outer 1")
eq_(env.testval2, "outer 2")
def test_settings_with_other_context_managers():
"""
    settings() should take other context managers, and use them with other overridden
key/value pairs.
"""
env.testval1 = "outer 1"
prev_lcwd = env.lcwd
with settings(lcd("here"), testval1="inner 1"):
eq_(env.testval1, "inner 1")
ok_(env.lcwd.endswith("here")) # Should be the side-effect of adding cd to settings
    eq_(env.testval1, "outer 1")
eq_(env.lcwd, prev_lcwd)
def test_settings_clean_revert():
"""
settings(clean_revert=True) should only revert values matching input values
"""
env.modified = "outer"
env.notmodified = "outer"
with settings(
modified="inner",
notmodified="inner",
inner_only="only",
clean_revert=True
):
eq_(env.modified, "inner")
eq_(env.notmodified, "inner")
eq_(env.inner_only, "only")
env.modified = "modified internally"
eq_(env.modified, "modified internally")
ok_("inner_only" not in env)
#
# shell_env()
#
def test_shell_env():
"""
shell_env() sets the shell_env attribute in the env dict
"""
with shell_env(KEY="value"):
eq_(env.shell_env['KEY'], 'value')
eq_(env.shell_env, {})
class TestQuietAndWarnOnly(FabricTest):
@server()
@mock_streams('both')
def test_quiet_hides_all_output(self):
# Sanity test - normally this is not empty
run("ls /simple")
ok_(sys.stdout.getvalue())
# Reset
sys.stdout = StringIO()
# Real test
with quiet():
run("ls /simple")
# Empty output
ok_(not sys.stdout.getvalue())
# Reset
sys.stdout = StringIO()
# Kwarg test
run("ls /simple", quiet=True)
ok_(not sys.stdout.getvalue())
@server(responses={'barf': [
"this is my stdout",
"this is my stderr",
1
]})
def test_quiet_sets_warn_only_to_true(self):
# Sanity test to ensure environment
with settings(warn_only=False):
with quiet():
eq_(run("barf").return_code, 1)
# Kwarg test
eq_(run("barf", quiet=True).return_code, 1)
@server(responses={'hrm': ["", "", 1]})
@mock_streams('both')
def test_warn_only_is_same_as_settings_warn_only(self):
with warn_only():
eq_(run("hrm").failed, True)
@server()
@mock_streams('both')
def test_warn_only_does_not_imply_hide_everything(self):
with warn_only():
run("ls /simple")
assert sys.stdout.getvalue().strip() != ""
# path() (distinct from shell_env)
class TestPathManager(FabricTest):
def setup(self):
super(TestPathManager, self).setup()
self.real = os.environ.get('PATH')
def via_local(self):
with hide('everything'):
return local("echo $PATH", capture=True)
def test_lack_of_path_has_default_local_path(self):
"""
No use of 'with path' == default local $PATH
"""
eq_(self.real, self.via_local())
def test_use_of_path_appends_by_default(self):
"""
'with path' appends by default
"""
with path('foo'):
eq_(self.via_local(), self.real + ":foo")
|
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 12 11:28:37 2017
@author: tuili
"""
from __future__ import print_function
import numpy as np
import os
import pickle
import datetime
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.python.platform import gfile
#from tensorflow.python import debug as tf_debug
import data_util as du
from model import Regression
# Like FLAGS.
LENGTH_MAX = 350 # Greater than the maximum number of tokens among all paragraphs.
DATA_DIRECTORY = "./data_new_small"
BATCH_SIZE = 5
NUM_EPOCH = 200000
EMBEDDING_SIZE = 10000
LEARNING_RATE = 0.0005
MODEL_DIR = 'model_saidi'
SAVE_EVERY = 75
def training():
# Load data.
print('Loading data...')
try:
with gfile.Open(MODEL_DIR + '/data', 'rb') as f:
x_data, y_data = pickle.loads(f.read())
print(' Old data found in {}.'.format(MODEL_DIR +'/data'))
except:
print(' Creation of a new set of data.')
x_data, y_data = zip(*du.load_labels_data(DATA_DIRECTORY))
with gfile.Open(MODEL_DIR + '/data', 'wb') as f:
f.write(pickle.dumps((x_data, y_data)))
# Load and save vocabulary.
print('Loading vocabulary...')
try:
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(MODEL_DIR +'/vocab')
print(" Old vocabulary found in {}.".format(MODEL_DIR + '/vocab'))
except:
print(" Creation of a new vocabulary.")
max_document_length = max([len(x.split(" ")) for x in y_data])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
vocab_processor.fit(y_data)
vocab_processor_x = learn.preprocessing.VocabularyProcessor(4, vocabulary=vocab_processor.vocabulary_)
vocab_processor.save(MODEL_DIR+ '/vocab')
print(" Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
# Write correspondance 'word ID' to 'word'.
with open(MODEL_DIR + '/correspondance.tsv', 'w') as f:
f.write('Word ID\tWord\n')
for word, word_id in vocab_processor.vocabulary_._mapping.iteritems():
f.write('{}\t{}\n'.format(str(word_id), word))
with tf.Graph().as_default() as graph:
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# Create model.
print('Creating model...')
model = Regression(
number_of_words = len(x_data[0]),
sequence_length = LENGTH_MAX,
vocab_size = len(vocab_processor.vocabulary_),
embedding_size = EMBEDDING_SIZE)
# Define Training procedure.
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
grads_and_vars = optimizer.compute_gradients(model.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Checkpoint directory.
checkpoint_path = MODEL_DIR + "/checkpoint.ckpt"
saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
with tf.Session(graph=graph) as sess:
# Initialize.
print('Initializing...')
sess.run(tf.global_variables_initializer())
# Maybe restore model parameters.
ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + '.index'):
print("Restoring model parameters from %s." % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("Fresh parameters for this model.")
# Tensorboard.
dir_summary = MODEL_DIR +'/summary/' + datetime.datetime.now().isoformat()
train_writer = tf.summary.FileWriter(dir_summary, sess.graph)
merged_summary = tf.summary.merge_all()
def train_step(x_batch, y_batch):
"""
A single training step.
"""
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch}
summary, _, step, loss = sess.run(
[merged_summary, train_op, global_step, model.loss],
feed_dict)
train_writer.add_summary(summary, step)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {}".format(time_str, step, loss))
# Generate batches.
batch_generator = du.batch_iter(DATA_DIRECTORY, BATCH_SIZE, 200000)
# Training loops.
while True:
x_text, y_text = zip(*batch_generator.next())
x_batch = [" ".join(four_words) for four_words in x_text]
x_batch = vocab_processor_x.transform(x_batch) # list of token sequence = [[1,2,3,4], [5,6,7,8], [7,8,9,10]]
y_batch = vocab_processor.transform(y_text) # list of tokens sequences = [[1,3 2 5 6], [7,8,9,10,12,15,16]]
x_batch = np.array([x for x in x_batch])
y_batch = np.array([y for y in y_batch])
# Pad sentences of variable lengths.
y_batch = np.concatenate((y_batch, np.zeros((len(y_batch), LENGTH_MAX - len(y_batch[1])))), 1)
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if current_step % SAVE_EVERY == 0:
path = saver.save(sess, checkpoint_path, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
# Unfinished test function. Use with precaution.
def testing():
tf.reset_default_graph()
with tf.Session() as sess:
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# Definition of x_data, y_data for the definition of the model.
x_data = [['i']*4]*4
y_data = ['man eat dog <<EOS>> help <<EOS>> pie',
'man eat dog <<EOS>> fit <<EOS>> pile',
'man eat dog <<EOS>> form <<EOS>> lip',
'man eat dog god <<EOS>> bye <<EOS>> plot']
# Creation of the vocabulary
max_document_length = max([len(x.split(" ")) for x in y_data])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
vocab_processor.fit(y_data)
vocab_processor_x = learn.preprocessing.VocabularyProcessor(4, vocabulary=vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
#print(vocab_processor.vocabulary_._mapping) # print all vocabulary
# Definition model
# Create model.
print('Creating model...')
model = Regression(
number_of_words = len(x_data[0]),
sequence_length = LENGTH_MAX,
vocab_size = len(vocab_processor.vocabulary_),
embedding_size = 3)
# Define Training procedure.
print('training procedure')
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(0.001)
grads_and_vars = optimizer.compute_gradients(model.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Initialize.
print('Initialize...')
sess.run(tf.global_variables_initializer())
print('End of initialization.')
def train_step(x_batch, y_batch):
"""
A single training step.
"""
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
}
_, step, loss = sess.run(
[train_op, global_step, model.loss],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {}".format(time_str, step, loss))
# Training loops
while True:
x_text = (('man', 'dog', 'eat', 'pie'),
('man', 'dog', 'eat', 'pile'),
('man', 'dog', 'eat', 'lip'),
('man', 'dog', 'eat', 'plot'))
y_text = ('man eat dog <<EOS>> help <<EOS>> pie',
'man eat dog <<EOS>> fit <<EOS>> pile',
'man eat dog <<EOS>> form <<EOS>> lip',
'man eat dog god <<EOS>> bye <<EOS>> plot')
x_batch = [" ".join(four_words) for four_words in x_text]
x_batch = vocab_processor_x.transform(x_batch) # list of token sequence = [[1,2,3,4], [5,6,7,8], [7,8,9,10]]
y_batch = vocab_processor.transform(y_text) # list of tokens sequences = [[1,3 2 5 6], [7,8,9,10,12,15,16]]
x_batch = np.array([x for x in x_batch])
y_batch = np.array([y for y in y_batch])
# Padding
y_batch = np.concatenate((y_batch, np.zeros((len(y_batch), LENGTH_MAX - len(y_batch[1])))), 1)
train_step(x_batch, y_batch)
def using(four_words_in_a_tuple):
# Load data.
print('Loading data...')
try: ## TODO: change try-except with is_file..
with gfile.Open(MODEL_DIR + '/data', 'rb') as f:
x_data, y_data = pickle.loads(f.read())
print(' Old data found in {}.'.format(MODEL_DIR+'/data'))
except:
print("I cannot continue: no data has been found in {}.".format(MODEL_DIR+'/data'))
return
# Load and save vocabulary.
print('Loading vocabulary...')
try:
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(MODEL_DIR+'/vocab')
print(" Old vocabulary found in {}.".format(MODEL_DIR+'/vocab'))
except:
print("I cannot continue: no vocabulary has been found in {}.".format(MODEL_DIR+'/vocab'))
return
vocab_processor_x = learn.preprocessing.VocabularyProcessor(4, vocabulary=vocab_processor.vocabulary_)
with tf.Graph().as_default() as graph:
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# Create model.
print('Creating model...')
model = Regression(
number_of_words = len(x_data[0]),
sequence_length = LENGTH_MAX,
vocab_size = len(vocab_processor.vocabulary_),
embedding_size = EMBEDDING_SIZE)
# Checkpoint directory.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with tf.Session(graph=graph) as sess:
# Initialize.
print('Initializing...')
sess.run(tf.global_variables_initializer())
# Maybe restore model parameters.
ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + '.index'):
print("Restoring model parameters from %s." % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("I cannot continue: no checkpoint has been found in {}.".format(ckpt.model_checkpoint_path))
return
def test_step(x_batch, y_batch):
"""
            A single scoring step: runs the model on the batch and returns its scores.
"""
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch}
scores = sess.run([model.scores],feed_dict)
return scores
x_text, y_text = zip(*[[four_words_in_a_tuple, 'help <<EOS>> help <<EOS>> help']])
x_batch = [" ".join(four_words) for four_words in x_text]
x_batch = vocab_processor_x.transform(x_batch) # list of token sequence = [[1,2,3,4], [5,6,7,8], [7,8,9,10]]
y_batch = vocab_processor.transform(y_text) # list of tokens sequences = [[1,3 2 5 6], [7,8,9,10,12,15,16]]
x_batch = np.array([x for x in x_batch])
y_batch = np.array([y for y in y_batch])
# Padding
y_batch = np.concatenate((y_batch, np.zeros((len(y_batch), LENGTH_MAX - len(y_batch[0])))), 1)
scores = test_step(x_batch, y_batch)
return scores
if __name__ == '__main__':
    #testing()
training()
#
# scores = using(('animal', 'time', 'feed', 'witch'))
# print('scores: {}'.format(scores))
#
# with open('scores', 'wb') as f:
# pickle.dump(scores, f)
|
|
"""Easy to use python subprocess interface."""
from easyprocess.unicodeutil import split_command, unidecode, uniencode
import logging
import os.path
import platform
import signal
import subprocess
import tempfile
import threading
import time
from easyprocess.about import __version__
log = logging.getLogger(__name__)
log.debug('version=%s', __version__)
SECTION_LINK = 'link'
POLL_TIME = 0.1
USE_POLL = 0
class EasyProcessError(Exception):
def __init__(self, easy_process, msg=''):
self.easy_process = easy_process
self.msg = msg
def __str__(self):
return self.msg + ' ' + repr(self.easy_process)
template = '''cmd=%s
OSError=%s
Program install error! '''
class EasyProcessCheckInstalledError(Exception):
"""This exception is raised when a process run by check() returns
a non-zero exit status or OSError is raised.
"""
def __init__(self, easy_process):
self.easy_process = easy_process
def __str__(self):
msg = template % (self.easy_process.cmd,
self.easy_process.oserror,
)
if self.easy_process.url:
msg += '\nhome page: ' + self.easy_process.url
if platform.dist()[0].lower() == 'ubuntu':
if self.easy_process.ubuntu_package:
msg += '\nYou can install it in terminal:\n'
msg += 'sudo apt-get install %s' % self.easy_process.ubuntu_package
return msg
class EasyProcess(object):
'''
.. module:: easyprocess
simple interface for :mod:`subprocess`
shell is not supported (shell=False)
.. warning::
unicode is supported only for string list command (Python2.x)
(check :mod:`shlex` for more information)
:param cmd: string ('ls -l') or list of strings (['ls','-l'])
:param cwd: working directory
:param use_temp_files: use temp files instead of pipes for
stdout and stderr,
pipes can cause deadlock in some cases
(see unit tests)
:param env: If *env* is not ``None``, it must be a mapping that defines the environment
variables for the new process; these are used instead of inheriting the current
process' environment, which is the default behavior.
(check :mod:`subprocess` for more information)
'''
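    # Illustrative usage sketch (not part of the original source), based on the
    # docstring above; the commands are arbitrary examples:
    #
    #   stdout = EasyProcess('echo hello').call().stdout   # run, wait, collect output
    #   EasyProcess('python --version').check()            # raise unless exit code == 0
    #   EasyProcess('sleep 5').call(timeout=1)              # wait() with a timeout, then stop()
    #   with EasyProcess('sleep 5'):                         # start()/stop() via context manager
    #       pass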
def __init__(self, cmd, ubuntu_package=None, url=None, cwd=None, use_temp_files=True, env=None):
self.use_temp_files = use_temp_files
self._outputs_processed = False
self.env = env
self.popen = None
self.stdout = None
self.stderr = None
self._stdout_file = None
self._stderr_file = None
self.url = url
self.ubuntu_package = ubuntu_package
self.is_started = False
self.oserror = None
self.cmd_param = cmd
self._thread = None
# self.max_bytes_to_log = max_bytes_to_log
self._stop_thread = False
self.timeout_happened = False
self.cwd = cwd
cmd = split_command(cmd)
self.cmd = cmd
self.cmd_as_string = ' '.join(self.cmd) # TODO: not perfect
log.debug('param: "%s" ', self.cmd_param)
log.debug('command: %s', self.cmd)
log.debug('joined command: %s', self.cmd_as_string)
if not len(cmd):
raise EasyProcessError(self, 'empty command!')
def __repr__(self):
msg = '<%s cmd_param=%s cmd=%s oserror=%s return_code=%s stdout="%s" stderr="%s" timeout_happened=%s>' % (
self.__class__.__name__,
self.cmd_param,
self.cmd,
self.oserror,
self.return_code,
self.stdout,
self.stderr,
self.timeout_happened,
)
return msg
@property
def pid(self):
'''
PID (:attr:`subprocess.Popen.pid`)
:rtype: int
'''
if self.popen:
return self.popen.pid
@property
def return_code(self):
'''
returncode (:attr:`subprocess.Popen.returncode`)
:rtype: int
'''
if self.popen:
return self.popen.returncode
def check(self, return_code=0):
"""Run command with arguments. Wait for command to complete. If the
exit code was as expected and there is no exception then return,
otherwise raise EasyProcessError.
:param return_code: int, expected return code
:rtype: self
"""
ret = self.call().return_code
ok = ret == return_code
if not ok:
raise EasyProcessError(
self, 'check error, return code is not zero!')
return self
def check_installed(self):
"""Used for testing if program is installed.
Run command with arguments. Wait for command to complete.
If OSError raised, then raise :class:`EasyProcessCheckInstalledError`
with information about program installation
:param return_code: int, expected return code
:rtype: self
"""
try:
self.call()
except Exception:
raise EasyProcessCheckInstalledError(self)
return self
def call(self, timeout=None):
"""Run command with arguments. Wait for command to complete.
same as:
1. :meth:`start`
2. :meth:`wait`
3. :meth:`stop`
:rtype: self
"""
self.start().wait(timeout=timeout)
if self.is_alive():
self.stop()
return self
def start(self):
"""start command in background and does not wait for it.
:rtype: self
"""
if self.is_started:
raise EasyProcessError(self, 'process was started twice!')
if self.use_temp_files:
self._stdout_file = tempfile.TemporaryFile(prefix='stdout_')
self._stderr_file = tempfile.TemporaryFile(prefix='stderr_')
stdout = self._stdout_file
stderr = self._stderr_file
else:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
cmd = list(map(uniencode, self.cmd))
try:
self.popen = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
cwd=self.cwd,
env=self.env,
)
except OSError, oserror:
log.debug('OSError exception: %s', oserror)
self.oserror = oserror
raise EasyProcessError(self, 'start error')
self.is_started = True
log.debug('process was started (pid=%s)', self.pid)
return self
def is_alive(self):
'''
poll process using :meth:`subprocess.Popen.poll`
:rtype: bool
'''
if self.popen:
return self.popen.poll() is None
else:
return False
def wait(self, timeout=None):
"""Wait for command to complete.
Timeout:
- discussion: http://stackoverflow.com/questions/1191374/subprocess-with-timeout
- implementation: threading
:rtype: self
"""
if timeout is not None:
if not self._thread:
self._thread = threading.Thread(target=self._wait4process)
self._thread.daemon = 1
self._thread.start()
if self._thread:
self._thread.join(timeout=timeout)
self.timeout_happened = self.timeout_happened or self._thread.isAlive()
else:
# no timeout and no existing thread
self._wait4process()
return self
def _wait4process(self):
if self._outputs_processed:
return
def remove_ending_lf(s):
if s.endswith('\n'):
return s[:-1]
else:
return s
if self.popen:
if self.use_temp_files:
if USE_POLL:
while 1:
if self.popen.poll() is not None:
break
if self._stop_thread:
return
time.sleep(POLL_TIME)
else:
# wait() blocks process, timeout not possible
self.popen.wait()
self._outputs_processed = True
self._stdout_file.seek(0)
self._stderr_file.seek(0)
self.stdout = self._stdout_file.read()
self.stderr = self._stderr_file.read()
self._stdout_file.close()
self._stderr_file.close()
else:
# This will deadlock when using stdout=PIPE and/or stderr=PIPE
# and the child process generates enough output to a pipe such
# that it blocks waiting for the OS pipe buffer to accept more data.
# Use communicate() to avoid that.
# self.popen.wait()
# self.stdout = self.popen.stdout.read()
# self.stderr = self.popen.stderr.read()
# communicate() blocks process, timeout not possible
self._outputs_processed = True
(self.stdout, self.stderr) = self.popen.communicate()
log.debug('process has ended')
self.stdout = remove_ending_lf(unidecode(self.stdout))
self.stderr = remove_ending_lf(unidecode(self.stderr))
log.debug('return code=%s', self.return_code)
# def limit_str(s):
# if len(s) > self.max_bytes_to_log:
# warn = '[middle of output was removed, max_bytes_to_log=%s]'%(self.max_bytes_to_log)
# s = s[:self.max_bytes_to_log / 2] + warn + s[-self.max_bytes_to_log / 2:]
# return s
log.debug('stdout=%s', self.stdout)
log.debug('stderr=%s', self.stderr)
def stop(self):
"""Kill process and wait for command to complete.
same as:
1. :meth:`sendstop`
2. :meth:`wait`
:rtype: self
"""
return self.sendstop().wait()
def sendstop(self):
'''
Kill process (:meth:`subprocess.Popen.terminate`).
Do not wait for command to complete.
:rtype: self
'''
if not self.is_started:
raise EasyProcessError(self, 'process was not started!')
log.debug('stopping process (pid=%s cmd="%s")', self.pid, self.cmd)
if self.popen:
if self.is_alive():
log.debug('process is active -> sending SIGTERM')
try:
try:
self.popen.terminate()
except AttributeError:
os.kill(self.popen.pid, signal.SIGKILL)
except OSError, oserror:
log.debug('exception in terminate:%s', oserror)
else:
log.debug('process was already stopped')
else:
log.debug('process was not started')
return self
def sleep(self, sec):
'''
sleeping (same as :func:`time.sleep`)
:rtype: self
'''
time.sleep(sec)
return self
def wrap(self, func, delay=0):
'''
returns a function which:
1. start process
2. call func, save result
3. stop process
4. returns result
similar to :keyword:`with` statement
:rtype:
'''
def wrapped():
self.start()
if delay:
self.sleep(delay)
x = None
try:
x = func()
except OSError, oserror:
log.debug('OSError exception:%s', oserror)
self.oserror = oserror
raise EasyProcessError(self, 'wrap error!')
finally:
self.stop()
return x
return wrapped
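    # Hedged example (not in the original source) of how wrap() might be used;
    # 'take_screenshot' is a hypothetical callable:
    #
    #   f = EasyProcess('Xvfb :99').wrap(take_screenshot, delay=1)
    #   image = f()   # starts Xvfb, sleeps 1s, calls take_screenshot(), stops Xvfb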
def __enter__(self):
'''used by the :keyword:`with` statement'''
self.start()
return self
def __exit__(self, *exc_info):
'''used by the :keyword:`with` statement'''
self.stop()
def extract_version(txt):
"""This function tries to extract the version from the help text of any
program."""
words = txt.replace(',', ' ').split()
version = None
for x in reversed(words):
if len(x) > 2:
if x[0].lower() == 'v':
x = x[1:]
if '.' in x and x[0].isdigit():
version = x
break
return version
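# Illustrative sketch (not in the original source): extract_version() walks the
# help text backwards and returns the first dotted, digit-leading token, e.g.:
#
#   extract_version('Xvfb, X Virtual Frame Buffer, version 1.20.4')  # -> '1.20.4'
#   extract_version('foo v2.1 (c) 2020')                             # -> '2.1'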
Proc = EasyProcess
|
|
"""
Default settings for the ``mezzanine.core`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
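# A minimal sketch (not part of the original module) of how these settings are
# typically overridden; the values below are arbitrary examples placed in a
# project's settings.py (or edited in the admin for ``editable=True`` settings):
#
#   SITE_TITLE = "My Site"
#   MAX_PAGING_LINKS = 5
#   RICHTEXT_FILTER_LEVEL = 2   # i.e. RICHTEXT_FILTER_LEVEL_LOW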
register_setting(
name="ADMIN_MENU_ORDER",
description=_("Controls the ordering and grouping of the admin menu."),
editable=False,
default=(
(_("Content"), ("pages.Page", "blog.BlogPost",
"generic.ThreadedComment", (_("Media Library"), "fb_browse"),)),
(_("Site"), ("sites.Site", "redirects.Redirect", "conf.Setting")),
(_("Users"), ("auth.User", "auth.Group",)),
),
)
register_setting(
name="ADMIN_MENU_COLLAPSED",
description=_("Controls whether or not the left-hand admin menu is "
"collpased by default."),
editable=True,
default=False,
)
register_setting(
name="ADMIN_REMOVAL",
description=_("Unregister these models from the admin."),
editable=False,
default=(),
)
register_setting(
name="ADMIN_THUMB_SIZE",
description=_("Size of thumbnail previews for image fields in the "
"admin interface."),
editable=False,
default="24x24",
)
register_setting(
name="AKISMET_API_KEY",
label=_("Akismet API Key"),
description=_("Key for http://akismet.com spam filtering service. Used "
"for filtering comments and forms."),
editable=True,
default="",
)
register_setting(
name="BITLY_ACCESS_TOKEN",
label=_("bit.ly access token"),
description=_("Access token for http://bit.ly URL shortening service."),
editable=True,
default="",
)
register_setting(
name="CACHE_SET_DELAY_SECONDS",
description=_("Mezzanine's caching uses a technique know as mint "
"caching. This is where the requested expiry for a cache entry "
"is stored with the cache entry in cache, and the real expiry "
"used has the ``CACHE_SET_DELAY`` added to it. Then on a cache get, "
"the store expiry is checked, and if it has passed, the cache entry "
"is set again, and no entry is returned. This tries to ensure that "
"cache misses never occur, and if many clients were to get a cache "
"miss at once, only one would actually need to re-generated the "
"cache entry."),
editable=False,
default=30,
)
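# A rough sketch (not Mezzanine's actual implementation) of the mint-caching
# idea described above; the helper names and the cache backend are assumed:
#
#   CACHE_SET_DELAY = 30  # the grace period described above
#
#   def mint_cache_set(key, value, requested_expiry):
#       # store the requested expiry next to the value, pad the real expiry
#       cache.set(key, (value, time.time() + requested_expiry),
#                 requested_expiry + CACHE_SET_DELAY)
#
#   def mint_cache_get(key):
#       entry = cache.get(key)
#       if entry is None:
#           return None
#       value, stale_at = entry
#       if time.time() > stale_at:
#           # The first caller to see a stale entry re-sets it (so concurrent
#           # callers keep getting the old value for a while) and takes the
#           # miss, so only one client regenerates the entry.
#           mint_cache_set(key, value, CACHE_SET_DELAY)
#           return None
#       return value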
if "mezzanine.blog" in settings.INSTALLED_APPS:
dashboard_tags = (
("blog_tags.quick_blog", "mezzanine_tags.app_list"),
("comment_tags.recent_comments",),
("mezzanine_tags.recent_actions",),
)
else:
dashboard_tags = (
("mezzanine_tags.app_list",),
("mezzanine_tags.recent_actions",),
(),
)
register_setting(
name="DASHBOARD_TAGS",
description=_("A three item sequence, each containing a sequence of "
"template tags used to render the admin dashboard."),
editable=False,
default=dashboard_tags,
)
register_setting(
name="DEVICE_DEFAULT",
description=_("Device specific template sub-directory to use as the "
"default device."),
editable=False,
default="",
)
register_setting(
name="DEVICE_USER_AGENTS",
description=_("Mapping of device specific template sub-directory names to "
"the sequence of strings that may be found in their user agents."),
editable=False,
default=(
("mobile", ("2.0 MMP", "240x320", "400X240", "AvantGo", "BlackBerry",
"Blazer", "Cellphone", "Danger", "DoCoMo", "Elaine/3.0",
"EudoraWeb", "Googlebot-Mobile", "hiptop", "IEMobile",
"KYOCERA/WX310K", "LG/U990", "MIDP-2.", "MMEF20", "MOT-V",
"NetFront", "Newt", "Nintendo Wii", "Nitro", "Nokia",
"Opera Mini", "Palm", "PlayStation Portable", "portalmmm",
"Proxinet", "ProxiNet", "SHARP-TQ-GX10", "SHG-i900",
"Small", "SonyEricsson", "Symbian OS", "SymbianOS",
"TS21i-10", "UP.Browser", "UP.Link", "webOS", "Windows CE",
"WinWAP", "YahooSeeker/M1A1-R2D2", "iPhone", "iPod", "Android",
"BlackBerry9530", "LG-TU915 Obigo", "LGE VX", "webOS",
"Nokia5800",)
),
),
)
register_setting(
name="FORMS_USE_HTML5",
description=_("If ``True``, website forms will use HTML5 features."),
editable=False,
default=False,
)
register_setting(
name="EMAIL_FAIL_SILENTLY",
description=_("If ``True``, failures to send email will happen "
"silently, otherwise an exception is raised. "
"Defaults to ``settings.DEBUG``."),
editable=False,
default=settings.DEBUG,
)
register_setting(
name="EXTRA_MODEL_FIELDS",
description=_("A sequence of fields that will be injected into "
"Mezzanine's (or any library's) models. Each item in the sequence is "
"a four item sequence. The first two items are the dotted path to the "
"model and its field name to be added, and the dotted path to the "
"field class to use for the field. The third and fourth items are a "
"sequence of positional args and a dictionary of keyword args, to use "
"when creating the field instance. When specifying the field class, "
"the path ``django.models.db.`` can be omitted for regular Django "
"model fields."),
editable=False,
default=(),
)
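# Hedged example (not in the original source) of the four-item format described
# above, as it might appear in a project's settings module; the model, field
# and argument values are arbitrary:
#
#   EXTRA_MODEL_FIELDS = (
#       ("mezzanine.blog.models.BlogPost.image",   # dotted model path + new field name
#        "ImageField",                             # ``django.db.models.`` prefix omitted
#        ("Image",),                               # positional args for the field
#        {"blank": True, "upload_to": "blog"}),    # keyword args for the field
#   )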
register_setting(
name="GOOGLE_ANALYTICS_ID",
label=_("Google Analytics ID"),
description=_("Google Analytics ID (http://www.google.com/analytics/)"),
editable=True,
default="",
)
register_setting(
name="HOST_THEMES",
description=_("A sequence mapping host names to themes, allowing "
"different templates to be served per HTTP host. "
"Each item in the sequence is a two item sequence, "
"containing a host such as ``othersite.example.com``, and "
"the name of an importable Python package for the theme. "
"If the host is matched for a request, the templates "
"directory inside the theme package will be first searched "
"when loading templates."),
editable=False,
default=(),
)
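# Hedged example (not in the original source) of the host-to-theme mapping
# described above; the host names and theme packages are arbitrary:
#
#   HOST_THEMES = (
#       ("othersite.example.com", "othersite_theme"),
#       ("mobile.example.com", "mobile_theme"),
#   )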
register_setting(
name="INLINE_EDITING_ENABLED",
description=_("If ``True``, front-end inline editing will be enabled."),
editable=False,
default=True,
)
register_setting(
name="JQUERY_FILENAME",
label=_("Name of the jQuery file."),
description=_("Name of the jQuery file found in "
"mezzanine/core/static/mezzanine/js/"),
editable=False,
default="jquery-1.7.1.min.js",
)
register_setting(
name="JQUERY_UI_FILENAME",
label=_("Name of the jQuery UI file."),
description=_("Name of the jQuery UI file found in "
"mezzanine/core/static/mezzanine/js/"),
editable=False,
default="jquery-ui-1.9.1.custom.min.js",
)
register_setting(
name="MAX_PAGING_LINKS",
label=_("Max paging links"),
description=_("Max number of paging links to display when paginating."),
editable=True,
default=10,
)
register_setting(
name="MEDIA_LIBRARY_PER_SITE",
label=_("Media library per site"),
description=_("If ``True``, each site will use its own directory within "
"the filebrowser media library."),
editable=False,
default=False,
)
register_setting(
name="OWNABLE_MODELS_ALL_EDITABLE",
description=_("Models that subclass ``Ownable`` and use the "
"``OwnableAdmin`` have their admin change-list records filtered "
"down to records owned by the current user. This setting contains a "
"sequence of models in the format ``app_label.object_name``, that "
"when subclassing ``Ownable``, will still show all records in the "
"admin change-list interface, regardless of the current user."),
editable=False,
default=(),
)
register_setting(
name="RICHTEXT_WIDGET_CLASS",
description=_("Dotted package path and class name of the widget to use "
"for the ``RichTextField``."),
editable=False,
default="mezzanine.core.forms.TinyMceWidget",
)
register_setting(
name="RICHTEXT_ALLOWED_TAGS",
description=_("List of HTML tags that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("a", "abbr", "acronym", "address", "area", "article", "aside",
"b", "bdo", "big", "blockquote", "br", "button", "caption", "center",
"cite", "code", "col", "colgroup", "dd", "del", "dfn", "dir", "div",
"dl", "dt", "em", "fieldset", "figure", "font", "footer", "form",
"h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "i", "img",
"input", "ins", "kbd", "label", "legend", "li", "map", "menu",
"nav", "ol", "optgroup", "option", "p", "pre", "q", "s", "samp",
"section", "select", "small", "span", "strike", "strong",
"sub", "sup", "table", "tbody", "td", "textarea",
"tfoot", "th", "thead", "tr", "tt", "u", "ul", "var", "wbr"),
)
register_setting(
name="RICHTEXT_ALLOWED_ATTRIBUTES",
description=_("List of HTML attributes that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("abbr", "accept", "accept-charset", "accesskey", "action",
"align", "alt", "axis", "border", "cellpadding", "cellspacing",
"char", "charoff", "charset", "checked", "cite", "class", "clear",
"cols", "colspan", "color", "compact", "coords", "datetime", "dir",
"disabled", "enctype", "for", "frame", "headers", "height", "href",
"hreflang", "hspace", "id", "ismap", "label", "lang", "longdesc",
"maxlength", "media", "method", "multiple", "name", "nohref",
"noshade", "nowrap", "prompt", "readonly", "rel", "rev", "rows",
"rowspan", "rules", "scope", "selected", "shape", "size", "span",
"src", "start", "style", "summary", "tabindex", "target", "title",
"type", "usemap", "valign", "value", "vspace", "width", "xml:lang"),
)
register_setting(
name="RICHTEXT_ALLOWED_STYLES",
description=_("List of inline CSS styles that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("margin-top", "margin-bottom", "margin-left", "margin-right",
"float", "vertical-align", "border", "margin"),
)
register_setting(
name="RICHTEXT_FILTERS",
description=_("List of dotted paths to functions, called in order, on a "
"``RichTextField`` value before it is rendered to the template."),
editable=False,
default=(),
)
RICHTEXT_FILTER_LEVEL_HIGH = 1
RICHTEXT_FILTER_LEVEL_LOW = 2
RICHTEXT_FILTER_LEVEL_NONE = 3
RICHTEXT_FILTER_LEVELS = (
(RICHTEXT_FILTER_LEVEL_HIGH, _("High")),
(RICHTEXT_FILTER_LEVEL_LOW, _("Low (allows video, iframe, Flash, etc)")),
(RICHTEXT_FILTER_LEVEL_NONE, _("No filtering")),
)
register_setting(
name="RICHTEXT_FILTER_LEVEL",
label=_("Rich Text filter level"),
description=_("*Do not change this setting unless you know what you're "
"doing.*\n\nWhen content is saved in a Rich Text (WYSIWYG) field, "
"unsafe HTML tags and attributes are stripped from the content to "
"protect against staff members intentionally adding code that could "
"be used to cause problems, such as changing their account to "
"a super-user with full access to the system.\n\n"
"This setting allows you to change the level of filtering that "
"occurs. Setting it to low will allow certain extra tags to be "
"permitted, such as those required for embedding video. While these "
"tags are not the main candidates for users adding malicious code, "
"they are still considered dangerous and could potentially be "
"mis-used by a particularly technical user, and so are filtered out "
"when the filtering level is set to high.\n\n"
"Setting the filtering level to no filtering, will disable all "
"filtering, and allow any code to be entered by staff members, "
"including script tags."),
editable=True,
choices=RICHTEXT_FILTER_LEVELS,
default=RICHTEXT_FILTER_LEVEL_HIGH,
)
register_setting(
name="SEARCH_MODEL_CHOICES",
description=_("Sequence of models that will be provided by default as "
"choices in the search form. Each model should be in the format "
"``app_label.model_name``. Only models that subclass "
"``mezzanine.core.models.Displayable`` should be used."),
editable=False,
default=("pages.Page", "blog.BlogPost"),
)
register_setting(
name="SEARCH_PER_PAGE",
label=_("Search results per page"),
description=_("Number of results shown in the search results page."),
editable=True,
default=10,
)
register_setting(
name="SITE_PREFIX",
description=_("A URL prefix for mounting all of Mezzanine's urlpatterns "
"under. When using this, you'll also need to manually apply it to "
"your project's root ``urls.py`` module. The root ``urls.py`` module "
"provided by Mezzanine's ``mezzanine-project`` command contains an "
"example of this towards its end."),
editable=False,
default="",
)
register_setting(
name="SITE_TITLE",
label=_("Site Title"),
description=_("Title that will display at the top of the site, and be "
"appended to the content of the HTML title tags on every page."),
editable=True,
default="Mezzanine",
)
register_setting(
name="SITE_TAGLINE",
label=_("Tagline"),
description=_("A tag line that will appear at the top of all pages."),
editable=True,
default=_("An open source content management platform."),
)
register_setting(
name="SLUGIFY",
description=_("Dotted Python path to the callable for converting "
"strings into URL slugs. Defaults to "
"``mezzanine.utils.urls.slugify_unicode`` which allows for non-ascii "
"URLs. Change to ``django.template.defaultfilters.slugify`` to use "
"Django's slugify function, or something of your own if required."),
editable=False,
default="mezzanine.utils.urls.slugify_unicode",
)
register_setting(
name="SPAM_FILTERS",
description=_("Sequence of dotted Python paths to callable functions "
"used for checking posted content (such as forms or comments) is "
"spam. Each function should accept three arguments: the request "
"object, the form object, and the URL that was posted from. "
"Defaults to ``mezzanine.utils.views.is_spam_akismet`` which will "
"use the http://akismet.com spam filtering service when the "
"``AKISMET_API_KEY`` setting is configured."),
editable=False,
default=("mezzanine.utils.views.is_spam_akismet",),
)
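# Hedged sketch (not in the original source) of the three-argument callable the
# SPAM_FILTERS description requires; the module path and the check itself are
# arbitrary:
#
#   def is_spam_custom(request, form, url):
#       """Return True if the posted content should be treated as spam."""
#       return "casino" in str(form.cleaned_data).lower()
#
#   SPAM_FILTERS = ("myproject.antispam.is_spam_custom",)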
register_setting(
name="SSL_ENABLED",
label=_("Enable SSL"),
description=_("If ``True``, users will be automatically redirected to "
"HTTPS for the URLs specified by the ``SSL_FORCE_URL_PREFIXES`` "
"setting."),
editable=True,
default=False,
)
register_setting(
name="SSL_FORCE_HOST",
label=_("Force Host"),
description=_("Host name that the site should always be accessed via that "
"matches the SSL certificate."),
editable=True,
default="",
)
register_setting(
name="SSL_FORCE_URL_PREFIXES",
description="Sequence of URL prefixes that will be forced to run over "
"SSL when ``SSL_ENABLED`` is ``True``. i.e. "
"('/admin', '/example') would force all URLs beginning with "
"/admin or /example to run over SSL.",
editable=False,
default=("/admin", "/account"),
)
register_setting(
name="SSL_FORCED_PREFIXES_ONLY",
description=_("If ``True``, only URLs specified by the "
"``SSL_FORCE_URL_PREFIXES`` setting will be accessible over SSL, "
"and all other URLs will be redirected back to HTTP if accessed "
"over HTTPS."),
editable=False,
default=True,
)
register_setting(
name="STOP_WORDS",
description=_("List of words which will be stripped from search queries."),
editable=False,
default=(
"a", "about", "above", "above", "across", "after",
"afterwards", "again", "against", "all", "almost", "alone",
"along", "already", "also", "although", "always", "am",
"among", "amongst", "amoungst", "amount", "an", "and",
"another", "any", "anyhow", "anyone", "anything", "anyway",
"anywhere", "are", "around", "as", "at", "back", "be",
"became", "because", "become", "becomes", "becoming", "been",
"before", "beforehand", "behind", "being", "below", "beside",
"besides", "between", "beyond", "bill", "both", "bottom",
"but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do",
"done", "down", "due", "during", "each", "eg", "eight",
"either", "eleven", "else", "elsewhere", "empty", "enough",
"etc", "even", "ever", "every", "everyone", "everything",
"everywhere", "except", "few", "fifteen", "fifty", "fill",
"find", "fire", "first", "five", "for", "former", "formerly",
"forty", "found", "four", "from", "front", "full", "further",
"get", "give", "go", "had", "has", "hasnt", "have", "he",
"hence", "her", "here", "hereafter", "hereby", "herein",
"hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "ie", "if", "in", "inc",
"indeed", "interest", "into", "is", "it", "its", "itself",
"keep", "last", "latter", "latterly", "least", "less", "ltd",
"made", "many", "may", "me", "meanwhile", "might", "mill",
"mine", "more", "moreover", "most", "mostly", "move", "much",
"must", "my", "myself", "name", "namely", "neither", "never",
"nevertheless", "next", "nine", "no", "nobody", "none",
"noone", "nor", "not", "nothing", "now", "nowhere", "of",
"off", "often", "on", "once", "one", "only", "onto", "or",
"other", "others", "otherwise", "our", "ours", "ourselves",
"out", "over", "own", "part", "per", "perhaps", "please",
"put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should",
"show", "side", "since", "sincere", "six", "sixty", "so",
"some", "somehow", "someone", "something", "sometime",
"sometimes", "somewhere", "still", "such", "system", "take",
"ten", "than", "that", "the", "their", "them", "themselves",
"then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they",
"thickv", "thin", "third", "this", "those", "though",
"three", "through", "throughout", "thru", "thus", "to",
"together", "too", "top", "toward", "towards", "twelve",
"twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever",
"when", "whence", "whenever", "where", "whereafter", "whereas",
"whereby", "wherein", "whereupon", "wherever", "whether",
"which", "while", "whither", "who", "whoever", "whole", "whom",
"whose", "why", "will", "with", "within", "without", "would",
"yet", "you", "your", "yours", "yourself", "yourselves", "the",
),
)
register_setting(
name="TAG_CLOUD_SIZES",
label=_("Tag Cloud Sizes"),
description=_("Number of different sizes for tags when shown as a cloud."),
editable=True,
default=4,
)
register_setting(
name="TEMPLATE_ACCESSIBLE_SETTINGS",
description=_("Sequence of setting names available within templates."),
editable=False,
default=(
"ACCOUNTS_APPROVAL_REQUIRED", "ACCOUNTS_VERIFICATION_REQUIRED",
"ADMIN_MENU_COLLAPSED",
"BITLY_ACCESS_TOKEN", "BLOG_USE_FEATURED_IMAGE",
"COMMENTS_DISQUS_SHORTNAME", "COMMENTS_NUM_LATEST",
"COMMENTS_DISQUS_API_PUBLIC_KEY", "COMMENTS_DISQUS_API_SECRET_KEY",
"COMMENTS_USE_RATINGS", "DEV_SERVER", "FORMS_USE_HTML5",
"GRAPPELLI_INSTALLED", "GOOGLE_ANALYTICS_ID", "JQUERY_FILENAME",
"LOGIN_URL", "LOGOUT_URL", "SITE_TITLE", "SITE_TAGLINE", "USE_L10N",
),
)
register_setting(
name="THUMBNAILS_DIR_NAME",
description=_("Directory name to store thumbnails in, that will be "
"created relative to the original image's directory."),
editable=False,
default=".thumbnails",
)
register_setting(
name="TINYMCE_SETUP_JS",
description=_("URL for the JavaScript file (relative to ``STATIC_URL``) "
"that handles configuring TinyMCE when the default "
"``RICHTEXT_WIDGET_CLASS`` is used."),
editable=False,
default="mezzanine/js/tinymce_setup.js",
)
register_setting(
name="UPLOAD_TO_HANDLERS",
description=_("Dict mapping file field names in the format "
"``app_label.model_name.field_name`` to the Python dotted path "
"to function names that will be used for the file field's "
"``upload_to`` argument."),
editable=False,
default={},
)
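# Hedged example (not in the original source) of the mapping format described
# above; the field reference and handler path are arbitrary:
#
#   UPLOAD_TO_HANDLERS = {
#       "blog.BlogPost.featured_image": "myproject.utils.blog_upload_to",
#   }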
# The following settings are defined here for documentation purposes
# as this file is used to auto-generate the documentation for all
# available settings. They are Mezzanine specific, but their values
# are *always* overridden by the project's settings or local_settings
# modules, so the default values defined here will never be used.
register_setting(
name="USE_SOUTH",
description=_("If ``True``, the south application will be "
"automatically added to the ``INSTALLED_APPS`` setting."),
editable=False,
default=True,
)
|
|
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import TYPE_CHECKING, Dict, List, Union, Tuple, Sequence, Optional, Type, Iterable, Any
from functools import partial
from electrum_mona.plugin import (BasePlugin, hook, Device, DeviceMgr, DeviceInfo,
assert_runs_in_hwd_thread, runs_in_hwd_thread)
from electrum_mona.i18n import _
from electrum_mona.bitcoin import is_address, opcodes
from electrum_mona.util import bfh, versiontuple, UserFacingException
from electrum_mona.transaction import TxOutput, Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_mona.bip32 import BIP32Node
from electrum_mona.storage import get_derivation_used_for_hw_device_encryption
from electrum_mona.keystore import Xpub, Hardware_KeyStore
if TYPE_CHECKING:
import threading
from electrum_mona.wallet import Abstract_Wallet
from electrum_mona.base_wizard import BaseWizard
class HW_PluginBase(BasePlugin):
keystore_class: Type['Hardware_KeyStore']
libraries_available: bool
# define supported library versions: minimum_library <= x < maximum_library
minimum_library = (0,)
maximum_library = (float('inf'),)
DEVICE_IDS: Iterable[Any]
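    # Hedged example (not in the original source): a concrete plugin subclass
    # would typically pin its client library like this (version numbers are
    # arbitrary); check_libraries_available() below enforces the range:
    #
    #   minimum_library = (0, 9, 0)
    #   maximum_library = (1, 0)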
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.device = self.keystore_class.device
self.keystore_class.plugin = self
self._ignore_outdated_fw = False
def is_enabled(self):
return True
def device_manager(self) -> 'DeviceMgr':
return self.parent.device_manager
def create_device_from_hid_enumeration(self, d: dict, *, product_key) -> Optional['Device']:
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', -1)
usage_page = d['usage_page']
id_ = d['serial_number']
if len(id_) == 0:
id_ = str(d['path'])
id_ += str(interface_number) + str(usage_page)
device = Device(path=d['path'],
interface_number=interface_number,
id_=id_,
product_key=product_key,
usage_page=usage_page,
transport_ui_string='hid')
return device
@hook
def close_wallet(self, wallet: 'Abstract_Wallet'):
for keystore in wallet.get_keystores():
if isinstance(keystore, self.keystore_class):
self.device_manager().unpair_xpub(keystore.xpub)
if keystore.thread:
keystore.thread.stop()
def scan_and_create_client_for_device(self, *, device_id: str, wizard: 'BaseWizard') -> 'HardwareClientBase':
devmgr = self.device_manager()
client = wizard.run_task_without_blocking_gui(
task=partial(devmgr.client_by_id, device_id))
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
client.handler = self.create_handler(wizard)
return client
def setup_device(self, device_info: DeviceInfo, wizard: 'BaseWizard', purpose) -> 'HardwareClientBase':
"""Called when creating a new wallet or when using the device to decrypt
an existing wallet. Select the device to use. If the device is
uninitialized, go through the initialization process.
Runs in GUI thread.
"""
raise NotImplementedError()
def get_client(self, keystore: 'Hardware_KeyStore', force_pair: bool = True, *,
devices: Sequence['Device'] = None,
allow_user_interaction: bool = True) -> Optional['HardwareClientBase']:
devmgr = self.device_manager()
handler = keystore.handler
client = devmgr.client_for_keystore(self, handler, keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
return client
def show_address(self, wallet: 'Abstract_Wallet', address, keystore: 'Hardware_KeyStore' = None):
pass # implemented in child classes
def show_address_helper(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not is_address(address):
keystore.handler.show_error(_('Invalid Bitcoin Address'))
return False
if not wallet.is_mine(address):
keystore.handler.show_error(_('Address not in wallet.'))
return False
if type(keystore) != self.keystore_class:
return False
return True
def get_library_version(self) -> str:
"""Returns the version of the 3rd party python library
for the hw wallet. For example '0.9.0'
Returns 'unknown' if library is found but cannot determine version.
Raises 'ImportError' if library is not found.
Raises 'LibraryFoundButUnusable' if found but there was some problem (includes version num).
"""
raise NotImplementedError()
def check_libraries_available(self) -> bool:
def version_str(t):
return ".".join(str(i) for i in t)
try:
# this might raise ImportError or LibraryFoundButUnusable
library_version = self.get_library_version()
# if no exception so far, we might still raise LibraryFoundButUnusable
if (library_version == 'unknown'
or versiontuple(library_version) < self.minimum_library
or versiontuple(library_version) >= self.maximum_library):
raise LibraryFoundButUnusable(library_version=library_version)
except ImportError:
return False
except LibraryFoundButUnusable as e:
library_version = e.library_version
self.libraries_available_message = (
_("Library version for '{}' is incompatible.").format(self.name)
+ '\nInstalled: {}, Needed: {} <= x < {}'
.format(library_version, version_str(self.minimum_library), version_str(self.maximum_library)))
self.logger.warning(self.libraries_available_message)
return False
return True
def get_library_not_available_message(self) -> str:
if hasattr(self, 'libraries_available_message'):
message = self.libraries_available_message
else:
message = _("Missing libraries for {}.").format(self.name)
message += '\n' + _("Make sure you install it with python3")
return message
def set_ignore_outdated_fw(self):
self._ignore_outdated_fw = True
def is_outdated_fw_ignored(self) -> bool:
return self._ignore_outdated_fw
def create_client(self, device: 'Device',
handler: Optional['HardwareHandlerBase']) -> Optional['HardwareClientBase']:
raise NotImplementedError()
def get_xpub(self, device_id: str, derivation: str, xtype, wizard: 'BaseWizard') -> str:
raise NotImplementedError()
def create_handler(self, window) -> 'HardwareHandlerBase':
# note: in Qt GUI, 'window' is either an ElectrumWindow or an InstallWizard
raise NotImplementedError()
def can_recognize_device(self, device: Device) -> bool:
"""Whether the plugin thinks it can handle the given device.
Used for filtering all connected hardware devices to only those by this vendor.
"""
return device.product_key in self.DEVICE_IDS
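# Illustrative sketch only (ExamplePlugin, ExampleKeystore and example_lib are
# hypothetical names): a vendor plugin subclasses HW_PluginBase and fills in
# the class attributes declared above, roughly like this:
#
#     class ExamplePlugin(HW_PluginBase):
#         keystore_class = ExampleKeystore      # a Hardware_KeyStore subclass
#         minimum_library = (0, 1, 0)           # minimum_library <= x < maximum_library
#         maximum_library = (1, 0)
#         DEVICE_IDS = [(0x1234, 0x0001)]       # product_key values this plugin recognizes
#
#         def get_library_version(self):
#             import example_lib
#             return example_lib.__version__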
class HardwareClientBase:
handler = None # type: Optional['HardwareHandlerBase']
def __init__(self, *, plugin: 'HW_PluginBase'):
assert_runs_in_hwd_thread()
self.plugin = plugin
def device_manager(self) -> 'DeviceMgr':
return self.plugin.device_manager()
def is_pairable(self) -> bool:
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def timeout(self, cutoff) -> None:
pass
def is_initialized(self) -> bool:
"""True if initialized, False if wiped."""
raise NotImplementedError()
def label(self) -> Optional[str]:
"""The name given by the user to the device.
Note: labels are shown to the user to help distinguish their devices,
and they are also used as a fallback to distinguish devices programmatically.
So ideally, different devices would have different labels.
"""
# When returning a constant here (i.e. not implementing the method in the way
# it is supposed to work), make sure the return value is in electrum.plugin.PLACEHOLDER_HW_CLIENT_LABELS
return " "
def get_soft_device_id(self) -> Optional[str]:
"""An id-like string that is used to distinguish devices programmatically.
This is a long term id for the device, that does not change between reconnects.
This method should not prompt the user, i.e. no user interaction, as it is used
during USB device enumeration (called for each unpaired device).
Stored in the wallet file.
"""
# This functionality is optional. If not implemented just return None:
return None
def has_usable_connection_with_device(self) -> bool:
raise NotImplementedError()
def get_xpub(self, bip32_path: str, xtype) -> str:
raise NotImplementedError()
@runs_in_hwd_thread
def request_root_fingerprint_from_device(self) -> str:
# digitalbitbox (at least) does not reveal xpubs corresponding to unhardened paths
# so ask for a direct child, and read out fingerprint from that:
child_of_root_xpub = self.get_xpub("m/0'", xtype='standard')
root_fingerprint = BIP32Node.from_xkey(child_of_root_xpub).fingerprint.hex().lower()
return root_fingerprint
@runs_in_hwd_thread
def get_password_for_storage_encryption(self) -> str:
# note: using a different password based on hw device type is highly undesirable! see #5993
derivation = get_derivation_used_for_hw_device_encryption()
xpub = self.get_xpub(derivation, "standard")
password = Xpub.get_pubkey_from_xpub(xpub, ()).hex()
return password
def device_model_name(self) -> Optional[str]:
"""Return the name of the model of this device, which might be displayed in the UI.
E.g. for Trezor, "Trezor One" or "Trezor T".
"""
return None
def manipulate_keystore_dict_during_wizard_setup(self, d: dict) -> None:
"""Called during wallet creation in the wizard, before the keystore
is constructed for the first time. 'd' is the dict that will be
passed to the keystore constructor.
"""
pass
class HardwareHandlerBase:
"""An interface between the GUI and the device handling logic for handling I/O."""
win = None
device: str
def get_wallet(self) -> Optional['Abstract_Wallet']:
if self.win is not None:
if hasattr(self.win, 'wallet'):
return self.win.wallet
def get_gui_thread(self) -> Optional['threading.Thread']:
if self.win is not None:
if hasattr(self.win, 'gui_thread'):
return self.win.gui_thread
def update_status(self, paired: bool) -> None:
pass
def query_choice(self, msg: str, labels: Sequence[str]) -> Optional[int]:
raise NotImplementedError()
def yes_no_question(self, msg: str) -> bool:
raise NotImplementedError()
def show_message(self, msg: str, on_cancel=None) -> None:
raise NotImplementedError()
def show_error(self, msg: str, blocking: bool = False) -> None:
raise NotImplementedError()
def finished(self) -> None:
pass
def get_word(self, msg: str) -> str:
raise NotImplementedError()
def get_passphrase(self, msg: str, confirm: bool) -> Optional[str]:
raise NotImplementedError()
def get_pin(self, msg: str, *, show_strength: bool = True) -> str:
raise NotImplementedError()
def is_any_tx_output_on_change_branch(tx: PartialTransaction) -> bool:
return any([txout.is_change for txout in tx.outputs()])
def trezor_validate_op_return_output_and_get_data(output: TxOutput) -> bytes:
validate_op_return_output(output)
script = output.scriptpubkey
if not (script[0] == opcodes.OP_RETURN and
script[1] == len(script) - 2 and script[1] <= 75):
raise UserFacingException(_("Only OP_RETURN scripts, with one constant push, are supported."))
return script[2:]
def validate_op_return_output(output: TxOutput, *, max_size: int = None) -> None:
script = output.scriptpubkey
if script[0] != opcodes.OP_RETURN:
raise UserFacingException(_("Only OP_RETURN scripts are supported."))
if max_size is not None and len(script) > max_size:
raise UserFacingException(_("OP_RETURN payload too large." + "\n"
+ f"(scriptpubkey size {len(script)} > {max_size})"))
if output.value != 0:
raise UserFacingException(_("Amount for OP_RETURN output must be zero."))
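# Script layout assumed by the two validators above (a sketch): an OP_RETURN
# output with a single direct push is one opcode byte, one length byte, then
# the payload, so script[1] == len(script) - 2 and the payload is script[2:].
#
#     data = b'hello'
#     script = bytes([opcodes.OP_RETURN, len(data)]) + data
#     # script[0] == OP_RETURN, script[1] == 5 == len(script) - 2,
#     # trezor_validate_op_return_output_and_get_data(...) would return b'hello'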
def get_xpubs_and_der_suffixes_from_txinout(tx: PartialTransaction,
txinout: Union[PartialTxInput, PartialTxOutput]) \
-> List[Tuple[str, List[int]]]:
xfp_to_xpub_map = {xfp: bip32node for bip32node, (xfp, path)
in tx.xpubs.items()} # type: Dict[bytes, BIP32Node]
xfps = [txinout.bip32_paths[pubkey][0] for pubkey in txinout.pubkeys]
try:
xpubs = [xfp_to_xpub_map[xfp] for xfp in xfps]
except KeyError as e:
raise Exception(f"Partial transaction is missing global xpub for "
f"fingerprint ({str(e)}) in input/output") from e
xpubs_and_deriv_suffixes = []
for bip32node, pubkey in zip(xpubs, txinout.pubkeys):
xfp, path = txinout.bip32_paths[pubkey]
der_suffix = list(path)[bip32node.depth:]
xpubs_and_deriv_suffixes.append((bip32node.to_xpub(), der_suffix))
return xpubs_and_deriv_suffixes
def only_hook_if_libraries_available(func):
# note: this decorator must wrap @hook, not the other way around,
# as 'hook' uses the name of the function it wraps
def wrapper(self: 'HW_PluginBase', *args, **kwargs):
if not self.libraries_available: return None
return func(self, *args, **kwargs)
return wrapper
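# Usage sketch (the hook name below is illustrative): the availability check
# goes on top, so @hook still sees the original function name.
#
#     @only_hook_if_libraries_available
#     @hook
#     def receive_menu(self, *args, **kwargs):
#         ...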
class LibraryFoundButUnusable(Exception):
def __init__(self, library_version='unknown'):
self.library_version = library_version
class OutdatedHwFirmwareException(UserFacingException):
def text_ignore_old_fw_and_continue(self) -> str:
suffix = (_("The firmware of your hardware device is too old. "
"If possible, you should upgrade it. "
"You can ignore this error and try to continue, however things are likely to break.") + "\n\n" +
_("Ignore and continue?"))
if str(self):
return str(self) + "\n\n" + suffix
else:
return suffix
|
|
#!/usr/bin/env python3
import os
import json
import sqlite3
from pathlib import Path
import tkinter as tk
import tkinter.scrolledtext as tkst
from tkinter import ttk
from tkinter import font
from tkinter import Menu
from tkinter import IntVar
from tkinter import PhotoImage
from tkinter import messagebox
from tkinter import Checkbutton
from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
import csv_handler
import database_handler
import facesheet
class RestorativeJusticeApp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
with open('app_files/defaults.json', 'r') as jsonFile:
self.defaults = json.load(jsonFile)
self.wm_title('Restorative Justice App -- Produced by Scott Frasier')
img = PhotoImage(file='app_files/icon.gif')
self.tk.call('wm', 'iconphoto', self._w, img)
container = tk.Frame(self)
container.pack(side='top', fill='both', expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = dict()
self.frames['ButtonFrame'] = ButtonFrame(parent=container, controller=self)
self.frames['OutputFrame'] = OutputFrame(parent=container, controller=self)
self.frames['SelectionFrame'] = SelectionFrame(parent=container, controller=self)
self.frames['ButtonFrame'].grid(row=1, column=0, sticky='nsew')
self.frames['OutputFrame'].grid(row=0, column=0, sticky='nsew')
self.frames['SelectionFrame'].grid(row=0, column=1, rowspan=3, sticky='nsew')
self.frames['ButtonFrame'].config()
self.frames['OutputFrame'].config()
self.frames['SelectionFrame'].config()
menubar = MenuBar(self)
self.config(menu=menubar, pady=10, padx=10)
self.AppLogic = AppLogic(controller=self)
db_path = Path('app_files/app_db.sqlite3')
if db_path.is_file():
self.db = sqlite3.connect('app_files/app_db.sqlite3')
self.cursor = self.db.cursor()
else:
if messagebox.askokcancel('Database not found', 'Would you like to create a database?'):
self.db = sqlite3.connect('app_files/app_db.sqlite3')
self.cursor = self.db.cursor()
database_handler.create_table(self.cursor)
self.db.commit()
else:
quit()
class MenuBar(tk.Menu):
def __init__(self, controller):
tk.Menu.__init__(self, controller)
self.controller = controller
############################## Sub-Menu ##############################
file_menu = tk.Menu(self, activeborderwidth=1, tearoff=False)
self.add_cascade(label='File', menu=file_menu)
file_menu.add_command(label='Exit', command=self.quit)
############################## Sub-Menu ##############################
defaults = tk.Menu(self, activeborderwidth=1, tearoff=False)
self.add_cascade(label='Defaults', menu=defaults)
        defaults.add_command(label='Display current defaults', command=self.display_defaults)
defaults.add_separator()
defaults.add_command(label='Make current selections default', command=self.change_defaults)
defaults.add_separator()
        defaults.add_command(label='Restore defaults', command=self.restore_defaults)
############################## Sub-Menu ##############################
receipt_menu = tk.Menu(self, activeborderwidth=1, tearoff=False)
self.add_cascade(label='Receipt', menu=receipt_menu)
receipt_menu.add_command(label='Save receipt', command=self.save_receipt)
############################## Helper Functions ##########################
def save_receipt(self):
receipt_path = askdirectory(title='Save the Receipt?')
rows = database_handler.receipt(self.controller.cursor)
csv_handler.write_receipt(receipt_path, rows)
    def display_defaults(self):
        self.controller.frames['OutputFrame'].update_output_text('-' * 80 + '\n')
for default in self.controller.defaults['DEFAULT_LIST']:
self.controller.frames['OutputFrame'].update_output_text(default + '\n')
self.controller.frames['OutputFrame'].update_output_text('-' * 80 + '\n\n')
def change_defaults(self):
        self.controller.defaults['DEFAULT_LIST'] = self.controller.frames['SelectionFrame'].current_selection()
with open('app_files/defaults.json', 'w') as jsonFile:
jsonFile.write(json.dumps(self.controller.defaults))
def restore_defaults(self):
self.controller.defaults['DEFAULT_LIST'] = self.controller.defaults['RESTORE']
with open('app_files/defaults.json', 'w') as jsonFile:
jsonFile.write(json.dumps(self.controller.defaults))
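# Assumed shape of app_files/defaults.json, inferred from the keys used above
# (the incident-type strings are illustrative only):
#
#     {
#         "DEFAULT_LIST": ["THEFT", "VANDALISM"],
#         "RESTORE": ["THEFT", "VANDALISM"]
#     }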
class OutputFrame(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
############################# UI Elements ############################
self.output = tkst.ScrolledText(self, wrap='word', bg='#000000', foreground='#00ff00')
############################### LAYOUT ###############################
self.output.pack(fill='both', expand='yes')
self.update_output_text('Hello.\n\n')
############################## Helper Functions ##########################
def update_output_text(self, message):
self.output.config(state='normal')
self.output.insert('insert', message)
self.output.see('end')
self.output.config(state='disabled')
class ButtonFrame(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
############################# UI Elements ############################
self.select_button = ttk.Button(
self, text='Select', command=self.get_path)
self.run_button = ttk.Button(
self, text='Run', command=lambda: self.controller.AppLogic.run())
self.check_var1 = IntVar()
self.check_var2 = IntVar()
self.check_var1.set(1)
self.check_var2.set(1)
check_box1 = Checkbutton(self, text='Create Face-Sheets', variable=self.check_var1,
onvalue=1, offvalue=0, height=5, width=15)
check_box2 = Checkbutton(self, text='File by District', variable=self.check_var2,
onvalue=1, offvalue=0, height=5, width=15)
############################### LAYOUT ###############################
pad = 10
self.run_button.pack(side='right', pady=pad, padx=pad)
self.select_button.pack(side='right', pady=pad, padx=pad)
self.run_button.config(state='disabled')
check_box1.pack(side='left')
check_box2.pack(side='left')
############################## Helper Functions ##########################
def get_path(self):
file_types = [('csv file ending with .csv', '*.csv'), ]
lerms_report = askopenfilename(filetypes=file_types)
if lerms_report:
self.controller.AppLogic.report_selected(lerms_report)
class SelectionFrame(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
mfont = font.Font(family='times', size=12)
width = mfont.measure('')
############################# UI Elements ############################
scrollbar = tk.Scrollbar(self, orient='vertical')
self.listbox = tk.Listbox(self, yscrollcommand=scrollbar.set, selectmode='multiple',
width=width, bg='#000000', foreground='#00ff00', selectbackground='#00ff00')
self.listbox.config(yscrollcommand=scrollbar.set,)
scrollbar.config(command=self.listbox.yview)
############################### LAYOUT ###############################
scrollbar.pack(side='right', fill='y')
self.listbox.pack(fill='both', expand='1')
self.update_list([' Incident Types '])
############################## Helper Functions ##########################
def update_list(self, selection_list):
self.listbox.delete(0, 'end')
selection_list = list(selection_list)
selection_list.sort()
for item in selection_list:
self.listbox.insert('end', item)
for default in self.controller.defaults['DEFAULT_LIST']:
if default in selection_list:
self.listbox.select_set(selection_list.index(default))
def current_selection(self):
selected = [self.listbox.get(item)
for item in self.listbox.curselection()]
return selected
class AppLogic(tk.Frame):
def __init__(self, controller):
self.controller = controller
self.controller.frames['OutputFrame'].update_output_text(
            'Please select the Restorative Justice CSV file generated from LERMs.\n\n')
def report_selected(self, path):
file_name = os.path.basename(path)
self.controller.frames['OutputFrame'].update_output_text(
f'You have selected {file_name} as the report generated from LERMs.\n\n')
rows = csv_handler.open_csv(path)
if rows:
database_handler.insert_rows(self.controller.cursor, rows)
self.controller.db.commit()
selection_list = database_handler.offense_types(self.controller.cursor)
self.controller.frames['SelectionFrame'].update_list(selection_list)
self.controller.frames['ButtonFrame'].run_button.config(state='normal')
self.controller.frames['OutputFrame'].update_output_text(
'Select the incident types to be considered and press the Run button.\n\n')
else:
if not messagebox.askokcancel('Bad Headers!', 'Select a new csv file?'):
quit()
def run(self):
offense_list = self.controller.frames['SelectionFrame'].current_selection()
database_handler.fileter_data(self.controller.cursor, offense_list)
self.controller.db.commit()
create_face = self.controller.frames['ButtonFrame'].check_var1.get()
file_by_district = self.controller.frames['ButtonFrame'].check_var2.get()
if create_face:
results_path = askdirectory(title='Save the Results?')
for row in database_handler.query_status(self.controller.cursor, 0):
database_handler.update_status(self.controller.cursor, 100, row[0])
if create_face:
facesheet.assemble_sheet(row, results_path, file_by_district)
self.controller.db.commit()
self.controller.frames['OutputFrame'].update_output_text('Finished...\n\n')
if __name__ == '__main__':
app = RestorativeJusticeApp()
app.mainloop()
|
|
"""Support for repeating alerts when conditions are met."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TITLE,
DOMAIN as DOMAIN_NOTIFY,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ENTITY_ID,
CONF_NAME,
CONF_REPEAT,
CONF_STATE,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers import event, service
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.util.dt import now
_LOGGER = logging.getLogger(__name__)
DOMAIN = "alert"
CONF_CAN_ACK = "can_acknowledge"
CONF_NOTIFIERS = "notifiers"
CONF_SKIP_FIRST = "skip_first"
CONF_ALERT_MESSAGE = "message"
CONF_DONE_MESSAGE = "done_message"
CONF_TITLE = "title"
CONF_DATA = "data"
DEFAULT_CAN_ACK = True
DEFAULT_SKIP_FIRST = False
ALERT_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_STATE, default=STATE_ON): cv.string,
vol.Required(CONF_REPEAT): vol.All(
cv.ensure_list,
[vol.Coerce(float)],
# Minimum delay is 1 second = 0.016 minutes
[vol.Range(min=0.016)],
),
vol.Required(CONF_CAN_ACK, default=DEFAULT_CAN_ACK): cv.boolean,
vol.Required(CONF_SKIP_FIRST, default=DEFAULT_SKIP_FIRST): cv.boolean,
vol.Optional(CONF_ALERT_MESSAGE): cv.template,
vol.Optional(CONF_DONE_MESSAGE): cv.template,
vol.Optional(CONF_TITLE): cv.template,
vol.Optional(CONF_DATA): dict,
vol.Required(CONF_NOTIFIERS): cv.ensure_list,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(ALERT_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
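# Example configuration accepted by the schemas above (a sketch; the entity id
# and notifier name are hypothetical). In YAML this lives under the "alert:"
# key of the Home Assistant configuration:
#
#     CONFIG_SCHEMA({
#         "alert": {
#             "garage_door": {
#                 "name": "Garage door is open",
#                 "entity_id": "input_boolean.garage_door",
#                 "state": "on",
#                 "repeat": [5, 15, 30],        # minutes between notifications
#                 "can_acknowledge": True,
#                 "skip_first": False,
#                 "notifiers": ["mobile_app_phone"],
#             }
#         }
#     })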
ALERT_SERVICE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_ids})
def is_on(hass, entity_id):
"""Return if the alert is firing and not acknowledged."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
"""Set up the Alert component."""
entities = []
for object_id, cfg in config[DOMAIN].items():
if not cfg:
cfg = {}
name = cfg[CONF_NAME]
watched_entity_id = cfg[CONF_ENTITY_ID]
alert_state = cfg[CONF_STATE]
repeat = cfg[CONF_REPEAT]
skip_first = cfg[CONF_SKIP_FIRST]
message_template = cfg.get(CONF_ALERT_MESSAGE)
done_message_template = cfg.get(CONF_DONE_MESSAGE)
notifiers = cfg[CONF_NOTIFIERS]
can_ack = cfg[CONF_CAN_ACK]
title_template = cfg.get(CONF_TITLE)
data = cfg.get(CONF_DATA)
entities.append(
Alert(
hass,
object_id,
name,
watched_entity_id,
alert_state,
repeat,
skip_first,
message_template,
done_message_template,
notifiers,
can_ack,
title_template,
data,
)
)
if not entities:
return False
async def async_handle_alert_service(service_call):
"""Handle calls to alert services."""
alert_ids = await service.async_extract_entity_ids(hass, service_call)
for alert_id in alert_ids:
for alert in entities:
if alert.entity_id != alert_id:
continue
alert.async_set_context(service_call.context)
if service_call.service == SERVICE_TURN_ON:
await alert.async_turn_on()
elif service_call.service == SERVICE_TOGGLE:
await alert.async_toggle()
else:
await alert.async_turn_off()
# Setup service calls
hass.services.async_register(
DOMAIN,
SERVICE_TURN_OFF,
async_handle_alert_service,
schema=ALERT_SERVICE_SCHEMA,
)
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, async_handle_alert_service, schema=ALERT_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, async_handle_alert_service, schema=ALERT_SERVICE_SCHEMA
)
for alert in entities:
alert.async_write_ha_state()
return True
class Alert(ToggleEntity):
"""Representation of an alert."""
_attr_should_poll = False
def __init__(
self,
hass,
entity_id,
name,
watched_entity_id,
state,
repeat,
skip_first,
message_template,
done_message_template,
notifiers,
can_ack,
title_template,
data,
):
"""Initialize the alert."""
self.hass = hass
self._attr_name = name
self._alert_state = state
self._skip_first = skip_first
self._data = data
self._message_template = message_template
if self._message_template is not None:
self._message_template.hass = hass
self._done_message_template = done_message_template
if self._done_message_template is not None:
self._done_message_template.hass = hass
self._title_template = title_template
if self._title_template is not None:
self._title_template.hass = hass
self._notifiers = notifiers
self._can_ack = can_ack
self._delay = [timedelta(minutes=val) for val in repeat]
self._next_delay = 0
self._firing = False
self._ack = False
self._cancel = None
self._send_done_message = False
self.entity_id = f"{DOMAIN}.{entity_id}"
event.async_track_state_change_event(
hass, [watched_entity_id], self.watched_entity_change
)
@property
def state(self):
"""Return the alert status."""
if self._firing:
if self._ack:
return STATE_OFF
return STATE_ON
return STATE_IDLE
async def watched_entity_change(self, ev):
"""Determine if the alert should start or stop."""
to_state = ev.data.get("new_state")
if to_state is None:
return
_LOGGER.debug("Watched entity (%s) has changed", ev.data.get("entity_id"))
if to_state.state == self._alert_state and not self._firing:
await self.begin_alerting()
if to_state.state != self._alert_state and self._firing:
await self.end_alerting()
async def begin_alerting(self):
"""Begin the alert procedures."""
_LOGGER.debug("Beginning Alert: %s", self._attr_name)
self._ack = False
self._firing = True
self._next_delay = 0
if not self._skip_first:
await self._notify()
else:
await self._schedule_notify()
self.async_write_ha_state()
async def end_alerting(self):
"""End the alert procedures."""
_LOGGER.debug("Ending Alert: %s", self._attr_name)
self._cancel()
self._ack = False
self._firing = False
if self._send_done_message:
await self._notify_done_message()
self.async_write_ha_state()
async def _schedule_notify(self):
"""Schedule a notification."""
delay = self._delay[self._next_delay]
next_msg = now() + delay
self._cancel = event.async_track_point_in_time(
self.hass, self._notify, next_msg
)
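        # Advance to the next configured delay; once the last one is reached it
        # is reused for every subsequent repeat.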
self._next_delay = min(self._next_delay + 1, len(self._delay) - 1)
async def _notify(self, *args):
"""Send the alert notification."""
if not self._firing:
return
if not self._ack:
_LOGGER.info("Alerting: %s", self._attr_name)
self._send_done_message = True
if self._message_template is not None:
message = self._message_template.async_render(parse_result=False)
else:
message = self._attr_name
await self._send_notification_message(message)
await self._schedule_notify()
async def _notify_done_message(self, *args):
"""Send notification of complete alert."""
_LOGGER.info("Alerting: %s", self._done_message_template)
self._send_done_message = False
if self._done_message_template is None:
return
message = self._done_message_template.async_render(parse_result=False)
await self._send_notification_message(message)
async def _send_notification_message(self, message):
msg_payload = {ATTR_MESSAGE: message}
if self._title_template is not None:
title = self._title_template.async_render(parse_result=False)
msg_payload.update({ATTR_TITLE: title})
if self._data:
msg_payload.update({ATTR_DATA: self._data})
_LOGGER.debug(msg_payload)
for target in self._notifiers:
await self.hass.services.async_call(
DOMAIN_NOTIFY, target, msg_payload, context=self._context
)
async def async_turn_on(self, **kwargs):
"""Async Unacknowledge alert."""
_LOGGER.debug("Reset Alert: %s", self._attr_name)
self._ack = False
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Async Acknowledge alert."""
_LOGGER.debug("Acknowledged Alert: %s", self._attr_name)
self._ack = True
self.async_write_ha_state()
async def async_toggle(self, **kwargs):
"""Async toggle alert."""
if self._ack:
return await self.async_turn_on()
return await self.async_turn_off()
|
|
__author__ = 'saeedamen'
from pythalesians.util.loggermanager import LoggerManager
from datetime import datetime
class TimeSeriesRequest:
# properties
#
# data_source eg. bbg, yahoo, quandl
# start_date
# finish_date
# tickers (can be list) eg. EURUSD
# category (eg. fx, equities, fixed_income, cal_event, fundamental)
# freq_mult (eg. 1)
# freq
    # gran_freq (eg. tick, minute, hourly, daily, weekly, monthly, quarterly, yearly)
# fields (can be list)
# vendor_tickers (optional)
# vendor_fields (optional)
# cache_algo (eg. internet, disk, memory) - internet will forcibly download from the internet
# environment (eg. prod, backtest) - old data is saved with prod, backtest will overwrite the last data point
def __init__(self, data_source = None,
start_date = None, finish_date = None, tickers = None, category = None, freq_mult = None, freq = None,
gran_freq = None, cut = None,
fields = None, cache_algo = None,
vendor_tickers = None, vendor_fields = None,
environment = "backtest", trade_side = 'trade'
):
self.logger = LoggerManager().getLogger(__name__)
self.freq_mult = 1
if data_source is not None: self.data_source = data_source
if start_date is not None: self.start_date = start_date
if finish_date is not None: self.finish_date = finish_date
if tickers is not None: self.tickers = tickers
if category is not None: self.category = category
if gran_freq is not None: self.gran_freq = gran_freq
if freq_mult is not None: self.freq_mult = freq_mult
if freq is not None: self.freq = freq
if cut is not None: self.cut = cut
if fields is not None: self.fields = fields
if cache_algo is not None: self.cache_algo = cache_algo
if vendor_tickers is not None: self.vendor_tickers = vendor_tickers
if vendor_fields is not None: self.vendor_fields = vendor_fields
if environment is not None: self.environment = environment
if trade_side is not None: self.trade_side = trade_side
@property
def data_source(self):
return self.__data_source
@data_source.setter
def data_source(self, data_source):
valid_data_source = ['ats', 'bloomberg', 'dukascopy', 'fred', 'gain', 'google', 'quandl', 'yahoo']
        if data_source not in valid_data_source:
            self.logger.warning(data_source + " is not a defined data source.")
self.__data_source = data_source
@property
def category(self):
return self.__category
@category.setter
def category(self, category):
self.__category = category
@property
def tickers(self):
return self.__tickers
@tickers.setter
def tickers(self, tickers):
if not isinstance(tickers, list):
tickers = [tickers]
self.__tickers = tickers
@property
def fields(self):
return self.__fields
@fields.setter
def fields(self, fields):
valid_fields = ['open', 'high', 'low', 'close', 'volume', 'numEvents']
if not isinstance(fields, list):
fields = [fields]
for field_entry in fields:
if not field_entry in valid_fields:
i = 0
# self.logger.warning(field_entry + " is not a valid field.")
# add error checking
self.__fields = fields
@property
def vendor_tickers(self):
return self.__vendor_tickers
@vendor_tickers.setter
def vendor_tickers(self, vendor_tickers):
if not isinstance(vendor_tickers, list):
            vendor_tickers = [vendor_tickers]
self.__vendor_tickers = vendor_tickers
@property
def vendor_fields(self):
return self.__vendor_fields
@vendor_fields.setter
def vendor_fields(self, vendor_fields):
if not isinstance(vendor_fields, list):
vendor_fields = [vendor_fields]
self.__vendor_fields = vendor_fields
@property
def freq(self):
return self.__freq
@freq.setter
def freq(self, freq):
freq = freq.lower()
valid_freq = ['tick', 'intraday', 'daily']
        if freq not in valid_freq:
            self.logger.warning(freq + " is not a defined frequency")
self.__freq = freq
@property
def gran_freq(self):
return self.__gran_freq
@gran_freq.setter
def gran_freq(self, gran_freq):
gran_freq = gran_freq.lower()
valid_gran_freq = ['tick', 'minute', 'hourly', 'pseudodaily', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly']
        if gran_freq not in valid_gran_freq:
            self.logger.warning(gran_freq + " is not a defined frequency")
if gran_freq in ['minute', 'hourly']:
self.__freq = 'intraday'
elif gran_freq in ['tick']:
self.__freq = 'tick'
else:
self.__freq = 'daily'
self.__gran_freq = gran_freq
@property
def freq_mult(self):
return self.__freq_mult
@freq_mult.setter
def freq_mult(self, freq_mult):
self.__freq_mult = freq_mult
@property
def start_date(self):
return self.__start_date
@start_date.setter
def start_date(self, start_date):
self.__start_date = self.date_parser(start_date)
@property
def finish_date(self):
return self.__finish_date
@finish_date.setter
def finish_date(self, finish_date):
self.__finish_date = self.date_parser(finish_date)
@property
def cut(self):
return self.__cut
@cut.setter
def cut(self, cut):
self.__cut = cut
def date_parser(self, date):
if isinstance(date, str):
# format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M'
try:
date = datetime.strptime(date, '%b %d %Y %H:%M')
except:
# self.logger.warning("Attempted to parse date")
i = 0
# format expected '1 Jun 2005 01:33', '%d %b %Y %H:%M'
try:
date = datetime.strptime(date, '%d %b %Y %H:%M')
except:
# self.logger.warning("Attempted to parse date")
i = 0
try:
date = datetime.strptime(date, '%b %d %Y')
except:
# self.logger.warning("Attempted to parse date")
i = 0
try:
date = datetime.strptime(date, '%d %b %Y')
except:
# self.logger.warning("Attempted to parse date")
i = 0
return date
@property
def cache_algo(self):
return self.__cache_algo
@cache_algo.setter
def cache_algo(self, cache_algo):
cache_algo = cache_algo.lower()
valid_cache_algo = ['internet_load', 'internet_load_return', 'cache_algo', 'cache_algo_return']
if not cache_algo in valid_cache_algo:
self.logger.warning(cache_algo + " is not a defined caching scheme")
self.__cache_algo = cache_algo
@property
def environment(self):
return self.__environment
@environment.setter
def environment(self, environment):
environment = environment.lower()
        valid_environment = ['prod', 'backtest']
if not environment in valid_environment:
self.logger.warning(environment + " is not a defined environment.")
self.__environment = environment
@property
def trade_side(self):
return self.__trade_side
@trade_side.setter
def trade_side(self, trade_side):
trade_side = trade_side.lower()
valid_trade_side = ['trade', 'bid', 'ask']
if not trade_side in valid_trade_side:
self.logger.warning(trade_side + " is not a defined trade side.")
self.__trade_side = trade_side
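# Usage sketch (the tickers and vendor fields below are illustrative only):
#
#     time_series_request = TimeSeriesRequest(
#         start_date="01 Jan 2014",              # parsed by date_parser ('%d %b %Y')
#         finish_date="01 Jun 2014",
#         data_source="bloomberg",               # must appear in valid_data_source
#         freq="daily",
#         tickers=["EURUSD"],
#         fields=["close"],
#         vendor_tickers=["EURUSD Curncy"],
#         vendor_fields=["PX_LAST"],
#         cache_algo="internet_load_return",
#     )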
|
|
#! /usr/bin/env python
import click
import re
import sys
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime
# Usage something like:
# merge_callers.py -t TUMOR -n NORMAL -m1 MuTect1.vcf -m2 MuTect2.vcf -s strelka.vcf -i human_g1k_v37_decoy.fasta.fai
@click.command(context_settings = dict( help_option_names = ['-h', '--help'] ))
@click.option('--tumorID', '-t', type=str, help='Tumor sample ID', required=True)
@click.option('--normalID', '-n', type=str, help='Normal sample ID', required=True)
@click.option('--mutect1VCF', '-m1', type=str, help='MuTect1 VCF file', required=True)
@click.option('--mutect2VCF', '-m2', type=str, help='MuTect2 VCF file', required=True)
@click.option('--strelkaVCF', '-s', type=str, help='Strelka VCF file', required=True)
@click.option('--genomeIndex', '-i', type=str, help='Index of the used genome (generated by samtools faidx)', required=True)
def mergeVCFs(tumorid, normalid, mutect1vcf, mutect2vcf, strelkavcf, genomeindex):
# this is the main processing routine
mutect2=parse_mutect2(mutect2vcf)
mutect1=parse_mutect1(mutect1vcf,tumorid,normalid)
strelka=parse_strelka_snvs(strelkavcf)
generate_output(mutect1, mutect2, strelka, tumorid, normalid, genomeindex)
plot_allele_freqs(mutect1, mutect2, strelka, tumorid)
def plot_allele_freqs(mutect1, mutect2, strelka, tumorid):
#columns = ['MuTect1','MuTect2', 'Strelka', 'M1M2I_M1','M1M2I_M2' 'M1SI_M1', 'M1SI_S','M2SI_M2', 'M2SI_S','M1M2SI_M1','M1M2SI_M2','M1M2SI_S' ]
#columns = ['MuTect1_singletons','MuTect2_singletons', 'Strelka_singletons', 'M1M2I', 'M1SI', 'M2SI','M1M2SI']
columns = ['MuTect1_singletons','MuTect2_singletons','Strelka_singletons','MuTect1_all','MuTect2_all','Strelka_all','MuTect1_MuTect2','MuTect1_Strelka','MuTect2_Strelka','MuTect1_MuTect2_Strelka']
count = np.zeros((10), dtype=np.int)
#allele_freq=np.empty(12)
allele_freq=np.empty(10)
#allele_freq[:] = numpy.NAN
all_snvs=set(mutect1['snvs'].keys()+mutect2['snvs'].keys()+strelka['snvs'].keys())
antal=0
for pos in all_snvs:
#this_variant=np.empty(12)
this_variant=np.empty(10)
this_variant[:]=-999
vcfinfo = {}
#Which caller(s) detected the variant?
if pos in mutect1['snvs']:
vcfinfo['mutect1']=mutect1['snvs'][pos]['ad']['tumor']
if pos in mutect2['snvs']:
vcfinfo['mutect2']=mutect2['snvs'][pos]['ad']['tumor']
if pos in strelka['snvs']:
vcfinfo['strelka']=strelka['snvs'][pos]['ad']['tumor']
#Singletons
if 'mutect1' in vcfinfo.keys() and 'mutect2' not in vcfinfo.keys() and 'strelka' not in vcfinfo.keys():
this_variant[0]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
count[0]=count[0]+1
if 'mutect1' not in vcfinfo.keys() and 'mutect2' in vcfinfo.keys() and 'strelka' not in vcfinfo.keys():
this_variant[1]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
count[1]=count[1]+1
if this_variant[1]>1:
print this_variant[1]
print mutect2['snvs'][pos]['ad']['tumor']
if 'mutect1' not in vcfinfo.keys() and 'mutect2' not in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
this_variant[2]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
count[2]=count[2]+1
        #All calls made by each caller
if 'mutect1' in vcfinfo.keys():
this_variant[3]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
count[3]=count[3]+1
if 'mutect2' in vcfinfo.keys():
this_variant[4]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
count[4]=count[4]+1
if 'strelka' in vcfinfo.keys():
this_variant[5]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
count[5]=count[5]+1
        #Intersection of two callers - allele frequency calculated as the mean of the values reported by the two callers
if 'mutect1' in vcfinfo.keys() and 'mutect2' in vcfinfo.keys():
#this_variant[3]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
#this_variant[4]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
this_variant[6]=(float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1])) + float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1])))/2
count[6]=count[6]+1
if 'mutect1' in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
#this_variant[5]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
#this_variant[6]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
this_variant[7]=(float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1])) + float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1])))/2
count[7]=count[7]+1
if 'mutect2' in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
#this_variant[7]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
#this_variant[8]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
this_variant[8]=(float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))+float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1])))/2
count[8]=count[8]+1
        #Intersection of three callers - allele frequency calculated as the mean of the values reported by the three callers
if 'mutect1' in vcfinfo.keys() and 'mutect2' in vcfinfo.keys() and 'strelka' in vcfinfo.keys():
#this_variant[9]=float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1]))
#this_variant[10]=float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1]))
#this_variant[11]=float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1]))
this_variant[9]=(float(vcfinfo['mutect1'].split(",")[1])/(float(vcfinfo['mutect1'].split(",")[0])+float(vcfinfo['mutect1'].split(",")[1])) + float(vcfinfo['mutect2'].split(",")[1])/(float(vcfinfo['mutect2'].split(",")[0])+float(vcfinfo['mutect2'].split(",")[1])) + float(vcfinfo['strelka'].split(",")[1])/(float(vcfinfo['strelka'].split(",")[0])+float(vcfinfo['strelka'].split(",")[1])))/3
count[9]=count[9]+1
allele_freq=np.vstack((allele_freq, this_variant))
#Mask NaNs in allele_freq
masked_allele_freq=np.ma.masked_equal(allele_freq,-999)
allele_freqs_nonempty = [[y for y in row if y] for row in masked_allele_freq.T]
#Create plots and print to PDF file
numBoxes=10
pp = PdfPages(tumorid+'_allele_freqs.pdf')
fig, ax1 = plt.subplots(figsize=(10, 6))
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
x=range(1, len(columns)+1)
bp = plt.boxplot(allele_freqs_nonempty, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_title('SNVs called in '+tumorid+'\n')
ax1.set_xlabel('Call set')
ax1.set_ylabel('Alternative allele frequency')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes + 0.5)
top = 1.2
bottom = 0
ax1.set_ylim(bottom, top)
xtickNames = plt.setp(ax1, xticklabels=columns)
plt.setp(xtickNames, rotation=45, fontsize=8)
#Print counts and medians above the boxes
for tick, label in zip(x, count):
ax1.text(tick, 1.1, 'n = '+str(label),horizontalalignment='center', size='x-small')
median_values=[]
for medline in bp['medians']:
median_values.append(str(round(medline.get_ydata()[0],2)))
for tick, label in zip(x, median_values):
ax1.text(tick, 1, 'm = '+str(label),horizontalalignment='center', size='x-small')
plt.savefig(pp, format='pdf')
pp.close()
print 'printed results to '+tumorid+'_allele_freqs.pdf'
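# Note on the arithmetic above: each 'ad' entry is a "ref,alt" depth string
# (e.g. "37,13"), and the alternative allele frequency is alt / (ref + alt),
# i.e. 13 / (37 + 13) = 0.26 for that example. A helper equivalent to the
# inline expressions would look like this (a sketch; not used by the script):
#
#     def allele_fraction(ad):
#         ref_count, alt_count = [float(x) for x in ad.split(",")]
#         return alt_count / (ref_count + alt_count)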
def generate_output(mutect1, mutect2, strelka, tumorid, normalid, genomeIndex):
snv_file=tumorid+'.snvs.vcf'
avinput=tumorid+'.avinput'
sf = open(snv_file, 'w')
ai = open(avinput, 'w')
sf.write("%s\n" %("##fileformat=VCFv4.2"))
sf.write("%s%s\n" %("##date=",str(datetime.now())))
sf.write("%s%s\n" %("##source=",sys.argv[0]))
sf.write("%s\n" %("##FILTER=<ID=CONCORDANT,Description=\"Called by all three callers (MuTect1, MuTect2 and Strelka)\""))
sf.write("%s\n" %("##FILTER=<ID=DISCORDANT,Description=\"NOT called by all three callers\""))
sf.write("%s\n" %("##INFO=<ID=M1,Number=.,Type=String,Description=\"Called by MuTect1\""))
sf.write("%s\n" %("##INFO=<ID=M2,Number=.,Type=String,Description=\"Called by MuTect2\""))
sf.write("%s\n" %("##INFO=<ID=S,Number=.,Type=String,Description=\"Called by Strelka\""))
sf.write("%s\n" %("##FORMAT=<ID=ADM1,Number=.,Type=Integer,Description=\"Allelic depths reported by MuTect1 for the ref and alt alleles in the order listed\""))
sf.write("%s\n" %("##FORMAT=<ID=ADM2,Number=.,Type=Integer,Description=\"Allelic depths reported by MuTect2 for the ref and alt alleles in the order listed\""))
sf.write("%s\n" %("##FORMAT=<ID=ADS,Number=.,Type=Integer,Description=\"Allelic depths reported by Strelka for the ref and alt alleles in the order listed\""))
sf.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %('#CHROM', 'POS','ID', 'REF', 'ALT','QUAL', 'FILTER', 'INFO','FORMAT', tumorid, normalid))
#All mutated snvs:
all_snvs=set(mutect1['snvs'].keys()+mutect2['snvs'].keys()+strelka['snvs'].keys())
antal=0
sorted_pos=sort_positions(all_snvs, genomeIndex)
for pos in sorted_pos:
#for pos in all_snvs:
vcfinfo = {}
#Which caller(s) detected the variant?
if pos in mutect1['snvs']:
vcfinfo['mutect1']=mutect1['snvs'][pos]['info']
if pos in mutect2['snvs']:
vcfinfo['mutect2']=mutect2['snvs'][pos]['info']
if pos in strelka['snvs']:
vcfinfo['strelka']=strelka['snvs'][pos]['info']
called_by=vcfinfo.keys()
#Do we have the same basic info from all callers? Should be...
if all(value == vcfinfo[called_by[0]] for value in vcfinfo.values()):
format=''
gf_tumor=''
gf_normal=''
callers=''
for c in called_by:
if c=='mutect1':
callers=callers+'M1;'
format=format+'ADM1:'
gf_tumor=gf_tumor+mutect1['snvs'][pos]['ad']['tumor']+':'
gf_normal=gf_normal+mutect1['snvs'][pos]['ad']['normal']+':'
elif c=='mutect2':
callers=callers+'M2;'
format=format+'ADM2:'
gf_tumor=gf_tumor+mutect2['snvs'][pos]['ad']['tumor']+':'
gf_normal=gf_normal+mutect2['snvs'][pos]['ad']['normal']+':'
elif c=='strelka':
callers=callers+'S;'
format=format+'ADS:'
gf_tumor=gf_tumor+strelka['snvs'][pos]['ad']['tumor']+':'
gf_normal=gf_normal+strelka['snvs'][pos]['ad']['normal']+':'
callers = callers[:-1]
format = format[:-1]
gf_tumor = gf_tumor[:-1]
gf_normal = gf_normal[:-1]
antal = antal+1
filter="DISCORDANT"
if len(called_by)==3:
filter="CONCORDANT"
vcfinfolist=vcfinfo[called_by[0]].split('\t')
baseinfo=vcfinfolist[0]+'\t'+vcfinfolist[1]+'\tNA\t'+vcfinfolist[2]+'\t'+vcfinfolist[3]+'\t'+'.'
sf.write("%s\t%s\t%s\t%s\t%s\t%s\n" %(baseinfo,filter, callers, format, gf_tumor, gf_normal))
ai.write("%s\n" %(vcfinfo[called_by[0]]))
else:
            print "Conflict in ref and alt alleles between callers "+", ".join(called_by)+" at pos "+pos
def sort_positions(positions, genomeIndex):
CHROMOSOMES = []
selected = []
sorted = []
for line in open(genomeIndex, 'r'):
line = line.strip()
info = line.split("\t")
CHROMOSOMES.append(info[0])
selected.append([])
for pos in positions:
chr_pos=pos.split("_")
if chr_pos[0] in CHROMOSOMES:
selected[CHROMOSOMES.index(chr_pos[0])].append(int(chr_pos[1]))
for chr in CHROMOSOMES:
selected[CHROMOSOMES.index(chr)].sort()
for pos in selected[CHROMOSOMES.index(chr)]:
sorted.append(chr+'_'+str(pos))
return sorted
def parse_mutect2(vcf):
snvs = {}
indels = {}
datacolumn = {}
for line in open(vcf, 'r'):
line=line.strip()
# Extract column in vcf file for "TUMOR" and "NORMAL"
if line.startswith("#CHROM"):
info = line.split("\t")
for col in range(9, len(info)):
if info[col] in ['TUMOR', 'NORMAL']:
datacolumn[info[col]] = col
else:
print "ERROR: MuTect2 VCF file does not contain column for TUMOR or NORMAL"
break
if not line.startswith("#"):
filter1=re.compile('alt_allele_in_normal')
filter2=re.compile('clustered_events')
filter3=re.compile('germline_risk')
filter4=re.compile('homologous_mapping_event')
filter5=re.compile('multi_event_alt_allele_in_normal')
filter6=re.compile('panel_of_normals')
filter7=re.compile('str_contraction')
filter8=re.compile('t_lod_fstar')
filter9=re.compile('triallelic_site')
f1=filter1.search(line)
f2=filter2.search(line)
f3=filter3.search(line)
f4=filter4.search(line)
f5=filter5.search(line)
f6=filter6.search(line)
f7=filter7.search(line)
f8=filter8.search(line)
f9=filter9.search(line)
if not (f1 or f2 or f3 or f4 or f5 or f6 or f7 or f8 or f9):
info=line.split("\t")
pos=info[0]+'_'+info[1]
vcfinfo=info[0]+'\t'+info[1]+'\t'+info[3]+'\t'+info[4]
ad_tumor=info[datacolumn['TUMOR']].split(":")[1]
ad_normal=info[datacolumn['NORMAL']].split(":")[1]
ref=info[3]
alt=info[4]
alt_alleles = alt.split(",")
if len(alt_alleles) == 1:
#Indels
if len(ref)>1 or len(alt)>1:
indels[pos] = {}
indels[pos]['info']=vcfinfo
indels[pos]['ad'] = {}
indels[pos]['ad']['tumor']=ad_tumor
indels[pos]['ad']['normal']=ad_normal
#snvs
else:
snvs[pos] = {}
snvs[pos]['info']=vcfinfo
snvs[pos]['ad'] = {}
snvs[pos]['ad']['tumor']=ad_tumor
snvs[pos]['ad']['normal']=ad_normal
else:
print "WARNING: MuTect2 variant with multiple alternative alleles detected; skipped and not used in merged callset:"
print line
return {'indels':indels,'snvs':snvs}
def parse_mutect1(vcf, tumorid, normalid):
snvs = {}
datacolumn = {}
for line in open(vcf, 'r'):
line=line.strip()
# Extract column in vcf file for each sample
if line.startswith("#CHROM"):
info = line.split("\t")
for col in range(9, len(info)):
if info[col] in [tumorid, normalid]:
datacolumn[info[col]]=col
else:
print "ERROR: sample ids other than "+tumorid+" or "+normalid+" detected in MuTect1 vcf"
break
if not line.startswith("#"):
filter1=re.compile('REJECT')
f1=filter1.search(line)
if not (f1):
info=line.split("\t")
pos = info[0] + '_' + info[1]
vcfinfo = info[0] + '\t' + info[1] + '\t' + info[3] + '\t' + info[4]
ad_tumor = info[datacolumn[tumorid]].split(":")[1]
ad_normal = info[datacolumn[normalid]].split(":")[1]
alt=info[4]
alt_alleles=alt.split(",")
if len(alt_alleles) == 1:
snvs[pos] = {}
snvs[pos]['info']=vcfinfo
snvs[pos]['ad'] = {}
snvs[pos]['ad']['tumor']=ad_tumor
snvs[pos]['ad']['normal']=ad_normal
else:
print "WARNING: MuTect1 variant with multiple alternative alleles detected; skipped and not used in merged callset."
print line
return {'snvs':snvs}
def parse_strelka_snvs(vcf):
snvs = {}
datacolumn = {}
for line in open(vcf, 'r'):
line=line.strip()
# Extract column in vcf file for "TUMOR" and "NORMAL"
if line.startswith("#CHROM"):
info = line.split("\t")
for col in range(9, len(info)):
if info[col] in ['TUMOR', 'NORMAL']:
datacolumn[info[col]] = col
else:
print "ERROR: Strelka VCF file does not contain column for TUMOR or NORMAL"
break
if not line.startswith("#"):
info=line.split("\t")
pos=info[0]+'_'+info[1]
ref=info[3]
alt=info[4]
ad_normal = {}
ad_tumor = {}
#Using tiers 2 data
ad_normal['A']=int(info[datacolumn['NORMAL']].split(":")[4].split(",")[1])
ad_normal['C']=int(info[datacolumn['NORMAL']].split(":")[5].split(",")[1])
ad_normal['G']=int(info[datacolumn['NORMAL']].split(":")[6].split(",")[1])
ad_normal['T']=int(info[datacolumn['NORMAL']].split(":")[7].split(",")[1])
ad_tumor['A'] = int(info[datacolumn['TUMOR']].split(":")[4].split(",")[1])
ad_tumor['C'] = int(info[datacolumn['TUMOR']].split(":")[5].split(",")[1])
ad_tumor['G'] = int(info[datacolumn['TUMOR']].split(":")[6].split(",")[1])
ad_tumor['T'] = int(info[datacolumn['TUMOR']].split(":")[7].split(",")[1])
snvs[pos] = {}
snvs[pos]['ad'] = {}
# If several alternative alleles are detected in the tumor, report the most highly abundant one and print a warning message.
alt_allele=''
alt_depth_tumor = 0
            alt_depth_normal = 0
alt_alleles=alt.split(",")
for allele in alt_alleles:
if ad_tumor[allele] > alt_depth_tumor:
alt_depth_tumor=ad_tumor[allele]
alt_depth_normal=ad_normal[allele]
alt_allele=allele
if len(alt) > 1:
print "WARNING: Strelka variant with multiple alternative alleles detected. Reporting the alternative allele with highest read count:"
print line
vcfinfo = info[0] + '\t' + info[1] + '\t' + info[3] + '\t' + alt_allele
snvs[pos]['info'] = vcfinfo
snvs[pos]['ad']['tumor']=str(ad_tumor[ref])+','+str(alt_depth_tumor)
snvs[pos]['ad']['normal']=str(ad_normal[ref])+','+str(alt_depth_normal)
return {'snvs':snvs}
if __name__ == "__main__":
mergeVCFs()
|
|
from modularodm import Q
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from api_tests import utils
from framework.auth.core import Auth
from tests.base import ApiTestCase
from osf_tests.factories import (
AuthUserFactory,
NodeFactory,
ProjectFactory,
InstitutionFactory
)
from osf_tests.utils import mock_archive
from website.models import MetaSchema
from website.project.model import ensure_schemas
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.search import search
class ApiSearchTestCase(ApiTestCase):
def setUp(self):
super(ApiSearchTestCase, self).setUp()
self.user = AuthUserFactory()
self.user_one = AuthUserFactory(fullname='Kanye Omari West')
self.user_one.schools = [{
'degree': 'English',
'institution': 'Chicago State University'
}]
self.user_one.jobs = [{
'title': 'Producer',
'institution': 'GOOD Music, Inc.'
}]
self.user_one.save()
self.user_two = AuthUserFactory(fullname='Chance The Rapper')
self.institution = InstitutionFactory(name='Social Experiment')
self.user_two.affiliated_institutions.add(self.institution)
self.user_two.save()
# self.institution.save()
self.project = ProjectFactory(title='The Life of Pablo', creator=self.user_one, is_public=True)
self.project.set_description('Name one genius who ain\'t crazy', auth=Auth(self.user_one), save=True)
self.project.add_tag('Yeezus', auth=Auth(self.user_one), save=True)
self.project_two = ProjectFactory(title='Graduation', creator=self.user_one, is_public=True)
self.private_project = ProjectFactory(title='Coloring Book', creator=self.user_two)
self.component = NodeFactory(parent=self.project, title='Ultralight Beam', creator=self.user_two, is_public=True)
self.component.set_description('This is my part, nobody else speak', auth=Auth(self.user_two), save=True)
self.component.add_tag('trumpets', auth=Auth(self.user_two), save=True)
self.component_two = NodeFactory(parent=self.project, title='Highlights', creator=self.user_one, is_public=True)
self.private_component = NodeFactory(parent=self.project, title='Wavves', creator=self.user_one)
self.file = utils.create_test_file(self.component, self.user_one, filename='UltralightBeam.mp3')
self.file_two = utils.create_test_file(self.component_two, self.user_one, filename='Highlights.mp3')
self.private_file = utils.create_test_file(self.private_component, self.user_one, filename='Wavves.mp3')
def tearDown(self):
super(ApiSearchTestCase, self).tearDown()
search.delete_all()
class TestSearch(ApiSearchTestCase):
def setUp(self):
super(TestSearch, self).setUp()
self.url = '/{}search/'.format(API_BASE)
def test_search_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
search_fields = res.json['search_fields']
users_found = search_fields['users']['related']['meta']['total']
files_found = search_fields['files']['related']['meta']['total']
projects_found = search_fields['projects']['related']['meta']['total']
components_found = search_fields['components']['related']['meta']['total']
registrations_found = search_fields['registrations']['related']['meta']['total']
assert_equal(users_found, 3)
assert_equal(files_found, 2)
assert_equal(projects_found, 2)
assert_equal(components_found, 2)
assert_equal(registrations_found, 0)
def test_search_auth(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
search_fields = res.json['search_fields']
users_found = search_fields['users']['related']['meta']['total']
files_found = search_fields['files']['related']['meta']['total']
projects_found = search_fields['projects']['related']['meta']['total']
components_found = search_fields['components']['related']['meta']['total']
registrations_found = search_fields['registrations']['related']['meta']['total']
assert_equal(users_found, 3)
assert_equal(files_found, 2)
assert_equal(projects_found, 2)
assert_equal(components_found, 2)
assert_equal(registrations_found, 0)
def test_search_fields_links(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
search_fields = res.json['search_fields']
users_link = search_fields['users']['related']['href']
files_link = search_fields['files']['related']['href']
projects_link = search_fields['projects']['related']['href']
components_link = search_fields['components']['related']['href']
registrations_link = search_fields['registrations']['related']['href']
assert_in('/{}search/users/?q=%2A'.format(API_BASE), users_link)
assert_in('/{}search/files/?q=%2A'.format(API_BASE), files_link)
assert_in('/{}search/projects/?q=%2A'.format(API_BASE), projects_link)
assert_in('/{}search/components/?q=%2A'.format(API_BASE), components_link)
assert_in('/{}search/registrations/?q=%2A'.format(API_BASE), registrations_link)
def test_search_fields_links_with_query(self):
url = '{}?q=science'.format(self.url)
res = self.app.get(url)
assert_equal(res.status_code, 200)
search_fields = res.json['search_fields']
users_link = search_fields['users']['related']['href']
files_link = search_fields['files']['related']['href']
projects_link = search_fields['projects']['related']['href']
components_link = search_fields['components']['related']['href']
registrations_link = search_fields['registrations']['related']['href']
assert_in('/{}search/users/?q=science'.format(API_BASE), users_link)
assert_in('/{}search/files/?q=science'.format(API_BASE), files_link)
assert_in('/{}search/projects/?q=science'.format(API_BASE), projects_link)
assert_in('/{}search/components/?q=science'.format(API_BASE), components_link)
assert_in('/{}search/registrations/?q=science'.format(API_BASE), registrations_link)
class TestSearchComponents(ApiSearchTestCase):
def setUp(self):
super(TestSearchComponents, self).setUp()
self.url = '/{}search/components/'.format(API_BASE)
def test_search_public_component_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.component.title, res)
assert_in(self.component_two.title, res)
def test_search_public_component_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.component.title, res)
assert_in(self.component_two.title, res)
def test_search_public_component_contributor(self):
res = self.app.get(self.url, auth=self.user_two)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.component.title, res)
assert_in(self.component_two.title, res)
def test_search_private_component_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
assert_not_in(self.private_component.title, res)
def test_search_private_component_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
assert_not_in(self.private_component.title, res)
def test_search_private_component_contributor(self):
res = self.app.get(self.url, auth=self.user_two)
assert_equal(res.status_code, 200)
assert_not_in(self.private_component.title, res)
def test_search_component_by_title(self):
url = '{}?q={}'.format(self.url, 'beam')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.component.title, res.json['data'][0]['attributes']['title'])
def test_search_component_by_description(self):
url = '{}?q={}'.format(self.url, 'speak')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.component.title, res.json['data'][0]['attributes']['title'])
def test_search_component_by_tags(self):
url = '{}?q={}'.format(self.url, 'trumpets')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.component.title, res.json['data'][0]['attributes']['title'])
def test_search_component_by_contributor(self):
url = '{}?q={}'.format(self.url, 'Chance')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.component.title, res.json['data'][0]['attributes']['title'])
def test_search_component_no_results(self):
url = '{}?q={}'.format(self.url, 'Ocean')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 0)
def test_search_component_bad_query(self):
url = '{}?q={}'.format(self.url, 'www.spam.com/help/twitter/')
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
class TestSearchFiles(ApiSearchTestCase):
def setUp(self):
super(TestSearchFiles, self).setUp()
self.url = '/{}search/files/'.format(API_BASE)
def test_search_public_file_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.file.name, res)
assert_in(self.file_two.name, res)
def test_search_public_file_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.file.name, res)
assert_in(self.file_two.name, res)
def test_search_public_file_contributor(self):
res = self.app.get(self.url, auth=self.user_one)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.file.name, res)
assert_in(self.file_two.name, res)
def test_search_private_file_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
assert_not_in(self.private_file.name, res)
def test_search_private_file_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
assert_not_in(self.private_file.name, res)
def test_search_private_file_contributor(self):
res = self.app.get(self.url, auth=self.user_one)
assert_equal(res.status_code, 200)
assert_not_in(self.private_file.name, res)
def test_search_file_by_name(self):
url = '{}?q={}'.format(self.url, 'highlights')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.file_two.name, res.json['data'][0]['attributes']['name'])
class TestSearchProjects(ApiSearchTestCase):
def setUp(self):
super(TestSearchProjects, self).setUp()
self.url = '/{}search/projects/'.format(API_BASE)
def test_search_public_project_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.project.title, res)
assert_in(self.project_two.title, res)
def test_search_public_project_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.project.title, res)
assert_in(self.project_two.title, res)
def test_search_public_project_contributor(self):
res = self.app.get(self.url, auth=self.user_one)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.project.title, res)
assert_in(self.project_two.title, res)
def test_search_private_project_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
assert_not_in(self.private_project.title, res)
def test_search_private_project_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
assert_not_in(self.private_project.title, res)
def test_search_private_project_contributor(self):
res = self.app.get(self.url, auth=self.user_two)
assert_equal(res.status_code, 200)
assert_not_in(self.private_project.title, res)
def test_search_project_by_title(self):
url = '{}?q={}'.format(self.url, 'pablo')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.project.title, res.json['data'][0]['attributes']['title'])
def test_search_project_by_description(self):
url = '{}?q={}'.format(self.url, 'genius')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.project.title, res.json['data'][0]['attributes']['title'])
def test_search_project_by_tags(self):
url = '{}?q={}'.format(self.url, 'Yeezus')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.project.title, res.json['data'][0]['attributes']['title'])
def test_search_project_by_contributor(self):
url = '{}?q={}'.format(self.url, 'kanye')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.project.title, res)
assert_in(self.project_two.title, res)
def test_search_project_no_results(self):
url = '{}?q={}'.format(self.url, 'chicago')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 0)
def test_search_project_bad_query(self):
url = '{}?q={}'.format(self.url, 'www.spam.com/help/facebook/')
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
class TestSearchRegistrations(ApiSearchTestCase):
def setUp(self):
super(TestSearchRegistrations, self).setUp()
self.url = '/{}search/registrations/'.format(API_BASE)
ensure_schemas()
self.schema = MetaSchema.find_one(
Q('name', 'eq', 'Replication Recipe (Brandt et al., 2013): Post-Completion') &
Q('schema_version', 'eq', LATEST_SCHEMA_VERSION)
)
with mock_archive(self.project, autocomplete=True, autoapprove=True, schema=self.schema) as registration:
self.registration = registration
with mock_archive(self.project_two, autocomplete=True, autoapprove=True,
schema=self.schema) as registration_two:
self.registration_two = registration_two
with mock_archive(self.private_project, autocomplete=True, autoapprove=True,
schema=self.schema) as private_registration:
self.private_registration = private_registration
self.private_registration.is_public = False
self.private_registration.save()
# TODO: This shouldn't be necessary, but tests fail if we don't do this. Investigate further.
self.private_registration.update_search()
def test_search_public_registration_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.registration.title, res)
assert_in(self.registration_two.title, res)
def test_search_public_registration_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.registration.title, res)
assert_in(self.registration_two.title, res)
def test_search_public_registration_contributor(self):
res = self.app.get(self.url, auth=self.user_one)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.registration.title, res)
assert_in(self.registration_two.title, res)
def test_search_private_registration_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
assert_not_in(self.private_registration.title, res)
def test_search_private_registration_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
assert_not_in(self.private_registration.title, res)
def test_search_private_registration_contributor(self):
res = self.app.get(self.url, auth=self.user_two)
assert_equal(res.status_code, 200)
assert_not_in(self.private_registration.title, res)
def test_search_registration_by_title(self):
url = '{}?q={}'.format(self.url, 'graduation')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.registration_two.title, res.json['data'][0]['attributes']['title'])
def test_search_registration_by_description(self):
url = '{}?q={}'.format(self.url, 'crazy')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.registration.title, res.json['data'][0]['attributes']['title'])
def test_search_registration_by_tags(self):
url = '{}?q={}'.format(self.url, 'yeezus')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.registration.title, res.json['data'][0]['attributes']['title'])
def test_search_registration_by_contributor(self):
url = '{}?q={}'.format(self.url, 'west')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.registration.title, res)
assert_in(self.registration_two.title, res)
def test_search_registration_no_results(self):
url = '{}?q={}'.format(self.url, '79th')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 0)
def test_search_registration_bad_query(self):
url = '{}?q={}'.format(self.url, 'www.spam.com/help/snapchat/')
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
class TestSearchUsers(ApiSearchTestCase):
def setUp(self):
super(TestSearchUsers, self).setUp()
self.url = '/{}search/users/'.format(API_BASE)
def test_search_users_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.user.fullname, res)
        assert_in(self.user_one.fullname, res)
def test_search_users_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 2)
assert_in(self.user.fullname, res)
        assert_in(self.user_one.fullname, res)
def test_search_users_by_given_name(self):
url = '{}?q={}'.format(self.url, 'Kanye')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.user_one.given_name, res.json['data'][0]['attributes']['given_name'])
def test_search_users_by_middle_name(self):
url = '{}?q={}'.format(self.url, 'Omari')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.user_one.middle_names[0], res.json['data'][0]['attributes']['middle_names'][0])
def test_search_users_by_family_name(self):
url = '{}?q={}'.format(self.url, 'West')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.user_one.family_name, res.json['data'][0]['attributes']['family_name'])
def test_search_users_by_job(self):
url = '{}?q={}'.format(self.url, 'producer')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.user_one.fullname, res.json['data'][0]['attributes']['full_name'])
def test_search_users_by_school(self):
url = '{}?q={}'.format(self.url, 'Chicago')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.user_one.fullname, res.json['data'][0]['attributes']['full_name'])
class TestSearchInstitutions(ApiSearchTestCase):
def setUp(self):
super(TestSearchInstitutions, self).setUp()
self.url = '/{}search/institutions/'.format(API_BASE)
def test_search_institutions_no_auth(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_in(self.institution.name, res)
def test_search_institutions_auth(self):
res = self.app.get(self.url, auth=self.user)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_in(self.institution.name, res)
def test_search_institutions_by_name(self):
url = '{}?q={}'.format(self.url, 'Social')
res = self.app.get(url)
assert_equal(res.status_code, 200)
num_results = len(res.json['data'])
total = res.json['links']['meta']['total']
assert_equal(num_results, total, 1)
assert_equal(self.institution.name, res.json['data'][0]['attributes']['name'])
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
__author__ = '[email protected] (Ben Vanik)'
import os
import string
import sublime
import sublime_plugin
import di
import views
PACKAGE_DIR = os.getcwdu()
# DEBUG: before possibly reloading the di module, we need to clean it up
views.cleanup_all()
di.cleanup_module()
# DEBUG: use reimport to update all modules that have changed - this is needed
# because Sublime Text will only reload modules in the plugin root
from third_party.reimport import reimport, modified
modified_modules = modified(os.path.relpath('di', PACKAGE_DIR))
if len(modified_modules):
print 'STDI: modules changed, reloading: %s' % (modified_modules)
reimport(*modified_modules)
# TODO(benvanik): move to SourceView
def _get_syntax_name(view):
"""Gets the name of the syntax used in the given view.
Args:
view: View.
Returns:
The name of the syntax used in the given view.
"""
syntax = view.settings().get('syntax')
return os.path.splitext(os.path.basename(syntax))[0]
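# For example, a view whose 'syntax' setting is
# 'Packages/Python/Python.tmLanguage' would yield 'Python' here (illustrative;
# the actual setting value depends on the installed syntax definitions).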
class DebugPlugin(object):
"""
"""
def __init__(self, *args, **kwargs):
# Cached providers, mapped by provider URI
self._providers = {}
# Active debuggers, mapped by instance URI
self._debuggers = {}
# The debugger for each provider, mapped by provider URI
# TODO(benvanik): remove this - it limits things to one active session
self._debuggers_by_provider = {}
# Breakpoint list
breakpoint_file = os.path.join(sublime.packages_path(),
'..',
'Settings',
'Breakpoints.sublime_session')
self._breakpoint_listener = BreakpointListener(self)
self._breakpoint_list = di.load_breakpoint_list(breakpoint_file,
self._breakpoint_listener)
# Status manager
self._status_manager = StatusManager(self)
# Active location, if one is set
self._active_location = None
# All source views that exist, by view.id()
self._source_views = {}
# Scan all open views to build source views
# TODO(benvanik): find a way to prevent this
for window in sublime.windows():
for view in window.views():
self.get_source_view(view)
def debuggers(self):
return self._debuggers.values()
def breakpoint_list(self):
return self._breakpoint_list
def status_manager(self):
return self._status_manager
def show_status_message(self, value):
"""Shows a status message.
Args:
value: Message.
"""
self._status_manager.show_message(value)
def _get_provider_for_uri(self, uri):
"""Gets a debugging provider for a URI.
Creates or returns a cached provider.
Args:
uri: URI to get the provider for.
Returns:
An InstanceProvider, if one was found.
"""
if uri in self._providers:
provider = self._providers.get(uri)
else:
provider = di.create_provider(uri)
self._providers[uri] = provider
return provider
def launch_debugger(self, target_window, provider_uri, attach, callback=None):
"""Launches a debugger.
Args:
target_window: Target window.
provider_uri: Provider URI.
attach: True to attach to an existing instance.
callback: Callback that will receive the Debugger or None if it failed or
already existed.
"""
provider = self._get_provider_for_uri(provider_uri)
if not provider:
print 'STDI: no provider found for URI %s' % (provider_uri)
self.show_status_message('No provider found for URI %s' % (provider_uri))
      sublime.set_timeout(lambda: callback(None), 0)
return
if attach:
print 'DEBUG: would attach'
else:
print 'DEBUG: would launch'
# Query instances async
def _queried_instances(instance_infos):
if not len(instance_infos):
print 'STDI: no instances found on provider'
self.show_status_message('No debuggable instances found!')
        callback(None)
        return
if not provider.is_single_instance():
# Need to show a list
items = []
for instance_info in instance_infos:
items.append([
instance_info.display_name(),
instance_info.display_info(),
])
def _item_selected(index):
if index == -1:
callback(None)
return
instance_info = instance_infos[index]
self._attach_to_instance(target_window, instance_info)
target_window.show_quick_panel(
items, _item_selected, sublime.MONOSPACE_FONT)
else:
# Pick the only one we have
self._attach_to_instance(target_window, instance_infos[0])
provider.query_instances(_queried_instances)
def _attach_to_instance(self, target_window, instance_info):
"""Attaches to an instance.
Args:
target_window: Target window.
instance_info: Instance to attach to.
"""
# Ensure not running
if instance_info.uri() in self._debuggers:
return None
# Create
provider = instance_info.provider()
listener = DebuggerListener(self)
debugger = instance_info.attach_debugger(listener)
debugger.set_target_window(target_window)
self._debuggers[instance_info.uri()] = debugger
self._debuggers_by_provider[provider.uri()] = debugger
debugger.attach()
self._status_manager.show_message('Attaching debugger...')
self._status_manager.update()
return debugger
def get_debugger_provider_uri_for_view(self, view):
"""Gets the debugger provider URI for the given view.
Args:
view: View.
Returns:
A debugger provider URI, if one is defined.
"""
settings = view.settings()
return settings.get('debug_target', None)
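  # The URI is read from view settings, so a project can opt in via its
  # .sublime-project file, e.g. (illustrative; the URI scheme depends on the
  # available di providers):
  #   "settings": {"debug_target": "<provider uri>"}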
def get_debugger_for_view(self, view):
"""Gets the active debugger for the given view.
Args:
view: View.
Returns:
A Debugger, if one is attached.
"""
provider_uri = self.get_debugger_provider_uri_for_view(view)
return self._debuggers_by_provider.get(provider_uri, None)
def remove_debugger(self, debugger):
"""Removes a debugger from the active table.
Args:
debugger: Debugger instance.
"""
instance_uri = debugger.instance_info().uri()
provider_uri = debugger.provider().uri()
if not instance_uri in self._debuggers:
return
del self._debuggers[instance_uri]
del self._debuggers_by_provider[provider_uri]
self._status_manager.update()
def translate_uri(self, uri):
"""Translates a URI to a source path.
Args:
uri: URI.
Returns:
A source path that can be used with ST or None if no mapping exists.
"""
# TODO(benvanik): translate uri (source map/etc)
return uri
def get_source_view(self, view, create=True):
"""Gets a SourceView for the given ST view.
Args:
view: ST view.
create: True to create if needed.
Returns:
A SourceView, created on demand if needed.
"""
source_view = self._source_views.get(view.id(), None)
if not source_view and create:
source_view = SourceView(self, view)
self._source_views[view.id()] = source_view
return source_view
def source_views_for_uri(self, uri):
"""Iterates all source views with the given URI.
Args:
uri: URI.
"""
translated_path = self.translate_uri(uri)
for source_view in self._source_views.values():
if source_view.file_name() == translated_path:
yield source_view
def cleanup_source_view(self, view):
"""Removes a SourceView for the given ST view.
Args:
view: ST view.
"""
source_view = self._source_views.get(view.id(), None)
if source_view:
source_view.cleanup()
del self._source_views[view.id()]
def active_location(self):
return self._active_location
def set_active_location(self, debugger, location):
"""Sets the active location, opening views and changing focus.
Args:
debugger: Debugger that is requesting the location change.
location: (uri, line, column) location.
"""
if not location:
self.clear_active_location()
return
(uri, line, column) = location
self._active_location = (uri, line, column)
translated_path = self.translate_uri(uri)
full_path = '%s:%s:%s' % (translated_path, line, column)
window = debugger.target_window()
    new_view = window.open_file(full_path, sublime.ENCODED_POSITION)
    # TODO(benvanik): consider also passing sublime.TRANSIENT here.
new_view = self.get_source_view(new_view)
if not new_view.is_loading():
window.focus_view(new_view.view())
# Enumerate all views we know about and update them
# TODO(benvanik): faster - lookup by filename
for source_view in self._source_views.values():
if source_view.file_name() == translated_path:
source_view.set_active_location(location)
else:
source_view.clear_active_location()
def clear_active_location(self):
"""Clears the active location.
"""
if not self._active_location:
return
(uri, line, column) = self._active_location
for source_view in self.source_views_for_uri(uri):
source_view.clear_active_location()
self._active_location = None
class StatusManager(object):
"""Status UI manager.
Controls view and window status bars. Views should attach to the status
manager to get relevant information.
"""
def __init__(self, plugin, *args, **kwargs):
"""Initializes the status manager.
Args:
plugin: Parent plugin.
"""
self._plugin = plugin
def show_message(self, value):
"""Shows a temporary message in the status bar.
Args:
value: Message to show.
"""
sublime.status_message(value)
def show_error(self, value, ask_retry=False):
"""Shows an error message to the user.
This is an annoying popup, so use sparingly.
Args:
value: Message to show.
ask_retry: True to ask the user if they want to retry.
Returns:
True if the user chose to retry.
"""
if ask_retry:
return sublime.ok_cancel_dialog(value, 'Retry')
else:
sublime.error_message(value)
return False
def update(self):
"""Updates the active view, if any.
"""
view = sublime.active_window().active_view()
if view:
self.update_view(view)
def update_view(self, view):
"""Updates the status in the given view.
Args:
view: View.
"""
debugger = self._plugin.get_debugger_for_view(view)
if not debugger:
view.erase_status('stdi')
return
instance_info = debugger.instance_info()
message = 'Debugging %s' % (instance_info.uri())
view.set_status('stdi', message)
class SourceView(object):
"""A ST view wrapper that manages a single views overlays and state.
A DebugPlugin will manage these views, creating and deleting them as required,
to enable sensible control over the custom regions/etc added to a normal ST
view.
This type uses metaprogramming to make it act like an ST view (mostly).
"""
def __init__(self, plugin, view, *args, **kwargs):
"""Initializes a source view.
Args:
plugin: Parent plugin.
view: ST view.
"""
self._plugin = plugin
self._view = view
self._active_location = None
self._breakpoint_regions = {}
def __getattr__(self, name):
if hasattr(self._view, name):
return getattr(self._view, name)
raise AttributeError('Attribute %s not found' % (name))
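  # Because of __getattr__ above, a SourceView can be used like the sublime.View
  # it wraps; e.g. the self.add_regions(...), self.erase_regions(...) and
  # self.text_point(...) calls below all fall through to the underlying view.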
def view(self):
return self._view
def cleanup(self):
"""Called before the view is disposed to cleanup all changes.
"""
self.erase_regions('stdi_view_active')
for key in self._breakpoint_regions.values():
self.erase_regions(key)
def on_load(self):
"""Called once the view has loaded.
"""
self.set_active_location(self._active_location)
if self._active_location:
self.window().focus_view(self._view)
def location_to_region(self, location):
"""Converts a location to a region.
Assumes the location is in the current view.
Args:
location: (uri, line, column) location.
Returns:
A sublime.Region.
"""
(uri, line, column) = location
point = self.text_point(line - 1, column - 1)
return self.line(point)
def active_location(self):
return self._active_location
def set_active_location(self, location):
self.clear_active_location()
if not location:
      return
    self._active_location = location
    region = self.location_to_region(location)
# Pick based on breakpoint/exception/etc
# TODO(benvanik): pick icon/style
scope = 'invalid' #'stdi.gutter.active_line'
icon = 'bookmark'
self.add_regions('stdi_view_active',
[region],
scope,
icon,
sublime.DRAW_EMPTY)
self.show(region.begin())
def clear_active_location(self):
if not self._active_location:
return
self.erase_regions('stdi_view_active')
self._active_location = None
def add_breakpoint(self, breakpoint):
location = breakpoint.location()
region = self.location_to_region(location)
# TODO(benvanik): pick icon/style
scope = 'stdi.gutter.breakpoint'
icon = 'dot'
key = 'stdi_view_breakpoint_%s' % (breakpoint.id())
self.add_regions(key,
[region],
scope,
icon,
sublime.HIDDEN)
self._breakpoint_regions[breakpoint.id()] = key
def change_breakpoint(self, breakpoint):
# Easy!
self.remove_breakpoint(breakpoint)
self.add_breakpoint(breakpoint)
def remove_breakpoint(self, breakpoint):
key = self._breakpoint_regions.get(breakpoint.id(), None)
if not key:
return
self.erase_regions(key)
del self._breakpoint_regions[breakpoint.id()]
class CallstackView(views.CustomView):
"""A view that models a callstack, displaying and handling frame navigation.
"""
def __init__(self, window, debugger, *args, **kwargs):
"""Initializes a callstack view.
Args:
window: Target sublime window.
debugger: Debugger.
"""
super(CallstackView, self).__init__(window, debugger, 'Callstack',
*args, **kwargs)
if window.num_groups() == 4:
window.set_view_index(self._view, 2, 0)
elif window.num_groups() > 1:
window.set_view_index(self._view, 1, 0)
def update(self, snapshot):
view = self.view()
view.set_read_only(False)
edit = view.begin_edit()
view.erase(edit, sublime.Region(0, view.size()))
frame_regions = []
frame_info_regions = []
source_info_regions = []
handle_set = snapshot.handle_set()
for frame in snapshot.frames():
location = frame.location()
s = '%s: %s' % (frame.ordinal(), frame.formatted_call(handle_set))
s = string.ljust(s, 120) + '\n'
view.insert(edit, view.size(), s)
frame_info_region = view.line(view.size() - 2)
frame_info_regions.append(frame_info_region)
s = ' %s@%s:%s\n' % (location[0], location[1], location[2])
view.insert(edit, view.size(), s)
source_info_region = view.line(view.size() - 2)
source_info_regions.append(source_info_region)
frame_regions.append(sublime.Region(frame_info_region.begin(),
source_info_region.end()))
# print ' is_constructor: %s' % (frame.is_constructor())
# print ' is_at_return: %s' % (frame.is_at_return())
# print ' function: %s' % (handle_set.get_value(frame.function_ref()))
# print ' this: %s' % (handle_set.get_value(frame.this_ref()))
# print ' arguments:'
# for var in frame.argument_refs():
# print ' %s = %s' % (var[0], handle_set.get_value(var[1]))
# Mark info regions
view.add_regions(
'stdi_callstack_frame_info',
frame_info_regions,
'string') #'stdi.callstack.frame_info',
# Mark source regions
view.add_regions(
'stdi_callstack_source_info',
source_info_regions,
'comment') #'stdi.callstack.source_info',
# Mark active frame
view.add_regions(
'stdi_callstack_active_frame',
[frame_regions[0]],
'stdi.callstack.active_frame',
'dot',
sublime.HIDDEN)
view.end_edit(edit)
view.set_read_only(True)
class _VariableNode(views.TreeNode):
def __init__(self, view, debugger, handle_set, key, value, *args, **kwargs):
super(_VariableNode, self).__init__(view, *args, **kwargs)
self._debugger = debugger
self._handle_set = handle_set
self._key = key
self._value = value
assert value
def label(self):
return self._key
def description(self):
return str(self._value)
def has_children(self):
return (self._value.handle_type() == 'object' or
self._value.handle_type() == 'function')
def query_children(self, callback):
handle_ids = []
for p in self._value.properties():
handle_ids.append(p.ref())
def _on_query_values(handle_set):
nodes = []
for p in self._value.properties():
key = p.name()
value = handle_set.get_value(p.ref())
nodes.append(_VariableNode(
self.view(), self._debugger, handle_set, key, value))
callback(nodes)
self._debugger.query_values(handle_ids, _on_query_values)
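  # Child nodes are produced asynchronously: query_values() is handed the
  # property handle ids, and the TreeNode callback only receives the new
  # _VariableNode list once the debugger responds.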
class _ScopeNode(_VariableNode):
def __init__(self, view, debugger, handle_set, scope, *args, **kwargs):
value = handle_set.get_value(scope.object_ref())
super(_ScopeNode, self).__init__(view, debugger, handle_set,
scope.scope_name(), value, *args, **kwargs)
self._scope = scope
def description(self):
return None
def has_children(self):
return True
class _RootVariablesNode(views.TreeNode):
def __init__(self, view, debugger, *args, **kwargs):
super(_RootVariablesNode, self).__init__(view, *args, **kwargs)
self._debugger = debugger
self._handle_set = None
self._scopes = None
def label(self):
return 'Variables'
def description(self):
return None
def has_children(self):
    return bool(self._scopes)
def query_children(self, callback):
nodes = []
if self._scopes:
for scope in self._scopes:
nodes.append(_ScopeNode(self.view(), self._debugger, self._handle_set,
scope))
callback(nodes)
def can_collapse(self):
return False
def update(self, handle_set, scopes):
self.set_expanded(False)
self._handle_set = handle_set
self._scopes = scopes
self.set_expanded(True)
class VariablesView(views.TreeView):
"""A view that displays scope variables.
"""
def __init__(self, window, debugger, *args, **kwargs):
"""Initializes a variables view.
Args:
window: Target sublime window.
debugger: Debugger.
"""
super(VariablesView, self).__init__(window, debugger, 'Variables',
*args, **kwargs)
if window.num_groups() == 4:
window.set_view_index(self._view, 3, 0)
elif window.num_groups() > 1:
window.set_view_index(self._view, 1, 0)
def update(self, snapshot):
debugger = self.debugger()
    # TODO(benvanik): active frame
frame = snapshot.frames()[0]
def _on_frame_scopes(handle_set, scopes):
root_node = _RootVariablesNode(self.view(), self.debugger())
root_node.update(handle_set, scopes)
self.reset(root_node)
debugger.query_frame_scopes(frame, _on_frame_scopes)
class EventListener(sublime_plugin.EventListener):
def on_new(self, view):
plugin().get_source_view(view)
def on_clone(self, view):
plugin().get_source_view(view)
def on_load(self, view):
source_view = plugin().get_source_view(view, create=True)
if source_view:
source_view.on_load()
def on_close(self, view):
plugin().cleanup_source_view(view)
def on_post_save(self, view):
# Notify all active debuggers that the given file has changed - they can
# do what they want with that information
uri = view.file_name()
if not uri:
return
new_source = view.substr(sublime.Region(0, view.size()))
for debugger in plugin().debuggers():
debugger.change_source(uri, new_source)
def on_selection_modified(self, view):
custom_view = views.get_custom_view(view)
if custom_view:
custom_view.on_selection_modified()
def on_activated(self, view):
plugin().status_manager().update_view(view)
def on_deactivated(self, view):
plugin().status_manager().update_view(view)
class BreakpointListener(di.BreakpointListener):
"""Handles breakpoint list events.
"""
def __init__(self, plugin, *args, **kwargs):
super(BreakpointListener, self).__init__(*args, **kwargs)
self._plugin = plugin
def on_breakpoint_add(self, breakpoint):
print 'EVENT: on_breakpoint_add'
# Update all views
if breakpoint.type() == 'location':
location_uri = breakpoint.location()[0]
for source_view in plugin().source_views_for_uri(location_uri):
source_view.add_breakpoint(breakpoint)
# Update all debuggers
for debugger in plugin().debuggers():
debugger.add_breakpoint(breakpoint)
def on_breakpoint_change(self, breakpoint):
print 'EVENT: on_breakpoint_change'
if breakpoint.type() == 'location':
location_uri = breakpoint.location()[0]
for source_view in plugin().source_views_for_uri(location_uri):
source_view.change_breakpoint(breakpoint)
# Update all debuggers
for debugger in plugin().debuggers():
debugger.change_breakpoint(breakpoint)
def on_breakpoint_remove(self, breakpoint):
print 'EVENT: on_breakpoint_remove'
if breakpoint.type() == 'location':
location_uri = breakpoint.location()[0]
for source_view in plugin().source_views_for_uri(location_uri):
source_view.remove_breakpoint(breakpoint)
# Update all debuggers
for debugger in plugin().debuggers():
debugger.remove_breakpoint(breakpoint)
class DebuggerListener(di.DebuggerListener):
"""Handles debugger events.
"""
def __init__(self, plugin, *args, **kwargs):
super(DebuggerListener, self).__init__(*args, **kwargs)
self._plugin = plugin
self._callstack_view = None
self._variables_view = None
def on_attach(self, *args, **kwargs):
print 'EVENT: on_attach'
# Add all breakpoints
debugger = self.debugger()
breakpoint_list = plugin().breakpoint_list()
for breakpoint in breakpoint_list.breakpoints():
debugger.add_breakpoint(breakpoint)
def on_detach(self, reason, *args, **kwargs):
print 'EVENT: on_detach(%s)' % (reason)
plugin().remove_debugger(self.debugger())
plugin().clear_active_location()
if self._callstack_view:
self._callstack_view.close()
if self._variables_view:
self._variables_view.close()
status_manager = self._plugin.status_manager()
detach_message = 'Detached'
if reason:
detach_message += ': %s' % (reason)
status_manager.show_message(detach_message)
# TODO(benvanik): don't show errors, they are annoying
if reason:
status_manager.show_error(detach_message)
def on_suspend(self, *args, **kwargs):
print 'EVENT: on_suspend'
def on_resume(self, *args, **kwargs):
print 'EVENT: on_resume'
plugin().clear_active_location()
if self._callstack_view:
self._callstack_view.clear()
if self._variables_view:
self._variables_view.clear()
def on_snapshot(self, snapshot, *args, **kwargs):
print 'EVENT: on_snapshot'
handle_set = snapshot.handle_set()
for frame in snapshot.frames():
location = frame.location()
print 'frame %s: %s@%s:%s' % (frame.ordinal(), location[0],
location[1],
location[2])
print ' is_constructor: %s' % (frame.is_constructor())
print ' is_at_return: %s' % (frame.is_at_return())
print ' function: %s' % (handle_set.get_value(frame.function_ref()))
print ' this: %s' % (handle_set.get_value(frame.this_ref()))
print ' arguments:'
for var in frame.argument_refs():
print ' %s = %s' % (var[0], handle_set.get_value(var[1]))
print ' locals:'
for var in frame.local_refs():
print ' %s = %s' % (var[0], handle_set.get_value(var[1]))
debugger = self.debugger()
if not self._callstack_view:
self._callstack_view = CallstackView(sublime.active_window(), debugger)
self._callstack_view.focus()
self._callstack_view.update(snapshot)
if not self._variables_view:
self._variables_view = VariablesView(sublime.active_window(), debugger)
self._variables_view.focus()
self._variables_view.update(snapshot)
def on_break(self, location, breakpoints_hit, *args, **kwargs):
print 'EVENT: on_break(%s@%s:%s)' % (location[0], location[1], location[2])
if len(breakpoints_hit):
print ' breakpoints hit: %s' % (breakpoints_hit)
plugin().set_active_location(self.debugger(), location)
def on_exception(self, location, is_uncaught, exception,
*args, **kwargs):
print 'EVENT: on_exception(%s@%s:%s)' % (location[0], location[1],
location[2])
    # TODO(benvanik): update the callstack/variables views once an exception
    # snapshot is available here.
plugin().set_active_location(self.debugger(), location)
class _WindowCommand(sublime_plugin.WindowCommand):
"""Global command, executed via command palette or key presses.
"""
def get_debugger_provider_uri(self):
"""Gets the debugger provider URI for the current view.
Returns:
A debugger provider URI, if one is defined.
"""
view = self.window.active_view()
if not view:
return None
return plugin().get_debugger_provider_uri_for_view(view)
def has_debugger_configured(self):
"""Whether a debugger for the current view has been configured.
Returns:
True if a debugger has been configured.
"""
if self.get_debugger():
return True
    return bool(self.get_debugger_provider_uri())
def launch_debugger(self, attach=False, callback=None):
"""Launches a debugger.
Args:
attach: Attach to an existing instance.
      callback: Callback to call with the Debugger or None if it could not be
        created, already existed, or the action was cancelled.
"""
def _dummy(debugger):
pass
callback = callback or _dummy
provider_uri = self.get_debugger_provider_uri()
if not provider_uri:
print 'STDI: no debug provider configured'
plugin().show_status_message('No debug provider configured')
sublime.set_timeout(lambda: callback(None), 0)
return
# Launch!
plugin().launch_debugger(self.window, provider_uri, attach, callback)
def get_debugger(self):
"""Gets the active debugger for the active window/view.
Returns:
A Debugger, if one is attached.
"""
view = self.window.active_view()
if view:
return plugin().get_debugger_for_view(view)
else:
return None
def get_view_uri(self):
"""Gets the URI of the current view, if any.
Returns:
A URI or None if there is no view or the file has not yet been named.
"""
view = self.window.active_view()
if view:
return view.file_name()
return None
class StdiToggleAllBreakpoints(_WindowCommand):
"""Enables/disables all breakpoints.
"""
def run(self, action):
print 'toggle all breakpoints: %s' % (action)
class StdiLaunchDebuggerCommand(_WindowCommand):
"""Launches a configured target app and attaches the debugger.
"""
def run(self):
self.launch_debugger()
def is_enabled(self):
return not self.get_debugger()
def is_visible(self):
return not self.get_debugger()
class StdiAttachDetachDebuggerCommand(_WindowCommand):
"""Attaches or detach to a configured target app if it is already running.
"""
def run(self):
if not self.get_debugger():
self.launch_debugger(attach=True)
else:
plugin().show_status_message('Detaching debugger...')
debugger = self.get_debugger()
debugger.detach(terminate=False)
plugin().remove_debugger(debugger)
def description(self):
if not self.get_debugger():
return 'Attach Debugger'
else:
return 'Detach Debugger'
class _ControlCommand(_WindowCommand):
"""Command that controls debugger flow.
"""
def is_visible(self):
return self.get_debugger()
class StdiStopDebugger(_ControlCommand):
"""Detach debugger and terminate.
"""
def run(self):
plugin().show_status_message('Stopping debugger...')
debugger = self.get_debugger()
debugger.detach(terminate=True)
plugin().remove_debugger(debugger)
def is_enabled(self):
return self.get_debugger()
class StdiDebugPauseCommand(_ControlCommand):
"""Debugger control: pause/continue.
"""
def run(self):
debugger = self.get_debugger()
if debugger.is_running():
plugin().show_status_message('Pausing...')
debugger.suspend()
else:
plugin().show_status_message('Resuming...')
debugger.resume()
def is_enabled(self):
debugger = self.get_debugger()
return debugger and (debugger.can_suspend() or
debugger.can_resume())
class StdiDebugStepOverCommand(_ControlCommand):
"""Debugger control: step over.
"""
def run(self):
plugin().show_status_message('Stepping over...')
debugger = self.get_debugger()
debugger.step_over()
def is_enabled(self):
debugger = self.get_debugger()
return debugger and debugger.can_step_over()
class StdiDebugStepInCommand(_ControlCommand):
"""Debugger control: step in.
"""
def run(self):
plugin().show_status_message('Stepping in...')
debugger = self.get_debugger()
debugger.step_in()
def is_enabled(self):
debugger = self.get_debugger()
return debugger and debugger.can_step_in()
class StdiDebugStepOutCommand(_ControlCommand):
"""Debugger control: step out.
"""
def run(self):
plugin().show_status_message('Stepping out...')
debugger = self.get_debugger()
debugger.step_out()
def is_enabled(self):
debugger = self.get_debugger()
return debugger and debugger.can_step_out()
class StdiEvaluate(_WindowCommand):
"""Evaluate an expression in the current context.
"""
def run(self):
debugger = self.get_debugger()
# TODO(benvanik): show an input panel, ask for expression
#plugin().show_status_message('Evaluating expression...')
#debugger.evaluate()
def is_enabled(self):
debugger = self.get_debugger()
return debugger and debugger.can_evaluate()
def is_visible(self):
return self.get_debugger()
class _ContextCommand(_WindowCommand):
"""Context menu command.
"""
# TODO(benvanik): could make this get_locations to enable actions with
# multiple selection, but I don't use that so I don't care!
def get_location(self, include_column=False):
"""Gets the cursor location of the current view.
Args:
include_column: True to include the column number, otherwise it will be
0 to indicate the entire line.
Returns:
A (uri, line, column) location or None if no selection.
"""
view = self.window.active_view()
if not view or not len(view.sel()):
return None
sel = view.sel()[0]
(line, column) = view.rowcol(sel.a)
if not include_column:
column = 0
return (self.get_view_uri(), line + 1, column + 1)
class StdiContinueToHereCommand(_ContextCommand):
"""Continues until the clicked line is executed.
If no debugger is attached this will attempt to attach one.
"""
def run(self):
debugger = self.get_debugger()
if debugger:
print 'continue to here'
else:
print 'launch and continue to here'
self.launch_debugger()
#debugger.continue_to(...)
def is_visible(self):
if not super(StdiContinueToHereCommand, self).is_visible():
return False
return self.has_debugger_configured()
def description(self):
if self.get_debugger():
return 'Continue to Here'
else:
return 'Launch and Continue to Here'
class _BreakpointContextCommand(_ContextCommand):
"""Context menu command that relates to breakpoints.
"""
def get_line_breakpoint(self):
"""Get the breakpoint on the currently clicked line.
Returns:
A Breakpoint on the clicked line, if one exists.
"""
location = self.get_location()
if not location:
return
breakpoint_list = plugin().breakpoint_list()
return breakpoint_list.get_breakpoint_at_location(location)
class StdiAddRemoveBreakpointCommand(_BreakpointContextCommand):
"""Adds a new breakpoint on the clicked line.
"""
def run(self):
location = self.get_location()
if not location:
return
breakpoint_list = plugin().breakpoint_list()
breakpoint = breakpoint_list.get_breakpoint_at_location(location)
if not breakpoint:
breakpoint_list.create_breakpoint_at_location(location)
plugin().show_status_message(
'Added breakpoint at line %s' % (location[1]))
else:
breakpoint_list.remove_breakpoint(breakpoint)
plugin().show_status_message(
'Removed breakpoint at line %s' % (location[1]))
def description(self):
breakpoint = self.get_line_breakpoint()
if not breakpoint:
return 'Add Breakpoint'
else:
return 'Remove Breakpoint'
class StdiToggleBreakpointCommand(_BreakpointContextCommand):
"""Enables or disables the breakpoint on the clicked line.
"""
def run(self):
breakpoint = self.get_line_breakpoint()
if not breakpoint:
return
breakpoint.set_enabled(not breakpoint.is_enabled())
def is_visible(self):
if not super(StdiToggleBreakpointCommand, self).is_visible():
return False
return self.get_line_breakpoint()
def description(self):
breakpoint = self.get_line_breakpoint()
if not breakpoint:
return 'None'
if not breakpoint.is_enabled():
return 'Enable Breakpoint'
else:
return 'Disable Breakpoint'
class StdiEditBreakpointConditionCommand(_BreakpointContextCommand):
"""Edits the breakpoint condition on the clicked line.
"""
def run(self):
breakpoint = self.get_line_breakpoint()
if not breakpoint:
return
def _on_done(new_value):
new_value = new_value.strip()
if not len(new_value):
new_value = None
breakpoint.set_condition(new_value)
input_view = self.window.show_input_panel(
'New Condition:',
breakpoint.condition() or '',
_on_done, None, None)
input_view.run_command('select_all')
def is_visible(self):
if not super(StdiEditBreakpointConditionCommand, self).is_visible():
return False
return self.get_line_breakpoint()
def description(self):
breakpoint = self.get_line_breakpoint()
if not breakpoint or not breakpoint.condition():
return 'Edit Condition...'
else:
return 'Condition: \'%s\'...' % (breakpoint.condition())
class StdiIgnoreBreakpointCommand(_BreakpointContextCommand):
"""Edits the breakpoint ignore count on the clicked line.
"""
def run(self):
breakpoint = self.get_line_breakpoint()
if not breakpoint:
return
def _on_done(new_value):
try:
new_value = int(new_value)
      except ValueError:
return
for debugger in plugin().debuggers():
debugger.ignore_breakpoint(breakpoint, new_value)
input_view = self.window.show_input_panel(
'Ignore Hits:',
'1',
_on_done, None, None)
input_view.run_command('select_all')
def is_visible(self):
if not super(StdiIgnoreBreakpointCommand, self).is_visible():
return False
if not len(plugin().debuggers()):
return False
return self.get_line_breakpoint()
class StdiPositionedContextMenuCommand(sublime_plugin.TextCommand):
"""Very hacky way of moving selection when the user right clicks.
This enables us to right-click -> add breakpoint at the point where the
user actually clicked, instead of wherever their selection happened to be.
"""
def run_(self, args):
self.view.run_command("drag_select", {'event': args['event']})
# new_sel = self.view.sel()
# click_point = new_sel[0].a
# (line, column) = self.view.rowcol(click_point)
# print '%s:%s (%s)' % (line, column, click_point)
self.view.run_command('context_menu', args)
# Global plugin
_plugin = DebugPlugin()
def plugin():
global _plugin
return _plugin
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
from cStringIO import StringIO
import posixpath
import sys
from zipfile import BadZipfile, ZipFile
import appengine_blobstore as blobstore
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import urlfetch
from docs_server_utils import StringIdentity
from file_system import FileNotFoundError, FileSystem, StatInfo
from future import Future, Gettable
from object_store_creator import ObjectStoreCreator
import url_constants
_GITHUB_REPOS_NAMESPACE = 'GithubRepos'
def _LoadCredentials(object_store_creator):
'''Returns (username, password) from |password_store|.
'''
password_store = object_store_creator.Create(
GithubFileSystem,
app_version=None,
category='password',
start_empty=False)
password_data = password_store.GetMulti(('username', 'password')).Get()
return password_data.get('username'), password_data.get('password')
class GithubFileSystem(FileSystem):
'''Allows reading from a github.com repository.
'''
@staticmethod
def Create(owner, repo, object_store_creator):
'''Creates a GithubFileSystem that corresponds to a single github repository
specified by |owner| and |repo|.
'''
return GithubFileSystem(
url_constants.GITHUB_REPOS,
owner,
repo,
object_store_creator,
AppEngineUrlFetcher)
@staticmethod
def ForTest(repo, fake_fetcher, path=None, object_store_creator=None):
    '''Creates a GithubFileSystem that can be used for testing. It reads zip
files and commit data from server2/test_data/github_file_system/test_owner
instead of github.com. It reads from files specified by |repo|.
'''
return GithubFileSystem(
path if path is not None else 'test_data/github_file_system',
'test_owner',
repo,
object_store_creator or ObjectStoreCreator.ForTest(),
fake_fetcher)
def __init__(self, base_url, owner, repo, object_store_creator, Fetcher):
self._repo_key = '%s/%s' % (owner, repo)
self._repo_url = '%s/%s/%s' % (base_url, owner, repo)
self._blobstore = blobstore.AppEngineBlobstore()
# Lookup the chrome github api credentials.
self._username, self._password = _LoadCredentials(object_store_creator)
self._fetcher = Fetcher(self._repo_url)
self._stat_cache = object_store_creator.Create(
GithubFileSystem, category='stat-cache')
self._repo_zip = Future(value=None)
def _GetNamelist(self):
'''Returns a list of all file names in a repository zip file.
'''
zipfile = self._repo_zip.Get()
if zipfile is None:
return []
return zipfile.namelist()
def _GetVersion(self):
'''Returns the currently cached version of the repository. The version is a
'sha' hash value.
'''
return self._stat_cache.Get(self._repo_key).Get()
def _FetchLiveVersion(self):
'''Fetches the current repository version from github.com and returns it.
The version is a 'sha' hash value.
'''
# TODO(kalman): Do this asynchronously (use FetchAsync).
result = self._fetcher.Fetch(
'commits/HEAD', username=self._username, password=self._password)
try:
return json.loads(result.content)['commit']['tree']['sha']
except (KeyError, ValueError):
logging.warn('Error parsing JSON from repo %s' % self._repo_url)
def Refresh(self):
'''Compares the cached and live stat versions to see if the cached
repository is out of date. If it is, an async fetch is started and a
Future is returned. When this Future is evaluated, the fetch will be
completed and the results cached.
If no update is needed, None will be returned.
'''
version = self._FetchLiveVersion()
repo_zip_url = self._repo_url + '/zipball'
def persist_fetch(fetch):
'''Completes |fetch| and stores the results in blobstore.
'''
try:
blob = fetch.Get().content
except urlfetch.DownloadError:
logging.error(
          'Failed to download zip file from repository %s' % repo_zip_url)
else:
try:
zipfile = ZipFile(StringIO(blob))
except BadZipfile as error:
logging.error(
'%s: Bad zip file returned from url %s' % (error, repo_zip_url))
else:
self._blobstore.Set(repo_zip_url, blob, _GITHUB_REPOS_NAMESPACE)
self._repo_zip = Future(value=zipfile)
self._stat_cache.Set(self._repo_key, version)
# If the cached and live stat versions are different fetch the new repo.
    if version != self._stat_cache.Get(self._repo_key).Get():
fetch = self._fetcher.FetchAsync(
'zipball', username=self._username, password=self._password)
return Future(delegate=Gettable(lambda: persist_fetch(fetch)))
return Future(value=None)
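  # Typical flow (a sketch using this class's own methods; owner/repo names are
  # placeholders): Refresh() and evaluate its Future so the zipball is cached,
  # then Read()/Stat() against the cached archive:
  #   fs = GithubFileSystem.Create('some_owner', 'some_repo', object_store_creator)
  #   fs.Refresh().Get()           # fetches the zipball only if the version changed
  #   readme = fs.Read(['README.md']).Get()['README.md']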
def Read(self, paths, binary=False):
    '''Returns a dictionary mapping |paths| to the contents of the file at each
path. If path ends with a '/', it is treated as a directory and is mapped to
a list of filenames in that directory.
|binary| is ignored.
'''
names = self._GetNamelist()
if not names:
# No files in this repository.
def raise_file_not_found():
raise FileNotFoundError('No paths can be found, repository is empty')
return Future(delegate=Gettable(raise_file_not_found))
else:
prefix = names[0].split('/')[0]
reads = {}
for path in paths:
full_path = posixpath.join(prefix, path)
if path == '' or path.endswith('/'): # If path is a directory...
trimmed_paths = []
for f in filter(lambda s: s.startswith(full_path), names):
if not '/' in f[len(full_path):-1] and not f == full_path:
trimmed_paths.append(f[len(full_path):])
reads[path] = trimmed_paths
else:
try:
reads[path] = self._repo_zip.Get().read(full_path)
except KeyError as error:
return Future(exc_info=(FileNotFoundError,
FileNotFoundError(error),
sys.exc_info()[2]))
return Future(value=reads)
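  # Directory reads (paths ending in '/') resolve to the immediate children of
  # that directory, e.g. (illustrative): Read(['docs/']).Get() might return
  # {'docs/': ['index.md', 'static/']} for a zip containing
  # '<prefix>/docs/index.md' and '<prefix>/docs/static/...' entries.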
def Stat(self, path):
    '''Stats |path|, returning its version as a StatInfo object. If |path| ends
with a '/', it is assumed to be a directory and the StatInfo object returned
includes child_versions for all paths in the directory.
File paths do not include the name of the zip file, which is arbitrary and
useless to consumers.
Because the repository will only be downloaded once per server version, all
stat versions are always 0.
'''
# Trim off the zip file's name.
path = path.lstrip('/')
trimmed = [f.split('/', 1)[1] for f in self._GetNamelist()]
if path not in trimmed:
raise FileNotFoundError("No stat found for '%s' in %s" % (path, trimmed))
version = self._GetVersion()
child_paths = {}
if path == '' or path.endswith('/'):
# Deal with a directory
for f in filter(lambda s: s.startswith(path), trimmed):
filename = f[len(path):]
if not '/' in filename and not f == path:
child_paths[filename] = StatInfo(version)
return StatInfo(version, child_paths or None)
def GetIdentity(self):
return '%s' % StringIdentity(self.__class__.__name__ + self._repo_key)
def __repr__(self):
return '<%s: key=%s, url=%s>' % (type(self).__name__,
self._repo_key,
self._repo_url)
|
|
import json
from datetime import datetime
from unittest.mock import patch
from django.http import HttpRequest
from django.test import RequestFactory, SimpleTestCase, TestCase
from django.test.utils import override_settings
from django.utils import timezone
from cspreports import utils
from cspreports.models import CSPReport
from cspreports.utils import get_midnight, parse_date_input
JSON_CONTENT_TYPE = 'application/json'
class UtilsTest(TestCase):
def test_config(self):
""" Test that the various CSP_REPORTS_X settings correctly control which handlers are
called.
"""
mock_paths = [
"cspreports.utils.email_admins",
"cspreports.utils.save_report",
"cspreports.utils.log_report",
]
corresponding_settings = [
"CSP_REPORTS_EMAIL_ADMINS",
"CSP_REPORTS_SAVE",
"CSP_REPORTS_LOG",
]
for i in range(len(mock_paths)):
mocks = [patch(path) for path in mock_paths]
settings_overrides = {
setting: True if j == i else False
for j, setting in enumerate(corresponding_settings)
}
with override_settings(**settings_overrides):
with mocks[0] as mocked_object_0, mocks[1] as mocked_object_1, mocks[2] as mocked_object_2:
mocked_objects = [mocked_object_0, mocked_object_1, mocked_object_2]
request = HttpRequest()
utils.process_report(request)
for k, mocked_object in enumerate(mocked_objects):
if k == i:
self.assertTrue(mocked_object.called)
else:
self.assertFalse(mocked_object.called)
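        # The pairing exercised above, one boolean setting per handler:
        #   CSP_REPORTS_EMAIL_ADMINS -> cspreports.utils.email_admins
        #   CSP_REPORTS_SAVE         -> cspreports.utils.save_report
        #   CSP_REPORTS_LOG          -> cspreports.utils.log_report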
@override_settings(CSP_REPORTS_LOG_LEVEL='warning')
def test_log_report(self):
""" Test that the `log_report` handler correctly logs at the right level. """
request = HttpRequest()
report = '{"document-uri": "http://example.com/"}'
formatted_report = utils.format_report(report)
request._body = report
with patch("cspreports.utils.logger.warning") as warning_mock:
utils.log_report(request)
self.assertTrue(warning_mock.called)
log_message = warning_mock.call_args[0][0] % warning_mock.call_args[0][1:]
self.assertTrue(formatted_report in log_message)
def test_email_admins(self):
""" Test that the `email_admins` handler correctly sends an email. """
request = HttpRequest()
report = '{"document-uri": "http://example.com/"}'
formatted_report = utils.format_report(report)
request._body = report
# Note that we are mocking the *Django* mail_admins function here.
with patch("cspreports.utils.mail_admins") as mock_mail_admins:
utils.email_admins(request)
self.assertTrue(mock_mail_admins.called)
message = mock_mail_admins.call_args[0][1]
self.assertTrue(formatted_report in message)
def test_format_report_handles_invalid_json(self):
""" Test that `format_report` doesn't trip up on invalid JSON.
Note: this is about not getting a ValueError, rather than any kind of security thing.
"""
invalid_json = '{"key": undefined_variable, nonsense here}'
try:
formatted = utils.format_report(invalid_json)
except ValueError as e:
self.fail("format_report did not handle invalid JSON: %s" % e)
# we expect our invalid JSON to remain in the output, as is
self.assertTrue(invalid_json in formatted)
def test_run_additional_handlers(self):
""" Test that the run_additional_handlers function correctly calls each of the specified custom
handler functions.
"""
# utils caches the handlers for efficiency, so clear that cache here
utils._additional_handlers = None
request = HttpRequest()
with override_settings(
CSP_REPORTS_ADDITIONAL_HANDLERS=["cspreports.tests.test_utils.my_handler"],
CSP_REPORTS_EMAIL_ADMINS=False,
CSP_REPORTS_LOG=False,
CSP_REPORTS_SAVE=False,
):
utils.process_report(request)
self.assertTrue(request.my_handler_called)
@override_settings(CSP_REPORTS_FILTER_FUNCTION='cspreports.tests.test_utils.example_filter')
def test_filter_function(self):
""" Test that setting CSP_REPORTS_FILTER_FUNCTION allows the given function to filter out
requests.
"""
report1 = '{"document-uri": "http://not-included.com/"}'
report2 = '{"document-uri": "http://included.com/"}'
request = HttpRequest()
request._body = report1
with patch('cspreports.utils.log_report') as log_patch:
utils.process_report(request)
self.assertFalse(log_patch.called)
request._body = report2
utils.process_report(request)
self.assertTrue(log_patch.called)
def my_handler(request):
# just set an attribute so that we can see that this function has been called
request.my_handler_called = True
def example_filter(request):
""" Filters out reports with a 'document-uri' not from included.com. """
report = json.loads(request.body)
doc_uri = report.get('document-uri', '')
if doc_uri.startswith('http://included.com'):
return True
return False
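# Illustrative settings sketch (not part of the original tests): the dotted
# paths below are hypothetical and would point at project-level functions like
# `my_handler` and `example_filter` above.
#
#   CSP_REPORTS_ADDITIONAL_HANDLERS = ["myproject.csp.my_handler"]
#   CSP_REPORTS_FILTER_FUNCTION = "myproject.csp.example_filter"
#   CSP_REPORTS_EMAIL_ADMINS = False
#   CSP_REPORTS_LOG = True
#   CSP_REPORTS_SAVE = True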
class TestParseDateInput(SimpleTestCase):
"""Test `parse_date_input` function."""
def test_aware(self):
with self.settings(USE_TZ=True, TIME_ZONE='Europe/Prague'):
self.assertEqual(parse_date_input('2016-05-25'), timezone.make_aware(datetime(2016, 5, 25)))
def test_naive(self):
with self.settings(USE_TZ=False):
self.assertEqual(parse_date_input('2016-05-25'), datetime(2016, 5, 25))
def test_invalid_date(self):
with self.assertRaisesMessage(ValueError, 'is not a valid date.'):
parse_date_input('2016-13-25')
def test_invalid_input(self):
with self.assertRaisesMessage(ValueError, 'is not a valid date.'):
parse_date_input('INVALID')
class TestGetMidnight(SimpleTestCase):
"""Test `get_midnight` function."""
def test_aware(self):
with self.settings(USE_TZ=True, TIME_ZONE='Europe/Prague'):
# 00:05 in CEST is 22:05 day before in UTC
mock_now = datetime(2016, 4, 26, 22, 5, tzinfo=timezone.utc)
with patch('cspreports.utils.now', return_value=mock_now):
self.assertEqual(get_midnight(), datetime(2016, 4, 26, 22, 0, tzinfo=timezone.utc))
def test_naive(self):
with self.settings(USE_TZ=False):
mock_now = datetime(2016, 4, 27, 12, 34)
with patch('cspreports.utils.now', return_value=mock_now):
self.assertEqual(get_midnight(), datetime(2016, 4, 27))
class SaveReportTest(TestCase):
def test_save_report_missing_root_element(self):
""" Test that `save_report` still saves the report to the DB when the 'csp-report'
root element is missing, but marks it as invalid. """
assert CSPReport.objects.count() == 0 # sanity
body = '{"document-uri": "http://example.com/"}'
request = RequestFactory(HTTP_USER_AGENT='Agent007').post('/dummy/', body, content_type=JSON_CONTENT_TYPE)
utils.save_report(request)
reports = CSPReport.objects.all()
self.assertQuerysetEqual(reports.values_list('user_agent'), [('Agent007', )], transform=tuple)
report = reports[0]
self.assertEqual(report.json, body)
self.assertFalse(report.is_valid)
def test_save_report_no_agent(self):
"""Test that the `save_report` handler correctly handles missing user agent header."""
request = RequestFactory().post('/dummy/', '{"document-uri": "http://example.com/"}',
content_type=JSON_CONTENT_TYPE)
utils.save_report(request)
report = CSPReport.objects.first()
self.assertEqual(report.user_agent, '')
def test_save_report_correct_format_missing_mandatory_fields(self):
""" Test that the `save_report` saves CSPReport instance even if some required CSP Report
fields are missing. However, the report should have its 'is_valid' field set to False.
"""
assert CSPReport.objects.count() == 0 # sanity
body = {
'csp-report': {
'document-uri': 'http://protected.example.cz/',
'referrer': '', # Required, but (for some reason) we treat an empty value as valid
'blocked-uri': '', # Ditto
'violated-directive': 'Very protective directive.',
'original-policy': 'Nothing is allowed.'
}
}
request = RequestFactory(HTTP_USER_AGENT='Agent007').post('/dummy/', json.dumps(body),
content_type=JSON_CONTENT_TYPE)
utils.save_report(request)
reports = CSPReport.objects.all()
self.assertQuerysetEqual(reports.values_list('user_agent'), [('Agent007', )], transform=tuple)
report = reports[0]
self.assertEqual(report.json, json.dumps(body))
self.assertTrue(report.is_valid)
def test_save_report_correct_optional_fields(self):
""" Test that the `save_report` saves all field values correctly, including coercion to the
correct type(s).
"""
assert CSPReport.objects.count() == 0 # sanity
body = {
'csp-report': {
'document-uri': 'http://protected.example.cz/',
'referrer': 'http://referrer.example.cz/',
'blocked-uri': 'http://dangerous.example.cz/',
'violated-directive': 'Very protective directive.',
'original-policy': 'Nothing is allowed.',
'source-file': 'nasty-script.js',
'status-code': 0,
'line-number': '36',
'column-number': 32,
}
}
request = RequestFactory(HTTP_USER_AGENT='Agent007').post('/dummy/', json.dumps(body),
content_type=JSON_CONTENT_TYPE)
utils.save_report(request)
report = CSPReport.objects.first()
self.assertEqual(report.json, json.dumps(body))
self.assertEqual(report.user_agent, 'Agent007')
self.assertEqual(report.document_uri, 'http://protected.example.cz/')
self.assertEqual(report.referrer, 'http://referrer.example.cz/')
self.assertEqual(report.blocked_uri, 'http://dangerous.example.cz/')
self.assertEqual(report.violated_directive, 'Very protective directive.')
self.assertEqual(report.original_policy, 'Nothing is allowed.')
self.assertEqual(report.source_file, 'nasty-script.js')
self.assertEqual(report.status_code, 0)
self.assertEqual(report.line_number, 36)
self.assertEqual(report.column_number, 32)
self.assertTrue(report.is_valid)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author : Santosh
# e-mail : kcraj2[AT]gmail[DOT]com
# Date created : 08 Jul 2016
# Last modified : 11 Jul 2016
"""------------------------------------------------------------------------
Browse the WSJ0 and WSJ1 corpora, parse the prompts (transcriptions),
and categorize the utts into documents (documents are articles).
This article info is obtained from the <PROMPT_ID>.
Dumps the unique train/test keys in data/
------------------------------------------------------------------------"""
import os
import sys
# import socket
import string
import argparse
import re
def read_lexicon(lex_file):
""" Read the lexicon file and load it into a dictionary """
lex = {}
phs = {}
with open(lex_file, "r") as fpr:
for line in fpr:
line = line.strip().lower()
tokens = line.split(" ")
ph_seq = ""
for i, tok in enumerate(tokens):
if tok.strip() == '':
continue
else:
if i > 0:
ph = re.sub('[0-9]', '', tok)
phs[tok] = ph
ph_seq += ph + " "
lex[tokens[0]] = ph_seq.strip()
if VERBOSE:
print('No. of phonemes:', len(phs), 'after mapping:',
len(set(list(phs.values()))))
print('No. of words in lexicon:', len(lex))
return lex
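# Illustrative sketch of the lexicon format read_lexicon() expects (the entry
# below is made up): each line is "<WORD> <PHONE> <PHONE> ...". Stress digits
# are stripped from the phones and the result maps word -> phone sequence.
#
#   "water w ao1 t er0"  ->  lex["water"] == "w ao t er"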
def read_all_prompts(fpaths):
""" Get all prompts in one list """
data = []
for fp in fpaths:
with open(fp, 'r') as fpr:
data += fpr.read().split("\n")
new_data = [d for d in data if len(d.strip()) > 1]
return new_data
def read_simple_flist(fpath):
""" Read every line and put it in a list """
fids = []
with open(fpath, 'r') as fpr:
fids = fpr.read().split("\n")
if fids[-1].strip() == '':
fids = fids[:-1]
return fids
def get_ptx_fpaths(out_fpath_file):
""" Get all the file paths of prompts files """
os.system("find " + WSJ0 + " -type f -name \"*.ptx\" > " + out_fpath_file)
os.system("find " + WSJ1 + " -type f -name \"*.ptx\" >> " + out_fpath_file)
def get_docIDs_from_prompts(data, doc_d, utt_d, utt_txt_d, utt_ph_d, lex):
""" Parse the prompts and get the utt to doc ID mappings """
found = 0
not_found = 0 # tokens not found in lexicon
incom = 0
txt_utt_d = {} # utt txt to utt ID mapping (to get the unique utts)
# oth = {}
for utt_line in data:
utt_line = utt_line.strip()
vals = utt_line.split("(")
id_tmp = vals[-1][:-1]
utt = utt_line[:-len(id_tmp)-2].strip().lower()
"""
translator = str.maketrans({key: None for key in string.punctuation})
clean_utt = utt.translate(translator)
clean_utt = re.sub("\s\s+", " ", clean_utt) # remove multiple spaces
utt = clean_utt
"""
utt = re.sub("\.|,|\"|\?|\(|\)|;|\&|\$|\%|\{|\}|\[|\]|:|/|~|`|\!", "", utt)
utt = re.sub("\-", " ", utt)
# m = re.search("^\'[a-z]", utt)
# if m is not None:
# utt = re.sub("\'", "", utt)
pt_tmp = id_tmp.split(" ")[-1].split(".")
utt_id = id_tmp.split(" ")[0].strip()
# https://catalog.ldc.upenn.edu/docs/LDC93S6A/csrnov92.html
# ptx format (<UTT_ID> <PROMPT_ID>)
# PROMPT_ID = <YEAR>.<FILE-NUMBER>.<ARTICLE-NUMBER>.<PARAGRAPH-NUMBER>.<SENTENCE-NUMBER>
# article ID as doc ID
doc_id = ''
if len(pt_tmp) == 5:
doc_id = pt_tmp[2] # 2 => get article ID
else:
incom += 1
# oth[pt_tmp[0]] = 1
# update the doc_d dictionary
if doc_id in doc_d:
doc_d[doc_id].append(utt_id)
else:
doc_d[doc_id] = [utt_id]
# check if the sentence is repeating
if utt in txt_utt_d:
txt_utt_d[utt].append(utt_id)
else:
txt_utt_d[utt] = [utt_id]
# update the utt_d and utt_txt_d dictionaries
if utt_id in utt_d:
continue
else:
utt_d[utt_id] = doc_id
utt_txt_d[utt_id] = utt
utt_ph = ""
tokens = utt.split()
for tok in tokens:
try:
utt_ph += lex[tok] + " "
found += 1
except KeyError:
not_found += 1
# m = re.search('[0-9]+', tok)
# if m is None:
# print(tok) #, 'not found in lexicon.')
utt_ph_d[utt_id] = utt_ph
if VERBOSE:
print('Utts with incomplete prompt IDs:', incom)
print('No. of tokens not found in lexicon:', not_found,
'({:.2f} %)'.format((float(not_found) * 100) / found))
return txt_utt_d
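# Illustrative sketch of the ptx parsing above (the prompt line is made up but
# follows the documented format "<TEXT> (<UTT_ID> <PROMPT_ID>)"):
#
#   utt_line = "this is a sample prompt (01ac0201 87.126.0241.03.01)"
#   id_tmp   = utt_line.split("(")[-1][:-1]         # "01ac0201 87.126.0241.03.01"
#   utt_id   = id_tmp.split(" ")[0]                 # "01ac0201"
#   doc_id   = id_tmp.split(" ")[-1].split(".")[2]  # "0241", the article ID used as doc ID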
def dump_utts_into_docs(utt_ids, doc_d, utt_txt_d, utt_ph_d, out_word_dir,
out_ph_dir, txt_utt_d, pwd, base):
""" Dump the utts in utt_ids into corresponding documents and save them
in out_word_dir/ and out_ph_dir/ """
fpu = None
if VERBOSE:
fpu = open(pwd + '../data/repeating_utts_' + base + '.txt', 'w')
count = 0
uniq_utt = {}
uniq_keys = []
uniq_doc_ids = []
for doc_id in sorted(list(doc_d.keys())):
utt_l = sorted(doc_d[doc_id])
out_word_f = out_word_dir + doc_id + ".txt"
out_ph_f = out_ph_dir + doc_id + ".txt"
utts_to_dump = []
utts_to_dump_ph = []
utt_l2 = sorted(list(set(utt_ids) & set(utt_l)))
count += len(utt_l2)
for utt_id in utt_l2:
try:
utt_ids_l = txt_utt_d[utt_txt_d[utt_id]]
if VERBOSE:
if len(utt_ids_l) > 1:  # only log utterances whose text repeats
for uid in utt_ids_l:
fpu.write(utt_txt_d[utt_id] + ":" + uid + "\n")
except KeyError:
print('Cannot find sentence.')
try:
uniq_utt[utt_txt_d[utt_id]] += 1
except KeyError:
uniq_utt[utt_txt_d[utt_id]] = 1
# utts_to_dump.append(utt_id + " " + utt_txt_d[utt_id])
# utts_to_dump_ph.append(utt_id + " " + utt_ph_d[utt_id])
utts_to_dump.append(utt_txt_d[utt_id])
utts_to_dump_ph.append(utt_ph_d[utt_id])
uniq_keys.append(utt_id)
if len(utts_to_dump) > 0:
uniq_doc_ids.append(doc_id)
with open(out_word_f, 'w') as fpw, open(out_ph_f, 'w') as fpp:
fpw.write("\n".join(utts_to_dump) + "\n")
fpp.write("\n".join(utts_to_dump_ph) + "\n")
uniq_keys = sorted(uniq_keys)
uniq_key_f = pwd + "../data/" + base + "_unique.keys"
with open(uniq_key_f, 'w') as fpw:
fpw.write("\n".join(uniq_keys) + "\n")
uniq_doc_ids = sorted(uniq_doc_ids)
uniq_doc_f = pwd + "../data/" + base + "_docs.keys"
with open(uniq_doc_f, 'w') as fpw:
fpw.write("\n".join(uniq_doc_ids) + "\n")
if VERBOSE:
print("No. of utts used:", count)
print("No. of unique utts:", len(uniq_utt))
fpu.close()
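# Illustrative sketch of the layout produced above (the document ID is made up):
#
#   EVAL/topics/wsj/words/0241.txt     one cleaned utterance per line
#   EVAL/topics/wsj/phonemes/0241.txt  the matching phoneme sequences, line-aligned
#   data/training_unique.keys          sorted unique utterance IDs that were used
#   data/training_docs.keys            sorted document (article) IDs that were written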
def main():
""" main method """
pwd = os.path.dirname(os.path.realpath(__file__)) + "/"
out_fpath_file = pwd + "../data/prompts.fpaths"
get_ptx_fpaths(out_fpath_file)
fpaths = read_simple_flist(out_fpath_file)
lex_file = pwd + "../data/lexicon.txt"
lex = read_lexicon(lex_file)
if VERBOSE:
print('Total no. of prompt files:', len(fpaths))
# data = read_ptx_file('all_ptx.txt')
data = read_all_prompts(fpaths)
if VERBOSE:
print('Total no. of prompts:', len(data))
utt_txt_d = {} # utt ID to text mapping
utt_ph_d = {} # utt ID to phoneme seq mapping
utt_d = {} # utt ID to docID mapping
doc_d = {} # doc to utt [] mapping
txt_utt_d = get_docIDs_from_prompts(data, doc_d, utt_d, utt_txt_d,
utt_ph_d, lex)
if VERBOSE:
with open(pwd + '../data/unique_utt_IDs.txt', 'w') as fpw:
for txt, uid_l in txt_utt_d.items():
fpw.write(txt + " " + ",".join(uid_l) + "\n")
print('No. of docs (articles):', len(doc_d))
print('No. of utts with doc IDs:', len(utt_d))
print('No. of utts with doc IDs and text:', len(utt_txt_d))
print('No. of unique utts (based on text):', len(txt_utt_d))
train_ids = read_simple_flist(pwd + '../data/training_si84.keys')
train_ids += read_simple_flist(pwd + '../data/training_si284.keys')
test_ids = read_simple_flist(pwd + '../data/test_eval92.keys')
if VERBOSE:
print('Train utt IDs:', len(train_ids))
print('Test utt IDs:', len(test_ids))
# Dump the utts in respective documents
out_dir = os.path.realpath(pwd + "/EVAL/topics/wsj/") + "/"
train_out = out_dir + "words/"
test_out = out_dir + "words/"
train_ph_out = out_dir + "phonemes/"
test_ph_out = out_dir + "phonemes/"
os.makedirs(out_dir, exist_ok=True)
os.system("mkdir -p " + train_out + " " + test_out + " " + \
train_ph_out + " " + test_ph_out)
if VERBOSE:
print('Created the dirs:')
print(out_dir + '\n' + train_out + '\n' + test_out + '\n' + \
train_ph_out + '\n' + test_ph_out)
dump_utts_into_docs(sorted(train_ids), doc_d, utt_txt_d, utt_ph_d,
train_out, train_ph_out, txt_utt_d, pwd, "training")
dump_utts_into_docs(sorted(test_ids), doc_d, utt_txt_d, utt_ph_d,
test_out, test_ph_out, txt_utt_d, pwd, "test")
print('Data preparation for topic based document clustering is done.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('wsj0', help='path to wsj0')
parser.add_argument('wsj1', help='path to wsj1')
parser.add_argument('--verbose', action='store_true',
help='Display progress and summary information while processing.')
args = parser.parse_args()
WSJ0 = os.path.realpath(args.wsj0) + "/"
WSJ1 = os.path.realpath(args.wsj1) + "/"
VERBOSE = args.verbose
main()
"""
Lucas didn't like this automation
host_addr = socket.gethostbyaddr(socket.gethostname())[0]
host = host_addr.split(".")[1]
VERBOSE = False
WSJ0 = ''
WSJ1 = ''
# BUT cluster
if host == "fit":
print('Host: BUT cluster')
WSJ0 = "/mnt/matylda2/data/WSJ0/"
WSJ1 = "/mnt/matylda2/data/WSJ1/"
# CLSP cluster
elif host == "clsp":
print('Host: CLSP cluster')
WSJ0 = "/export/corpora5/LDC/LDC93S6B/"
WSJ1 = "/export/corpora5/LDC/LDC94S13B/"
else:
print("Manually enter the path of WSJ0 and WSJ1 in the source file:",
sys.argv[0])
sys.exit()
"""
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.resourcemanager_v3.services.tag_keys import pagers
from google.cloud.resourcemanager_v3.types import tag_keys
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import TagKeysTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import TagKeysGrpcAsyncIOTransport
from .client import TagKeysClient
class TagKeysAsyncClient:
"""Allow users to create and manage tag keys."""
_client: TagKeysClient
DEFAULT_ENDPOINT = TagKeysClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = TagKeysClient.DEFAULT_MTLS_ENDPOINT
tag_key_path = staticmethod(TagKeysClient.tag_key_path)
parse_tag_key_path = staticmethod(TagKeysClient.parse_tag_key_path)
common_billing_account_path = staticmethod(
TagKeysClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
TagKeysClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(TagKeysClient.common_folder_path)
parse_common_folder_path = staticmethod(TagKeysClient.parse_common_folder_path)
common_organization_path = staticmethod(TagKeysClient.common_organization_path)
parse_common_organization_path = staticmethod(
TagKeysClient.parse_common_organization_path
)
common_project_path = staticmethod(TagKeysClient.common_project_path)
parse_common_project_path = staticmethod(TagKeysClient.parse_common_project_path)
common_location_path = staticmethod(TagKeysClient.common_location_path)
parse_common_location_path = staticmethod(TagKeysClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TagKeysAsyncClient: The constructed client.
"""
return TagKeysClient.from_service_account_info.__func__(TagKeysAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TagKeysAsyncClient: The constructed client.
"""
return TagKeysClient.from_service_account_file.__func__(TagKeysAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return TagKeysClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
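# Illustrative sketch (the result depends on the caller's environment and the
# GOOGLE_API_USE_* variables described above):
#
#   options = ClientOptions(api_endpoint=None, client_cert_source=None)
#   endpoint, cert_source = TagKeysAsyncClient.get_mtls_endpoint_and_cert_source(options)
#   # endpoint is DEFAULT_ENDPOINT or DEFAULT_MTLS_ENDPOINT; cert_source may be None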
@property
def transport(self) -> TagKeysTransport:
"""Returns the transport used by the client instance.
Returns:
TagKeysTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(TagKeysClient).get_transport_class, type(TagKeysClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, TagKeysTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the tag keys client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.TagKeysTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = TagKeysClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_tag_keys(
self,
request: Union[tag_keys.ListTagKeysRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTagKeysAsyncPager:
r"""Lists all TagKeys for a parent resource.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_list_tag_keys():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
request = resourcemanager_v3.ListTagKeysRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tag_keys(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.ListTagKeysRequest, dict]):
The request object. The request message for listing all
TagKeys under a parent resource.
parent (:class:`str`):
Required. The resource name of the new TagKey's parent.
Must be of the form ``folders/{folder_id}`` or
``organizations/{org_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.services.tag_keys.pagers.ListTagKeysAsyncPager:
The ListTagKeys response message.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_keys.ListTagKeysRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_tag_keys,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListTagKeysAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
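# Illustrative async usage sketch, mirroring the synchronous sample embedded in
# the docstring above (the parent value is hypothetical):
#
#   async def sample_list_tag_keys_async():
#       client = resourcemanager_v3.TagKeysAsyncClient()
#       request = resourcemanager_v3.ListTagKeysRequest(parent="organizations/123")
#       page_result = await client.list_tag_keys(request=request)
#       async for response in page_result:
#           print(response)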
async def get_tag_key(
self,
request: Union[tag_keys.GetTagKeyRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tag_keys.TagKey:
r"""Retrieves a TagKey. This method will return
``PERMISSION_DENIED`` if the key does not exist or the user does
not have permission to view it.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_get_tag_key():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
request = resourcemanager_v3.GetTagKeyRequest(
name="name_value",
)
# Make the request
response = client.get_tag_key(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.GetTagKeyRequest, dict]):
The request object. The request message for getting a
TagKey.
name (:class:`str`):
Required. A resource name in the format
``tagKeys/{id}``, such as ``tagKeys/123``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.types.TagKey:
A TagKey, used to group a set of
TagValues.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_keys.GetTagKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_tag_key,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_tag_key(
self,
request: Union[tag_keys.CreateTagKeyRequest, dict] = None,
*,
tag_key: tag_keys.TagKey = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new TagKey. If another request with the
same parameters is sent while the original request is in
process, the second request will receive an error. A
maximum of 300 TagKeys can exist under a parent at any
given time.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_create_tag_key():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
tag_key = resourcemanager_v3.TagKey()
tag_key.short_name = "short_name_value"
request = resourcemanager_v3.CreateTagKeyRequest(
tag_key=tag_key,
)
# Make the request
operation = client.create_tag_key(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.CreateTagKeyRequest, dict]):
The request object. The request message for creating a
TagKey.
tag_key (:class:`google.cloud.resourcemanager_v3.types.TagKey`):
Required. The TagKey to be created. Only fields
``short_name``, ``description``, and ``parent`` are
considered during the creation request.
This corresponds to the ``tag_key`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.resourcemanager_v3.types.TagKey` A
TagKey, used to group a set of TagValues.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tag_key])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_keys.CreateTagKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tag_key is not None:
request.tag_key = tag_key
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_tag_key,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
tag_keys.TagKey,
metadata_type=tag_keys.CreateTagKeyMetadata,
)
# Done; return the response.
return response
async def update_tag_key(
self,
request: Union[tag_keys.UpdateTagKeyRequest, dict] = None,
*,
tag_key: tag_keys.TagKey = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates the attributes of the TagKey resource.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_update_tag_key():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
tag_key = resourcemanager_v3.TagKey()
tag_key.short_name = "short_name_value"
request = resourcemanager_v3.UpdateTagKeyRequest(
tag_key=tag_key,
)
# Make the request
operation = client.update_tag_key(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.UpdateTagKeyRequest, dict]):
The request object. The request message for updating a
TagKey.
tag_key (:class:`google.cloud.resourcemanager_v3.types.TagKey`):
Required. The new definition of the TagKey. Only the
``description`` and ``etag`` fields can be updated by
this request. If the ``etag`` field is not empty, it
must match the ``etag`` field of the existing tag key.
Otherwise, ``FAILED_PRECONDITION`` will be returned.
This corresponds to the ``tag_key`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Fields to be updated. The mask may only contain
``description`` or ``etag``. If omitted entirely, both
``description`` and ``etag`` are assumed to be
significant.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.resourcemanager_v3.types.TagKey` A
TagKey, used to group a set of TagValues.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tag_key, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_keys.UpdateTagKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tag_key is not None:
request.tag_key = tag_key
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_tag_key,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tag_key.name", request.tag_key.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
tag_keys.TagKey,
metadata_type=tag_keys.UpdateTagKeyMetadata,
)
# Done; return the response.
return response
async def delete_tag_key(
self,
request: Union[tag_keys.DeleteTagKeyRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a TagKey. The TagKey cannot be deleted if it
has any child TagValues.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_delete_tag_key():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
request = resourcemanager_v3.DeleteTagKeyRequest(
name="name_value",
)
# Make the request
operation = client.delete_tag_key(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.DeleteTagKeyRequest, dict]):
The request object. The request message for deleting a
TagKey.
name (:class:`str`):
Required. The resource name of a TagKey to be deleted in
the format ``tagKeys/123``. The TagKey cannot be a
parent of any existing TagValues or it will not be
deleted successfully.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.resourcemanager_v3.types.TagKey` A
TagKey, used to group a set of TagValues.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_keys.DeleteTagKeyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_tag_key,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
tag_keys.TagKey,
metadata_type=tag_keys.DeleteTagKeyMetadata,
)
# Done; return the response.
return response
async def get_iam_policy(
self,
request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
*,
resource: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a TagKey. The returned policy
may be empty if no such policy or resource exists. The
``resource`` field should be the TagKey's resource name. For
example, "tagKeys/1234". The caller must have
``cloudresourcemanager.googleapis.com/tagKeys.getIamPolicy``
permission on the specified TagKey.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_get_iam_policy():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
request = resourcemanager_v3.GetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.get_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
The request object. Request message for `GetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being requested. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ [email protected] -
group:\ [email protected] - domain:google.com -
serviceAccount:\ [email protected]
role: roles/resourcemanager.organizationAdmin -
members: - user:\ [email protected] role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_iam_policy,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def set_iam_policy(
self,
request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
*,
resource: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy on a TagKey, replacing any
existing policy. The ``resource`` field should be the TagKey's
resource name. For example, "tagKeys/1234". The caller must have
``resourcemanager.tagKeys.setIamPolicy`` permission on the
identified TagKey.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_set_iam_policy():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
request = resourcemanager_v3.SetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.set_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
The request object. Request message for `SetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being specified. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ [email protected] -
group:\ [email protected] - domain:google.com -
serviceAccount:\ [email protected]
role: roles/resourcemanager.organizationAdmin -
members: - user:\ [email protected] role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.set_iam_policy,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def test_iam_permissions(
self,
request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None,
*,
resource: str = None,
permissions: Sequence[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that a caller has on the specified TagKey.
The ``resource`` field should be the TagKey's resource name. For
example, "tagKeys/1234".
There are no permissions required for making this API call.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_test_iam_permissions():
# Create a client
client = resourcemanager_v3.TagKeysClient()
# Initialize request argument(s)
request = resourcemanager_v3.TestIamPermissionsRequest(
resource="resource_value",
permissions=['permissions_value_1', 'permissions_value_2'],
)
# Make the request
response = client.test_iam_permissions(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
The request object. Request message for
`TestIamPermissions` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy detail is being requested. See
the operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
permissions (:class:`Sequence[str]`):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions,
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.test_iam_permissions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-resourcemanager",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TagKeysAsyncClient",)
|
|
import abc
from collections import OrderedDict
from datetime import date, datetime, timedelta
from io import BytesIO
import os
from textwrap import fill
from urllib.request import urlopen
from pandas._config import config
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import is_bool, is_float, is_integer, is_list_like
from pandas.core.frame import DataFrame
from pandas.io.common import (
_NA_VALUES,
_is_url,
_stringify_path,
_validate_header_arg,
get_filepath_or_buffer,
)
from pandas.io.excel._util import (
_fill_mi_header,
_get_default_writer,
_maybe_convert_usecols,
_pop_header_name,
get_writer,
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
_read_excel_doc = (
"""
Read an Excel file into a pandas DataFrame.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, ExcelFile, xlrd.Book, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable, default None
Return a subset of the columns.
* If None, then parse all columns.
* If int, then indicates last column to be parsed.
.. deprecated:: 0.24.0
Pass in a list of int instead from 0 to `usecols` inclusive.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
.. versionadded:: 0.24.0
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
.. versionadded:: 0.24.0
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 0.20.0
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
.. versionadded:: 0.19.0
false_values : list, default None
Values to consider as False.
.. versionadded:: 0.19.0
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '"""
+ fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skip_footer : int, default 0
Alias of `skipfooter`.
.. deprecated:: 0.23.0
Use `skipfooter` instead.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
to_excel : Write DataFrame to an Excel file.
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 NaN 1
1 NaN 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
)
@Appender(_read_excel_doc)
@deprecate_kwarg("skip_footer", "skipfooter")
def read_excel(
io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skip_footer=0,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds
):
for arg in ("sheet", "sheetname", "parse_cols"):
if arg in kwds:
raise TypeError(
"read_excel() got an unexpected keyword argument " "`{}`".format(arg)
)
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
elif engine and engine != io.engine:
raise ValueError(
"Engine should not be specified when passing "
"an ExcelFile - ExcelFile already has the engine set"
)
return io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds
)
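# A minimal usage sketch (not part of pandas): with ``sheet_name=None`` the
# parsed workbook comes back as an ordered mapping of sheet name -> DataFrame,
# as described in the docstring above.  ``"report.xlsx"`` is a hypothetical
# placeholder path.
def _example_read_all_sheets():
    frames = read_excel("report.xlsx", sheet_name=None)
    for name, frame in frames.items():
        print(name, frame.shape)
    return frames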
class _BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer):
# If filepath_or_buffer is a url, load the data into a BytesIO
if _is_url(filepath_or_buffer):
filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read())
elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
filepath_or_buffer, _, _, _ = get_filepath_or_buffer(filepath_or_buffer)
if isinstance(filepath_or_buffer, self._workbook_class):
self.book = filepath_or_buffer
elif hasattr(filepath_or_buffer, "read"):
# N.B. xlrd.Book has a read attribute too
filepath_or_buffer.seek(0)
self.book = self.load_workbook(filepath_or_buffer)
elif isinstance(filepath_or_buffer, str):
self.book = self.load_workbook(filepath_or_buffer)
else:
raise ValueError(
"Must explicitly set engine if not passing in" " buffer or path for io."
)
@property
@abc.abstractmethod
def _workbook_class(self):
pass
@abc.abstractmethod
def load_workbook(self, filepath_or_buffer):
pass
@property
@abc.abstractmethod
def sheet_names(self):
pass
@abc.abstractmethod
def get_sheet_by_name(self, name):
pass
@abc.abstractmethod
def get_sheet_by_index(self, index):
pass
@abc.abstractmethod
def get_sheet_data(self, sheet, convert_float):
pass
def parse(
self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds
):
_validate_header_arg(header)
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(OrderedDict.fromkeys(sheets).keys())
output = OrderedDict()
for asheetname in sheets:
if verbose:
print("Reading sheet {sheet}".format(sheet=asheetname))
if isinstance(asheetname, str):
sheet = self.get_sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
data = self.get_sheet_data(sheet, convert_float)
usecols = _maybe_convert_usecols(usecols)
if not data:
output[asheetname] = DataFrame()
continue
if is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None and is_list_like(header):
header_names = []
control_row = [True] * len(data[0])
for row in header:
if is_integer(skiprows):
row += skiprows
data[row], control_row = _fill_mi_header(data[row], control_row)
if index_col is not None:
header_name, _ = _pop_header_name(data[row], index_col)
header_names.append(header_name)
if is_list_like(index_col):
# Forward fill values for MultiIndex index.
if not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
# Check if we have an empty dataset
# before trying to collect data.
if offset < len(data):
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == "" or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
has_index_names = is_list_like(header) and len(header) > 1
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(
data,
names=names,
header=header,
index_col=index_col,
has_index_names=has_index_names,
squeeze=squeeze,
dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
usecols=usecols,
mangle_dupe_cols=mangle_dupe_cols,
**kwds
)
output[asheetname] = parser.read(nrows=nrows)
if not squeeze or isinstance(output[asheetname], DataFrame):
if header_names:
output[asheetname].columns = output[
asheetname
].columns.set_names(header_names)
except EmptyDataError:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
class ExcelWriter(metaclass=abc.ABCMeta):
"""
    Class for writing DataFrame objects into Excel sheets.  By default xlwt
    is used for xls files and openpyxl for xlsx.  See DataFrame.to_excel for
    typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
mode : {'w', 'a'}, default 'w'
File mode to use (write or append).
.. versionadded:: 0.24.0
Attributes
----------
None
Methods
-------
None
Notes
-----
None of the methods and properties are considered public.
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df.to_excel(writer)
To write to separate sheets in a single file:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df1.to_excel(writer, sheet_name='Sheet1')
... df2.to_excel(writer, sheet_name='Sheet2')
You can set the date format or datetime format:
>>> with ExcelWriter('path_to_file.xlsx',
    ...                  date_format='YYYY-MM-DD',
    ...                  datetime_format='YYYY-MM-DD HH:MM:SS') as writer:
... df.to_excel(writer)
You can also append to an existing Excel file:
>>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer:
... df.to_excel(writer, sheet_name='Sheet3')
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
    # - ``engine`` - string that gives the engine name. Necessary to
    #   instantiate the class directly and bypass the engine lookup done in
    #   ``ExcelWriter.__new__``.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if cls is ExcelWriter:
if engine is None or (isinstance(engine, str) and engine == "auto"):
if isinstance(path, str):
ext = os.path.splitext(path)[-1][1:]
else:
ext = "xlsx"
try:
engine = config.get_option("io.excel.{ext}.writer".format(ext=ext))
if engine == "auto":
engine = _get_default_writer(ext)
except KeyError:
raise ValueError("No engine for filetype: '{ext}'".format(ext=ext))
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@property
@abc.abstractmethod
def supported_extensions(self):
"""Extensions that writer engine supports."""
pass
@property
@abc.abstractmethod
def engine(self):
"""Name of engine."""
pass
@abc.abstractmethod
def write_cells(
self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
):
"""
        Write given formatted cells into an Excel sheet.
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: integer tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(
self,
path,
engine=None,
date_format=None,
datetime_format=None,
mode="w",
**engine_kwargs
):
# validate that this engine can handle the extension
if isinstance(path, str):
ext = os.path.splitext(path)[-1]
else:
ext = "xls" if engine == "xlwt" else "xlsx"
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = "YYYY-MM-DD"
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = "YYYY-MM-DD HH:MM:SS"
else:
self.datetime_format = datetime_format
self.mode = mode
def __fspath__(self):
return _stringify_path(self.path)
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError(
"Must pass explicit sheet_name or set " "cur_sheet property"
)
return sheet_name
def _value_with_fmt(self, val):
"""Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime):
fmt = self.datetime_format
elif isinstance(val, date):
fmt = self.date_format
elif isinstance(val, timedelta):
val = val.total_seconds() / float(86400)
fmt = "0"
else:
val = str(val)
return val, fmt
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith("."):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = "Invalid extension for engine '{engine}': '{ext}'".format(
engine=pprint_thing(cls.engine), ext=pprint_thing(ext)
)
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
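# A minimal sketch (not part of pandas) of the writer protocol documented in
# the comments above: an in-memory engine that records formatted cells instead
# of persisting them.  The name ``_DummyWriter`` and the ``.dummy`` extension
# are hypothetical; a real engine would also be registered via
# ``register_writer``.
class _DummyWriter(ExcelWriter):
    engine = "dummy"
    supported_extensions = (".dummy",)
    def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
                    freeze_panes=None):
        # collect the formatted cells per sheet instead of writing to disk
        self.sheets.setdefault(self._get_sheet_name(sheet_name), []).extend(cells)
    def save(self):
        # nothing to persist for the in-memory sketch
        pass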
class ExcelFile:
"""
    Class for parsing tabular Excel sheets into DataFrame objects.
    Uses xlrd, openpyxl or odf, depending on the ``engine`` argument.
    See read_excel for more documentation.
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object or xlrd workbook
If a string or path object, expected to be a path to xls or xlsx file.
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
        Acceptable values are None, ``xlrd``, ``openpyxl`` or ``odf``.
"""
from pandas.io.excel._odfreader import _ODFReader
from pandas.io.excel._openpyxl import _OpenpyxlReader
from pandas.io.excel._xlrd import _XlrdReader
_engines = {"xlrd": _XlrdReader, "openpyxl": _OpenpyxlReader, "odf": _ODFReader}
def __init__(self, io, engine=None):
if engine is None:
engine = "xlrd"
if engine not in self._engines:
raise ValueError("Unknown engine: {engine}".format(engine=engine))
self.engine = engine
# could be a str, ExcelFile, Book, etc.
self.io = io
# Always a string
self._io = _stringify_path(io)
self._reader = self._engines[engine](self._io)
def __fspath__(self):
return self._io
def parse(
self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds
):
"""
        Parse specified sheet(s) into a DataFrame.
        Equivalent to ``read_excel(ExcelFile, ...)``; see the read_excel
        docstring for more info on accepted parameters.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file.
"""
if "chunksize" in kwds:
raise NotImplementedError(
"chunksize keyword of read_excel " "is not implemented"
)
return self._reader.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds
)
@property
def book(self):
return self._reader.book
@property
def sheet_names(self):
return self._reader.sheet_names
def close(self):
"""close io if necessary"""
if hasattr(self.io, "close"):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
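# Usage sketch (not part of pandas): an ExcelFile keeps the workbook open so
# several sheets can be parsed without re-reading the file.  ``"report.xlsx"``
# is a hypothetical placeholder path.
def _example_excel_file():
    with ExcelFile("report.xlsx") as xls:
        first = xls.parse(sheet_name=0)
        last = xls.parse(sheet_name=xls.sheet_names[-1])
    return first, last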
|
|
#!/usr/bin/env python2
import os
import sys
import traceback
os.chdir(sys.path[0])
from subprocess import check_output, CalledProcessError
from time import sleep
from math import floor
from copy import deepcopy
import re
import pprint
from sqlalchemy import Column, ForeignKey, Integer, String, Date, DateTime,\
UniqueConstraint, create_engine
from sqlalchemy.sql import select, func, and_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker, scoped_session
from datetime import date, datetime, timedelta
from mako.template import Template
from mako.lookup import TemplateLookup
import calendar
import thread
import pid_handler
from flask import Flask
app = Flask(__name__)
LOCK_FILE = os.path.join(os.path.expanduser("~"), "activity-logger.lock")
Base = declarative_base()
class ActivityLog(Base):
__tablename__ = 'activity_log'
__table_args__ = (
UniqueConstraint('date', 'hour', 'workspace', 'command', 'title',
name='_date_hour_workspace_command_line_title_uc'),
)
# Here we define columns for the table address.
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
date = Column(Date())
hour = Column(Integer())
seconds = Column(Integer())
workspace = Column(String(255))
command = Column(String(255))
title = Column(String(255))
@property
def hh_mm_ss(self):
return hh_mm_ss(self.seconds)
def __repr__(self):
return ("<ActivityLog(date='%r',\n"
" hour=%d,\n"
" seconds=%d,\n"
" workspace='%r',\n"
" command='%r'\n"
" title='%r'\n"
" time:%s"
")>" % (self.date, self.hour, self.seconds, self.workspace,
self.command, self.title, hh_mm_ss(self.seconds)))
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:///activity.db')
engine.raw_connection().connection.text_factory = unicode
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine)
# Session = scoped_session(sessionmaker(bind=engine))
# session = Session()
RE_WMCTRL_DESKTOP = re.compile("([0-9]+)\s+([\*\-])\s+(DG\:\ .*)\s+(VP\: .*)"
"\s+(WA\: .*)\s+([0-9x]+)\s+(.*)")
RE_WMCTRL_OPEN_PROGRAMS = re.compile("([0-9xa-f]+)\s+([\-0-9]+)\s+([0-9]+)\s+"
"([A-Za-z0-9\_\-]+)\s+(.*)")
RE_XLSCLIENTS = re.compile("Window\ ([0-9a-fx]+)\:\n"
"\ \ Machine\:\ \ (.*)\n"
"\ \ Name\:\ \ (.*)\n"
"\ \ Command\:\ \ (.*)\n"
"\ \ Instance\/Class\: (.*)")
@app.route("/")
def index():
return get_html(template="index.html")
@app.route("/<today>/")
@app.route("/<today>/daily.html", alias=True)
def daily(today):
# print("TODAY:", today)
return get_html(today=today, template="index.html")
def ss():
return scoped_session(sessionmaker(bind=engine))
def ssr(created, session):
if created:
session.remove()
def _session(session=None):
if session is not None:
return False, session
return True, ss()
def _today(today=None):
if today is not None:
if isinstance(today, (str, unicode)):
y, m, d = today.split("-")
today = date(int(y), int(m), int(d))
# print("CONVERTED:", today)
return today
return date.today()
def _if_created(created, res, session=None):
if not created:
return res
_res = []
for r in res:
_res.append(r)
if session:
ssr(created, session)
return _res
def _print(*args, **kwargs):
if DEBUG:
for arg in args:
print arg,
print ""
        for k, v in kwargs.items():
            print k, ":", v
def print_r(obj, ret=False):
if not ret:
if DEBUG:
pprint.pprint(obj)
return pprint.pformat(obj)
def get_idle():
idle_ms = int(safe_check_output(*['xprintidle']))
idle_sec = floor(idle_ms / 1000)
return int(idle_sec)
def get_active_desktop():
output = safe_check_output(*['wmctrl', '-d'])
lines = output.split("\n")
# 0 * DG: 1280x720 VP: 0,0 WA: 0,43 2960x857 Personal
for l in lines:
match = RE_WMCTRL_DESKTOP.match(l)
if match:
# print match.groups()
if match.group(2) == '*':
return int(match.group(1)), match.group(7)
def get_xlsclients():
output = safe_check_output(*['xlsclients', '-la'])
"""
Window 0x4600206:
Machine: erm
Name: skype
Command: skype
Instance/Class: skype/Skype"""
clients = {}
for match in RE_XLSCLIENTS.finditer(output):
# print "Match:", match
if match:
# print "groups:", match.groups()
window_id = match.group(1)
clients[window_id] = {
"Window": window_id,
"Machine": match.group(2),
"Name": match.group(3),
"Command": match.group(4),
"Instance_Class": match.group(5)
}
return clients
def safe_check_output(*args, **kwargs):
try:
output = check_output(args)
except Exception as e:
traceback.print_exc()
return ""
return output
def get_open_windows(desktop_number=None, only_active=False):
"""
wmctrl -lp
0x0387af21 3 2893 erm Source of: file:///home/erm/git/activity-logger/reports/2014-11-28/daily.html - Mozilla Firefox
"""
try:
active_pid = check_output(["xdotool",
"getwindowfocus",
"getwindowpid"]).strip()
except CalledProcessError:
active_pid = None
if only_active:
return []
output = safe_check_output(*['wmctrl', '-lp'])
lines = output.split("\n")
# 0 * DG: 1280x720 VP: 0,0 WA: 0,43 2960x857 Personal
# clients = get_xlsclients()
# print_r(clients)
open_windows = []
processed_pids = []
for l in lines:
match = RE_WMCTRL_OPEN_PROGRAMS.match(l)
# print "match:",match
if match:
window_id = match.group(1)
group_desktop_number = int(match.group(2))
pid = match.group(3)
user = match.group(4)
title = match.group(5)
active = bool(active_pid == pid)
# Only process active window
if only_active and not active:
continue
# Only process pids that are on the current desktop
if desktop_number is not None and \
group_desktop_number != desktop_number:
continue
if pid in processed_pids:
                # Only process a pid once.  Some processes have more than
                # one window.
continue
processed_pids.append(pid)
# Convert the title so it'll play nicely with sqlite.
if not isinstance(title, unicode):
title = title.decode("utf-8")
title_lower = title.lower()
for find, replace in REPLACE_RULES:
if isinstance(find, str):
if find.lower() in title_lower:
# print "REPLACE:", title, "=>", replace
title = replace
break
elif find.match(title):
# print "REPLACE:", title, "=>", replace
title = replace
break
proc_path = os.path.join("/proc", pid)
exe_path = os.path.join(proc_path, 'exe')
cmd_path = os.path.join(proc_path, 'cmdline')
realpath = os.path.realpath(exe_path)
command_line = "error"
try:
with open(cmd_path,'r') as fp:
command_line = fp.read()
command_line = command_line.rstrip("\x00")
except Exception as e:
command_line = ""
traceback.print_exc()
command = os.path.basename(realpath)
window_data = {
"window_id": window_id,
"desktop_number": group_desktop_number,
"pid": pid,
"user": user,
"window_title": title,
"command": command,
"active": active,
"command_line": command_line
}
open_windows.append(window_data)
if only_active:
                # Save processing: when only_active is set we only need the
                # first (active) window.
break
return open_windows
def report(today=None, session=None):
if today is None:
today = date.today()
ESC = chr(27)
# print "{ESC}[2J{ESC}[0;0H".format(ESC=ESC)
_print ("*"*20)
created, session = _session(session)
activity = session.query(ActivityLog.workspace,
ActivityLog.command,
func.sum(ActivityLog.seconds))\
.filter(ActivityLog.date == today)\
.group_by(ActivityLog.workspace,
ActivityLog.command)
workspace = ""
total_seconds = 0
for a in activity:
# print_r(a)
if a[0] != workspace:
if total_seconds:
print "%s %s" % (workspace, hh_mm_ss(total_seconds))
workspace = a[0]
total_seconds = 0
print "-=[ %s ]=-" % workspace
print hh_mm_ss(a[2]), a[1]
total_seconds += a[2]
print "%s %s" % (workspace, hh_mm_ss(total_seconds))
spec = and_(ActivityLog.date == today,
ActivityLog.command != "idle")
daily = session.query(ActivityLog.workspace, ActivityLog.date,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.date,
ActivityLog.workspace)
print "By Workspace"
_date = ""
for a in daily:
if _date != a[1]:
_date = a[1]
print "-[ %s ]-" % _date
print hh_mm_ss(a[2]), a[0]
ssr(created, session)
def workspace_active_data(today=None, session=None):
today = _today(today)
created, session = _session(session)
spec = and_(ActivityLog.date == today,
ActivityLog.command != "idle")
cols = ['Workspace', 'Time']
title = "Workspace - Active"
res = session.query(ActivityLog.workspace,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.date,
ActivityLog.workspace)
res = _if_created(created, res, session)
return {
"title": title,
"cols": cols,
"data": res
}
def workspace_command_title_data(today=None, session=None):
today = _today(today)
created, session = _session(session)
spec = and_(ActivityLog.date == today)
cols = ['Workspace', 'Command', 'Title', 'Time']
title = "Workspace, Command and Title"
res = session.query(ActivityLog.workspace,
ActivityLog.command,
ActivityLog.title,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.workspace,
ActivityLog.command,
ActivityLog.title)
res = _if_created(created, res, session)
return {
"title": title,
"cols": cols,
"data": res
}
def workspace_command_data(today=None, session=None):
today = _today(today)
spec = and_(ActivityLog.date == today)
cols = ['Workspace', 'Command', 'Time']
title = "Workspace & Command"
created, session = _session(session)
res = session.query(ActivityLog.workspace,
ActivityLog.command,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.workspace,
ActivityLog.command)
res = _if_created(created, res, session)
return {
"title": title,
"cols": cols,
"data": res
}
def workspace_hour_data_active(today=None, session=None):
today = _today(today)
created, session = _session(session)
spec = and_(ActivityLog.date == today,
ActivityLog.command != "idle")
cols = ['Workspace', 'Hour', 'Time']
title = "Workspace & Hour - Active"
res = session.query(ActivityLog.workspace,
ActivityLog.hour,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.workspace,
ActivityLog.hour)
res = _if_created(created, res, session)
return {
"title": title,
"cols": cols,
"data": res
}
def make_dashes(cols):
dashes = []
for col in cols:
dashes.append("-"*len(col))
return dashes
def command_data(today=None, session=None):
today = _today(today)
created, session = _session(session)
spec = and_(ActivityLog.date == today)
cols = ['Command', 'Time']
title = "Command"
res = session.query(ActivityLog.command,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.command)
res = _if_created(created, res, session)
return {
"title": title,
"cols": cols,
"data": res
}
def print_row(row, cols):
# print "ROW:",row
# print "COLS",cols
data = []
for i, col in enumerate(row):
try:
col_title = cols[i]
except IndexError,err:
col_title = "IndexError:%s index:%i" % (err, i)
if col_title == 'Time':
data.append(hh_mm_ss(col))
continue
if col_title == 'Hour':
data.append("%02d:00" % col)
continue
if col_title == 'Command':
parts = col.split("\x00")
basename = os.path.basename(parts[0])
space_parts = basename.split(" ")
basename = space_parts[0]
if basename in ('python', 'python2', 'python3', 'bash', 'sh',
'ruby', 'perl'):
# u'/usr/bin/python\x00/usr/bin/terminator' basename:u'python'
try:
basename = os.path.basename(parts[1])
except IndexError:
pass
data.append("%s" % (basename,))
continue
_str = "%s" % col
_str = _str.replace("|", "|")
data.append(_str)
return " | ".join(data)
def get_week_bounds(today=None, session=None):
today = _today(today)
created, session = _session(session)
# created, session = _session(session)
# For example, 2004 begins on a Thursday, so the first week of ISO year 2004 begins on Monday, 29 Dec 2003 and ends on Sunday, 4 Jan 2004, so that date(2003, 12, 29).isocalendar() == (2004, 1, 1) and date(2004, 1, 4).isocalendar() == (2004, 1, 7).
one_day = timedelta(days=1)
first_day = deepcopy(today)
last_day = deepcopy(today)
_print ("today:", today)
if first_day.strftime("%w") == "0":
# The first day is Sunday
return first_day, (last_day + timedelta(days=6))
while weeks_match((first_day - one_day), today):
# count down days until the week changes
first_day = first_day - one_day
_print ((first_day - one_day).strftime("%W") == today.strftime("%W"),\
(first_day - one_day).strftime("%W"),' == ', today.strftime("%W"))
while weeks_match((last_day + one_day), today):
# count up days until the week changes
last_day = last_day + one_day
_print ((last_day + one_day).strftime("%W") == today.strftime("%W"),
(last_day + one_day).strftime("%W"),' == ', today.strftime("%W"))
if first_day.strftime("%w") != "0":
first_day = first_day - one_day
last_day = last_day - one_day
_print("first_day:", first_day.strftime("%c"))
_print("last_day:", last_day.strftime("%c"))
ssr(created, session)
return first_day, last_day
def date_key(date, today=None):
today = _today(today)
if date is None:
return ""
if date != today:
return date.strftime(
"<a href='../%Y-%m-%d/daily.html'>%Y-%m-%d %a</a>")
return date.strftime("<a href='../%Y-%m-%d/daily.html'><b>%Y-%m-%d %a</b></a>")
def weekly_breakdown(today=None, session=None):
today = _today(today)
created, session = _session(session)
low, high = get_week_bounds(today=today)
_print("low:", low)
_print("high:", high)
spec = and_(ActivityLog.command != "idle",
ActivityLog.date >= low,
ActivityLog.date <= high)
cols = ['Workspace']
dates = []
date_data = {}
numdays = 7
date_list = [low + timedelta(days=x) for x in range(0, numdays)]
date_list = sorted(date_list)
_print ("date_list:", date_list)
for d in date_list:
_date = date_key(d, today)
if _date not in dates:
dates.append(_date)
if _date not in cols:
cols.append(_date)
cols.append('Total')
title = "Weekly - Active"
res = session.query(ActivityLog.workspace,
ActivityLog.date,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.workspace,
ActivityLog.date)\
.order_by(ActivityLog.date.asc(), ActivityLog.workspace.asc())
totals = {}
for r in res:
_date = date_key(r.date, today)
if r.workspace not in date_data:
date_data[r.workspace] = {}
print_r(r)
date_data[r.workspace][_date] = "%s" % (hh_mm_ss(r[2]),)
if r.date == today:
date_data[r.workspace][_date] = "<b>%s</b>" % (hh_mm_ss(r[2]), )
if r.workspace not in totals:
totals[r.workspace] = 0
totals[r.workspace] += r[2]
for workspace in date_data:
for _date in dates:
if _date not in date_data[workspace]:
date_data[workspace][_date] = " "
for d in date_list:
_date = date_key(d, today)
if _date not in date_data[workspace]:
date_data[workspace][_date] = " "
"""
{u'Hacking': {'2014-11-28 Fri': '00:00:40', '2014-11-29 Sat': ' '},
u'Personal': {'2014-11-28 Fri': '05:01:05', '2014-11-29 Sat': '00:40:20'},
u'Programing': {'2014-11-28 Fri': '02:51:50', '2014-11-29 Sat': ' '},
u'Sesamii': {'2014-11-28 Fri': '00:34:15', '2014-11-29 Sat': ' '},
u'Task': {'2014-11-28 Fri': '01:58:05', '2014-11-29 Sat': '00:54:20'}}
"""
print_r(date_data)
date_data_formatted = []
for workspace in date_data:
row = [workspace]
keys = date_data[workspace].keys()
keys = sorted(keys)
_print ("keys:")
print_r(keys)
for _date in keys:
row.append(date_data[workspace][_date])
row.append(hh_mm_ss(totals[workspace]))
date_data_formatted.append(row)
_print (" | ".join(row))
print_r(date_data_formatted)
"""
GOAL
| Sun | Mon | Tue | Wed | Thru | Fri | Sat
------------| ----- | --- | --- | --- | ---- | --- | ---
workspace 1 | 00:00 |
workspace 2 |
workspace 3 |
"""
_print ("COLS")
print_r(cols)
ssr(created, session)
return {
"title": title,
"cols": cols,
"data": date_data_formatted
}
def get_yesterday(today=None, session=None):
today = _today(today)
created, session = _session(session)
spec = and_(ActivityLog.date < today)
res = session.query(ActivityLog.date)\
.filter(spec)\
.group_by(ActivityLog.date)\
.order_by(ActivityLog.date.desc())\
.first()
if res:
_print ("yesterday:", res[0])
yesterday = res[0]
ssr(created, session)
return yesterday
ssr(created, session)
return None
def get_tomorrow(today=None, session=None):
today = _today(today)
created, session = _session(session)
spec = and_(ActivityLog.date > today)
res = session.query(ActivityLog.date)\
.filter(spec)\
.group_by(ActivityLog.date)\
.order_by(ActivityLog.date.asc())\
.first()
if res:
_print ("tomorrow:", res[0])
tomorrow = res[0]
ssr(created, session)
return tomorrow
# _today = get.today()
# one_day = timedelta(days=1)
# tomorrow = today + one_day
ssr(created, session)
return None
def get_next_week(today=None, session=None):
today = _today(today)
created, session = _session(session)
next_week = today + timedelta(days=7)
first, last = get_week_bounds(next_week, session=session)
first = first - timedelta(days=1)
day_with_activity = get_tomorrow(first, session=session)
ssr(created, session)
return day_with_activity
def get_last_week(today=None, session=None):
today = _today(today)
created, session = _session(session)
last_week = today - timedelta(days=7)
first, last = get_week_bounds(last_week, session=session)
first = first - timedelta(days=1)
day_with_activity = get_tomorrow(first)
if day_with_activity == today:
return None
return day_with_activity
def weeks_match(date1, date2):
if not isinstance(date1, date) or not isinstance(date2, date):
return False
if date1 == date2:
return True
return date1.strftime("%W") == date2.strftime("%W")
def get_all_days_with_activity(session=None):
created, session = _session(session)
low_date = session.query(ActivityLog.date)\
.order_by(ActivityLog.date.asc())\
.first()
high_date = session.query(ActivityLog.date)\
.order_by(ActivityLog.date.desc())\
.first()
if not low_date or not high_date:
ssr(created, session)
return []
low, end_date = get_week_bounds(high_date[0], session=session)
days = []
_date = low_date.date
while _date <= end_date:
days.append(_date)
_date += timedelta(days=1)
ssr(created, session)
return days
def monthly_breakdown(today=None, session=None):
created, session = _session(session)
_really_today = _today()
today = _today(today)
link_format = "<div class='{css_class}'><a href='/{date}/'>{day}<br>{day_data}</a></div>"
title = "Month Activity - %s" % today.strftime("%b %Y")
cols = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
rows = []
row = []
cal = calendar.Calendar(6)
cnt = 0
for t in cal.itermonthdates(today.year, today.month):
spec = and_(ActivityLog.date == t,
ActivityLog.command != 'idle')
res = session.query(ActivityLog.date,
ActivityLog.workspace,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.workspace)\
.order_by(ActivityLog.workspace)
if cnt % 7 == 0 and len(row) != 0:
rows.append(row)
row = []
day_data = []
print("RES:",res)
for r in res:
if r[2] > 0:
contents = "{workspace} <span class='pull-right'>{hh_mm_ss}</span>".format(
workspace=r[1],
hh_mm_ss=hh_mm_ss(r[2])
)
day_data.append(contents)
css_class = ["day"]
if t == _really_today:
css_class.append('really_today')
if t == today:
css_class.append("today")
link = link_format.format(
date="%s" % t,
day="%s" % t.day,
day_data="%s" % "<br>".join(day_data),
css_class=" ".join(css_class)
)
row.append(link)
cnt += 1
rows.append(row)
ssr(created, session)
return {
"title": title,
"cols": cols,
"data": rows
}
def monthly_summary(today=None, session=None):
_print("Monthly Summary")
created, session = _session(session)
today = _today(today)
first = None
last = None
cal = calendar.Calendar(6)
for t in cal.itermonthdates(today.year, today.month):
if first is None:
first = t
last = t
_print("first:", first)
_print("last:", last)
spec = and_(ActivityLog.date >= first,
ActivityLog.date <= last)
res = session.query(ActivityLog.workspace,
func.sum(ActivityLog.seconds))\
.filter(spec)\
.group_by(ActivityLog.workspace)\
.order_by(ActivityLog.workspace.asc())
cols = ['Workspace', 'total']
rows = []
for r in res:
row = [r[0], hh_mm_ss(r[1])]
rows.append(row)
title = "Summary %s" % today.strftime("%b %Y")
ssr(created, session)
return {
"title": title,
"cols": cols,
"data": rows
}
def get_html(today=None, session=None, template="index.html"):
today = _today(today)
really_today = _today()
created, session = _session(session)
kwargs = {
"today": today,
"session": session
}
last_month = {
"today": today - timedelta(days=31),
"session": session
}
by = []
by.append(weekly_breakdown(**kwargs))
by.append(monthly_breakdown(**kwargs))
by.append(monthly_summary(**kwargs))
by.append(monthly_breakdown(**last_month))
by.append(monthly_summary(**last_month))
by.append(workspace_active_data(**kwargs))
by.append(workspace_hour_data_active(**kwargs))
by.append(workspace_command_data(**kwargs))
by.append(command_data(**kwargs))
by.append(workspace_command_title_data(**kwargs))
# print_r(weekly_breakdown())
mylookup = TemplateLookup(directories=['templates'],
output_encoding='utf-8',
encoding_errors='replace')
_template = mylookup.get_template(template)
yesterday = get_yesterday(today=today)
tomorrow = get_tomorrow(today=today)
title = "Daily Activity %s" % today
next_week = get_next_week(today, session=session)
last_week = get_last_week(today, session=session)
html = _template.render(title=title,
hh_mm_ss=hh_mm_ss,
by=by,
basename=os.path.basename,
make_dashes=make_dashes,
print_row=print_row,
yesterday=yesterday,
today=today,
tomorrow=tomorrow,
last_week=last_week,
next_week=next_week,
really_today=really_today)
return html
def write_report(today=None, session=None):
today = _today(today)
created, session = _session(session)
html = get_html(today, session, "daily.html")
report_dir = os.path.join("reports/","%s" % today)
report_file = os.path.join(report_dir, "daily.html")
if not os.path.exists(report_dir):
os.makedirs(report_dir, 0o777)
with open(report_file, 'w') as fp:
fp.write(html)
ssr(created, session)
def hh_mm_ss(s):
h = floor(s / 3600.0)
s = s - (h * 3600)
m = floor(s / 60.0)
s = s - (m * 60)
return "%02d:%02d:%02d" % (h,m,s)
def log_append_activity(current_activity, session=None):
created, session = _session(session)
# print "send to sql"
# print "current_activity:"
# print_r(current_activity)
try:
command_line = current_activity['active_window'][0]["command_line"]
title = current_activity['active_window'][0]["window_title"]
    except (KeyError, IndexError, TypeError):
command_line = "None"
title = "None"
try:
workspace = current_activity['workspace']
    except (KeyError, TypeError):
workspace = "None"
spec = {
"date": date.today(),
"hour": datetime.now().hour,
"workspace": workspace,
"command": command_line,
"title": title
}
activity_log = session.query(ActivityLog).filter_by(**spec).first()
if not activity_log:
spec["seconds"] = TIME_BETWEEN_CHECKS
activity_log = ActivityLog(**spec)
else:
activity_log.seconds += TIME_BETWEEN_CHECKS
# print activity_log
session.add(activity_log)
session.commit()
ssr(created, session)
def log_loop():
while True:
now = datetime.now()
sleep_until = now + timedelta(seconds=TIME_BETWEEN_CHECKS)
now = now.replace(microsecond=0)
print "%s[ %s ]%s" % ("="*50, now, "="*50)
created, session = _session()
# write_report()
idle_sec = get_idle()
active_desktop_number, active_desktop = get_active_desktop()
active_windows = get_open_windows(active_desktop_number, True)
# print "active_desktop_number:", active_desktop_number
if idle_sec >= IDLE_THRESHOLD:
# print "idle_sec:", idle_sec
log_append_activity({
"workspace": active_desktop,
"active_window": [{
"command": "idle",
"window_title": "idle",
"command_line": "idle",
"title": "idle"
}]
}, session=session)
else:
log_append_activity({
"workspace": active_desktop,
"active_window": active_windows
}, session=session)
report(session=session)
ssr(created, session)
print "%s[ /%s ]%s" % ("="*50, now, "="*50)
time_to_sleep = sleep_until - datetime.now()
if time_to_sleep.total_seconds() > 0:
_print("time_to_sleep:", time_to_sleep.total_seconds())
sleep(time_to_sleep.total_seconds())
IDLE_THRESHOLD = 90
DEBUG = False
TIME_BETWEEN_CHECKS = 10
# string/regex, replacement
REPLACE_RULES = [
(re.compile("Inbox \(\d+\) .* Gmail"), "Inbox - Gmail"),
("(Private Browsing)", "--hidden--"),
("banking", "--hidden--"),
]
if os.path.exists('config_local.py'):
import config_local
if hasattr(config_local, 'IDLE_THRESHOLD'):
IDLE_THRESHOLD = config_local.IDLE_THRESHOLD
if hasattr(config_local, 'DEBUG'):
DEBUG = config_local.DEBUG
if hasattr(config_local, 'TIME_BETWEEN_CHECKS'):
TIME_BETWEEN_CHECKS = config_local.TIME_BETWEEN_CHECKS
if hasattr(config_local, 'REPLACE_RULES'):
for rule in config_local.REPLACE_RULES:
if rule not in REPLACE_RULES:
REPLACE_RULES.append(rule)
_print ("IDLE_THRESHOLD:",IDLE_THRESHOLD)
_print ("DEBUG:", DEBUG)
_print ("TIME_BETWEEN_CHECKS:", TIME_BETWEEN_CHECKS)
"""
days = get_all_days_with_activity()
days = []
for d in days:
print d
# write_report(d)
"""
pid_handler.pid_file = LOCK_FILE
if not pid_handler.is_running():
_print("="*100)
_print("STARTING THREAD")
_print ("="*100)
pid_handler.write_pid()
thread.start_new_thread(log_loop, ())
else:
_print("="*100)
_print("NOT STARTING THREAD")
_print ("="*100)
today = _today()
_print ("sys.argv", sys.argv)
# write_report()
def app_run():
app.run(port=5001, debug=DEBUG)
if __name__ == "__main__":
app_run()
|
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from copy import copy
from six import add_metaclass, iteritems
from six.moves import map
from .exceptions import UnknownDslObject, ValidationException
SKIP_VALUES = ("", None)
EXPAND__TO_DOT = True
DOC_META_FIELDS = frozenset(
(
"id",
"routing",
)
)
META_FIELDS = frozenset(
(
# Elasticsearch metadata fields, except 'type'
"index",
"using",
"score",
"version",
"seq_no",
"primary_term",
)
).union(DOC_META_FIELDS)
def _wrap(val, obj_wrapper=None):
if isinstance(val, collections_abc.Mapping):
return AttrDict(val) if obj_wrapper is None else obj_wrapper(val)
if isinstance(val, list):
return AttrList(val)
return val
class AttrList(object):
def __init__(self, l, obj_wrapper=None):
# make iterables into lists
if not isinstance(l, list):
l = list(l)
self._l_ = l
self._obj_wrapper = obj_wrapper
def __repr__(self):
return repr(self._l_)
def __eq__(self, other):
if isinstance(other, AttrList):
return other._l_ == self._l_
# make sure we still equal to a dict with the same data
return other == self._l_
def __ne__(self, other):
return not self == other
def __getitem__(self, k):
l = self._l_[k]
if isinstance(k, slice):
return AttrList(l, obj_wrapper=self._obj_wrapper)
return _wrap(l, self._obj_wrapper)
def __setitem__(self, k, value):
self._l_[k] = value
def __iter__(self):
return map(lambda i: _wrap(i, self._obj_wrapper), self._l_)
def __len__(self):
return len(self._l_)
def __nonzero__(self):
return bool(self._l_)
__bool__ = __nonzero__
def __getattr__(self, name):
return getattr(self._l_, name)
def __getstate__(self):
return self._l_, self._obj_wrapper
def __setstate__(self, state):
self._l_, self._obj_wrapper = state
class AttrDict(object):
"""
    Helper class to provide attribute-like access (read and write) to
dictionaries. Used to provide a convenient way to access both results and
nested dsl dicts.
"""
def __init__(self, d):
# assign the inner dict manually to prevent __setattr__ from firing
super(AttrDict, self).__setattr__("_d_", d)
def __contains__(self, key):
return key in self._d_
def __nonzero__(self):
return bool(self._d_)
__bool__ = __nonzero__
def __dir__(self):
# introspection for auto-complete in IPython etc
return list(self._d_.keys())
def __eq__(self, other):
if isinstance(other, AttrDict):
return other._d_ == self._d_
# make sure we still equal to a dict with the same data
return other == self._d_
def __ne__(self, other):
return not self == other
def __repr__(self):
r = repr(self._d_)
if len(r) > 60:
r = r[:60] + "...}"
return r
def __getstate__(self):
return (self._d_,)
def __setstate__(self, state):
super(AttrDict, self).__setattr__("_d_", state[0])
def __getattr__(self, attr_name):
try:
return self.__getitem__(attr_name)
except KeyError:
raise AttributeError(
"{!r} object has no attribute {!r}".format(
self.__class__.__name__, attr_name
)
)
def __delattr__(self, attr_name):
try:
del self._d_[attr_name]
except KeyError:
raise AttributeError(
"{!r} object has no attribute {!r}".format(
self.__class__.__name__, attr_name
)
)
def __getitem__(self, key):
return _wrap(self._d_[key])
def __setitem__(self, key, value):
self._d_[key] = value
def __delitem__(self, key):
del self._d_[key]
def __setattr__(self, name, value):
if name in self._d_ or not hasattr(self.__class__, name):
self._d_[name] = value
else:
# there is an attribute on the class (could be property, ..) - don't add it as field
super(AttrDict, self).__setattr__(name, value)
def __iter__(self):
return iter(self._d_)
def to_dict(self):
return self._d_
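# Illustrative sketch (not part of the library): AttrDict and AttrList give
# attribute-style access to nested plain containers, wrapping items lazily.
def _example_attr_access():
    hit = AttrDict({"title": "ES", "tags": [{"name": "search"}]})
    assert hit.title == "ES"
    assert hit.tags[0].name == "search"  # nested dicts/lists are wrapped on access
    return hit.to_dict()                 # the original dict, unchanged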
class DslMeta(type):
"""
Base Metaclass for DslBase subclasses that builds a registry of all classes
for given DslBase subclass (== all the query types for the Query subclass
of DslBase).
It then uses the information from that registry (as well as `name` and
`shortcut` attributes from the base class) to construct any subclass based
    on its name.
For typical use see `QueryMeta` and `Query` in `elasticsearch_dsl.query`.
"""
_types = {}
def __init__(cls, name, bases, attrs):
super(DslMeta, cls).__init__(name, bases, attrs)
# skip for DslBase
if not hasattr(cls, "_type_shortcut"):
return
if cls.name is None:
            # abstract base class, register its shortcut
cls._types[cls._type_name] = cls._type_shortcut
# and create a registry for subclasses
if not hasattr(cls, "_classes"):
cls._classes = {}
elif cls.name not in cls._classes:
# normal class, register it
cls._classes[cls.name] = cls
@classmethod
def get_dsl_type(cls, name):
try:
return cls._types[name]
except KeyError:
raise UnknownDslObject("DSL type %s does not exist." % name)
@add_metaclass(DslMeta)
class DslBase(object):
"""
Base class for all DSL objects - queries, filters, aggregations etc. Wraps
a dictionary representing the object's json.
    Provides several features:
- attribute access to the wrapped dictionary (.field instead of ['field'])
- _clone method returning a copy of self
- to_dict method to serialize into dict (to be sent via elasticsearch-py)
- basic logical operators (&, | and ~) using a Bool(Filter|Query) TODO:
move into a class specific for Query/Filter
    - respects the definition of the class and (de)serializes its
attributes based on the `_param_defs` definition (for example turning
all values in the `must` attribute into Query objects)
"""
_param_defs = {}
@classmethod
def get_dsl_class(cls, name, default=None):
try:
return cls._classes[name]
except KeyError:
if default is not None:
return cls._classes[default]
raise UnknownDslObject(
"DSL class `{}` does not exist in {}.".format(name, cls._type_name)
)
def __init__(self, _expand__to_dot=EXPAND__TO_DOT, **params):
self._params = {}
for pname, pvalue in iteritems(params):
if "__" in pname and _expand__to_dot:
pname = pname.replace("__", ".")
self._setattr(pname, pvalue)
def _repr_params(self):
"""Produce a repr of all our parameters to be used in __repr__."""
return ", ".join(
"{}={!r}".format(n.replace(".", "__"), v)
for (n, v) in sorted(iteritems(self._params))
# make sure we don't include empty typed params
if "type" not in self._param_defs.get(n, {}) or v
)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._repr_params())
def __eq__(self, other):
return isinstance(other, self.__class__) and other.to_dict() == self.to_dict()
def __ne__(self, other):
return not self == other
def __setattr__(self, name, value):
if name.startswith("_"):
return super(DslBase, self).__setattr__(name, value)
return self._setattr(name, value)
def _setattr(self, name, value):
# if this attribute has special type assigned to it...
if name in self._param_defs:
pinfo = self._param_defs[name]
if "type" in pinfo:
# get the shortcut used to construct this type (query.Q, aggs.A, etc)
shortcut = self.__class__.get_dsl_type(pinfo["type"])
# list of dict(name -> DslBase)
if pinfo.get("multi") and pinfo.get("hash"):
if not isinstance(value, (tuple, list)):
value = (value,)
value = list(
{k: shortcut(v) for (k, v) in iteritems(obj)} for obj in value
)
elif pinfo.get("multi"):
if not isinstance(value, (tuple, list)):
value = (value,)
value = list(map(shortcut, value))
                # dict(name -> DslBase), make sure we pick up all the objs
elif pinfo.get("hash"):
value = {k: shortcut(v) for (k, v) in iteritems(value)}
# single value object, just convert
else:
value = shortcut(value)
self._params[name] = value
def __getattr__(self, name):
if name.startswith("_"):
raise AttributeError(
"{!r} object has no attribute {!r}".format(
self.__class__.__name__, name
)
)
value = None
try:
value = self._params[name]
except KeyError:
# compound types should never throw AttributeError and return empty
# container instead
if name in self._param_defs:
pinfo = self._param_defs[name]
if pinfo.get("multi"):
value = self._params.setdefault(name, [])
elif pinfo.get("hash"):
value = self._params.setdefault(name, {})
if value is None:
raise AttributeError(
"{!r} object has no attribute {!r}".format(
self.__class__.__name__, name
)
)
# wrap nested dicts in AttrDict for convenient access
if isinstance(value, collections_abc.Mapping):
return AttrDict(value)
return value
def to_dict(self):
"""
Serialize the DSL object to plain dict
"""
d = {}
for pname, value in iteritems(self._params):
pinfo = self._param_defs.get(pname)
# typed param
if pinfo and "type" in pinfo:
# don't serialize empty lists and dicts for typed fields
if value in ({}, []):
continue
# list of dict(name -> DslBase)
if pinfo.get("multi") and pinfo.get("hash"):
value = list(
{k: v.to_dict() for k, v in iteritems(obj)} for obj in value
)
# multi-values are serialized as list of dicts
elif pinfo.get("multi"):
value = list(map(lambda x: x.to_dict(), value))
# squash all the hash values into one dict
elif pinfo.get("hash"):
value = {k: v.to_dict() for k, v in iteritems(value)}
# serialize single values
else:
value = value.to_dict()
# serialize anything with to_dict method
elif hasattr(value, "to_dict"):
value = value.to_dict()
d[pname] = value
return {self.name: d}
def _clone(self):
c = self.__class__()
for attr in self._params:
c._params[attr] = copy(self._params[attr])
return c
class HitMeta(AttrDict):
def __init__(self, document, exclude=("_source", "_fields")):
d = {
k[1:] if k.startswith("_") else k: v
for (k, v) in iteritems(document)
if k not in exclude
}
if "type" in d:
# make sure we are consistent everywhere in python
d["doc_type"] = d.pop("type")
super(HitMeta, self).__init__(d)
class ObjectBase(AttrDict):
def __init__(self, meta=None, **kwargs):
meta = meta or {}
for k in list(kwargs):
if k.startswith("_") and k[1:] in META_FIELDS:
meta[k] = kwargs.pop(k)
super(AttrDict, self).__setattr__("meta", HitMeta(meta))
super(ObjectBase, self).__init__(kwargs)
@classmethod
def __list_fields(cls):
"""
        Get all the fields defined for our class.  If we have an Index, also
        look at the index mappings and mark the fields coming from the Index
        as optional.
"""
for name in cls._doc_type.mapping:
field = cls._doc_type.mapping[name]
yield name, field, False
if hasattr(cls.__class__, "_index"):
if not cls._index._mapping:
return
for name in cls._index._mapping:
# don't return fields that are in _doc_type
if name in cls._doc_type.mapping:
continue
field = cls._index._mapping[name]
yield name, field, True
@classmethod
def __get_field(cls, name):
try:
return cls._doc_type.mapping[name]
except KeyError:
# fallback to fields on the Index
if hasattr(cls, "_index") and cls._index._mapping:
try:
return cls._index._mapping[name]
except KeyError:
pass
@classmethod
def from_es(cls, hit):
meta = hit.copy()
data = meta.pop("_source", {})
doc = cls(meta=meta)
doc._from_dict(data)
return doc
def _from_dict(self, data):
for k, v in iteritems(data):
f = self.__get_field(k)
if f and f._coerce:
v = f.deserialize(v)
setattr(self, k, v)
def __getstate__(self):
return self.to_dict(), self.meta._d_
def __setstate__(self, state):
data, meta = state
super(AttrDict, self).__setattr__("_d_", {})
super(AttrDict, self).__setattr__("meta", HitMeta(meta))
self._from_dict(data)
def __getattr__(self, name):
try:
return super(ObjectBase, self).__getattr__(name)
except AttributeError:
f = self.__get_field(name)
if hasattr(f, "empty"):
value = f.empty()
if value not in SKIP_VALUES:
setattr(self, name, value)
value = getattr(self, name)
return value
raise
def to_dict(self, skip_empty=True):
out = {}
for k, v in iteritems(self._d_):
# if this is a mapped field,
f = self.__get_field(k)
if f and f._coerce:
v = f.serialize(v)
# if someone assigned AttrList, unwrap it
if isinstance(v, AttrList):
v = v._l_
if skip_empty:
# don't serialize empty values
# careful not to include numeric zeros
if v in ([], {}, None):
continue
out[k] = v
return out
def clean_fields(self):
errors = {}
for name, field, optional in self.__list_fields():
data = self._d_.get(name, None)
if data is None and optional:
continue
try:
# save the cleaned value
data = field.clean(data)
except ValidationException as e:
errors.setdefault(name, []).append(e)
if name in self._d_ or data not in ([], {}, None):
self._d_[name] = data
if errors:
raise ValidationException(errors)
def clean(self):
pass
def full_clean(self):
self.clean_fields()
self.clean()
def merge(data, new_data, raise_on_conflict=False):
if not (
isinstance(data, (AttrDict, collections_abc.Mapping))
and isinstance(new_data, (AttrDict, collections_abc.Mapping))
):
raise ValueError(
"You can only merge two dicts! Got {!r} and {!r} instead.".format(
data, new_data
)
)
for key, value in iteritems(new_data):
if (
key in data
and isinstance(data[key], (AttrDict, collections_abc.Mapping))
and isinstance(value, (AttrDict, collections_abc.Mapping))
):
merge(data[key], value, raise_on_conflict)
elif key in data and data[key] != value and raise_on_conflict:
raise ValueError("Incompatible data for key %r, cannot be merged." % key)
else:
data[key] = value
def recursive_to_dict(data):
"""Recursively transform objects that potentially have .to_dict()
into dictionary literals by traversing AttrList, AttrDict, list,
tuple, and Mapping types.
"""
if isinstance(data, AttrList):
data = list(data._l_)
elif hasattr(data, "to_dict"):
data = data.to_dict()
if isinstance(data, (list, tuple)):
return type(data)(recursive_to_dict(inner) for inner in data)
elif isinstance(data, collections_abc.Mapping):
return {key: recursive_to_dict(val) for key, val in data.items()}
return data
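# Hedged example (not part of the original module): a minimal sketch of how
# merge() and recursive_to_dict() behave on plain dicts, which satisfy the
# Mapping checks above. The query values are purely illustrative.
def _example_merge_and_recursive_to_dict():
    base = {"query": {"match": {"title": "python"}}, "size": 10}
    override = {"query": {"match": {"title": "search"}}, "from": 5}
    # nested mappings are merged recursively; scalar conflicts are overwritten
    # unless raise_on_conflict=True is passed
    merge(base, override)
    assert base["size"] == 10 and base["from"] == 5
    assert base["query"]["match"]["title"] == "search"
    # plain structures come back unchanged from recursive_to_dict
    assert recursive_to_dict(base) == base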
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes which are used to communicate between parts of implementation.
These model classes are describing mapreduce, its current state and
communication messages. They are either stored in the datastore or
serialized to/from json and passed around with other means.
"""
# Disable "Invalid method name"
# pylint: disable=g-bad-name
__all__ = ["MapreduceState",
"MapperSpec",
"MapreduceControl",
"MapreduceSpec",
"ShardState",
"CountersMap",
"TransientShardState",
"QuerySpec",
"HugeTask"]
import cgi
import datetime
import urllib
import zlib
from graphy.backends import google_chart_api
try:
import json
except ImportError:
import simplejson as json
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from mapreduce import context
from mapreduce import hooks
from mapreduce import json_util
from mapreduce import util
# pylint: disable=protected-access
# Special datastore kinds for MR.
_MAP_REDUCE_KINDS = ("_AE_MR_MapreduceControl",
"_AE_MR_MapreduceState",
"_AE_MR_ShardState",
"_AE_MR_TaskPayload")
class _HugeTaskPayload(db.Model):
"""Model object to store task payload."""
payload = db.BlobProperty()
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_TaskPayload"
class HugeTask(object):
"""HugeTask is a taskqueue.Task-like class that can store big payloads.
Payloads are stored either in the task payload itself or in the datastore.
Task handlers should inherit from base_handler.HugeTaskHandler class.
"""
PAYLOAD_PARAM = "__payload"
PAYLOAD_KEY_PARAM = "__payload_key"
# Leave some wiggle room for headers and other fields.
MAX_TASK_PAYLOAD = taskqueue.MAX_PUSH_TASK_SIZE_BYTES - 1024
MAX_DB_PAYLOAD = datastore_rpc.BaseConnection.MAX_RPC_BYTES
PAYLOAD_VERSION_HEADER = "AE-MR-Payload-Version"
# Update version when payload handling is changed
# in a backward incompatible way.
PAYLOAD_VERSION = "1"
def __init__(self,
url,
params,
name=None,
eta=None,
countdown=None,
parent=None,
headers=None):
"""Init.
Args:
url: task url in str.
params: a dict from str to str.
name: task name.
eta: task eta.
countdown: task countdown.
parent: parent entity of huge task's payload.
headers: a dict of headers for the task.
Raises:
ValueError: when payload is too big even for datastore, or parent is
not specified when payload is stored in datastore.
"""
self.url = url
self.name = name
self.eta = eta
self.countdown = countdown
self._headers = {
"Content-Type": "application/octet-stream",
self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION
}
if headers:
self._headers.update(headers)
# TODO(user): Find a more space efficient way than urlencoding.
payload_str = urllib.urlencode(params)
compressed_payload = ""
if len(payload_str) > self.MAX_TASK_PAYLOAD:
compressed_payload = zlib.compress(payload_str)
# Payload is small. Don't bother with anything.
if not compressed_payload:
self._payload = payload_str
# Compressed payload is small. Don't bother with datastore.
elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:
self._payload = self.PAYLOAD_PARAM + compressed_payload
elif len(compressed_payload) > self.MAX_DB_PAYLOAD:
raise ValueError(
"Payload from %s too big to be stored in database: %s" %
(self.name, len(compressed_payload)))
# Store payload in the datastore.
else:
if not parent:
raise ValueError("Huge tasks should specify parent entity.")
payload_entity = _HugeTaskPayload(payload=compressed_payload,
parent=parent)
payload_key = payload_entity.put()
self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)
def add(self, queue_name, transactional=False):
"""Add task to the queue."""
task = self.to_task()
task.add(queue_name, transactional)
def to_task(self):
"""Convert to a taskqueue task."""
# Never pass params to taskqueue.Task. Use payload instead. Otherwise,
# it's up to a particular taskqueue implementation to generate
# payload from params. It could blow up payload size over limit.
return taskqueue.Task(
url=self.url,
payload=self._payload,
name=self.name,
eta=self.eta,
countdown=self.countdown,
headers=self._headers)
@classmethod
def decode_payload(cls, request):
"""Decode task payload.
HugeTask controls its own payload entirely including urlencoding.
It doesn't depend on any particular web framework.
Args:
request: a webapp Request instance.
Returns:
A dict of str to str. The same as the params argument to __init__.
Raises:
DeprecationWarning: When task payload constructed from an older
incompatible version of mapreduce.
"""
# TODO(user): Pass mr_id into headers. Otherwise when payload decoding
# failed, we can't abort a mr.
if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
raise DeprecationWarning(
"Task is generated by an older incompatible version of mapreduce. "
"Please kill this job manually")
return cls._decode_payload(request.body)
@classmethod
def _decode_payload(cls, body):
compressed_payload_str = None
if body.startswith(cls.PAYLOAD_KEY_PARAM):
payload_key = body[len(cls.PAYLOAD_KEY_PARAM):]
payload_entity = _HugeTaskPayload.get(payload_key)
compressed_payload_str = payload_entity.payload
elif body.startswith(cls.PAYLOAD_PARAM):
compressed_payload_str = body[len(cls.PAYLOAD_PARAM):]
if compressed_payload_str:
payload_str = zlib.decompress(compressed_payload_str)
else:
payload_str = body
result = {}
for (name, value) in cgi.parse_qs(payload_str).items():
if len(value) == 1:
result[name] = value[0]
else:
result[name] = value
return result
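# Hedged example (not part of the original module): a rough sketch of the
# HugeTask payload round trip for the small-payload case, where the urlencoded
# params fit directly in the task body. The param values are illustrative.
def _example_huge_task_payload_roundtrip():
  params = {"shard_id": "job-1-0", "slice_id": "0"}
  body = urllib.urlencode(params)  # what __init__ stores when the payload is small
  decoded = HugeTask._decode_payload(body)
  assert decoded["shard_id"] == "job-1-0"
  assert decoded["slice_id"] == "0"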
class CountersMap(json_util.JsonMixin):
"""Maintains map from counter name to counter value.
The class is used to provide basic arithmetic on counter values (bulk
add/remove), increment individual values and store/load data from json.
"""
def __init__(self, initial_map=None):
"""Constructor.
Args:
initial_map: initial counter values map from counter name (string) to
counter value (int).
"""
if initial_map:
self.counters = initial_map
else:
self.counters = {}
def __repr__(self):
"""Compute string representation."""
return "mapreduce.model.CountersMap(%r)" % self.counters
def get(self, counter_name, default=0):
"""Get current counter value.
Args:
counter_name: counter name as string.
default: default value if one doesn't exist.
Returns:
current counter value as int. 0 if counter was not set.
"""
return self.counters.get(counter_name, default)
def increment(self, counter_name, delta):
"""Increment counter value.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
new_value = current_value + delta
self.counters[counter_name] = new_value
return new_value
def add_map(self, counters_map):
"""Add all counters from the map.
For each counter in the passed map, adds its value to the counter in this
map.
Args:
counters_map: CounterMap instance to add.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, counters_map.counters[counter_name])
def sub_map(self, counters_map):
"""Subtracts all counters from the map.
For each counter in the passed map, subtracts its value from the counter in
this map.
Args:
counters_map: CounterMap instance to subtract.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, -counters_map.counters[counter_name])
def clear(self):
"""Clear all values."""
self.counters = {}
def to_json(self):
"""Serializes all the data in this map into json form.
Returns:
json-compatible data representation.
"""
return {"counters": self.counters}
@classmethod
def from_json(cls, json):
"""Create new CountersMap from the json data structure, encoded by to_json.
Args:
json: json representation of CountersMap .
Returns:
an instance of CountersMap with all data deserialized from json.
"""
counters_map = cls()
counters_map.counters = json["counters"]
return counters_map
def to_dict(self):
"""Convert to dictionary.
Returns:
a dictionary with counter name as key and counter values as value.
"""
return self.counters
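# Hedged example (not part of the original module): a small sketch of how
# CountersMap values aggregate across shards; the counter names are made up.
def _example_counters_map_usage():
  shard_a = CountersMap({"rows-processed": 10})
  shard_b = CountersMap({"rows-processed": 5, "errors": 1})
  total = CountersMap()
  total.add_map(shard_a)
  total.add_map(shard_b)
  assert total.get("rows-processed") == 15
  assert total.get("missing") == 0
  assert CountersMap.from_json(total.to_json()).counters == total.counters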
class MapperSpec(json_util.JsonMixin):
"""Contains a specification for the mapper phase of the mapreduce.
MapperSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapperSpec is
passed as a payload to all mapreduce tasks in JSON encoding as part of
MapreduceSpec.
Specifying mapper handlers:
* '<module_name>.<class_name>' - __call__ method of class instance will be
called
* '<module_name>.<function_name>' - function will be called.
* '<module_name>.<class_name>.<method_name>' - class will be instantiated
and method called.
"""
def __init__(self,
handler_spec,
input_reader_spec,
params,
shard_count,
output_writer_spec=None):
"""Creates a new MapperSpec.
Args:
handler_spec: handler specification as string (see class doc for
details).
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
Properties:
handler_spec: name of handler class/function to use.
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
output_writer_spec: The class name of the output writer to use.
"""
self.handler_spec = handler_spec
self.input_reader_spec = input_reader_spec
self.output_writer_spec = output_writer_spec
self.shard_count = int(shard_count)
self.params = params
def get_handler(self):
"""Get mapper handler instance.
This always creates a new instance of the handler. If the handler is a
callable instance, MR only wants to create a new instance at the
beginning of a shard or shard retry. The pickled callable instance
should be accessed from TransientShardState.
Returns:
handler instance as callable.
"""
return util.handler_for_name(self.handler_spec)
handler = property(get_handler)
def input_reader_class(self):
"""Get input reader class.
Returns:
input reader class object.
"""
return util.for_name(self.input_reader_spec)
def output_writer_class(self):
"""Get output writer class.
Returns:
output writer class object.
"""
return self.output_writer_spec and util.for_name(self.output_writer_spec)
def to_json(self):
"""Serializes this MapperSpec into a json-izable object."""
result = {
"mapper_handler_spec": self.handler_spec,
"mapper_input_reader": self.input_reader_spec,
"mapper_params": self.params,
"mapper_shard_count": self.shard_count
}
if self.output_writer_spec:
result["mapper_output_writer"] = self.output_writer_spec
return result
def __str__(self):
return "MapperSpec(%s, %s, %s, %s)" % (
self.handler_spec, self.input_reader_spec, self.params,
self.shard_count)
@classmethod
def from_json(cls, json):
"""Creates MapperSpec from a dict-like object."""
return cls(json["mapper_handler_spec"],
json["mapper_input_reader"],
json["mapper_params"],
json["mapper_shard_count"],
json.get("mapper_output_writer")
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_json() == other.to_json()
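# Hedged example (not from the original source): MapperSpec serializes to and
# from json symmetrically; the handler and reader names below are placeholders
# and are never resolved here (no call to get_handler()).
def _example_mapper_spec_roundtrip():
  spec = MapperSpec("my_module.MyMapper",
                    "mapreduce.input_readers.DatastoreInputReader",
                    {"entity_kind": "my_module.MyEntity"},
                    shard_count=8)
  assert MapperSpec.from_json(spec.to_json()) == spec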
class MapreduceSpec(json_util.JsonMixin):
"""Contains a specification for the whole mapreduce.
MapreduceSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapreduceSpec is
passed as a payload to all mapreduce tasks in json encoding.
"""
# Url to call when mapreduce finishes its execution.
PARAM_DONE_CALLBACK = "done_callback"
# Queue to use to call done callback
PARAM_DONE_CALLBACK_QUEUE = "done_callback_queue"
def __init__(self,
name,
mapreduce_id,
mapper_spec,
params={},
hooks_class_name=None):
"""Create new MapreduceSpec.
Args:
name: The name of this mapreduce job type.
mapreduce_id: ID of the mapreduce.
mapper_spec: JSON-encoded string containing a MapperSpec.
params: dictionary of additional mapreduce parameters.
hooks_class_name: The fully qualified name of the hooks class to use.
Properties:
name: The name of this mapreduce job type.
mapreduce_id: unique id of this mapreduce as string.
mapper: This MapreduceSpec's instance of MapperSpec.
params: dictionary of additional mapreduce parameters.
hooks_class_name: The fully qualified name of the hooks class to use.
"""
self.name = name
self.mapreduce_id = mapreduce_id
self.mapper = MapperSpec.from_json(mapper_spec)
self.params = params
self.hooks_class_name = hooks_class_name
self.__hooks = None
self.get_hooks() # Fail fast on an invalid hook class.
def get_hooks(self):
"""Returns a hooks.Hooks class or None if no hooks class has been set."""
if self.__hooks is None and self.hooks_class_name is not None:
hooks_class = util.for_name(self.hooks_class_name)
if not isinstance(hooks_class, type):
raise ValueError("hooks_class_name must refer to a class, got %s" %
type(hooks_class).__name__)
if not issubclass(hooks_class, hooks.Hooks):
raise ValueError(
"hooks_class_name must refer to a hooks.Hooks subclass")
self.__hooks = hooks_class(self)
return self.__hooks
def to_json(self):
"""Serializes all data in this mapreduce spec into json form.
Returns:
data in json format.
"""
mapper_spec = self.mapper.to_json()
return {
"name": self.name,
"mapreduce_id": self.mapreduce_id,
"mapper_spec": mapper_spec,
"params": self.params,
"hooks_class_name": self.hooks_class_name,
}
@classmethod
def from_json(cls, json):
"""Create new MapreduceSpec from the json, encoded by to_json.
Args:
json: json representation of MapreduceSpec.
Returns:
an instance of MapreduceSpec with all data deserialized from json.
"""
mapreduce_spec = cls(json["name"],
json["mapreduce_id"],
json["mapper_spec"],
json.get("params"),
json.get("hooks_class_name"))
return mapreduce_spec
def __str__(self):
return str(self.to_json())
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_json() == other.to_json()
@classmethod
def _get_mapreduce_spec(cls, mr_id):
"""Get Mapreduce spec from mr id."""
key = 'GAE-MR-spec: %s' % mr_id
spec_json = memcache.get(key)
if spec_json:
return cls.from_json(spec_json)
state = MapreduceState.get_by_job_id(mr_id)
spec = state.mapreduce_spec
spec_json = spec.to_json()
memcache.set(key, spec_json)
return spec
class MapreduceState(db.Model):
"""Holds accumulated state of mapreduce execution.
MapreduceState is stored in datastore with a key name equal to the
mapreduce ID. Only controller tasks can write to MapreduceState.
Properties:
mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
active: if this MR is still running.
last_poll_time: last time controller job has polled this mapreduce.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
chart_url: last computed mapreduce status chart url. This chart displays the
progress of all the shards the best way it can.
sparkline_url: last computed mapreduce status chart url in small format.
result_status: If not None, the final status of the job.
active_shards: How many shards are still processing. This starts as 0,
then set by KickOffJob handler to be the actual number of input
readers after input splitting, and is updated by Controller task
as shards finish.
start_time: When the job started.
writer_state: Json property to be used by writer to store its state.
This is filled when there is a single output per job. Will be deprecated.
Use OutputWriter.get_filenames instead.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
# TODO(user): Replace mapreduce_spec with job_config.
mapreduce_spec = json_util.JsonProperty(MapreduceSpec, indexed=False)
active = db.BooleanProperty(default=True, indexed=False)
last_poll_time = db.DateTimeProperty(required=True)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
app_id = db.StringProperty(required=False, indexed=True)
writer_state = json_util.JsonProperty(dict, indexed=False)
active_shards = db.IntegerProperty(default=0, indexed=False)
failed_shards = db.IntegerProperty(default=0, indexed=False)
aborted_shards = db.IntegerProperty(default=0, indexed=False)
result_status = db.StringProperty(required=False, choices=_RESULTS)
# For UI purposes only.
chart_url = db.TextProperty(default="")
chart_width = db.IntegerProperty(default=300, indexed=False)
sparkline_url = db.TextProperty(default="")
start_time = db.DateTimeProperty(auto_now_add=True)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_MapreduceState"
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a Job.
Args:
mapreduce_id: The job to retrieve.
Returns:
Datastore Key that can be used to fetch the MapreduceState.
"""
return db.Key.from_path(cls.kind(), str(mapreduce_id))
@classmethod
def get_by_job_id(cls, mapreduce_id):
"""Retrieves the instance of state for a Job.
Args:
mapreduce_id: The mapreduce job to retrieve.
Returns:
instance of MapreduceState for passed id.
"""
return db.get(cls.get_key_by_job_id(mapreduce_id))
def set_processed_counts(self, shards_processed):
"""Updates a chart url to display processed count for each shard.
Args:
shards_processed: list of integers with number of processed entities in
each shard
"""
chart = google_chart_api.BarChart(shards_processed)
shard_count = len(shards_processed)
if shards_processed:
# Only 16 labels on the whole chart.
stride_length = max(1, shard_count / 16)
chart.bottom.labels = []
for x in xrange(shard_count):
if (x % stride_length == 0 or
x == shard_count - 1):
chart.bottom.labels.append(x)
else:
chart.bottom.labels.append("")
chart.left.labels = ["0", str(max(shards_processed))]
chart.left.min = 0
self.chart_width = min(700, max(300, shard_count * 20))
self.chart_url = chart.display.Url(self.chart_width, 200)
def get_processed(self):
"""Number of processed entities.
Returns:
The total number of processed entities as int.
"""
return self.counters_map.get(context.COUNTER_MAPPER_CALLS)
processed = property(get_processed)
@staticmethod
def create_new(mapreduce_id=None,
gettime=datetime.datetime.now):
"""Create a new MapreduceState.
Args:
mapreduce_id: Mapreduce id as string.
gettime: Used for testing.
"""
if not mapreduce_id:
mapreduce_id = MapreduceState.new_mapreduce_id()
state = MapreduceState(key_name=mapreduce_id,
last_poll_time=gettime())
state.set_processed_counts([])
return state
@staticmethod
def new_mapreduce_id():
"""Generate new mapreduce id."""
return util._get_descending_key()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.properties() == other.properties()
class TransientShardState(object):
"""A shard's states that are kept in task payload.
TransientShardState holds two types of states:
1. Some states just don't need to be saved to datastore, e.g.
serialized input reader and output writer instances.
2. Some states are duplicated from datastore, e.g. slice_id, shard_id.
These are used to validate the task.
"""
def __init__(self,
base_path,
mapreduce_spec,
shard_id,
slice_id,
input_reader,
initial_input_reader,
output_writer=None,
retries=0,
handler=None):
"""Init.
Args:
base_path: base path of this mapreduce job. Deprecated.
mapreduce_spec: an instance of MapReduceSpec.
shard_id: shard id.
slice_id: slice id. When enqueuing task for the next slice, this number
is incremented by 1.
input_reader: input reader instance for this shard.
initial_input_reader: the input reader instance before any iteration.
Used by shard retry.
output_writer: output writer instance for this shard, if exists.
retries: the number of retries of the current shard. Used to drop
tasks from old retries.
handler: map/reduce handler.
"""
self.base_path = base_path
self.mapreduce_spec = mapreduce_spec
self.shard_id = shard_id
self.slice_id = slice_id
self.input_reader = input_reader
self.initial_input_reader = initial_input_reader
self.output_writer = output_writer
self.retries = retries
self.handler = handler
self._input_reader_json = self.input_reader.to_json()
def reset_for_retry(self, output_writer):
"""Reset self for shard retry.
Args:
output_writer: new output writer that contains new output files.
"""
self.input_reader = self.initial_input_reader
self.slice_id = 0
self.retries += 1
self.output_writer = output_writer
self.handler = self.mapreduce_spec.mapper.handler
def advance_for_next_slice(self, recovery_slice=False):
"""Advance relevant states for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
if recovery_slice:
self.slice_id += 2
# Restore input reader to the beginning of the slice.
self.input_reader = self.input_reader.from_json(self._input_reader_json)
else:
self.slice_id += 1
def to_dict(self):
"""Convert state to dictionary to save in task payload."""
result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(),
"shard_id": self.shard_id,
"slice_id": str(self.slice_id),
"input_reader_state": self.input_reader.to_json_str(),
"initial_input_reader_state":
self.initial_input_reader.to_json_str(),
"retries": str(self.retries)}
if self.output_writer:
result["output_writer_state"] = self.output_writer.to_json_str()
serialized_handler = util.try_serialize_handler(self.handler)
if serialized_handler:
result["serialized_handler"] = serialized_handler
return result
@classmethod
def from_request(cls, request):
"""Create new TransientShardState from webapp request."""
mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
mapper_spec = mapreduce_spec.mapper
input_reader_spec_dict = json.loads(request.get("input_reader_state"),
cls=json_util.JsonDecoder)
input_reader = mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
initial_input_reader_spec_dict = json.loads(
request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
initial_input_reader = mapper_spec.input_reader_class().from_json(
initial_input_reader_spec_dict)
output_writer = None
if mapper_spec.output_writer_class():
output_writer = mapper_spec.output_writer_class().from_json(
json.loads(request.get("output_writer_state", "{}"),
cls=json_util.JsonDecoder))
assert isinstance(output_writer, mapper_spec.output_writer_class()), (
"%s.from_json returned an instance of wrong class: %s" % (
mapper_spec.output_writer_class(),
output_writer.__class__))
handler = util.try_deserialize_handler(request.get("serialized_handler"))
if not handler:
handler = mapreduce_spec.mapper.handler
return cls(mapreduce_spec.params["base_path"],
mapreduce_spec,
str(request.get("shard_id")),
int(request.get("slice_id")),
input_reader,
initial_input_reader,
output_writer=output_writer,
retries=int(request.get("retries")),
handler=handler)
class ShardState(db.Model):
"""Single shard execution state.
The shard state is stored in the datastore and is later aggregated by
controller task. ShardState key_name is equal to shard_id.
Shard state contains critical state to ensure the correctness of
shard execution. It is the single source of truth about a shard's
progress. For example:
1. A slice is allowed to run only if its payload matches shard state's
expectation.
2. A slice is considered running only if it has acquired the shard's lock.
3. A slice is considered done only if it has successfully committed shard
state to db.
Properties about the shard:
active: if we have this shard still running as boolean.
counters_map: shard's counters map as CountersMap. All counters yielded
within mapreduce are stored here.
mapreduce_id: unique id of the mapreduce.
shard_id: unique id of this shard as string.
shard_number: ordered number for this shard.
retries: the number of times this shard has been retried.
result_status: If not None, the final status of this shard.
update_time: The last time this shard state was updated.
shard_description: A string description of the work this shard will do.
last_work_item: A string description of the last work item processed.
writer_state: writer state for this shard. The shard's output writer
instance can save in-memory output references to this field in its
"finalize" method.
Properties about slice management:
slice_id: slice id of current executing slice. A slice's task
will not run unless its slice_id matches this. Initial
value is 0. By the end of slice execution, this number is
incremented by 1.
slice_start_time: a slice updates this to now at the beginning of
execution. If the transaction succeeds, the current task holds
a lease of slice duration + some grace period. During this time, no
other task with the same slice_id will execute. Upon slice failure,
the task should try to unset this value to allow retries to carry on
ASAP.
slice_request_id: the request id that holds/held the lease. When lease has
expired, new request needs to verify that said request has indeed
ended according to logs API. Do this only when lease has expired
because logs API is expensive. This field should always be set/unset
with slice_start_time. It is possible Logs API doesn't log a request
at all or doesn't log the end of a request. So a new request can
proceed after a long conservative timeout.
slice_retries: the number of times a slice has been retried due to
processing data when lock is held. Taskqueue/datastore errors
related to slice/shard management are not counted. This count is
only a lower bound and is used to determine when to fail a slice
completely.
acquired_once: whether the lock for this slice has been acquired at
least once. When this is True, duplicates in outputs are possible.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
# Shard can be in aborted state when user issued abort, or controller
# issued abort because some other shard failed.
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Maximum number of shard states to hold in memory at any time.
_MAX_STATES_IN_MEMORY = 10
# Functional properties.
mapreduce_id = db.StringProperty(required=True)
active = db.BooleanProperty(default=True, indexed=False)
input_finished = db.BooleanProperty(default=False, indexed=False)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
result_status = db.StringProperty(choices=_RESULTS, indexed=False)
retries = db.IntegerProperty(default=0, indexed=False)
writer_state = json_util.JsonProperty(dict, indexed=False)
slice_id = db.IntegerProperty(default=0, indexed=False)
slice_start_time = db.DateTimeProperty(indexed=False)
slice_request_id = db.ByteStringProperty(indexed=False)
slice_retries = db.IntegerProperty(default=0, indexed=False)
acquired_once = db.BooleanProperty(default=False, indexed=False)
# For UI purposes only.
update_time = db.DateTimeProperty(auto_now=True, indexed=False)
shard_description = db.TextProperty(default="")
last_work_item = db.TextProperty(default="")
def __str__(self):
kv = {"active": self.active,
"slice_id": self.slice_id,
"last_work_item": self.last_work_item,
"update_time": self.update_time}
if self.result_status:
kv["result_status"] = self.result_status
if self.retries:
kv["retries"] = self.retries
if self.slice_start_time:
kv["slice_start_time"] = self.slice_start_time
if self.slice_retries:
kv["slice_retries"] = self.slice_retries
if self.slice_request_id:
kv["slice_request_id"] = self.slice_request_id
if self.acquired_once:
kv["acquired_once"] = self.acquired_once
keys = kv.keys()
keys.sort()
result = "ShardState is {"
for k in keys:
result += k + ":" + str(kv[k]) + ","
result += "}"
return result
def reset_for_retry(self):
"""Reset self for shard retry."""
self.retries += 1
self.last_work_item = ""
self.active = True
self.result_status = None
self.input_finished = False
self.counters_map = CountersMap()
self.slice_id = 0
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
def advance_for_next_slice(self, recovery_slice=False):
"""Advance self for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
if recovery_slice:
self.slice_id += 2
else:
self.slice_id += 1
def set_for_failure(self):
self.active = False
self.result_status = self.RESULT_FAILED
def set_for_abort(self):
self.active = False
self.result_status = self.RESULT_ABORTED
def set_input_finished(self):
self.input_finished = True
def is_input_finished(self):
return self.input_finished
def set_for_success(self):
self.active = False
self.result_status = self.RESULT_SUCCESS
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
def copy_from(self, other_state):
"""Copy data from another shard state entity to self."""
for prop in self.properties().values():
setattr(self, prop.name, getattr(other_state, prop.name))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.properties() == other.properties()
def get_shard_number(self):
"""Gets the shard number from the key name."""
return int(self.key().name().split("-")[-1])
shard_number = property(get_shard_number)
def get_shard_id(self):
"""Returns the shard ID."""
return self.key().name()
shard_id = property(get_shard_id)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_ShardState"
@classmethod
def shard_id_from_number(cls, mapreduce_id, shard_number):
"""Get shard id by mapreduce id and shard number.
Args:
mapreduce_id: mapreduce id as string.
shard_number: shard number to compute id for as int.
Returns:
shard id as string.
"""
return "%s-%d" % (mapreduce_id, shard_number)
@classmethod
def get_key_by_shard_id(cls, shard_id):
"""Retrieves the Key for this ShardState.
Args:
shard_id: The shard ID to fetch.
Returns:
The Datatore key to use to retrieve this ShardState.
"""
return db.Key.from_path(cls.kind(), shard_id)
@classmethod
def get_by_shard_id(cls, shard_id):
"""Get shard state from datastore by shard_id.
Args:
shard_id: shard id as string.
Returns:
ShardState for given shard id or None if it's not found.
"""
return cls.get_by_key_name(shard_id)
@classmethod
def find_by_mapreduce_state(cls, mapreduce_state):
"""Find all shard states for given mapreduce.
Deprecated. Use find_all_by_mapreduce_state.
This will be removed after 1.8.9 release.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of ShardStates.
"""
return list(cls.find_all_by_mapreduce_state(mapreduce_state))
@classmethod
def find_all_by_mapreduce_state(cls, mapreduce_state):
"""Find all shard states for given mapreduce.
Args:
mapreduce_state: MapreduceState instance
Yields:
shard states sorted by shard id.
"""
keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
i = 0
while i < len(keys):
@db.non_transactional
def no_tx_get(i):
return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY])
# We need a separate function so that the non-transactional get can be
# mixed with this generator.
states = no_tx_get(i)
for s in states:
i += 1
if s is not None:
yield s
@classmethod
def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
"""Calculate all shard states keys for given mapreduce.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of keys for shard states, sorted by shard id.
The corresponding shard states may not exist.
"""
if mapreduce_state is None:
return []
keys = []
for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
keys.append(cls.get_key_by_shard_id(shard_id))
return keys
@classmethod
def create_new(cls, mapreduce_id, shard_number):
"""Create new shard state.
Args:
mapreduce_id: unique mapreduce id as string.
shard_number: shard number for which to create shard state.
Returns:
new instance of ShardState ready to put into datastore.
"""
shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
state = cls(key_name=shard_id,
mapreduce_id=mapreduce_id)
return state
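# Hedged example (not part of the original module): shard ids are simply the
# mapreduce id joined with the shard number, which is what get_shard_number()
# parses back out of the key name.
def _example_shard_id_scheme():
  assert ShardState.shard_id_from_number("12345", 3) == "12345-3"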
class MapreduceControl(db.Model):
"""Datastore entity used to control mapreduce job execution.
Only one command may be sent to jobs at a time.
Properties:
command: The command to send to the job.
"""
ABORT = "abort"
_COMMANDS = frozenset([ABORT])
_KEY_NAME = "command"
command = db.TextProperty(choices=_COMMANDS, required=True)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_MapreduceControl"
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a mapreduce ID.
Args:
mapreduce_id: The job to fetch.
Returns:
Datastore Key for the command for the given job ID.
"""
return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
@classmethod
def abort(cls, mapreduce_id, **kwargs):
"""Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
"""
cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
command=cls.ABORT).put(**kwargs)
class QuerySpec(object):
"""Encapsulates everything about a query needed by DatastoreInputReader."""
DEFAULT_BATCH_SIZE = 50
def __init__(self,
entity_kind,
keys_only=None,
filters=None,
batch_size=None,
model_class_path=None,
app=None,
ns=None):
self.entity_kind = entity_kind
self.keys_only = keys_only or False
self.filters = filters or None
self.batch_size = batch_size or self.DEFAULT_BATCH_SIZE
self.model_class_path = model_class_path
self.app = app
self.ns = ns
def to_json(self):
return {"entity_kind": self.entity_kind,
"keys_only": self.keys_only,
"filters": self.filters,
"batch_size": self.batch_size,
"model_class_path": self.model_class_path,
"app": self.app,
"ns": self.ns}
@classmethod
def from_json(cls, json):
return cls(json["entity_kind"],
json["keys_only"],
json["filters"],
json["batch_size"],
json["model_class_path"],
json["app"],
json["ns"])
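# Hedged example (not from the original source): QuerySpec falls back to its
# defaults for unset fields and round-trips through json; the kind name is
# illustrative only.
def _example_query_spec_defaults():
  qs = QuerySpec("MyEntityKind")
  assert qs.batch_size == QuerySpec.DEFAULT_BATCH_SIZE
  assert qs.keys_only is False
  restored = QuerySpec.from_json(qs.to_json())
  assert restored.entity_kind == "MyEntityKind"
  assert restored.batch_size == QuerySpec.DEFAULT_BATCH_SIZE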
|
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django import http
from django import template
from django.conf import settings
from django.template import loader
import simplejson
from common.display import prep_stream_dict, prep_entry_list, prep_entry, prep_comment_list, DEFAULT_AVATARS
from common import api
from common import component
from common import exception
from common import decorator
from common import display
from common import google_contacts
from common import mail
from common import memcache
from common import oauth_util
from common import user
from common import util
from common import validate
from common import views as common_views
def join_join(request):
if request.user:
raise exception.AlreadyLoggedInException()
redirect_to = request.REQUEST.get('redirect_to', '/')
# get the submitted vars
nick = request.REQUEST.get('nick', '')
first_name = request.REQUEST.get('first_name', '')
last_name = request.REQUEST.get('last_name', '')
email = request.REQUEST.get('email', '')
password = request.REQUEST.get('password', '')
confirm = request.REQUEST.get('confirm', '')
hide = request.REQUEST.get('hide', '')
if request.POST:
try:
# TODO validate
params = util.query_dict_to_keywords(request.POST)
if hide:
params['privacy'] = 2
validate.email(email)
if not mail.is_allowed_to_send_email_to(email):
raise exception.ValidationError("Cannot send email to that address")
# TODO start transaction
if api.actor_lookup_email(api.ROOT, email):
raise exception.ValidationError(
'That email address is already associated with a member.')
actor_ref = api.user_create(api.ROOT, **params)
actor_ref.access_level = "delete"
api.post(actor_ref,
nick=actor_ref.nick,
message='Joined %s!' % (settings.SITE_NAME),
icon='jaiku-new-user')
# send off email confirmation
api.activation_request_email(actor_ref, actor_ref.nick, email)
# TODO end transaction
welcome_url = util.qsa('/welcome', {'redirect_to': redirect_to})
# NOTE: does not provide a flash message
response = http.HttpResponseRedirect(welcome_url)
user.set_user_cookie(response, actor_ref)
return response
except:
exception.handle_exception(request)
# for legal section
legal_component = component.include('legal', 'dummy_legal')
legal_html = legal_component.embed_join()
# for sidebar
sidebar_green_top = True
area = "join"
c = template.RequestContext(request, locals())
t = loader.get_template('join/templates/join.html')
return http.HttpResponse(t.render(c))
@decorator.login_required
def join_welcome(request):
redirect_to = request.REQUEST.get('redirect_to', '/')
next = '/welcome/1'
view = request.user
page = 'start'
area = 'welcome'
c = template.RequestContext(request, locals())
t = loader.get_template('join/templates/welcome_%s.html' % page)
return http.HttpResponse(t.render(c))
@decorator.login_required
def join_welcome_photo(request):
next = '/welcome/2'
redirect_to = request.REQUEST.get('redirect_to', '/')
# Welcome pages have a 'Continue' button that should always lead
# to the next page.
success = '/welcome/1'
if 'continue' in request.POST:
success = next
rv = common_views.common_photo_upload(
request,
util.qsa(success, {'redirect_to': redirect_to})
)
if rv:
return rv
# If avatar wasn't changed, just go to next page, if 'Continue' was clicked.
if 'continue' in request.POST:
return http.HttpResponseRedirect(util.qsa(next, {'redirect_to': redirect_to}))
avatars = display.DEFAULT_AVATARS
view = request.user
page = 'photo'
area = 'welcome'
c = template.RequestContext(request, locals())
t = loader.get_template('join/templates/welcome_%s.html' % page)
return http.HttpResponse(t.render(c))
@decorator.login_required
def join_welcome_mobile(request):
redirect_to = request.REQUEST.get('redirect_to', '/')
next = '/welcome/3'
try:
if not settings.SMS_ENABLED:
raise exception.FeatureDisabledError('Mobile activation is currently disabled')
except:
exception.handle_exception(request)
mobile = api.mobile_get_actor(request.user, request.user.nick)
# set the progress
welcome_photo = True
view = request.user
page = 'mobile'
area = 'welcome'
c = template.RequestContext(request, locals())
t = loader.get_template('join/templates/welcome_%s.html' % page)
return http.HttpResponse(t.render(c))
@decorator.login_required
def join_welcome_contacts(request):
"""
If we have an access token for this user, attempt to fetch the contacts.
Else, if we have a request token, attempt to get an access token.
If we have neither: when we are trying to authorize, grab a request token
and redirect to the authorize page; otherwise just show the page.
"""
redirect_to = request.REQUEST.get('redirect_to', '/')
next = '/welcome/done'
# these are for the find more contacts bits
start_index = int(request.REQUEST.get('index', 1))
max = 100
token = request.REQUEST.get('token')
contacts_more = int(request.REQUEST.get('contacts_more', 0))
# this won't be seen unless contacts_more is positive,
# so no worries about the possible negative value
contacts_so_far = contacts_more - 1
try:
if not settings.GOOGLE_CONTACTS_IMPORT_ENABLED:
raise exception.FeatureDisabledError('Google Contacts import is currently disabled')
if 'lookup_remote_contacts' in request.POST:
validate.nonce(request, 'lookup_remote_contacts')
next_url = util.qsa(util.here(request),
{'redirect_to': redirect_to,
'upgrade_auth_token': '',
'_nonce': util.create_nonce(request.user,
'upgrade_auth_token'),
}
)
auth_url = google_contacts.auth_sub_url(next_url)
return http.HttpResponseRedirect(auth_url)
elif 'actor_add_contacts' in request.POST:
validate.nonce(request, 'actor_add_contacts')
targets = request.POST.getlist('targets')
owner = request.POST.get('owner', '')
rv = api.actor_add_contacts(request.user, owner, targets)
next_url = util.qsa(util.here(request),
{'redirect_to': redirect_to,
'contacts_more': contacts_more,
'index': start_index,
'token': token,
}
)
return util.RedirectFlash(next_url, 'Contacts added.')
elif 'upgrade_auth_token' in request.GET:
validate.nonce(request, 'upgrade_auth_token')
auth_token = google_contacts.auth_sub_token_from_request(request)
session_token = google_contacts.upgrade_to_session_token(auth_token)
next_url = util.qsa(util.here(request),
{'redirect_to': redirect_to,
'fetch_contacts': '',
'token': session_token.get_token_string(),
'_nonce': util.create_nonce(request.user,
'fetch_contacts'),
}
)
return http.HttpResponseRedirect(next_url)
elif 'fetch_contacts' in request.REQUEST:
validate.nonce(request, 'fetch_contacts')
# start_index and max are gathered above
session_token = google_contacts.auth_sub_token_from_request(request)
# check the cache for the "My Contacts" group; fetch it if it's not there
my_contacts = memcache.client.get('%s/my_contacts' % token)
if not my_contacts:
my_contacts = google_contacts.get_system_group(session_token,
'Contacts')
memcache.client.set('%s/my_contacts' % token, my_contacts)
rv, more = google_contacts.get_contacts_emails(session_token,
group=my_contacts,
index=start_index,
max=max)
contacts = []
for name, email in rv:
logging.info('looking up "%s" %s', name, email)
contacts.append(api.actor_lookup_email(request.user, email))
contacts = [x for x in contacts if x]
# for the template
contacts_found = True
contacts_more = more
contacts_so_far = contacts_more - 1
token = session_token.get_token_string()
contacts_emails = rv
# if no contacts were found and more are available, try some more
if not contacts and contacts_more:
next_url = util.qsa(util.here(request),
{'fetch_contacts': '',
'contacts_more': contacts_more,
'index': contacts_more,
'token': token,
'_nonce': util.create_nonce(request.user,
'fetch_contacts'),
'redirect_to': redirect_to,
}
)
# TODO(termie): this can take a really long time, probably not really
# viable until we can do it with javascript
#return util.MetaRefresh(next_url, message='Still working...', second=1)
#return http.HttpResponseRedirect(next_url)
except:
exception.handle_exception(request)
# set the progress
welcome_photo = True
welcome_mobile = True
view = request.user
page = 'contacts'
area = 'welcome'
c = template.RequestContext(request, locals())
t = loader.get_template('join/templates/welcome_%s.html' % page)
return http.HttpResponse(t.render(c))
def join_welcome_done(request):
redirect_to = request.REQUEST.get('redirect_to', '/')
# set the progress
welcome_photo = True
welcome_mobile = True
welcome_contacts = True
view = request.user
page = 'done'
area = 'welcome'
c = template.RequestContext(request, locals())
t = loader.get_template('join/templates/welcome_%s.html' % page)
return http.HttpResponse(t.render(c))
|
|
#!/usr/bin/env python
# usage:
# $ fontbakery check-profile fontbakery.profiles.googlefonts -h
import argparse
from importlib import import_module
import os
import sys
from collections import OrderedDict
from fontbakery.checkrunner import (
distribute_generator
, CheckRunner
, get_module_from_file
, DEBUG
, INFO
, WARN
, ERROR
, SKIP
, PASS
, FAIL
, SECTIONSUMMARY
)
from fontbakery.configuration import Configuration
from fontbakery.profile import (Profile, get_module_profile)
from fontbakery.errors import ValueValidationError
from fontbakery.multiproc import multiprocessing_runner
from fontbakery.reporters.terminal import TerminalReporter
from fontbakery.reporters.serialize import SerializeReporter
from fontbakery.reporters.badge import BadgeReporter
from fontbakery.reporters.ghmarkdown import GHMarkdownReporter
from fontbakery.reporters.html import HTMLReporter
from fontbakery.utils import get_theme
log_levels = OrderedDict((s.name, s) \
for s in sorted((
DEBUG
, INFO
, WARN
, ERROR
, SKIP
, PASS
, FAIL
)))
DEFAULT_LOG_LEVEL = INFO
class AddReporterAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
self.cls = kwargs["cls"]
del kwargs["cls"]
super().__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if not hasattr(namespace, "reporters"):
namespace.reporters = []
namespace.reporters.append((self.cls, values))
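# Hedged example (not part of the original file): a minimal sketch of how
# AddReporterAction collects (reporter_class, output_file) pairs on the parsed
# namespace. DummyReporter is a stand-in, not a real fontbakery reporter.
def _example_add_reporter_action():
    class DummyReporter:
        pass
    parser = argparse.ArgumentParser()
    parser.add_argument("--dummy-report", action=AddReporterAction,
                        cls=DummyReporter, metavar="FILE")
    ns = parser.parse_args(["--dummy-report", "out.txt"])
    assert ns.reporters == [(DummyReporter, "out.txt")]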
def ArgumentParser(profile, profile_arg=True):
argument_parser = \
argparse.ArgumentParser(description="Check TTF files against a profile.",
formatter_class=argparse.RawTextHelpFormatter)
if profile_arg:
argument_parser.add_argument('profile',
help='File/Module name,'
' must define a fontbakery "profile".')
values_keys = profile.setup_argparse(argument_parser)
argument_parser.add_argument('--configuration',
dest='configfile',
help='Read configuration file (TOML/YAML).\n')
argument_parser.add_argument(
"-c",
"--checkid",
action="append",
help=(
"Explicit check-ids (or parts of their name) to be executed. "
"Use this option multiple times to select multiple checks."
),
)
argument_parser.add_argument(
"-x",
"--exclude-checkid",
action="append",
help=(
"Exclude check-ids (or parts of their name) from execution. "
"Use this option multiple times to exclude multiple checks."
),
)
valid_keys = ', '.join(log_levels.keys())
def log_levels_get(key):
if key in log_levels:
return log_levels[key]
raise argparse.ArgumentTypeError(f'Key "{key}" must be one of: {valid_keys}.')
argument_parser.add_argument('-v', '--verbose',
dest='loglevels',
const=PASS,
action='append_const',
help='Shortcut for `-l PASS`.\n')
argument_parser.add_argument('-l', '--loglevel',
dest='loglevels',
type=log_levels_get,
action='append',
metavar= 'LOGLEVEL',
help=f'Report checks with a result of this status or higher.\n'
f'One of: {valid_keys}.\n'
f'(default: {DEFAULT_LOG_LEVEL.name})')
argument_parser.add_argument('-m', '--loglevel-messages',
default=None,
type=log_levels_get,
help=f'Report log messages of this status or higher.\n'
f'Messages are all status lines within a check.\n'
f'One of: {valid_keys}.\n'
f'(default: LOGLEVEL)')
argument_parser.add_argument('--succinct',
action='store_true',
help='This is a slightly more compact and succinct'
' output layout for the text terminal.')
if sys.platform != "win32":
argument_parser.add_argument('-n', '--no-progress',
action='store_true',
help='In a tty as stdout, don\'t'
' render the progress indicators.')
argument_parser.add_argument('-C', '--no-colors',
action='store_true',
help='No colors for tty output.')
argument_parser.add_argument('-S', '--show-sections', default=False, action='store_true',
help='Show section summaries.')
argument_parser.add_argument('-L', '--list-checks', default=False, action='store_true',
help='List the checks available in the selected profile.')
argument_parser.add_argument('--dark-theme', default=False, action='store_true',
help='Use a color theme with dark colors.')
argument_parser.add_argument('--light-theme', default=False, action='store_true',
help='Use a color theme with light colors.')
argument_parser.add_argument('--json', default=False, action=AddReporterAction, cls=SerializeReporter,
metavar= 'JSON_FILE',
help='Write a json formatted report to JSON_FILE.')
argument_parser.add_argument('--badges', default=False, action=AddReporterAction, cls=BadgeReporter,
metavar= 'DIRECTORY',
help='Write a set of shields.io badge files to DIRECTORY.')
argument_parser.add_argument('--ghmarkdown', default=False, action=AddReporterAction, cls=GHMarkdownReporter,
metavar= 'MD_FILE',
help='Write a GitHub-Markdown formatted report to MD_FILE.')
argument_parser.add_argument('--html', default=False,action=AddReporterAction, cls=HTMLReporter,
metavar= 'HTML_FILE',
help='Write a HTML report to HTML_FILE.')
iterargs = sorted(profile.iterargs.keys())
gather_by_choices = iterargs + ['*check']
comma_separated = ', '.join(gather_by_choices)
argument_parser.add_argument('-g','--gather-by', default=None,
metavar= 'ITERATED_ARG',
choices=gather_by_choices,
type=str,
help=f'Optional: collect results by ITERATED_ARG\n'
f'In terminal output: create a summary counter for each ITERATED_ARG.\n'
f'In json output: structure the document by ITERATED_ARG.\n'
f'One of: {comma_separated}')
def parse_order(arg):
# filter() returns a lazy (always truthy) iterator on Python 3, so build a
# list explicitly to make the "or None" fallback meaningful.
order = [n.strip() for n in arg.split(',') if n.strip()]
return order or None
comma_separated = ', '.join(iterargs)
argument_parser.add_argument('-o','--order', default=None, type=parse_order,
help=f'Comma separated list of order arguments.\n'
f'The execution order is determined by the order of the check\n'
f'definitions and by the order of the iterable arguments.\n'
f'A section defines its own order. `--order` can be used to\n'
f'override the order of *all* sections.\n'
f'Despite the ITERATED_ARGS there are two special\n'
f'values available:\n'
f'"*iterargs" -- all remaining ITERATED_ARGS\n'
f'"*check" -- order by check\n'
f'ITERATED_ARGS: {comma_separated}\n'
f"A section's default is equivalent to: \"*iterargs, *check\".\n"
f'A common use case is `-o "*check"` when checking the whole \n'
f'collection against a selection of checks picked with `--checkid`.')
def positive_int(value):
int_value = int(value)
if int_value < 0:
raise argparse.ArgumentTypeError(f'Invalid value "{value}" must be'
f' zero or a positive integer value.')
return int_value
argument_parser.add_argument('-J','--jobs', default=0, type=positive_int,
metavar='JOBS', dest='multiprocessing',
help=f'Use multi-processing to run the checks. The argument is the number\n'
f'of worker processes. A sensible number is the cpu count of your\n'
f'system, detected: {os.cpu_count()}.'
f' As an automated shortcut see -j/--auto-jobs.\n'
f'Use 0 to run in single-processing mode (default %(default)s).')
argument_parser.add_argument('-j','--auto-jobs', const=os.cpu_count(),
action='store_const', dest='multiprocessing',
help='Use the auto detected cpu count (= %(const)s)'
' as number of worker processes\n'
'in multi-processing. This is equivalent to : `--jobs %(const)s`')
return argument_parser, values_keys
class ArgumentParserError(Exception): pass
def get_module(name):
if os.path.isfile(name):
# This name could also be the name of a module, but if there's a
# file that we can load, the file wins. Otherwise it's still
# possible to change the directory so the module can be imported.
imported = get_module_from_file(name)
else:
# Fails with an appropriate ImportError.
imported = import_module(name, package=None)
return imported
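# Hedged example (not part of the original file): get_module accepts either a
# path to a python file or a dotted module name; the stdlib argparse module is
# used here only to illustrate the dotted-name branch.
def _example_get_module():
    assert get_module("argparse") is argparse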
def get_profile():
""" Prefetch the profile module, to fill some holes in the help text. """
argument_parser, _ = ArgumentParser(Profile(), profile_arg=True)
# monkey patching will do here
def error(message): raise ArgumentParserError(message)
argument_parser.error = error
try:
args, _ = argument_parser.parse_known_args()
except ArgumentParserError:
# silently fails, the main parser will show usage string.
return Profile()
imported = get_module(args.profile)
profile = get_module_profile(imported)
if not profile:
raise Exception(f"Can't get a profile from {imported}.")
return profile
def main(profile=None, values=None):
# A profile can be injected by the caller; e.g. check-googlefonts injects its own profile.
add_profile_arg = False
if profile is None:
profile = get_profile()
add_profile_arg = True
argument_parser, values_keys = ArgumentParser(profile, profile_arg=add_profile_arg)
try:
args = argument_parser.parse_args()
except ValueValidationError as e:
print(e)
argument_parser.print_usage()
sys.exit(1)
# The default Windows Terminal just displays the escape codes. The argument
# parser above therefore has these options disabled.
if sys.platform == "win32":
args.no_progress = True
args.no_colors = True
theme = get_theme(args)
# the most verbose loglevel wins
loglevel = min(args.loglevels) if args.loglevels else DEFAULT_LOG_LEVEL
if args.list_checks:
list_checks(profile, theme, verbose=loglevel > DEFAULT_LOG_LEVEL)
values_ = {}
if values is not None:
values_.update(values)
# values_keys are returned by profile.setup_argparse
# these are keys for custom arguments required by the profile.
if values_keys:
for key in values_keys:
if hasattr(args, key):
values_[key] = getattr(args, key)
if args.configfile:
configuration = Configuration.from_config_file(args.configfile)
else:
configuration = Configuration()
# Command line args overrides config, but only if given
configuration.maybe_override(Configuration(
custom_order=args.order,
explicit_checks=args.checkid,
exclude_checks=args.exclude_checkid
))
runner_kwds = dict(values=values_, config=configuration)
try:
runner = CheckRunner(profile, **runner_kwds)
except ValueValidationError as e:
print(e)
argument_parser.print_usage()
sys.exit(1)
is_async = args.multiprocessing != 0
tr = TerminalReporter(runner=runner, is_async=is_async
, print_progress=not args.no_progress
, succinct=args.succinct
, check_threshold=loglevel
, log_threshold=args.loglevel_messages or loglevel
, theme=theme
, collect_results_by=args.gather_by
, skip_status_report=None if args.show_sections \
else (SECTIONSUMMARY, )
)
reporters = [tr]
if "reporters" not in args:
args.reporters = []
for reporter_class, output_file in args.reporters:
reporters.append(reporter_class(loglevels=args.loglevels,
runner=runner,
collect_results_by=args.gather_by,
output_file=output_file
))
if args.multiprocessing == 0:
status_generator = runner.run()
else:
status_generator = multiprocessing_runner(args.multiprocessing, runner, runner_kwds)
distribute_generator(status_generator, [reporter.receive for reporter in reporters])
for reporter in reporters:
reporter.write()
# Fail and error let the command fail
return 1 if tr.worst_check_status in (ERROR, FAIL) else 0
def list_checks(profile, theme, verbose=False):
if verbose:
for section in profile._sections.values():
print(theme["list-checks: section"]("\nSection:") + " " + section.name)
for check in section._checks:
print(theme["list-checks: check-id"](check.id) + "\n" +
theme["list-checks: description"](f'"{check.description}"') + "\n")
else:
for _, section in profile._sections.items():
for check in section._checks:
print(check.id)
sys.exit()
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
import logging
import math
import random
import time
import boto3
import click
from concurrent.futures import ProcessPoolExecutor, as_completed
from dateutil.parser import parse as parse_date
from elasticsearch import Elasticsearch, helpers, RequestsHttpConnection
import jsonschema
from influxdb import InfluxDBClient
import yaml
from c7n import schema
from c7n.credentials import assumed_session, SessionFactory
from c7n.registry import PluginRegistry
from c7n.reports import csvout as s3_resource_parser
from c7n.resources import load_resources
from c7n.utils import chunks, dumps, get_retry, local_session
# from c7n.executor import MainThreadExecutor
# ThreadPoolExecutor = MainThreadExecutor
# ProcessPoolExecutor = MainThreadExecutor
# MainThreadExecutor.c7n_async = False
MAX_POINTS = 1440.0
NAMESPACE = 'CloudMaid'
log = logging.getLogger('c7n.metrics')
CONFIG_SCHEMA = {
'type': 'object',
'additionalProperties': True,
'required': ['indexer', 'accounts'],
'properties': {
'indexer': {
'oneOf': [
{
'type': 'object',
'required': ['host', 'port', 'idx_name'],
'properties': {
'type': {'enum': ['es']},
'host': {'type': 'string'},
'port': {'type': 'number'},
'user': {'type': 'string'},
'password': {'type': 'string'},
'idx_name': {'type': 'string'},
'query': {'type': 'string'}
}
},
{
'type': 'object',
'required': ['host', 'db', 'user', 'password'],
'properties': {
'type': {'enum': ['influx']},
'host': {'type': 'string'},
'db': {'type': 'string'},
'user': {'type': 'string'},
'password': {'type': 'string'}
}
},
{
'type': 'object',
'required': ['template', 'Bucket'],
'properties': {
'type': {'enum': ['s3']},
'template': {'type': 'string'},
'Bucket': {'type': 'string'}
}
}
]
},
'accounts': {
'type': 'array',
'items': {
'type': 'object',
'anyOf': [
{"required": ['profile']},
{"required": ['role']}
],
'required': ['name', 'bucket', 'regions', 'title', 'id'],
'properties': {
'name': {'type': 'string'},
'title': {'type': 'string'},
'tags': {'type': 'object'},
'bucket': {'type': 'string'},
'regions': {'type': 'array', 'items': {'type': 'string'}}
}
}
}
}
}
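# Illustrative only (an assumption, not part of the original module): a minimal
# config document of the shape CONFIG_SCHEMA validates. All hosts, names, ids,
# roles and buckets below are placeholders.
EXAMPLE_CONFIG = {
    'indexer': {
        'type': 'es',
        'host': 'localhost',
        'port': 9200,
        'idx_name': 'policy-metrics',
    },
    'accounts': [{
        'name': 'dev',
        'title': 'Dev Account',
        'id': '112233445566',
        'role': 'arn:aws:iam::112233445566:role/CustodianIndex',
        'tags': {'Env': 'dev', 'Division': 'x'},
        'bucket': 'custodian-output-bucket',
        'regions': ['us-east-1'],
    }],
}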
retry = get_retry(('Throttling',), log_retries=True)
indexers = PluginRegistry('policy-metrics-indexers')
class Indexer(object):
""" Metrics indexer
"""
def get_indexer(config, **kwargs):
itype = config['indexer']['type']
klass = indexers.get(itype)
return klass(config, **kwargs)
@indexers.register('es')
class ElasticSearchIndexer(Indexer):
def __init__(self, config, **kwargs):
self.config = config
self.es_type = kwargs.get('type', 'policy-metric')
host = [config['indexer'].get('host', 'localhost')]
kwargs = {}
kwargs['connection_class'] = RequestsHttpConnection
user = config['indexer'].get('user', False)
password = config['indexer'].get('password', False)
if user and password:
kwargs['http_auth'] = (user, password)
kwargs['port'] = config['indexer'].get('port', 9200)
self.client = Elasticsearch(
host,
**kwargs
)
def index(self, points):
for p in points:
p['_index'] = self.config['indexer']['idx_name']
p['_type'] = self.es_type
results = helpers.streaming_bulk(self.client, points)
for status, r in results:
if not status:
log.debug("index err result %s", r)
@indexers.register('s3')
class S3Archiver(Indexer):
def __init__(self, config, **kwargs):
self.config = config
self.client = boto3.client('s3')
def index(self, points):
# account, region in templ
key = self.config['indexer']['template'].format(points[0])
# day aggregation
self.client.put_object(
Bucket=self.config['indexer']['Bucket'],
Key=key,
Body=dumps(points))
@indexers.register('influx')
class InfluxIndexer(Indexer):
def __init__(self, config, **kwargs):
self.config = config
self.client = InfluxDBClient(
username=config['indexer']['user'],
password=config['indexer']['password'],
database=config['indexer']['db'],
host=config['indexer'].get('host'))
def index(self, points):
measurements = []
for p in points:
measurements.append({
'measurement': 'policy-metrics',
'time': p['Timestamp'],
'fields': {
'rcount': p['Sum'],
'runit': p['Unit']},
'tags': {
'region': p['Region'],
'account': p['Account'],
'policy': p['Policy'],
'env': p['Env'],
'division': p['Division'],
'resource': p.get('ResType', ''),
'metric': p['MetricName'],
'namespace': p['Namespace']}})
self.client.write_points(measurements)
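# For orientation (inferred from the attribute access above, not from any
# upstream contract): each point handed to the indexers is a CloudWatch
# datapoint augmented with account metadata, roughly of the form
#   {'Timestamp': ..., 'Sum': 12.0, 'Unit': 'Count',
#    'Namespace': 'CloudMaid', 'MetricName': 'ResourceCount',
#    'Region': 'us-east-1', 'Account': 'dev', 'Policy': 'my-policy',
#    'Env': 'dev', 'Division': 'x'}
# where keys such as 'Env' and 'Division' come from the per-account tags in
# the config.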
def index_metric_set(indexer, account, region, metric_set, start, end, period):
session = local_session(
lambda : assumed_session(account['role'], 'PolicyIndex')) # NOQA E203
client = session.client('cloudwatch', region_name=region)
t = time.time()
account_info = dict(account['tags'])
account_info['Account'] = account['name']
account_info['AccountId'] = account['id']
account_info['Region'] = region
point_count = 0
for m in metric_set:
params = dict(
Namespace=m['Namespace'],
MetricName=m['MetricName'],
Statistics=['Sum'],
Dimensions=m['Dimensions'],
StartTime=start,
EndTime=end,
Period=period)
try:
points = retry(client.get_metric_statistics, **params)['Datapoints']
        except Exception as e:
            log.error(
                "error account:%s region:%s start:%s end:%s error:%s",
                account['name'], region, start, end, e)
            continue
if not points:
continue
dims = {d['Name']: d['Value'] for d in m.pop('Dimensions', ())}
for p in points:
if m['Namespace'] == 'AWS/Lambda':
dims['Policy'] = dims['FunctionName'].split('-', 1)[1]
p.update(dims)
p.update(m)
p.update(account_info)
point_count += len(points)
log.debug("account:%s region:%s metric:%s points:%d policy:%s",
account['name'], region, m['MetricName'], len(points),
dims.get('Policy', 'unknown'))
indexer.index(points)
return time.time() - t, point_count
def index_account_metrics(config, idx_name, region, account, start, end, period):
session = assumed_session(account['role'], 'PolicyIndex')
indexer = get_indexer(config)
client = session.client('cloudwatch', region_name=region)
policies = set()
account_metrics = []
pager = client.get_paginator('list_metrics')
for p in pager.paginate(Namespace=NAMESPACE):
metrics = p.get('Metrics')
for metric in metrics:
if 'Dimensions' not in metric:
log.warning("account:%s region:%s metric with no dims: %s",
account['name'], region, metric)
continue
dims = {d['Name']: d['Value'] for d in metric.get(
'Dimensions', ())}
if dims['Policy'] not in policies:
log.debug("Processing account:%s region:%s policy: %s",
account['name'], region, dims['Policy'])
policies.add(dims['Policy'])
account_metrics.append(metric)
for p in pager.paginate(Namespace='AWS/Lambda'):
metrics = p.get('Metrics')
for metric in metrics:
dims = {d['Name']: d['Value'] for d
in metric.get('Dimensions', ())}
if not dims.get('FunctionName', '').startswith('custodian-'):
continue
account_metrics.append(metric)
log.debug("account:%s region:%s processing metrics:%d start:%s end:%s",
account['name'], region, len(account_metrics),
start.strftime("%Y/%m/%d"), end.strftime("%Y/%m/%d"))
region_time = region_points = 0
    # Originally this was done with parallel threads, but rate limits around
    # get-metric-statistics polling mean single threaded is faster.
for metric_set in chunks(account_metrics, 20):
mt, mp = index_metric_set(
indexer, account, region, metric_set, start, end, period)
region_time += mt
region_points += mp
log.info(("indexed account:%s region:%s metrics:%d"
" points:%d start:%s end:%s time:%0.2f"),
account['name'], region, len(account_metrics), region_points,
start.strftime("%Y/%m/%d"), end.strftime("%Y/%m/%d"), region_time)
return region_time, region_points
def index_account_resources(config, account, region, policy, date):
indexer = get_indexer(config, type=policy['resource'])
bucket = account['bucket']
key_prefix = "accounts/{}/{}/policies/{}".format(
account['name'], region, policy['name'])
# Look for AWS profile in config before Instance role
records = s3_resource_parser.record_set(
lambda: SessionFactory(
region, profile=account.get('profile'),
assume_role=account.get('role'))(),
bucket,
key_prefix,
date,
specify_hour=True)
for r in records:
# Adding Custodian vars to each record
r['c7n:MatchedPolicy'] = policy['name']
r['c7n:AccountNumber'] = account['id']
# Reformat tags for ease of index/search
# Tags are stored in the following format:
        #   Tags: [{'Key': 'mykey', 'Value': 'myval'}, {'Key': 'mykey2', 'Value': 'myval2'}]
        # which makes searching by tag difficult. We convert them to:
        #   Tags: {'mykey': 'myval', 'mykey2': 'myval2'}
r['Tags'] = {t['Key']: t['Value'] for t in r.get('Tags', [])}
indexer.index(records)
def get_periods(start, end, period):
days_delta = (start - end)
period_max = (period * MAX_POINTS)
num_periods = math.ceil(abs(days_delta.total_seconds()) / period_max)
if num_periods <= 1:
yield (start, end)
return
delta_unit = (abs(days_delta.total_seconds()) / num_periods / 86400)
n_start = start
for idx in range(1, int(num_periods) + 1):
period = (n_start,
min((end, n_start + datetime.timedelta(delta_unit))))
yield period
n_start = period[1]
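# Worked example (illustrative): with period=3600 and MAX_POINTS=1440 each
# window covers at most 3600 * 1440 seconds = 60 days, so a 90 day range is
# split into two ~45 day windows:
#   list(get_periods(parse_date('2018/01/01'), parse_date('2018/04/01'), 3600))
#   -> [(2018-01-01, 2018-02-15), (2018-02-15, 2018-04-01)]  (approximately)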
def get_date_range(start, end):
if start and not isinstance(start, datetime.datetime):
start = parse_date(start)
if end and not isinstance(end, datetime.datetime):
end = parse_date(end)
now = datetime.datetime.utcnow().replace(
hour=0, minute=0, second=0, microsecond=0)
if end and not start:
raise ValueError("Missing start date")
elif start and not end:
end = now
if not end and not start:
raise ValueError("Missing start and end")
return start, end
def valid_date(date, delta=0):
# optional input, use default time delta if not provided
# delta is 1 hour for resources
if not date:
date = datetime.datetime.utcnow() - datetime.timedelta(hours=delta)
elif date and not isinstance(date, datetime.datetime):
date = parse_date(date)
return date
@click.group()
def cli():
"""Custodian Indexing"""
@cli.command(name='index-metrics')
@click.option('-c', '--config', required=True, help="Config file")
@click.option('--start', required=True, help="Start date")
@click.option('--end', required=False, help="End Date")
@click.option('--incremental/--no-incremental', default=False,
help="Sync from last indexed timestamp")
@click.option('--concurrency', default=5)
@click.option('-a', '--accounts', multiple=True)
@click.option('-p', '--period', default=3600)
@click.option('-t', '--tag')
@click.option('--index', default='policy-metrics')
@click.option('--verbose/--no-verbose', default=False)
def index_metrics(
config, start, end, incremental=False, concurrency=5, accounts=None,
period=3600, tag=None, index='policy-metrics', verbose=False):
"""index policy metrics"""
logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('c7n.worker').setLevel(logging.INFO)
with open(config) as fh:
config = yaml.safe_load(fh.read())
jsonschema.validate(config, CONFIG_SCHEMA)
start, end = get_date_range(start, end)
p_accounts = set()
p_account_stats = {}
i_time = i_points = 0
t = time.time()
with ProcessPoolExecutor(max_workers=concurrency) as w:
futures = {}
jobs = []
# Filter
for account in config.get('accounts'):
if accounts and account['name'] not in accounts:
continue
if tag:
found = False
                for account_tag in account['tags'].values():
                    if tag == account_tag:
                        found = True
                        break
if not found:
continue
p_accounts.add((account['name']))
for region in account.get('regions'):
for (p_start, p_end) in get_periods(start, end, period):
p = (config, index, region, account, p_start, p_end, period)
jobs.append(p)
        # By default we'd effectively be processing in order, but that bumps
        # our concurrency into rate limits on metrics retrieval in a given
        # account/region. Go ahead and shuffle; at least with lucene, the lack
        # of ordering should have minimal impact on query perf (inverted index).
random.shuffle(jobs)
for j in jobs:
log.debug("submit account:%s region:%s start:%s end:%s" % (
j[3]['name'], j[2], j[4], j[5]))
futures[w.submit(index_account_metrics, *j)] = j
# Process completed
for f in as_completed(futures):
config, index, region, account, p_start, p_end, period = futures[f]
if f.exception():
log.warning("error account:%s region:%s error:%s",
account['name'], region, f.exception())
continue
rtime, rpoints = f.result()
rstat = p_account_stats.setdefault(
account['name'], {}).setdefault(region, {'points': 0})
rstat['points'] += rpoints
# log.info("complete account:%s, region:%s points:%s time:%0.2f",
# account['name'], region, rpoints, rtime)
i_time += rtime
i_points += rpoints
log.info("complete accounts:%d points:%d itime:%0.2f time:%0.2f",
len(p_accounts), i_points, i_time, time.time() - t)
@cli.command(name='index-resources')
@click.option('-c', '--config', required=True, help="Config file")
@click.option('-p', '--policies', required=True, help="Policy file")
@click.option('--date', required=False, help="Start date")
@click.option('--concurrency', default=5)
@click.option('-a', '--accounts', multiple=True)
@click.option('-t', '--tag')
@click.option('--verbose/--no-verbose', default=False)
def index_resources(
config, policies, date=None, concurrency=5,
accounts=None, tag=None, verbose=False):
"""index policy resources"""
logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('c7n.worker').setLevel(logging.INFO)
# validating the config and policy files.
with open(config) as fh:
config = yaml.safe_load(fh.read())
jsonschema.validate(config, CONFIG_SCHEMA)
with open(policies) as fh:
policies = yaml.safe_load(fh.read())
load_resources()
schema.validate(policies)
date = valid_date(date, delta=1)
with ProcessPoolExecutor(max_workers=concurrency) as w:
futures = {}
jobs = []
for account in config.get('accounts'):
if accounts and account['name'] not in accounts:
continue
if tag:
found = False
for t in account['tags'].values():
if tag == t:
found = True
break
if not found:
continue
for region in account.get('regions'):
for policy in policies.get('policies'):
p = (config, account, region, policy, date)
jobs.append(p)
for j in jobs:
log.debug("submit account:{} region:{} policy:{} date:{}".format(
j[1]['name'], j[2], j[3]['name'], j[4]))
futures[w.submit(index_account_resources, *j)] = j
# Process completed
for f in as_completed(futures):
config, account, region, policy, date = futures[f]
if f.exception():
log.warning(
"error account:{} region:{} policy:{} error:{}".format(
account['name'], region, policy['name'], f.exception()))
continue
log.info("complete account:{} region:{} policy:{}".format(
account['name'], region, policy['name']))
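# Example invocations, assuming this module is run directly (file names and
# dates below are placeholders):
#
#   python <this_module>.py index-metrics -c config.yml --start 2018/01/01 --end 2018/02/01
#   python <this_module>.py index-resources -c config.yml -p policies.yml --date 2018/01/15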
if __name__ == '__main__':
try:
cli()
except Exception:
import traceback, pdb, sys
        traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles control flow statements: while, for, if."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import annos
class ControlFlowTransformer(converter.Base):
"""Transforms control flow structures like loops an conditionals."""
def _create_cond_branch(self, body_name, aliased_orig_names,
aliased_new_names, body, returns):
if not returns:
# TODO(b/110167197): Replace with a plain return.
template = """
return 1
"""
return_stmt = templates.replace(template)
elif len(returns) == 1:
template = """
return retval
"""
return_stmt = templates.replace(template, retval=returns[0])
else:
template = """
return (retvals,)
"""
return_stmt = templates.replace(template, retvals=returns)
if aliased_orig_names:
template = """
def body_name():
aliased_new_names, = aliased_orig_names,
body
return_stmt
"""
return templates.replace(
template,
body_name=body_name,
body=body,
aliased_orig_names=aliased_orig_names,
aliased_new_names=aliased_new_names,
return_stmt=return_stmt)
else:
template = """
def body_name():
body
return_stmt
"""
return templates.replace(
template, body_name=body_name, body=body, return_stmt=return_stmt)
def _create_cond_expr(self, results, test, body_name, orelse_name,
state_getter_name,
state_setter_name):
if results is not None:
template = """
results = ag__.if_stmt(test, body_name, orelse_name,
state_getter_name, state_setter_name)
"""
return templates.replace(
template,
test=test,
results=results,
body_name=body_name,
orelse_name=orelse_name,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name)
else:
template = """
ag__.if_stmt(test, body_name, orelse_name, getter_name, setter_name)
"""
return templates.replace(
template,
test=test,
body_name=body_name,
orelse_name=orelse_name,
getter_name=state_getter_name,
setter_name=state_setter_name)
def _fmt_symbols(self, symbol_set):
if not symbol_set:
return 'no variables'
return ', '.join(map(str, symbol_set))
def _determine_aliased_symbols(self, scope, node_defined_in, block):
if block:
block_live_in = set(anno.getanno(block[0], anno.Static.LIVE_VARS_IN))
else:
block_live_in = set()
modified_live = scope.modified & node_defined_in & block_live_in
# Composite symbols are handled elsewhere see _create_state_functions
return {s for s in modified_live if not s.is_composite()}
def _create_state_functions(self, composites,
state_getter_name, state_setter_name):
if composites:
composite_tuple = tuple(composites)
template = """
def state_getter_name():
return composite_tuple,
def state_setter_name(vals):
composite_tuple, = vals
"""
node = templates.replace(
template,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
composite_tuple=composite_tuple)
else:
template = """
def state_getter_name():
return ()
def state_setter_name(_):
pass
"""
node = templates.replace(
template,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name)
return node
def _create_undefined_assigns(self, undefined_symbols):
assignments = []
for s in undefined_symbols:
template = '''
var = ag__.Undefined(symbol_name)
'''
assignments += templates.replace(
template,
var=s,
symbol_name=gast.Str(s.ssf()))
return assignments
def visit_If(self, node):
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
# Note: this information needs to be extracted before the body conversion
# that happens in the call to generic_visit below, because the conversion
# generates nodes that lack static analysis annotations.
need_alias_in_body = self._determine_aliased_symbols(
body_scope, defined_in, node.body)
need_alias_in_orelse = self._determine_aliased_symbols(
orelse_scope, defined_in, node.orelse)
node = self.generic_visit(node)
modified_in_cond = body_scope.modified | orelse_scope.modified
returned_from_cond = set()
composites = set()
for s in modified_in_cond:
if s in live_out and not s.is_composite():
returned_from_cond.add(s)
if s.is_composite():
# Special treatment for compound objects, always return them.
# This allows special handling within the if_stmt itself.
# For example, in TensorFlow we need to restore the state of composite
# symbols to ensure that only effects from the executed branch are seen.
composites.add(s)
created_in_body = body_scope.modified & returned_from_cond - defined_in
created_in_orelse = orelse_scope.modified & returned_from_cond - defined_in
basic_created_in_body = tuple(
s for s in created_in_body if not s.is_composite())
basic_created_in_orelse = tuple(
s for s in created_in_orelse if not s.is_composite())
# These variables are defined only in a single branch. This is fine in
# Python so we pass them through. Another backend, e.g. Tensorflow, may need
# to handle these cases specially or throw an Error.
possibly_undefined = (set(basic_created_in_body) ^
set(basic_created_in_orelse))
# Alias the closure variables inside the conditional functions, to allow
# the functions access to the respective variables.
# We will alias variables independently for body and orelse scope,
# because different branches might write different variables.
aliased_body_orig_names = tuple(need_alias_in_body)
aliased_orelse_orig_names = tuple(need_alias_in_orelse)
aliased_body_new_names = tuple(
self.ctx.namer.new_symbol(s.ssf(), body_scope.referenced)
for s in aliased_body_orig_names)
aliased_orelse_new_names = tuple(
self.ctx.namer.new_symbol(s.ssf(), orelse_scope.referenced)
for s in aliased_orelse_orig_names)
alias_body_map = dict(zip(aliased_body_orig_names, aliased_body_new_names))
alias_orelse_map = dict(
zip(aliased_orelse_orig_names, aliased_orelse_new_names))
node_body = ast_util.rename_symbols(node.body, alias_body_map)
node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)
cond_var_name = self.ctx.namer.new_symbol('cond', body_scope.referenced)
body_name = self.ctx.namer.new_symbol('if_true', body_scope.referenced)
orelse_name = self.ctx.namer.new_symbol('if_false', orelse_scope.referenced)
all_referenced = body_scope.referenced | orelse_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', all_referenced)
state_setter_name = self.ctx.namer.new_symbol('set_state', all_referenced)
returned_from_cond = tuple(returned_from_cond)
if returned_from_cond:
if len(returned_from_cond) == 1:
cond_results = returned_from_cond[0]
else:
cond_results = gast.Tuple([s.ast() for s in returned_from_cond], None)
returned_from_body = tuple(
alias_body_map[s] if s in need_alias_in_body else s
for s in returned_from_cond)
returned_from_orelse = tuple(
alias_orelse_map[s] if s in need_alias_in_orelse else s
for s in returned_from_cond)
else:
# When the cond would return no value, we leave the cond called without
# results. That in turn should trigger the side effect guards. The
# branch functions will return a dummy value that ensures cond
# actually has some return value as well.
cond_results = None
# TODO(mdan): Replace with None once side_effect_guards is retired.
returned_from_body = (templates.replace_as_expression(
'ag__.match_staging_level(1, cond_var_name)',
cond_var_name=cond_var_name),)
returned_from_orelse = (templates.replace_as_expression(
'ag__.match_staging_level(1, cond_var_name)',
cond_var_name=cond_var_name),)
cond_assign = self.create_assignment(cond_var_name, node.test)
body_def = self._create_cond_branch(
body_name,
aliased_orig_names=aliased_body_orig_names,
aliased_new_names=aliased_body_new_names,
body=node_body,
returns=returned_from_body)
orelse_def = self._create_cond_branch(
orelse_name,
aliased_orig_names=aliased_orelse_orig_names,
aliased_new_names=aliased_orelse_new_names,
body=node_orelse,
returns=returned_from_orelse)
undefined_assigns = self._create_undefined_assigns(possibly_undefined)
composite_defs = self._create_state_functions(
composites, state_getter_name, state_setter_name)
cond_expr = self._create_cond_expr(cond_results, cond_var_name, body_name,
orelse_name, state_getter_name,
state_setter_name)
if_ast = (
undefined_assigns + composite_defs + body_def + orelse_def +
cond_assign + cond_expr)
return if_ast
def _get_loop_state(self, node, modified_symbols):
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
reserved_symbols = body_scope.referenced
loop_state = []
for s in modified_symbols:
if s.is_composite():
# TODO(mdan): Raise an error when this happens for a TF loop.
continue
# Variables not live into or out of the loop are considered local to the
# loop.
if s not in live_in and s not in live_out:
continue
# Mutations made to objects created inside the loop will appear as writes
# to composite symbols. Because these mutations appear as modifications
# made to composite symbols, we check whether the composite's parent is
# actually live into the loop.
# Example:
# while cond:
# x = Foo()
# x.foo = 2 * x.foo # x.foo is live into the loop, but x is not.
if s.is_composite() and not all(p in live_in for p in s.support_set):
continue
loop_state.append(s)
loop_state = frozenset(loop_state)
    # Variables that are used or defined inside the loop, but not defined
    # before entering the loop.
undefined_lives = loop_state - defined_in
# Only simple variables must be defined. The composite ones will be
# implicitly checked at runtime.
possibly_undefs = {v for v in undefined_lives if v.is_simple()}
return loop_state, reserved_symbols, possibly_undefs
def _state_constructs(self, loop_state, reserved_symbols):
loop_state = tuple(loop_state)
state_ssf = [
self.ctx.namer.new_symbol(s.ssf(), reserved_symbols) for s in loop_state
]
ssf_map = {
name: ssf
for name, ssf in zip(loop_state, state_ssf)
if str(name) != ssf
}
state_ast_tuple = gast.Tuple([n.ast() for n in loop_state], None)
if len(loop_state) == 1:
loop_state = loop_state[0]
state_ssf = state_ssf[0]
return loop_state, state_ssf, state_ast_tuple, ssf_map
def visit_While(self, node):
self.generic_visit(node)
loop_state, reserved_symbols, possibly_undefs = self._get_loop_state(
node, anno.getanno(node, annos.NodeAnno.BODY_SCOPE).modified)
loop_state, state_ssf, state_ast_tuple, ssf_map = self._state_constructs(
loop_state, reserved_symbols)
node_body = ast_util.rename_symbols(node.body, ssf_map)
test = ast_util.rename_symbols(node.test, ssf_map)
if loop_state:
template = """
def test_name(state_ssf):
return test
def body_name(state_ssf):
body
return state_ssf,
state_ast_tuple = ag__.while_stmt(test_name, body_name, (state,))
"""
node = templates.replace(
template,
state=loop_state,
state_ssf=state_ssf,
state_ast_tuple=state_ast_tuple,
test_name=self.ctx.namer.new_symbol('loop_test', reserved_symbols),
test=test,
body_name=self.ctx.namer.new_symbol('loop_body', reserved_symbols),
body=node_body)
else:
template = """
def test_name():
return test
def body_name():
body
return ()
ag__.while_stmt(test_name, body_name, ())
"""
node = templates.replace(
template,
test_name=self.ctx.namer.new_symbol('loop_test', reserved_symbols),
test=test,
body_name=self.ctx.namer.new_symbol('loop_body', reserved_symbols),
body=node_body)
undefined_assigns = self._create_undefined_assigns(possibly_undefs)
return undefined_assigns + node
def _for_loop_with_extra_test(self, loop_state, state_ssf, state_ast_tuple,
original_node, extra_test_name, extra_test,
body_name, loop_body, ssf_map):
target_nodes = ast_util.rename_symbols(original_node.target, ssf_map)
template = """
def extra_test_name(state_ssf):
return extra_test_expr
def body_name(loop_vars, state_ssf):
# Workaround for PEP-3113
target = loop_vars
body
return state_ssf,
state_ast_tuple = ag__.for_stmt(
iter_, extra_test_name, body_name, (state,))
"""
return templates.replace(
template,
state=loop_state,
state_ssf=state_ssf,
state_ast_tuple=state_ast_tuple,
iter_=original_node.iter,
target=target_nodes,
extra_test_name=extra_test_name,
extra_test_expr=extra_test,
body_name=body_name,
body=loop_body)
def _for_loop_with_state(self, loop_state, state_ssf, state_ast_tuple,
original_node, body_name, loop_body, ssf_map):
target_nodes = ast_util.rename_symbols(original_node.target, ssf_map)
template = """
def body_name(loop_vars, state_ssf):
# Workaround for PEP-3113
target = loop_vars
body
return state_ssf,
state_ast_tuple = ag__.for_stmt(
iter_, None, body_name, (state,))
"""
return templates.replace(
template,
state=loop_state,
state_ssf=state_ssf,
state_ast_tuple=state_ast_tuple,
iter_=original_node.iter,
target=target_nodes,
body_name=body_name,
body=loop_body)
def _for_loop_without_state(self, original_node, body_name, loop_body):
template = """
def body_name(loop_vars):
# Workaround for PEP-3113
iterate = loop_vars
body
return ()
ag__.for_stmt(iter_, None, body_name, ())
"""
return templates.replace(
template,
iter_=original_node.iter,
iterate=original_node.target,
body_name=body_name,
body=loop_body)
def visit_For(self, node):
self.generic_visit(node)
loop_state, reserved_symbols, possibly_undefs = self._get_loop_state(
node,
(anno.getanno(node, annos.NodeAnno.BODY_SCOPE).modified |
anno.getanno(node, annos.NodeAnno.ITERATE_SCOPE).modified))
loop_state, state_ssf, state_ast_tuple, ssf_map = self._state_constructs(
loop_state, reserved_symbols)
node_body = ast_util.rename_symbols(node.body, ssf_map)
body_name = self.ctx.namer.new_symbol('loop_body', reserved_symbols)
has_extra_test = anno.hasanno(node, 'extra_test')
if loop_state:
if has_extra_test:
# Loop with early stopping (e.g. break or return)
extra_test = anno.getanno(node, 'extra_test')
extra_test = ast_util.rename_symbols(extra_test, ssf_map)
extra_test_name = self.ctx.namer.new_symbol('extra_test',
reserved_symbols)
loop_nodes = self._for_loop_with_extra_test(
loop_state, state_ssf, state_ast_tuple, node, extra_test_name,
extra_test, body_name, node_body, ssf_map)
else:
# Loop with loop-carried state and no early stopping
loop_nodes = self._for_loop_with_state(
loop_state, state_ssf, state_ast_tuple, node, body_name, node_body,
ssf_map)
else:
# Loop with no loop-carried state and no early stopping
assert not has_extra_test, ('Early stopping (e.g. break and/or return) '
'should create state variables.')
loop_nodes = self._for_loop_without_state(node, body_name, node_body)
undefined_assigns = self._create_undefined_assigns(possibly_undefs)
return undefined_assigns + loop_nodes
def transform(node, ctx):
node = ControlFlowTransformer(ctx).visit(node)
return node
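# Rough sketch (not part of the original module) of the shape of the rewrite
# that visit_If produces. Source such as
#
#   if x > 0:
#       y = 1
#   else:
#       y = 2
#
# becomes, approximately (generated names will differ):
#
#   def get_state():
#       return ()
#   def set_state(_):
#       pass
#   def if_true():
#       y = 1
#       return y
#   def if_false():
#       y = 2
#       return y
#   cond = x > 0
#   y = ag__.if_stmt(cond, if_true, if_false, get_state, set_state)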
|
|
"""
missing types & inference
"""
from decimal import Decimal
from functools import partial
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import (
NaT,
Period,
iNaT,
)
from pandas._typing import (
ArrayLike,
DtypeObj,
)
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
TD64NS_DTYPE,
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetimelike_v_numeric,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
ABCIndex,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar
nan_checker = np.isnan
INF_AS_NA = False
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
isnull = isna
def _isna(obj, inf_as_na: bool = False):
"""
Detect missing values, treating None, NaN or NA as null. Infinite
values will also be treated as null if inf_as_na is True.
Parameters
----------
obj: ndarray or object value
Input array or scalar value.
inf_as_na: bool
Whether to treat infinity as null.
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
if inf_as_na:
return libmissing.checknull_old(obj)
else:
return libmissing.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, type):
return False
elif isinstance(obj, (np.ndarray, ABCExtensionArray)):
return _isna_array(obj, inf_as_na=inf_as_na)
elif isinstance(obj, (ABCSeries, ABCIndex)):
result = _isna_array(obj._values, inf_as_na=inf_as_na)
# box
if isinstance(obj, ABCSeries):
result = obj._constructor(
result, index=obj.index, name=obj.name, copy=False
)
return result
elif isinstance(obj, ABCDataFrame):
return obj.isna()
elif isinstance(obj, list):
return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
elif hasattr(obj, "__array__"):
return _isna_array(np.asarray(obj), inf_as_na=inf_as_na)
else:
return False
def _use_inf_as_na(key):
"""
Option change callback for na/inf behaviour.
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
    key : str
        The option key whose boolean value selects the behaviour:
        True means treat None, NaN, INF, -INF as null (old way),
        False means only None and NaN are null; INF and -INF are not
        (new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* https://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
inf_as_na = get_option(key)
globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
if inf_as_na:
globals()["nan_checker"] = lambda x: ~np.isfinite(x)
globals()["INF_AS_NA"] = True
else:
globals()["nan_checker"] = np.isnan
globals()["INF_AS_NA"] = False
def _isna_array(values: ArrayLike, inf_as_na: bool = False):
"""
Return an array indicating which values of the input array are NaN / NA.
Parameters
----------
obj: ndarray or ExtensionArray
The input array whose elements are to be checked.
inf_as_na: bool
Whether or not to treat infinite values as NA.
Returns
-------
array-like
Array of boolean values denoting the NA status of each element.
"""
dtype = values.dtype
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
if inf_as_na and is_categorical_dtype(dtype):
result = libmissing.isnaobj_old(values.to_numpy())
else:
result = values.isna()
elif is_string_dtype(dtype):
result = _isna_string_dtype(values, inf_as_na=inf_as_na)
elif needs_i8_conversion(dtype):
# this is the NaT pattern
result = values.view("i8") == iNaT
else:
if inf_as_na:
result = ~np.isfinite(values)
else:
result = np.isnan(values)
return result
def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> np.ndarray:
# Working around NumPy ticket 1542
dtype = values.dtype
shape = values.shape
if dtype.kind in ("S", "U"):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
if inf_as_na:
vec = libmissing.isnaobj_old(values.ravel())
else:
vec = libmissing.isnaobj(values.ravel())
result[...] = vec.reshape(shape)
return result
def notna(obj):
"""
Detect non-missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are valid (not missing, which is ``NaN`` in numeric
arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : array-like or object value
Object to check for *not* null or *non*-missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is valid.
See Also
--------
isna : Boolean inverse of pandas.notna.
Series.notna : Detect valid values in a Series.
DataFrame.notna : Detect valid values in a DataFrame.
Index.notna : Detect valid values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.notna('dog')
True
>>> pd.notna(pd.NA)
False
>>> pd.notna(np.nan)
False
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.notna(array)
array([[ True, False, True],
[ True, True, False]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.notna(index)
array([ True, True, False, True])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.notna(df)
0 1 2
0 True True True
1 True False True
>>> pd.notna(df[1])
0 True
1 False
Name: 1, dtype: bool
"""
res = isna(obj)
if is_scalar(res):
return not res
return ~res
notnull = notna
def isna_compat(arr, fill_value=np.nan) -> bool:
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
if isna(fill_value):
dtype = arr.dtype
return not (is_bool_dtype(dtype) or is_integer_dtype(dtype))
return True
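# Illustrative behaviour of isna_compat:
#   isna_compat(np.array([1, 2, 3]), np.nan)      -> False  (int64 cannot hold NaN)
#   isna_compat(np.array([True, False]), np.nan)  -> False  (bool cannot hold NaN)
#   isna_compat(np.array([1.0, 2.0]), np.nan)     -> True
#   isna_compat(np.array([1, 2, 3]), 0)           -> True   (fill value is not NA)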
def array_equivalent(
left, right, strict_nan: bool = False, dtype_equal: bool = False
) -> bool:
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
dtype_equal : bool, default False
Whether `left` and `right` are known to have the same dtype
        according to `is_dtype_equal`. Some methods like `BlockManager.equals`
require that the dtypes match. Setting this to ``True`` can improve
performance, but will give different results for arrays that are
equal but different dtypes.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
if dtype_equal:
# fastpath when we require that the dtypes match (Block.equals)
if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype):
return _array_equivalent_float(left, right)
elif is_datetimelike_v_numeric(left.dtype, right.dtype):
return False
elif needs_i8_conversion(left.dtype):
return _array_equivalent_datetimelike(left, right)
elif is_string_dtype(left.dtype):
# TODO: fastpath for pandas' StringDtype
return _array_equivalent_object(left, right, strict_nan)
else:
return np.array_equal(left, right)
# Slow path when we allow comparing different dtypes.
# Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
if is_string_dtype(left.dtype) or is_string_dtype(right.dtype):
return _array_equivalent_object(left, right, strict_nan)
# NaNs can occur in float and complex arrays.
if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype):
if not (left.size and right.size):
return True
return ((left == right) | (isna(left) & isna(right))).all()
elif is_datetimelike_v_numeric(left, right):
# GH#29553 avoid numpy deprecation warning
return False
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# datetime64, timedelta64, Period
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view("i8")
right = right.view("i8")
# if we have structured dtypes, compare first
if (
left.dtype.type is np.void or right.dtype.type is np.void
) and left.dtype != right.dtype:
return False
return np.array_equal(left, right)
def _array_equivalent_float(left, right):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
def _array_equivalent_datetimelike(left, right):
return np.array_equal(left.view("i8"), right.view("i8"))
def _array_equivalent_object(left, right, strict_nan):
if not strict_nan:
# isna considers NaN and None to be equivalent.
return lib.array_equivalent_object(
ensure_object(left.ravel()), ensure_object(right.ravel())
)
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif left_value is libmissing.NA and right_value is not libmissing.NA:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if not isinstance(right_value, float) or not np.isnan(right_value):
return False
else:
try:
if np.any(np.asarray(left_value != right_value)):
return False
except TypeError as err:
if "Cannot compare tz-naive" in str(err):
# tzawareness compat failure, see GH#28507
return False
elif "boolean value of NA is ambiguous" in str(err):
return False
raise
return True
def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
"""
ExtensionArray-compatible implementation of array_equivalent.
"""
if not is_dtype_equal(left.dtype, right.dtype):
return False
elif isinstance(left, ABCExtensionArray):
return left.equals(right)
else:
return array_equivalent(left, right, dtype_equal=True)
def infer_fill_value(val):
"""
    Infer the fill value for a nan/NaT from the provided scalar, ndarray, or
    list-like. If we have a NaT, return the correctly-dtyped element needed
    for proper block construction.
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if needs_i8_conversion(val.dtype):
return np.array("NaT", dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(ensure_object(val), skipna=False)
if dtype in ["datetime", "datetime64"]:
return np.array("NaT", dtype=DT64NS_DTYPE)
elif dtype in ["timedelta", "timedelta64"]:
return np.array("NaT", dtype=TD64NS_DTYPE)
return np.nan
def maybe_fill(arr: np.ndarray) -> np.ndarray:
"""
Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype.
"""
if arr.dtype.kind not in ("u", "i", "b"):
arr.fill(np.nan)
return arr
def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : bool, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
numpy.datetime64('NaT')
"""
if isinstance(dtype, ExtensionDtype):
return dtype.na_value
elif needs_i8_conversion(dtype):
return dtype.type("NaT", "ns")
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
if compat:
return False
return np.nan
return np.nan
def remove_na_arraylike(arr):
"""
Return array-like containing only true/non-NaN values, possibly empty.
"""
if is_extension_array_dtype(arr):
return arr[notna(arr)]
else:
return arr[notna(np.asarray(arr))]
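# Illustrative behaviour of remove_na_arraylike:
#   remove_na_arraylike(np.array([1.0, np.nan, 3.0]))  -> array([1., 3.])
# For extension arrays the NA mask is computed via notna(arr) directly, so the
# same dropping applies to e.g. nullable Int64 data.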
def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
"""
isna check that excludes incompatible dtypes
Parameters
----------
obj : object
dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype
Returns
-------
bool
"""
if not lib.is_scalar(obj) or not isna(obj):
return False
elif dtype.kind == "M":
if isinstance(dtype, np.dtype):
# i.e. not tzaware
return not isinstance(obj, (np.timedelta64, Decimal))
# we have to rule out tznaive dt64("NaT")
return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal))
elif dtype.kind == "m":
return not isinstance(obj, (np.datetime64, Decimal))
elif dtype.kind in ["i", "u", "f", "c"]:
# Numeric
return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))
elif dtype == np.dtype("object"):
# This is needed for Categorical, but is kind of weird
return True
# must be PeriodDType
return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
def isna_all(arr: ArrayLike) -> bool:
"""
Optimized equivalent to isna(arr).all()
"""
total_len = len(arr)
    # Usually it's enough to check only a small fraction of values to see if
    # a block is NOT null; chunks should help in such cases.
    # The parameters 1000 and 40 were chosen arbitrarily.
chunk_len = max(total_len // 40, 1000)
dtype = arr.dtype
if dtype.kind == "f":
checker = nan_checker
elif dtype.kind in ["m", "M"] or dtype.type is Period:
# error: Incompatible types in assignment (expression has type
# "Callable[[Any], Any]", variable has type "ufunc")
checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment]
else:
# error: Incompatible types in assignment (expression has type "Callable[[Any],
# Any]", variable has type "ufunc")
checker = lambda x: _isna_array( # type: ignore[assignment]
x, inf_as_na=INF_AS_NA
)
return all(
# error: Argument 1 to "__call__" of "ufunc" has incompatible type
# "Union[ExtensionArray, Any]"; expected "Union[Union[int, float, complex, str,
# bytes, generic], Sequence[Union[int, float, complex, str, bytes, generic]],
# Sequence[Sequence[Any]], _SupportsArray]"
checker(arr[i : i + chunk_len]).all() # type: ignore[arg-type]
for i in range(0, total_len, chunk_len)
)
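# Illustrative behaviour of isna_all (equivalent to isna(arr).all(), chunked):
#   isna_all(np.array([np.nan, np.nan]))  -> True
#   isna_all(np.array([np.nan, 1.0]))     -> False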
|
|
"""Class for printing reports on profiled python code."""
# Class for printing reports on profiled python code. rev 1.0 4/1/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
#
# see profile.doc and profile.py for more info.
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import os
import time
import marshal
import re
from functools import cmp_to_key
__all__ = ["Stats"]
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of
quoted strings to select the sort order. For example sort_stats('time',
'name') sorts on the major key of 'internal function time', and on the
minor key of 'the name of the function'. Look at the two tables in
sort_stats() and get_sort_arg_defs(self) for more examples.
All methods return self, so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args, **kwds):
        # I can't figure out how to explicitly specify a stream keyword arg
# with *args:
# def __init__(self, *args, stream=sys.stdout): ...
        # so I use **kwds and squawk if something unexpected is passed in.
self.stream = sys.stdout
if "stream" in kwds:
self.stream = kwds["stream"]
del kwds["stream"]
if kwds:
keys = kwds.keys()
keys.sort()
extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
raise ValueError, "unrecognized keyword args: %s" % extras
if not len(args):
arg = None
else:
arg = args[0]
args = args[1:]
self.init(arg)
self.add(*args)
def init(self, arg):
self.all_callees = None # calc only if needed
self.files = []
self.fcn_list = None
self.total_tt = 0
self.total_calls = 0
self.prim_calls = 0
self.max_name_len = 0
self.top_level = {}
self.stats = {}
self.sort_arg_dict = {}
self.load_stats(arg)
trouble = 1
try:
self.get_top_level_stats()
trouble = 0
finally:
if trouble:
print >> self.stream, "Invalid timing data",
if self.files: print >> self.stream, self.files[-1],
print >> self.stream
def load_stats(self, arg):
if not arg: self.stats = {}
elif isinstance(arg, basestring):
f = open(arg, 'rb')
self.stats = marshal.load(f)
f.close()
try:
file_stats = os.stat(arg)
arg = time.ctime(file_stats.st_mtime) + " " + arg
except: # in case this is not unix
pass
self.files = [ arg ]
elif hasattr(arg, 'create_stats'):
arg.create_stats()
self.stats = arg.stats
arg.stats = {}
if not self.stats:
raise TypeError, "Cannot create or construct a %r object from '%r''" % (
self.__class__, arg)
return
def get_top_level_stats(self):
for func, (cc, nc, tt, ct, callers) in self.stats.items():
self.total_calls += nc
self.prim_calls += cc
self.total_tt += tt
if ("jprofile", 0, "profiler") in callers:
self.top_level[func] = None
if len(func_std_string(func)) > self.max_name_len:
self.max_name_len = len(func_std_string(func))
def add(self, *arg_list):
if not arg_list: return self
if len(arg_list) > 1: self.add(*arg_list[1:])
other = arg_list[0]
if type(self) != type(other) or self.__class__ != other.__class__:
other = Stats(other)
self.files += other.files
self.total_calls += other.total_calls
self.prim_calls += other.prim_calls
self.total_tt += other.total_tt
for func in other.top_level:
self.top_level[func] = None
if self.max_name_len < other.max_name_len:
self.max_name_len = other.max_name_len
self.fcn_list = None
for func, stat in other.stats.iteritems():
if func in self.stats:
old_func_stat = self.stats[func]
else:
old_func_stat = (0, 0, 0, 0, {},)
self.stats[func] = add_func_stats(old_func_stat, stat)
return self
def dump_stats(self, filename):
"""Write the profile data to a file we know how to load back."""
f = file(filename, 'wb')
try:
marshal.dump(self.stats, f)
finally:
f.close()
# list the tuple indices and directions for sorting,
# along with some printable description
sort_arg_dict_default = {
"calls" : (((1,-1), ), "call count"),
"cumulative": (((3,-1), ), "cumulative time"),
"file" : (((4, 1), ), "file name"),
"line" : (((5, 1), ), "line number"),
"module" : (((4, 1), ), "file name"),
"name" : (((6, 1), ), "function name"),
"nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
"pcalls" : (((0,-1), ), "call count"),
"stdname" : (((7, 1), ), "standard name"),
"time" : (((2,-1), ), "internal time"),
}
def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
if not self.sort_arg_dict:
self.sort_arg_dict = dict = {}
bad_list = {}
for word, tup in self.sort_arg_dict_default.iteritems():
fragment = word
while fragment:
if not fragment:
break
if fragment in dict:
bad_list[fragment] = 0
break
dict[fragment] = tup
fragment = fragment[:-1]
for word in bad_list:
del dict[word]
return self.sort_arg_dict
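    # Illustrative expansion behaviour: any unique prefix works as a sort key,
    # e.g. 'cum' and 'cumul' both resolve to 'cumulative', while an ambiguous
    # prefix such as 'c' (shared by 'calls' and 'cumulative') is removed from
    # the expansion table, so using it raises a KeyError in sort_stats().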
def sort_stats(self, *field):
if not field:
self.fcn_list = 0
return self
if len(field) == 1 and type(field[0]) == type(1):
# Be compatible with old profiler
field = [ {-1: "stdname",
0:"calls",
1:"time",
2: "cumulative" } [ field[0] ] ]
sort_arg_defs = self.get_sort_arg_defs()
sort_tuple = ()
self.sort_type = ""
connector = ""
for word in field:
sort_tuple = sort_tuple + sort_arg_defs[word][0]
self.sort_type += connector + sort_arg_defs[word][1]
connector = ", "
stats_list = []
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
stats_list.append((cc, nc, tt, ct) + func +
(func_std_string(func), func))
stats_list.sort(key=cmp_to_key(TupleComp(sort_tuple).compare))
self.fcn_list = fcn_list = []
for tuple in stats_list:
fcn_list.append(tuple[-1])
return self
def reverse_order(self):
if self.fcn_list:
self.fcn_list.reverse()
return self
def strip_dirs(self):
oldstats = self.stats
self.stats = newstats = {}
max_name_len = 0
for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
newfunc = func_strip_path(func)
if len(func_std_string(newfunc)) > max_name_len:
max_name_len = len(func_std_string(newfunc))
newcallers = {}
for func2, caller in callers.iteritems():
newcallers[func_strip_path(func2)] = caller
if newfunc in newstats:
newstats[newfunc] = add_func_stats(
newstats[newfunc],
(cc, nc, tt, ct, newcallers))
else:
newstats[newfunc] = (cc, nc, tt, ct, newcallers)
old_top = self.top_level
self.top_level = new_top = {}
for func in old_top:
new_top[func_strip_path(func)] = None
self.max_name_len = max_name_len
self.fcn_list = None
self.all_callees = None
return self
def calc_callees(self):
if self.all_callees: return
self.all_callees = all_callees = {}
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
if not func in all_callees:
all_callees[func] = {}
for func2, caller in callers.iteritems():
if not func2 in all_callees:
all_callees[func2] = {}
all_callees[func2][func] = caller
return
#******************************************************************
# The following functions support actual printing of reports
#******************************************************************
# Optional "amount" is either a line count, or a percentage of lines.
def eval_print_amount(self, sel, list, msg):
new_list = list
if type(sel) == type(""):
new_list = []
for func in list:
if re.search(sel, func_std_string(func)):
new_list.append(func)
else:
count = len(list)
if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
count = int(count * sel + .5)
new_list = list[:count]
elif type(sel) == type(1) and 0 <= sel < count:
count = sel
new_list = list[:count]
if len(list) != len(new_list):
msg = msg + " List reduced from %r to %r due to restriction <%r>\n" % (
len(list), len(new_list), sel)
return new_list, msg
def get_print_list(self, sel_list):
width = self.max_name_len
if self.fcn_list:
list = self.fcn_list[:]
msg = " Ordered by: " + self.sort_type + '\n'
else:
list = self.stats.keys()
msg = " Random listing order was used\n"
for selection in sel_list:
list, msg = self.eval_print_amount(selection, list, msg)
count = len(list)
if not list:
return 0, list
print >> self.stream, msg
if count < len(self.stats):
width = 0
for func in list:
if len(func_std_string(func)) > width:
width = len(func_std_string(func))
return width+2, list
def print_stats(self, *amount):
for filename in self.files:
print >> self.stream, filename
if self.files: print >> self.stream
indent = ' ' * 8
for func in self.top_level:
print >> self.stream, indent, func_get_function_name(func)
print >> self.stream, indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
print >> self.stream, "(%d primitive calls)" % self.prim_calls,
print >> self.stream, "in %.3f CPU seconds" % self.total_tt
print >> self.stream
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
print >> self.stream
print >> self.stream
return self
def print_callees(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.calc_callees()
self.print_call_heading(width, "called...")
for func in list:
if func in self.all_callees:
self.print_call_line(width, func, self.all_callees[func])
else:
self.print_call_line(width, func, {})
print >> self.stream
print >> self.stream
return self
def print_callers(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.print_call_heading(width, "was called by...")
for func in list:
cc, nc, tt, ct, callers = self.stats[func]
self.print_call_line(width, func, callers, "<-")
print >> self.stream
print >> self.stream
return self
def print_call_heading(self, name_size, column_title):
print >> self.stream, "Function ".ljust(name_size) + column_title
# print sub-header only if we have new-style callers
subheader = False
for cc, nc, tt, ct, callers in self.stats.itervalues():
if callers:
value = callers.itervalues().next()
subheader = isinstance(value, tuple)
break
if subheader:
print >> self.stream, " "*name_size + " ncalls tottime cumtime"
def print_call_line(self, name_size, source, call_dict, arrow="->"):
print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
if not call_dict:
print >> self.stream
return
clist = call_dict.keys()
clist.sort()
indent = ""
for func in clist:
name = func_std_string(func)
value = call_dict[func]
if isinstance(value, tuple):
nc, cc, tt, ct = value
if nc != cc:
substats = '%d/%d' % (nc, cc)
else:
substats = '%d' % (nc,)
substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)),
f8(tt), f8(ct), name)
left_width = name_size + 1
else:
substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
left_width = name_size + 3
print >> self.stream, indent*left_width + substats
indent = " "
def print_title(self):
print >> self.stream, ' ncalls tottime percall cumtime percall',
print >> self.stream, 'filename:lineno(function)'
def print_line(self, func): # hack : should print percentages
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
print >> self.stream, c.rjust(9),
print >> self.stream, f8(tt),
if nc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(float(tt)/nc),
print >> self.stream, f8(ct),
if cc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(float(ct)/cc),
print >> self.stream, func_std_string(func)
class TupleComp:
"""This class provides a generic function for comparing any two tuples.
Each instance records a list of tuple-indices (from most significant
to least significant), and sort direction (ascending or descending) for
each tuple-index. The compare functions can then be used as the function
argument to the system sort() function when a list of tuples needs to be
sorted in the instance's order."""
def __init__(self, comp_select_list):
self.comp_select_list = comp_select_list
def compare (self, left, right):
for index, direction in self.comp_select_list:
l = left[index]
r = right[index]
if l < r:
return -direction
if l > r:
return direction
return 0
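# Illustrative sketch (hypothetical data, not part of the original module):
# sorting with TupleComp via cmp_to_key.  A comp_select_list of
# [(0, 1), (1, -1)] orders by element 0 ascending, then element 1 descending:
#   sorted([(2, 'b'), (1, 'a'), (1, 'c')],
#          key=cmp_to_key(TupleComp([(0, 1), (1, -1)]).compare))
#   -> [(1, 'c'), (1, 'a'), (2, 'b')]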
#**************************************************************************
# func_name is a triple (file:string, line:int, name:string)
def func_strip_path(func_name):
filename, line, name = func_name
return os.path.basename(filename), line, name
def func_get_function_name(func):
return func[2]
def func_std_string(func_name): # match what old profile produced
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
return "%s:%d(%s)" % func_name
#**************************************************************************
# The following functions combine statistics for pairs of functions.
# The bulk of the processing involves correctly handling "call" lists,
# such as callers and callees.
#**************************************************************************
def add_func_stats(target, source):
"""Add together all the stats for two profile entries."""
cc, nc, tt, ct, callers = source
t_cc, t_nc, t_tt, t_ct, t_callers = target
return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
add_callers(t_callers, callers))
def add_callers(target, source):
"""Combine two caller lists in a single list."""
new_callers = {}
for func, caller in target.iteritems():
new_callers[func] = caller
for func, caller in source.iteritems():
if func in new_callers:
new_callers[func] = tuple([i[0] + i[1] for i in
zip(caller, new_callers[func])])
else:
new_callers[func] = caller
return new_callers
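# Illustrative sketch (hypothetical data): merging caller dicts whose values
# are (nc, cc, tt, ct) tuples sums them element-wise when the same caller
# appears in both, and copies unique entries unchanged:
#   add_callers({('a.py', 1, 'f'): (2, 2, 1.0, 3.0)},
#               {('a.py', 1, 'f'): (1, 1, 2.0, 2.0), ('b.py', 9, 'g'): (1, 1, 0.5, 0.5)})
#   -> {('a.py', 1, 'f'): (3, 3, 3.0, 5.0), ('b.py', 9, 'g'): (1, 1, 0.5, 0.5)}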
def count_calls(callers):
"""Sum the caller statistics to get total number of calls received."""
nc = 0
for calls in callers.itervalues():
nc += calls
return nc
#**************************************************************************
# The following functions support printing of reports
#**************************************************************************
def f8(x):
return "%8.3f" % x
#**************************************************************************
# Statistics browser added by ESR, April 2001
#**************************************************************************
if __name__ == '__main__':
import cmd
try:
import readline
except ImportError:
pass
class ProfileBrowser(cmd.Cmd):
def __init__(self, profile=None):
cmd.Cmd.__init__(self)
self.prompt = "% "
if profile is not None:
self.stats = Stats(profile)
self.stream = self.stats.stream
else:
self.stats = None
self.stream = sys.stdout
def generic(self, fn, line):
args = line.split()
processed = []
for term in args:
try:
processed.append(int(term))
continue
except ValueError:
pass
try:
frac = float(term)
if frac > 1 or frac < 0:
print >> self.stream, "Fraction argument must be in [0, 1]"
continue
processed.append(frac)
continue
except ValueError:
pass
processed.append(term)
if self.stats:
getattr(self.stats, fn)(*processed)
else:
print >> self.stream, "No statistics object is loaded."
return 0
def generic_help(self):
print >> self.stream, "Arguments may be:"
print >> self.stream, "* An integer maximum number of entries to print."
print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
print >> self.stream, " what fraction of selected entries to print."
print >> self.stream, "* A regular expression; only entries with function names"
print >> self.stream, " that match it are printed."
def do_add(self, line):
self.stats.add(line)
return 0
def help_add(self):
print >> self.stream, "Add profile info from given file to current statistics object."
def do_callees(self, line):
return self.generic('print_callees', line)
def help_callees(self):
print >> self.stream, "Print callees statistics from the current stat object."
self.generic_help()
def do_callers(self, line):
return self.generic('print_callers', line)
def help_callers(self):
print >> self.stream, "Print callers statistics from the current stat object."
self.generic_help()
def do_EOF(self, line):
print >> self.stream, ""
return 1
def help_EOF(self):
print >> self.stream, "Leave the profile brower."
def do_quit(self, line):
return 1
def help_quit(self):
print >> self.stream, "Leave the profile brower."
def do_read(self, line):
if line:
try:
self.stats = Stats(line)
except IOError, args:
print >> self.stream, args[1]
return
self.prompt = line + "% "
elif len(self.prompt) > 2:
line = self.prompt[-2:]
else:
print >> self.stream, "No statistics object is current -- cannot reload."
return 0
def help_read(self):
print >> self.stream, "Read in profile data from a specified file."
def do_reverse(self, line):
self.stats.reverse_order()
return 0
def help_reverse(self):
print >> self.stream, "Reverse the sort order of the profiling report."
def do_sort(self, line):
abbrevs = self.stats.get_sort_arg_defs()
if line and all((x in abbrevs) for x in line.split()):
self.stats.sort_stats(*line.split())
else:
print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
for (key, value) in Stats.sort_arg_dict_default.iteritems():
print >> self.stream, "%s -- %s" % (key, value[1])
return 0
def help_sort(self):
print >> self.stream, "Sort profile data according to specified keys."
print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
def complete_sort(self, text, *args):
return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
def do_stats(self, line):
return self.generic('print_stats', line)
def help_stats(self):
print >> self.stream, "Print statistics from the current stat object."
self.generic_help()
def do_strip(self, line):
self.stats.strip_dirs()
return 0
def help_strip(self):
print >> self.stream, "Strip leading path information from filenames in the report."
def postcmd(self, stop, line):
if stop:
return stop
return None
import sys
if len(sys.argv) > 1:
initprofile = sys.argv[1]
else:
initprofile = None
try:
browser = ProfileBrowser(initprofile)
print >> browser.stream, "Welcome to the profile statistics browser."
browser.cmdloop()
print >> browser.stream, "Goodbye."
except KeyboardInterrupt:
pass
# That's all, folks.
|
|
# coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, with_statement
import htmlentitydefs
import re
import urllib
from cyclone.util import basestring_type
from cyclone.util import bytes_type
from cyclone.util import unicode_type
try:
from urlparse import parse_qs # Python 2.6+
except ImportError:
from cgi import parse_qs
# json module is in the standard library as of python 2.6; fall back to
# simplejson if present for older versions.
try:
import json
assert hasattr(json, "loads") and hasattr(json, "dumps")
_json_decode = json.loads
_json_encode = json.dumps
except Exception:
try:
import simplejson
_json_decode = lambda s: simplejson.loads(_unicode(s))
_json_encode = lambda v: simplejson.dumps(v)
except ImportError:
try:
# For Google AppEngine
from django.utils import simplejson
_json_decode = lambda s: simplejson.loads(_unicode(s))
_json_encode = lambda v: simplejson.dumps(v)
except ImportError:
def _json_decode(s):
raise NotImplementedError(
"A JSON parser is required, e.g., simplejson at "
"http://pypi.python.org/pypi/simplejson/")
_json_encode = _json_decode
_XHTML_ESCAPE_RE = re.compile('[&<>"]')
_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;'}
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML."""
return _XHTML_ESCAPE_RE.sub(lambda match:
_XHTML_ESCAPE_DICT[match.group(0)], to_basestring(value))
def xhtml_unescape(value):
"""Un-escapes an XML-escaped string."""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
def json_encode(value):
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
# the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/\
# json-why-are-forward-slashes-escaped
return _json_encode(recursive_unicode(value)).replace("</", "<\\/")
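# Illustrative note (sketch): json_encode({"html": "</script>"}) yields the
# string {"html": "<\/script>"}, so a literal "</script>" embedded in the JSON
# can never terminate an enclosing <script> block.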
def json_decode(value):
"""Returns Python objects for the given JSON string."""
return _json_decode(to_basestring(value))
def squeeze(value):
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value):
"""Returns a valid URL-encoded version of the given value."""
return urllib.quote_plus(utf8(value))
def url_unescape(value, encoding='utf-8'):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
"""
if encoding is None:
return urllib.unquote_plus(utf8(value))
else:
return unicode(urllib.unquote_plus(utf8(value)), encoding)
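# Illustrative note (sketch): url_escape(u"a b&c") returns 'a+b%26c', and
# url_unescape('a+b%26c') returns u'a b&c' (or the raw byte string 'a b&c'
# when called with encoding=None).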
parse_qs_bytes = parse_qs
_UTF8_TYPES = (bytes, type(None))
def utf8(value):
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
assert isinstance(value, unicode_type)
return value.encode("utf-8")
_TO_UNICODE_TYPES = (unicode_type, type(None))
def to_unicode(value):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
assert isinstance(value, bytes_type)
return value.decode("utf-8")
# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
native_str = to_unicode
else:
native_str = utf8
_BASESTRING_TYPES = (basestring_type, type(None))
def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
assert isinstance(value, bytes_type)
return value.decode("utf-8")
def recursive_unicode(obj):
"""Walks a simple data structure, converting byte strings to unicode.
Supports lists, tuples, and dictionaries.
"""
if isinstance(obj, dict):
return dict((recursive_unicode(k), recursive_unicode(v))
for (k, v) in obj.iteritems())
elif isinstance(obj, list):
return list(recursive_unicode(i) for i in obj)
elif isinstance(obj, tuple):
return tuple(recursive_unicode(i) for i in obj)
elif isinstance(obj, bytes_type):
return to_unicode(obj)
else:
return obj
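# Illustrative note (sketch): recursive_unicode({b"k": [b"v1", (b"v2",)]})
# returns {u'k': [u'v1', (u'v2',)]} -- byte strings are decoded as utf-8 while
# the container types (dict, list, tuple) are preserved.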
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])"""
r"""(?:(?:(?:[^\s&()]|&|")*"""
r"""(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))"""
r"""|(?:\((?:[^\s&()]|&|")*\)))+)"""))
def linkify(text, shorten=False, extra_params="",
require_protocol=False, permitted_protocols=["http", "https"]):
"""Converts plain text into HTML with links.
For example: ``linkify("Hello http://cyclone.io!")`` would return
``Hello <a href="http://cyclone.io">http://cyclone.io</a>!``
Parameters:
shorten: Long urls will be shortened for display.
extra_params: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
require_protocol: Only linkify urls which include a protocol. If this is
False, urls such as www.facebook.com will also be linkified.
permitted_protocols: List (or set) of protocols which should be linkified,
e.g. linkify(text, permitted_protocols=["http", "ftp", "mailto"]).
It is very unsafe to include protocols such as "javascript".
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m):
url = m.group(1)
proto = m.group(2)
if require_protocol and not proto:
return url # no protocol, no linkify
if proto and proto not in permitted_protocols:
return url # bad protocol, no linkify
href = m.group(1)
if not proto:
href = "http://" + href # no proto specified, use http
if callable(extra_params):
params = " " + extra_params(href).strip()
else:
params = extra_params
# clip long urls. max_len is just an approximation
max_len = 30
if shorten and len(url) > max_len:
before_clip = url
if proto:
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
else:
proto_len = 0
parts = url[proto_len:].split("/")
if len(parts) > 1:
# Grab the whole host part plus the first bit of the path
# The path is usually not that interesting once shortened
# (no more slug, etc), so it really just provides a little
# extra indication of shortening.
url = url[:proto_len] + parts[0] + "/" + \
parts[1][:8].split('?')[0].split('.')[0]
if len(url) > max_len * 1.5: # still too long
url = url[:max_len]
if url != before_clip:
amp = url.rfind('&')
# avoid splitting html char entities
if amp > max_len - 5:
url = url[:amp]
url += "..."
if len(url) >= len(before_clip):
url = before_clip
else:
# full url is visible on mouse-over (for those who don't
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
return ('<a href="%s"%s>%s</a>'.decode("unicode_escape") %
(href, params, url))
# First HTML-escape so that our strings are all safe.
# The regex is modified to avoid character entities other than &amp; so
# that we won't pick up &quot;, etc.
text = _unicode(xhtml_escape(text))
return _URL_RE.sub(make_link, text)
def _convert_entity(m):
if m.group(1) == "#":
try:
return unichr(int(m.group(2)))
except ValueError:
return "&#%s;" % m.group(2)
try:
return _HTML_UNICODE_MAP[m.group(2)]
except KeyError:
return "&%s;" % m.group(2)
def _build_unicode_map():
unicode_map = {}
for name, value in htmlentitydefs.name2codepoint.items():
unicode_map[name] = unichr(value)
return unicode_map
_HTML_UNICODE_MAP = _build_unicode_map()
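# Illustrative note (sketch): xhtml_unescape('&lt;b&gt; &amp; &#65;') returns
# u'<b> & A' -- named entities are resolved through _HTML_UNICODE_MAP and
# numeric references through unichr().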
|
|
"""
Copyright 2015 Zalando SE
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
"""
from copy import deepcopy
import functools
import logging
import os
import jsonschema
from jsonschema import ValidationError
from .decorators import validation
from .decorators.metrics import UWSGIMetricsCollector
from .decorators.parameter import parameter_to_arg
from .decorators.produces import BaseSerializer, Produces, Jsonifier
from .decorators.response import ResponseValidator
from .decorators.security import security_passthrough, verify_oauth
from .decorators.validation import RequestBodyValidator, ParameterValidator, TypeValidationError
from .exceptions import InvalidSpecification
from .utils import flaskify_endpoint, produces_json
logger = logging.getLogger('connexion.operation')
class Operation:
"""
A single API operation on a path.
"""
def __init__(self, method, path, operation, app_produces, app_security, security_definitions, definitions,
parameter_definitions, resolver, validate_responses=False):
"""
This class uses the OperationID to identify the module and function that will handle the operation.
From Swagger Specification:
**OperationID**
A friendly name for the operation. The id MUST be unique among all operations described in the API.
Tools and libraries MAY use the operation id to uniquely identify an operation.
:param method: HTTP method
:type method: str
:param path:
:type path: str
:param operation: swagger operation object
:type operation: dict
:param app_produces: list of content types the application can return by default
:type app_produces: list
:param app_security: list of security rules the application uses by default
:type app_security: list
:param security_definitions: `Security Definitions Object
<https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#security-definitions-object>`_
:type security_definitions: dict
:param definitions: `Definitions Object
<https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#definitionsObject>`_
:type definitions: dict
:param resolver: Callable that maps operationID to a function
:param validate_responses: True enables validation. Validation errors generate HTTP 500 responses.
:type validate_responses: bool
"""
self.method = method
self.path = path
self.security_definitions = security_definitions
self.definitions = definitions
self.parameter_definitions = parameter_definitions
self.definitions_map = {
'definitions': self.definitions,
'parameters': self.parameter_definitions
}
self.validate_responses = validate_responses
self.operation = operation
# todo support definition references
# todo support references to application level parameters
self.parameters = list(self.resolve_parameters(operation.get('parameters', [])))
self.security = operation.get('security', app_security)
self.produces = operation.get('produces', app_produces)
resolution = resolver.resolve(self)
self.operation_id = resolution.operation_id
self.endpoint_name = flaskify_endpoint(self.operation_id)
self.__undecorated_function = resolution.function
for param in self.parameters:
if param['in'] == 'body' and 'default' in param:
self.default_body = param
break
else:
self.default_body = None
self.validate_defaults()
def validate_defaults(self):
for param in self.parameters:
try:
if param['in'] == 'body' and 'default' in param:
param = param.copy()
if 'required' in param:
del param['required']
if param['type'] == 'object':
jsonschema.validate(param['default'], self.body_schema,
format_checker=jsonschema.draft4_format_checker)
else:
jsonschema.validate(param['default'], param, format_checker=jsonschema.draft4_format_checker)
elif param['in'] == 'query' and 'default' in param:
validation.validate_type(param, param['default'], 'query', param['name'])
except (TypeValidationError, ValidationError):
raise InvalidSpecification('The parameter \'{param_name}\' has a default value which is not of'
' type \'{param_type}\''.format(param_name=param['name'],
param_type=param['type']))
def resolve_reference(self, schema):
schema = deepcopy(schema) # avoid changing the original schema
# find the object we need to resolve/update
for obj in schema, schema.get('items'):
reference = obj and obj.get('$ref') # type: str
if reference:
break
if reference:
if not reference.startswith('#/'):
raise InvalidSpecification(
"{method} {path} '$ref' needs to start with '#/'".format(**vars(self)))
path = reference.split('/')
definition_type = path[1]
try:
definitions = self.definitions_map[definition_type]
except KeyError:
raise InvalidSpecification(
"{method} {path} '$ref' needs to point to definitions or parameters".format(**vars(self)))
definition_name = path[-1]
try:
# Get sub definition
definition = deepcopy(definitions[definition_name])
except KeyError:
raise InvalidSpecification("{method} {path} Definition '{definition_name}' not found".format(
definition_name=definition_name, method=self.method, path=self.path))
# resolve object properties too
for prop, prop_spec in definition.get('properties', {}).items():
resolved = self.resolve_reference(prop_spec.get('schema', {}))
if not resolved:
resolved = self.resolve_reference(prop_spec)
if resolved:
definition['properties'][prop] = resolved
# Update schema
obj.update(definition)
del obj['$ref']
return schema
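# Illustrative sketch (hypothetical spec fragment, not from the original
# module): with
#   self.definitions == {'Pet': {'type': 'object',
#                                'properties': {'name': {'type': 'string'}}}}
# calling resolve_reference({'$ref': '#/definitions/Pet'}) returns a copy of
# the Pet definition with the '$ref' key replaced by the referenced content;
# the input schema itself is left untouched thanks to the deepcopy.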
def get_mimetype(self):
if produces_json(self.produces): # endpoint will return json
try:
return self.produces[0]
except IndexError:
# if the endpoint has no 'produces' then the default is 'application/json'
return 'application/json'
elif len(self.produces) == 1:
return self.produces[0]
else:
return None
def resolve_parameters(self, parameters):
for param in parameters:
param = self.resolve_reference(param)
yield param
def get_path_parameter_types(self):
return {p['name']: p.get('type') for p in self.parameters if p['in'] == 'path'}
@property
def body_schema(self):
"""
`About operation parameters
<https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#fixed-fields-4>`_
A list of parameters that are applicable for all the operations described under this path. These parameters can
be overridden at the operation level, but cannot be removed there. The list MUST NOT include duplicated
parameters. A unique parameter is defined by a combination of a name and location. The list can use the
Reference Object to link to parameters that are defined at the Swagger Object's parameters.
**There can be one "body" parameter at most.**
:rtype: dict
"""
body_parameters = [parameter for parameter in self.parameters if parameter['in'] == 'body']
if len(body_parameters) > 1:
raise InvalidSpecification(
"{method} {path} There can be one 'body' parameter at most".format(**vars(self)))
body_parameters = body_parameters[0] if body_parameters else {}
schema = body_parameters.get('schema') # type: dict
if schema:
schema = self.resolve_reference(schema)
return schema
@property
def function(self):
"""
Operation function with decorators
:rtype: types.FunctionType
"""
parameters = []
for param in self.parameters: # resolve references
param = param.copy()
schema = param.get('schema')
if schema:
schema = self.resolve_reference(schema)
param['schema'] = schema
parameters.append(param)
function = parameter_to_arg(parameters, self.__undecorated_function)
if self.validate_responses:
logger.debug('... Response validation enabled.')
response_decorator = self.__response_validation_decorator
logger.debug('... Adding response decorator (%r)', response_decorator)
function = response_decorator(function)
produces_decorator = self.__content_type_decorator
logger.debug('... Adding produces decorator (%r)', produces_decorator, extra=vars(self))
function = produces_decorator(function)
for validation_decorator in self.__validation_decorators:
function = validation_decorator(function)
# NOTE: the security decorator should be applied last to check auth before anything else :-)
security_decorator = self.__security_decorator
logger.debug('... Adding security decorator (%r)', security_decorator, extra=vars(self))
function = security_decorator(function)
if UWSGIMetricsCollector.is_available():
decorator = UWSGIMetricsCollector(self.path, self.method)
function = decorator(function)
return function
@property
def __content_type_decorator(self):
"""
Get produces decorator.
If the operation mimetype format is json then the function return value is jsonified
From Swagger Specification:
**Produces**
A list of MIME types the operation can produce. This overrides the produces definition at the Swagger Object.
An empty value MAY be used to clear the global definition.
:rtype: types.FunctionType
"""
logger.debug('... Produces: %s', self.produces, extra=vars(self))
mimetype = self.get_mimetype()
if produces_json(self.produces): # endpoint will return json
logger.debug('... Produces json', extra=vars(self))
jsonify = Jsonifier(mimetype)
return jsonify
elif len(self.produces) == 1:
logger.debug('... Produces %s', mimetype, extra=vars(self))
decorator = Produces(mimetype)
return decorator
else:
return BaseSerializer()
@property
def __security_decorator(self):
"""
Gets the security decorator for operation
From Swagger Specification:
**Security Definitions Object**
A declaration of the security schemes available to be used in the specification.
This does not enforce the security schemes on the operations and only serves to provide the relevant details
for each scheme.
**Security Requirement Object**
Lists the required security schemes to execute this operation. The object can have multiple security schemes
declared in it which are all required (that is, there is a logical AND between the schemes).
The name used for each property **MUST** correspond to a security scheme declared in the Security Definitions.
:rtype: types.FunctionType
"""
logger.debug('... Security: %s', self.security, extra=vars(self))
if self.security:
if len(self.security) > 1:
logger.warning("... More than one security requirement defined. **IGNORING SECURITY REQUIREMENTS**",
extra=vars(self))
return security_passthrough
security = self.security[0] # type: dict
# the following line gets the first (and because of the previous condition only) scheme and scopes
# from the operation's security requirements
scheme_name, scopes = next(iter(security.items())) # type: str, list
security_definition = self.security_definitions[scheme_name]
if security_definition['type'] == 'oauth2':
token_info_url = security_definition.get('x-tokenInfoUrl', os.getenv('HTTP_TOKENINFO_URL'))
if token_info_url:
scopes = set(scopes) # convert scopes to set because this is needed for verify_oauth
return functools.partial(verify_oauth, token_info_url, scopes)
else:
logger.warning("... OAuth2 token info URL missing. **IGNORING SECURITY REQUIREMENTS**",
extra=vars(self))
elif security_definition['type'] in ('apiKey', 'basic'):
logger.debug(
"... Security type '%s' not natively supported by Connexion; you should handle it yourself",
security_definition['type'], extra=vars(self))
else:
logger.warning("... Security type '%s' unknown. **IGNORING SECURITY REQUIREMENTS**",
security_definition['type'], extra=vars(self))
# if we don't know how to handle the security or it's not defined we will use a passthrough decorator
return security_passthrough
@property
def __validation_decorators(self):
"""
:rtype: types.FunctionType
"""
if self.parameters:
yield ParameterValidator(self.parameters)
if self.body_schema:
yield RequestBodyValidator(self.body_schema, self.default_body is not None)
@property
def __response_validation_decorator(self):
"""
Get a decorator for validating the generated Response.
:rtype: types.FunctionType
"""
return ResponseValidator(self, self.get_mimetype())
|
|
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ._base import Matrix, MatrixError, BackendNotAvailable
from .. import numeric, util, warnings
from contextlib import contextmanager
from ctypes import c_long, c_int, c_double, byref
import treelog as log
import os, numpy
libmkl = util.loadlib(linux='libmkl_rt.so', darwin='libmkl_rt.dylib', win32='mkl_rt.dll')
if not libmkl:
raise BackendNotAvailable('the Intel MKL matrix backend requires libmkl to be installed (try: pip install mkl)')
os.environ.setdefault('MKL_THREADING_LAYER', 'TBB')
def assemble(data, index, shape):
# In the increments below the output dtype is set to int32 not only to avoid
# an additional allocation, but crucially also to avoid truncation in case
# the incremented index overflows the original type.
return MKLMatrix(data, ncols=shape[1],
rowptr=numpy.add(index[0].searchsorted(numpy.arange(shape[0]+1)), 1, dtype=numpy.int32),
colidx=numpy.add(index[1], 1, dtype=numpy.int32))
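# Illustrative sketch (not part of the original backend): assembling a 2x2
# matrix with entries (0, 0) -> 1.0 and (1, 1) -> 2.0 from row-sorted COO data.
# assemble() converts the indices into MKL's one-based CSR arrays, here
# rowptr=[1, 2, 3] and colidx=[1, 2]:
#   data = numpy.array([1., 2.])
#   index = numpy.array([[0, 1], [0, 1]])  # row indices, column indices
#   mat = assemble(data, index, shape=(2, 2))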
class Pardiso:
'''Wrapper for libmkl.pardiso.
https://software.intel.com/en-us/mkl-developer-reference-c-pardiso
'''
_errorcodes = {
-1: 'input inconsistent',
-2: 'not enough memory',
-3: 'reordering problem',
-4: 'zero pivot, numerical factorization or iterative refinement problem',
-5: 'unclassified (internal) error',
-6: 'reordering failed (matrix types 11 and 13 only)',
-7: 'diagonal matrix is singular',
-8: '32-bit integer overflow problem',
-9: 'not enough memory for OOC',
-10: 'error opening OOC files',
-11: 'read/write error with OOC files',
-12: 'pardiso_64 called from 32-bit library',
}
def __init__(self, mtype, a, ia, ja, verbose=False, iparm={}):
self.pt = numpy.zeros(64, numpy.int64) # handle to data structure
self.maxfct = c_int(1)
self.mnum = c_int(1)
self.mtype = c_int(mtype)
self.n = c_int(len(ia)-1)
self.a = a.ctypes
self.ia = ia.ctypes
self.ja = ja.ctypes
self.perm = None
self.iparm = numpy.zeros(64, dtype=numpy.int32) # https://software.intel.com/en-us/mkl-developer-reference-c-pardiso-iparm-parameter
self.msglvl = c_int(verbose)
libmkl.pardisoinit(self.pt.ctypes, byref(self.mtype), self.iparm.ctypes) # initialize iparm based on mtype
if self.iparm[0] != 1:
raise MatrixError('pardiso init failed')
for n, v in iparm.items():
self.iparm[n] = v
self.iparm[27] = 0 # double precision data
self.iparm[34] = 0 # one-based indexing
self.iparm[36] = 0 # csr matrix format
self._phase(12) # analysis, numerical factorization
log.debug('peak memory use {:,d}k'.format(max(self.iparm[14], self.iparm[15]+self.iparm[16])))
def __call__(self, rhs):
rhsflat = numpy.ascontiguousarray(rhs.reshape(rhs.shape[0], -1).T, dtype=numpy.float64)
lhsflat = numpy.empty_like(rhsflat)
self._phase(33, rhsflat.shape[0], rhsflat.ctypes, lhsflat.ctypes) # solve, iterative refinement
return lhsflat.T.reshape(rhs.shape)
def _phase(self, phase, nrhs=0, b=None, x=None):
error = c_int(1)
libmkl.pardiso(self.pt.ctypes, byref(self.maxfct), byref(self.mnum), byref(self.mtype),
byref(c_int(phase)), byref(self.n), self.a, self.ia, self.ja, self.perm,
byref(c_int(nrhs)), self.iparm.ctypes, byref(self.msglvl), b, x, byref(error))
if error.value:
raise MatrixError(self._errorcodes.get(error.value, 'unknown error {}'.format(error.value)))
def __del__(self):
self._phase(-1) # release all internal memory for all matrices
if self.pt.any():
warnings.warn('Pardiso failed to release its internal memory')
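# Illustrative note (hypothetical usage, mirroring MKLMatrix._precon_direct
# below): a direct solve of a general real matrix uses mtype=11 with one-based
# CSR arrays; the factorization happens in __init__, and __call__ only performs
# the back-substitution:
#   solve = Pardiso(mtype=11, a=data, ia=rowptr, ja=colidx)
#   x = solve(b)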
class MKLMatrix(Matrix):
'''matrix implementation based on MKL's one-based CSR arrays, assembled from sorted coo data'''
def __init__(self, data, rowptr, colidx, ncols):
assert len(data) == len(colidx) == rowptr[-1]-1
self.data = numpy.ascontiguousarray(data, dtype=numpy.float64)
self.rowptr = numpy.ascontiguousarray(rowptr, dtype=numpy.int32)
self.colidx = numpy.ascontiguousarray(colidx, dtype=numpy.int32)
super().__init__((len(rowptr)-1, ncols))
def convert(self, mat):
if not isinstance(mat, Matrix):
raise TypeError('cannot convert {} to Matrix'.format(type(mat).__name__))
if self.shape != mat.shape:
raise MatrixError('non-matching shapes')
if isinstance(mat, MKLMatrix):
return mat
data, colidx, rowptr = mat.export('csr')
return MKLMatrix(data, rowptr+1, colidx+1, self.shape[1])
def __add__(self, other):
other = self.convert(other)
assert self.shape == other.shape
request = c_int(1)
info = c_int()
rowptr = numpy.empty(self.shape[0]+1, dtype=numpy.int32)
args = ["N", byref(request), byref(c_int(0)),
byref(c_int(self.shape[0])), byref(c_int(self.shape[1])),
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, byref(c_double(1.)),
other.data.ctypes, other.colidx.ctypes, other.rowptr.ctypes,
None, None, rowptr.ctypes, None, byref(info)]
libmkl.mkl_dcsradd(*args)
assert info.value == 0
colidx = numpy.empty(rowptr[-1]-1, dtype=numpy.int32)
data = numpy.empty(rowptr[-1]-1, dtype=numpy.float64)
request.value = 2
args[12:14] = data.ctypes, colidx.ctypes
libmkl.mkl_dcsradd(*args)
assert info.value == 0
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def __mul__(self, other):
if not numeric.isnumber(other):
raise TypeError
return MKLMatrix(self.data * other, self.rowptr, self.colidx, self.shape[1])
def __matmul__(self, other):
if not isinstance(other, numpy.ndarray):
raise TypeError
if other.shape[0] != self.shape[1]:
raise MatrixError
x = numpy.ascontiguousarray(other.T, dtype=numpy.float64)
y = numpy.empty(x.shape[:-1] + self.shape[:1], dtype=numpy.float64)
if other.ndim == 1:
libmkl.mkl_dcsrgemv('N', byref(c_int(self.shape[0])),
self.data.ctypes, self.rowptr.ctypes, self.colidx.ctypes, x.ctypes, y.ctypes)
else:
libmkl.mkl_dcsrmm('N', byref(c_int(self.shape[0])),
byref(c_int(other.size//other.shape[0])),
byref(c_int(self.shape[1])), byref(c_double(1.)), 'GXXFXX',
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, self.rowptr[1:].ctypes,
x.ctypes, byref(c_int(other.shape[0])), byref(c_double(0.)),
y.ctypes, byref(c_int(other.shape[0])))
return y.T
def __neg__(self):
return MKLMatrix(-self.data, self.rowptr, self.colidx, self.shape[1])
@property
def T(self):
if self.shape[0] != self.shape[1]:
raise NotImplementedError('MKLMatrix does not yet support transpose of non-square matrices')
job = numpy.array([0, 1, 1, 0, 0, 1], numpy.int32)
data = numpy.empty_like(self.data)
rowptr = numpy.empty_like(self.rowptr)
colidx = numpy.empty_like(self.colidx)
info = c_int()
libmkl.mkl_dcsrcsc(job.ctypes,
byref(c_int(self.shape[0])), self.data.ctypes,
self.colidx.ctypes, self.rowptr.ctypes, data.ctypes, colidx.ctypes,
rowptr.ctypes, byref(info))
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def _submatrix(self, rows, cols):
keep = rows.repeat(numpy.diff(self.rowptr))
keep &= cols[self.colidx-1]
if keep.all(): # all nonzero entries are kept
rowptr = self.rowptr[numpy.hstack([True, rows])]
keep = slice(None) # avoid array copies
else:
rowptr = numpy.cumsum([1] + [keep[i:j].sum() for i, j in numeric.overlapping(self.rowptr-1)[rows]], dtype=numpy.int32)
data = self.data[keep]
assert rowptr[-1] == len(data)+1
colidx = (self.colidx if cols.all() else cols.cumsum(dtype=numpy.int32)[self.colidx-1])[keep]
return MKLMatrix(data, rowptr, colidx, cols.sum())
def export(self, form):
if form == 'dense':
dense = numpy.zeros(self.shape)
for row, i, j in zip(dense, self.rowptr[:-1]-1, self.rowptr[1:]-1):
row[self.colidx[i:j]-1] = self.data[i:j]
return dense
if form == 'csr':
return self.data, self.colidx-1, self.rowptr-1
if form == 'coo':
return self.data, (numpy.arange(self.shape[0]).repeat(self.rowptr[1:]-self.rowptr[:-1]), self.colidx-1)
raise NotImplementedError('cannot export MKLMatrix to {!r}'.format(form))
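# Illustrative note (sketch): for the 2x2 matrix [[1, 0], [0, 2]] this class
# stores the one-based MKL arrays rowptr=[1, 2, 3] and colidx=[1, 2], while
# export('csr') returns the zero-based equivalents colidx=[0, 1] and
# rowptr=[0, 1, 2] together with data=[1., 2.].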
def _solver_fgmres(self, rhs, atol, maxiter=0, restart=150, precon=None, ztol=1e-12, preconargs={}, **args):
rci = c_int(0)
n = c_int(len(rhs))
b = numpy.array(rhs, dtype=numpy.float64)
x = numpy.zeros_like(b)
ipar = numpy.zeros(128, dtype=numpy.int32)
ipar[0] = len(rhs) # problem size
ipar[1] = 6 # output on screen
ipar[2] = 1 # current stage of the RCI FGMRES computations; the initial value is 1
ipar[3] = 0 # current iteration number; the initial value is 0
ipar[4] = 0 # maximum number of iterations
ipar[5] = 1 # output error messages in accordance with the parameter ipar[1]
ipar[6] = 1 # output warning messages in accordance with the parameter ipar[1]
ipar[7] = 0 # do not perform the stopping test for the maximum number of iterations: ipar[3] <= ipar[4]
ipar[8] = 0 # do not perform the residual stopping test: dpar[4] <= dpar[3]
ipar[9] = 1 # perform the user-defined stopping test by setting RCI_request=2
if precon is None:
ipar[10] = 0 # run the non-preconditioned version of the FGMRES method
else:
ipar[10] = 1 # run the preconditioned version of the FGMRES method
precon = self.getprecon(precon, **args, **preconargs)
ipar[11] = 0 # do not perform the automatic test for zero norm of the currently generated vector: dpar[6] <= dpar[7]
ipar[12] = 1 # update the solution to the vector b according to the computations done by the dfgmres routine
ipar[13] = 0 # internal iteration counter that counts the number of iterations before the restart takes place; the initial value is 0
ipar[14] = min(restart, len(rhs)) # the number of non-restarted FGMRES iterations
dpar = numpy.zeros(128, dtype=numpy.float64)
tmp = numpy.zeros((2*ipar[14]+1)*ipar[0]+(ipar[14]*(ipar[14]+9))//2+1, dtype=numpy.float64)
libmkl.dfgmres_check(byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes)
if rci.value != 0:
raise MatrixError('dgmres check failed with error code {}'.format(rci.value))
with log.context('fgmres {:.0f}%', 0, 0) as format:
while True:
libmkl.dfgmres(byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes)
if rci.value == 1: # multiply the matrix
tmp[ipar[22]-1:ipar[22]+n.value-1] = self @ tmp[ipar[21]-1:ipar[21]+n.value-1]
elif rci.value == 2: # perform the stopping test
if dpar[4] < atol:
libmkl.dfgmres_get(byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes, byref(c_int(0)))
if numpy.linalg.norm(self @ b - rhs) < atol:
break
b[:] = rhs # reset rhs vector for restart
format(100 * numpy.log(dpar[2]/dpar[4]) / numpy.log(dpar[2]/atol))
if ipar[3] > maxiter > 0:
break
elif rci.value == 3: # apply the preconditioner
tmp[ipar[22]-1:ipar[22]+n.value-1] = precon(tmp[ipar[21]-1:ipar[21]+n.value-1])
elif rci.value == 4: # check if the norm of the current orthogonal vector is zero
if dpar[6] < ztol:
libmkl.dfgmres_get(byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes, byref(c_int(0)))
if numpy.linalg.norm(self @ b - rhs) < atol:
break
raise MatrixError('singular matrix')
else:
raise MatrixError('this should not have occurred: rci={}'.format(rci.value))
log.debug('performed {} fgmres iterations, {} restarts'.format(ipar[3], ipar[3]//ipar[14]))
return b
def _precon_direct(self, **args):
return Pardiso(mtype=11, a=self.data, ia=self.rowptr, ja=self.colidx, **args)
# vim:sw=2:sts=2:et
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pytest
from mock import Mock, MagicMock, patch, call, PropertyMock
from nefertari.view import (
BaseView, error_view, key_error_view, value_error_view)
from nefertari.utils import dictset
from nefertari.json_httpexceptions import (
JHTTPBadRequest, JHTTPNotFound, JHTTPMethodNotAllowed)
from nefertari.wrappers import wrap_me, ValidationError, ResourceNotFound
from nefertari.renderers import _JSONEncoder
class DummyBaseView(BaseView):
_json_encoder = _JSONEncoder
class TestViewMapper(object):
@patch('nefertari.view.trigger_before_events')
def test_trigger_before_events_called(self, mock_trigger):
from nefertari.view import ViewMapper
class MyView(object):
Model = Mock
def __init__(self, ctx, req):
self._before_calls = {}
self._after_calls = {}
self._json_params = {}
self.context = 'foo'
def index(self):
return ['thing']
request = MagicMock(
json={'username': 'admin'},
body='{"username":"admin"}')
resource = MagicMock(actions=['index'])
wrapper = ViewMapper(**{'attr': 'index'})(MyView)
wrapper(resource, request)
assert mock_trigger.called
def test_viewmapper(self):
from nefertari.view import ViewMapper
bc1 = Mock()
bc3 = Mock()
bc2 = Mock()
class MyView(object):
Model = Mock
_response = None
def __init__(self, ctx, req):
self._before_calls = {'index': [bc1], 'show': [bc3]}
self._after_calls = {'show': [bc2]}
self._json_params = {}
self.context = 'foo'
self.request = Mock(action='index')
@wrap_me(before=bc2)
def index(self):
return ['thing']
request = MagicMock()
resource = MagicMock(actions=['index'])
wrapper = ViewMapper(**{'attr': 'index'})(MyView)
result = wrapper(resource, request)
assert request.filters == {'show': [bc2]}
assert request.action == 'index'
assert result == ['thing']
bc1.assert_called_with(request=request)
assert not bc2.called
assert not bc3.called
def test_viewmapper_bad_request(self):
from nefertari.view import ViewMapper
bc1 = Mock(side_effect=ValidationError)
class MyView(object):
Model = Mock
def __init__(self, ctx, req):
self._before_calls = {'index': [bc1]}
self._after_calls = {}
def index(self):
return ['thing']
request = Mock()
resource = Mock(actions=['index'])
wrapper = ViewMapper(**{'attr': 'index'})(MyView)
with pytest.raises(JHTTPBadRequest):
wrapper(resource, request)
def test_viewmapper_not_found(self):
from nefertari.view import ViewMapper
bc1 = Mock(side_effect=ResourceNotFound)
class MyView(object):
Model = 'foo'
def __init__(self, ctx, req):
self._before_calls = {'index': [bc1]}
self._after_calls = {}
self._json_params = {}
self.context = 'foo'
self.request = Mock(action='index')
def index(self):
return ['thing']
request = Mock()
resource = Mock(actions=['index'])
wrapper = ViewMapper(**{'attr': 'index'})(MyView)
with pytest.raises(JHTTPNotFound):
wrapper(resource, request)
class TestBaseView(object):
def get_common_mock_request(self):
return Mock(content_type='', method='', accept=[''], user=None)
def test_baseview(self, *a):
class UsersView(BaseView):
_json_encoder = _JSONEncoder
def __init__(self, context, request):
BaseView.__init__(self, context, request)
self._json_params = {}
self.context = 'foo'
self.request = Mock(action='index')
def show(self, id):
return 'John Doe'
def convert_ids2objects(self, *args, **kwargs):
pass
request = MagicMock(content_type='')
request.matched_route.pattern = '/users'
view = UsersView(request.context, request)
assert 'John Doe' == view.show(1)
with pytest.raises(JHTTPMethodNotAllowed):
view.index()
with pytest.raises(AttributeError):
view.frobnicate()
# delete is an allowed action, but it raises since BaseView
# does not implement it.
with pytest.raises(JHTTPMethodNotAllowed):
view.delete()
def test_convert_dotted(self):
converted = BaseView.convert_dotted({
'settings.foo': 'bar',
'option': 'value',
'one.two.three.four': 4,
'one.two.six': 6,
})
assert sorted(converted.keys()) == sorted([
'settings', 'option', 'one'])
assert converted['settings'] == {'foo': 'bar'}
assert converted['option'] == 'value'
assert converted['one'] == {
'two': {
'three': {'four': 4},
'six': 6,
},
}
assert 'settings.foo' not in converted
def test_convert_dotted_no_dotted(self):
converted = BaseView.convert_dotted({
'option': 'value'
})
assert converted == {'option': 'value'}
@patch('nefertari.view.BaseView._run_init_actions')
def test_init(self, run):
request = Mock(
content_type='application/json',
json={'param1.foo': 'val1', 'param3': 'val3'},
method='POST',
accept=[''],
)
request.params.mixed.return_value = {'param2.foo': 'val2'}
view = DummyBaseView(context={'foo': 'bar'}, request=request)
run.assert_called_once_with()
assert request.override_renderer == 'nefertari_json'
assert list(sorted(view._params.keys())) == [
'param1', 'param2', 'param3']
assert view._params['param1'] == {'foo': 'val1'}
assert view._params['param2'] == {'foo': 'val2'}
assert view._params['param3'] == 'val3'
assert view.request == request
assert view.context == {'foo': 'bar'}
assert view._before_calls == {}
assert view._after_calls == {}
@patch('nefertari.view.BaseView._run_init_actions')
def test_init_json_accept_header(self, run):
request = Mock(
content_type='application/json',
json={'param1.foo': 'val1', 'param3': 'val3'},
method='POST',
accept=['application/json'],
)
request.params.mixed.return_value = {'param2.foo': 'val2'}
DummyBaseView(context={'foo': 'bar'}, request=request)
assert request.override_renderer == 'nefertari_json'
@patch('nefertari.view.BaseView._run_init_actions')
def test_init_text_ct_and_accept(self, run):
request = Mock(
content_type='text/plain',
json={'param1.foo': 'val1', 'param3': 'val3'},
method='POST',
accept=['text/plain'],
)
request.params.mixed.return_value = {'param2.foo': 'val2'}
view = DummyBaseView(context={'foo': 'bar'}, request=request)
assert request.override_renderer == 'string'
assert list(view._params.keys()) == ['param2']
@patch('nefertari.view.BaseView._run_init_actions')
def test_init_json_error(self, run):
import simplejson
request = Mock(
content_type='application/json',
method='POST',
accept=['application/json'],
)
type(request).json = PropertyMock(
side_effect=simplejson.JSONDecodeError(
'foo', 'asdasdasdasd', pos=1))
request.params.mixed.return_value = {'param2.foo': 'val2'}
view = DummyBaseView(context={'foo': 'bar'}, request=request)
assert request.override_renderer == 'nefertari_json'
assert list(view._params.keys()) == ['param2']
@patch('nefertari.view.BaseView.setup_default_wrappers')
@patch('nefertari.view.BaseView.convert_ids2objects')
@patch('nefertari.view.BaseView.set_public_limits')
def test_run_init_actions(self, limit, conv, setpub):
request = Mock(
content_type='text/plain',
json={'param1.foo': 'val1', 'param3': 'val3'},
method='POST',
accept=['text/plain'],
)
request.params.mixed.return_value = {'param2.foo': 'val2'}
DummyBaseView(context={'foo': 'bar'}, request=request)
limit.assert_called_once_with()
conv.assert_called_once_with()
setpub.assert_called_once_with()
@patch('nefertari.elasticsearch.ES')
@patch('nefertari.view.ESAggregator')
def test_setup_aggregation_es_disabled(self, aggregator, mock_es):
mock_es.settings = dictset(enable_aggregations=False)
request = Mock(content_type='', method='', accept=[''])
view = DummyBaseView(context={}, request=request,
_query_params={'foo': 'bar'})
view.index = 1
view._setup_aggregation()
assert view.index == 1
@patch('nefertari.elasticsearch.ES')
@patch('nefertari.view.ESAggregator')
def test_setup_aggregation_index_not_defined(self, aggregator, mock_es):
mock_es.settings = dictset(enable_aggregations=True)
request = Mock(content_type='', method='', accept=[''])
view = DummyBaseView(context={}, request=request,
_query_params={'foo': 'bar'})
assert view.index == view.not_allowed_action
view._setup_aggregation()
with pytest.raises(JHTTPMethodNotAllowed):
view.index()
@patch('nefertari.elasticsearch.ES')
@patch('nefertari.view.ESAggregator')
def test_setup_aggregation(self, aggregator, mock_es):
mock_es.settings = dictset(enable_aggregations=True)
request = Mock(content_type='', method='', accept=[''])
view = DummyBaseView(context={}, request=request,
_query_params={'foo': 'bar'})
type(view).index = 1
view._setup_aggregation()
aggregator.assert_called_once_with(view)
aggregator().wrap.assert_called_once_with(1)
assert view.index == aggregator().wrap()
@patch('nefertari.elasticsearch.ES')
def test_get_collection_es(self, mock_es):
request = Mock(content_type='', method='', accept=[''])
view = DummyBaseView(
context={}, request=request,
_query_params={'foo': 'bar'})
view.Model = Mock(__name__='MyModel')
view._query_params['q'] = 'movies'
result = view.get_collection_es()
mock_es.assert_called_once_with('MyModel')
mock_es().get_collection.assert_called_once_with(
foo='bar', q='movies')
assert result == mock_es().get_collection()
@patch('nefertari.view.BaseView._run_init_actions')
def test_fill_null_values(self, run):
request = Mock(content_type='', method='', accept=[''])
view = DummyBaseView(
context={}, request=request,
_query_params={'foo': 'bar'})
view.Model = Mock()
view.Model.get_null_values.return_value = {
'name': None, 'email': 1, 'foo': None}
view._json_params = {'foo': 'bar'}
view.fill_null_values()
assert view._json_params == {
'foo': 'bar', 'name': None, 'email': 1
}
@patch('nefertari.view.BaseView._run_init_actions')
def test_init_no_root(self, run):
request = Mock(content_type='', method='', accept=[''])
kwargs = dict(
context={}, request=request, _query_params={'foo': 'bar'})
view = DummyBaseView(**kwargs)
view.root_resource = None
view.__init__(**kwargs)
assert not view._auth_enabled
@patch('nefertari.view.wrappers')
@patch('nefertari.view.BaseView._run_init_actions')
def test_set_public_limits_no_auth(self, run, wrap):
request = Mock(content_type='', method='', accept=[''])
kwargs = dict(
context={}, request=request, _query_params={'foo': 'bar'})
view = DummyBaseView(**kwargs)
view._auth_enabled = False
view.set_public_limits()
assert not wrap.set_public_limits.called
@patch('nefertari.view.wrappers')
@patch('nefertari.view.BaseView._run_init_actions')
def test_set_public_limits_user_authenticated(self, run, wrap):
request = Mock(content_type='', method='', accept=[''], user='foo')
kwargs = dict(
context={}, request=request, _query_params={'foo': 'bar'})
view = DummyBaseView(**kwargs)
view._auth_enabled = True
view.set_public_limits()
assert not wrap.set_public_limits.called
@patch('nefertari.view.wrappers')
@patch('nefertari.view.BaseView._run_init_actions')
def test_set_public_limits_applied(self, run, wrap):
request = self.get_common_mock_request()
kwargs = dict(
context={}, request=request, _query_params={'foo': 'bar'})
view = DummyBaseView(**kwargs)
view._auth_enabled = True
view.set_public_limits()
wrap.set_public_limits.assert_called_once_with(view)
@patch('nefertari.view.engine')
@patch('nefertari.view.BaseView.id2obj')
@patch('nefertari.view.BaseView._run_init_actions')
def test_convert_ids2objects_non_relational(self, run, id2obj, eng):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo1': 'bar'},
_json_params={'foo': 'bar'})
view.Model = 'Model1'
eng.is_relationship_field.return_value = False
view.convert_ids2objects()
eng.is_relationship_field.assert_called_once_with('foo', 'Model1')
assert not id2obj.called
@patch('nefertari.view.engine')
@patch('nefertari.view.BaseView.id2obj')
@patch('nefertari.view.BaseView._run_init_actions')
def test_convert_ids2objects_relational(self, run, id2obj, eng):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo1': 'bar'},
_json_params={'foo': 'bar'})
view.Model = 'Model1'
eng.is_relationship_field.return_value = True
view.convert_ids2objects()
eng.get_relationship_cls.assert_called_once_with('foo', 'Model1')
id2obj.assert_called_once_with('foo', eng.get_relationship_cls())
@patch('nefertari.view.wrappers')
@patch('nefertari.view.BaseView._run_init_actions')
def test_setup_default_wrappers_with_auth(self, run, wrap):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
view._auth_enabled = True
view.setup_default_wrappers()
assert len(view._after_calls['index']) == 4
assert len(view._after_calls['show']) == 4
assert len(view._after_calls['create']) == 4
assert len(view._after_calls['update']) == 4
assert len(view._after_calls['replace']) == 4
assert wrap.apply_privacy.call_count == 5
@patch('nefertari.view.wrappers')
@patch('nefertari.view.BaseView._run_init_actions')
def test_setup_default_wrappers_no_auth(self, run, wrap):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
view._auth_enabled = False
view.setup_default_wrappers()
assert len(view._after_calls['index']) == 3
assert len(view._after_calls['show']) == 3
assert not wrap.apply_privacy.called
def test_defalt_wrappers_and_wrap_me(self):
from nefertari import wrappers
self.maxDiff = None
def before_call(*a):
return a[2]
def after_call(*a):
return a[2]
class MyView(BaseView):
_json_encoder = _JSONEncoder
@wrappers.wrap_me(before=before_call, after=after_call)
def index(self):
return [1, 2, 3]
def convert_ids2objects(self, *args, **kwargs):
pass
request = MagicMock(content_type='')
resource = MagicMock(actions=['index'])
view = MyView(resource, request)
assert len(view._after_calls['index']) == 3
assert len(view._after_calls['show']) == 3
assert view.index._before_calls == [before_call]
assert view.index._after_calls == [after_call]
@patch('nefertari.view.BaseView._run_init_actions')
def test_not_allowed_action(self, run):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
with pytest.raises(JHTTPMethodNotAllowed):
view.not_allowed_action()
@patch('nefertari.view.BaseView._run_init_actions')
def test_add_before_or_after_before(self, run):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
callable_ = lambda x: x
view.add_before_or_after_call(
action='foo', _callable=callable_, pos=None, before=True)
assert callable_ in view._before_calls['foo']
@patch('nefertari.view.BaseView._run_init_actions')
def test_add_before_or_after_after(self, run):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
callable_ = lambda x: x
view.add_before_or_after_call(
action='foo', _callable=callable_, pos=None, before=False)
assert callable_ in view._after_calls['foo']
@patch('nefertari.view.BaseView._run_init_actions')
def test_add_before_or_after_position(self, run):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
callable1 = lambda x: x
callable2 = lambda x: x + x
view.add_before_or_after_call(
action='foo', _callable=callable1, pos=None,
before=False)
assert callable1 is view._after_calls['foo'][0]
view.add_before_or_after_call(
action='foo', _callable=callable2, pos=0,
before=False)
assert callable2 is view._after_calls['foo'][0]
assert callable1 is view._after_calls['foo'][1]
@patch('nefertari.view.BaseView._run_init_actions')
def test_add_before_or_after_not_callable(self, run):
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
with pytest.raises(ValueError) as ex:
view.add_before_or_after_call(
action='foo', _callable='asdasd', pos=None,
before=False)
assert str(ex.value) == 'asdasd is not a callable'
@patch('nefertari.view.urllib')
@patch('nefertari.view.Request')
@patch('nefertari.view.BaseView._run_init_actions')
def test_subrequest_get(self, run, req, ulib):
request = Mock(
content_type='', method='', accept=[''], user=None,
cookies=['1'])
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
view.subrequest(url='http://', params={'par': 'val'}, method='GET')
req.blank.assert_called_once_with(
'http://', cookies=['1'], content_type='application/json',
method='GET')
view.request.invoke_subrequest.assert_called_once_with(req.blank())
ulib.parse.urlencode.assert_called_once_with({'par': 'val'})
@patch('nefertari.view.json')
@patch('nefertari.view.Request')
@patch('nefertari.view.BaseView._run_init_actions')
def test_subrequest_post(self, run, req, json):
request = Mock(
content_type='', method='', accept=[''], user=None,
cookies=['1'])
view = DummyBaseView(
context={}, request=request, _query_params={'foo': 'bar'})
view.subrequest(url='http://', params={'par': 'val'}, method='POST')
req.blank.assert_called_once_with(
'http://', cookies=['1'], content_type='application/json',
method='POST')
view.request.invoke_subrequest.assert_called_once_with(req.blank())
json.dumps.assert_called_once_with({'par': 'val'})
@patch('nefertari.view.BaseView._run_init_actions')
def test_id2obj(self, run):
model = Mock()
model.pk_field.return_value = 'idname'
model.get_item.return_value = 'foo'
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _json_params={'foo': 'bar'},
_query_params={'foo1': 'bar1'})
view._json_params['user'] = '1'
view.id2obj(name='user', model=model)
assert view._json_params['user'] == 'foo'
model.pk_field.assert_called_once_with()
model.get_item.assert_called_once_with(
idname='1', _raise_on_empty=False)
@patch('nefertari.view.BaseView._run_init_actions')
def test_id2obj_list(self, run):
model = Mock()
model.pk_field.return_value = 'idname'
model.get_item.return_value = 'foo'
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _json_params={'foo': 'bar'},
_query_params={'foo1': 'bar1'})
view._json_params['user'] = ['1']
view.id2obj(name='user', model=model)
assert view._json_params['user'] == ['foo']
model.pk_field.assert_called_once_with()
model.get_item.assert_called_once_with(
idname='1', _raise_on_empty=False)
@patch('nefertari.view.BaseView._run_init_actions')
def test_id2obj_not_in_params(self, run):
model = Mock()
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _json_params={'foo': 'bar'},
_query_params={'foo1': 'bar1'})
view.id2obj(name='asdasdasd', model=model)
assert not model.pk_field.called
assert not model.get_item.called
@patch('nefertari.view.BaseView._run_init_actions')
def test_id2obj_setdefault(self, run):
model = Mock()
model.pk_field.return_value = 'idname'
model.get_item.return_value = None
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _json_params={'foo': 'bar'},
_query_params={'foo1': 'bar1'})
view._json_params['user'] = '1'
view.id2obj(name='user', model=model, setdefault=123)
assert view._json_params['user'] == 123
model.pk_field.assert_called_once_with()
model.get_item.assert_called_once_with(
idname='1', _raise_on_empty=False)
@patch('nefertari.view.BaseView._run_init_actions')
def test_id2obj_value_none(self, run):
model = Mock()
model.pk_field.return_value = 'idname'
model.get_item.return_value = 'foo'
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _json_params={'foo': 'bar'},
_query_params={'foo1': 'bar1'})
view._json_params['users'] = [None, '1']
view._json_params['story'] = None
view.id2obj(name='users', model=model)
view.id2obj(name='story', model=model)
assert view._json_params['users'] == [None, 'foo']
assert view._json_params['story'] is None
@patch('nefertari.view.BaseView._run_init_actions')
def test_id2obj_already_object(self, run):
id_ = Mock()
model = Mock()
model.pk_field.return_value = 'idname'
model.get_item.return_value = None
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _json_params={'foo': 'bar'},
_query_params={'foo1': 'bar1'})
view._json_params['user'] = id_
view.id2obj(name='user', model=model, setdefault=123)
assert view._json_params['user'] == id_
model.pk_field.assert_called_once_with()
assert not model.get_item.called
@patch('nefertari.view.BaseView._run_init_actions')
def test_id2obj_not_found(self, run):
model = Mock()
model.pk_field.return_value = 'idname'
model.get_item.return_value = None
request = self.get_common_mock_request()
view = DummyBaseView(
context={}, request=request, _json_params={'foo': 'bar'},
_query_params={'foo1': 'bar1'})
view._json_params['user'] = '1'
with pytest.raises(JHTTPBadRequest) as ex:
view.id2obj(name='user', model=model)
assert str(ex.value) == 'id2obj: Object 1 not found'
class TestViewHelpers(object):
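    """Tests for the module-level error views and the includeme hook."""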
def test_key_error_view(self):
resp = key_error_view(Mock(args=('foo',)), None)
assert str(resp.message) == "Bad or missing param 'foo'"
def test_value_error_view(self):
resp = value_error_view(Mock(args=('foo',)), None)
assert str(resp.message) == "Bad or missing value 'foo'"
def test_error_view(self):
resp = error_view(Mock(args=('foo',)), None)
assert str(resp.message) == "foo"
def test_includeme(self):
from nefertari.view import includeme
config = Mock()
includeme(config)
calls = [
call(key_error_view, context=KeyError),
call(value_error_view, context=ValueError),
call(error_view, context=Exception)
]
config.add_view.assert_has_calls(calls, any_order=True)
# HAppy
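"""HAppy: a minimal heartbeat-style high-availability helper.

Exchanges UDP "hello" datagrams with a partner node to track whether the
partner is alive, and parses a heartbeat haresources file into resource
objects (IP addresses, filesystems, DRBD disks and generic services).
"""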
import os
import sys
import re
import signal
import socket
import logging
import logging.handlers
import threading
import ConfigParser
from time import sleep, time
from happy.resources import (
    IPAddrResource, FilesystemResource, DRBDResource, ServiceResource)
DEFAULT_LOG = 'syslog'
DEFAULT_LEVEL = 'warn'
DEFAULT_RESOURCES = 'haresources:/etc/ha.d/haresources'
DEFAULT_CONFIG = '/etc/happy.conf'
DEFAULT_PORT = 694
DEFAULT_BIND_ADDR = "0.0.0.0"
DEFAULT_DEAD_TIME = 8
BUFFER_SIZE = 1024
logger = logging.getLogger(__name__)
class HAppy(object):
def __init__(self, options = None):
if options is not None:
self.log_level = options.log_level
self.daemonize = not options.foreground
self.config = options.config
else:
self.config = 'happy.conf'
self.daemonize = False
self.log_level = 'debug'
self._catch_signals()
self._parse_config()
self._resources = None
self._uname = None
self.owner = {}
self.dead_time = self.get_config('dead_time', DEFAULT_DEAD_TIME)
def daemon(self):
if self.daemonize and os.fork() > 0:
sys.exit()
partner = self.get_config('partner', None)
port = self.get_config('udp_port', DEFAULT_PORT)
if partner is None:
logger.error("No partner configured, exiting!")
raise RuntimeError("No partner configured!")
logger.info("Peering with {0}:{1}".format(partner, port))
self.partner_pair = (partner, port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((DEFAULT_BIND_ADDR, port))
self.last_seen = 0
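        # Listener thread: blocks on the UDP socket and marks the partner as
        # alive whenever a heartbeat datagram arrives.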
class Listener(threading.Thread):
            def __init__(self, happy, **kwargs):
                super(Listener, self).__init__(**kwargs)
                self.setName("HAppy-Listener")
                self.setDaemon(True)
                # Keep a reference to the owning HAppy instance; the name
                # 'happy' avoids shadowing threading.Thread's own 'daemon' flag.
                self.happy = happy
            def run(self):
                while True:
                    data, addr = self.happy.sock.recvfrom(BUFFER_SIZE)
                    logger.debug("Received: {0} from {1}".format(data, addr))
                    source, message = data.split(': ', 1)
                    # TODO think about this
                    #if source != partner:
                    #    continue
                    self.happy.partner_is_alive()
listener = Listener(self)
listener.start()
while True:
self.send("{0}: hello {1}".format(self.uname, partner))
#logger.debug("Partner status: {0}".format(self.partner_status))
            # Placeholder: failover handling is not implemented yet; read the
            # flag defensively so the heartbeat loop keeps running.
            if not getattr(self, 'have_the_ball', False):
                pass
sleep(5)
def send(self, message):
self.sock.sendto(message, self.partner_pair)
def partner_is_alive(self):
self.last_seen = time()
@property
def partner_status(self):
return 'dead' if time() - self.last_seen > self.dead_time else 'alive'
def takeover(self):
pass
def release(self):
pass
def status(self):
pass
@property
def resources(self):
if self._resources is not None:
return self._resources
resource_source = self.get_config('resources', DEFAULT_RESOURCES)
if resource_source[:11] == 'haresources':
# parse the heartbeat haresources file
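            # Each non-comment line names a preferred node followed by its
            # resources, e.g. (hypothetical):
            #   node1 192.168.1.10/24 drbddisk::r0 Filesystem::/dev/drbd0::/data apache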
filename = resource_source.split(':')
filename = filename[1] if len(filename) > 1 else '/etc/ha.d/haresources'
fd = open(filename, 'r')
haresources = fd.read()
fd.close()
resources = {}
haresources = re.sub("\\\\\r?\n", ' ', haresources)
haresources = re.split("\r?\n", haresources)
for haresource in haresources:
haresource = haresource.strip()
if haresource == '' or haresource[0] == '#':
continue
haresource = re.split("[ \t]+", haresource)
node_name = haresource.pop(0)
ident = haresource[0]
resource_list = []
for resource in haresource:
ip_check = re.match("[0-9]+(\\.[0-9]+){3}", resource)
if ip_check:
if '/' in resource:
resource = resource.split('/')
ip = resource[0]
mask = resource[1]
else:
ip = resource
mask = '32'
resource_list.append(IPAddrResource(ip, mask))
elif resource.startswith('Filesystem::'):
filesystem, block_device, mount_point = resource.split('::')
resource_list.append(FilesystemResource(block_device, mount_point))
elif resource.startswith('drbddisk::'):
drbd, name = resource.split('::')
resource_list.append(DRBDResource(name))
else:
resource = resource.split('::')
resource_list.append(ServiceResource(resource[0], resource[1:]))
resources[ident] = {
'preferred': node_name, # this is ignored for now
'resources': resource_list,
}
self._resources = resources
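        # Probe current ownership: a resource group is considered owned only
        # if every resource in it reports an active status.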
        self.owner = {}
for ident, resources in self._resources.items():
self.owner[ident] = True
for resource in resources['resources']:
if not resource.status():
self.owner[ident] = False
break
return self._resources
@property
def uname(self):
if self._uname is None:
self._uname = self.get_config('node_name', os.uname()[1])
return self._uname
def get_config(self, key, default, section = 'DEFAULT'):
try:
return self._config.get(section, key)
except ConfigParser.NoOptionError:
return default
def _setup_logging(self):
levels = {
'debug' : logging.DEBUG,
'info' : logging.INFO,
'warn' : logging.WARN,
'error' : logging.ERROR,
}
level = self.log_level
logger.setLevel(levels[level])
target = self.get_config('log', DEFAULT_LOG)
if not self.daemonize or target == 'stdout':
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(levels[level])
logger.addHandler(handler)
        elif target.startswith('/'):
            # Log to a file path; FileHandler creates the file if missing.
handler = logging.FileHandler(target)
handler.setLevel(levels[level])
logger.addHandler(handler)
elif target == 'syslog':
handler = logging.handlers.SysLogHandler(address='/dev/log', facility=logging.handlers.SysLogHandler.LOG_DAEMON)
handler.setLevel(levels[level])
formatter = logging.Formatter('%(name)s[{0}] %(levelname)s: %(message)s'.format(os.getpid()))
handler.setFormatter(formatter)
logger.addHandler(handler)
def _parse_config(self):
if not os.path.isfile(self.config):
logger.error("{0} does not exist".format(self.config))
raise IOError("{0} does not exist".format(self.config))
# happy.conf program config
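        # The file holds plain "key = value" pairs with no section header; a
        # [DEFAULT] header is injected below so RawConfigParser accepts it.
        # Hypothetical example:
        #   partner = 10.0.0.2
        #   udp_port = 694
        #   dead_time = 8
        #   resources = haresources:/etc/ha.d/haresources
        #   log = syslog
        #   node_name = node1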
class FakeSectionHeader(object):
def __init__(self, fp):
self.fp = fp
self.header = '[DEFAULT]\n'
def readline(self):
if self.header:
try: return self.header
finally: self.header = None
else:
return self.fp.readline()
parser = ConfigParser.RawConfigParser()
with open(self.config) as config_fd:
parser.readfp(FakeSectionHeader(config_fd))
self._config = parser
self._setup_logging()
def signals(self, sig, frame):
if sig in (signal.SIGINT, signal.SIGTERM):
# exit gracefully
if sig == signal.SIGINT and not self.daemonize:
sys.stdout.write("\r")
logger.warn("Exiting")
sys.exit()
elif sig == signal.SIGHUP:
# reload configurations
logger.info("Reloading configurations")
self._parse_config()
self._resources = None
self._uname = None
def _catch_signals(self):
signal.signal(signal.SIGINT, self.signals)
signal.signal(signal.SIGTERM, self.signals)
signal.signal(signal.SIGHUP, self.signals)
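# Hypothetical entry point (not part of the original module): HAppy.__init__
# only needs an object exposing 'config', 'foreground' and 'log_level'
# attributes, so a minimal optparse-based launcher could look like this.
if __name__ == '__main__':
    from optparse import OptionParser
    op = OptionParser()
    op.add_option('-c', '--config', default=DEFAULT_CONFIG)
    op.add_option('-f', '--foreground', action='store_true', default=False)
    op.add_option('-l', '--log-level', dest='log_level', default=DEFAULT_LEVEL)
    opts, _args = op.parse_args()
    HAppy(opts).daemon()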