"""
Tests for L{eliot._traceback}.
"""
from __future__ import unicode_literals
from unittest import TestCase, SkipTest
from warnings import catch_warnings, simplefilter
import traceback
import sys
try:
from twisted.python.failure import Failure
except ImportError:
Failure = None
from .._traceback import writeTraceback, writeFailure, _writeTracebackMessage
from ..testing import (
assertContainsFields, validateLogging, capture_logging,
MemoryLogger,
)
class TracebackLoggingTests(TestCase):
"""
Tests for L{writeTraceback} and L{writeFailure}.
"""
@validateLogging(None)
def test_writeTraceback(self, logger):
"""
L{writeTraceback} writes the current traceback to the log.
"""
e = None
def raiser():
raise RuntimeError("because")
try:
raiser()
except Exception as exception:
expectedTraceback = traceback.format_exc()
writeTraceback(logger)
e = exception
lines = expectedTraceback.split("\n")
# Remove source code lines:
expectedTraceback = "\n".join(
[l for l in lines if not l.startswith(" ")])
message = logger.messages[0]
assertContainsFields(self, message,
{"message_type": "eliot:traceback",
"exception": RuntimeError,
"reason": e,
"traceback": expectedTraceback})
logger.flushTracebacks(RuntimeError)
@capture_logging(None)
def test_writeTracebackDefaultLogger(self, logger):
"""
L{writeTraceback} writes to the default log, if none is
specified.
"""
def raiser():
raise RuntimeError("because")
try:
raiser()
except Exception:
writeTraceback()
message = logger.messages[0]
assertContainsFields(self, message,
{"message_type": "eliot:traceback"})
logger.flushTracebacks(RuntimeError)
@validateLogging(None)
def test_writeFailure(self, logger):
"""
L{writeFailure} writes a L{Failure} to the log.
"""
if Failure is None:
raise SkipTest("Twisted unavailable")
try:
raise RuntimeError("because")
except:
failure = Failure()
expectedTraceback = failure.getBriefTraceback()
writeFailure(failure, logger)
message = logger.messages[0]
assertContainsFields(self, message,
{"message_type": "eliot:traceback",
"exception": RuntimeError,
"reason": failure.value,
"traceback": expectedTraceback})
logger.flushTracebacks(RuntimeError)
@capture_logging(None)
def test_writeFailureDefaultLogger(self, logger):
"""
L{writeFailure} writes to the default log, if none is
specified.
"""
if Failure is None:
raise SkipTest("Twisted unavailable")
try:
raise RuntimeError("because")
except:
failure = Failure()
writeFailure(failure)
message = logger.messages[0]
assertContainsFields(self, message,
{"message_type": "eliot:traceback"})
logger.flushTracebacks(RuntimeError)
@validateLogging(None)
def test_writeFailureResult(self, logger):
"""
L{writeFailure} returns C{None}.
"""
if Failure is None:
raise SkipTest("Twisted unavailable")
try:
raise RuntimeError("because")
except:
result = writeFailure(Failure(), logger)
self.assertIs(result, None)
logger.flushTracebacks(RuntimeError)
@validateLogging(None)
def test_serialization(self, logger):
"""
L{_writeTracebackMessage} serializes exceptions to string values and
types to FQPN.
"""
try:
raise KeyError(123)
except:
exc_info = sys.exc_info()
_writeTracebackMessage(logger, *exc_info)
serialized = logger.serialize()[0]
assertContainsFields(self, serialized,
{"exception":
"%s.KeyError" % (KeyError.__module__,),
"reason": "123"})
logger.flushTracebacks(KeyError)
@validateLogging(None)
def test_badException(self, logger):
"""
L{_writeTracebackMessage} logs a message even if given a bad exception.
"""
class BadException(Exception):
def __str__(self):
raise TypeError()
try:
raise BadException()
except BadException:
exc_info = sys.exc_info()
_writeTracebackMessage(logger, *exc_info)
self.assertEqual(logger.serialize()[0]["reason"],
"eliot: unknown, unicode() raised exception")
logger.flushTracebacks(BadException)
def test_systemDeprecatedWriteTraceback(self):
"""
L{writeTraceback} warns with C{DeprecationWarning} if a C{system}
argument is passed in.
"""
logger = MemoryLogger()
with catch_warnings(record=True) as warnings:
simplefilter("always")
try:
raise Exception()
except:
writeTraceback(logger, "system")
self.assertEqual(warnings[-1].category, DeprecationWarning)
def test_systemDeprecatedWriteFailure(self):
"""
        L{writeFailure} warns with C{DeprecationWarning} if a C{system}
argument is passed in.
"""
if Failure is None:
raise SkipTest("Twisted unavailable")
logger = MemoryLogger()
with catch_warnings(record=True) as warnings:
simplefilter("always")
try:
raise Exception()
except:
writeFailure(Failure(), logger, "system")
self.assertEqual(warnings[-1].category, DeprecationWarning)
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Runs one or more cluster benchmarks for RAMCloud, using cluster.py to
set up the cluster and ClusterPerf.cc to implement the details of the
benchmark.
"""
# TO ADD A NEW BENCHMARK:
# 1. Decide on a symbolic name for the new test.
# 2. Write code for the test in ClusterPerf.cc using the same test name (see
# instructions in ClusterPerf.cc for details).
# 3. If needed, create a driver function for the test (named after the test)
# in the "driver functions" section below. Many tests can just use the
# function "default". If you need to provide special arguments to
# cluster.run for your tests, or if the running of your test is unusual
# in some way (e.g., you call cluster.run several times or collect
# results from unusual places) then you'll need to write a test-specific
# driver function.
# 4. Create a new Test object in one of the tables simple_tests or
# graph_tests below, depending on the kind of test.
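#
# As a purely illustrative sketch (the benchmark name "readBurst" and the
# driver below are hypothetical, not tests that exist in ClusterPerf.cc),
# the Python side of a new benchmark would typically be a driver function
# patterned after the existing ones:
#
#     def readBurst(name, options, cluster_args, client_args):
#         if 'num_clients' not in cluster_args:
#             cluster_args['num_clients'] = 4
#         cluster.run(client='%s/ClusterPerf %s %s' %
#                     (obj_path, flatten_args(client_args), name),
#                     **cluster_args)
#         print(get_client_log(), end='')
#
# plus a matching entry such as Test("readBurst", readBurst) in the
# simple_tests table (or in graph_tests, for tests that produce graphs).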
from __future__ import division, print_function
from common import *
import cluster
import config
import log
import glob
import os
import pprint
import re
import sys
import time
from optparse import OptionParser
# Each object of the following class represents one test that can be
# performed by this program.
class Test:
def __init__(self,
name, # Symbolic name for the test, used on the
# command line to run the test. This same
# name is normally used for the
# corresponding test in ClusterPerf.cc.
function # Python driver function for the test.
):
"""
Construct a Test object.
"""
self.name = name
self.function = function
def flatten_args(args):
"""
Given a dictionary of arguments, produce a string suitable for inclusion
    in a command line, such as "--name1 value1 --name2 value2".
    """
    return " ".join(["%s %s" % (name, value)
                     for name, value in args.items()])
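# For example (values are illustrative only, and the ordering of the
# arguments follows the dictionary's iteration order):
#     flatten_args({'--size': 1024, '--numTables': 4})
# might return "--size 1024 --numTables 4".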
def get_client_log(
index = 0 # Client index (0 for first client,
# which is usually the one that's wanted)
):
"""
Given the index of a client, read the client's log file
from the current log directory and return its contents,
ignoring RAMCloud log messages (what's left should be a
    summary of the results from a test).
"""
globResult = glob.glob('%s/latest/client%d*.log' %
(options.log_dir, index))
if len(globResult) == 0:
raise Exception("couldn't find log file for client %d" % (index))
result = "";
for line in open(globResult[0], 'r'):
if not re.match('([0-9]+\.[0-9]+) ', line):
result += line
return result
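# For example (hypothetical log lines): a line beginning with a timestamp
# such as "1417824221.92 ..." matches the pattern above and is discarded,
# while a summary line such as "basic.read100  9.1 ns" is kept.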
def run_test(
test, # Test object describing the test to run.
options # Command-line options.
):
"""
Run a given test. The main value provided by this function is to
prepare a candidate set of options for cluster.run and another set
for the ClusterPerf clients, based on the command-line options.
"""
cluster_args = {
'debug': options.debug,
'log_dir': options.log_dir,
'log_level': options.log_level,
'backups_per_server': options.backups_per_server,
'num_servers': options.num_servers,
'replicas': options.replicas,
'timeout': options.timeout,
'share_hosts': True,
'transport': options.transport,
        'verbose': options.verbose
}
client_args = {}
# Provide a default value for num_servers here. This is better
# than defaulting it in the OptionParser below, because tests can
# see whether or not an actual value was specified and provide a
# test-specific default.
    if cluster_args['num_servers'] is None:
        # Make sure there are enough servers to meet replica requirements.
        cluster_args['num_servers'] = options.replicas
    if options.num_clients is not None:
        cluster_args['num_clients'] = options.num_clients
    if options.size is not None:
        client_args['--size'] = options.size
test.function(test.name, options, cluster_args, client_args)
#-------------------------------------------------------------------
# Driver functions follow below. These functions are responsible for
# invoking ClusterPerf via cluster.py, and they collect and print
# result data. Simple tests can just use the "default" driver function.
#-------------------------------------------------------------------
def default(
name, # Name of this test; passed through
# to ClusterPerf verbatim.
options, # The full set of command-line options.
cluster_args, # Proposed set of arguments to pass to
# cluster.run (extracted from options).
# Individual tests can override as
# appropriate for the test.
client_args # Proposed set of arguments to pass to
# ClusterPerf (via cluster.run).
# Individual tests can override as
# needed for the test.
):
"""
This function is used as the invocation function for most tests;
it simply invokes ClusterPerf via cluster.run and prints the result.
"""
cluster.run(client='%s/ClusterPerf %s %s' %
(obj_path, flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
def broadcast(name, options, cluster_args, client_args):
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 10
cluster.run(client='%s/ClusterPerf %s %s' %
(obj_path, flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
def netBandwidth(name, options, cluster_args, client_args):
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 2*len(config.hosts)
    if options.num_servers is None:
        cluster_args['num_servers'] = cluster_args['num_clients']
    if cluster_args['num_servers'] > len(config.hosts):
        cluster_args['num_servers'] = len(config.hosts)
    if options.size is not None:
        client_args['--size'] = options.size
    else:
        client_args['--size'] = 1024*1024
cluster.run(client='%s/ClusterPerf %s %s' %
(obj_path, flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
def readAllToAll(name, options, cluster_args, client_args):
cluster_args['backups_per_server'] = 0
cluster_args['replicas'] = 0
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = len(hosts)
    if options.num_servers is None:
        cluster_args['num_servers'] = len(hosts)
    client_args['--numTables'] = cluster_args['num_servers']
cluster.run(client='%s/ClusterPerf %s %s' %
(obj_path, flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
def readLoaded(name, options, cluster_args, client_args):
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 20
cluster.run(client='%s/ClusterPerf %s %s' %
(obj_path, flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
def readRandom(name, options, cluster_args, client_args):
cluster_args['timeout'] = 60
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 50
    if options.num_servers is None:
        cluster_args['num_servers'] = 10
    client_args['--numTables'] = cluster_args['num_servers']
cluster.run(client='%s/ClusterPerf %s %s' %
(obj_path, flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
#-------------------------------------------------------------------
# End of driver functions.
#-------------------------------------------------------------------
# The following tables define all of the benchmarks supported by this program.
# The benchmarks are divided into two groups:
# * simple_tests describes tests that output one or more individual
# performance metrics
# * graph_tests describe tests that generate one graph per test; the graph
# output is in gnuplot format with comments describing the data.
simple_tests = [
Test("basic", default),
Test("broadcast", broadcast),
Test("netBandwidth", netBandwidth),
Test("readAllToAll", readAllToAll),
Test("readNotFound", default),
Test("writeAsyncSync", default),
]
graph_tests = [
Test("readLoaded", readLoaded),
Test("readRandom", readRandom)
]
if __name__ == '__main__':
parser = OptionParser(description=
'Run one or more performance benchmarks on a RAMCloud cluster. Each '
'test argument names one test to run (default: run all tests). Not '
'all options are used by all benchmarks.',
usage='%prog [options] test test ...',
conflict_handler='resolve')
parser.add_option('-n', '--clients', type=int,
metavar='N', dest='num_clients',
help='Number of instances of the client application '
'to run')
parser.add_option('--debug', action='store_true', default=False,
help='Pause after starting servers but before running '
'clients to enable debugging setup')
parser.add_option('-d', '--logDir', default='logs', metavar='DIR',
dest='log_dir',
help='Top level directory for log files; the files for '
'each invocation will go in a subdirectory.')
parser.add_option('-l', '--logLevel', default='NOTICE',
choices=['DEBUG', 'NOTICE', 'WARNING', 'ERROR', 'SILENT'],
metavar='L', dest='log_level',
help='Controls degree of logging in servers')
parser.add_option('-b', '--numBackups', type=int, default=1,
metavar='N', dest='backups_per_server',
help='Number of backups to run on each server host '
'(0, 1, or 2)')
parser.add_option('-r', '--replicas', type=int, default=3,
metavar='N',
help='Number of disk backup copies for each segment')
parser.add_option('--servers', type=int,
metavar='N', dest='num_servers',
help='Number of hosts on which to run servers')
parser.add_option('-s', '--size', type=int,
help='Object size in bytes')
parser.add_option('-t', '--timeout', type=int, default=20,
metavar='SECS',
help="Abort if the client application doesn't finish within "
'SECS seconds')
parser.add_option('-T', '--transport', default='infrc',
help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Print progress messages')
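    # Example invocation (the script file name and host counts here are
    # illustrative assumptions):
    #     ./clusterperf.py --servers 4 -r 2 basic readRandom
    # would run the "basic" and "readRandom" benchmarks using 4 server
    # hosts and 2 disk replicas per segment.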
(options, args) = parser.parse_args()
# Invoke the requested tests (run all of them if no tests were specified)
try:
if len(args) == 0:
# No specific tests were requested, so run all of them.
for test in simple_tests:
run_test(test, options)
for test in graph_tests:
run_test(test, options)
else:
for name in args:
for test in simple_tests:
if test.name == name:
run_test(test, options)
break
else:
for test in graph_tests:
if test.name == name:
run_test(test, options)
break
else:
print("No clusterperf test named '%s'" % (name))
finally:
logInfo = log.scan("%s/latest" % (options.log_dir),
["WARNING", "ERROR"])
if len(logInfo) > 0:
print(logInfo)
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Perspective.created'
db.add_column('avocado_perspective', 'created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2011, 8, 9, 17, 36, 9, 293249)), keep_default=False)
# Adding field 'Perspective.modified'
db.add_column('avocado_perspective', 'modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2011, 8, 9, 17, 36, 17, 501402)), keep_default=False)
# Changing field 'Perspective.store'
db.alter_column('avocado_perspective', 'store', self.gf('avocado.store.fields.JSONField')(editable=False))
# Adding field 'Report.created'
db.add_column('avocado_report', 'created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2011, 8, 9, 17, 36, 22, 805747)), keep_default=False)
# Adding field 'Report.modified'
db.add_column('avocado_report', 'modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2011, 8, 9, 17, 36, 32, 445973)), keep_default=False)
# Adding field 'Scope.created'
db.add_column('avocado_scope', 'created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2011, 8, 9, 17, 36, 42, 598399)), keep_default=False)
# Adding field 'Scope.modified'
db.add_column('avocado_scope', 'modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2011, 8, 9, 17, 36, 46, 950524)), keep_default=False)
# Changing field 'Scope.store'
db.alter_column('avocado_scope', 'store', self.gf('avocado.store.fields.JSONField')(editable=False))
def backwards(self, orm):
# Deleting field 'Perspective.created'
db.delete_column('avocado_perspective', 'created')
# Deleting field 'Perspective.modified'
db.delete_column('avocado_perspective', 'modified')
# Changing field 'Perspective.store'
db.alter_column('avocado_perspective', 'store', self.gf('avocado.store.fields.JSONField')(null=True, editable=False))
# Deleting field 'Report.created'
db.delete_column('avocado_report', 'created')
# Deleting field 'Report.modified'
db.delete_column('avocado_report', 'modified')
# Deleting field 'Scope.created'
db.delete_column('avocado_scope', 'created')
# Deleting field 'Scope.modified'
db.delete_column('avocado_scope', 'modified')
# Changing field 'Scope.store'
db.alter_column('avocado_scope', 'store', self.gf('avocado.store.fields.JSONField')(null=True, editable=False))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'avocado.category': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['avocado.Category']"})
},
'avocado.column': {
'Meta': {'ordering': "('name',)", 'object_name': 'Column'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.Category']", 'null': 'True', 'blank': 'True'}),
'csv_fmtr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['avocado.Field']", 'through': "orm['avocado.ColumnField']", 'symmetrical': 'False'}),
'html_fmtr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {}),
'search_doc': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'avocado.columnfield': {
'Meta': {'ordering': "('order',)", 'object_name': 'ColumnField'},
'concept': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conceptfields'", 'to': "orm['avocado.Column']"}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.Field']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'avocado.criterion': {
'Meta': {'ordering': "('name',)", 'object_name': 'Criterion'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.Category']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['avocado.Field']", 'through': "orm['avocado.CriterionField']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {}),
'search_doc': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'viewset': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'avocado.criterionfield': {
'Meta': {'ordering': "('order',)", 'object_name': 'CriterionField'},
'concept': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conceptfields'", 'to': "orm['avocado.Criterion']"}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.Field']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'avocado.field': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_name', 'model_name', 'field_name'),)", 'object_name': 'Field'},
'app_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'chart_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'chart_xaxis': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'chart_yaxis': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'choices_handler': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enable_choices': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'search_doc': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_index': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'translator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'avocado.perspective': {
'Meta': {'object_name': 'Perspective'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'definition': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'store': ('avocado.store.fields.JSONField', [], {'editable': 'False'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 9, 17, 35, 47, 433504)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'avocado.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'perspective': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['avocado.Perspective']", 'unique': 'True'}),
'scope': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['avocado.Scope']", 'unique': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'avocado.scope': {
'Meta': {'object_name': 'Scope'},
'cnt': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'definition': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'store': ('avocado.store.fields.JSONField', [], {'editable': 'False'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 9, 17, 35, 47, 433504)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['avocado']
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import asarray_chkfinite
from scipy._lib.six import callable
from .lapack import get_lapack_funcs
from .misc import LinAlgError, _datacopied
__all__ = ['qz', 'ordqz']
_double_precision = ['i', 'l', 'd']
def _select_function(sort):
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = _lhp
elif sort == 'rhp':
sfunction = _rhp
elif sort == 'iuc':
sfunction = _iuc
elif sort == 'ouc':
sfunction = _ouc
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
return sfunction
def _lhp(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
# handles (x, y) = (0, 0) too
out[~nonzero] = False
out[nonzero] = (np.real(x[nonzero] / y[nonzero]) < 0.0)
return out
def _rhp(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
# handles (x, y) = (0, 0) too
out[~nonzero] = False
out[nonzero] = (np.real(x[nonzero] / y[nonzero]) > 0.0)
return out
def _iuc(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
# handles (x, y) = (0, 0) too
out[~nonzero] = False
out[nonzero] = (abs(x[nonzero] / y[nonzero]) < 1.0)
return out
def _ouc(x, y):
out = np.empty_like(x, dtype=bool)
xzero = (x == 0)
yzero = (y == 0)
out[xzero & yzero] = False
out[~xzero & yzero] = True
out[~yzero] = (abs(x[~yzero] / y[~yzero]) > 1.0)
return out
def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
overwrite_b=False, check_finite=True):
if sort is not None:
# Disabled due to segfaults on win32, see ticket 1717.
raise ValueError("The 'sort' input of qz() has to be None and will be "
"removed in a future release. Use ordqz instead.")
if output not in ['real', 'complex', 'r', 'c']:
raise ValueError("argument must be 'real', or 'complex'")
if check_finite:
a1 = asarray_chkfinite(A)
b1 = asarray_chkfinite(B)
else:
a1 = np.asarray(A)
b1 = np.asarray(B)
a_m, a_n = a1.shape
b_m, b_n = b1.shape
if not (a_m == a_n == b_m == b_n):
raise ValueError("Array dimensions must be square and agree")
typa = a1.dtype.char
if output in ['complex', 'c'] and typa not in ['F', 'D']:
if typa in _double_precision:
a1 = a1.astype('D')
typa = 'D'
else:
a1 = a1.astype('F')
typa = 'F'
typb = b1.dtype.char
if output in ['complex', 'c'] and typb not in ['F', 'D']:
if typb in _double_precision:
b1 = b1.astype('D')
typb = 'D'
else:
b1 = b1.astype('F')
typb = 'F'
overwrite_a = overwrite_a or (_datacopied(a1, A))
overwrite_b = overwrite_b or (_datacopied(b1, B))
gges, = get_lapack_funcs(('gges',), (a1, b1))
if lwork is None or lwork == -1:
# get optimal work array size
result = gges(lambda x: None, a1, b1, lwork=-1)
lwork = result[-2][0].real.astype(np.int)
sfunction = lambda x: None
result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
overwrite_b=overwrite_b, sort_t=0)
info = result[-1]
if info < 0:
raise ValueError("Illegal value in argument %d of gges" % -info)
elif info > 0 and info <= a_n:
warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
"form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be "
"correct for J=%d,...,N" % info - 1, UserWarning)
elif info == a_n + 1:
raise LinAlgError("Something other than QZ iteration failed")
elif info == a_n + 2:
raise LinAlgError("After reordering, roundoff changed values of some "
"complex eigenvalues so that leading eigenvalues "
"in the Generalized Schur form no longer satisfy "
"sort=True. This could also be due to scaling.")
elif info == a_n + 3:
raise LinAlgError("Reordering failed in <s,d,c,z>tgsen")
return result, gges.typecode
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
overwrite_b=False, check_finite=True):
"""
QZ decomposition for generalized eigenvalues of a pair of matrices.
The QZ, or generalized Schur, decomposition for a pair of N x N
nonsymmetric matrices (A,B) is::
(A,B) = (Q*AA*Z', Q*BB*Z')
    where (AA, BB) is in generalized Schur form: BB is upper-triangular
    with non-negative diagonal entries and AA is upper-triangular, or, for
    the real QZ decomposition (``output='real'``), block upper-triangular
    with 1x1 and 2x2 blocks. In the real case, the 1x1 blocks correspond to
    real generalized eigenvalues and the 2x2 blocks are 'standardized' by
    making the corresponding elements of BB have the form::
[ a 0 ]
[ 0 b ]
and the pair of corresponding 2x2 blocks in AA and BB will have a complex
conjugate pair of generalized eigenvalues. If (``output='complex'``) or
A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
Q and Z are unitary matrices.
Parameters
----------
A : (N, N) array_like
2d array to decompose
B : (N, N) array_like
2d array to decompose
output : {'real', 'complex'}, optional
Construct the real or complex QZ decomposition for real matrices.
Default is 'real'.
lwork : int, optional
Work array size. If None or -1, it is automatically computed.
sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead.
Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given an eigenvalue, returns a boolean denoting
whether the eigenvalue should be sorted to the top-left (True). For
real matrix pairs, the sort function takes three real arguments
(alphar, alphai, beta). The eigenvalue
``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
output='complex', the sort function takes two complex arguments
(alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively,
string parameters may be used:
- 'lhp' Left-hand plane (x.real < 0.0)
- 'rhp' Right-hand plane (x.real > 0.0)
- 'iuc' Inside the unit circle (x*x.conjugate() < 1.0)
- 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
Defaults to None (no sorting).
overwrite_a : bool, optional
Whether to overwrite data in a (may improve performance)
overwrite_b : bool, optional
Whether to overwrite data in b (may improve performance)
check_finite : bool, optional
        If true, checks that the elements of `A` and `B` are finite numbers.
        If false, does no checking and passes the matrices through to the
        underlying algorithm.
Returns
-------
AA : (N, N) ndarray
Generalized Schur form of A.
BB : (N, N) ndarray
Generalized Schur form of B.
Q : (N, N) ndarray
The left Schur vectors.
Z : (N, N) ndarray
The right Schur vectors.
Notes
-----
Q is transposed versus the equivalent function in Matlab.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import linalg
>>> np.random.seed(1234)
>>> A = np.arange(9).reshape((3, 3))
>>> B = np.random.randn(3, 3)
>>> AA, BB, Q, Z = linalg.qz(A, B)
>>> AA
array([[-13.40928183, -4.62471562, 1.09215523],
[ 0. , 0. , 1.22805978],
[ 0. , 0. , 0.31973817]])
>>> BB
array([[ 0.33362547, -1.37393632, 0.02179805],
[ 0. , 1.68144922, 0.74683866],
[ 0. , 0. , 0.9258294 ]])
>>> Q
array([[ 0.14134727, -0.97562773, 0.16784365],
[ 0.49835904, -0.07636948, -0.86360059],
[ 0.85537081, 0.20571399, 0.47541828]])
>>> Z
array([[-0.24900855, -0.51772687, 0.81850696],
[-0.79813178, 0.58842606, 0.12938478],
[-0.54861681, -0.6210585 , -0.55973739]])
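
    As a quick sanity check, the factors can be reassembled (``Z'`` here is
    just ``Z.T``, since the output above is real):

    >>> np.allclose(A, Q.dot(AA).dot(Z.T))
    True
    >>> np.allclose(B, Q.dot(BB).dot(Z.T))
    True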
See also
--------
ordqz
"""
# output for real
# AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
# output for complex
# AA, BB, sdim, alpha, beta, vsl, vsr, work, info
result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort,
overwrite_a=overwrite_a, overwrite_b=overwrite_b,
check_finite=check_finite)
return result[0], result[1], result[-4], result[-3]
def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
overwrite_b=False, check_finite=True):
"""QZ decomposition for a pair of matrices with reordering.
.. versionadded:: 0.17.0
Parameters
----------
A : (N, N) array_like
2d array to decompose
B : (N, N) array_like
2d array to decompose
sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
Specifies whether the upper eigenvalues should be sorted. A
callable may be passed that, given an ordered pair ``(alpha,
beta)`` representing the eigenvalue ``x = (alpha/beta)``,
returns a boolean denoting whether the eigenvalue should be
sorted to the top-left (True). For the real matrix pairs
``beta`` is real while ``alpha`` can be complex, and for
complex matrix pairs both ``alpha`` and ``beta`` can be
complex. The callable must be able to accept a numpy
array. Alternatively, string parameters may be used:
- 'lhp' Left-hand plane (x.real < 0.0)
- 'rhp' Right-hand plane (x.real > 0.0)
- 'iuc' Inside the unit circle (x*x.conjugate() < 1.0)
- 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
With the predefined sorting functions, an infinite eigenvalue
(i.e. ``alpha != 0`` and ``beta = 0``) is considered to lie in
neither the left-hand nor the right-hand plane, but it is
considered to lie outside the unit circle. For the eigenvalue
``(alpha, beta) = (0, 0)`` the predefined sorting functions
all return `False`.
output : str {'real','complex'}, optional
Construct the real or complex QZ decomposition for real matrices.
Default is 'real'.
overwrite_a : bool, optional
If True, the contents of A are overwritten.
overwrite_b : bool, optional
If True, the contents of B are overwritten.
check_finite : bool, optional
        If true, checks that the elements of `A` and `B` are finite numbers.
        If false, does no checking and passes the matrices through to the
        underlying algorithm.
Returns
-------
AA : (N, N) ndarray
Generalized Schur form of A.
BB : (N, N) ndarray
Generalized Schur form of B.
alpha : (N,) ndarray
alpha = alphar + alphai * 1j. See notes.
beta : (N,) ndarray
See notes.
Q : (N, N) ndarray
The left Schur vectors.
Z : (N, N) ndarray
The right Schur vectors.
Notes
-----
On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
that would result if the 2-by-2 diagonal blocks of the real generalized
Schur form of (A,B) were further reduced to triangular form using complex
unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue is
real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are a complex
conjugate pair, with ``ALPHAI(j+1)`` negative.
See also
--------
qz
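
    Examples
    --------
    A minimal usage sketch (the matrix pair below is chosen only for
    illustration):

    >>> from scipy import linalg
    >>> A = np.array([[2., 1.], [1., -3.]])
    >>> B = np.eye(2)
    >>> AA, BB, alpha, beta, Q, Z = linalg.ordqz(A, B, sort='lhp')

    Eigenvalues selected by ``sort`` are moved to the leading (top-left)
    blocks of ``AA`` and ``BB``, and the generalized eigenvalues are given
    by ``alpha / beta``.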
"""
# NOTE: should users be able to set these?
lwork = None
result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
overwrite_a=overwrite_a, overwrite_b=overwrite_b,
check_finite=check_finite)
AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
if typ not in 'cz':
alpha, beta = result[3] + result[4] * 1.j, result[5]
else:
alpha, beta = result[3], result[4]
sfunction = _select_function(sort)
select = sfunction(alpha, beta)
tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))
if lwork is None or lwork == -1:
result = tgsen(select, AA, BB, Q, Z, lwork=-1)
lwork = result[-3][0].real.astype(np.int)
# looks like wrong value passed to ZTGSYL if not
lwork += 1
liwork = None
if liwork is None or liwork == -1:
result = tgsen(select, AA, BB, Q, Z, liwork=-1)
liwork = result[-2][0]
result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)
info = result[-1]
if info < 0:
raise ValueError("Illegal value in argument %d of tgsen" % -info)
elif info == 1:
raise ValueError("Reordering of (A, B) failed because the transformed"
" matrix pair (A, B) would be too far from "
"generalized Schur form; the problem is very "
"ill-conditioned. (A, B) may have been partially "
"reorded. If requested, 0 is returned in DIF(*), "
"PL, and PR.")
# for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
# work, iwork, info
if typ in ['f', 'd']:
alpha = result[2] + result[3] * 1.j
return (result[0], result[1], alpha, result[4], result[5], result[6])
# for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
# iwork, info
else:
return result[0], result[1], result[2], result[3], result[4], result[5]
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import re
import mock
from nova import exception
from nova.network import model as network_model
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.virt.vmwareapi import fake
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vm_util
class partialObject(object):
def __init__(self, path='fake-path'):
self.path = path
self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
fake.reset()
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
"HostSystem"),
fake.ManagedObjectReference("host2",
"HostSystem")]
hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
prop_dict = {'host': hosts, 'resourcePool': respool}
hardware = fake.DataObject()
hardware.numCpuCores = 8
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
runtime_host_1 = fake.DataObject()
runtime_host_1.connectionState = "connected"
runtime_host_1.inMaintenanceMode = False
runtime_host_2 = fake.DataObject()
runtime_host_2.connectionState = connection_state
runtime_host_2.inMaintenanceMode = maintenance_mode
prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_1)]
prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_2)]
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_1))
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_2))
respool_resource_usage = fake.DataObject()
respool_resource_usage.maxUsage = 5368709120
respool_resource_usage.overallUsage = 2147483648
def fake_call_method(*args):
if "get_dynamic_properties" in args:
return prop_dict
elif "get_properties_for_a_collection_of_objects" in args:
return fake_objects
else:
return respool_resource_usage
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', fake_call_method):
result = vm_util.get_stats_from_cluster(session, "cluster1")
cpu_info = {}
mem_info = {}
if connection_state == "connected" and not maintenance_mode:
cpu_info['vcpus'] = 32
cpu_info['cores'] = 16
cpu_info['vendor'] = ["Intel", "Intel"]
cpu_info['model'] = ["Intel(R) Xeon(R)",
"Intel(R) Xeon(R)"]
else:
cpu_info['vcpus'] = 16
cpu_info['cores'] = 8
cpu_info['vendor'] = ["Intel"]
cpu_info['model'] = ["Intel(R) Xeon(R)"]
mem_info['total'] = 5120
mem_info['free'] = 3072
expected_stats = {'cpu': cpu_info, 'mem': mem_info}
self.assertEqual(expected_stats, result)
def test_get_stats_from_cluster_hosts_connected_and_active(self):
self._test_get_stats_from_cluster()
def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
self._test_get_stats_from_cluster(connection_state="disconnected")
def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
self._test_get_stats_from_cluster(maintenance_mode=True)
def test_get_host_ref_from_id(self):
fake_host_name = "ha-host"
fake_host_sys = fake.HostSystem(fake_host_name)
fake_host_id = fake_host_sys.obj.value
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake_host_sys)
ref = vm_util.get_host_ref_from_id(
fake.FakeObjectRetrievalSession(fake_objects),
fake_host_id, ['name'])
self.assertIsInstance(ref, fake.HostSystem)
self.assertEqual(fake_host_id, ref.obj.value)
host_name = vm_util.get_host_name_from_host_ref(ref)
self.assertEqual(fake_host_name, host_name)
def test_get_host_ref_no_hosts_in_cluster(self):
self.assertRaises(exception.NoValidHost,
vm_util.get_host_ref,
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
@mock.patch.object(vm_util, '_get_vm_ref_from_vm_uuid',
return_value=None)
def test_get_host_name_for_vm(self, _get_ref_from_uuid):
fake_host = fake.HostSystem()
fake_host_id = fake_host.obj.value
fake_vm = fake.VirtualMachine(name='vm-123',
runtime_host=fake_host.obj)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake_vm)
vm_ref = vm_util.get_vm_ref_from_name(
fake.FakeObjectRetrievalSession(fake_objects), 'vm-123')
self.assertIsNotNone(vm_ref)
host_id = vm_util.get_host_id_from_vm_ref(
fake.FakeObjectRetrievalSession(fake_objects), vm_ref)
self.assertEqual(fake_host_id, host_id)
def test_property_from_property_set(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
good_objects = fake.FakeRetrieveResult()
results_good = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val=MoRef(value='bar1')),
DynamicProperty(
name='runtime.host', val=MoRef(value='host-123')),
DynamicProperty(name='foo', val=MoRef(value='bar2')),
]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
for result in results_good:
good_objects.add_object(result)
bad_objects = fake.FakeRetrieveResult()
results_bad = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val='bar1'),
DynamicProperty(name='foo', val='bar2'), ]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
for result in results_bad:
bad_objects.add_object(result)
prop = vm_util.property_from_property_set(
'runtime.host', good_objects)
self.assertIsNotNone(prop)
value = prop.val.value
self.assertEqual('host-123', value)
prop2 = vm_util.property_from_property_set(
'runtime.host', bad_objects)
self.assertIsNone(prop2)
prop3 = vm_util.property_from_property_set('foo', good_objects)
self.assertIsNotNone(prop3)
val3 = prop3.val.value
self.assertEqual('bar1', val3)
prop4 = vm_util.property_from_property_set('foo', bad_objects)
self.assertIsNotNone(prop4)
self.assertEqual('bar1', prop4.val)
def test_get_resize_spec(self):
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
fake_instance)
expected = """{'memoryMB': 2048,
'numCPUs': 2,
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_cdrom_attach_config_spec(self):
result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
fake.Datastore(),
"/tmp/foo.iso",
200, 0)
expected = """{
'deviceChange': [
{
'device': {
'connectable': {
'allowGuestControl': False,
'startConnected': True,
'connected': True,
'obj_name': 'ns0: VirtualDeviceConnectInfo'
},
'backing': {
'datastore': {
"summary.maintenanceMode": "normal",
"summary.type": "VMFS",
"summary.accessible":true,
"summary.name": "fake-ds",
"summary.capacity": 1099511627776,
"summary.freeSpace": 536870912000,
"browser": ""
},
'fileName': '/tmp/foo.iso',
'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
},
'controllerKey': 200,
'unitNumber': 0,
'key': -1,
'obj_name': 'ns0: VirtualCdrom'
},
'operation': 'add',
'obj_name': 'ns0: VirtualDeviceConfigSpec'
}
],
'obj_name': 'ns0: VirtualMachineConfigSpec'
}
"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_lsilogic_controller_spec(self):
# Test controller spec returned for lsiLogic sas adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type="lsiLogicsas")
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
if parent:
disk_backing.parent = parent
disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, controller]
return devices
def test_get_vmdk_path(self):
uuid = '00000000-0000-0000-0000-000000000000'
filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
devices = self._vmdk_path_and_adapter_type_devices(filename)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=devices):
instance = {'uuid': uuid}
vmdk_path = vm_util.get_vmdk_path(session, None, instance)
self.assertEqual(filename, vmdk_path)
def test_get_vmdk_path_and_adapter_type(self):
filename = '[test_datastore] test_file.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(vmdk_info[0], filename)
def test_get_vmdk_path_and_adapter_type_with_match(self):
n_filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(n_filename, vmdk_info[0])
def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
n_filename = '[test_datastore] diuu/diuu.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertIsNone(vmdk_info[0])
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# Adapter type in vmdk descriptor is same for LSI-SAS & LSILogic
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def test_find_allocated_slots(self):
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
disk3 = fake.VirtualDisk(201, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
taken = vm_util._find_allocated_slots(devices)
self.assertEqual([0, 1], sorted(taken[200]))
self.assertEqual([1], taken[201])
self.assertEqual([7], taken[1000])
def test_allocate_controller_key_and_unit_number_ide_default(self):
# Test that default IDE controllers are used when there is a free slot
# on them
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [disk1, disk2, ide0, ide1]
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
None,
devices,
'ide')
self.assertEqual(201, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_ide(self):
# Test that a new controller is created when there is no free slot on
# the default IDE controllers
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [ide0, ide1]
for controller_key in [200, 201]:
for unit_number in [0, 1]:
disk = fake.VirtualDisk(controller_key, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'ide')
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNotNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi(self):
# Test that we allocate on existing SCSI controller if there is a free
# slot on it
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
for unit_number in range(7):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'lsiLogic')
self.assertEqual(1000, controller_key)
self.assertEqual(8, unit_number)
self.assertIsNone(controller_spec)
def _test_get_vnc_config_spec(self, port):
result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
port)
return result
def test_get_vnc_config_spec(self):
result = self._test_get_vnc_config_spec(7)
expected = """{'extraConfig': [
{'value': 'true',
'key': 'RemoteDisplay.vnc.enabled',
'obj_name': 'ns0:OptionValue'},
{'value': 7,
'key': 'RemoteDisplay.vnc.port',
'obj_name': 'ns0:OptionValue'}],
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def _create_fake_vms(self):
fake_vms = fake.FakeRetrieveResult()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
for i in range(10):
vm = fake.ManagedObject()
opt_val = OptionValue(key='', value=5900 + i)
vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
fake_vms.add_object(vm)
return fake_vms
def test_get_vnc_port(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10000, group='vmware')
actual = vm_util.get_vnc_port(
fake.FakeObjectRetrievalSession(fake_vms))
self.assertEqual(actual, 5910)
def test_get_vnc_port_exhausted(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10, group='vmware')
self.assertRaises(exception.ConsolePortRangeExhausted,
vm_util.get_vnc_port,
fake.FakeObjectRetrievalSession(fake_vms))
def test_get_all_cluster_refs_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
self.assertTrue(not refs)
def test_get_all_cluster_refs_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
self.assertEqual(1, len(refs))
def test_get_all_cluster_refs_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(
fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
self.assertTrue(not refs)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
def _test_detach_virtual_disk_spec(self, destroy_disk=False):
virtual_device_config = vm_util.detach_virtual_disk_spec(
fake.FakeFactory(),
'fake_device',
destroy_disk)
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('fake_device', virtual_device_config.device)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy', virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
def test_detach_virtual_disk_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=False)
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
def test_get_vm_create_spec(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [])
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_allocations(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations={'cpu_limit': 7,
'cpu_reservation': 6})
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'reservation': 6,
'limit': 7,
'obj_name': 'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_limit(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations={'cpu_limit': 7})
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'limit': 7,
'obj_name': 'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
shares = {'cpu_shares_level': 'high'}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations=shares)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'shares': {'level': 'high',
'shares': 0,
'obj_name':'ns0:SharesInfo'},
'obj_name':'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share_custom(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
shares = {'cpu_shares_level': 'custom',
'cpu_shares_share': 1948}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [],
allocations=shares)
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'cpuAllocation': {'shares': {'level': 'custom',
'shares': 1948,
'obj_name':'ns0:SharesInfo'},
'obj_name':'ns0:ResourceAllocationInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_create_vm(self):
method_list = ['CreateVM_Task', 'get_dynamic_property']
def fake_call_method(module, method, *args, **kwargs):
expected_method = method_list.pop(0)
self.assertEqual(expected_method, method)
if (expected_method == 'CreateVM_Task'):
return 'fake_create_vm_task'
elif (expected_method == 'get_dynamic_property'):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
else:
self.fail('Should not get here....')
def fake_wait_for_task(self, *args):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
session = fake.FakeSession()
fake_instance = mock.MagicMock()
fake_call_mock = mock.Mock(side_effect=fake_call_method)
fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
with contextlib.nested(
mock.patch.object(session, '_wait_for_task',
fake_wait_mock),
mock.patch.object(session, '_call_method',
fake_call_mock)
) as (wait_for_task, call_method):
vm_ref = vm_util.create_vm(
session,
fake_instance,
'fake_vm_folder',
'fake_config_spec',
'fake_res_pool_ref')
self.assertEqual('fake_vm_ref', vm_ref)
call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
'fake_vm_folder', config='fake_config_spec',
pool='fake_res_pool_ref')
wait_for_task.assert_called_once_with('fake_create_vm_task')
def test_convert_vif_model(self):
expected = "VirtualE1000"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
self.assertEqual(expected, result)
expected = "VirtualE1000e"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
self.assertEqual(expected, result)
types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
"VirtualVmxnet"]
for type in types:
self.assertEqual(type,
vm_util.convert_vif_model(type))
self.assertRaises(exception.Invalid,
vm_util.convert_vif_model,
"InvalidVifModel")
def test_power_on_instance_with_vm_ref(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_without_vm_ref(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance)
fake_get_vm_ref.assert_called_once_with(session, fake_instance)
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_exception(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task",
side_effect=exception.NovaException('fake')),
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_on_instance,
session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_power_state_exception(self):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(
session, "_wait_for_task",
side_effect=error_util.InvalidPowerStateException),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, fake_instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_create_virtual_disk(self):
session = fake.FakeSession()
dm = session._get_vim().get_service_content().virtualDiskManager
with contextlib.nested(
mock.patch.object(vm_util, "get_vmdk_create_spec",
return_value='fake-spec'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_spec, fake_call_method, fake_wait_for_task):
vm_util.create_virtual_disk(session, 'fake-dc-ref',
'fake-adapter-type', 'fake-disk-type',
'fake-path', 7)
fake_get_spec.assert_called_once_with(
session._get_vim().client.factory, 7,
'fake-adapter-type',
'fake-disk-type')
fake_call_method.assert_called_once_with(
session._get_vim(),
"CreateVirtualDisk_Task",
dm,
name='fake-path',
datacenter='fake-dc-ref',
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_copy_virtual_disk(self):
session = fake.FakeSession()
dm = session._get_vim().get_service_content().virtualDiskManager
with contextlib.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.copy_virtual_disk(session, 'fake-dc-ref',
'fake-source', 'fake-dest', 'fake-spec')
fake_call_method.assert_called_once_with(
session._get_vim(),
"CopyVirtualDisk_Task",
dm,
sourceName='fake-source',
sourceDatacenter='fake-dc-ref',
destName='fake-dest',
destSpec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
def _create_fake_vm_objects(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.VirtualMachine())
return fake_objects
def test_get_values(self):
objects = self._create_fake_vm_objects()
query = vm_util.get_values_from_object_properties(
fake.FakeObjectRetrievalSession(objects), objects)
self.assertEqual('poweredOn', query['runtime.powerState'])
self.assertEqual('guestToolsRunning',
query['summary.guest.toolsRunningStatus'])
self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
def test_reconfigure_vm(self):
session = fake.FakeSession()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake_reconfigure_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
_call_method.assert_called_once_with(mock.ANY,
'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
_wait_for_task.assert_called_once_with(
'fake_reconfigure_task')
def test_get_network_attach_config_spec_opaque(self):
vif_info = {'network_name': 'br-int',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'opaque'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
result = vm_util.get_network_attach_config_spec(
fake.FakeFactory(), vif_info, 1)
card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
expected = """{
'extraConfig': [{'value': 7,
'key': 'nvp.iface-id.1',
'obj_name':'ns0:OptionValue'}],
'deviceChange': [
{'device': {
'macAddress':'00:00:00:ca:fe:01',
'addressType': 'manual',
'connectable': {
'allowGuestControl':True,
'startConnected': True,
'connected': True,
'obj_name':'ns0:VirtualDeviceConnectInfo'},
'backing': {
'opaqueNetworkType': 'opaque',
'opaqueNetworkId': 'fake-network-id',
'obj_name': '%(card)s'},
'key': -47,
'obj_name': 'ns0:VirtualE1000',
'wakeOnLanEnabled': True},
'operation': 'add',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_network_attach_config_spec_dvs(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
result = vm_util.get_network_attach_config_spec(
fake.FakeFactory(), vif_info, 1)
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
expected = """{
'extraConfig': [{'value': 7,
'key': 'nvp.iface-id.1',
'obj_name': 'ns0:OptionValue'}],
'deviceChange': [
{'device': {'macAddress': '00:00:00:ca:fe:01',
'addressType': 'manual',
'connectable': {
'allowGuestControl': True,
'startConnected': True,
'connected': True,
'obj_name': 'ns0:VirtualDeviceConnectInfo'},
'backing': {
'port': {
'portgroupKey': 'fake-group',
'switchUuid': 'fake-network-id',
'obj_name': '%(obj_name_port)s'},
'obj_name': '%(obj_name_backing)s'},
'key': -47,
'obj_name': 'ns0:VirtualE1000',
'wakeOnLanEnabled': True},
'operation': 'add',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
'obj_name_backing': backing,
'obj_name_port': port}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_network_detach_config_spec(self):
result = vm_util.get_network_detach_config_spec(
fake.FakeFactory(), 'fake-device', 2)
expected = """{
'extraConfig': [{'value': 'free',
'key': 'nvp.iface-id.2',
'obj_name': 'ns0:OptionValue'}],
'deviceChange': [{'device': 'fake-device',
'operation': 'remove',
'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
'obj_name':'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
def test_power_off_instance_no_vm_ref(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance)
fake_get_ref.assert_called_once_with(session, fake_instance)
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_with_exception(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task',
side_effect=exception.NovaException('fake'))
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_off_instance,
session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_power_state_exception(self, fake_get_ref):
session = fake.FakeSession()
fake_instance = mock.MagicMock()
with contextlib.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(
session, '_wait_for_task',
side_effect=error_util.InvalidPowerStateException)
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session._get_vim(),
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
|
|
import numpy
from six import moves
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.functions.connection import convolution_2d
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
import chainerx
def _prod(shape):
prod = 1
for d in shape:
prod *= d
return prod
class ConvolutionND(function_node.FunctionNode):
def __init__(self, ndim, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
self.ndim = ndim
self.stride = conv_nd.as_tuple(stride, ndim)
self.pad = conv_nd.as_tuple(pad, ndim)
self.cover_all = cover_all
self.dilate = conv_nd.as_tuple(dilate, ndim)
self.groups = groups
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == self.ndim + 2,
w_type.ndim == self.ndim + 2,
# Need to consider the case that group count > 1.
# x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_chainerx(self, inputs):
# TODO(hvy): Support mixed precision.
if any([arr.dtype != inputs[0].dtype for arr in inputs[1:]]):
return chainer.Fallback
# TODO(hvy): Support dilate > 1.
if any(d != 1 for d in self.dilate):
return chainer.Fallback
# TODO(hvy): Support groups > 1.
if self.groups > 1:
return chainer.Fallback
if inputs[0].device.backend.name == 'cuda' and (
self.cover_all or self.ndim < 2):
return chainer.Fallback
return chainerx.conv(
*inputs, stride=self.stride, pad=self.pad,
cover_all=self.cover_all),
def _use_cudnn(self, x, W):
if cuda._cudnn_version < 6000 and any(d != 1 for d in self.dilate):
# cuDNN < 6.0 does not support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == W.dtype
and self.ndim > 1)
def _forward_xp(self, x, W, b, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, W, b, xp)
else:
return self._forward_xp_core(x, W, b, xp)
def _forward_grouped_convolution_xp(self, x, W, b, xp):
# G: group count
# N: batch size
# iC: input channels
# oC: output channels
G = self.groups
N, iC = x.shape[:2]
oC = W.shape[0]
k_size = W.shape[2:]
iCg = iC // G
oCg = oC // G
dims = len(k_size)
if iC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of input channels')
if oC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of output channels')
xp = backend.get_array_module(x)
# (N, iC, k_size..., o_size...)
x = conv_nd.im2col_nd(x, k_size, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
o_size = x.shape[-dims:]
x = xp.rollaxis(x, 0, dims + 2) # (iC, k_size..., N, o_size...)
mul_len = iCg * _prod(k_size)
x = x.reshape(G, mul_len, N * _prod(o_size))
W = W.reshape(G, oCg, mul_len)
# (G, oCg, N*o_size) = (G, oCg, iCg*k_size) @ (G, iCg*k_size, N*o_size)
y = convolution_2d._matmul(W, x).astype(x.dtype, copy=False)
y = y.reshape(oC, N, *o_size)
y = xp.rollaxis(y, 1) # (N, oC, o_size...)
if b is not None:
y += b.reshape(1, b.size, *((1,) * dims))
return y,
def _forward_xp_core(self, x, W, b, xp):
ndim = self.ndim
ksize = W.shape[2:]
stride = self.stride
pad = self.pad
dilate = self.dilate
# Make patch array.
if xp is numpy:
col = conv_nd.im2col_nd_cpu(
x, ksize, stride, pad, cover_all=self.cover_all, dilate=dilate)
else:
col = conv_nd.im2col_nd_gpu(
x, ksize, stride, pad, cover_all=self.cover_all, dilate=dilate)
# Compute correlation.
axes = tuple(moves.range(1, ndim + 2)) # (1, 2, ..., N+1)
y = xp.tensordot(col, W, (axes, axes)).astype(x.dtype, copy=False)
# Apply bias if given.
if b is not None:
y += b
# Roll c_O before the second in (n, y_1, y_2, ..., y_N, c_O).
return xp.rollaxis(y, ndim + 1, 1),
def _forward_cudnn(self, x, W, b):
out_c = W.shape[0] # (c_O, _, k_1, k_2, ..., k_N)
ksize = W.shape[2:]
n, c = x.shape[:2] # (n, c_I, d_1, d_2, ..., d_N)
dims = x.shape[2:]
stride = self.stride
pad = self.pad
dilate = self.dilate
groups = self.groups
# Make empty array for result.
outs = tuple(
conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all, d=di)
for (d, k, s, p, di) in zip(dims, ksize, stride, pad, dilate))
assert all(out > 0 for out in outs), 'Output sizes should be positive.'
y_shape = (n, out_c) + outs # (n, c_O, out_1, out_2, ..., out_N)
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_forward(
x, W, b, y, pad, stride, dilate, groups,
auto_tune=auto_tune, tensor_core=tensor_core)
return y,
def forward(self, inputs):
self.retain_inputs((0, 1)) # retain only x and W
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, W, b, numpy)
elif not self._use_cudnn(x, W):
return self._forward_xp(x, W, b, cuda.cupy)
else:
return self._forward_cudnn(x, W, b)
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
ret = []
if 0 in indexes:
x_shape = x.shape[2:]
gx = chainer.functions.deconvolution_nd(
gy, W, stride=self.stride, pad=self.pad, outsize=x_shape,
dilate=self.dilate, groups=self.groups)
ret.append(gx)
if 1 in indexes:
gW, = ConvolutionNDGradW(self).apply((x, gy))
ret.append(gW)
if 2 in indexes:
axis = (0,) + tuple(moves.range(2, gy.ndim))
gb = chainer.functions.sum(gy, axis=axis)
ret.append(gb)
return ret
class ConvolutionNDGradW(function_node.FunctionNode):
def __init__(self, convnd):
W_node = convnd.inputs[1]
self.ndim = convnd.ndim
self.ksize = W_node.shape[2:]
self.stride = convnd.stride
self.pad = convnd.pad
self.cover_all = convnd.cover_all
self.dilate = convnd.dilate
self.groups = convnd.groups
self.W_dtype = W_node.dtype
def _use_cudnn(self, x, gy):
if cuda._cudnn_version < 6000 and any(d != 1 for d in self.dilate):
# cuDNN < 6.0 does not support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == self.W_dtype
and gy.dtype == self.W_dtype
and self.ndim > 1)
def forward(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, gy, numpy)
elif not self._use_cudnn(x, gy):
return self._forward_xp(x, gy, cuda.cupy)
else:
return self._forward_cudnn(x, gy)
def _forward_xp(self, x, gy, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, gy, xp)
else:
return self._forward_xp_core(x, gy, xp)
def _forward_grouped_convolution_xp(self, x, gy, xp):
G = self.groups
N, iC = x.shape[:2]
oC = gy.shape[1]
o_size = gy.shape[2:]
o_size_prod = _prod(o_size)
k_size = self.ksize
dims = len(o_size)
iCg = iC // G
oCg = oC // G
# Do not check iCg and oCg because this class is rarely used alone
# (N, iC, k_size..., o_size...)
x = conv_nd.im2col_nd(x, k_size, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
x = xp.rollaxis(x, 0, dims + 2) # (iC, k_size..., N, o_size...)
mul_len = iCg * _prod(k_size)
x = x.reshape(G, mul_len, N * o_size_prod)
x = x.transpose(0, 2, 1) # (G, N*o_size, iCg*k_size)
gy = xp.rollaxis(gy, 1) # (oC, N, o_size...)
gy = gy.reshape(G, oCg, N * o_size_prod)
# (G, oCg, iCg*k_size) = (G, oCg, N*o_size) @ (G, N*o_size, iCg*k_size)
gW = convolution_2d._matmul(gy, x).astype(self.W_dtype, copy=False)
gW = gW.reshape(oC, iCg, *k_size)
return gW,
def _forward_xp_core(self, x, gy, xp):
# Compute filter weight gradient.
# (n, _, out_1, out_2, ..., out_N)
out_axes = (0,) + tuple(moves.range(2, self.ndim + 2))
# (n, _, _, ..., _, out_1, out_2, ..., out_N)
col_axes = (0,) + tuple(moves.range(self.ndim + 2, self.ndim * 2 + 2))
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (xp is numpy and
not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
if xp is numpy:
col = conv_nd.im2col_nd_cpu(
x, self.ksize, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
else:
col = conv_nd.im2col_nd_gpu(
x, self.ksize, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
gW = xp.tensordot(gy, col, (out_axes, col_axes)).astype(
self.W_dtype, copy=False)
return gW,
def _forward_cudnn(self, x, gy):
# Make empty arrays for result.
out_c = gy.shape[1]
in_c = x.shape[1] // self.groups
gW = cuda.cupy.empty(
(out_c, in_c) + self.ksize, dtype=self.W_dtype)
# Compute
pad = self.pad
stride = self.stride
dilate = self.dilate
groups = self.groups
deterministic = configuration.config.cudnn_deterministic
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_backward_filter(
x, gy, gW, pad, stride, dilate, groups,
deterministic=deterministic, auto_tune=auto_tune,
tensor_core=tensor_core)
return gW,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ggW, = grad_outputs
ret = []
if 0 in indexes:
x_shape = x.shape[2:]
gx = chainer.functions.deconvolution_nd(
gy, ggW, stride=self.stride, pad=self.pad, outsize=x_shape,
groups=self.groups, dilate=self.dilate)
ret.append(gx)
if 1 in indexes:
ggy = convolution_nd(
x, ggW, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, groups=self.groups,
dilate=self.dilate)
ret.append(ggy)
return ret
def convolution_nd(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""N-dimensional convolution function.
    This is an implementation of N-dimensional convolution, a generalization
    of the two-dimensional convolution used in ConvNets. It takes three
    variables: the input ``x``, the filter weight ``W`` and the bias
    vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`l_1, l_2, ..., l_N` are the size of each axis of the output's
spatial dimensions, respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
Then the ``convolution_nd`` function computes correlations between filters
and patches of size :math:`(k_1, k_2, ..., k_N)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded tensors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-p_1, -p_2, ..., -p_N)`` for each spatial axis.
Let :math:`(s_1, s_2, ..., s_N)` be the stride of filter application.
Then, the output size :math:`(l_1, l_2, ..., l_N)` is determined by the
following equations:
.. math::
l_n = (d_n + 2p_n - k_n) / s_n + 1 \\ \\ (n = 1, ..., N)
    If the ``cover_all`` option is ``True``, the filter will cover all
    spatial locations. So, if the last stride of the filter does not cover
    the end of the spatial locations, an additional stride will be applied
    to the end part of the spatial locations. In this case, the output size
    is determined by the following equations:
.. math::
l_n = (d_n + 2p_n - k_n + s_n - 1) / s_n + 1 \\ \\ (n = 1, ..., N)
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
W (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Weight variable of shape :math:`(c_O, c_I, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
One-dimensional bias variable with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
        cover_all (bool): If ``True``, all spatial locations are convolved
into some output pixels. It may make the output size larger.
`cover_all` needs to be ``False`` if you want to use cuDNN.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
            The number of groups to use for grouped convolution.
            The default is one, in which case grouped convolution is not used.
Returns:
~chainer.Variable:
Output variable of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
.. note::
This function uses cuDNN implementation for its forward and backward
computation if ALL of the following conditions are satisfied:
- ``cuda.cudnn_enabled`` is ``True``
- ``chainer.config.use_cudnn`` is ``'always'`` or ``'auto'``
- The number of spatial dimensions is more than one.
- ``cover_all`` is ``False``
- The input's ``dtype`` is equal to the filter weight's.
- The ``dtype`` is FP16, FP32 or FP64. (FP16 is only available when
cuDNN version :math:`\\geq` v3.)
        Convolution links can use a feature of cuDNN called autotuning, which
        selects the most efficient CNN algorithm for fixed-size images and
        can provide a significant performance boost for networks whose input
        sizes do not change. To enable it, use
        `chainer.using_config('autotune', True)`.
.. seealso:: :class:`~chainer.links.ConvolutionND`, :func:`convolution_2d`
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 30, 40, 50
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 30, 40, 50)
>>> W = np.random.uniform(0, 1, (c_o, c_i, k1, k2, k3)).\
astype(np.float32)
>>> W.shape
(1, 3, 10, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = F.convolution_nd(x, W, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 16, 11, 9)
>>> l1 = int((d1 + 2 * p1 - k1) / s1 + 1)
>>> l2 = int((d2 + 2 * p2 - k2) / s2 + 1)
>>> l3 = int((d3 + 2 * p3 - k3) / s3 + 1)
>>> y.shape == (n, c_o, l1, l2, l3)
True
>>> y = F.convolution_nd(x, W, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3), cover_all=True)
>>> y.shape == (n, c_o, l1, l2, l3 + 1)
True
"""
ndim = len(x.shape[2:])
fnode = ConvolutionND(
ndim, stride, pad, cover_all, dilate=dilate, groups=groups)
args = (x, W) if b is None else (x, W, b)
y, = fnode.apply(args)
return y
def convolution_1d(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""1-dimensional convolution function.
.. note::
This function calls :func:`~chainer.functions.convolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.convolution_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return convolution_nd(x, W, b, stride, pad, cover_all, dilate, groups)
def convolution_3d(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""3-dimensional convolution function.
.. note::
This function calls :func:`~chainer.functions.convolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.convolution_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return convolution_nd(x, W, b, stride, pad, cover_all, dilate, groups)
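# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a minimal call
# of the 1-D wrapper above with plain NumPy inputs.  With d=10, k=3, s=1 and
# p=0, the output-size formula in the docstring gives l = (10 - 3) / 1 + 1 = 8.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    x = numpy.random.uniform(-1, 1, (2, 3, 10)).astype(numpy.float32)  # (n, c_I, d_1)
    W = numpy.random.uniform(-1, 1, (5, 3, 3)).astype(numpy.float32)   # (c_O, c_I, k_1)
    y = convolution_1d(x, W)
    assert y.shape == (2, 5, 8)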
|
|
import numpy as np
import ms
import shifter
import sampler
import scipy.optimize as op
from scipy import ndimage
import time
import terminator
f = .05    # constant term of the per-pixel noise variance (var = f + g*|model|)
g = .01    # scale term of the per-pixel noise variance
fl = 1e-5  # small positive floor added to the mean image X; it is used
           # throughout the model below, so it must be defined (not commented out)
class stuff(object):
    def __init__(self, data, cx, cy, mask, H=3, epsilon=.01,
                 min_iter=5, max_iter=10, check_iter=5, tol=1.e-8):
""" inputs of the code: NxD data matrix and NxD mask matrix;
data contains images of stars, and mask contains questionable
pixels in each image.
N = the number of stars
D = the number of pixels in each patch
H = upsampling factor
cx, cy = centroiding offsets
"""
self.N = data.shape[0] #number of observations
self.D = data.shape[1] #input dimensionality
self.H = H #upsampling factor
self.epsilon = epsilon #smoothness parameter
self.data = np.atleast_2d(data) #making sure the data has the right dimension
self.mask = np.atleast_2d(mask) #making sure the mask has the right dimension
self.dx = cx #list of centroid offsets x
self.dy = cy #list of centroid offsets y
self.M = int(self.D**.5)
""" outputs of the code:
H*H*D-dimensional mean vector: X
N-dimensional flux vector: F
N-dimensional flat-field background: B
"""
self.F = np.zeros((self.N)) #Creating an N-dimensional Flux vector.
self.B = np.zeros((self.N)) #one flat-field per star
self.lnX = np.ones((self.D*self.H*self.H)) #log(X)
""" initialization of X, F, B by means of subtracting the median!(to initialize the background B),
normalizing (to intialize the flux F),
shifting, and upsampling (to initialize the mean X)"""
self.initialize()
""" updating X, F, B by X-step, F-step, and B-step optimization"""
self.update(max_iter, check_iter, min_iter, tol)
def initialize(self):
"""
initializing the parameters
"""
m = int((self.D)**.5)
self.d2d = self.data.reshape(self.N , m , m)
self.dm = np.zeros((self.N, self.D))
X = np.zeros_like(self.lnX)
for i in range(self.N):
            # estimate the per-star background from the mean of the four edge strips
            self.B[i] = np.array([self.d2d[i, m/2-4:m/2+5, -1:].mean(),
                                  self.d2d[i, m/2-4:m/2+5, :1].mean(),
                                  self.d2d[i, :1, m/2-4:m/2+5].mean(),
                                  self.d2d[i, -1:, m/2-4:m/2+5].mean()]).mean()
self.dm[i] = self.data[i]-self.B[i]
self.dm -= self.dm.min()
self.F[i] = np.sum(self.dm[i])
self.dm[i] /= self.F[i]
shifted = shifter.shifter(self.dm[i], self.dx[i], self.dy[i])
obs = ndimage.interpolation.zoom(shifted.reshape(25,25), self.H, output = None, order=3, mode='constant', cval=0.0, prefilter=True).flatten()
X += obs.flatten()
X /= self.N
#X[X<0] = fl #setting the initial negative pixels in X to fl
m = int((self.D)**.5)*self.H
X = X.reshape(m,m)
#X[m/2-15:m/2+15,m/2-15:m/2+15][X[m/2-15:m/2+15,m/2-15:m/2+15]<0] = np.median(X[m/2-15:m/2+15,m/2-15:m/2+15])
"""
X[:m/2-10,m/2-10:m/2+10][X[:m/2-10,m/2-10:m/2+10]<0] = fl
X[m/2+10:,m/2-10:m/2+10][X[m/2+10:,m/2-10:m/2+10]<0] = fl
X[m/2-10:m/2+10,m/2+10:][X[m/2-10:m/2+10,m/2+10:]<0] = fl
X[m/2-10:m/2+10,:m/2-10][X[m/2-10:m/2+10,:m/2-10]<0] = fl
X[m/2+10:,:m/2-10][X[m/2+10:,:m/2-10]<0] = fl
X[m/2+10:,m/2+10:][X[m/2+10:,m/2+10:]<0] = fl
X[:m/2-10,m/2+10:][X[:m/2-10,m/2+10:]<0] = fl
X[:m/2-10,:m/2-10][X[:m/2-10,:m/2-10]<0] = fl"""
#X[X<0] = fl
#X+=fl
#print X.min()
#self.X[-1:,:]*=0
#self.X[:,0:1]*=0
#self.X[:,-1:]*=0
#print X.min()
import pylab as p
from matplotlib.colors import LogNorm
p.imshow(X, interpolation = "None", norm = LogNorm() , origin = "lower")
p.colorbar()
p.xticks(())
p.yticks(())
p.show()
X = X.flatten()
self.lnX = np.log(X)
def grad_lnX(self , params , *args):
"""Gradient w.r.t Log(X)"""
self.F, self.B = args
self.lnX = params
self.X = np.exp(self.lnX)
b = int((self.D)**.5)
Z = self.X.reshape((self.H*b, self.H*b))
c=np.zeros_like(Z)
c[:,:-1] += Z[:, 1:]
c[:, 1:] += Z[:,:-1]
c[1:, :] += Z[:-1,:]
c[:-1,:] += Z[1:, :]
grad = 2.*self.epsilon*(4.*Z - c).flatten()
grad = grad*self.X
#grad = np.zeros_like(self.X)
for p in range(self.N):
Kp = sampler.imatrix_new(self.M, self.H, self.dx[p], self.dy[p])
modelp = self.F[p]*(self.X+fl).dot(Kp) + self.B[p]
ep = self.data[p] - modelp
ep[self.mask[p]!=0] = 0 #excluding flagged pixels from contributing to gradient_X
varp = f + g*np.abs(modelp)
gradp = -1.*self.F[p]*Kp
gainp = (g/2.)*(varp**-1. - ep**2./varp**2.)
gainp[modelp<0] *= -1. #var=f+g|model| to account for numerical artifacts when sr model is sampled at the data grid
gradp = self.X[:,None]*gradp*(ep/varp - gainp)[None,:]
Gradp = gradp.sum(axis = 1)
grad += Gradp
return grad
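    # Note on the parametrization (added comment): the optimization variable is
    # lnX rather than X, so every dNLL/dX contribution above is multiplied by
    # X = exp(lnX) (chain rule d/dlnX = X * d/dX).  Working in log space also
    # keeps the super-resolved mean image X strictly positive during L-BFGS.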
def grad_F(self, params, *args):
"""Gradient w.r.t F """
self.lnX, self.B = args
self.F = params
self.X = np.exp(self.lnX)
grad = np.zeros_like(self.F)
for p in range(self.N):
Kp = sampler.imatrix_new(self.M, self.H, self.dx[p], self.dy[p])
nmodelp = np.dot(self.X+fl,Kp)
modelp = self.F[p]*nmodelp + self.B[p]
residualp = self.data[p] - modelp
residualp[self.mask[p]!=0] = 0 #excluding flagged pixels from contributing to gradient_X
varp = f + g*np.abs(modelp)
gradp = -1.*nmodelp
gainp = (g/2.)*nmodelp*(varp**-1. - residualp**2./varp**2.)
gainp[modelp<0] *= -1. #var=f+g|model| to account for numerical artifacts when sr model is sampled at the data grid
grad[p] = np.sum(residualp*gradp/varp) + np.sum(gainp)
return grad
def grad_B(self, params, *args):
self.lnX, self.F = args
self.B = params
self.X = np.exp(self.lnX)
grad = np.zeros_like(self.B)
for p in range(self.N):
Kp = sampler.imatrix_new(self.M, self.H, self.dx[p], self.dy[p])
modelp = self.F[p]*np.dot(self.X+fl,Kp) + self.B[p]
varp = f+g*np.abs(modelp)
residualp = self.data[p] - modelp
residualp[self.mask[p]!=0] = 0 #excluding flagged pixels from contributing to gradient_X
gainp = - (g/2.)*(residualp**2./varp**2.) + (g/2.)*(varp**-1.)
gainp[modelp<0] *= -1. #var=f+g|model| to account for numerical artifacts when sr model is sampled at the data grid
grad[p] = -1.*np.sum(residualp/varp) + np.sum(gainp)
return grad
def func_lnX(self , params, *args):
self.F, self.B = args
self.lnX = params
return self.nll()
def func_F(self , params, *args):
self.lnX, self.B = args
self.F = params
return self.nll()
def func_B(self, params, *args):
self.lnX, self.F = args
self.B = params
return self.nll()
def bfgs_lnX(self):
x = op.fmin_l_bfgs_b(self.func_lnX,x0=self.lnX, fprime = self.grad_lnX,args=(self.F, self.B), approx_grad = False, \
bounds = None, m=10, factr=100., pgtol=1e-04, epsilon=1e-04, maxfun=60)
print x
self.lnX = x[0]
def bfgs_F(self):
x = op.fmin_l_bfgs_b(self.func_F,x0=self.F, fprime = self.grad_F,args=(self.lnX, self.B), approx_grad = False, \
bounds = None, m=10, factr=100., pgtol=1e-04, epsilon=1e-04, maxfun=30)
print x
self.F = x[0]
def bfgs_B(self):
x = op.fmin_l_bfgs_b(self.func_B,x0=self.B, fprime = self.grad_B,args=(self.lnX, self.F), approx_grad = False, \
bounds = None, m=10, factr=100., pgtol=1e-04, epsilon=1e-04, maxfun=20)
print x
self.B = x[0]
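    # The objective minimized below (see nll) is the negative log-likelihood of
    # the data under the model
    #     model_i = F[i] * (X + fl) . K_i + B[i],    var_i = f + g * |model_i|
    # plus an epsilon-weighted quadratic smoothness penalty on neighbouring
    # pixels of the upsampled mean image X.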
def nll(self):
self.X = np.exp(self.lnX)
b = int((self.D)**.5)
Z = self.X.reshape((self.H*b, self.H*b))
nll = self.epsilon*((Z[:,1:]-Z[:,:-1])**2.).sum() + self.epsilon*((Z[1:,:]-Z[:-1,:])**2.).sum()
for i in range(self.N):
Ki = sampler.imatrix_new(self.M, self.H, self.dx[i], self.dy[i])
model_i = self.F[i]*np.dot(self.X+fl, Ki) + self.B[i]
residual_i = self.data[i] - model_i
residual_i[self.mask[i]!=0] = 0 #excluding flagged pixels from contributing to NLL
var_i = f + g*np.abs(model_i)
nll += 0.5*np.sum(((residual_i)**2.)/var_i) + .5*np.sum(np.log(var_i))
return nll
def update(self, max_iter, check_iter, min_iter, tol):
np.savetxt("wfc_mean_iter_0.txt" , self.lnX ,fmt='%.12f')
np.savetxt("wfc_flux_iter_0.txt" , self.F ,fmt='%.12f')
np.savetxt("wfc_background_iter_0.txt" , self.B ,fmt='%.12f')
print 'Starting NLL =', self.nll()
nll = self.nll()
chi = []
chi.append(self.nll())
for i in range(max_iter):
a = time.time()
self.bfgs_F()
print time.time()-a
obj = self.nll()
print "NLL after F-step", obj
a = time.time()
self.bfgs_lnX()
print time.time()-a
obj = self.nll()
print "NLL after X-step", obj
a = time.time()
self.bfgs_B()
print time.time()-a
obj = self.nll()
print "NLL after B-step", obj
np.savetxt("noregwfc_mean_iter_%d.txt"%(i+1) , self.lnX ,fmt='%.12f')
np.savetxt("noregwfc_flux_iter_%d.txt"%(i+1) , self.F ,fmt='%.12f')
np.savetxt("noregwfc_background_iter_%d.txt"%(i+1) , self.B ,fmt='%.12f')
chi.append(obj)
if np.mod(i, check_iter) == 0:
                new_nll = self.nll()
print 'NLL at step %d is:' % (i+1), new_nll
                if (((nll - new_nll) / nll) < tol) & (min_iter < i):
                    print 'Stopping at step %d with NLL:' % i, new_nll
                    # store the final value under a separate name so that the
                    # nll() method is not overwritten by a float
                    self.final_nll = new_nll
                    break
                else:
                    nll = new_nll
                    self.final_nll = new_nll
print chi
|
|
import collections
import keyword
import re
import types
from k.config import Config
def _validate(config_dict, config_fields):
"""Validate a parsed config dictionary
    Validates the contents of config_dict against the field definitions
    in config_fields. Makes sure that all required fields are present
    and have appropriate values.
    Args:
        config_dict: a dictionary containing config fields and
            values to validate.
        config_fields: a list of Field instances describing the expected
            fields and their constraints.
Returns:
A dictionary of validated fields and values.
"""
valid_dict = {}
for field in config_fields:
try:
value = config_dict[field.name]
except KeyError:
if field.default is not None:
value = field.default
else:
raise ValueError("Missing config field: '{0}'".format(field.name))
valid_dict[field.name] = field.validate(value)
return valid_dict
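# Illustrative example (not part of the original module):
#     _validate({"age": "31"}, [IntField("age", lower_bound=0)])
# returns {"age": 31}, while a field that is missing and has no default raises
# ValueError("Missing config field: 'age'").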
class CheckedConfig(object):
"""Defines a schema for a config file
Allows you to define the field names, types, and some basic
validation for a config file. Subclasses should override
the CONFIG_FIELDS field to describe the fields available
on this config. CONFIG_FIELDS should be a list of Field
subclasses. Example:
class FooConfig(CheckedConfig):
CONFIG_FIELDS = [
StringField("name", pattern="\w+"),
IntField("age", lower_bound=0, upper_bound=150),
NestedField("attributes",
BoolField("cool_guy", default=False),
BoolField("smart_guy", default=False)
)
]
config.yml contents:
name: Brad
age: 31
attributes:
cool_guy: true
smart_guy: false
After being checked, the resulting config fields can be
accessed via simple attribute access:
> config = FooConfig("config.yml")
> print config.name
Brad
> print config.attributes.cool_guy
True
Invalid values will cause ValueErrors to be raised:
> config = FooConfig({"name": "Brad", "age": -10})
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Value for field 'age': -10 is less than lower bound 0
"""
# override in subclasses to define the fields in this config
CONFIG_FIELDS = []
def __init__(self, config):
"""Initialize this CheckedConfig
Args:
config: a dict or a str. If a dict, it contains the
unvalidated fields and values of this config. If a
str, it contains the location of a config file that
will be loaded using Config.
"""
if isinstance(config, types.StringTypes):
config = Config.fetch_config(config)
valid_config = _validate(config, self.CONFIG_FIELDS)
self.__dict__.update(valid_config)
class Field(object):
"""An abstract field definition
Subclasses are used to define fields in a CheckedConfig.
"""
# Acceptable values must start with a letter, and be followed by zero or
# more alpha-numeric or _ characters. Any valid python identifier that
# does not start with _ should match.
FIELD_PATTERN = re.compile("^[a-zA-Z]\w*$")
def __init__(self, name, default=None):
"""Initialize this Field
Args:
name: A str. The name of this field. Must be a valid python
identifier that does not begin with '_'.
default: The default value that this field will be set to
if it is not present in the config file. If this is set
to None (the default), then there is no default value and
an error will be raised if the field is missing.
"""
self.validate_name(name)
self.name = name
self.default = default
def validate_name(self, name):
"""Validate a field name
Makes sure that the name is a valid python identifier that does
not start with '_' and that it is not a python keyword.
Args:
name: A str.
Raises:
ValueError: If this is an invalid name.
"""
if not re.match(self.FIELD_PATTERN, name):
raise ValueError("'{0}' is an invalid name for a config field. "
"Config field names must be valid python "
"identifiers and cannot start with '_'.".format(name))
if keyword.iskeyword(name):
raise ValueError("'{0}' is an invalid name for a config field. "
"It matches a python keyword.".format(name))
def validate(self, value):
"""Validate the supplied value against this field definition
Abstract method. Should be implemented by subclasses.
"""
raise NotImplementedError("validate not implemented")
class IntField(Field):
"""A field that expects an integer value"""
def __init__(self, name, default=None,
lower_bound=None, upper_bound=None):
"""Initialize this IntField
Args:
name: A str. The name of this field.
lower_bound: An int or None. The lowest acceptable value
for this field. If None, there is no lower bound.
upper_bound: An int or None. The highest acceptable value
for this field. If None, there is no upper bound.
"""
super(IntField, self).__init__(name, default)
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def validate(self, value):
"""Ensure that the supplied value is a valid integer
It will attempt to convert the supplied value to an integer
and ensure that it is between the upper and lower bounds of
this field if they exist.
Args:
            value: An int or value convertible to an int.
Returns:
An int. This is the converted and validated value.
Raises:
ValueError if value is not valid for this field
"""
try:
int_value = int(value)
except ValueError as ve:
raise ValueError("Value for field '{0}': {1}".format(self.name, ve.message))
if self.lower_bound is not None and int_value < self.lower_bound:
raise ValueError("Value for field '{0}': {1} is less than lower bound {2}".format(
self.name, int_value, self.lower_bound))
if self.upper_bound is not None and int_value > self.upper_bound:
raise ValueError("Value for field '{0}': {1} is greater than upper bound {2}".format(
self.name, int_value, self.upper_bound))
return int_value
class StringField(Field):
"""A field that expects a string value"""
def __init__(self, name, default=None,
pattern=None):
"""Initialize this StringField
Args:
name: A str. The name of this field.
pattern: A str or None. A regexp that defines the acceptable
pattern for this field. If None, all strings will be
accepted.
"""
super(StringField, self).__init__(name, default)
if pattern:
self.pattern = re.compile(pattern)
else:
self.pattern = None
def validate(self, value):
"""Ensure that the supplied value is a valid string
It will attempt to convert the supplied value to a string
and ensure that it matches the pattern for this field if
one exists.
Args:
            value: A str or value convertible to a str.
Returns:
A str. This is the converted and validated value.
Raises:
ValueError if value is not valid for this field
"""
str_value = str(value)
if self.pattern and not re.match(self.pattern, str_value):
raise ValueError("Value for field '{0}': '{1}' does not match pattern.".format(self.name, str_value))
return str_value
class BoolField(Field):
"""A field that expects a boolean value"""
TRUE_VALUES = ["true", "True", "1", "yes", True, 1]
def validate(self, value):
"""Ensure that supplied value is a valid boolean
The supplied value will be checked against a list of
true values. If the value is not in the list, it is
considered False.
Args:
            value: A bool or value convertible to a bool.
Returns:
A bool. This is the converted and validated value.
"""
return value in self.TRUE_VALUES
class ListField(Field):
"""A field that expects a list of values"""
def __init__(self, name, field_type):
"""Initialize this ListField
Args:
name: A str. The name of this field.
field_type: A Field. All values in this sequence
will be validated against it. The name of this
field is meaningless and will be ignored.
"""
super(ListField, self).__init__(name)
self.field_type = field_type
def validate(self, value):
"""Ensure that supplied value is a valid list field
Verifies that the supplied value is a list which contains
fields that validate against self.field_type.
Args:
value: A list. The list should contain values that
validate against self.field_type.
Returns:
A list of validated values.
Raises:
ValueError if any of the list field values are not valid.
"""
return [self.field_type.validate(v) for v in value]
class NestedField(Field):
"""A field that contains a dictionary of other fields"""
def __init__(self, name, *config_fields):
"""Initialize this NestedField
Note: NestedFields cannot have default values. However,
fields nested under them can.
Args:
name: A str. The name of this field.
config_fields: A list of Fields. Defines the fields nested
under this field.
"""
super(NestedField, self).__init__(name)
self.config_fields = config_fields
self.tuple_type = collections.namedtuple("NestedField_{0}".format(name),
[c.name for c in config_fields])
def validate(self, value):
"""Ensure that supplied value is a valid nested field
Verifies that the supplied value contains all the fields defined
by config_fields and that they have appropriate values (or
appropriate defaults if the fields are missing).
Args:
value: A dict. Describes the names and values of the fields
nested under this field.
Returns:
            A namedtuple instance. This allows attribute access to nested
fields.
Raises:
ValueError if any of the nested field values are not valid.
"""
valid_dict = _validate(value, self.config_fields)
return self.tuple_type(**valid_dict)
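# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): exercising the
# FooConfig schema shown in the CheckedConfig docstring with an in-memory dict
# instead of a config file path.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class FooConfig(CheckedConfig):
        CONFIG_FIELDS = [
            StringField("name", pattern=r"\w+"),
            IntField("age", lower_bound=0, upper_bound=150),
            NestedField("attributes",
                        BoolField("cool_guy", default=False),
                        BoolField("smart_guy", default=False)),
        ]
    config = FooConfig({"name": "Brad", "age": 31,
                        "attributes": {"cool_guy": "true"}})
    print config.name                  # Brad
    print config.attributes.cool_guy   # True
    print config.attributes.smart_guy  # False (default)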
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service import api
from sahara.service.validations import cluster_templates as ct
from sahara.tests.unit.service.validation import utils as u
class TestClusterTemplateCreateValidation(u.ValidationTestCase):
def setUp(self):
super(TestClusterTemplateCreateValidation, self).setUp()
self._create_object_fun = ct.check_cluster_template_create
self.scheme = ct.CLUSTER_TEMPLATE_SCHEMA
api.plugin_base.setup_plugins()
def test_cluster_template_create_v_cluster_configs(self):
self._assert_cluster_configs_validation()
def test_cluster_template_create_v_ng(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{'name': 'a'}
]
},
bad_req_i=(1, 'VALIDATION_ERROR',
"{'name': 'a'} is not valid under "
"any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{'name': 'a',
'flavor_id': '42'}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'name': 'a', 'flavor_id': '42'} "
"is not valid under any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{'name': 'a',
'flavor_id': '42',
'node_processes': ['namenode']}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'node_processes': ['namenode'], "
"'name': 'a', "
"'flavor_id': '42'} "
"is not valid under any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
'name': 'a',
'flavor_id': '42',
'node_processes': ['namenode'],
'count': 1
},
{
"node_group_template_id": "550e8400-e29b-41d4-a716-"
"446655440000",
"name": "a",
'count': 2
}
]
},
bad_req_i=(1, "INVALID_DATA",
"Duplicates in node group names are detected")
)
def test_cluster_template_create_v_ng_templates(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
"node_group_template_id": "",
"name": "test",
}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'node_group_template_id': '', 'name': 'test'} "
"is not valid under any of the given schemas")
)
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
"node_group_template_id": "test",
"name": "test",
'count': 3
}
]
},
bad_req_i=(1, "VALIDATION_ERROR",
"{'count': 3, "
"'node_group_template_id': 'test', "
"'name': 'test'} "
"is not valid under any of the given schemas")
)
def test_cluster_template_create_v_ng_templates_right(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1",
'node_groups': [
{
"node_group_template_id": "550e8400-e29b-41d4-a716-"
"446655440000",
"name": "test",
'count': 3
}
]
},
)
def test_cluster_template_create_v_name_base(self):
data = {
'name': "testname",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1"
}
self._assert_valid_name_hostname_validation(data)
def test_cluster_template_create_v_types(self):
data = {
'name': "testname",
'plugin_name': "vanilla",
'hadoop_version': "1.2.1"
}
self._assert_types(data)
def test_cluster_template_create_v_required(self):
self._assert_create_object_validation(
data={},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'test-name'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'plugin_name' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'testname',
'plugin_name': 'vanilla'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'hadoop_version' is a required property")
)
def test_cluster_template_create_v_right(self):
self._assert_create_object_validation(
data={
'name': 'testname',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1'
})
def test_cluster_template_create_v_plugin_name_exists(self):
self._assert_create_object_validation(
data={
'name': "test-name",
'plugin_name': "wrong_plugin",
'hadoop_version': "1.2.1",
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Sahara doesn't contain plugin "
"with name 'wrong_plugin'")
)
def test_cluster_template_create_v_unique_cl(self):
data = {
'name': 'test',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1'
}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'NAME_ALREADY_EXISTS',
"Cluster template with name 'test' already exists")
)
def test_cluster_template_wrong_neutron_mngmt_net(self):
data = {
'name': 'test-template',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'neutron_management_network': '53a36917-ab9f-4589'
'-94ce-b6df85a68332'
}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'NOT_FOUND', "Network 53a36917-ab9f-4589-"
"94ce-b6df85a68332 not found")
)
def test_cluster_create_v_default_image_required_tags(self):
self._assert_cluster_default_image_tags_validation()
|
|
# -*- coding: utf-8 -*-
'''
Build entity grid using StanfordCoreNLP
Reference: Barzilay, R., & Lapata, M. (2008).
Modeling local coherence: An entity-based approach.
Computational Linguistics, 34(1), 1-34.
'''
from __future__ import print_function, division
from collections import defaultdict
from functools import reduce
from pprint import pprint
import doctest
import os
import pandas as pd
from corenlp import StanfordCoreNLP
class CoreNLP(object):
    '''Connect to the CoreNLP server'''
_NLP = StanfordCoreNLP(os.environ.get('CORENLP_URL') or
'http://localhost:9000')
_LOCAL_DEMO_PROP = {
'annotators':
'tokenize, ssplit, pos, lemma, ner, depparse, openie, coref',
"openie.resolve_coref": "true",
'outputFormat': 'json'
}
_ONLINE_DEMO_PROP = {
"annotators": "tokenize,ssplit,pos,ner,depparse,openie,coref",
"coref.md.type": "dep",
"coref.mode": "statistical",
'outputFormat': 'json'
}
@staticmethod
def annotate(text):
'''Get result from CoreNLP via JSON'''
try:
return CoreNLP.nlp().annotate(text,
properties=CoreNLP._ONLINE_DEMO_PROP)
except UnicodeError:
pprint(text)
@staticmethod
def nlp():
'''Return CoreNLP Server'''
return CoreNLP._NLP
class Constants(object):
'''Some constants'''
REMOVE_ABBR = {'Inc.', 'Inc', 'Corp.', 'Corp'}
_NOUNS = {'NN', 'NNS', 'NNP', 'NNPS', 'PRP'}
# S O X
_SUBJECTS = {'subj', 'nsubj', 'nsubjpass', 'csubj', 'csubjpass'}
_OBJECTS = {'obj', 'iobj', 'dobj'}
SUB, OBJ, OTHER, NOSHOW = 'S', 'O', 'X', '-'
@staticmethod
def noun_tags():
"""Get noun POS tags"""
return Constants._NOUNS
@staticmethod
def get_role(dep):
"""Indentify an entity's grammatical role"""
if dep in Constants._SUBJECTS:
return Constants.SUB
elif dep in Constants._OBJECTS:
return Constants.OBJ
else:
return Constants.OTHER
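# Illustrative behaviour of the helpers above (comments only):
#   Constants.get_role('nsubj') -> 'S'  (entity mentioned as a subject)
#   Constants.get_role('dobj')  -> 'O'  (entity mentioned as an object)
#   Constants.get_role('nmod')  -> 'X'  (entity mentioned in any other role)
# Entities that never occur in a sentence are later marked '-' (Constants.NOSHOW)
# when the grid is built by EntityGrid below.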
class EntityGrid(object):
'''
Entity grid
>>> eg = EntityGrid('My friend is Bob. He loves playing basketball.')
>>> 'friend' in eg.grid.columns and 'he' in eg.grid.columns
True
>>> 'he' not in eg.resolve_coreference().grid.columns
True
'''
def __init__(self, text):
self.text = ' '.join([token
for token in text.split(' ')
if token not in Constants.REMOVE_ABBR])
self._data = CoreNLP.annotate(self.text)
self._sentences = self._data['sentences']
# import pdb; pdb.set_trace()
self._depens = [s['basicDependencies'] for s in self._sentences]
self._entity_tokens = [
[t for t in s['tokens']
if t['pos'] in Constants.noun_tags()] for s in self._sentences
]
self._noun2lemma = self._set_up_noun2lemma()
self._grid = self._set_up_grid()
@property
def grid(self):
"""Entity grid"""
return self._grid
@property
def nouns(self):
"""All nouns in text"""
return self._noun2lemma.keys()
@property
def lemmas(self):
"""All lemmas in text"""
return self._noun2lemma.values()
def noun2lemma(self, noun):
"""Convert a noun to its lemma"""
return self._noun2lemma[noun] if noun in self.nouns else None
def _set_up_noun2lemma(self):
noun2lemma = {}
for token in self._entity_tokens:
for ety in token:
noun2lemma[ety['word']] = ety['lemma']
return noun2lemma
def _set_up_grid(self):
depens, entities, noun2lemma = self._depens, self._entity_tokens,\
self._noun2lemma
assert len(depens) == len(entities)
grid = defaultdict(
lambda: [Constants.NOSHOW for i in range(len(depens))])
for i, (dep, ety) in enumerate(zip(depens, entities)):
nouns = [e['word'] for e in ety]
try:
[d['dependentGloss'] for d in dep]
except KeyError:
pprint(dep)
pprint(i)
pprint(self.text)
nouns_dp = [
d
for d in dep
if d['dependentGloss'] in nouns and d['dep'] != 'compound'
]
for n_dp in nouns_dp:
grid[noun2lemma[n_dp['dependentGloss']]][i] = \
Constants.get_role(n_dp['dep']) # yapf: disable
return pd.DataFrame.from_dict(grid)
def _map_phrase_to_entity(self, phrase):
'''e.g. my friend => friend, friend in grid
my friend is Bob => friend, friend and Bob in grid, choose former
'''
nouns = [w for w in phrase.split(' ') if w in self.nouns]
lemmas = [self.noun2lemma(w)
for w in nouns if self.noun2lemma(w) in self.grid.columns]
# pprint(lemmas)
return lemmas[0] if lemmas != [] else None
def _add_column(self, _c1, _c2):
'''Add grid[c2] to grid[c1]'''
assert len(self.grid[_c1]) == len(self.grid[_c2])
assert _c1 != _c2
col1, col2 = self.grid[_c1], self.grid[_c2]
for i, _col1 in enumerate(col1):
if _col1 == Constants.NOSHOW:
col1[i] = col2[i]
self.grid.pop(_c2)
return _c1
def _add_columns(self, _c1, *c):
'''Add columns of grid to the first'''
reduce(self._add_column, [_c1] + list(c))
def resolve_coreference(self):
'''Resolve coreference by merging columns in grid'''
is_rep = 'isRepresentativeMention'
for chain in [chains
for chains in self._data['corefs'].values()
if len(chains) > 1]:
core_entity, other_entities = None, []
for cor in chain:
word = self._map_phrase_to_entity(cor['text'])
if word is not None and word not in other_entities:
if cor[is_rep]:
core_entity = word
elif word != core_entity:
other_entities.append(word)
else:
pass
if core_entity is not None and other_entities != []:
self._add_columns(core_entity, *other_entities)
return self
if __name__ == '__main__':
doctest.testmod()
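# Example session (a sketch; assumes a CoreNLP server is reachable via the
# CORENLP_URL environment variable or at http://localhost:9000):
#
#   eg = EntityGrid('My friend is Bob. He loves playing basketball.')
#   eg.grid                        # DataFrame: one row per sentence, one column
#                                  # per entity lemma, cells in {'S', 'O', 'X', '-'}
#   eg.resolve_coreference().grid  # the 'he' column is merged into 'friend'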
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import itertools
from st2common.util.enum import Enum
from st2common.constants.types import ResourceType as SystemResourceType
__all__ = [
'SystemRole',
'PermissionType',
'ResourceType',
'RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP',
'PERMISION_TYPE_TO_DESCRIPTION_MAP',
'ALL_PERMISSION_TYPES',
'GLOBAL_PERMISSION_TYPES',
'GLOBAL_PACK_PERMISSION_TYPES',
'LIST_PERMISSION_TYPES',
'get_resource_permission_types_with_descriptions'
]
class PermissionType(Enum):
"""
Available permission types.
"""
# Note: There is no create endpoint for runner types right now
RUNNER_LIST = 'runner_type_list'
RUNNER_VIEW = 'runner_type_view'
RUNNER_MODIFY = 'runner_type_modify'
RUNNER_ALL = 'runner_type_all'
PACK_LIST = 'pack_list'
PACK_VIEW = 'pack_view'
PACK_CREATE = 'pack_create'
PACK_MODIFY = 'pack_modify'
PACK_DELETE = 'pack_delete'
# Pack-management specific permissions
# Note: Right now those permissions are global and apply to all the packs.
# In the future we plan to support globs.
PACK_INSTALL = 'pack_install'
PACK_UNINSTALL = 'pack_uninstall'
PACK_REGISTER = 'pack_register'
PACK_CONFIG = 'pack_config'
PACK_SEARCH = 'pack_search'
PACK_VIEWS_INDEX_HEALTH = 'pack_views_index_health'
PACK_ALL = 'pack_all'
    # Note: Right now we only have read endpoints + update for sensor types
SENSOR_LIST = 'sensor_type_list'
SENSOR_VIEW = 'sensor_type_view'
SENSOR_MODIFY = 'sensor_type_modify'
SENSOR_ALL = 'sensor_type_all'
ACTION_LIST = 'action_list'
ACTION_VIEW = 'action_view'
ACTION_CREATE = 'action_create'
ACTION_MODIFY = 'action_modify'
ACTION_DELETE = 'action_delete'
ACTION_EXECUTE = 'action_execute'
ACTION_ALL = 'action_all'
ACTION_ALIAS_LIST = 'action_alias_list'
ACTION_ALIAS_VIEW = 'action_alias_view'
ACTION_ALIAS_CREATE = 'action_alias_create'
ACTION_ALIAS_MODIFY = 'action_alias_modify'
ACTION_ALIAS_MATCH = 'action_alias_match'
ACTION_ALIAS_HELP = 'action_alias_help'
ACTION_ALIAS_DELETE = 'action_alias_delete'
ACTION_ALIAS_ALL = 'action_alias_all'
# Note: Execution create is granted with "action_execute"
EXECUTION_LIST = 'execution_list'
EXECUTION_VIEW = 'execution_view'
EXECUTION_RE_RUN = 'execution_rerun'
EXECUTION_STOP = 'execution_stop'
EXECUTION_ALL = 'execution_all'
EXECUTION_VIEWS_FILTERS_LIST = 'execution_views_filters_list'
RULE_LIST = 'rule_list'
RULE_VIEW = 'rule_view'
RULE_CREATE = 'rule_create'
RULE_MODIFY = 'rule_modify'
RULE_DELETE = 'rule_delete'
RULE_ALL = 'rule_all'
RULE_ENFORCEMENT_LIST = 'rule_enforcement_list'
RULE_ENFORCEMENT_VIEW = 'rule_enforcement_view'
# TODO - Maybe "datastore_item" / key_value_item ?
KEY_VALUE_VIEW = 'key_value_pair_view'
KEY_VALUE_SET = 'key_value_pair_set'
KEY_VALUE_DELETE = 'key_value_pair_delete'
WEBHOOK_LIST = 'webhook_list'
WEBHOOK_VIEW = 'webhook_view'
WEBHOOK_CREATE = 'webhook_create'
WEBHOOK_SEND = 'webhook_send'
WEBHOOK_DELETE = 'webhook_delete'
WEBHOOK_ALL = 'webhook_all'
TIMER_LIST = 'timer_list'
TIMER_VIEW = 'timer_view'
TIMER_ALL = 'timer_all'
API_KEY_LIST = 'api_key_list'
API_KEY_VIEW = 'api_key_view'
API_KEY_CREATE = 'api_key_create'
API_KEY_MODIFY = 'api_key_modify'
API_KEY_DELETE = 'api_key_delete'
API_KEY_ALL = 'api_key_all'
TRACE_LIST = 'trace_list'
TRACE_VIEW = 'trace_view'
TRACE_ALL = 'trace_all'
    # Note: Trigger permission types are also used for the Timer API endpoint since a timer is just
# a special type of a trigger
TRIGGER_LIST = 'trigger_list'
TRIGGER_VIEW = 'trigger_view'
TRIGGER_ALL = 'trigger_all'
POLICY_TYPE_LIST = 'policy_type_list'
POLICY_TYPE_VIEW = 'policy_type_view'
POLICY_TYPE_ALL = 'policy_type_all'
POLICY_LIST = 'policy_list'
POLICY_VIEW = 'policy_view'
POLICY_CREATE = 'policy_create'
POLICY_MODIFY = 'policy_modify'
POLICY_DELETE = 'policy_delete'
POLICY_ALL = 'policy_all'
@classmethod
def get_valid_permissions_for_resource_type(cls, resource_type):
"""
Return valid permissions for the provided resource type.
:rtype: ``list``
"""
valid_permissions = RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP[resource_type]
return valid_permissions
@classmethod
def get_resource_type(cls, permission_type):
"""
Retrieve resource type from the provided permission type.
:rtype: ``str``
"""
# Special case for:
# * PACK_VIEWS_INDEX_HEALTH
# * EXECUTION_VIEWS_FILTERS_LIST
if permission_type == PermissionType.PACK_VIEWS_INDEX_HEALTH:
return ResourceType.PACK
elif permission_type == PermissionType.EXECUTION_VIEWS_FILTERS_LIST:
return ResourceType.EXECUTION
split = permission_type.split('_')
assert len(split) >= 2
return '_'.join(split[:-1])
@classmethod
def get_permission_name(cls, permission_type):
"""
Retrieve permission name from the provided permission type.
:rtype: ``str``
"""
split = permission_type.split('_')
assert len(split) >= 2
# Special case for PACK_VIEWS_INDEX_HEALTH
if permission_type == PermissionType.PACK_VIEWS_INDEX_HEALTH:
split = permission_type.split('_', 1)
return split[1]
return split[-1]
@classmethod
def get_permission_description(cls, permission_type):
"""
Retrieve a description for the provided permission_type.
:rtype: ``str``
"""
description = PERMISION_TYPE_TO_DESCRIPTION_MAP[permission_type]
return description
@classmethod
def get_permission_type(cls, resource_type, permission_name):
"""
Retrieve permission type enum value for the provided resource type and permission name.
:rtype: ``str``
"""
# Special case for sensor type (sensor_type -> sensor)
if resource_type == ResourceType.SENSOR:
resource_type = 'sensor'
permission_enum = '%s_%s' % (resource_type.upper(), permission_name.upper())
result = getattr(cls, permission_enum, None)
if not result:
raise ValueError('Unsupported permission type for type "%s" and name "%s"' %
(resource_type, permission_name))
return result
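# Illustrative behaviour of the classmethods above (comments only; assumes the
# SystemResourceType values follow the usual naming, e.g. ACTION == 'action'):
#   PermissionType.get_resource_type('action_create')    -> 'action'
#   PermissionType.get_permission_name('action_create')  -> 'create'
#   PermissionType.get_permission_name(PermissionType.PACK_VIEWS_INDEX_HEALTH)
#       -> 'views_index_health'
#   PermissionType.get_permission_type(ResourceType.ACTION, 'create')
#       -> PermissionType.ACTION_CREATE ('action_create')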
class ResourceType(Enum):
"""
Resource types on which permissions can be granted.
"""
RUNNER = SystemResourceType.RUNNER_TYPE
PACK = SystemResourceType.PACK
SENSOR = SystemResourceType.SENSOR_TYPE
ACTION = SystemResourceType.ACTION
ACTION_ALIAS = SystemResourceType.ACTION_ALIAS
RULE = SystemResourceType.RULE
RULE_ENFORCEMENT = SystemResourceType.RULE_ENFORCEMENT
POLICY_TYPE = SystemResourceType.POLICY_TYPE
POLICY = SystemResourceType.POLICY
EXECUTION = SystemResourceType.EXECUTION
KEY_VALUE_PAIR = SystemResourceType.KEY_VALUE_PAIR
WEBHOOK = SystemResourceType.WEBHOOK
TIMER = SystemResourceType.TIMER
API_KEY = SystemResourceType.API_KEY
TRACE = SystemResourceType.TRACE
TRIGGER = SystemResourceType.TRIGGER
class SystemRole(Enum):
"""
Default system roles which can't be manipulated (modified or removed).
"""
SYSTEM_ADMIN = 'system_admin' # Special role which can't be revoked.
ADMIN = 'admin'
OBSERVER = 'observer'
# Maps a list of available permission types for each resource
RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP = {
ResourceType.RUNNER: [
PermissionType.RUNNER_LIST,
PermissionType.RUNNER_VIEW,
PermissionType.RUNNER_MODIFY,
PermissionType.RUNNER_ALL,
],
ResourceType.PACK: [
PermissionType.PACK_LIST,
PermissionType.PACK_VIEW,
PermissionType.PACK_CREATE,
PermissionType.PACK_MODIFY,
PermissionType.PACK_DELETE,
PermissionType.PACK_INSTALL,
PermissionType.PACK_UNINSTALL,
PermissionType.PACK_REGISTER,
PermissionType.PACK_CONFIG,
PermissionType.PACK_SEARCH,
PermissionType.PACK_VIEWS_INDEX_HEALTH,
PermissionType.PACK_ALL,
PermissionType.SENSOR_VIEW,
PermissionType.SENSOR_MODIFY,
PermissionType.SENSOR_ALL,
PermissionType.ACTION_VIEW,
PermissionType.ACTION_CREATE,
PermissionType.ACTION_MODIFY,
PermissionType.ACTION_DELETE,
PermissionType.ACTION_EXECUTE,
PermissionType.ACTION_ALL,
PermissionType.ACTION_ALIAS_VIEW,
PermissionType.ACTION_ALIAS_CREATE,
PermissionType.ACTION_ALIAS_MODIFY,
PermissionType.ACTION_ALIAS_DELETE,
PermissionType.ACTION_ALIAS_ALL,
PermissionType.RULE_VIEW,
PermissionType.RULE_CREATE,
PermissionType.RULE_MODIFY,
PermissionType.RULE_DELETE,
PermissionType.RULE_ALL
],
ResourceType.SENSOR: [
PermissionType.SENSOR_LIST,
PermissionType.SENSOR_VIEW,
PermissionType.SENSOR_MODIFY,
PermissionType.SENSOR_ALL
],
ResourceType.ACTION: [
PermissionType.ACTION_LIST,
PermissionType.ACTION_VIEW,
PermissionType.ACTION_CREATE,
PermissionType.ACTION_MODIFY,
PermissionType.ACTION_DELETE,
PermissionType.ACTION_EXECUTE,
PermissionType.ACTION_ALL
],
ResourceType.ACTION_ALIAS: [
PermissionType.ACTION_ALIAS_LIST,
PermissionType.ACTION_ALIAS_VIEW,
PermissionType.ACTION_ALIAS_CREATE,
PermissionType.ACTION_ALIAS_MODIFY,
PermissionType.ACTION_ALIAS_MATCH,
PermissionType.ACTION_ALIAS_HELP,
PermissionType.ACTION_ALIAS_DELETE,
PermissionType.ACTION_ALIAS_ALL
],
ResourceType.RULE: [
PermissionType.RULE_LIST,
PermissionType.RULE_VIEW,
PermissionType.RULE_CREATE,
PermissionType.RULE_MODIFY,
PermissionType.RULE_DELETE,
PermissionType.RULE_ALL
],
ResourceType.RULE_ENFORCEMENT: [
PermissionType.RULE_ENFORCEMENT_LIST,
PermissionType.RULE_ENFORCEMENT_VIEW,
],
ResourceType.EXECUTION: [
PermissionType.EXECUTION_LIST,
PermissionType.EXECUTION_VIEW,
PermissionType.EXECUTION_RE_RUN,
PermissionType.EXECUTION_STOP,
PermissionType.EXECUTION_ALL,
PermissionType.EXECUTION_VIEWS_FILTERS_LIST,
],
ResourceType.KEY_VALUE_PAIR: [
PermissionType.KEY_VALUE_VIEW,
PermissionType.KEY_VALUE_SET,
PermissionType.KEY_VALUE_DELETE
],
ResourceType.WEBHOOK: [
PermissionType.WEBHOOK_LIST,
PermissionType.WEBHOOK_VIEW,
PermissionType.WEBHOOK_CREATE,
PermissionType.WEBHOOK_SEND,
PermissionType.WEBHOOK_DELETE,
PermissionType.WEBHOOK_ALL
],
ResourceType.TIMER: [
PermissionType.TIMER_LIST,
PermissionType.TIMER_VIEW,
PermissionType.TIMER_ALL
],
ResourceType.API_KEY: [
PermissionType.API_KEY_LIST,
PermissionType.API_KEY_VIEW,
PermissionType.API_KEY_CREATE,
PermissionType.API_KEY_MODIFY,
PermissionType.API_KEY_DELETE,
PermissionType.API_KEY_ALL
],
ResourceType.TRACE: [
PermissionType.TRACE_LIST,
PermissionType.TRACE_VIEW,
PermissionType.TRACE_ALL
],
ResourceType.TRIGGER: [
PermissionType.TRIGGER_LIST,
PermissionType.TRIGGER_VIEW,
PermissionType.TRIGGER_ALL
],
ResourceType.POLICY_TYPE: [
PermissionType.POLICY_TYPE_LIST,
PermissionType.POLICY_TYPE_VIEW,
PermissionType.POLICY_TYPE_ALL,
],
ResourceType.POLICY: [
PermissionType.POLICY_LIST,
PermissionType.POLICY_VIEW,
PermissionType.POLICY_CREATE,
PermissionType.POLICY_MODIFY,
PermissionType.POLICY_DELETE,
PermissionType.POLICY_ALL,
]
}
ALL_PERMISSION_TYPES = RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP.values()
ALL_PERMISSION_TYPES = list(itertools.chain(*ALL_PERMISSION_TYPES))
LIST_PERMISSION_TYPES = [permission_type for permission_type in ALL_PERMISSION_TYPES if
permission_type.endswith('_list')]
# List of global permissions (ones which don't apply to a specific resource)
GLOBAL_PERMISSION_TYPES = [
# Pack global permission types
PermissionType.PACK_INSTALL,
PermissionType.PACK_UNINSTALL,
PermissionType.PACK_CREATE,
PermissionType.PACK_REGISTER,
PermissionType.PACK_CONFIG,
PermissionType.PACK_SEARCH,
PermissionType.PACK_VIEWS_INDEX_HEALTH,
# Action alias global permission types
PermissionType.ACTION_ALIAS_MATCH,
PermissionType.ACTION_ALIAS_HELP,
# API key global permission types
PermissionType.API_KEY_CREATE,
# Policy global permission types
PermissionType.POLICY_CREATE,
# Execution
PermissionType.EXECUTION_VIEWS_FILTERS_LIST
] + LIST_PERMISSION_TYPES
GLOBAL_PACK_PERMISSION_TYPES = [permission_type for permission_type in GLOBAL_PERMISSION_TYPES if
permission_type.startswith('pack_')]
# Maps a permission type to the corresponding description
PERMISION_TYPE_TO_DESCRIPTION_MAP = {
PermissionType.PACK_LIST: 'Ability to list (view all) packs.',
PermissionType.PACK_VIEW: 'Ability to view a pack.',
PermissionType.PACK_CREATE: 'Ability to create a new pack.',
PermissionType.PACK_MODIFY: 'Ability to modify (update) an existing pack.',
PermissionType.PACK_DELETE: 'Ability to delete an existing pack.',
PermissionType.PACK_INSTALL: 'Ability to install packs.',
PermissionType.PACK_UNINSTALL: 'Ability to uninstall packs.',
PermissionType.PACK_REGISTER: 'Ability to register packs and corresponding resources.',
PermissionType.PACK_CONFIG: 'Ability to configure a pack.',
PermissionType.PACK_SEARCH: 'Ability to query registry and search packs.',
PermissionType.PACK_VIEWS_INDEX_HEALTH: 'Ability to query health of pack registries.',
PermissionType.PACK_ALL: ('Ability to perform all the supported operations on a particular '
'pack.'),
PermissionType.SENSOR_LIST: 'Ability to list (view all) sensors.',
    PermissionType.SENSOR_VIEW: 'Ability to view a sensor.',
PermissionType.SENSOR_MODIFY: ('Ability to modify (update) an existing sensor. Also implies '
'"sensor_type_view" permission.'),
PermissionType.SENSOR_ALL: ('Ability to perform all the supported operations on a particular '
'sensor.'),
PermissionType.ACTION_LIST: 'Ability to list (view all) actions.',
PermissionType.ACTION_VIEW: 'Ability to view an action.',
PermissionType.ACTION_CREATE: ('Ability to create a new action. Also implies "action_view" '
'permission.'),
PermissionType.ACTION_MODIFY: ('Ability to modify (update) an existing action. Also implies '
'"action_view" permission.'),
PermissionType.ACTION_DELETE: ('Ability to delete an existing action. Also implies '
'"action_view" permission.'),
PermissionType.ACTION_EXECUTE: ('Ability to execute (run) an action. Also implies '
'"action_view" permission.'),
PermissionType.ACTION_ALL: ('Ability to perform all the supported operations on a particular '
'action.'),
PermissionType.ACTION_ALIAS_LIST: 'Ability to list (view all) action aliases.',
PermissionType.ACTION_ALIAS_VIEW: 'Ability to view an action alias.',
PermissionType.ACTION_ALIAS_CREATE: ('Ability to create a new action alias. Also implies'
' "action_alias_view" permission.'),
PermissionType.ACTION_ALIAS_MODIFY: ('Ability to modify (update) an existing action alias. '
'Also implies "action_alias_view" permission.'),
PermissionType.ACTION_ALIAS_MATCH: ('Ability to use action alias match API endpoint.'),
PermissionType.ACTION_ALIAS_HELP: ('Ability to use action alias help API endpoint.'),
PermissionType.ACTION_ALIAS_DELETE: ('Ability to delete an existing action alias. Also '
'implies "action_alias_view" permission.'),
PermissionType.ACTION_ALIAS_ALL: ('Ability to perform all the supported operations on a '
'particular action alias.'),
PermissionType.EXECUTION_LIST: 'Ability to list (view all) executions.',
PermissionType.EXECUTION_VIEW: 'Ability to view an execution.',
    PermissionType.EXECUTION_RE_RUN: 'Ability to re-run an existing execution.',
PermissionType.EXECUTION_STOP: 'Ability to stop (cancel) a running execution.',
PermissionType.EXECUTION_ALL: ('Ability to perform all the supported operations on a '
'particular execution.'),
    PermissionType.EXECUTION_VIEWS_FILTERS_LIST: ('Ability to view all the distinct execution '
'filters.'),
PermissionType.RULE_LIST: 'Ability to list (view all) rules.',
PermissionType.RULE_VIEW: 'Ability to view a rule.',
PermissionType.RULE_CREATE: ('Ability to create a new rule. Also implies "rule_view" '
                                 'permission.'),
PermissionType.RULE_MODIFY: ('Ability to modify (update) an existing rule. Also implies '
'"rule_view" permission.'),
PermissionType.RULE_DELETE: ('Ability to delete an existing rule. Also implies "rule_view" '
'permission.'),
PermissionType.RULE_ALL: ('Ability to perform all the supported operations on a particular '
'rule.'),
PermissionType.RULE_ENFORCEMENT_LIST: 'Ability to list (view all) rule enforcements.',
PermissionType.RULE_ENFORCEMENT_VIEW: 'Ability to view a rule enforcement.',
PermissionType.RUNNER_LIST: 'Ability to list (view all) runners.',
PermissionType.RUNNER_VIEW: 'Ability to view a runner.',
PermissionType.RUNNER_MODIFY: ('Ability to modify (update) an existing runner. Also implies '
'"runner_type_view" permission.'),
PermissionType.RUNNER_ALL: ('Ability to perform all the supported operations on a particular '
'runner.'),
PermissionType.WEBHOOK_LIST: 'Ability to list (view all) webhooks.',
PermissionType.WEBHOOK_VIEW: ('Ability to view a webhook.'),
PermissionType.WEBHOOK_CREATE: ('Ability to create a new webhook.'),
PermissionType.WEBHOOK_SEND: ('Ability to send / POST data to an existing webhook.'),
PermissionType.WEBHOOK_DELETE: ('Ability to delete an existing webhook.'),
PermissionType.WEBHOOK_ALL: ('Ability to perform all the supported operations on a particular '
'webhook.'),
PermissionType.TIMER_LIST: 'Ability to list (view all) timers.',
PermissionType.TIMER_VIEW: ('Ability to view a timer.'),
    PermissionType.TIMER_ALL: ('Ability to perform all the supported operations on timers.'),
PermissionType.API_KEY_LIST: 'Ability to list (view all) API keys.',
PermissionType.API_KEY_VIEW: ('Ability to view an API Key.'),
PermissionType.API_KEY_CREATE: ('Ability to create a new API Key.'),
PermissionType.API_KEY_MODIFY: ('Ability to modify (update) an existing API key. Also implies '
'"api_key_view" permission.'),
    PermissionType.API_KEY_DELETE: ('Ability to delete an existing API key.'),
PermissionType.API_KEY_ALL: ('Ability to perform all the supported operations on an API Key.'),
PermissionType.KEY_VALUE_VIEW: ('Ability to view Key-Value Pairs.'),
PermissionType.KEY_VALUE_SET: ('Ability to set a Key-Value Pair.'),
PermissionType.KEY_VALUE_DELETE: ('Ability to delete an existing Key-Value Pair.'),
PermissionType.TRACE_LIST: ('Ability to list (view all) traces.'),
PermissionType.TRACE_VIEW: ('Ability to view a trace.'),
PermissionType.TRACE_ALL: ('Ability to perform all the supported operations on traces.'),
PermissionType.TRIGGER_LIST: ('Ability to list (view all) triggers.'),
PermissionType.TRIGGER_VIEW: ('Ability to view a trigger.'),
PermissionType.TRIGGER_ALL: ('Ability to perform all the supported operations on triggers.'),
PermissionType.POLICY_TYPE_LIST: ('Ability to list (view all) policy types.'),
    PermissionType.POLICY_TYPE_VIEW: ('Ability to view a policy type.'),
PermissionType.POLICY_TYPE_ALL: ('Ability to perform all the supported operations on policy'
' types.'),
PermissionType.POLICY_LIST: 'Ability to list (view all) policies.',
PermissionType.POLICY_VIEW: ('Ability to view a policy.'),
PermissionType.POLICY_CREATE: ('Ability to create a new policy.'),
PermissionType.POLICY_MODIFY: ('Ability to modify an existing policy.'),
PermissionType.POLICY_DELETE: ('Ability to delete an existing policy.'),
PermissionType.POLICY_ALL: ('Ability to perform all the supported operations on a particular '
'policy.')
}
def get_resource_permission_types_with_descriptions():
"""
Return available permission types for each resource types with corresponding descriptions.
    :rtype: ``dict``
"""
result = {}
for resource_type, permission_types in six.iteritems(RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP):
result[resource_type] = {}
for permission_type in permission_types:
result[resource_type][permission_type] = \
PERMISION_TYPE_TO_DESCRIPTION_MAP[permission_type]
return result
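# Example (a sketch): dump the full permission catalogue, e.g. when generating
# RBAC documentation.
#
#   import json
#   catalogue = get_resource_permission_types_with_descriptions()
#   print(json.dumps(catalogue, indent=4, sort_keys=True))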
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file ...
#
# Created: Fri May 08 16:50:11 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_projectManager(object):
def setupUi(self, projectManager):
projectManager.setObjectName("projectManager")
projectManager.resize(796, 537)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/tree.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
projectManager.setWindowIcon(icon)
projectManager.setStyleSheet("QWidget{\n"
" dialogbuttonbox-buttons-have-icons: 0;\n"
" combobox-popup: 1;\n"
" tabbar-prefer-no-arrows: true;\n"
" color: #cccccc;\n"
" background-color: #484848;\n"
"}\n"
"\n"
"\n"
"QMenuBar{\n"
" background-color: #484848;\n"
"border-bottom: 1px solid #282828;\n"
"}\n"
"\n"
"\n"
"QMenuBar::item{\n"
" background: transparent;\n"
"}\n"
"\n"
"\n"
"\n"
"QMenuBar::item:selected{\n"
" background: transparent;\n"
" color:#f49c1c ;\n"
" border: 1px solid #f49c1c;\n"
"}\n"
"\n"
"\n"
"QMenuBar::item:pressed{\n"
" background: #545454;\n"
" border: 1px solid #000;\n"
" margin-bottom:-1px;\n"
" padding-bottom:1px;\n"
"}\n"
"\n"
"\n"
"QMenu{\n"
" border: 1px solid #000;\n"
"background: #484848;\n"
"padding:5px;\n"
"}\n"
"\n"
"QMenu::separator{\n"
"height: 1px;\n"
"background-color: #303030;\n"
"}\n"
"\n"
"\n"
"QMenu::item{\n"
" padding: 2px 20px 2px 20px;\n"
"background: #484848;\n"
"}\n"
"\n"
"\n"
"QMenu::item:selected{\n"
" color: #f89a2b;\n"
" background: #545454;\n"
"}\n"
"\n"
"QGroupBox{\n"
"border: 1px solid #696969;\n"
"border-radius:6px;\n"
"margin-top: 5px;\n"
"}\n"
"\n"
"QGroupBox::title{\n"
"margin-top: -12px;\n"
"}\n"
"\n"
"\n"
"QToolTip\n"
"{\n"
" border: 1px solid black;\n"
" background-color: #f0f0b4;\n"
" color: #000000;\n"
" border-radius: 3px;\n"
" opacity: 220;\n"
"}\n"
"\n"
"QLabel{\n"
"background: none;\n"
"color: #919191;\n"
"}\n"
"\n"
"QLineEdit\n"
"{\n"
" color: #000000;\n"
" background-color: #9098a0;\n"
" padding: 1px;\n"
" border-style: solid;\n"
" border: 1px solid #353535;\n"
" border-radius: 6px;\n"
"}\n"
"\n"
"\n"
"\n"
"QPushButton\n"
"{\n"
" icon-size: 12px;\n"
" background-color: #606060;\n"
" border-width: 1px;\n"
" border-color: #353535;\n"
" border-style: solid;\n"
" border-radius: 6px;\n"
" padding: 5px;\n"
" padding-left: 2px;\n"
" padding-right: 2px;\n"
"}\n"
"\n"
"QPushButton:flat {\n"
" border: none;\n"
" background-color: none;\n"
"}\n"
"\n"
"QPushButton:disabled\n"
"{\n"
"border: 1px solid #4A4A4A;\n"
"}\n"
"\n"
"\n"
"QPushButton:hover\n"
"{\n"
" background-color: #686868;\n"
"}\n"
"\n"
"QPushButton:pressed,QPushButton:focus:pressed\n"
"{\n"
" color: #000;\n"
" background-color: #f89a2b;\n"
"}\n"
"\n"
"\n"
"\n"
"QScrollBar:horizontal {\n"
" background: #404040;\n"
" height: 16px;\n"
" margin: 0 -2px 0 -2px;\n"
" border: 1px solid #383838;\n"
"}\n"
"\n"
"\n"
"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal\n"
"{\n"
" background: none;\n"
"}\n"
"\n"
"QScrollBar::add-line:horizontal {\n"
" border-radius: 0px;\n"
" border: none;\n"
" width: 0px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:horizontal {\n"
" border-radius: 0px;\n"
" border: none;\n"
" width: 0px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"/*\n"
"QScrollBar::right-arrow:horizontal, QScrollBar::left-arrow:horizontal\n"
"{\n"
" border: 1px solid black;\n"
" width: 1px;\n"
" height: 1px;\n"
" background: white;\n"
"}\n"
"*/\n"
"\n"
"\n"
"\n"
"\n"
"QScrollBar:vertical\n"
"{\n"
" background: #404040;\n"
" width: 16px;\n"
" margin: -2px 0 -2px 0;\n"
" border: 1px solid #383838;\n"
"}\n"
"\n"
"\n"
"QScrollBar::add-line:vertical\n"
"{\n"
" border-radius: 2px;\n"
" border: 1px solid #383838;\n"
" height: 0px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:vertical\n"
"{\n"
" border-radius: 2px;\n"
" border: 1px solid #383838;\n"
" height: 0px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"/*\n"
"QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical\n"
"{\n"
" border: 1px solid black;\n"
" width: 1px;\n"
" height: 1px;\n"
" background: white;\n"
"}\n"
"*/\n"
"\n"
"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical\n"
"{\n"
" background: none;\n"
"}\n"
"\n"
"\n"
"QTreeView {\n"
" color: #000000;\n"
" background-color: #9098a0;\n"
" alternate-background-color: #9098a0;\n"
"/*\n"
" padding-top: 6px;\n"
" padding-bottom: 6px;\n"
" border-radius: 6px;\n"
"*/\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: #282828;\n"
"}\n"
"\n"
"\n"
" QTreeView::item:hover {\n"
"background-color: #A2A9B0;\n"
" }\n"
"\n"
" QTreeView::item:selected {\n"
"background-color: #B5BCC4;\n"
"color: black;\n"
" }\n"
"\n"
"\n"
"QHeaderView:section {\n"
"min-height: 18px;\n"
" background-color: #64707c;\n"
" color: #bbbbbb;\n"
" padding-left: 4px;\n"
" border: 1px solid #44505c;\n"
"border-top: none;\n"
"border-left: none;\n"
"}\n"
"\n"
"\n"
"\n"
"QHeaderView::section:last{\n"
"\n"
"border-right:none;\n"
"}\n"
"\n"
"/*\n"
"QHeaderView::down-arrow {\n"
" image: url(down_arrow.png);\n"
"}\n"
"\n"
"QHeaderView::up-arrow {\n"
" image: url(up_arrow.png);\n"
"}\n"
"*/\n"
"\n"
"\n"
"\n"
"")
self.centralwidget = QtGui.QWidget(projectManager)
self.centralwidget.setStyleSheet("")
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setContentsMargins(5, 5, 5, 5)
self.gridLayout.setObjectName("gridLayout")
self.togglePathsCheckBox = QtGui.QCheckBox(self.centralwidget)
self.togglePathsCheckBox.setMinimumSize(QtCore.QSize(0, 20))
self.togglePathsCheckBox.setMaximumSize(QtCore.QSize(16777215, 20))
self.togglePathsCheckBox.setObjectName("togglePathsCheckBox")
self.gridLayout.addWidget(self.togglePathsCheckBox, 0, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(583, 17, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 0, 1, 1, 1)
self.filtersBtn = QtGui.QToolButton(self.centralwidget)
self.filtersBtn.setMinimumSize(QtCore.QSize(106, 20))
self.filtersBtn.setMaximumSize(QtCore.QSize(120, 20))
self.filtersBtn.setStyleSheet("\n"
"QToolButton\n"
"{\n"
"min-width: 100px;\n"
" icon-size: 12px;\n"
" background-color: #606060;\n"
" border-width: 1px;\n"
" border-color: #353535;\n"
" border-style: solid;\n"
" border-radius: 6px;\n"
" padding: 2px;\n"
" padding-left: 2px;\n"
" padding-right: 2px;\n"
"}\n"
"\n"
"QToolButton:flat {\n"
"min-width: 100px;\n"
" border: none;\n"
" background-color: none;\n"
"}\n"
"\n"
"QToolButton:hover\n"
"{\n"
"min-width: 100px;\n"
" background-color: #686868;\n"
"}\n"
"\n"
"QToolButton:pressed,QToolButton:focus:pressed\n"
"{\n"
"min-width: 100px;\n"
" color: #000;\n"
" background-color: #f89a2b;\n"
"}\n"
"")
self.filtersBtn.setCheckable(False)
self.filtersBtn.setPopupMode(QtGui.QToolButton.InstantPopup)
self.filtersBtn.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
self.filtersBtn.setObjectName("filtersBtn")
self.gridLayout.addWidget(self.filtersBtn, 0, 2, 1, 1)
self.projectsSplitter = QtGui.QSplitter(self.centralwidget)
self.projectsSplitter.setOrientation(QtCore.Qt.Horizontal)
self.projectsSplitter.setObjectName("projectsSplitter")
self.projectTree = QtGui.QTreeWidget(self.projectsSplitter)
self.projectTree.setFocusPolicy(QtCore.Qt.NoFocus)
self.projectTree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.projectTree.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.projectTree.setAlternatingRowColors(True)
self.projectTree.setIndentation(5)
self.projectTree.setRootIsDecorated(False)
self.projectTree.setUniformRowHeights(False)
self.projectTree.setItemsExpandable(False)
self.projectTree.setHeaderHidden(False)
self.projectTree.setExpandsOnDoubleClick(False)
self.projectTree.setObjectName("projectTree")
self.projectTree.header().setVisible(True)
self.projectTree.header().setDefaultSectionSize(200)
self.projectTree.header().setMinimumSectionSize(25)
self.projectTree.header().setSortIndicatorShown(False)
self.sceneTree = QtGui.QTreeWidget(self.projectsSplitter)
self.sceneTree.setMinimumSize(QtCore.QSize(0, 0))
self.sceneTree.setFocusPolicy(QtCore.Qt.NoFocus)
self.sceneTree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.sceneTree.setIndentation(5)
self.sceneTree.setRootIsDecorated(False)
self.sceneTree.setHeaderHidden(False)
self.sceneTree.setExpandsOnDoubleClick(False)
self.sceneTree.setObjectName("sceneTree")
self.sceneTree.header().setVisible(True)
self.sceneTree.header().setDefaultSectionSize(200)
self.gridLayout.addWidget(self.projectsSplitter, 1, 0, 1, 3)
projectManager.setCentralWidget(self.centralwidget)
self.menuBar = QtGui.QMenuBar(projectManager)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 796, 21))
self.menuBar.setNativeMenuBar(True)
self.menuBar.setObjectName("menuBar")
self.menuFile = QtGui.QMenu(self.menuBar)
self.menuFile.setObjectName("menuFile")
self.menuScenes = QtGui.QMenu(self.menuBar)
self.menuScenes.setObjectName("menuScenes")
self.menuHelp = QtGui.QMenu(self.menuBar)
self.menuHelp.setObjectName("menuHelp")
projectManager.setMenuBar(self.menuBar)
self.act_addExisting = QtGui.QAction(projectManager)
self.act_addExisting.setObjectName("act_addExisting")
self.act_removeSelected = QtGui.QAction(projectManager)
self.act_removeSelected.setObjectName("act_removeSelected")
self.act_setAsCurrent = QtGui.QAction(projectManager)
self.act_setAsCurrent.setObjectName("act_setAsCurrent")
self.act_openSelectedScene = QtGui.QAction(projectManager)
self.act_openSelectedScene.setObjectName("act_openSelectedScene")
self.act_importSelectedScene = QtGui.QAction(projectManager)
self.act_importSelectedScene.setObjectName("act_importSelectedScene")
self.act_importSelectedAsRef = QtGui.QAction(projectManager)
self.act_importSelectedAsRef.setObjectName("act_importSelectedAsRef")
self.act_about = QtGui.QAction(projectManager)
self.act_about.setObjectName("act_about")
self.act_docs = QtGui.QAction(projectManager)
self.act_docs.setObjectName("act_docs")
self.act_exploreProject = QtGui.QAction(projectManager)
self.act_exploreProject.setObjectName("act_exploreProject")
self.act_exploreSceneFolder = QtGui.QAction(projectManager)
self.act_exploreSceneFolder.setObjectName("act_exploreSceneFolder")
self.act_newProject = QtGui.QAction(projectManager)
self.act_newProject.setObjectName("act_newProject")
self.menuFile.addAction(self.act_newProject)
self.menuFile.addSeparator()
self.menuFile.addAction(self.act_setAsCurrent)
self.menuFile.addAction(self.act_exploreProject)
self.menuFile.addSeparator()
self.menuFile.addAction(self.act_addExisting)
self.menuFile.addAction(self.act_removeSelected)
self.menuScenes.addAction(self.act_openSelectedScene)
self.menuScenes.addAction(self.act_importSelectedScene)
self.menuScenes.addAction(self.act_importSelectedAsRef)
self.menuScenes.addAction(self.act_exploreSceneFolder)
self.menuHelp.addAction(self.act_docs)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menuScenes.menuAction())
self.menuBar.addAction(self.menuHelp.menuAction())
self.retranslateUi(projectManager)
QtCore.QMetaObject.connectSlotsByName(projectManager)
def retranslateUi(self, projectManager):
projectManager.setWindowTitle(QtGui.QApplication.translate("projectManager", "Project Manager", None, QtGui.QApplication.UnicodeUTF8))
self.togglePathsCheckBox.setToolTip(QtGui.QApplication.translate("projectManager", "Toggle the display of project and scene paths in the lists", None, QtGui.QApplication.UnicodeUTF8))
self.togglePathsCheckBox.setText(QtGui.QApplication.translate("projectManager", "Show Paths", None, QtGui.QApplication.UnicodeUTF8))
self.filtersBtn.setToolTip(QtGui.QApplication.translate("projectManager", "Choose which filetypes to display in the Scene List", None, QtGui.QApplication.UnicodeUTF8))
self.filtersBtn.setText(QtGui.QApplication.translate("projectManager", "Show filetypes...", None, QtGui.QApplication.UnicodeUTF8))
self.projectTree.setToolTip(QtGui.QApplication.translate("projectManager", "The Project List", None, QtGui.QApplication.UnicodeUTF8))
self.projectTree.headerItem().setText(0, QtGui.QApplication.translate("projectManager", "Project", None, QtGui.QApplication.UnicodeUTF8))
self.projectTree.headerItem().setText(1, QtGui.QApplication.translate("projectManager", "Path", None, QtGui.QApplication.UnicodeUTF8))
self.sceneTree.setToolTip(QtGui.QApplication.translate("projectManager", "The Scene List", None, QtGui.QApplication.UnicodeUTF8))
self.sceneTree.headerItem().setText(0, QtGui.QApplication.translate("projectManager", "Scene", None, QtGui.QApplication.UnicodeUTF8))
self.sceneTree.headerItem().setText(1, QtGui.QApplication.translate("projectManager", "Path", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("projectManager", "Projects", None, QtGui.QApplication.UnicodeUTF8))
self.menuScenes.setTitle(QtGui.QApplication.translate("projectManager", "Scenes", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(QtGui.QApplication.translate("projectManager", "Help", None, QtGui.QApplication.UnicodeUTF8))
self.act_addExisting.setText(QtGui.QApplication.translate("projectManager", "Add Existing Project to List...", None, QtGui.QApplication.UnicodeUTF8))
self.act_removeSelected.setText(QtGui.QApplication.translate("projectManager", "Remove Selected from List", None, QtGui.QApplication.UnicodeUTF8))
self.act_setAsCurrent.setText(QtGui.QApplication.translate("projectManager", "Set Selected As Current", None, QtGui.QApplication.UnicodeUTF8))
self.act_openSelectedScene.setText(QtGui.QApplication.translate("projectManager", "Open Selected Scene", None, QtGui.QApplication.UnicodeUTF8))
self.act_importSelectedScene.setText(QtGui.QApplication.translate("projectManager", "Import Selected Scene", None, QtGui.QApplication.UnicodeUTF8))
self.act_importSelectedAsRef.setText(QtGui.QApplication.translate("projectManager", "Import Selected as Referenced", None, QtGui.QApplication.UnicodeUTF8))
self.act_about.setText(QtGui.QApplication.translate("projectManager", "About...", None, QtGui.QApplication.UnicodeUTF8))
self.act_docs.setText(QtGui.QApplication.translate("projectManager", "Documentation", None, QtGui.QApplication.UnicodeUTF8))
self.act_exploreProject.setText(QtGui.QApplication.translate("projectManager", "Open Project Folder...", None, QtGui.QApplication.UnicodeUTF8))
self.act_exploreProject.setToolTip(QtGui.QApplication.translate("projectManager", "Open Project Folder", None, QtGui.QApplication.UnicodeUTF8))
self.act_exploreSceneFolder.setText(QtGui.QApplication.translate("projectManager", "Open Scene Folder...", None, QtGui.QApplication.UnicodeUTF8))
self.act_exploreSceneFolder.setToolTip(QtGui.QApplication.translate("projectManager", "Open Containing Folder", None, QtGui.QApplication.UnicodeUTF8))
self.act_newProject.setText(QtGui.QApplication.translate("projectManager", "New Project...", None, QtGui.QApplication.UnicodeUTF8))
self.act_newProject.setToolTip(QtGui.QApplication.translate("projectManager", "Create a new project", None, QtGui.QApplication.UnicodeUTF8))
|
|
# Copyright 2015-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file can't be called email.py because if it is, we cannot:
import email.utils
import logging
import os
from enum import Enum
import attr
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
MISSING_PASSWORD_RESET_CONFIG_ERROR = """\
Password reset emails are enabled on this homeserver due to a partial
'email' block. However, the following required keys are missing:
%s
"""
DEFAULT_SUBJECTS = {
"message_from_person_in_room": "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room...",
"message_from_person": "[%(app)s] You have a message on %(app)s from %(person)s...",
"messages_from_person": "[%(app)s] You have messages on %(app)s from %(person)s...",
"messages_in_room": "[%(app)s] You have messages on %(app)s in the %(room)s room...",
"messages_in_room_and_others": "[%(app)s] You have messages on %(app)s in the %(room)s room and others...",
"messages_from_person_and_others": "[%(app)s] You have messages on %(app)s from %(person)s and others...",
"invite_from_person": "[%(app)s] %(person)s has invited you to chat on %(app)s...",
"invite_from_person_to_room": "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s...",
"invite_from_person_to_space": "[%(app)s] %(person)s has invited you to join the %(space)s space on %(app)s...",
"password_reset": "[%(server_name)s] Password reset",
"email_validation": "[%(server_name)s] Validate your email",
}
LEGACY_TEMPLATE_DIR_WARNING = """
This server's configuration file is using the deprecated 'template_dir' setting in the
'email' section. Support for this setting has been deprecated and will be removed in a
future version of Synapse. Server admins should instead use the new
'custom_templates_directory' setting documented here:
https://matrix-org.github.io/synapse/latest/templates.html
---------------------------------------------------------------------------------------"""
@attr.s(slots=True, frozen=True, auto_attribs=True)
class EmailSubjectConfig:
message_from_person_in_room: str
message_from_person: str
messages_from_person: str
messages_in_room: str
messages_in_room_and_others: str
messages_from_person_and_others: str
invite_from_person: str
invite_from_person_to_room: str
invite_from_person_to_space: str
password_reset: str
email_validation: str
class EmailConfig(Config):
section = "email"
def read_config(self, config, **kwargs):
# TODO: We should separate better the email configuration from the notification
# and account validity config.
self.email_enable_notifs = False
email_config = config.get("email")
if email_config is None:
email_config = {}
self.email_smtp_host = email_config.get("smtp_host", "localhost")
self.email_smtp_port = email_config.get("smtp_port", 25)
self.email_smtp_user = email_config.get("smtp_user", None)
self.email_smtp_pass = email_config.get("smtp_pass", None)
self.require_transport_security = email_config.get(
"require_transport_security", False
)
self.enable_smtp_tls = email_config.get("enable_tls", True)
if self.require_transport_security and not self.enable_smtp_tls:
raise ConfigError(
"email.require_transport_security requires email.enable_tls to be true"
)
if "app_name" in email_config:
self.email_app_name = email_config["app_name"]
else:
self.email_app_name = "Matrix"
# TODO: Rename notif_from to something more generic, or have a separate
# from for password resets, message notifications, etc?
# Currently the email section is a bit bogged down with settings for
# multiple functions. Would be good to split it out into separate
# sections and only put the common ones under email:
self.email_notif_from = email_config.get("notif_from", None)
if self.email_notif_from is not None:
# make sure it's valid
parsed = email.utils.parseaddr(self.email_notif_from)
if parsed[1] == "":
raise RuntimeError("Invalid notif_from address")
# A user-configurable template directory
template_dir = email_config.get("template_dir")
if template_dir is not None:
logger.warning(LEGACY_TEMPLATE_DIR_WARNING)
if isinstance(template_dir, str):
# We need an absolute path, because we change directory after starting (and
# we don't yet know what auxiliary templates like mail.css we will need).
template_dir = os.path.abspath(template_dir)
elif template_dir is not None:
# If template_dir is something other than a str or None, warn the user
raise ConfigError("Config option email.template_dir must be type str")
self.email_enable_notifs = email_config.get("enable_notifs", False)
self.threepid_behaviour_email = (
# Have Synapse handle the email sending if account_threepid_delegates.email
# is not defined
# msisdn is currently always remote while Synapse does not support any method of
# sending SMS messages
ThreepidBehaviour.REMOTE
if self.root.registration.account_threepid_delegate_email
else ThreepidBehaviour.LOCAL
)
if config.get("trust_identity_server_for_password_resets"):
raise ConfigError(
'The config option "trust_identity_server_for_password_resets" '
'has been replaced by "account_threepid_delegate". '
"Please consult the sample config at docs/sample_config.yaml for "
"details and update your config file."
)
self.local_threepid_handling_disabled_due_to_email_config = False
if (
self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
and email_config == {}
):
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
self.local_threepid_handling_disabled_due_to_email_config = True
self.threepid_behaviour_email = ThreepidBehaviour.OFF
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
email_config.get("validation_token_lifetime", "1h")
)
if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
missing = []
if not self.email_notif_from:
missing.append("email.notif_from")
if missing:
raise ConfigError(
MISSING_PASSWORD_RESET_CONFIG_ERROR % (", ".join(missing),)
)
# These email templates have placeholders in them, and thus must be
# parsed using a templating engine during a request
password_reset_template_html = email_config.get(
"password_reset_template_html", "password_reset.html"
)
password_reset_template_text = email_config.get(
"password_reset_template_text", "password_reset.txt"
)
registration_template_html = email_config.get(
"registration_template_html", "registration.html"
)
registration_template_text = email_config.get(
"registration_template_text", "registration.txt"
)
add_threepid_template_html = email_config.get(
"add_threepid_template_html", "add_threepid.html"
)
add_threepid_template_text = email_config.get(
"add_threepid_template_text", "add_threepid.txt"
)
password_reset_template_failure_html = email_config.get(
"password_reset_template_failure_html", "password_reset_failure.html"
)
registration_template_failure_html = email_config.get(
"registration_template_failure_html", "registration_failure.html"
)
add_threepid_template_failure_html = email_config.get(
"add_threepid_template_failure_html", "add_threepid_failure.html"
)
# These templates do not support any placeholder variables, so we
# will read them from disk once during setup
password_reset_template_success_html = email_config.get(
"password_reset_template_success_html", "password_reset_success.html"
)
registration_template_success_html = email_config.get(
"registration_template_success_html", "registration_success.html"
)
add_threepid_template_success_html = email_config.get(
"add_threepid_template_success_html", "add_threepid_success.html"
)
# Read all templates from disk
(
self.email_password_reset_template_html,
self.email_password_reset_template_text,
self.email_registration_template_html,
self.email_registration_template_text,
self.email_add_threepid_template_html,
self.email_add_threepid_template_text,
self.email_password_reset_template_confirmation_html,
self.email_password_reset_template_failure_html,
self.email_registration_template_failure_html,
self.email_add_threepid_template_failure_html,
password_reset_template_success_html_template,
registration_template_success_html_template,
add_threepid_template_success_html_template,
) = self.read_templates(
[
password_reset_template_html,
password_reset_template_text,
registration_template_html,
registration_template_text,
add_threepid_template_html,
add_threepid_template_text,
"password_reset_confirmation.html",
password_reset_template_failure_html,
registration_template_failure_html,
add_threepid_template_failure_html,
password_reset_template_success_html,
registration_template_success_html,
add_threepid_template_success_html,
],
(
td
for td in (
self.root.server.custom_template_directory,
template_dir,
)
if td
), # Filter out template_dir if not provided
)
# Render templates that do not contain any placeholders
self.email_password_reset_template_success_html_content = (
password_reset_template_success_html_template.render()
)
self.email_registration_template_success_html_content = (
registration_template_success_html_template.render()
)
self.email_add_threepid_template_success_html_content = (
add_threepid_template_success_html_template.render()
)
if self.email_enable_notifs:
missing = []
if not self.email_notif_from:
missing.append("email.notif_from")
if missing:
raise ConfigError(
"email.enable_notifs is True but required keys are missing: %s"
% (", ".join(missing),)
)
notif_template_html = email_config.get(
"notif_template_html", "notif_mail.html"
)
notif_template_text = email_config.get(
"notif_template_text", "notif_mail.txt"
)
(
self.email_notif_template_html,
self.email_notif_template_text,
) = self.read_templates(
[notif_template_html, notif_template_text],
(
td
for td in (
self.root.server.custom_template_directory,
template_dir,
)
if td
), # Filter out template_dir if not provided
)
self.email_notif_for_new_users = email_config.get(
"notif_for_new_users", True
)
self.email_riot_base_url = email_config.get(
"client_base_url", email_config.get("riot_base_url", None)
)
if self.root.account_validity.account_validity_renew_by_email_enabled:
expiry_template_html = email_config.get(
"expiry_template_html", "notice_expiry.html"
)
expiry_template_text = email_config.get(
"expiry_template_text", "notice_expiry.txt"
)
(
self.account_validity_template_html,
self.account_validity_template_text,
) = self.read_templates(
[expiry_template_html, expiry_template_text],
(
td
for td in (
self.root.server.custom_template_directory,
template_dir,
)
if td
), # Filter out template_dir if not provided
)
subjects_config = email_config.get("subjects", {})
subjects = {}
for key, default in DEFAULT_SUBJECTS.items():
subjects[key] = subjects_config.get(key, default)
self.email_subjects = EmailSubjectConfig(**subjects)
# The invite client location should be a HTTP(S) URL or None.
self.invite_client_location = email_config.get("invite_client_location") or None
if self.invite_client_location:
if not isinstance(self.invite_client_location, str):
raise ConfigError(
"Config option email.invite_client_location must be type str"
)
if not (
self.invite_client_location.startswith("http://")
or self.invite_client_location.startswith("https://")
):
raise ConfigError(
"Config option email.invite_client_location must be a http or https URL",
path=("email", "invite_client_location"),
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return (
"""\
# Configuration for sending emails from Synapse.
#
# Server admins can configure custom templates for email content. See
# https://matrix-org.github.io/synapse/latest/templates.html for more information.
#
email:
# The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
#
#smtp_host: mail.server
# The port on the mail server for outgoing SMTP. Defaults to 25.
#
#smtp_port: 587
# Username/password for authentication to the SMTP server. By default, no
# authentication is attempted.
#
#smtp_user: "exampleusername"
#smtp_pass: "examplepassword"
# Uncomment the following to require TLS transport security for SMTP.
# By default, Synapse will connect over plain text, and will then switch to
# TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
# Synapse will refuse to connect unless the server supports STARTTLS.
#
#require_transport_security: true
# Uncomment the following to disable TLS for SMTP.
#
# By default, if the server supports TLS, it will be used, and the server
# must present a certificate that is valid for 'smtp_host'. If this option
# is set to false, TLS will not be used.
#
#enable_tls: false
# notif_from defines the "From" address to use when sending emails.
# It must be set if email sending is enabled.
#
# The placeholder '%%(app)s' will be replaced by the application name,
# which is normally 'app_name' (below), but may be overridden by the
# Matrix client application.
#
# Note that the placeholder must be written '%%(app)s', including the
# trailing 's'.
#
#notif_from: "Your Friendly %%(app)s homeserver <[email protected]>"
# app_name defines the default value for '%%(app)s' in notif_from and email
# subjects. It defaults to 'Matrix'.
#
#app_name: my_branded_matrix_server
# Uncomment the following to enable sending emails for messages that the user
# has missed. Disabled by default.
#
#enable_notifs: true
# Uncomment the following to disable automatic subscription to email
# notifications for new users. Enabled by default.
#
#notif_for_new_users: false
# Custom URL for client links within the email notifications. By default
# links will be based on "https://matrix.to".
#
# (This setting used to be called riot_base_url; the old name is still
# supported for backwards-compatibility but is now deprecated.)
#
#client_base_url: "http://localhost/riot"
# Configure the time that a validation email will expire after sending.
# Defaults to 1h.
#
#validation_token_lifetime: 15m
# The web client location to direct users to during an invite. This is passed
# to the identity server as the org.matrix.web_client_location key. Defaults
# to unset, giving no guidance to the identity server.
#
#invite_client_location: https://app.element.io
# Subjects to use when sending emails from Synapse.
#
# The placeholder '%%(app)s' will be replaced with the value of the 'app_name'
# setting above, or by a value dictated by the Matrix client application.
#
          # If a subject isn't overridden in this configuration file, the example
          # value shown for it below will be used.
#
#subjects:
# Subjects for notification emails.
#
# On top of the '%%(app)s' placeholder, these can use the following
# placeholders:
#
# * '%%(person)s', which will be replaced by the display name of the user(s)
# that sent the message(s), e.g. "Alice and Bob".
# * '%%(room)s', which will be replaced by the name of the room the
# message(s) have been sent to, e.g. "My super room".
#
          # See the example provided for each setting to see which placeholders can be
# used and how to use them.
#
# Subject to use to notify about one message from one or more user(s) in a
# room which has a name.
#message_from_person_in_room: "%(message_from_person_in_room)s"
#
# Subject to use to notify about one message from one or more user(s) in a
# room which doesn't have a name.
#message_from_person: "%(message_from_person)s"
#
# Subject to use to notify about multiple messages from one or more users in
# a room which doesn't have a name.
#messages_from_person: "%(messages_from_person)s"
#
# Subject to use to notify about multiple messages in a room which has a
# name.
#messages_in_room: "%(messages_in_room)s"
#
# Subject to use to notify about multiple messages in multiple rooms.
#messages_in_room_and_others: "%(messages_in_room_and_others)s"
#
# Subject to use to notify about multiple messages from multiple persons in
# multiple rooms. This is similar to the setting above except it's used when
# the room in which the notification was triggered has no name.
#messages_from_person_and_others: "%(messages_from_person_and_others)s"
#
# Subject to use to notify about an invite to a room which has a name.
#invite_from_person_to_room: "%(invite_from_person_to_room)s"
#
# Subject to use to notify about an invite to a room which doesn't have a
# name.
#invite_from_person: "%(invite_from_person)s"
          # Subjects for emails related to account administration.
#
          # On top of the '%%(app)s' placeholder, these can also use the
# '%%(server_name)s' placeholder, which will be replaced by the value of the
# 'server_name' setting in your Synapse configuration.
#
# Subject to use when sending a password reset email.
#password_reset: "%(password_reset)s"
#
# Subject to use when sending a verification email to assert an address's
# ownership.
#email_validation: "%(email_validation)s"
"""
% DEFAULT_SUBJECTS
)
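    # For reference, a minimal example of the YAML this method generates once a
    # few options are uncommented (the values below are illustrative only, not
    # defaults):
    #
    #   email:
    #     smtp_host: mail.example.com
    #     smtp_port: 587
    #     smtp_user: "synapse"
    #     smtp_pass: "secret"
    #     require_transport_security: true
    #     notif_from: "Your Friendly %(app)s homeserver <[email protected]>"
    #     app_name: my_branded_matrix_server
    #     enable_notifs: true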
class ThreepidBehaviour(Enum):
"""
    Enum to define the behaviour of Synapse with regard to when it contacts an
    identity server for 3pid registration and password resets.
REMOTE = use an external server to send tokens
LOCAL = send tokens ourselves
OFF = disable registration via 3pid and password resets
"""
REMOTE = "remote"
LOCAL = "local"
OFF = "off"
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import pkg_resources
import requests
import unittest
from six.moves import urllib
from mock import patch
from mock import MagicMock as Mock
import pyrax
import pyrax.utils as utils
import pyrax.exceptions as exc
from pyrax import client
from pyrax.client import _safe_quote
from pyrax import fakes
DUMMY_URL = "http://example.com"
ID_CLS = pyrax.settings.get("identity_class") or pyrax.rax_identity.RaxIdentity
class ClientTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ClientTest, self).__init__(*args, **kwargs)
def setUp(self):
save_conf = client.BaseClient._configure_manager
client.BaseClient._configure_manager = Mock()
self.identity = pyrax.identity = ID_CLS()
self.client = client.BaseClient(self.identity)
client.BaseClient._configure_manager = save_conf
self.client._manager = fakes.FakeManager()
def tearDown(self):
self.client = None
def test_safe_quote_ascii(self):
ret = _safe_quote("test")
expected = "test"
self.assertEqual(ret, expected)
def test_safe_quote_unicode(self):
ret = _safe_quote(unichr(1000))
expected = "%CF%A8"
self.assertEqual(ret, expected)
def test_base_client(self):
tenant_id = "faketenantid"
auth_url = "fakeauthurl"
region_name = "fakeregion"
endpoint_type = "fakeenpointtype"
management_url = "fakemanagementurl"
auth_token = "fakeauthtoken"
service_name = "fakeservicename"
timings = "faketimings"
no_cache = "fakenocache"
http_log_debug = "fakehttplogdebug"
timeout = "faketimeout"
auth_system = "fakeauthsystem"
save_conf = client.BaseClient._configure_manager
client.BaseClient._configure_manager = Mock()
bc = client.BaseClient(identity=self.identity, region_name=region_name,
endpoint_type=endpoint_type, management_url=management_url,
service_name=service_name, timings=timings,
http_log_debug=http_log_debug, timeout=timeout,)
self.assertEqual(bc.region_name, region_name)
self.assertEqual(bc.endpoint_type, endpoint_type)
self.assertEqual(bc.management_url, management_url)
self.assertEqual(bc.service_name, service_name)
self.assertEqual(bc.timings, timings)
self.assertEqual(bc.http_log_debug, http_log_debug)
self.assertEqual(bc.timeout, timeout)
client.BaseClient._configure_manager = save_conf
def test_configure_manager(self):
self.assertRaises(NotImplementedError, client.BaseClient, self.identity)
def test_list(self):
mgr = self.client._manager
sav = mgr.list
mgr.list = Mock()
self.client.list()
mgr.list.assert_called_once_with(limit=None, marker=None)
mgr.list = sav
def test_list_limit(self):
mgr = self.client._manager
sav = mgr.list
mgr.list = Mock()
self.client.list(limit=10, marker="abc")
mgr.list.assert_called_once_with(limit=10, marker="abc")
mgr.list = sav
def test_get(self):
mgr = self.client._manager
sav = mgr.get
mgr.get = Mock()
self.client.get("val")
mgr.get.assert_called_once_with("val")
mgr.get = sav
def test_delete(self):
mgr = self.client._manager
sav = mgr.delete
mgr.delete = Mock()
self.client.delete("val")
mgr.delete.assert_called_once_with("val")
mgr.delete = sav
def test_create(self):
mgr = self.client._manager
sav = mgr.create
mgr.create = Mock()
self.client.create("val")
mgr.create.assert_called_once_with("val")
mgr.create = sav
def test_find(self):
mgr = self.client._manager
mgr.find = Mock()
prop = utils.random_unicode()
val = utils.random_unicode()
self.client.find(prop=val)
mgr.find.assert_called_once_with(prop=val)
def test_findall(self):
mgr = self.client._manager
mgr.findall = Mock()
prop = utils.random_unicode()
val = utils.random_unicode()
self.client.findall(prop=val)
mgr.findall.assert_called_once_with(prop=val)
def test_unauthenticate(self):
clt = self.client
id_svc = clt.identity
clt.unauthenticate()
self.assertEqual(id_svc.token, "")
def test_get_timings(self):
clt = self.client
clt.times = expected = [1, 2, 3]
self.assertEqual(clt.get_timings(), expected)
def test_reset_timings(self):
clt = self.client
clt.times = [1, 2, 3]
clt.reset_timings()
self.assertEqual(clt.get_timings(), [])
def test_get_limits(self):
clt = self.client
data = utils.random_unicode()
clt.method_get = Mock(return_value=(None, data))
ret = clt.get_limits()
self.assertEqual(ret, data)
@patch("pyrax.http.request")
def test_request_ok(self, mock_req):
clt = self.client
clt.http_log_debug = False
clt.timeout = utils.random_unicode()
fakeresp = fakes.FakeResponse()
fakeresp.status_code = 200
body_content = {"one": 2, "three": 4}
fake_uri = utils.random_unicode()
fake_method = utils.random_unicode()
mock_req.return_value = (fakeresp, body_content)
resp, body = clt.request(fake_uri, fake_method, body="text")
self.assertTrue(isinstance(resp, fakes.FakeResponse))
self.assertEqual(resp.status_code, 200)
self.assertEqual(body, body_content)
@patch("pyrax.http.request")
def test_request_content_type_header(self, mock_req):
clt = self.client
clt.http_log_debug = False
clt.timeout = utils.random_unicode()
fakeresp = fakes.FakeResponse()
fakeresp.status_code = 200
body_content = {"one": 2, "three": 4}
body = "text"
headers = {"Content-Type": None}
fake_uri = utils.random_unicode()
fake_method = utils.random_unicode()
mock_req.return_value = (fakeresp, body_content)
resp, body = clt.request(fake_uri, fake_method, body=body,
headers=headers)
self.assertTrue(isinstance(resp, fakes.FakeResponse))
self.assertEqual(resp.status_code, 200)
self.assertEqual(body, body_content)
@patch("pyrax.exceptions.from_response")
@patch("pyrax.http.request")
def test_request_400(self, mock_req, mock_from):
clt = self.client
clt.http_log_debug = False
fakeresp = fakes.FakeResponse()
fakeresp.status_code = 400
body_content = {"one": 2, "three": 4}
fakebody = json.dumps(body_content)
fake_uri = utils.random_unicode()
fake_method = utils.random_unicode()
mock_req.return_value = (fakeresp, fakebody)
mock_from.side_effect = fakes.FakeException
self.assertRaises(fakes.FakeException, clt.request, fake_uri,
fake_method)
@patch("pyrax.exceptions.from_response")
@patch("pyrax.http.request")
def test_request_no_json_resp(self, mock_req, mock_from):
clt = self.client
clt.http_log_debug = False
fakeresp = fakes.FakeResponse()
fakeresp.status_code = 400
body_content = {"one": 2, "three": 4}
fakebody = json.dumps(body_content)
# Test non-json response
fakebody = "{{{{{{"
fake_uri = utils.random_unicode()
fake_method = utils.random_unicode()
mock_req.return_value = (fakeresp, fakebody)
mock_from.side_effect = fakes.FakeException
self.assertRaises(fakes.FakeException, clt.request, fake_uri,
fake_method)
@patch("pyrax.exceptions.from_response")
@patch("pyrax.http.request")
def test_request_empty_body(self, mock_req, mock_from):
clt = self.client
clt.http_log_debug = False
fakeresp = fakes.FakeResponse()
fakeresp.status_code = 400
body_content = {"one": 2, "three": 4}
fakebody = json.dumps(body_content)
fakebody = ""
fake_uri = utils.random_unicode()
fake_method = utils.random_unicode()
mock_req.return_value = (fakeresp, fakebody)
mock_from.side_effect = fakes.FakeException
self.assertRaises(fakes.FakeException, clt.request, fake_uri,
fake_method)
mock_from.assert_called_once_with(fakeresp, "")
def test_time_request(self):
clt = self.client
sav = clt.request
clt.request = Mock()
url = DUMMY_URL
method = "PUT"
clt.request(url, method)
clt.request.assert_called_once_with(url, method)
clt.request = sav
def test_api_request_expired(self):
clt = self.client
id_svc = clt.identity
sav_auth = id_svc.authenticate
returns = [exc.Unauthorized(""), (fakes.FakeIdentityResponse(),
fakes.fake_identity_response)]
def auth_resp(*args, **kwargs):
result = returns.pop(0)
if isinstance(result, Exception):
raise result
return result
id_svc.authenticate = Mock()
sav_req = clt.request
clt.request = Mock(side_effect=auth_resp)
url = DUMMY_URL
method = "PUT"
clt.unauthenticate()
clt.management_url = url
id_svc.token = ""
id_svc.tenant_id = utils.random_unicode()
clt._api_request(url, method)
self.assertEqual(id_svc.authenticate.call_count, 2)
clt.request = sav_req
id_svc.authenticate = sav_auth
def test_api_request_not_authed(self):
clt = self.client
id_svc = clt.identity
sav_auth = id_svc.authenticate
id_svc.authenticate = Mock()
sav_req = clt.request
clt.request = Mock(return_value=(1, 1))
url = DUMMY_URL
method = "PUT"
clt.unauthenticate()
clt.management_url = url
id_svc.token = ""
id_svc.tenant_id = utils.random_unicode()
clt._api_request(url, method)
id_svc.authenticate.assert_called_once_with()
clt.request = sav_req
id_svc.authenticate = sav_auth
def test_api_request_auth_failed(self):
clt = self.client
id_svc = clt.identity
sav_auth = id_svc.authenticate
id_svc.authenticate = Mock()
sav_req = clt.request
clt.request = Mock(return_value=(1, 1))
url = DUMMY_URL
method = "PUT"
clt.request = Mock(side_effect=exc.Unauthorized(""))
clt.management_url = clt.auth_token = "test"
self.assertRaises(exc.Unauthorized, clt._api_request, url, method)
clt.request = sav_req
        id_svc.authenticate = sav_auth
def test_api_request_service_unavailable(self):
clt = self.client
id_svc = clt.identity
sav_auth = id_svc.authenticate
id_svc.authenticate = Mock()
sav_req = clt.request
clt.request = Mock(return_value=(1, 1))
url = DUMMY_URL
method = "GET"
clt.request = Mock(side_effect=exc.Unauthorized(""))
clt.management_url = ""
self.assertRaises(exc.ServiceNotAvailable, clt._api_request, url,
method)
clt.request = sav_req
id_svc.authenticate = sav_auth
def test_api_request_url_quoting(self):
clt = self.client
id_svc = clt.identity
sav_mgt = clt.management_url
clt.management_url = "/FAKE"
sav_auth = id_svc.authenticate
id_svc.authenticate = Mock()
sav_req = clt._time_request
clt._time_request = Mock(return_value=((None, None)))
uri = "/abc/[email protected]"
expected = "%s%s" % (clt.management_url, urllib.parse.quote(uri,
safe="/.?="))
clt._api_request(uri, "GET")
clt._time_request.assert_called_once_with(expected, 'GET',
headers={'X-Auth-Token': None})
id_svc.authenticate = sav_auth
clt._time_request = sav_req
clt.management_url = sav_mgt
def test_api_request_url_safe_quoting(self):
clt = self.client
id_svc = clt.identity
sav_mgt = clt.management_url
clt.management_url = "/FAKE"
sav_auth = id_svc.authenticate
id_svc.authenticate = Mock()
sav_req = clt._time_request
clt._time_request = Mock(return_value=((None, None)))
uri = "/abc/def"
expected = "%s%s" % (clt.management_url, urllib.parse.quote(uri,
safe="/.?="))
clt._api_request(uri, "GET")
clt._time_request.assert_called_once_with(expected, 'GET',
headers={'X-Auth-Token': None})
id_svc.authenticate = sav_auth
clt._time_request = sav_req
clt.management_url = sav_mgt
def test_method_head(self):
clt = self.client
sav = clt._api_request
clt._api_request = Mock()
url = DUMMY_URL
clt.method_head(url)
clt._api_request.assert_called_once_with(url, "HEAD")
clt._api_request = sav
def test_method_get(self):
clt = self.client
sav = clt._api_request
clt._api_request = Mock()
url = DUMMY_URL
clt.method_get(url)
clt._api_request.assert_called_once_with(url, "GET")
clt._api_request = sav
def test_method_post(self):
clt = self.client
sav = clt._api_request
clt._api_request = Mock()
url = DUMMY_URL
clt.method_post(url)
clt._api_request.assert_called_once_with(url, "POST")
clt._api_request = sav
def test_method_put(self):
clt = self.client
sav = clt._api_request
clt._api_request = Mock()
url = DUMMY_URL
clt.method_put(url)
clt._api_request.assert_called_once_with(url, "PUT")
clt._api_request = sav
def test_method_delete(self):
clt = self.client
sav = clt._api_request
clt._api_request = Mock()
url = DUMMY_URL
clt.method_delete(url)
clt._api_request.assert_called_once_with(url, "DELETE")
clt._api_request = sav
def test_method_patch(self):
clt = self.client
sav = clt._api_request
clt._api_request = Mock()
url = DUMMY_URL
clt.method_patch(url)
clt._api_request.assert_called_once_with(url, "PATCH")
clt._api_request = sav
def test_authenticate(self):
clt = self.client
sav_auth = clt.identity.authenticate
clt.identity.authenticate = Mock()
ret = clt.authenticate()
clt.identity.authenticate.assert_called_once_with()
clt.identity.authenticate = sav_auth
def test_project_id(self):
clt = self.client
id_svc = clt.identity
id_svc.tenant_id = "FAKE"
self.assertEqual(clt.projectid, "FAKE")
if __name__ == "__main__":
unittest.main()
|
|
"""Message class."""
import sys
from .compression import decompress
from .exceptions import reraise, MessageStateError
from .serialization import loads
from .utils.functional import dictfilter
__all__ = ('Message',)
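# Terminal delivery states: once a message reaches one of these, calling
# ack()/reject()/requeue() again raises MessageStateError (see `acknowledged`).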
ACK_STATES = {'ACK', 'REJECTED', 'REQUEUED'}
IS_PYPY = hasattr(sys, 'pypy_version_info')
class Message:
"""Base class for received messages.
Keyword Arguments:
channel (ChannelT): If message was received, this should be the
channel that the message was received on.
body (str): Message body.
delivery_mode (bool): Set custom delivery mode.
Defaults to :attr:`delivery_mode`.
priority (int): Message priority, 0 to broker configured
max priority, where higher is better.
        content_type (str): The message's content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
content_encoding (str): The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
properties (Dict): Message properties.
headers (Dict): Message headers.
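
    Example:
        A minimal construction sketch (all values below are illustrative; in
        normal use the transport layer builds messages for you)::

            from kombu.message import Message

            message = Message(
                body=b'{"hello": "world"}',
                content_type='application/json',
                content_encoding='utf-8',
                delivery_tag=1,
            )
            message.payload  # -> {'hello': 'world'}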
"""
MessageStateError = MessageStateError
errors = None
if not IS_PYPY: # pragma: no cover
__slots__ = (
'_state', 'channel', 'delivery_tag',
'content_type', 'content_encoding',
'delivery_info', 'headers', 'properties',
'body', '_decoded_cache', 'accept', '__dict__',
)
def __init__(self, body=None, delivery_tag=None,
content_type=None, content_encoding=None, delivery_info=None,
properties=None, headers=None, postencode=None,
accept=None, channel=None, **kwargs):
delivery_info = {} if not delivery_info else delivery_info
self.errors = [] if self.errors is None else self.errors
self.channel = channel
self.delivery_tag = delivery_tag
self.content_type = content_type
self.content_encoding = content_encoding
self.delivery_info = delivery_info
self.headers = headers or {}
self.properties = properties or {}
self._decoded_cache = None
self._state = 'RECEIVED'
self.accept = accept
compression = self.headers.get('compression')
if not self.errors and compression:
try:
body = decompress(body, compression)
except Exception:
self.errors.append(sys.exc_info())
if not self.errors and postencode and isinstance(body, str):
try:
body = body.encode(postencode)
except Exception:
self.errors.append(sys.exc_info())
self.body = body
def _reraise_error(self, callback=None):
try:
reraise(*self.errors[0])
except Exception as exc:
if not callback:
raise
callback(self, exc)
def ack(self, multiple=False):
"""Acknowledge this message as being processed.
This will remove the message from the queue.
Raises:
MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.channel is None:
raise self.MessageStateError(
'This message does not have a receiving channel')
if self.channel.no_ack_consumers is not None:
try:
consumer_tag = self.delivery_info['consumer_tag']
except KeyError:
pass
else:
if consumer_tag in self.channel.no_ack_consumers:
return
if self.acknowledged:
raise self.MessageStateError(
'Message already acknowledged with state: {0._state}'.format(
self))
self.channel.basic_ack(self.delivery_tag, multiple=multiple)
self._state = 'ACK'
def ack_log_error(self, logger, errors, multiple=False):
try:
self.ack(multiple=multiple)
except errors as exc:
logger.critical("Couldn't ack %r, reason:%r",
self.delivery_tag, exc, exc_info=True)
def reject_log_error(self, logger, errors, requeue=False):
try:
self.reject(requeue=requeue)
except errors as exc:
logger.critical("Couldn't reject %r, reason: %r",
self.delivery_tag, exc, exc_info=True)
def reject(self, requeue=False):
"""Reject this message.
The message will be discarded by the server.
Raises:
MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.channel is None:
raise self.MessageStateError(
'This message does not have a receiving channel')
if self.acknowledged:
raise self.MessageStateError(
'Message already acknowledged with state: {0._state}'.format(
self))
self.channel.basic_reject(self.delivery_tag, requeue=requeue)
self._state = 'REJECTED'
def requeue(self):
"""Reject this message and put it back on the queue.
Warning:
You must not use this method as a means of selecting messages
to process.
Raises:
MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.channel is None:
raise self.MessageStateError(
'This message does not have a receiving channel')
if self.acknowledged:
raise self.MessageStateError(
'Message already acknowledged with state: {0._state}'.format(
self))
self.channel.basic_reject(self.delivery_tag, requeue=True)
self._state = 'REQUEUED'
def decode(self):
"""Deserialize the message body.
        Returns the original Python structure sent by the publisher.
Note:
The return value is memoized, use `_decode` to force
re-evaluation.
"""
if not self._decoded_cache:
self._decoded_cache = self._decode()
return self._decoded_cache
def _decode(self):
return loads(self.body, self.content_type,
self.content_encoding, accept=self.accept)
@property
def acknowledged(self):
"""Set to true if the message has been acknowledged."""
return self._state in ACK_STATES
@property
def payload(self):
"""The decoded message body."""
return self._decoded_cache if self._decoded_cache else self.decode()
def __repr__(self):
return '<{} object at {:#x} with details {!r}>'.format(
type(self).__name__, id(self), dictfilter(
state=self._state,
content_type=self.content_type,
delivery_tag=self.delivery_tag,
body_length=len(self.body) if self.body is not None else None,
properties=dictfilter(
correlation_id=self.properties.get('correlation_id'),
type=self.properties.get('type'),
),
delivery_info=dictfilter(
exchange=self.delivery_info.get('exchange'),
routing_key=self.delivery_info.get('routing_key'),
),
),
)
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from contextlib import nested
import mock
from test.unit import FakeLogger
from swift.container import sync
from swift.common import utils
from swift.common.exceptions import ClientException
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'endcap'
class FakeRing(object):
def __init__(self):
self.devs = [{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'}
for x in xrange(3)]
def get_nodes(self, account, container=None, obj=None):
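        # Mimics swift's Ring.get_nodes(), which returns (partition, nodes);
        # the partition value is irrelevant for these tests.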
return 1, list(self.devs)
class FakeContainerBroker(object):
def __init__(self, path, metadata=None, info=None, deleted=False,
items_since=None):
self.db_file = path
self.metadata = metadata if metadata else {}
self.info = info if info else {}
self.deleted = deleted
self.items_since = items_since if items_since else []
self.sync_point1 = -1
self.sync_point2 = -1
def get_info(self):
return self.info
def is_deleted(self):
return self.deleted
def get_items_since(self, sync_point, limit):
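        # Simplified stand-in for ContainerBroker.get_items_since(): return up
        # to `limit` fake rows that come after the given sync point.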
if sync_point < 0:
sync_point = 0
return self.items_since[sync_point:sync_point + limit]
def set_x_container_sync_points(self, sync_point1, sync_point2):
self.sync_point1 = sync_point1
self.sync_point2 = sync_point2
class TestContainerSync(unittest.TestCase):
def test_FileLikeIter(self):
# Retained test to show new FileLikeIter acts just like the removed
# _Iter2FileLikeObject did.
flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
expect = '1234567890'
got = flo.read(2)
self.assertTrue(len(got) <= 2)
self.assertEquals(got, expect[:len(got)])
expect = expect[len(got):]
got = flo.read(5)
self.assertTrue(len(got) <= 5)
self.assertEquals(got, expect[:len(got)])
expect = expect[len(got):]
self.assertEquals(flo.read(), expect)
self.assertEquals(flo.read(), '')
self.assertEquals(flo.read(2), '')
flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
self.assertEquals(flo.read(), '1234567890')
self.assertEquals(flo.read(), '')
self.assertEquals(flo.read(2), '')
def test_init(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
self.assertTrue(cs.container_ring is cring)
self.assertTrue(cs.object_ring is oring)
def test_run_forever(self):
        # This runs run_forever with fakes to succeed for two loops, the first
# causing a report but no interval sleep, the second no report but an
# interval sleep.
time_calls = [0]
sleep_calls = []
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # Elapsed time for "under interval" (no)
3602, # Start time
3603, # Is it report time (no)
3603] # Elapsed time for "under interval" (yes)
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_sleep(amount):
sleep_calls.append(amount)
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit
yield 'container.db', 'device', 'partition'
return
orig_time = sync.time
orig_sleep = sync.sleep
orig_ContainerBroker = sync.ContainerBroker
orig_audit_location_generator = sync.audit_location_generator
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c'})
sync.time = fake_time
sync.sleep = fake_sleep
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
sync.audit_location_generator = fake_audit_location_generator
cs.run_forever(1, 2, a=3, b=4, verbose=True)
except Exception as err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.sleep = orig_sleep
sync.audit_location_generator = orig_audit_location_generator
sync.ContainerBroker = orig_ContainerBroker
self.assertEquals(time_calls, [9])
self.assertEquals(len(sleep_calls), 2)
self.assertTrue(sleep_calls[0] <= cs.interval)
self.assertTrue(sleep_calls[1] == cs.interval - 1)
self.assertEquals(audit_location_generator_calls, [2])
self.assertEquals(cs.reported, 3602)
def test_run_once(self):
        # This runs run_once with fakes twice, the first causing an interim
# report, the second with no interim report.
time_calls = [0]
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # End report time
3602, # For elapsed
3602, # Start time
3603, # Is it report time (no)
3604, # End report time
3605] # For elapsed
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit
yield 'container.db', 'device', 'partition'
return
orig_time = sync.time
orig_audit_location_generator = sync.audit_location_generator
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c'})
sync.time = fake_time
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
sync.audit_location_generator = fake_audit_location_generator
cs.run_once(1, 2, a=3, b=4, verbose=True)
self.assertEquals(time_calls, [6])
self.assertEquals(audit_location_generator_calls, [1])
self.assertEquals(cs.reported, 3602)
cs.run_once()
except Exception as err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.audit_location_generator = orig_audit_location_generator
sync.ContainerBroker = orig_ContainerBroker
self.assertEquals(time_calls, [10])
self.assertEquals(audit_location_generator_calls, [2])
self.assertEquals(cs.reported, 3604)
def test_container_sync_not_db(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
self.assertEquals(cs.container_failures, 0)
def test_container_sync_missing_db(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
def test_container_sync_not_my_db(self):
# Db could be there due to handoff replication so test that we ignore
# those.
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c'})
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1000 # Match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_deleted(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c'}, deleted=False)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c'}, deleted=True)
# This complete match will not cause any more container failures
# since the broker indicates deletion
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_no_to_or_key(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to or x-container-sync-key
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-key
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 2)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = []
# This complete match will cause a container failure since the
# sync-to won't validate as allowed.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This complete match will succeed completely since the broker
# get_items_since will return no new rows.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 3)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_stop_at(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_time = sync.time
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=['erroneous data'])
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This sync will fail since the items_since data is bad.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
# Set up fake times to make the sync short-circuit as having taken
# too long
fake_times = [
1.0, # Compute the time to move on
100000.0, # Compute if it's time to move on from first loop
100000.0] # Compute if it's time to move on from second loop
def fake_time():
return fake_times.pop(0)
sync.time = fake_time
# This same sync won't fail since it will look like it took so long
# as to be time to move on (before it ever actually tries to do
# anything).
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.time = orig_time
def test_container_first_loop(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for full syncing, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with nested(
mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb),
mock.patch('swift.container.sync.hash_path', fake_hash_path)):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, -1)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for full syncing, ordinal is 0
# and all hashes are 1
return '\x01' * 16
fcb = FakeContainerBroker('path', info={'account': 'a',
'container': 'c',
'x_container_sync_point1': 1,
'x_container_sync_point2': 1},
metadata={'x-container-sync-to':
('http://127.0.0.1/a/c', 1),
'x-container-sync-key':
('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with nested(
mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb),
mock.patch('swift.container.sync.hash_path', fake_hash_path)):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because the two sync points haven't deviated yet
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with mock.patch('swift.container.sync.ContainerBroker', lambda p: fcb):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because container_sync_row will fail since the row has no
# 'deleted' key
self.assertEquals(cs.container_failures, 2)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, -1)
def fake_delete_object(*args, **kwargs):
raise ClientException
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
with nested(
mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb),
mock.patch('swift.container.sync.delete_object',
fake_delete_object)):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because delete_object fails
self.assertEquals(cs.container_failures, 3)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
with nested(
mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb),
mock.patch('swift.container.sync.delete_object',
lambda *x, **y: None)):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because delete_object succeeds
self.assertEquals(cs.container_failures, 3)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, 1)
def test_container_second_loop(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
try:
# We'll ensure the first loop is always skipped by keeping the two
# sync points equal
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for second loop, ordinal is 0 and
# all hashes are 1
return '\x01' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, 1)
self.assertEquals(fcb.sync_point2, None)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for second loop, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
def fake_delete_object(*args, **kwargs):
pass
sync.hash_path = fake_hash_path
sync.delete_object = fake_delete_object
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because row is missing 'deleted' key
# Nevertheless the fault is skipped
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, 1)
self.assertEquals(fcb.sync_point2, None)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because row now has 'deleted' key and delete_object
# succeeds
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, 1)
self.assertEquals(fcb.sync_point2, None)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.hash_path = orig_hash_path
sync.delete_object = orig_delete_object
def test_container_sync_row_delete(self):
self._test_container_sync_row_delete(None, None)
def test_container_sync_row_delete_using_realms(self):
self._test_container_sync_row_delete('US', 'realm_key')
def _test_container_sync_row_delete(self, realm, realm_key):
orig_uuid = sync.uuid
orig_delete_object = sync.delete_object
try:
class FakeUUID(object):
class uuid4(object):
hex = 'abcdef'
sync.uuid = FakeUUID
def fake_delete_object(path, name=None, headers=None, proxy=None):
self.assertEquals(path, 'http://sync/to/path')
self.assertEquals(name, 'object')
if realm:
self.assertEquals(headers, {
'x-container-sync-auth':
'US abcdef 90e95aabb45a6cdc0892a3db5535e7f918428c90',
'x-timestamp': '1.2'})
else:
self.assertEquals(
headers,
{'x-container-sync-key': 'key', 'x-timestamp': '1.2'})
self.assertEquals(proxy, 'http://proxy')
sync.delete_object = fake_delete_object
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.http_proxies = ['http://proxy']
# Success
self.assertTrue(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info', realm,
realm_key))
self.assertEquals(cs.container_deletes, 1)
exc = []
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(Exception('test exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info', realm,
realm_key))
self.assertEquals(cs.container_deletes, 1)
self.assertEquals(len(exc), 1)
self.assertEquals(str(exc[-1]), 'test exception')
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info', realm,
realm_key))
self.assertEquals(cs.container_deletes, 1)
self.assertEquals(len(exc), 2)
self.assertEquals(str(exc[-1]), 'test client exception')
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(ClientException('test client exception',
http_status=404))
raise exc[-1]
sync.delete_object = fake_delete_object
# Success because the object wasn't even found
self.assertTrue(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info', realm,
realm_key))
self.assertEquals(cs.container_deletes, 2)
self.assertEquals(len(exc), 3)
self.assertEquals(str(exc[-1]), 'test client exception: 404')
finally:
sync.uuid = orig_uuid
sync.delete_object = orig_delete_object
def test_container_sync_row_put(self):
self._test_container_sync_row_put(None, None)
def test_container_sync_row_put_using_realms(self):
self._test_container_sync_row_put('US', 'realm_key')
def _test_container_sync_row_put(self, realm, realm_key):
orig_uuid = sync.uuid
orig_shuffle = sync.shuffle
orig_put_object = sync.put_object
orig_direct_get_object = sync.direct_get_object
try:
class FakeUUID(object):
class uuid4(object):
hex = 'abcdef'
sync.uuid = FakeUUID
sync.shuffle = lambda x: x
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
self.assertEquals(sync_to, 'http://sync/to/path')
self.assertEquals(name, 'object')
if realm:
self.assertEqual(headers, {
'x-container-sync-auth':
'US abcdef ef62c64bb88a33fa00722daa23d5d43253164962',
'x-timestamp': '1.2',
'etag': 'etagvalue',
'other-header': 'other header value'})
else:
self.assertEquals(headers, {
'x-container-sync-key': 'key',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': 'etagvalue'})
self.assertEquals(contents.read(), 'contents')
self.assertEquals(proxy, 'http://proxy')
sync.put_object = fake_put_object
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.http_proxies = ['http://proxy']
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'other-header': 'other header value',
'etag': '"etagvalue"', 'x-timestamp': '1.2'},
iter('contents'))
sync.direct_get_object = fake_direct_get_object
# Success as everything says it worked
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {
'account': 'a',
'container': 'c'}, realm, realm_key))
self.assertEquals(cs.container_puts, 1)
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'date': 'date value',
'last-modified': 'last modified value',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': '"etagvalue"'},
iter('contents'))
sync.direct_get_object = fake_direct_get_object
# Success as everything says it worked, also checks 'date' and
# 'last-modified' headers are removed and that 'etag' header is
# stripped of double quotes.
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {
'account': 'a',
'container': 'c'}, realm, realm_key))
self.assertEquals(cs.container_puts, 2)
exc = []
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
exc.append(Exception('test exception'))
raise exc[-1]
sync.direct_get_object = fake_direct_get_object
# Fail due to completely unexpected exception
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {
'account': 'a',
'container': 'c'}, realm, realm_key))
self.assertEquals(cs.container_puts, 2)
self.assertEquals(len(exc), 3)
self.assertEquals(str(exc[-1]), 'test exception')
exc = []
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.direct_get_object = fake_direct_get_object
# Fail due to all direct_get_object calls failing
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {
'account': 'a',
'container': 'c'}, realm, realm_key))
self.assertEquals(cs.container_puts, 2)
self.assertEquals(len(exc), 3)
self.assertEquals(str(exc[-1]), 'test client exception')
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'other-header': 'other header value',
'x-timestamp': '1.2', 'etag': '"etagvalue"'},
iter('contents'))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=401)
sync.direct_get_object = fake_direct_get_object
sync.put_object = fake_put_object
cs.logger = FakeLogger()
# Fail due to 401
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {
'account': 'a',
'container': 'c'}, realm, realm_key))
self.assertEquals(cs.container_puts, 2)
self.assert_(re.match('Unauth ',
cs.logger.log_dict['info'][0][0][0]))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=404)
sync.put_object = fake_put_object
# Fail due to 404
cs.logger = FakeLogger()
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {
'account': 'a',
'container': 'c'}, realm, realm_key))
self.assertEquals(cs.container_puts, 2)
self.assert_(re.match('Not found ',
cs.logger.log_dict['info'][0][0][0]))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=503)
sync.put_object = fake_put_object
# Fail due to 503
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {
'account': 'a',
'container': 'c'}, realm, realm_key))
self.assertEquals(cs.container_puts, 2)
self.assertTrue(
cs.logger.log_dict['exception'][0][0][0].startswith(
'ERROR Syncing '))
finally:
sync.uuid = orig_uuid
sync.shuffle = orig_shuffle
sync.put_object = orig_put_object
sync.direct_get_object = orig_direct_get_object
def test_select_http_proxy_None(self):
cs = sync.ContainerSync(
{'sync_proxy': ''}, container_ring=FakeRing(),
object_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), None)
def test_select_http_proxy_one(self):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one'}, container_ring=FakeRing(),
object_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), 'http://one')
def test_select_http_proxy_multiple(self):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one,http://two,http://three'},
container_ring=FakeRing(),
object_ring=FakeRing())
self.assertEqual(
set(cs.http_proxies),
set(['http://one', 'http://two', 'http://three']))
if __name__ == '__main__':
unittest.main()
|
|
import numpy
import six
import chainer
import chainerx
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function
from chainer import function_node
from chainer.utils import type_check
def _extract_gates(x):
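    # x has shape (batch, 4 * units, ...); reshape so that axis 2 indexes the
    # four gates, i.e. the per-unit gate values (a, i, f, o) are interleaved
    # along the original channel axis.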
r = x.reshape((len(x), x.shape[1] // 4, 4) + x.shape[2:])
return [r[:, :, i] for i in six.moves.range(4)]
def _sigmoid(x, xp=numpy):
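    # Numerically stable sigmoid via the identity
    # sigmoid(x) = 0.5 * tanh(0.5 * x) + 0.5, which avoids overflow in exp(-x).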
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
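# The _grad_* helpers below take the already-activated value (y = sigmoid(z) or
# y = tanh(z)) and return derivatives with respect to the pre-activation z:
# d(sigmoid)/dz = y * (1 - y), d(tanh)/dz = 1 - y**2; the _grad_grad_* variants
# are the corresponding second derivatives expressed in the same terms.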
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_grad_sigmoid(x):
return x * (1 - x) * (1 - 2 * x)
def _grad_tanh(x):
return 1 - x * x
def _grad_grad_tanh(x, gx):
return -2 * x * gx
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa = tanh(a); \
T ai = sigmoid(i_); \
T af = sigmoid(f); \
T ao = sigmoid(o);
'''
class LSTM(function_node.FunctionNode):
"""Long short-term memory unit with forget gate.
    It has two inputs (c, x) and two outputs (c, h), where c indicates the cell
    state. x must have four times as many channels as the number of units.
"""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('c', 'x'))
c_type, x_type = in_types
type_check.expect(
c_type.dtype.kind == 'f',
x_type.dtype == c_type.dtype,
c_type.ndim >= 2,
x_type.ndim >= 2,
c_type.ndim == x_type.ndim,
x_type.shape[0] <= c_type.shape[0],
x_type.shape[1] == 4 * c_type.shape[1],
)
for i in six.moves.range(2, type_check.eval(c_type.ndim)):
type_check.expect(x_type.shape[i] == c_type.shape[i])
def forward_chainerx(self, inputs):
c, x = inputs
c_next, h = chainerx.lstm(c, x)
return c_next, h
def forward(self, inputs):
self.retain_inputs((0, 1))
c_prev, x = inputs
a, i, f, o = _extract_gates(x)
batch = len(x)
if isinstance(x, chainer.get_cpu_array_types()):
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
else:
xp = numpy
a = xp.tanh(a)
i = _sigmoid(i, xp)
f = _sigmoid(f, xp)
o = _sigmoid(o, xp)
c_next = numpy.empty_like(c_prev)
c_next[:batch] = a * i + f * c_prev[:batch]
h = o * xp.tanh(c_next[:batch])
else:
c_next = cuda.cupy.empty_like(c_prev)
h = cuda.cupy.empty_like(c_next[:batch])
cuda.elementwise(
'T c_prev, T a, T i_, T f, T o', 'T c, T h',
'''
COMMON_ROUTINE;
c = aa * ai + af * c_prev;
h = ao * tanh(c);
''',
'lstm_fwd', preamble=_preamble)(
c_prev[:batch], a, i, f, o, c_next[:batch], h)
c_next[batch:] = c_prev[batch:]
self.retain_outputs((0,))
return c_next, h
def backward(self, indexes, grads):
grad_inputs = (
self.get_retained_inputs() + self.get_retained_outputs() + grads)
return LSTMGrad()(*grad_inputs)
class LSTMGrad(function.Function):
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
c_prev, x, c_next, gc, gh = inputs
batch = len(x)
gx = xp.empty_like(x)
ga, gi, gf, go = _extract_gates(gx)
# Consider the case that either gradient is not given
if gc is None:
gc_update = 0
gc_rest = 0
else:
gc_update = gc[:batch]
gc_rest = gc[batch:]
if gh is None:
gh = 0
a, i, f, o = _extract_gates(x)
if xp is numpy:
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
tanh_a = xp.tanh(a)
sig_i = _sigmoid(i, xp)
sig_f = _sigmoid(f, xp)
sig_o = _sigmoid(o, xp)
co = xp.tanh(c_next[:batch])
gc_prev = numpy.empty_like(c_prev)
# multiply f later
gc_prev[:batch] = gh * sig_o * _grad_tanh(co) + gc_update
gc = gc_prev[:batch]
ga[:] = gc * sig_i * _grad_tanh(tanh_a)
gi[:] = gc * tanh_a * _grad_sigmoid(sig_i)
gf[:] = gc * c_prev[:batch] * _grad_sigmoid(sig_f)
go[:] = gh * co * _grad_sigmoid(sig_o)
gc_prev[:batch] *= sig_f # multiply f here
gc_prev[batch:] = gc_rest
else:
gc_prev = xp.empty_like(c_prev)
cuda.elementwise(
'T c_prev, T c, T gc, T gh, T a, T i_, T f, T o',
'T gc_prev, T ga, T gi, T gf, T go',
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga = temp * ai * grad_tanh(aa);
gi = temp * aa * grad_sigmoid(ai);
gf = temp * c_prev * grad_sigmoid(af);
go = gh * co * grad_sigmoid(ao);
gc_prev = temp * af;
''',
'lstm_bwd', preamble=_preamble)(
c_prev[:batch], c_next[:batch], gc_update, gh, a, i, f, o,
gc_prev[:batch], ga, gi, gf, go)
gc_prev[batch:] = gc_rest
return gc_prev, gx
def backward(self, inputs, grads):
xp = backend.get_array_module(*inputs)
c_prev, x, c, gc, gh = inputs
ggc_prev, ggx = grads
batch = len(x)
gc_is_none = gc is None
gh_is_none = gh is None
ggc_prev_is_none = ggc_prev is None
ggx_is_none = ggx is None
if gc_is_none:
gc = 0
if gh_is_none:
gh = 0
if ggc_prev_is_none:
ggc_prev = 0
if ggx_is_none:
ggx = 0
gc_prev = xp.empty_like(c_prev)
gx = xp.empty_like(x)
gc_next = xp.empty_like(c)
ggc = xp.empty_like(c_prev)
ggh = xp.empty_like(c[:batch])
gc_prev[batch:] = 0
gc_next[batch:] = 0
ggc[batch:] = 0 if ggc_prev_is_none else ggc_prev[batch:]
ggh[batch:] = 0
c_prev = c_prev[:batch]
c = c[:batch]
if not gc_is_none:
gc = gc[:batch]
if not ggc_prev_is_none:
ggc_prev = ggc_prev[:batch]
if not ggx_is_none:
ggx = ggx[:batch]
a, i, f, o = _extract_gates(x)
if not ggx_is_none:
gga, ggi, ggf, ggo = _extract_gates(ggx)
else:
gga = 0
ggi = 0
ggf = 0
ggo = 0
ga, gi, gf, go = _extract_gates(gx)
lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo,
gc_prev[:batch], ga[:], gi[:], gf[:], go[:], gc_next[:batch],
ggc[:batch], ggh[:batch])
if gc_is_none:
ggc = None
if gh_is_none:
ggh = None
return gc_prev, gx, gc_next, ggc, ggh
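# cuda.fuse() fuses the elementwise operations below into a single CUDA kernel
# when the arguments are CuPy arrays, and simply runs the plain Python/NumPy
# implementation otherwise.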
@cuda.fuse()
def lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo,
gc_prev, ga, gi, gf, go, gc_next, ggc, ggh):
xp = backend.get_array_module(a)
sig_o = _sigmoid(o, xp)
gsig_o = _grad_sigmoid(sig_o)
ggsig_o = _grad_grad_sigmoid(sig_o)
sig_i = _sigmoid(i, xp)
gsig_i = _grad_sigmoid(sig_i)
ggsig_i = _grad_grad_sigmoid(sig_i)
sig_f = _sigmoid(f, xp)
gsig_f = _grad_sigmoid(sig_f)
ggsig_f = _grad_grad_sigmoid(sig_f)
tanh_a = xp.tanh(a)
gtanh_a = _grad_tanh(tanh_a)
ggtanh_a = _grad_grad_tanh(tanh_a, gtanh_a)
tanh_c = xp.tanh(c)
gtanh_c = _grad_tanh(tanh_c)
ggtanh_c = _grad_grad_tanh(tanh_c, gtanh_c)
gc_bar = gh * sig_o * gtanh_c + gc
gc_prev[:] = ggf * gc_bar * gsig_f
ga[:] = (gga * sig_i * ggtanh_a + ggi * gtanh_a * gsig_i) * gc_bar
gi[:] = (gga * gtanh_a * gsig_i + ggi * tanh_a * ggsig_i) * gc_bar
gf[:] = (ggc_prev * (gh * sig_o * gtanh_c + gc) * gsig_f +
ggf * gc_bar * c_prev * ggsig_f)
ggc[:] = (ggc_prev * sig_f +
gga * sig_i * gtanh_a +
ggi * tanh_a * gsig_i +
ggf * c_prev * gsig_f)
dgc_do = gh * gsig_o * gtanh_c
go[:] = ggc * dgc_do + ggo * gh * tanh_c * ggsig_o
dgc_dc = gh * sig_o * ggtanh_c
gc_next[:] = ggc * dgc_dc + ggo * gh * gtanh_c * gsig_o
ggh[:] = ggc * sig_o * gtanh_c + ggo * tanh_c * gsig_o
return gc_prev, ga, gi, gf, go, gc_next, ggc, ggh
def lstm(c_prev, x):
"""Long Short-Term Memory units as an activation function.
This function implements LSTM units with forget gates. Let ``c_prev`` be
the previous cell state and ``x`` the input array.
First, the input array ``x`` is split into four arrays
:math:`a, i, f, o` of the same shape along the second axis. This means that
the size of ``x`` 's second axis must be four times that of ``c_prev`` 's.
The split input arrays correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes the updated cell state ``c`` and the outgoing signal
``h`` as:
.. math::
c &= \\tanh(a) \\sigma(i)
+ c_{\\text{prev}} \\sigma(f), \\\\
h &= \\tanh(c) \\sigma(o),
where :math:`\\sigma` is the elementwise sigmoid function.
These are returned as a tuple of two variables.
This function supports variable length inputs. The mini-batch size of
the current input must be equal to or smaller than that of the previous
one. When the mini-batch size of ``x`` is smaller than that of ``c``, this
function only updates ``c[0:len(x)]`` and doesn't change the rest of ``c``,
``c[len(x):]``.
So, please sort input sequences in descending order of lengths before
applying the function.
Args:
c_prev (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the previous cell state. The cell state
should be a zero array or the output of the previous call of LSTM.
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate. It must have the second dimension whose size
is four times of that of the cell state.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``.
``c`` is the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks
<http://www.felixgers.de/papers/phd.pdf>`_.
.. seealso::
:class:`~chainer.links.LSTM`
.. admonition:: Example
Assume ``y`` is the current incoming signal, ``c`` is the previous
cell state, and ``h`` is the previous outgoing signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
The most typical preparation of ``x`` is:
>>> n_units = 100
>>> y = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> h = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> c = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> model = chainer.Chain()
>>> with model.init_scope():
... model.w = L.Linear(n_units, 4 * n_units)
... model.v = L.Linear(n_units, 4 * n_units)
>>> x = model.w(y) + model.v(h)
>>> c, h = F.lstm(c, x)
This corresponds to calculating the input array ``x``, or the input
sources :math:`a, i, f, o`, from the current incoming signal ``y`` and
the previous outgoing signal ``h``. Different parameters are used for
different kinds of input sources.
.. note::
We use the naming rule below.
- incoming signal
The formal input of the LSTM formulation (e.g. in NLP, a word
vector or the output of a lower RNN layer). The input of
:class:`chainer.links.LSTM` is the *incoming signal*.
- input array
The array obtained by a linear transformation of the *incoming signal*
and the previous outgoing signal. The *input array* contains four
sources, the sources of cell input, input gate, forget gate and
output gate. The input of
:class:`chainer.functions.activation.lstm.LSTM` is the
*input array*.
"""
return LSTM().apply((c_prev, x))
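# Illustrative sketch: a minimal NumPy-only rendering of the forward equations
# in the docstring above, c = tanh(a) * sigmoid(i) + c_prev * sigmoid(f) and
# h = tanh(c) * sigmoid(o), assuming a plain contiguous split of ``x`` along
# the second axis (the actual gate extraction used by LSTM may lay the gates
# out differently, so this is a sketch of the math, not a drop-in replacement).
def _lstm_forward_sketch(c_prev, x):
    import numpy as np
    a, i, f, o = np.split(x, 4, axis=1)  # assumed contiguous gate layout
    def sigmoid(v):
        return 1.0 / (1.0 + np.exp(-v))
    c = np.tanh(a) * sigmoid(i) + c_prev * sigmoid(f)  # updated cell state
    h = np.tanh(c) * sigmoid(o)                        # outgoing signal
    return c, h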
|
|
from django.db import models
from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save, post_delete, pre_save,\
m2m_changed
from django.db.models import Q
from django.conf import settings
import os
from django.utils.html import strip_tags
from bioshareX.utils import test_path, paths_contain, path_contains
from jsonfield import JSONField
import datetime
from guardian.shortcuts import get_users_with_perms, get_objects_for_group
from django.core.urlresolvers import reverse
from guardian.models import UserObjectPermissionBase, GroupObjectPermissionBase
import subprocess
from django.utils import timezone
from django.contrib.postgres.fields.array import ArrayField
import re
def pkgen():
import string, random
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(15))
class ShareStats(models.Model):
share = models.OneToOneField('Share',unique=True,related_name='stats')
num_files = models.IntegerField(default=0)
bytes = models.BigIntegerField(default=0)
updated = models.DateTimeField(null=True)
def hr_size(self):
from utils import sizeof_fmt
return sizeof_fmt(self.bytes)
def update_stats(self):
from utils import get_share_stats
from django.utils import timezone
# if self.updated is None:
stats = get_share_stats(self.share)
# self.num_files = stats['files']
self.bytes = stats['size']
self.updated = timezone.now()
self.save()
class Filesystem(models.Model):
TYPE_STANDARD = 'STANDARD'
TYPE_ZFS = 'ZFS'
TYPES = ((TYPE_STANDARD,'Standard'),(TYPE_ZFS,'ZFS'))
name = models.CharField(max_length=50)
description = models.TextField()
path = models.CharField(max_length=200)
users = models.ManyToManyField(User, related_name='filesystems')
type = models.CharField(max_length=20,choices=TYPES,default=TYPE_STANDARD)
def __unicode__(self):
return '%s: %s' %(self.name, self.path)
class FilePath(models.Model):
path = models.CharField(max_length=200)
name = models.CharField(max_length=50,null=True,blank=True)
description = models.TextField(null=True,blank=True)
regexes = ArrayField(models.CharField(max_length=200), blank=False)
users = models.ManyToManyField(User, related_name='file_paths', blank=True)
show_path = models.BooleanField(default=False)
def is_valid(self, path):
if not path_contains(self.path, path):
return False
if not self.regexes or len(self.regexes) == 0:
return True
for regex in self.regexes:
if re.match(regex, path):
return True
return False
def __unicode__(self):
return '%s: %s' %(self.name, self.path) if self.name else self.path
class Share(models.Model):
id = models.CharField(max_length=15,primary_key=True,default=pkgen)
slug = models.SlugField(max_length=50,blank=True,null=True)
parent = models.ForeignKey('self',null=True,blank=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now_add=True,null=True,blank=True)
owner = models.ForeignKey(User, on_delete=models.PROTECT)
name = models.CharField(max_length=125)
secure = models.BooleanField(default=True)
read_only = models.BooleanField(default=False)
notes = models.TextField(null=True,blank=True)
tags = models.ManyToManyField('Tag')
link_to_path = models.CharField(max_length=200,blank=True,null=True)
filepath = models.ForeignKey(FilePath,blank=True,null=True)
sub_directory = models.CharField(max_length=200,blank=True,null=True)
real_path = models.CharField(max_length=200,blank=True,null=True)
filesystem = models.ForeignKey(Filesystem, on_delete=models.PROTECT)
path_exists = models.BooleanField(default=True)
last_data_access = models.DateTimeField(null=True)
PERMISSION_VIEW = 'view_share_files'
PERMISSION_DELETE = 'delete_share_files'
PERMISSION_DOWNLOAD = 'download_share_files'
PERMISSION_WRITE = 'write_to_share'
PERMISSION_LINK_TO_PATH = 'link_to_path'
PERMISSION_ADMIN = 'admin'
def __unicode__(self):
return self.name
@property
def slug_or_id(self):
return self.slug if self.slug else self.id
@staticmethod
def get_by_slug_or_id(slug_or_id):
return Share.objects.get(Q(id=slug_or_id)|Q(slug=slug_or_id))
def update_last_modified(self,commit=True):
self.updated = timezone.now()
if commit:
self.save()
def get_url(self,subpath=None):
if subpath:
return reverse('list_directory',kwargs={'share':self.slug_or_id,'subpath':subpath})
return reverse('list_directory',kwargs={'share':self.slug_or_id})
def get_stats(self):
stats = ShareStats.objects.get_or_create(share=self)[0]
stats.update_stats()
return stats
@staticmethod
def user_queryset(user,include_stats=True):
from guardian.shortcuts import get_objects_for_user
shares = get_objects_for_user(user, 'bioshareX.view_share_files')
# query = Q(id__in=[s.id for s in shares])|Q(owner=user) if user.is_authenticated() else Q(id__in=[s.id for s in shares])
query = Q(id__in=shares)|Q(owner=user) if user.is_authenticated() else Q(id__in=shares)
if include_stats:
return Share.objects.select_related('stats').filter(query)
else:
return Share.objects.filter(query)
# Get a list of users with ANY permission. Useful for getting lists of emails, etc.
def get_users_with_permissions(self):
return list(
set(
[uop.user for uop in ShareUserObjectPermission.objects.filter(content_object=self).select_related('user')] +
list(User.objects.filter(groups__in=ShareGroupObjectPermission.objects.filter(content_object=self).values_list('group_id',flat=True)))
)
)
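# Illustrative usage (hypothetical variable name): collect notification emails
# for everyone with any permission on a share, e.g.
#   emails = [u.email for u in some_share.get_users_with_permissions()]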
def get_permissions(self,user_specific=False):
from guardian.shortcuts import get_groups_with_perms
user_perms = self.get_all_user_permissions(user_specific=user_specific)
groups = get_groups_with_perms(self,attach_perms=True)
group_perms = [{'group':{'name':group.name,'id':group.id},'permissions':permissions} for group, permissions in groups.iteritems()]
return {'user_perms':user_perms,'group_perms':group_perms}
def get_user_permissions(self,user,user_specific=False):
if user_specific:
from utils import fetchall
perms = [uop.permission.codename for uop in ShareUserObjectPermission.objects.filter(user=user,content_object=self).select_related('permission')]
else:
from guardian.shortcuts import get_perms
if user.username == self.owner.username:
perms = [perm[0] for perm in self._meta.permissions]
else:
perms = get_perms(user, self)
if not self.secure and not user_specific:
perms = list(set(perms+['view_share_files','download_share_files']))
if self.read_only:
if 'write_to_share' in perms:
perms.remove('write_to_share')
if 'delete_share_files' in perms:
perms.remove('delete_share_files')
return perms
def get_all_user_permissions(self,user_specific=False):
if not user_specific:
from guardian.shortcuts import get_users_with_perms
users = get_users_with_perms(self,attach_perms=True, with_group_users=False)
print 'users'
print users
user_perms = [{'user':{'username':user.username, 'email':user.email, 'first_name':user.first_name, 'last_name':user.last_name},'permissions':permissions} for user, permissions in users.iteritems()]
else:
perms = ShareUserObjectPermission.objects.filter(content_object=self).select_related('permission','user')
user_perms={}
for perm in perms:
if not user_perms.has_key(perm.user.username):
user_perms[perm.user.username]={'user':{'username':perm.user.username},'permissions':[]}
user_perms[perm.user.username]['permissions'].append(perm.permission.codename)
return user_perms
def get_path(self):
return os.path.join(self.filesystem.path,self.id)
def get_link_path(self, add_trailing_slash=True):
if self.link_to_path and add_trailing_slash:
return os.path.join(self.link_to_path, '')
return None
def get_zfs_path(self):
if not getattr(settings,'ZFS_BASE',False) or not self.filesystem.type == Filesystem.TYPE_ZFS:
return None
return os.path.join(settings.ZFS_BASE,self.id)
def get_realpath(self):
return os.path.realpath(self.get_path())
def check_path(self,subdir=None):
path = self.get_path()
if subdir:
path = os.path.join(path,subdir)
return os.path.exists(path)
def get_removed_path(self):
return os.path.join(settings.REMOVED_FILES_ROOT,self.id)
def get_path_type(self,subpath):
full_path = os.path.join(self.get_path(),subpath)
if os.path.isfile(full_path):
return 'file'
elif os.path.isdir(full_path):
return 'directory'
else:
return None
def create_folder(self,name,subdir=None):
os.umask(settings.UMASK)
path = self.get_path() if subdir is None else os.path.join(self.get_path(),subdir)
if os.path.exists(path):
folder_path = os.path.join(path,name)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return folder_path
def delete_path(self,subpath):
import shutil
if subpath is None or subpath == '' or subpath.count('..') != 0:
return False
path = os.path.join(self.get_path(),subpath)
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
return True
elif os.path.isdir(path):
shutil.rmtree(path)
return True
return False
def add_tags(self,tags,save=True):
tag_list = []
for tag in tags:
tag = tag.strip()
if len(tag) > 2 :
tag_list.append(Tag.objects.get_or_create(name=tag)[0])
self.tags = tag_list
if save:
self.save()
def set_tags(self,tags,save=True):
print tags
self.tags.clear()
self.add_tags(tags,save)
def move_path(self,item_subpath,destination_subpath=''):
os.umask(settings.UMASK)
import shutil
if destination_subpath.count('..') != 0:
return False
destination_path = os.path.join(self.get_path(),destination_subpath)
item_path = os.path.join(self.get_path(),item_subpath)
if os.path.exists(destination_path):
shutil.move(item_path,destination_path)
return True
else:
return False
def move_share(self,filesystem):
os.umask(settings.UMASK)
import shutil
new_path = os.path.join(filesystem.path, self.id)
if self.link_to_path and os.path.islink(self.get_path()):
self.check_link_path()
self.unlink()
os.symlink(self.link_to_path,new_path)
else:
shutil.move(self.get_path(),new_path)
self.filesystem = filesystem
def check_link_path(self):
if self.parent:
if not path_contains(self.parent.get_path(), self.link_to_path,real_path=False):
raise Exception('Subshare must be under the real share.')
elif self.link_to_path:
test_path(self.link_to_path,allow_absolute=True)
if not paths_contain(settings.LINK_TO_DIRECTORIES,self.link_to_path):
raise Exception('Path not allowed.')
def create_link(self):
os.umask(settings.UMASK)
self.check_link_path()
if self.link_to_path:
os.symlink(self.link_to_path,self.get_path())
def unlink(self):
path = self.get_path()
if os.path.islink(path):
os.unlink(path)
def create_archive_stream(self,items,subdir=None):
import zipstream
from django.http.response import StreamingHttpResponse
from settings.settings import ZIPFILE_SIZE_LIMIT_BYTES
from utils import zipdir, get_total_size
from os.path import isfile, isdir
path = self.get_path() if subdir is None else os.path.join(self.get_path(),subdir)
if not os.path.exists(path):
raise Exception('Invalid subdirectory provided')
share_path = self.get_path()
z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
# total_size = get_total_size([os.path.join(path,item) for item in items])
# if total_size > ZIPFILE_SIZE_LIMIT_BYTES:
# raise Exception("%d bytes is above bioshare's limit for creating zipfiles, please use rsync or wget instead" % (total_size))
for item in items:
item_path = os.path.join(path,item)
if not os.path.exists(item_path):
raise Exception("File or folder: '%s' does not exist" % (item))
if isfile(item_path):
item_name = item#os.path.join(self.id,item)
z.write(item_path,arcname=item_name)
elif isdir(item_path):
zipdir(share_path,item_path,z)
from datetime import datetime
zip_name = 'archive_'+datetime.now().strftime('%Y_%m_%d__%H_%M_%S')+'.zip'
response = StreamingHttpResponse(z, content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename={}'.format(zip_name)
return response
Share._meta.permissions = (
(Share.PERMISSION_VIEW, 'View share files'),
(Share.PERMISSION_DELETE, 'Delete share files'),
(Share.PERMISSION_DOWNLOAD, 'Download share files'),
(Share.PERMISSION_WRITE, 'Write to share'),
(Share.PERMISSION_LINK_TO_PATH, 'Link to a specific path'),
(Share.PERMISSION_ADMIN, 'Administer'),
)
def share_post_save(sender, **kwargs):
if kwargs['created'] and not kwargs.get('raw',False):
os.umask(settings.UMASK)
instance = kwargs['instance']
path = instance.get_path()
import pwd, grp
if not os.path.exists(path):
if instance.link_to_path:
instance.create_link()
else:
from settings.settings import FILES_GROUP, FILES_OWNER
if instance.get_zfs_path():
subprocess.check_call(['zfs','create',instance.get_zfs_path()])
else:
os.makedirs(path)
uid = pwd.getpwnam(FILES_OWNER).pw_uid
gid = grp.getgrnam(FILES_GROUP).gr_gid
if not instance.real_path:
instance.real_path = os.path.realpath(instance.get_path())
instance.save()
post_save.connect(share_post_save, sender=Share)
@receiver(pre_save, sender=Share)
def share_pre_save(sender, instance, **kwargs):
try:
old_share = Share.objects.get(pk=instance.pk)
if instance.filesystem.pk != old_share.filesystem.pk:
old_share.move_share(instance.filesystem)
elif instance.link_to_path and instance.link_to_path != old_share.link_to_path:
old_share.unlink()
instance.create_link()
except Share.DoesNotExist, e:
pass
def share_post_delete(sender, instance, **kwargs):
path = instance.get_path()
import shutil
if os.path.islink(path):
instance.unlink()
elif instance.get_zfs_path():
subprocess.check_call(['zfs','destroy',instance.get_zfs_path()])
else:
if os.path.isdir(path):
shutil.rmtree(path)
post_delete.connect(share_post_delete, sender=Share)
class Tag(models.Model):
name = models.CharField(blank=False,null=False,max_length=30,primary_key=True)
def __unicode__(self):
return self.name
def to_html(self):
return '<span class="tag">%s</span>'%self.name
def clean(self):
self.name = strip_tags(self.name)
class MetaData(models.Model):
share = models.ForeignKey(Share)
subpath = models.CharField(max_length=250,null=True,blank=True)
notes = models.TextField(blank=True,null=True)
tags = models.ManyToManyField(Tag)
class Meta:
unique_together = (("share", "subpath"),)
@staticmethod
def get_or_none(share,subpath):
try:
return MetaData.objects.get(share=share,subpath=subpath)
except MetaData.DoesNotExist:
return None
def to_dict(self):
return {'tags':self.tags.all(),'notes':self.notes}
def json(self):
return {'tags':[tag.name for tag in self.tags.all()],'notes':self.notes}
class SSHKey(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=50)
key = models.TextField(blank=False,null=False)
def create_authorized_key(self):
key = self.get_key()
return 'command="%s %s/manage.py rsync %s" ssh-rsa %s %s' % (settings.PYTHON_BIN, settings.CURRENT_DIR, self.user.username, key, self.user.username)
def get_key(self):
return self.extract_key(self.key)
@staticmethod
def extract_key(full_key):
import re
match = re.match('ssh-rsa (?P<key>[A-Za-z0-9\+\/=]{300,}) .*', full_key)
if match is None:
raise Exception('Unable to parse key')
matches = match.groupdict()
return matches['key']
class ShareLog(models.Model):
ACTION_FILE_ADDED = 'File Added'
ACTION_FOLDER_CREATED = 'Folder Created'
ACTION_DELETED = 'File(s)/Folder(s) Deleted'
ACTION_MOVED = 'File(s)/Folder(s) Moved'
ACTION_RENAMED = 'File/Folder Renamed'
ACTION_RSYNC = 'Files rsynced'
ACTION_PERMISSIONS_UPDATED = 'Permissions updated'
share = models.ForeignKey(Share, related_name="logs")
user = models.ForeignKey(User, null=True, blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
action = models.CharField(max_length=30,null=True,blank=True)
text = models.TextField(null=True,blank=True)
paths = JSONField()
@staticmethod
def create(share,action,user=None,text='',paths=[],subdir=None,share_updated=True):
if subdir:
paths = [os.path.join(subdir,path) for path in paths]
log = ShareLog.objects.create(share=share,user=user,action=action,text=text,paths=paths)
if share_updated:
share.update_last_modified()
return log
class Message(models.Model):
created = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=50)
description = models.TextField(null=True,blank=True)
active = models.BooleanField(default=True)
expires = models.DateField(null=True,blank=True)
viewed_by = models.ManyToManyField(User)
def __unicode__(self):
return self.title
class GroupProfile(models.Model):
group = models.OneToOneField(Group,related_name='profile')
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User,on_delete=models.PROTECT)
description = models.TextField(blank=True,null=True)
"""
Make permissions more efficient to check by having a direct foreign key:
http://django-guardian.readthedocs.io/en/stable/userguide/performance.html#direct-foreign-keys
"""
class ShareUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Share,related_name='user_permissions')
class ShareGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(Share,related_name='group_permissions')
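# Illustrative note: with the direct foreign keys above, per-object permission
# rows can be fetched with ordinary joins instead of guardian's generic-key
# lookups, e.g. (the same pattern used in Share.get_all_user_permissions):
#   ShareUserObjectPermission.objects.filter(
#       content_object=some_share).select_related('user', 'permission')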
def group_shares(self):
return Share.objects.filter(group_permissions__group=self)
Group.shares = property(group_shares)
def user_permission_codes(self):
return [p.codename for p in self.user_permissions.all()]
User.permissions = user_permission_codes
Group._meta.permissions += (('manage_group', 'Manage group'),)
User._meta.ordering = ['username']
def lowercase_user(sender, instance, **kwargs):
if instance.username != instance.username.lower() or instance.email != instance.email.lower():
User.objects.filter(id=instance.id).update(username=instance.username.lower(),email=instance.email.lower())
post_save.connect(lowercase_user, sender=User)
|
|
import ctypes
import json
import pickle
import random
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from unittest import mock
from django.contrib.gis import gdal
from django.contrib.gis.geos import (
GeometryCollection, GEOSException, GEOSGeometry, LinearRing, LineString,
MultiLineString, MultiPoint, MultiPolygon, Point, Polygon, fromfile,
fromstr,
)
from django.contrib.gis.geos.libgeos import geos_version_tuple
from django.contrib.gis.shortcuts import numpy
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase
from django.utils.encoding import force_bytes
from ..test_data import TestDataMixin
class GEOSTest(SimpleTestCase, TestDataMixin):
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz:
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use a
# WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into the 3D coordinate if there is none.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertIs(GEOSGeometry(hexewkb_3d).hasz, True)
# Same for EWKB.
self.assertEqual(memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
self.assertEqual(memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
fromstr(err.wkt)
# Bad WKB
with self.assertRaises(GEOSException):
GEOSGeometry(memoryview(b'0'))
class NotAGeometry:
pass
# Some other object
with self.assertRaises(TypeError):
GEOSGeometry(NotAGeometry())
# None
with self.assertRaises(TypeError):
GEOSGeometry(None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt, 4326), GEOSGeometry(geom.json))
def test_json_srid(self):
geojson_data = {
"type": "Point",
"coordinates": [2, 49],
"crs": {
"type": "name",
"properties": {
"name": "urn:ogc:def:crs:EPSG::4322"
}
}
}
self.assertEqual(GEOSGeometry(json.dumps(geojson_data)), Point(2, 49, srid=4322))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
self.assertEqual(p, 'POINT(5.0 23.0)')
# An error shouldn't be raised on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertNotEqual(g, False)
def test_eq_with_srid(self):
"Testing non-equivalence with different srids."
p0 = Point(5, 23)
p1 = Point(5, 23, srid=4326)
p2 = Point(5, 23, srid=32632)
# GEOS
self.assertNotEqual(p0, p1)
self.assertNotEqual(p1, p2)
# EWKT
self.assertNotEqual(p0, p1.ewkt)
self.assertNotEqual(p1, p0.ewkt)
self.assertNotEqual(p1, p2.ewkt)
# Equivalence with matching SRIDs
self.assertEqual(p2, p2)
self.assertEqual(p2, p2.ewkt)
# WKT contains no SRID so will not equal
self.assertNotEqual(p2, p2.wkt)
# SRID of 0
self.assertEqual(p0, 'SRID=0;POINT (5 23)')
self.assertNotEqual(p1, 'SRID=0;POINT (5 23)')
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(pnt.dims, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(pnt, fromstr(p.wkt))
self.assertEqual(False, pnt == prev) # Use assertEqual to test __eq__
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertIs(pnt.hasz, True)
self.assertEqual(p.z, pnt.z)
self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertIs(pnt.hasz, False)
self.assertIsNone(pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(pnt, pnt2)
self.assertEqual(pnt, pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertEqual(mpnt.dims, 0)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
with self.assertRaises(IndexError):
mpnt.__getitem__(len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertIs(p.empty, False)
self.assertIs(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.dims, 1)
self.assertIs(ls.empty, False)
self.assertIs(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(ls, fromstr(l.wkt))
self.assertEqual(False, ls == prev) # Use assertEqual to test __eq__
with self.assertRaises(IndexError):
ls.__getitem__(len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
# Point individual arguments
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
if numpy:
self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
with self.assertRaisesMessage(TypeError, 'Each coordinate should be a sequence (list or tuple)'):
LineString((0, 0))
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString(numpy.array([(0, 0)]))
with mock.patch('django.contrib.gis.geos.linestring.numpy', False):
with self.assertRaisesMessage(TypeError, 'Invalid initialization input for LineStrings.'):
LineString('wrong input')
# Test __iter__().
self.assertEqual(list(LineString((0, 0), (1, 1), (2, 2))), [(0, 0), (1, 1), (2, 2)])
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertEqual(ml.dims, 1)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(ml, fromstr(l.wkt))
self.assertEqual(False, ml == prev) # Use assertEqual to test __eq__
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertIs(ls.empty, False)
with self.assertRaises(IndexError):
ml.__getitem__(len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(lr.dims, 1)
self.assertEqual(rr.n_p, len(lr))
self.assertIs(lr.valid, True)
self.assertIs(lr.empty, False)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 3.'):
LinearRing((0, 0), (1, 1), (0, 0))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing(numpy.array([(0, 0)]))
def test_linearring_json(self):
self.assertJSONEqual(
LinearRing((0, 0), (0, 1), (1, 1), (0, 0)).json,
'{"coordinates": [[0, 0], [0, 1], [1, 1], [0, 0]], "type": "LineString"}',
)
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.dims, 2)
self.assertIs(poly.empty, False)
self.assertIs(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(poly, fromstr(p.wkt))
# Should not be equal to previous geometry
self.assertEqual(False, poly == prev) # Use assertEqual to test __eq__
self.assertNotEqual(poly, prev) # Use assertNotEqual to test __ne__
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
with self.assertRaises(IndexError):
poly.__getitem__(len(poly))
with self.assertRaises(IndexError):
poly.__setitem__(len(poly), False)
with self.assertRaises(IndexError):
poly.__getitem__(-1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
with self.assertRaises(TypeError):
Polygon(0, [1, 2, 3])
with self.assertRaises(TypeError):
Polygon('foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygons_templates(self):
# Accessing Polygon attributes in templates should work.
engine = Engine()
template = engine.from_string('{{ polygons.0.wkt }}')
polygons = [fromstr(p.wkt) for p in self.geometries.multipolygons[:2]]
content = template.render(Context({'polygons': polygons}))
self.assertIn('MULTIPOLYGON (((100', content)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertGreater(p1, p2)
self.assertLess(p2, p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertGreater(p4, p3)
self.assertLess(p3, p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mpoly.dims, 2)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
with self.assertRaises(IndexError):
mpoly.__getitem__(len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertIs(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
# #### Memory issues with rings and poly
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
str(ring1)
str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in range(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
with self.assertRaises(GEOSException):
g.relate_pattern(0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertIs(a.intersects(b), True)
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_unary_union(self):
"Testing unary_union."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = GeometryCollection(a, b).unary_union
self.assertTrue(u1.equals(u2))
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
with self.assertRaises(ctypes.ArgumentError):
g.buffer(width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in range(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in range(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_covers(self):
poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
self.assertTrue(poly.covers(Point(5, 5)))
self.assertFalse(poly.covers(Point(100, 100)))
def test_closed(self):
ls_closed = LineString((0, 0), (1, 1), (0, 0))
ls_not_closed = LineString((0, 0), (1, 1))
self.assertFalse(ls_not_closed.closed)
self.assertTrue(ls_closed.closed)
if geos_version_tuple() >= (3, 5):
self.assertFalse(MultiLineString(ls_closed, ls_not_closed).closed)
self.assertTrue(MultiLineString(ls_closed, ls_closed).closed)
with mock.patch('django.contrib.gis.geos.libgeos.geos_version', lambda: b'3.4.9'):
with self.assertRaisesMessage(GEOSException, "MultiLineString.closed requires GEOS >= 3.5.0."):
MultiLineString().closed
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
with self.assertRaises(ctypes.ArgumentError):
pnt.srid = '4326'
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly:
self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)):
self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
p2 = fromstr(p1.hex)
self.assertIsNone(p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
# Testing that geometry SRID could be set to its own value
pnt_wo_srid = Point(1, 1)
pnt_wo_srid.srid = pnt_wo_srid.srid
# Input geometries that have an SRID.
self.assertEqual(GEOSGeometry(pnt.ewkt, srid=pnt.srid).srid, pnt.srid)
self.assertEqual(GEOSGeometry(pnt.ewkb, srid=pnt.srid).srid, pnt.srid)
with self.assertRaisesMessage(ValueError, 'Input geometry already has SRID: %d.' % pnt.srid):
GEOSGeometry(pnt.ewkt, srid=1)
with self.assertRaisesMessage(ValueError, 'Input geometry already has SRID: %d.' % pnt.srid):
GEOSGeometry(pnt.ewkb, srid=1)
def test_custom_srid(self):
"""Test with a null srid and a srid unknown to GDAL."""
for srid in [None, 999999]:
pnt = Point(111200, 220900, srid=srid)
self.assertTrue(pnt.ewkt.startswith(("SRID=%s;" % srid if srid else '') + "POINT (111200"))
self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
self.assertIsNone(pnt.srs)
# Test conversion from custom to a known srid
c2w = gdal.CoordTransform(
gdal.SpatialReference(
'+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
'+datum=WGS84 +units=m +no_defs'
),
gdal.SpatialReference(4326))
new_pnt = pnt.transform(c2w, clone=True)
self.assertEqual(new_pnt.srid, 4326)
self.assertAlmostEqual(new_pnt.x, 1, 3)
self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
# ### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
with self.assertRaises(TypeError):
poly.__setitem__(0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup:
new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
# ### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in range(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting each ring in the polygon by 500.
for j in range(len(poly)):
r = poly[j]
for k in range(len(r)):
r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
# mpoly[0][0][0] = (3.14, 2.71)
# self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
# self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
# del mpoly
def test_point_list_assignment(self):
p = Point(0, 0)
p[:] = (1, 2, 3)
self.assertEqual(p, Point(1, 2, 3))
p[:] = ()
self.assertEqual(p.wkt, Point())
p[:] = (1, 2)
self.assertEqual(p.wkt, Point(1, 2))
with self.assertRaises(ValueError):
p[:] = (1,)
with self.assertRaises(ValueError):
p[:] = (1, 2, 3, 4, 5)
def test_linestring_list_assignment(self):
ls = LineString((0, 0), (1, 1))
ls[:] = ()
self.assertEqual(ls, LineString())
ls[:] = ((0, 0), (1, 1), (2, 2))
self.assertEqual(ls, LineString((0, 0), (1, 1), (2, 2)))
with self.assertRaises(ValueError):
ls[:] = (1,)
def test_linearring_list_assignment(self):
ls = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
ls[:] = ()
self.assertEqual(ls, LinearRing())
ls[:] = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
self.assertEqual(ls, LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
with self.assertRaises(ValueError):
ls[:] = ((0, 0), (1, 1), (2, 2))
def test_polygon_list_assignment(self):
pol = Polygon()
pol[:] = (((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),)
self.assertEqual(pol, Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),))
pol[:] = ()
self.assertEqual(pol, Polygon())
def test_geometry_collection_list_assignment(self):
p = Point()
gc = GeometryCollection()
gc[:] = [p]
self.assertEqual(gc, GeometryCollection(p))
gc[:] = ()
self.assertEqual(gc, GeometryCollection())
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2., 3., 8.), pnt.coords)
with self.assertRaises(TypeError):
pnt.tuple = (1., 2.)
pnt.coords = (1., 2., 3.)
self.assertEqual((1., 2., 3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
with self.assertRaises(TypeError):
ls.__setitem__(0, (1., 2.))
ls[0] = (1., 2., 3.)
self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
geoms = [
GeometryCollection([]),
fromstr('GEOMETRYCOLLECTION EMPTY'),
GeometryCollection(),
fromstr('POINT EMPTY'),
Point(),
fromstr('LINESTRING EMPTY'),
LineString(),
fromstr('POLYGON EMPTY'),
Polygon(),
fromstr('MULTILINESTRING EMPTY'),
MultiLineString(),
fromstr('MULTIPOLYGON EMPTY'),
MultiPolygon(()),
MultiPolygon(),
]
if numpy:
geoms.append(LineString(numpy.array([])))
for g in geoms:
self.assertIs(g.empty, True)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
with self.assertRaises(IndexError):
g.x
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertIs(lr.empty, True)
with self.assertRaises(IndexError):
lr.__getitem__(0)
else:
with self.assertRaises(IndexError):
g.__getitem__(0)
def test_collection_dims(self):
gc = GeometryCollection([])
self.assertEqual(gc.dims, -1)
gc = GeometryCollection(Point(0, 0))
self.assertEqual(gc.dims, 0)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Point(0, 0))
self.assertEqual(gc.dims, 1)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Polygon(((0, 0), (0, 1), (1, 1), (0, 0))), Point(0, 0))
self.assertEqual(gc.dims, 2)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
def test_gdal(self):
"Testing `ogr` and `srs` properties."
g1 = fromstr('POINT(5 23)')
self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
self.assertIsNone(g1.srs)
g1_3d = fromstr('POINT(5 23 8)')
self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
self.assertEqual(g1_3d.ogr.z, 8)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
self.assertIsInstance(g2.srs, gdal.SpatialReference)
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test_transform(self):
"Testing `transform` method."
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test_transform_3d(self):
p3d = GEOSGeometry('POINT (5 23 100)', 4326)
p3d.transform(2774)
self.assertEqual(p3d.z, 100)
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertIsNot(g1, g, "Clone didn't happen")
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
with self.assertRaises(GEOSException):
g.transform(2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
with self.assertRaises(GEOSException):
g.transform(2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
with self.assertRaises(GEOSException):
g.transform(2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
with self.assertRaises(GEOSException):
g.transform(2774, clone=True)
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_pickle(self):
"Testing pickling and unpickling support."
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
for geom in tgeoms:
s1 = pickle.dumps(geom)
g1 = pickle.loads(s1)
self.assertEqual(geom, g1)
self.assertEqual(geom.srid, g1.srid)
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
for pnt in pnts:
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(mpoly.covers(pnt), prep.covers(pnt))
self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
self.assertTrue(prep.disjoint(Point(-5, -5)))
poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
self.assertTrue(prep.overlaps(poly))
poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
self.assertTrue(prep.touches(poly))
poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
self.assertTrue(prep.within(poly))
# Original geometry deletion should not crash the prepared one (#21662)
del mpoly
self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, str)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, str)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
def test_linearref(self):
"Testing linear referencing"
ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
self.assertEqual(ls.project(Point(0, 20)), 10.0)
self.assertEqual(ls.project(Point(7, 6)), 24)
self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
self.assertEqual(ls.interpolate(10), Point(0, 10))
self.assertEqual(ls.interpolate(24), Point(10, 6))
self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
self.assertEqual(mls.project(Point(0, 20)), 10)
self.assertEqual(mls.project(Point(7, 6)), 16)
self.assertEqual(mls.interpolate(9), Point(0, 9))
self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_deconstructible(self):
"""
Geometry classes should be deconstructible.
"""
point = Point(4.337844, 50.827537, srid=4326)
path, args, kwargs = point.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.point.Point')
self.assertEqual(args, (4.337844, 50.827537))
self.assertEqual(kwargs, {'srid': 4326})
ls = LineString(((0, 0), (1, 1)))
path, args, kwargs = ls.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString')
self.assertEqual(args, (((0, 0), (1, 1)),))
self.assertEqual(kwargs, {})
ls2 = LineString([Point(0, 0), Point(1, 1)], srid=4326)
path, args, kwargs = ls2.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString')
self.assertEqual(args, ([Point(0, 0), Point(1, 1)],))
self.assertEqual(kwargs, {'srid': 4326})
ext_coords = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
int_coords = ((0.4, 0.4), (0.4, 0.6), (0.6, 0.6), (0.6, 0.4), (0.4, 0.4))
poly = Polygon(ext_coords, int_coords)
path, args, kwargs = poly.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.polygon.Polygon')
self.assertEqual(args, (ext_coords, int_coords))
self.assertEqual(kwargs, {})
lr = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
path, args, kwargs = lr.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LinearRing')
self.assertEqual(args, ((0, 0), (0, 1), (1, 1), (0, 0)))
self.assertEqual(kwargs, {})
mp = MultiPoint(Point(0, 0), Point(1, 1))
path, args, kwargs = mp.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPoint')
self.assertEqual(args, (Point(0, 0), Point(1, 1)))
self.assertEqual(kwargs, {})
ls1 = LineString((0, 0), (1, 1))
ls2 = LineString((2, 2), (3, 3))
mls = MultiLineString(ls1, ls2)
path, args, kwargs = mls.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiLineString')
self.assertEqual(args, (ls1, ls2))
self.assertEqual(kwargs, {})
p1 = Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
p2 = Polygon(((1, 1), (1, 2), (2, 2), (1, 1)))
mp = MultiPolygon(p1, p2)
path, args, kwargs = mp.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPolygon')
self.assertEqual(args, (p1, p2, ))
self.assertEqual(kwargs, {})
poly = Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
gc = GeometryCollection(Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly)
path, args, kwargs = gc.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.GeometryCollection')
self.assertEqual(args, (Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly))
self.assertEqual(kwargs, {})
def test_subclassing(self):
"""
GEOSGeometry subclass may itself be subclassed without being forced-cast
to the parent class during `__init__`.
"""
class ExtendedPolygon(Polygon):
def __init__(self, *args, data=0, **kwargs):
super().__init__(*args, **kwargs)
self._data = data
def __str__(self):
return "EXT_POLYGON - data: %d - %s" % (self._data, self.wkt)
ext_poly = ExtendedPolygon(((0, 0), (0, 1), (1, 1), (0, 0)), data=3)
self.assertEqual(type(ext_poly), ExtendedPolygon)
# ExtendedPolygon.__str__ should be called (instead of Polygon.__str__).
self.assertEqual(str(ext_poly), "EXT_POLYGON - data: 3 - POLYGON ((0 0, 0 1, 1 1, 0 0))")
self.assertJSONEqual(
ext_poly.json,
'{"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], "type": "Polygon"}',
)
def test_geos_version_tuple(self):
versions = (
(b'3.0.0rc4-CAPI-1.3.3', (3, 0, 0)),
(b'3.0.0-CAPI-1.4.1', (3, 0, 0)),
(b'3.4.0dev-CAPI-1.8.0', (3, 4, 0)),
(b'3.4.0dev-CAPI-1.8.0 r0', (3, 4, 0)),
(b'3.6.2-CAPI-1.10.2 4d2925d6', (3, 6, 2)),
)
for version_string, version_tuple in versions:
with self.subTest(version_string=version_string):
with mock.patch('django.contrib.gis.geos.libgeos.geos_version', lambda: version_string):
self.assertEqual(geos_version_tuple(), version_tuple)
def test_from_gml(self):
self.assertEqual(
GEOSGeometry('POINT(0 0)'),
GEOSGeometry.from_gml(
'<gml:Point gml:id="p21" srsName="http://www.opengis.net/def/crs/EPSG/0/4326">'
' <gml:pos srsDimension="2">0 0</gml:pos>'
'</gml:Point>'
),
)
def test_from_ewkt(self):
self.assertEqual(GEOSGeometry.from_ewkt('SRID=1;POINT(1 1)'), Point(1, 1, srid=1))
self.assertEqual(GEOSGeometry.from_ewkt('POINT(1 1)'), Point(1, 1))
def test_from_ewkt_empty_string(self):
msg = 'Expected WKT but got an empty string.'
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('')
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('SRID=1;')
def test_from_ewkt_invalid_srid(self):
msg = 'EWKT has invalid SRID part.'
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('SRUD=1;POINT(1 1)')
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('SRID=WGS84;POINT(1 1)')
def test_normalize(self):
g = MultiPoint(Point(0, 0), Point(2, 2), Point(1, 1))
self.assertIsNone(g.normalize())
self.assertTrue(g.equals_exact(MultiPoint(Point(2, 2), Point(1, 1), Point(0, 0))))
def test_empty_point(self):
p = Point(srid=4326)
self.assertEqual(p.ogr.ewkt, p.ewkt)
self.assertEqual(p.transform(2774, clone=True), Point(srid=2774))
p.transform(2774)
self.assertEqual(p, Point(srid=2774))
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fluid Metrics
"""
from __future__ import print_function
import numpy as np
import copy
import warnings
import six
from .layer_helper import LayerHelper
from .initializer import Constant
from . import unique_name
from .framework import Program, Variable, program_guard
from . import layers
__all__ = [
'MetricBase',
'CompositeMetric',
'Precision',
'Recall',
'Accuracy',
'ChunkEvaluator',
'EditDistance',
'DetectionMAP',
'Auc',
]
def _is_numpy_(var):
return isinstance(var, (np.ndarray, np.generic))
def _is_number_(var):
return isinstance(var, int) or isinstance(var, np.int64) or isinstance(
var, float) or (isinstance(var, np.ndarray) and var.shape == (1, ))
def _is_number_or_matrix_(var):
return _is_number_(var) or isinstance(var, np.ndarray)
class MetricBase(object):
"""
Base Class for all Metrics.
    MetricBase defines a group of interfaces for model evaluation methods.
    Metrics accumulate metric states between consecutive mini-batches; at
    every mini-batch, use the update interface to add the current mini-batch
    value to the global states. Use eval to compute the accumulated metric
    value since the last reset(), or from scratch.
    If you need to define a new metric, please inherit from MetricBase and
    provide a custom implementation.
    Args:
        name(str): The name of the metric instance, such as "accuracy".
                  It is needed if you want to distinguish different metrics
                  in a model.
"""
def __init__(self, name):
        self._name = str(name) if name is not None else self.__class__.__name__
def __str__(self):
return self._name
def reset(self):
"""
        reset() clears the states of the metric. By default, the states
        are the members that do not have a "_" prefix; reset() sets them to
        their initial states. If you violate this implicit naming rule,
        please also customize the reset interface.
"""
states = {
attr: value
for attr, value in six.iteritems(self.__dict__)
if not attr.startswith("_")
}
for attr, value in six.iteritems(states):
if isinstance(value, int):
setattr(self, attr, 0)
elif isinstance(value, float):
setattr(self, attr, .0)
elif isinstance(value, (np.ndarray, np.generic)):
setattr(self, attr, np.zeros_like(value))
else:
setattr(self, attr, None)
def get_config(self):
"""
Get the metric and current states.
        The states are the members that do not have a "_" prefix.
Args:
None
Returns:
dict: a dict of metric and states
"""
states = {
attr: value
for attr, value in six.iteritems(self.__dict__)
if not attr.startswith("_")
}
config = {}
config.update({"name": self._name, "states": copy.deepcopy(states)})
return config
def update(self, preds, labels):
"""
Updates the metric states at every minibatch.
        Users can compute the mini-batch metric via pure Python, or
        via a C++ operator.
        Args:
            preds(numpy.array): the predictions of the current mini-batch
            labels(numpy.array): the labels of the current mini-batch; if the labels are
                               one-hot or soft labels, a custom update rule is needed.
"""
raise NotImplementedError(
"Should not use it directly, please extend it.")
def eval(self):
"""
        Evaluate the current metric based on the accumulated states.
Returns:
float|list(float)|numpy.array: the metrics via Python.
"""
raise NotImplementedError(
"Should not use it directly, please extend it.")
class CompositeMetric(MetricBase):
"""
    Compose multiple metrics in one instance.
    For example, merge F1, accuracy and recall into one metric.
    Examples:
        .. code-block:: python
            labels = fluid.layers.data(name="label", shape=[1], dtype="int32")
            data = fluid.layers.data(name="data", shape=[32, 32], dtype="int32")
            pred = fluid.layers.fc(input=data, size=1000, act="tanh")
            comp = fluid.metrics.CompositeMetric()
            acc = fluid.metrics.Precision()
            recall = fluid.metrics.Recall()
            comp.add_metric(acc)
            comp.add_metric(recall)
            for pass_id in range(PASSES):
                comp.reset()
                for data in train_reader():
                    loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
                    comp.update(preds=preds, labels=labels)
                numpy_acc, numpy_recall = comp.eval()
"""
def __init__(self, name=None):
super(CompositeMetric, self).__init__(name)
self._metrics = []
def add_metric(self, metric):
"""
        Add one metric instance to CompositeMetric.
        Args:
            metric: an instance of MetricBase.
        """
        if not isinstance(metric, MetricBase):
            raise ValueError("SubMetric should inherit from MetricBase.")
self._metrics.append(metric)
def update(self, preds, labels):
"""
        Update every metric in sequence.
        Args:
            preds(numpy.array): the predictions of the current mini-batch
            labels(numpy.array): the labels of the current mini-batch; if the labels are
                               one-hot or soft labels, a custom update rule is needed.
"""
for m in self._metrics:
m.update(preds, labels)
def eval(self):
"""
        Evaluate every metric in sequence.
        Returns:
            list(float|numpy.array): a list of metric values in Python.
"""
ans = []
for m in self._metrics:
ans.append(m.eval())
return ans
class Precision(MetricBase):
"""
Precision (also called positive predictive value) is the fraction of
relevant instances among the retrieved instances.
https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers
    Note that precision is different from accuracy in binary classifiers:
    accuracy = (true positives + true negatives) / total instances
    precision = true positives / instances predicted positive
    Examples:
        .. code-block:: python
            metric = fluid.metrics.Precision()
            for pass_id in range(PASSES):
metric.reset()
for data in train_reader():
loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
metric.update(preds=preds, labels=labels)
numpy_precision = metric.eval()
"""
def __init__(self, name=None):
super(Precision, self).__init__(name)
self.tp = 0 # true positive
self.fp = 0 # false positive
def update(self, preds, labels):
if not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray.")
if not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray.")
sample_num = labels.shape[0]
preds = np.rint(preds).astype("int32")
        for i in range(sample_num):
            pred = preds[i]
            label = labels[i]
            # precision is computed over instances predicted positive:
            # tp when the prediction is confirmed by the label, fp otherwise.
            if pred == 1:
                if label == 1:
                    self.tp += 1
                else:
                    self.fp += 1
def eval(self):
ap = self.tp + self.fp
return float(self.tp) / ap if ap != 0 else .0
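# Illustrative sketch (not part of the original module): a hypothetical helper showing
# how Precision can be driven with plain numpy arrays outside a training loop. The
# prediction and label values below are assumptions chosen only for illustration.
def _example_precision_usage():
    preds = np.array([0.9, 0.2, 0.8, 0.4])  # predicted probabilities
    labels = np.array([1, 0, 0, 1])          # ground-truth labels
    metric = Precision()
    metric.update(preds=preds, labels=labels)
    return metric.eval()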
class Recall(MetricBase):
"""
Recall (also known as sensitivity) is the fraction of
relevant instances that have been retrieved over the
total amount of relevant instances
https://en.wikipedia.org/wiki/Precision_and_recall
Examples:
.. code-block:: python
metric = fluid.metrics.Recall()
            for pass_id in range(PASSES):
metric.reset()
for data in train_reader():
loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
metric.update(preds=preds, labels=labels)
numpy_recall = metric.eval()
"""
def __init__(self, name=None):
super(Recall, self).__init__(name)
self.tp = 0 # true positive
        self.fn = 0  # false negative
def update(self, preds, labels):
if not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray.")
if not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray.")
        sample_num = labels.shape[0]
        for i in range(sample_num):
            pred = np.rint(preds[i]).astype("int32")
            label = labels[i]
            if label == 1:
                if pred == label:
                    self.tp += 1
                else:
                    # a relevant instance that was missed is a false negative
                    self.fn += 1
def eval(self):
recall = self.tp + self.fn
return float(self.tp) / recall if recall != 0 else .0
class Accuracy(MetricBase):
"""
Accumulate the accuracy from minibatches and compute the average accuracy
for every pass.
https://en.wikipedia.org/wiki/Accuracy_and_precision
Args:
name: the metrics name
Examples:
.. code-block:: python
labels = fluid.layers.data(name="data", shape=[1], dtype="int32")
data = fluid.layers.data(name="data", shape=[32, 32], dtype="int32")
pred = fluid.layers.fc(input=data, size=1000, act="tanh")
minibatch_accuracy = fluid.layers.accuracy(pred, label)
accuracy_evaluator = fluid.metrics.Accuracy()
for pass in range(PASSES):
accuracy_evaluator.reset()
for data in train_reader():
                    batch_size = len(data)
                    loss, acc = exe.run(fetch_list=[cost, minibatch_accuracy])
                    accuracy_evaluator.update(value=acc, weight=batch_size)
numpy_acc = accuracy_evaluator.eval()
"""
def __init__(self, name=None):
super(Accuracy, self).__init__(name)
self.value = .0
self.weight = .0
def update(self, value, weight):
"""
Update minibatch states.
Args:
value(float|numpy.array): accuracy of one minibatch.
weight(int|float): batch size.
"""
if not _is_number_or_matrix_(value):
raise ValueError(
"The 'value' must be a number(int, float) or a numpy ndarray.")
if not _is_number_(weight):
raise ValueError("The 'weight' must be a number(int, float).")
self.value += value * weight
self.weight += weight
def eval(self):
if self.weight == 0:
raise ValueError("There is no data in Accuracy Metrics. \
Please check layers.accuracy output has added to Accuracy.")
return self.value / self.weight
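# Illustrative sketch (not part of the original module): Accuracy.eval() returns the
# weighted average sum(value_i * weight_i) / sum(weight_i). The numbers below are
# assumptions chosen only to make the weighting visible.
def _example_accuracy_usage():
    metric = Accuracy()
    metric.update(value=0.9, weight=64)  # mini-batch of 64 samples with accuracy 0.9
    metric.update(value=0.5, weight=16)  # smaller mini-batch pulls the average down less
    return metric.eval()                 # (0.9*64 + 0.5*16) / 80 = 0.82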
class ChunkEvaluator(MetricBase):
"""
Accumulate counter numbers output by chunk_eval from mini-batches and
    compute the precision, recall and F1-score using the accumulated counter
    numbers.
    For some basics of chunking, please refer to
    `Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .
    ChunkEvaluator computes the precision, recall, and F1-score of chunk detection,
and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
Examples:
.. code-block:: python
labels = fluid.layers.data(name="data", shape=[1], dtype="int32")
data = fluid.layers.data(name="data", shape=[32, 32], dtype="int32")
pred = fluid.layers.fc(input=data, size=1000, act="tanh")
precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks = layers.chunk_eval(
input=pred,
label=label)
metric = fluid.metrics.ChunkEvaluator()
for data in train_reader():
loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
metric.update(num_infer_chunks, num_label_chunks, num_correct_chunks)
numpy_precision, numpy_recall, numpy_f1 = metric.eval()
"""
def __init__(self, name=None):
super(ChunkEvaluator, self).__init__(name)
self.num_infer_chunks = 0
self.num_label_chunks = 0
self.num_correct_chunks = 0
def update(self, num_infer_chunks, num_label_chunks, num_correct_chunks):
"""
        Update the states based on the layers.chunk_eval() outputs.
Args:
num_infer_chunks(int|numpy.array): The number of chunks in Inference on the given minibatch.
num_label_chunks(int|numpy.array): The number of chunks in Label on the given mini-batch.
num_correct_chunks(int|float|numpy.array): The number of chunks both in Inference and Label on the
given mini-batch.
"""
if not _is_number_or_matrix_(num_infer_chunks):
raise ValueError(
"The 'num_infer_chunks' must be a number(int) or a numpy ndarray."
)
if not _is_number_or_matrix_(num_label_chunks):
raise ValueError(
"The 'num_label_chunks' must be a number(int, float) or a numpy ndarray."
)
if not _is_number_or_matrix_(num_correct_chunks):
raise ValueError(
"The 'num_correct_chunks' must be a number(int, float) or a numpy ndarray."
)
self.num_infer_chunks += num_infer_chunks
self.num_label_chunks += num_label_chunks
self.num_correct_chunks += num_correct_chunks
def eval(self):
precision = float(
self.num_correct_chunks
) / self.num_infer_chunks if self.num_infer_chunks else 0
recall = float(self.num_correct_chunks
) / self.num_label_chunks if self.num_label_chunks else 0
f1_score = float(2 * precision * recall) / (
precision + recall) if self.num_correct_chunks else 0
return precision, recall, f1_score
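# Illustrative sketch (not part of the original module): ChunkEvaluator only needs the
# three counters produced by layers.chunk_eval(); precision, recall and F1 then follow
# from the accumulated counts. The counter values below are assumptions for illustration.
def _example_chunk_evaluator_usage():
    metric = ChunkEvaluator()
    metric.update(num_infer_chunks=10, num_label_chunks=8, num_correct_chunks=6)
    metric.update(num_infer_chunks=5, num_label_chunks=7, num_correct_chunks=4)
    # precision = 10/15, recall = 10/15, f1 = 2*p*r/(p+r)
    return metric.eval()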
class EditDistance(MetricBase):
"""
Edit distance is a way of quantifying how dissimilar two strings
(e.g., words) are to one another by counting the minimum number
of operations required to transform one string into the other.
Refer to https://en.wikipedia.org/wiki/Edit_distance
Accumulate edit distance sum and sequence number from mini-batches and
compute the average edit_distance and instance error of all batches.
Args:
name: the metrics name
Examples:
.. code-block:: python
distances, seq_num = fluid.layers.edit_distance(input, label)
distance_evaluator = fluid.metrics.EditDistance()
            for epoch in range(PASS_NUM):
distance_evaluator.reset()
for data in batches:
loss = exe.run(fetch_list=[cost] + list(edit_distance_metrics))
distance_evaluator.update(distances, seq_num)
distance, instance_error = distance_evaluator.eval()
In the above example:
- 'distance' is the average of the edit distance in a pass.
- 'instance_error' is the instance error rate in a pass.
"""
def __init__(self, name):
super(EditDistance, self).__init__(name)
self.total_distance = .0
self.seq_num = 0
self.instance_error = 0
def update(self, distances, seq_num):
if not _is_numpy_(distances):
raise ValueError("The 'distances' must be a numpy ndarray.")
if not _is_number_(seq_num):
raise ValueError("The 'seq_num' must be a number(int, float).")
seq_right_count = np.sum(distances == 0)
total_distance = np.sum(distances)
self.seq_num += seq_num
self.instance_error += seq_num - seq_right_count
self.total_distance += total_distance
def eval(self):
if self.seq_num == 0:
raise ValueError(
"There is no data in EditDistance Metric. Please check layers.edit_distance output has been added to EditDistance."
)
avg_distance = self.total_distance / self.seq_num
avg_instance_error = self.instance_error / float(self.seq_num)
return avg_distance, avg_instance_error
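# Illustrative sketch (not part of the original module): EditDistance.eval() returns the
# average edit distance per sequence and the fraction of sequences whose distance is
# non-zero. The distances array below is an assumption chosen only for illustration.
def _example_edit_distance_usage():
    metric = EditDistance(name="example_distance")
    distances = np.array([0.0, 2.0, 1.0, 0.0])  # per-sequence edit distances of one batch
    metric.update(distances, seq_num=4)
    return metric.eval()  # (0.75, 0.5): average distance, instance error rate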
class Auc(MetricBase):
"""
    The AUC metric is for binary classification.
    Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve
    Note that this metric computes the value natively in Python.
    If you are concerned about speed, please use fluid.layers.auc instead.
    The `auc` function creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives`, that are used to
    compute the AUC. To discretize the AUC curve, a linearly spaced set of
    thresholds is used to compute pairs of recall and precision values. The area
    under the ROC curve is then computed using the height of the recall
    values by the false positive rate, while the area under the PR curve is
    computed using the height of the precision values by the recall.
    Args:
        name: metric name
        curve: Specifies the name of the curve to be computed, 'ROC' [default] or
              'PR' for the Precision-Recall curve.
        NOTE: only the ROC curve type is implemented in Python for now.
Examples:
.. code-block:: python
pred = fluid.layers.fc(input=data, size=1000, act="tanh")
metric = fluid.metrics.Auc()
for data in train_reader():
loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
metric.update(preds, labels)
numpy_auc = metric.eval()
"""
def __init__(self, name, curve='ROC', num_thresholds=4095):
super(Auc, self).__init__(name=name)
self._curve = curve
self._num_thresholds = num_thresholds
_num_pred_buckets = num_thresholds + 1
self._stat_pos = [0] * _num_pred_buckets
self._stat_neg = [0] * _num_pred_buckets
def update(self, preds, labels):
if not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray.")
if not _is_numpy_(preds):
raise ValueError("The 'predictions' must be a numpy ndarray.")
for i, lbl in enumerate(labels):
value = preds[i, 1]
bin_idx = int(value * self._num_thresholds)
assert bin_idx <= self._num_thresholds
if lbl:
self._stat_pos[bin_idx] += 1.0
else:
self._stat_neg[bin_idx] += 1.0
@staticmethod
def trapezoid_area(x1, x2, y1, y2):
return abs(x1 - x2) * (y1 + y2) / 2.0
def eval(self):
tot_pos = 0.0
tot_neg = 0.0
auc = 0.0
idx = self._num_thresholds
while idx >= 0:
tot_pos_prev = tot_pos
tot_neg_prev = tot_neg
tot_pos += self._stat_pos[idx]
tot_neg += self._stat_neg[idx]
auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos,
tot_pos_prev)
idx -= 1
return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0
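# Illustrative sketch (not part of the original module): Auc buckets each positive-class
# score into one of num_thresholds + 1 bins and then integrates the ROC curve with the
# trapezoid rule over those bins. preds is expected to have shape (batch, 2), with
# column 1 holding the positive-class probability. The values below are assumptions
# chosen only for illustration.
def _example_auc_usage():
    metric = Auc(name="example_auc")
    preds = np.array([[0.2, 0.8],
                      [0.7, 0.3],
                      [0.1, 0.9],
                      [0.6, 0.4]])
    labels = np.array([1, 0, 1, 0])
    metric.update(preds, labels)
    return metric.eval()  # 1.0 here, since every positive scores above every negative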
class DetectionMAP(object):
"""
Calculate the detection mean average precision (mAP).
The general steps are as follows:
1. calculate the true positive and false positive according to the input
of detection and labels.
2. calculate mAP value, support two versions: '11 point' and 'integral'.
Please get more information from the following articles:
https://sanchom.wordpress.com/tag/average-precision/
https://arxiv.org/abs/1512.02325
Args:
input (Variable): The detection results, which is a LoDTensor with shape
[M, 6]. The layout is [label, confidence, xmin, ymin, xmax, ymax].
gt_label (Variable): The ground truth label index, which is a LoDTensor
with shape [N, 1].
gt_box (Variable): The ground truth bounding box (bbox), which is a
LoDTensor with shape [N, 4]. The layout is [xmin, ymin, xmax, ymax].
gt_difficult (Variable|None): Whether this ground truth is a difficult
bounding bbox, which can be a LoDTensor [N, 1] or not set. If None,
it means all the ground truth labels are not difficult bbox.
class_num (int): The class number.
        background_label (int): The index of the background label; the background
            label will be ignored. If set to -1, then all categories will be
            considered, 0 by default.
        overlap_threshold (float): The threshold for deciding true/false
            positive, 0.5 by default.
        evaluate_difficult (bool): Whether to consider difficult ground truth
            for evaluation, True by default. This argument does not work when
            gt_difficult is None.
ap_version (string): The average precision calculation ways, it must be
'integral' or '11point'. Please check
https://sanchom.wordpress.com/tag/average-precision/ for details.
- 11point: the 11-point interpolated average precision.
- integral: the natural integral of the precision-recall curve.
Examples:
.. code-block:: python
exe = fluid.Executor(place)
map_evaluator = fluid.Evaluator.DetectionMAP(input,
gt_label, gt_box, gt_difficult)
cur_map, accum_map = map_evaluator.get_map_var()
fetch = [cost, cur_map, accum_map]
            for epoch in range(PASS_NUM):
map_evaluator.reset(exe)
for data in batches:
loss, cur_map_v, accum_map_v = exe.run(fetch_list=fetch)
In the above example:
- 'cur_map_v' is the mAP of current mini-batch.
- 'accum_map_v' is the accumulative mAP of one pass.
"""
def __init__(self,
input,
gt_label,
gt_box,
gt_difficult=None,
class_num=None,
background_label=0,
overlap_threshold=0.5,
evaluate_difficult=True,
ap_version='integral'):
self.helper = LayerHelper('map_eval')
gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype)
if gt_difficult:
gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype)
label = layers.concat([gt_label, gt_difficult, gt_box], axis=1)
else:
label = layers.concat([gt_label, gt_box], axis=1)
# calculate mean average precision (mAP) of current mini-batch
map = layers.detection_map(
input,
label,
class_num,
background_label,
overlap_threshold=overlap_threshold,
evaluate_difficult=evaluate_difficult,
ap_version=ap_version)
states = []
states.append(
self._create_state(
dtype='int32', shape=None, suffix='accum_pos_count'))
states.append(
self._create_state(
dtype='float32', shape=None, suffix='accum_true_pos'))
states.append(
self._create_state(
dtype='float32', shape=None, suffix='accum_false_pos'))
var = self._create_state(dtype='int32', shape=[1], suffix='has_state')
self.helper.set_variable_initializer(
var, initializer=Constant(value=int(0)))
self.has_state = var
# calculate accumulative mAP
accum_map = layers.detection_map(
input,
label,
class_num,
background_label,
overlap_threshold=overlap_threshold,
evaluate_difficult=evaluate_difficult,
has_state=self.has_state,
input_states=states,
out_states=states,
ap_version=ap_version)
layers.fill_constant(
shape=self.has_state.shape,
value=1,
dtype=self.has_state.dtype,
out=self.has_state)
self.cur_map = map
self.accum_map = accum_map
def _create_state(self, suffix, dtype, shape):
"""
Create state variable.
Args:
suffix(str): the state suffix.
dtype(str|core.VarDesc.VarType): the state data type
shape(tuple|list): the shape of state
Returns: State variable
"""
state = self.helper.create_variable(
name="_".join([unique_name.generate(self.helper.name), suffix]),
persistable=True,
dtype=dtype,
shape=shape)
return state
def get_map_var(self):
"""
        Returns: mAP variable of the current mini-batch and
            the accumulative mAP variable across mini-batches.
"""
return self.cur_map, self.accum_map
def reset(self, executor, reset_program=None):
"""
        Reset metric states at the beginning of each pass / user-specified batch.
Args:
            executor(Executor): an executor for executing
                the reset_program.
reset_program(Program|None): a single Program for reset process.
If None, will create a Program.
"""
def _clone_var_(block, var):
assert isinstance(var, Variable)
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=var.persistable)
if reset_program is None:
reset_program = Program()
with program_guard(main_program=reset_program):
var = _clone_var_(reset_program.current_block(), self.has_state)
layers.fill_constant(
shape=var.shape, value=0, dtype=var.dtype, out=var)
executor.run(reset_program)
|
|
# -*- coding:Utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib.pylab as plt
import scipy.linalg as la
def dist_nonvectorized(A,B,C,D,alpha,beta):
"""
Parameters
----------
A
B
C
D
alpha
beta
    Returns
    -------
    distance f ( = g, computed in another way as a cross-check)
"""
AC=C-A
CD=D-C
BA=A-B
u0 = np.dot(AC,AC)
u4 = np.dot(BA,BA)
u5 = np.dot(CD,CD)
u1 = np.dot(BA,AC)
u2 = np.dot(CD,AC)
u3 = np.dot(CD,BA)
f = u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5
M = A - alpha*BA
N = C + beta*CD
g = np.dot(M-N,M-N)
return(f,g)
def dmin3d_nonvectorized(A,B,C,D):
"""
    dmin3d evaluates the minimal distance between 2 segments
    (non-vectorized version; see dmin3d for the vectorized one)
    A : (3,) initial point of segment 1
    B : (3,) end point of segment 1
    C : (3,) initial point of segment 2
    D : (3,) end point of segment 2
"""
AC=C-A
CD=D-C
BA=A-B
u0 = np.dot(AC,AC)
u4 = np.dot(BA,BA)
u5 = np.dot(CD,CD)
u1 = np.dot(BA,AC)
u2 = np.dot(CD,AC)
u3 = np.dot(CD,BA)
den = u4*u5-u3*u3
alpha = (u2*u3-u1*u5)/(1.*den)
beta = (u1*u3-u2*u4)/(1.*den)
#~ print ' dmin**2 ', u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5
dmin = np.sqrt(u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5)
#~ print 'dmin', dmin
return(alpha,beta,dmin)
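# Illustrative sketch (not part of the original file): the closed-form (alpha, beta)
# returned by dmin3d_nonvectorized minimizes f(alpha, beta) over the two segment
# parameters. The hypothetical helper below cross-checks that result against a coarse
# brute-force grid search; the grid resolution is an assumption chosen for illustration.
def _check_dmin3d_nonvectorized(A, B, C, D, npts=101):
    alpha, beta, dmin = dmin3d_nonvectorized(A, B, C, D)
    grid = np.linspace(0., 1., npts)
    best = min(dist_nonvectorized(A, B, C, D, a, b)[0]
               for a in grid for b in grid)
    # dmin is the unconstrained line-to-line optimum, so it can be smaller than the
    # best distance found on the segments themselves
    return dmin, np.sqrt(best)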
# def dist_old(A,B,C,D,alpha,beta):
# """
# Parameters
# ----------
# A
# B
# C
# D
# alpha
# beta
# """
# if len(A.shape) ==1 :
# A=A.reshape(3,1)
# if len(B.shape) ==1 :
# B=B.reshape(3,1)
# if len(C.shape) ==1 :
# C=C.reshape(3,1)
# if len(D.shape) ==1 :
# D=D.reshape(3,1)
# AC=C-A
# CD=D-C
# BA=A-B
# u0 = np.einsum('ij,ij->j',AC,AC)#np.dot(AC,AC)
# u4 = np.einsum('ij,ij->j',BA,BA)#np.dot(BA,BA)
# u5 = np.einsum('ij,ij->j',CD,CD)#np.dot(CD,CD)
# u1 = np.einsum('ij,ij->j',BA,AC)#np.dot(BA,AC)
# u2 = np.einsum('ij,ij->j',CD,AC)#np.dot(CD,AC)
# u3 = np.einsum('ij,ij->j',CD,BA)#np.dot(CD,BA)
# f = u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5
# M = A - alpha*BA
# N = C + beta*CD
# g = np.einsum('ij,ij->j',M-N,M-N)#np.dot(M-N,M-N)
# return(f,g)
# def dmin3d_old(A,B,C,D):
# """
# dmin3d evaluate the minimal distance between 2 set of segments
# this should be vectorized
# A : (3xN) initial point segment 1
# B (3xN) end point segment 1
# C (3xN) starting point segment 2
# D (3xN) end point segment 2
# """
# if len(A.shape) ==1 :
# A=A.reshape(3,1)
# if len(B.shape) ==1 :
# B=B.reshape(3,1)
# if len(C.shape) ==1 :
# C=C.reshape(3,1)
# if len(D.shape) ==1 :
# D=D.reshape(3,1)
# AC=C-A
# CD=D-C
# BA=A-B
# u0 = np.einsum('ij,ij->j',AC,AC)#np.dot(AC,AC)
# u4 = np.einsum('ij,ij->j',BA,BA)#np.dot(BA,BA)
# u5 = np.einsum('ij,ij->j',CD,CD)#np.dot(CD,CD)
# u1 = np.einsum('ij,ij->j',BA,AC)#np.dot(BA,AC)
# u2 = np.einsum('ij,ij->j',CD,AC)#np.dot(CD,AC)
# u3 = np.einsum('ij,ij->j',CD,BA)#np.dot(CD,BA)
# den = u4*u5-u3*u3
# alpha = (u2*u3-u1*u5)/(1.*den)
# beta = (u1*u3-u2*u4)/(1.*den)
# #~ print ' dmin**2 ', u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5
# dmin = np.sqrt(u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5)
# #~ print 'dmin', dmin
# return(alpha,beta,dmin)
def dist(A,B,C,D,alpha,beta):
"""
distance between AB-CD
AB (3xN) or (3xNxK) to add a time axis
CD (3xM) or (3xMxK) to add a time axis
Parameters
----------
A : (3xN) initial point segment 1
[or (3xNxK) initial point segment 1 for K realizations]
B (3xN) end point segment 1
[or (3xNxK) end point segment 1 for K realizations]
C (3xM) starting point segment 2
[or (3xMxK) initial point segment 2 for K realizations]
D (3xM) end point segment 2
[or (3xMxK) end point segment 2 for K realizations]
alpha : (N x M) parametrization
[or (NxMxK)]
beta : (N x M) parametrization
[or (NxMxK)]
Returns
-------
f : N x M
[or (NxMxK)]
g : N x M
[or (NxMxK)]
"""
if len(A.shape) ==1 :
A=A.reshape(3,1)
if len(B.shape) ==1 :
B=B.reshape(3,1)
if len(C.shape) ==1 :
C=C.reshape(3,1)
if len(D.shape) ==1 :
D=D.reshape(3,1)
assert alpha.shape[0] == A.shape[1]
assert alpha.shape[1] == C.shape[1]
assert beta.shape[0] == A.shape[1]
assert beta.shape[1] == C.shape[1]
#3 x N x M
AC = C[:,np.newaxis,:]-A[:,:,np.newaxis]
# 3 x M
CD = D-C
# 3 x N
BA = A-B
#u0 : N x M
u0 = np.einsum('ijk...,ijk...->jk...',AC,AC)#np.dot(AC,AC)
#u4 : N
u4 = np.einsum('ij...,ij...->j...',BA,BA)[:,np.newaxis]#np.dot(BA,BA)
# u5 : M
u5 = np.einsum('ij...,ij...->j...',CD,CD)[np.newaxis,:]#np.dot(CD,CD)
# u1 : N x M
u1 = np.einsum('ij...,ijk...->jk...',BA,AC)#np.dot(BA,AC)
# u2 : N x M
u2 = np.einsum('ik...,ijk...->jk...',CD,AC)#np.dot(CD,AC)
# u3 : N x M
u3 = np.einsum('ik...,ij...->jk...',CD,BA)#np.dot(CD,BA)
# f : N x M
f = u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5
# X : 3 x N x M
X = A[:,:,np.newaxis]-alpha[np.newaxis,:,:]*BA[:,:,np.newaxis] # A - alpha*BA
# Y : 3 x N x M
Y = C[:,np.newaxis,:] + beta[np.newaxis,:,:]*CD[:,np.newaxis,:]# C + beta*CD
#g : N x M
g =np.einsum('ijk...,ijk...->jk...',X-Y,X-Y)
return(f,g)
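# Illustrative sketch (not part of the original file): dist() broadcasts N segments AB
# against M segments CD, so alpha and beta must be (N x M) arrays. The shapes and the
# random data below are assumptions chosen only to show the expected dimensions.
def _example_dist_shapes(N=2, M=3):
    A, B = np.random.rand(3, N), np.random.rand(3, N)
    C, D = np.random.rand(3, M), np.random.rand(3, M)
    alpha = 0.5 * np.ones((N, M))
    beta = 0.5 * np.ones((N, M))
    f, g = dist(A, B, C, D, alpha, beta)
    return f.shape, g.shape  # both (N, M)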
def dmin3d(A,B,C,D):
"""
    dmin3d evaluates the minimal distance between 2 sets of segments
    Note that the number of segments in AB is NOT NECESSARILY the same as in CD
AB (3xN) or (3xNxK) to add a time axis
CD (3xM) or (3xMxK) to add a time axis
Parameters
----------
A : (3xN) initial point segment 1
[or (3xNxK) initial point segment 1 for K realizations]
B (3xN) end point segment 1
[or (3xNxK) end point segment 1 for K realizations]
C (3xM) starting point segment 2
[or (3xMxK) initial point segment 2 for K realizations]
D (3xM) end point segment 2
[or (3xMxK) end point segment 2 for K realizations]
Returns
-------
alpha
parametrization N x M
[or (NxMxK) ]
    beta
parametrization N x M
[or (NxMxK)]
dmin
minimal distance N x M
[or (NxMxK)]
"""
if len(A.shape) ==1 :
A=A.reshape(3,1)
if len(B.shape) ==1 :
B=B.reshape(3,1)
if len(C.shape) ==1 :
C=C.reshape(3,1)
if len(D.shape) ==1 :
D=D.reshape(3,1)
#3 x N x M
AC = C[:,np.newaxis,:]-A[:,:,np.newaxis]
# 3 x M
CD = D-C
# 3 x N
BA = A-B
#u0 : N x M
u0 = np.einsum('ijk...,ijk...->jk...',AC,AC)#np.dot(AC,AC)
#u4 : N
u4 = np.einsum('ij...,ij...->j...',BA,BA)[:,np.newaxis]#np.dot(BA,BA)
# u5 : M
u5 = np.einsum('ij...,ij...->j...',CD,CD)[np.newaxis,:]#np.dot(CD,CD)
# u1 : N x M
u1 = np.einsum('ij...,ijk...->jk...',BA,AC)#np.dot(BA,AC)
# u2 : N x M
u2 = np.einsum('ik...,ijk...->jk...',CD,AC)#np.dot(CD,AC)
# u3 : N x M
u3 = np.einsum('ik...,ij...->jk...',CD,BA)#np.dot(CD,BA)
# den : N x M
den = u4*u5-u3*u3
#alpha = N x M
alpha = (u2*u3-u1*u5)/(1.*den)
# beta = N x M
beta = (u1*u3-u2*u4)/(1.*den)
# dmin : N x M
dmin = np.sqrt(u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5)
return(alpha,beta,dmin)
def segdist(A,B,C,D,hard=True):
"""
distance between AB-CD
    Note that the number of segments in AB is NOT NECESSARILY the same as in CD
(This function gathers dmin3d and dist from DeuxSeg)
AB (3xN) or (3xNxK) to add a time axis
CD (3xM) or (3xMxK) to add a time axis
Parameters
----------
A : (3xN) initial point segment 1
[or (3xNxK) initial point segment 1 for K realizations]
B (3xN) end point segment 1
[or (3xNxK) end point segment 1 for K realizations]
C (3xM) starting point segment 2
[or (3xMxK) initial point segment 2 for K realizations]
D (3xM) end point segment 2
[or (3xMxK) end point segment 2 for K realizations]
    hard : boolean
        if True, the minimal distance between AB and CD must lie on both segments
        if False, the minimal distance can be located on the lines described by AB or CD
Returns
-------
f : N x M
[or (NxMxK)]
g : N x M
[or (NxMxK)]
alpha : (N x M) parametrization
[or (NxMxK)]
beta : (N x M) parametrization
[or (NxMxK)]
dmin
minimal distance N x M
[or (NxMxK)]
"""
if len(A.shape) ==1 :
A=A.reshape(3,1)
if len(B.shape) ==1 :
B=B.reshape(3,1)
if len(C.shape) ==1 :
C=C.reshape(3,1)
if len(D.shape) ==1 :
D=D.reshape(3,1)
#3 x N x M
AC = C[:,np.newaxis,:]-A[:,:,np.newaxis]
# 3 x M
CD = D-C
# 3 x N p
BA = A-B
#u0 : N x M
u0 = np.einsum('ijk...,ijk...->jk...',AC,AC)#np.dot(AC,AC)
#u4 : N
u4 = np.einsum('ij...,ij...->j...',BA,BA)[:,np.newaxis]#np.dot(BA,BA)
# u5 : M
u5 = np.einsum('ij...,ij...->j...',CD,CD)[np.newaxis,:]#np.dot(CD,CD)
# u1 : N x M
u1 = np.einsum('ij...,ijk...->jk...',BA,AC)#np.dot(BA,AC)
# u2 : N x M
u2 = np.einsum('ik...,ijk...->jk...',CD,AC)#np.dot(CD,AC)
# u3 : N x M
u3 = np.einsum('ik...,ij...->jk...',CD,BA)#np.dot(CD,BA)
# den : N x M
den = u4*u5-u3*u3
#alpha = N x M
alpha = (u2*u3-u1*u5)/(1.*den)
# beta = N x M
beta = (u1*u3-u2*u4)/(1.*den)
# dmin : N x M
dmin = np.sqrt(u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5)
    if hard :
        # clamp the parametrization onto the segments; boolean masking works for
        # both the (N x M) and the (N x M x K) case, whereas fancy indexing with
        # three index arrays fails when there is no time axis
        alpha[alpha > 1] = 1.
        alpha[alpha < 0] = 0.
        beta[beta > 1] = 1.
        beta[beta < 0] = 0.
# f : N x M
f = np.sqrt(u0 + 2*(alpha*u1+beta*u2+alpha*beta*u3)+alpha*alpha*u4+ beta*beta*u5)
# X : 3 x N x M
X = A[:,:,np.newaxis]-alpha[np.newaxis,:,:]*BA[:,:,np.newaxis] # A - alpha*BA
# Y : 3 x N x M
Y = C[:,np.newaxis,:] + beta[np.newaxis,:,:]*CD[:,np.newaxis,:]# C + beta*CD
#g : N x M
g =np.sqrt(np.einsum('ijk...,ijk...->jk...',X-Y,X-Y))
return(f,g,X,Y,alpha,beta,dmin)
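# Illustrative sketch (not part of the original file): with hard=True, segdist() clamps
# alpha and beta to [0, 1] before evaluating f, so f is the segment-to-segment distance
# while dmin remains the unconstrained line-to-line distance. Sizes are assumptions.
def _example_segdist(N=2, M=3):
    A, B = np.random.rand(3, N), np.random.rand(3, N)
    C, D = np.random.rand(3, M), np.random.rand(3, M)
    f, g, X, Y, alpha, beta, dmin = segdist(A, B, C, D, hard=True)
    # f >= dmin element-wise: clamping onto the segments can only increase the distance
    return f, dmin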
# <codecell>
if (__name__=="__main__"):
A = np.random.rand(3)
B = np.random.rand(3)
C = np.random.rand(3)
D = np.random.rand(3)
a,b,d=dmin3d_nonvectorized(A,B,C,D)
if a < 0:
a = 0
if a > 1:
a = 1
if b < 0:
b = 0
if b > 1:
b = 1
f, g = dist_nonvectorized(A,B,C,D,a,b)
print(a,b,d)
print(f,g)
print('sqrt ' , np.sqrt(f), np.sqrt(g))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import http_client
from ironic_tempest_plugin.services.baremetal import base
class BaremetalClient(base.BaremetalClient):
"""Base Tempest REST client for Ironic API v1."""
version = '1'
uri_prefix = 'v1'
@base.handle_errors
def list_nodes(self, **kwargs):
"""List all existing nodes."""
return self._list_request('nodes', **kwargs)
@base.handle_errors
def list_nodes_detail(self, **kwargs):
"""Detailed list of all existing nodes."""
return self._list_request('/nodes/detail', **kwargs)
@base.handle_errors
def list_chassis(self):
"""List all existing chassis."""
return self._list_request('chassis')
@base.handle_errors
def list_chassis_nodes(self, chassis_uuid):
"""List all nodes associated with a chassis."""
return self._list_request('/chassis/%s/nodes' % chassis_uuid)
@base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
def list_portgroups(self, **kwargs):
"""List all existing port groups."""
return self._list_request('portgroups', **kwargs)
@base.handle_errors
def list_volume_connectors(self, **kwargs):
"""List all existing volume connectors."""
return self._list_request('volume/connectors', **kwargs)
@base.handle_errors
def list_volume_targets(self, **kwargs):
"""List all existing volume targets."""
return self._list_request('volume/targets', **kwargs)
@base.handle_errors
def list_node_ports(self, uuid):
"""List all ports associated with the node."""
return self._list_request('/nodes/%s/ports' % uuid)
@base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
"""List all existing drivers."""
return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
"""Gets a specific node.
:param uuid: Unique identifier of the node in UUID format.
:return: Serialized node as a dictionary.
"""
return self._show_request('nodes', uuid)
@base.handle_errors
def show_node_by_instance_uuid(self, instance_uuid):
"""Gets a node associated with given instance uuid.
:param instance_uuid: Unique identifier of the instance in UUID format.
:return: Serialized node as a dictionary.
"""
uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
return self._show_request('nodes',
uuid=None,
uri=uri)
@base.handle_errors
def show_chassis(self, uuid):
"""Gets a specific chassis.
:param uuid: Unique identifier of the chassis in UUID format.
:return: Serialized chassis as a dictionary.
"""
return self._show_request('chassis', uuid)
@base.handle_errors
def show_port(self, uuid):
"""Gets a specific port.
:param uuid: Unique identifier of the port in UUID format.
:return: Serialized port as a dictionary.
"""
return self._show_request('ports', uuid)
@base.handle_errors
def show_portgroup(self, portgroup_ident):
"""Gets a specific port group.
:param portgroup_ident: Name or UUID of the port group.
:return: Serialized port group as a dictionary.
"""
return self._show_request('portgroups', portgroup_ident)
@base.handle_errors
def show_volume_connector(self, volume_connector_ident):
"""Gets a specific volume connector.
:param volume_connector_ident: UUID of the volume connector.
:return: Serialized volume connector as a dictionary.
"""
return self._show_request('volume/connectors', volume_connector_ident)
@base.handle_errors
def show_volume_target(self, volume_target_ident):
"""Gets a specific volume target.
:param volume_target_ident: UUID of the volume target.
:return: Serialized volume target as a dictionary.
"""
return self._show_request('volume/targets', volume_target_ident)
@base.handle_errors
def show_port_by_address(self, address):
"""Gets a specific port by address.
:param address: MAC address of the port.
:return: Serialized port as a dictionary.
"""
uri = '/ports/detail?address=%s' % address
return self._show_request('ports', uuid=None, uri=uri)
def show_driver(self, driver_name):
"""Gets a specific driver.
:param driver_name: Name of driver.
:return: Serialized driver as a dictionary.
"""
return self._show_request('drivers', driver_name)
@base.handle_errors
def create_node(self, chassis_id=None, **kwargs):
"""Create a baremetal node with the specified parameters.
:param chassis_id: The unique identifier of the chassis.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpus: Number of CPUs. Default: 8.
:param local_gb: Disk size. Default: 1024.
:param memory_mb: Available RAM. Default: 4096.
:param driver: Driver name. Default: "fake"
:return: A tuple with the server response and the created node.
"""
node = {}
if kwargs.get('resource_class'):
node['resource_class'] = kwargs['resource_class']
node.update(
{'chassis_uuid': chassis_id,
'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
'cpus': kwargs.get('cpus', 8),
'local_gb': kwargs.get('local_gb', 1024),
'memory_mb': kwargs.get('memory_mb', 4096)},
'driver': kwargs.get('driver', 'fake')}
)
return self._create_request('nodes', node)
@base.handle_errors
def create_chassis(self, **kwargs):
"""Create a chassis with the specified parameters.
:param description: The description of the chassis.
Default: test-chassis
:return: A tuple with the server response and the created chassis.
"""
chassis = {'description': kwargs.get('description', 'test-chassis')}
if 'uuid' in kwargs:
chassis.update({'uuid': kwargs.get('uuid')})
return self._create_request('chassis', chassis)
@base.handle_errors
def create_port(self, node_id, **kwargs):
"""Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:param portgroup_uuid: The UUID of a portgroup of which this port is a
member.
:param physical_network: The physical network to which the port is
attached.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
for key in ('address', 'physical_network', 'portgroup_uuid'):
if kwargs.get(key) is not None:
port[key] = kwargs[key]
return self._create_request('ports', port)
@base.handle_errors
def create_portgroup(self, node_uuid, **kwargs):
"""Create a port group with the specified parameters.
:param node_uuid: The UUID of the node which owns the port group.
:param kwargs:
address: MAC address of the port group. Optional.
extra: Meta data of the port group. Default: {'foo': 'bar'}.
name: Name of the port group. Optional.
uuid: UUID of the port group. Optional.
:return: A tuple with the server response and the created port group.
"""
portgroup = {'extra': kwargs.get('extra', {'foo': 'bar'})}
portgroup['node_uuid'] = node_uuid
if kwargs.get('address'):
portgroup['address'] = kwargs['address']
if kwargs.get('name'):
portgroup['name'] = kwargs['name']
return self._create_request('portgroups', portgroup)
@base.handle_errors
def create_volume_connector(self, node_uuid, **kwargs):
"""Create a volume connector with the specified parameters.
:param node_uuid: The UUID of the node which owns the volume connector.
:param kwargs:
type: type of the volume connector.
connector_id: connector_id of the volume connector.
uuid: UUID of the volume connector. Optional.
extra: meta data of the volume connector; a dictionary. Optional.
:return: A tuple with the server response and the created volume
connector.
"""
volume_connector = {'node_uuid': node_uuid}
for arg in ('type', 'connector_id', 'uuid', 'extra'):
if arg in kwargs:
volume_connector[arg] = kwargs[arg]
return self._create_request('volume/connectors', volume_connector)
@base.handle_errors
def create_volume_target(self, node_uuid, **kwargs):
"""Create a volume target with the specified parameters.
:param node_uuid: The UUID of the node which owns the volume target.
:param kwargs:
volume_type: type of the volume target.
volume_id: volume_id of the volume target.
boot_index: boot index of the volume target.
uuid: UUID of the volume target. Optional.
extra: meta data of the volume target; a dictionary. Optional.
properties: properties related to the type of the volume target;
a dictionary. Optional.
:return: A tuple with the server response and the created volume
target.
"""
volume_target = {'node_uuid': node_uuid}
for arg in ('volume_type', 'volume_id', 'boot_index', 'uuid', 'extra',
'properties'):
if arg in kwargs:
volume_target[arg] = kwargs[arg]
return self._create_request('volume/targets', volume_target)
@base.handle_errors
def delete_node(self, uuid):
"""Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('nodes', uuid)
@base.handle_errors
def delete_chassis(self, uuid):
"""Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('chassis', uuid)
@base.handle_errors
def delete_port(self, uuid):
"""Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('ports', uuid)
@base.handle_errors
def delete_portgroup(self, portgroup_ident):
"""Deletes a port group having the specified UUID or name.
:param portgroup_ident: Name or UUID of the port group.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('portgroups', portgroup_ident)
@base.handle_errors
def delete_volume_connector(self, volume_connector_ident):
"""Deletes a volume connector having the specified UUID.
:param volume_connector_ident: UUID of the volume connector.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('volume/connectors',
volume_connector_ident)
@base.handle_errors
def delete_volume_target(self, volume_target_ident):
"""Deletes a volume target having the specified UUID.
:param volume_target_ident: UUID of the volume target.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('volume/targets', volume_target_ident)
@base.handle_errors
def update_node(self, uuid, patch=None, **kwargs):
"""Update the specified node.
:param uuid: The unique identifier of the node.
        :param patch: A JSON patch that sets values of the specified attributes
to the new ones.
:param **kwargs: Attributes and new values for them, used only when
patch param is not set.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/cpu_arch',
'properties/cpus',
'properties/local_gb',
'properties/memory_mb',
'driver',
'instance_uuid',
'resource_class')
if not patch:
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
@base.handle_errors
def update_chassis(self, uuid, **kwargs):
"""Update the specified chassis.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the updated chassis.
"""
chassis_attributes = ('description',)
patch = self._make_patch(chassis_attributes, **kwargs)
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
def update_port(self, uuid, patch):
"""Update the specified port.
:param uuid: The unique identifier of the port.
:param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
return self._patch_request('ports', uuid, patch)
@base.handle_errors
def update_volume_connector(self, uuid, patch):
"""Update the specified volume connector.
:param uuid: The unique identifier of the volume connector.
:param patch: List of dicts representing json patches. Each dict
has keys 'path', 'op' and 'value'; to update a field.
:return: A tuple with the server response and the updated volume
connector.
"""
return self._patch_request('volume/connectors', uuid, patch)
@base.handle_errors
def update_volume_target(self, uuid, patch):
"""Update the specified volume target.
:param uuid: The unique identifier of the volume target.
:param patch: List of dicts representing json patches. Each dict
has keys 'path', 'op' and 'value'; to update a field.
:return: A tuple with the server response and the updated volume
target.
"""
return self._patch_request('volume/targets', uuid, patch)
@base.handle_errors
def set_node_power_state(self, node_uuid, state):
"""Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
:param state: desired state to set (on/off/reboot).
"""
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
@base.handle_errors
def set_node_provision_state(self, node_uuid, state, configdrive=None,
clean_steps=None):
"""Set provision state of the specified node.
:param node_uuid: The unique identifier of the node.
:param state: desired state to set
(active/rebuild/deleted/inspect/manage/provide).
:param configdrive: A gzipped, base64-encoded
configuration drive string.
:param clean_steps: A list with clean steps to execute.
"""
data = {'target': state}
# NOTE (vsaienk0): Add both here if specified, do not check anything.
# API will return an error in case of invalid parameters.
if configdrive is not None:
data['configdrive'] = configdrive
if clean_steps is not None:
data['clean_steps'] = clean_steps
return self._put_request('nodes/%s/states/provision' % node_uuid,
data)
@base.handle_errors
def set_node_raid_config(self, node_uuid, target_raid_config):
"""Set raid config of the specified node.
:param node_uuid: The unique identifier of the node.
:param target_raid_config: desired RAID configuration of the node.
"""
return self._put_request('nodes/%s/states/raid' % node_uuid,
target_raid_config)
@base.handle_errors
def validate_driver_interface(self, node_uuid):
"""Get all driver interfaces of a specific node.
:param node_uuid: Unique identifier of the node in UUID format.
"""
uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
res='nodes',
uuid=node_uuid,
postf='validate')
return self._show_request('nodes', node_uuid, uri=uri)
@base.handle_errors
def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
"""Set the boot device of the specified node.
:param node_uuid: The unique identifier of the node.
:param boot_device: The boot device name.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
"""
request = {'boot_device': boot_device, 'persistent': persistent}
resp, body = self._put_request('nodes/%s/management/boot_device' %
node_uuid, request)
self.expected_success(http_client.NO_CONTENT, resp.status)
return body
@base.handle_errors
def get_node_boot_device(self, node_uuid):
"""Get the current boot device of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device' % node_uuid
resp, body = self._list_request(path)
self.expected_success(http_client.OK, resp.status)
return body
@base.handle_errors
def get_node_supported_boot_devices(self, node_uuid):
"""Get the supported boot devices of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device/supported' % node_uuid
resp, body = self._list_request(path)
self.expected_success(http_client.OK, resp.status)
return body
@base.handle_errors
def get_console(self, node_uuid):
"""Get connection information about the console.
:param node_uuid: Unique identifier of the node in UUID format.
"""
resp, body = self._show_request('nodes/states/console', node_uuid)
self.expected_success(http_client.OK, resp.status)
return resp, body
@base.handle_errors
def set_console_mode(self, node_uuid, enabled):
"""Start and stop the node console.
:param node_uuid: Unique identifier of the node in UUID format.
:param enabled: Boolean value; whether to enable or disable the
console.
"""
enabled = {'enabled': enabled}
resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
enabled)
self.expected_success(http_client.ACCEPTED, resp.status)
return resp, body
@base.handle_errors
def vif_list(self, node_uuid, api_version=None):
"""Get list of attached VIFs.
:param node_uuid: Unique identifier of the node in UUID format.
:param api_version: Ironic API version to use.
"""
extra_headers = False
headers = None
if api_version is not None:
extra_headers = True
headers = {'x-openstack-ironic-api-version': api_version}
return self._list_request('nodes/%s/vifs' % node_uuid,
headers=headers,
extra_headers=extra_headers)
@base.handle_errors
def vif_attach(self, node_uuid, vif_id):
"""Attach a VIF to a node
:param node_uuid: Unique identifier of the node in UUID format.
:param vif_id: An ID representing the VIF
"""
vif = {'id': vif_id}
resp = self._create_request_no_response_body(
'nodes/%s/vifs' % node_uuid, vif)
return resp
@base.handle_errors
def vif_detach(self, node_uuid, vif_id):
"""Detach a VIF from a node
:param node_uuid: Unique identifier of the node in UUID format.
:param vif_id: An ID representing the VIF
"""
resp, body = self._delete_request('nodes/%s/vifs' % node_uuid, vif_id)
self.expected_success(http_client.NO_CONTENT, resp.status)
return resp, body
@base.handle_errors
def get_driver_properties(self, driver_name):
"""Get properties information about driver.
:param driver_name: Name of driver.
:return: tuple of response and serialized properties as a dictionary.
"""
uri = 'drivers/%s/properties' % driver_name
resp, body = self.get(uri)
self.expected_success(200, resp.status)
return resp, self.deserialize(body)
@base.handle_errors
def get_driver_logical_disk_properties(self, driver_name):
"""Get driver logical disk properties.
:param driver_name: Name of driver.
:return: tuple of response and serialized logical disk properties as
a dictionary.
"""
uri = 'drivers/%s/raid/logical_disk_properties' % driver_name
resp, body = self.get(uri)
self.expected_success(200, resp.status)
return resp, self.deserialize(body)
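# Illustrative sketch (not part of the original client): a hypothetical helper showing a
# typical enroll-and-power-on call flow against an already constructed BaremetalClient.
# The chassis description, driver name, power-state string and the assumption that each
# call returns a (response, body) tuple are illustrative only; every method used is
# defined on the class above.
def _example_enroll_flow(client):
    _, chassis = client.create_chassis(description='example-chassis')
    _, node = client.create_node(chassis['uuid'], driver='fake')
    client.set_node_power_state(node['uuid'], 'power on')
    _, states = client.list_nodestates(node['uuid'])
    return states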
|
|
from distutils.version import LooseVersion
import json
import warnings
from pandas import DataFrame
from geopandas._compat import import_optional_dependency
from geopandas.array import from_wkb
from geopandas import GeoDataFrame
import geopandas
from .file import _expand_user
METADATA_VERSION = "0.1.0"
# reference: https://github.com/geopandas/geo-arrow-spec
# Metadata structure:
# {
# "geo": {
# "columns": {
# "<name>": {
# "crs": "<WKT or None: REQUIRED>",
# "encoding": "WKB"
# }
# },
# "creator": {
# "library": "geopandas",
# "version": "<geopandas.__version__>"
#         },
# "primary_column": "<str: REQUIRED>",
# "schema_version": "<METADATA_VERSION>"
# }
# }
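# Illustrative instance of the structure above (values hypothetical); note
# that _create_metadata below also records a per-column "bbox" in addition
# to the keys listed in the sketch:
# {
#     "geo": {
#         "columns": {
#             "geometry": {
#                 "crs": "GEOGCRS[\"WGS 84\", ...]",
#                 "encoding": "WKB",
#                 "bbox": [-180.0, -90.0, 180.0, 83.6]
#             }
#         },
#         "creator": {"library": "geopandas", "version": "0.10.0"},
#         "primary_column": "geometry",
#         "schema_version": "0.1.0"
#     }
# }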
def _is_fsspec_url(url):
return (
isinstance(url, str)
and "://" in url
and not url.startswith(("http://", "https://"))
)
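# For illustration of the check in _is_fsspec_url above (paths hypothetical):
#   _is_fsspec_url("s3://bucket/data.parquet")   -> True
#   _is_fsspec_url("https://host/data.parquet")  -> False (not routed to fsspec)
#   _is_fsspec_url("data.parquet")               -> False (plain local path)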
def _create_metadata(df):
"""Create and encode geo metadata dict.
Parameters
----------
df : GeoDataFrame
Returns
-------
dict
"""
    # Construct metadata for each geometry column
column_metadata = {}
for col in df.columns[df.dtypes == "geometry"]:
series = df[col]
column_metadata[col] = {
"crs": series.crs.to_wkt() if series.crs else None,
"encoding": "WKB",
"bbox": series.total_bounds.tolist(),
}
return {
"primary_column": df._geometry_column_name,
"columns": column_metadata,
"schema_version": METADATA_VERSION,
"creator": {"library": "geopandas", "version": geopandas.__version__},
}
def _encode_metadata(metadata):
"""Encode metadata dict to UTF-8 JSON string
Parameters
----------
metadata : dict
Returns
-------
UTF-8 encoded JSON string
"""
return json.dumps(metadata).encode("utf-8")
def _decode_metadata(metadata_str):
"""Decode a UTF-8 encoded JSON string to dict
Parameters
----------
metadata_str : string (UTF-8 encoded)
Returns
-------
dict
"""
if metadata_str is None:
return None
return json.loads(metadata_str.decode("utf-8"))
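# Round-trip sketch for the two helpers above (dict contents hypothetical):
#   _decode_metadata(_encode_metadata({"primary_column": "geometry"}))
#   == {"primary_column": "geometry"}
# while _decode_metadata(None) simply returns None.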
def _validate_dataframe(df):
"""Validate that the GeoDataFrame conforms to requirements for writing
to Parquet format.
Raises `ValueError` if the GeoDataFrame is not valid.
copied from `pandas.io.parquet`
Parameters
----------
df : GeoDataFrame
"""
if not isinstance(df, DataFrame):
raise ValueError("Writing to Parquet/Feather only supports IO with DataFrames")
# must have value column names (strings only)
if df.columns.inferred_type not in {"string", "unicode", "empty"}:
raise ValueError("Writing to Parquet/Feather requires string column names")
# index level names must be strings
valid_names = all(
isinstance(name, str) for name in df.index.names if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def _validate_metadata(metadata):
"""Validate geo metadata.
Must not be empty, and must contain the structure specified above.
Raises ValueError if metadata is not valid.
Parameters
----------
metadata : dict
"""
if not metadata:
raise ValueError("Missing or malformed geo metadata in Parquet/Feather file")
required_keys = ("primary_column", "columns")
for key in required_keys:
if metadata.get(key, None) is None:
raise ValueError(
"'geo' metadata in Parquet/Feather file is missing required key: "
"'{key}'".format(key=key)
)
if not isinstance(metadata["columns"], dict):
raise ValueError("'columns' in 'geo' metadata must be a dict")
# Validate that geometry columns have required metadata and values
required_col_keys = ("crs", "encoding")
for col, column_metadata in metadata["columns"].items():
for key in required_col_keys:
if key not in column_metadata:
raise ValueError(
"'geo' metadata in Parquet/Feather file is missing required key "
"'{key}' for column '{col}'".format(key=key, col=col)
)
if column_metadata["encoding"] != "WKB":
raise ValueError("Only WKB geometry encoding is supported")
def _geopandas_to_arrow(df, index=None):
"""
Helper function with main, shared logic for to_parquet/to_feather.
"""
from pyarrow import Table
warnings.warn(
"this is an initial implementation of Parquet/Feather file support and "
"associated metadata. This is tracking version 0.1.0 of the metadata "
"specification at "
"https://github.com/geopandas/geo-arrow-spec\n\n"
"This metadata specification does not yet make stability promises. "
"We do not yet recommend using this in a production setting unless you "
"are able to rewrite your Parquet/Feather files.\n\n"
"To further ignore this warning, you can do: \n"
"import warnings; warnings.filterwarnings('ignore', "
"message='.*initial implementation of Parquet.*')",
UserWarning,
stacklevel=4,
)
_validate_dataframe(df)
# create geo metadata before altering incoming data frame
geo_metadata = _create_metadata(df)
df = df.to_wkb()
table = Table.from_pandas(df, preserve_index=index)
# Store geopandas specific file-level metadata
# This must be done AFTER creating the table or it is not persisted
metadata = table.schema.metadata
metadata.update({b"geo": _encode_metadata(geo_metadata)})
return table.replace_schema_metadata(metadata)
def _to_parquet(df, path, index=None, compression="snappy", **kwargs):
"""
Write a GeoDataFrame to the Parquet format.
Any geometry columns present are serialized to WKB format in the file.
Requires 'pyarrow'.
WARNING: this is an initial implementation of Parquet file support and
associated metadata. This is tracking version 0.1.0 of the metadata
specification at:
https://github.com/geopandas/geo-arrow-spec
This metadata specification does not yet make stability promises. As such,
we do not yet recommend using this in a production setting unless you are
able to rewrite your Parquet files.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
index : bool, default None
If ``True``, always include the dataframe's index(es) as columns
in the file output.
If ``False``, the index(es) will not be written to the file.
        If ``None``, the index(es) will be included as columns in the file
output except `RangeIndex` which is stored as metadata only.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
kwargs
Additional keyword arguments passed to pyarrow.parquet.write_table().
"""
parquet = import_optional_dependency(
"pyarrow.parquet", extra="pyarrow is required for Parquet support."
)
path = _expand_user(path)
table = _geopandas_to_arrow(df, index=index)
parquet.write_table(table, path, compression=compression, **kwargs)
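# Usage sketch for _to_parquet above (file name hypothetical); callers normally
# reach this helper through GeoDataFrame.to_parquet(), e.g.:
#   gdf.to_parquet("countries.parquet", compression="snappy")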
def _to_feather(df, path, index=None, compression=None, **kwargs):
"""
Write a GeoDataFrame to the Feather format.
Any geometry columns present are serialized to WKB format in the file.
Requires 'pyarrow' >= 0.17.
WARNING: this is an initial implementation of Feather file support and
associated metadata. This is tracking version 0.1.0 of the metadata
specification at:
https://github.com/geopandas/geo-arrow-spec
This metadata specification does not yet make stability promises. As such,
we do not yet recommend using this in a production setting unless you are
able to rewrite your Feather files.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
index : bool, default None
If ``True``, always include the dataframe's index(es) as columns
in the file output.
If ``False``, the index(es) will not be written to the file.
        If ``None``, the index(es) will be included as columns in the file
output except `RangeIndex` which is stored as metadata only.
compression : {'zstd', 'lz4', 'uncompressed'}, optional
Name of the compression to use. Use ``"uncompressed"`` for no
compression. By default uses LZ4 if available, otherwise uncompressed.
kwargs
Additional keyword arguments passed to pyarrow.feather.write_feather().
"""
feather = import_optional_dependency(
"pyarrow.feather", extra="pyarrow is required for Feather support."
)
# TODO move this into `import_optional_dependency`
import pyarrow
    if LooseVersion(pyarrow.__version__) < LooseVersion("0.17.0"):
raise ImportError("pyarrow >= 0.17 required for Feather support")
path = _expand_user(path)
table = _geopandas_to_arrow(df, index=index)
feather.write_feather(table, path, compression=compression, **kwargs)
def _arrow_to_geopandas(table):
"""
Helper function with main, shared logic for read_parquet/read_feather.
"""
df = table.to_pandas()
metadata = table.schema.metadata
if metadata is None or b"geo" not in metadata:
raise ValueError(
"""Missing geo metadata in Parquet/Feather file.
Use pandas.read_parquet/read_feather() instead."""
)
try:
metadata = _decode_metadata(metadata.get(b"geo", b""))
except (TypeError, json.decoder.JSONDecodeError):
raise ValueError("Missing or malformed geo metadata in Parquet/Feather file")
_validate_metadata(metadata)
# Find all geometry columns that were read from the file. May
# be a subset if 'columns' parameter is used.
geometry_columns = df.columns.intersection(metadata["columns"])
if not len(geometry_columns):
raise ValueError(
"""No geometry columns are included in the columns read from
the Parquet/Feather file. To read this file without geometry columns,
use pandas.read_parquet/read_feather() instead."""
)
geometry = metadata["primary_column"]
# Missing geometry likely indicates a subset of columns was read;
# promote the first available geometry to the primary geometry.
if len(geometry_columns) and geometry not in geometry_columns:
geometry = geometry_columns[0]
# if there are multiple non-primary geometry columns, raise a warning
if len(geometry_columns) > 1:
warnings.warn(
"Multiple non-primary geometry columns read from Parquet/Feather "
"file. The first column read was promoted to the primary geometry."
)
# Convert the WKB columns that are present back to geometry.
for col in geometry_columns:
df[col] = from_wkb(df[col].values, crs=metadata["columns"][col]["crs"])
return GeoDataFrame(df, geometry=geometry)
def _get_filesystem_path(path, filesystem=None, storage_options=None):
"""
Get the filesystem and path for a given filesystem and path.
If the filesystem is not None then it's just returned as is.
"""
import pyarrow
if (
isinstance(path, str)
and storage_options is None
and filesystem is None
and LooseVersion(pyarrow.__version__) >= "5.0.0"
):
# Use the native pyarrow filesystem if possible.
try:
from pyarrow.fs import FileSystem
filesystem, path = FileSystem.from_uri(path)
except Exception:
# fallback to use get_handle / fsspec for filesystems
# that pyarrow doesn't support
pass
if _is_fsspec_url(path) and filesystem is None:
fsspec = import_optional_dependency(
"fsspec", extra="fsspec is requred for 'storage_options'."
)
filesystem, path = fsspec.core.url_to_fs(path, **(storage_options or {}))
if filesystem is None and storage_options:
raise ValueError(
"Cannot provide 'storage_options' with non-fsspec path '{}'".format(path)
)
return filesystem, path
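# For example (URL hypothetical): with pyarrow >= 5.0.0 and no explicit
# filesystem or storage_options, "s3://bucket/data.parquet" is resolved via
# pyarrow.fs.FileSystem.from_uri(); on older pyarrow, or when that fails,
# _get_filesystem_path above falls back to fsspec.core.url_to_fs().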
def _read_parquet(path, columns=None, storage_options=None, **kwargs):
"""
Load a Parquet object from the file path, returning a GeoDataFrame.
You can read a subset of columns in the file using the ``columns`` parameter.
However, the structure of the returned GeoDataFrame will depend on which
columns you read:
* if no geometry columns are read, this will raise a ``ValueError`` - you
should use the pandas `read_parquet` method instead.
* if the primary geometry column saved to this file is not included in
columns, the first available geometry column will be set as the geometry
column of the returned GeoDataFrame.
Requires 'pyarrow'.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
columns : list-like of strings, default=None
If not None, only these columns will be read from the file. If
the primary geometry column is not included, the first secondary
geometry read from the file will be set as the geometry column
of the returned GeoDataFrame. If no geometry columns are present,
a ``ValueError`` will be raised.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g. host,
port, username, password, etc. For HTTP(S) URLs the key-value pairs are
forwarded to urllib as header options. For other URLs (e.g. starting with
"s3://", and "gcs://") the key-value pairs are forwarded to fsspec. Please
see fsspec and urllib for more details.
When no storage options are provided and a filesystem is implemented by
both ``pyarrow.fs`` and ``fsspec`` (e.g. "s3://") then the ``pyarrow.fs``
filesystem is preferred. Provide the instantiated fsspec filesystem using
the ``filesystem`` keyword if you wish to use its implementation.
**kwargs
Any additional kwargs passed to pyarrow.parquet.read_table().
Returns
-------
GeoDataFrame
Examples
--------
>>> df = geopandas.read_parquet("data.parquet") # doctest: +SKIP
Specifying columns to read:
>>> df = geopandas.read_parquet(
... "data.parquet",
... columns=["geometry", "pop_est"]
... ) # doctest: +SKIP
"""
parquet = import_optional_dependency(
"pyarrow.parquet", extra="pyarrow is required for Parquet support."
)
# TODO(https://github.com/pandas-dev/pandas/pull/41194): see if pandas
# adds filesystem as a keyword and match that.
filesystem = kwargs.pop("filesystem", None)
filesystem, path = _get_filesystem_path(
path, filesystem=filesystem, storage_options=storage_options
)
path = _expand_user(path)
kwargs["use_pandas_metadata"] = True
table = parquet.read_table(path, columns=columns, filesystem=filesystem, **kwargs)
return _arrow_to_geopandas(table)
def _read_feather(path, columns=None, **kwargs):
"""
Load a Feather object from the file path, returning a GeoDataFrame.
You can read a subset of columns in the file using the ``columns`` parameter.
However, the structure of the returned GeoDataFrame will depend on which
columns you read:
* if no geometry columns are read, this will raise a ``ValueError`` - you
should use the pandas `read_feather` method instead.
* if the primary geometry column saved to this file is not included in
columns, the first available geometry column will be set as the geometry
column of the returned GeoDataFrame.
Requires 'pyarrow' >= 0.17.
.. versionadded:: 0.8
Parameters
----------
path : str, path object
columns : list-like of strings, default=None
If not None, only these columns will be read from the file. If
the primary geometry column is not included, the first secondary
geometry read from the file will be set as the geometry column
of the returned GeoDataFrame. If no geometry columns are present,
a ``ValueError`` will be raised.
**kwargs
Any additional kwargs passed to pyarrow.feather.read_table().
Returns
-------
GeoDataFrame
Examples
--------
>>> df = geopandas.read_feather("data.feather") # doctest: +SKIP
Specifying columns to read:
>>> df = geopandas.read_feather(
... "data.feather",
... columns=["geometry", "pop_est"]
... ) # doctest: +SKIP
"""
feather = import_optional_dependency(
"pyarrow.feather", extra="pyarrow is required for Feather support."
)
# TODO move this into `import_optional_dependency`
import pyarrow
    if LooseVersion(pyarrow.__version__) < LooseVersion("0.17.0"):
raise ImportError("pyarrow >= 0.17 required for Feather support")
path = _expand_user(path)
table = feather.read_table(path, columns=columns, **kwargs)
return _arrow_to_geopandas(table)
|
|
# -*- coding: utf-8 -*-
"""
On the Subject of Keypads
:Copyright: 2015 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
from collections import Counter
from itertools import chain, zip_longest
from tkinter import ttk
from tkinter import E, N, S, W
COPYRIGHT_SIGN = '\u00A9'
PILCROW_SIGN = '\u00B6'
INVERTED_QUESTION_MARK = '\u00BF'
LATIN_SMALL_LETTER_LAMBDA_WITH_STROKE = '\u019B'
GREEK_CAPITAL_LETTER_PSI = '\u03A8'
GREEK_CAPITAL_LETTER_OMEGA = '\u03A9'
GREEK_KAI_SYMBOL = '\u03D7'
GREEK_LETTER_ARCHAIC_KOPPA = '\u03D8'
GREEK_LETTER_KOPPA = '\u03DE'
COPTIC_CAPITAL_LETTER_SHIMA = '\u03EC'
GREEK_CAPITAL_DOTTED_LUNATE_SIGMASYMBOL = '\u03FE'
GREEK_CAPITAL_REVERSED_DOTTED_LUNATESIGMA_SYMBOL = '\u03FF'
CYRILLIC_CAPITAL_LETTER_YAT = '\u0462'
CYRILLIC_CAPITAL_LETTER_LITTLE_YUS = '\u0466'
CYRILLIC_CAPITAL_LETTER_IOTIFIED_BIG_YUS = '\u046C'
CYRILLIC_CAPITAL_LETTER_KSI = '\u046E'
CYRILLIC_CAPITAL_LETTER_OMEGA_WITH_TITLO = '\u047C'
CYRILLIC_THOUSANDS_SIGN = '\u0482'
CYRILLIC_CAPITAL_LETTER_SHORT_I_WITH_TAIL = '\u048A'
CYRILLIC_CAPITAL_LETTER_ZHE_WITH_DESCENDER = '\u0496'
CYRILLIC_CAPITAL_LETTER_ABKHASIAN_HA = '\u04A8'
CYRILLIC_SMALL_LIGATURE_A_IE = '\u04D5'
CYRILLIC_CAPITAL_LETTER_E_WITH_DIAERESIS = '\u04EC'
CYRILLIC_CAPITAL_LETTER_KOMI_DZJE = '\u0506'
ARABIC_LETTER_TEH_WITH_RING = '\u067C'
BLACK_STAR = '\u2605'
WHITE_STAR = '\u2606'
SYMBOLS = frozenset([
COPYRIGHT_SIGN,
PILCROW_SIGN,
INVERTED_QUESTION_MARK,
LATIN_SMALL_LETTER_LAMBDA_WITH_STROKE,
GREEK_CAPITAL_LETTER_PSI,
GREEK_CAPITAL_LETTER_OMEGA,
GREEK_KAI_SYMBOL,
GREEK_LETTER_ARCHAIC_KOPPA,
GREEK_LETTER_KOPPA,
COPTIC_CAPITAL_LETTER_SHIMA,
GREEK_CAPITAL_DOTTED_LUNATE_SIGMASYMBOL,
GREEK_CAPITAL_REVERSED_DOTTED_LUNATESIGMA_SYMBOL,
CYRILLIC_CAPITAL_LETTER_YAT,
CYRILLIC_CAPITAL_LETTER_LITTLE_YUS,
CYRILLIC_CAPITAL_LETTER_IOTIFIED_BIG_YUS,
CYRILLIC_CAPITAL_LETTER_KSI,
CYRILLIC_CAPITAL_LETTER_OMEGA_WITH_TITLO,
CYRILLIC_THOUSANDS_SIGN,
CYRILLIC_CAPITAL_LETTER_SHORT_I_WITH_TAIL,
CYRILLIC_CAPITAL_LETTER_ZHE_WITH_DESCENDER,
CYRILLIC_CAPITAL_LETTER_ABKHASIAN_HA,
CYRILLIC_SMALL_LIGATURE_A_IE,
CYRILLIC_CAPITAL_LETTER_E_WITH_DIAERESIS,
CYRILLIC_CAPITAL_LETTER_KOMI_DZJE,
ARABIC_LETTER_TEH_WITH_RING,
BLACK_STAR,
WHITE_STAR,
])
COLUMNS = [
[
# column 1
GREEK_LETTER_ARCHAIC_KOPPA,
CYRILLIC_CAPITAL_LETTER_LITTLE_YUS,
LATIN_SMALL_LETTER_LAMBDA_WITH_STROKE,
GREEK_LETTER_KOPPA,
CYRILLIC_CAPITAL_LETTER_IOTIFIED_BIG_YUS,
GREEK_KAI_SYMBOL,
GREEK_CAPITAL_REVERSED_DOTTED_LUNATESIGMA_SYMBOL,
],
[
# column 2
CYRILLIC_CAPITAL_LETTER_E_WITH_DIAERESIS,
GREEK_LETTER_ARCHAIC_KOPPA,
GREEK_CAPITAL_REVERSED_DOTTED_LUNATESIGMA_SYMBOL,
CYRILLIC_CAPITAL_LETTER_ABKHASIAN_HA,
WHITE_STAR,
GREEK_KAI_SYMBOL,
INVERTED_QUESTION_MARK,
],
[
# column 3
COPYRIGHT_SIGN,
CYRILLIC_CAPITAL_LETTER_OMEGA_WITH_TITLO,
CYRILLIC_CAPITAL_LETTER_ABKHASIAN_HA,
CYRILLIC_CAPITAL_LETTER_ZHE_WITH_DESCENDER,
CYRILLIC_CAPITAL_LETTER_KOMI_DZJE,
LATIN_SMALL_LETTER_LAMBDA_WITH_STROKE,
WHITE_STAR,
],
[
# column 4
COPTIC_CAPITAL_LETTER_SHIMA,
PILCROW_SIGN,
CYRILLIC_CAPITAL_LETTER_YAT,
CYRILLIC_CAPITAL_LETTER_IOTIFIED_BIG_YUS,
CYRILLIC_CAPITAL_LETTER_ZHE_WITH_DESCENDER,
INVERTED_QUESTION_MARK,
ARABIC_LETTER_TEH_WITH_RING,
],
[
# column 5
GREEK_CAPITAL_LETTER_PSI,
ARABIC_LETTER_TEH_WITH_RING,
CYRILLIC_CAPITAL_LETTER_YAT,
GREEK_CAPITAL_DOTTED_LUNATE_SIGMASYMBOL,
PILCROW_SIGN,
CYRILLIC_CAPITAL_LETTER_KSI,
BLACK_STAR,
],
[
# column 6
COPTIC_CAPITAL_LETTER_SHIMA,
CYRILLIC_CAPITAL_LETTER_E_WITH_DIAERESIS,
CYRILLIC_THOUSANDS_SIGN,
CYRILLIC_SMALL_LIGATURE_A_IE,
GREEK_CAPITAL_LETTER_PSI,
CYRILLIC_CAPITAL_LETTER_SHORT_I_WITH_TAIL,
GREEK_CAPITAL_LETTER_OMEGA,
],
]
SYMBOL_COUNTS = Counter(chain.from_iterable(COLUMNS))
def is_symbol_unique(symbol):
"""Return `True` if the symbol appears only once."""
return SYMBOL_COUNTS[symbol] == 1
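# For example, BLACK_STAR appears only in column 5, so
# is_symbol_unique(BLACK_STAR) is True, whereas GREEK_KAI_SYMBOL appears in
# columns 1 and 2, so is_symbol_unique(GREEK_KAI_SYMBOL) is False.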
class KeypadFrame(ttk.Frame):
def __init__(self, parent, symbols):
super().__init__(parent)
self.parent = parent
button_style = create_style('lightgray')
button_style_unique = create_style('lightgreen')
ordered_symbols = sorted(symbols)
chunked_ordered_symbols = split_into_chunks(ordered_symbols, 9)
for row, chunk in enumerate(chunked_ordered_symbols):
for column, symbol in enumerate(chunk):
if symbol is None:
continue
button = SymbolButton(self, text=symbol, value=symbol)
style = button_style_unique if is_symbol_unique(symbol) else button_style
button.configure(style=style)
button.grid(column=column, row=row, sticky=(N, W, E, S))
self.columnconfigure(column, weight=1)
self.rowconfigure(row, weight=1)
class SymbolButton(ttk.Button):
def __init__(self, *args, **kwargs):
self.value = kwargs.pop('value')
super().__init__(*args, **kwargs)
def create_style(background_color_name):
style_name = '{}.TButton'.format(background_color_name)
style = ttk.Style()
style.configure(style_name, background=background_color_name)
return style_name
def split_into_chunks(iterable, chunk_length):
args = [iter(iterable)] * chunk_length
return zip_longest(*args)
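# zip_longest pads the last chunk with None (its default fillvalue), which is
# why KeypadFrame above skips None entries, e.g.
#   list(split_into_chunks('ABCDE', 3)) == [('A', 'B', 'C'), ('D', 'E', None)]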
def display_symbols(ui):
def create_frame(parent):
return KeypadFrame(parent, SYMBOLS)
ui.run_frame(create_frame)
def execute(ui):
while True:
display_symbols(ui)
|
|
"""Support for DoorBird devices."""
from __future__ import annotations
from http import HTTPStatus
import logging
from typing import Any
from aiohttp import web
from doorbirdpy import DoorBird
import requests
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util, slugify
from .const import (
CONF_EVENTS,
DOMAIN,
DOOR_STATION,
DOOR_STATION_EVENT_ENTITY_IDS,
DOOR_STATION_INFO,
PLATFORMS,
UNDO_UPDATE_LISTENER,
)
from .util import get_doorstation_by_token
_LOGGER = logging.getLogger(__name__)
API_URL = f"/api/{DOMAIN}"
CONF_CUSTOM_URL = "hass_url_override"
RESET_DEVICE_FAVORITES = "doorbird_reset_favorites"
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_EVENTS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_CUSTOM_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the DoorBird component."""
hass.data.setdefault(DOMAIN, {})
# Provide an endpoint for the doorstations to call to trigger events
hass.http.register_view(DoorBirdRequestView)
def _reset_device_favorites_handler(event):
"""Handle clearing favorites on device."""
if (token := event.data.get("token")) is None:
return
doorstation = get_doorstation_by_token(hass, token)
if doorstation is None:
_LOGGER.error("Device not found for provided token")
return
# Clear webhooks
favorites = doorstation.device.favorites()
for favorite_type in favorites:
for favorite_id in favorites[favorite_type]:
doorstation.device.delete_favorite(favorite_type, favorite_id)
hass.bus.async_listen(RESET_DEVICE_FAVORITES, _reset_device_favorites_handler)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up DoorBird from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
doorstation_config = entry.data
doorstation_options = entry.options
config_entry_id = entry.entry_id
device_ip = doorstation_config[CONF_HOST]
username = doorstation_config[CONF_USERNAME]
password = doorstation_config[CONF_PASSWORD]
device = DoorBird(device_ip, username, password)
try:
status, info = await hass.async_add_executor_job(_init_doorbird_device, device)
except requests.exceptions.HTTPError as err:
if err.response.status_code == HTTPStatus.UNAUTHORIZED:
_LOGGER.error(
"Authorization rejected by DoorBird for %s@%s", username, device_ip
)
return False
raise ConfigEntryNotReady from err
except OSError as oserr:
_LOGGER.error("Failed to setup doorbird at %s: %s", device_ip, oserr)
raise ConfigEntryNotReady from oserr
if not status[0]:
_LOGGER.error(
"Could not connect to DoorBird as %s@%s: Error %s",
username,
device_ip,
str(status[1]),
)
raise ConfigEntryNotReady
token = doorstation_config.get(CONF_TOKEN, config_entry_id)
custom_url = doorstation_config.get(CONF_CUSTOM_URL)
name = doorstation_config.get(CONF_NAME)
events = doorstation_options.get(CONF_EVENTS, [])
doorstation = ConfiguredDoorBird(device, name, custom_url, token)
doorstation.update_events(events)
# Subscribe to doorbell or motion events
if not await _async_register_events(hass, doorstation):
raise ConfigEntryNotReady
undo_listener = entry.add_update_listener(_update_listener)
hass.data[DOMAIN][config_entry_id] = {
DOOR_STATION: doorstation,
DOOR_STATION_INFO: info,
UNDO_UPDATE_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
def _init_doorbird_device(device):
return device.ready(), device.info()
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_register_events(
hass: HomeAssistant, doorstation: ConfiguredDoorBird
) -> bool:
try:
await hass.async_add_executor_job(doorstation.register_events, hass)
except requests.exceptions.HTTPError:
persistent_notification.async_create(
hass,
"Doorbird configuration failed. Please verify that API "
"Operator permission is enabled for the Doorbird user. "
"A restart will be required once permissions have been "
"verified.",
title="Doorbird Configuration Failure",
notification_id="doorbird_schedule_error",
)
return False
return True
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
config_entry_id = entry.entry_id
doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
doorstation.update_events(entry.options[CONF_EVENTS])
# Subscribe to doorbell or motion events
await _async_register_events(hass, doorstation)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
modified = False
for importable_option in (CONF_EVENTS,):
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, options=options)
class ConfiguredDoorBird:
"""Attach additional information to pass along with configured device."""
def __init__(self, device, name, custom_url, token):
"""Initialize configured device."""
self._name = name
self._device = device
self._custom_url = custom_url
self.events = None
self.doorstation_events = None
self._token = token
def update_events(self, events):
"""Update the doorbird events."""
self.events = events
self.doorstation_events = [self._get_event_name(event) for event in self.events]
@property
def name(self):
"""Get custom device name."""
return self._name
@property
def device(self):
"""Get the configured device."""
return self._device
@property
def custom_url(self):
"""Get custom url for device."""
return self._custom_url
@property
def token(self):
"""Get token for device."""
return self._token
def register_events(self, hass: HomeAssistant) -> None:
"""Register events on device."""
# Get the URL of this server
hass_url = get_url(hass)
# Override url if another is specified in the configuration
if self.custom_url is not None:
hass_url = self.custom_url
if not self.doorstation_events:
# User may not have permission to get the favorites
return
favorites = self.device.favorites()
for event in self.doorstation_events:
if self._register_event(hass_url, event, favs=favorites):
_LOGGER.info(
"Successfully registered URL for %s on %s", event, self.name
)
@property
def slug(self):
"""Get device slug."""
return slugify(self._name)
def _get_event_name(self, event):
return f"{self.slug}_{event}"
def _register_event(
self, hass_url: str, event: str, favs: dict[str, Any] | None = None
) -> bool:
"""Add a schedule entry in the device for a sensor."""
url = f"{hass_url}{API_URL}/{event}?token={self._token}"
# Register HA URL as webhook if not already, then get the ID
if self.webhook_is_registered(url, favs=favs):
return True
self.device.change_favorite("http", f"Home Assistant ({event})", url)
if not self.webhook_is_registered(url):
_LOGGER.warning(
'Unable to set favorite URL "%s". ' 'Event "%s" will not fire',
url,
event,
)
return False
return True
def webhook_is_registered(self, url, favs=None) -> bool:
"""Return whether the given URL is registered as a device favorite."""
return self.get_webhook_id(url, favs) is not None
def get_webhook_id(self, url, favs=None) -> str | None:
"""
Return the device favorite ID for the given URL.
        Returns ``None`` if the URL is not registered as a favorite.
"""
favs = favs if favs else self.device.favorites()
if "http" not in favs:
return None
for fav_id in favs["http"]:
if favs["http"][fav_id]["value"] == url:
return fav_id
return None
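    # Sketch of the favorites payload consumed by get_webhook_id above
    # (IDs, titles and URLs are hypothetical):
    #   {"http": {"0": {"title": "Home Assistant (doorbell)",
    #                   "value": "https://hass.local:8123/api/doorbird/..."}}}
    # It returns the matching key ("0") or None.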
def get_event_data(self):
"""Get data to pass along with HA event."""
return {
"timestamp": dt_util.utcnow().isoformat(),
"live_video_url": self._device.live_video_url,
"live_image_url": self._device.live_image_url,
"rtsp_live_video_url": self._device.rtsp_live_video_url,
"html5_viewer_url": self._device.html5_viewer_url,
}
class DoorBirdRequestView(HomeAssistantView):
"""Provide a page for the device to call."""
requires_auth = False
url = API_URL
name = API_URL[1:].replace("/", ":")
extra_urls = [API_URL + "/{event}"]
async def get(self, request, event):
"""Respond to requests from the device."""
# pylint: disable=no-self-use
hass = request.app["hass"]
token = request.query.get("token")
device = get_doorstation_by_token(hass, token)
if device is None:
return web.Response(
status=HTTPStatus.UNAUTHORIZED, text="Invalid token provided."
)
if device:
event_data = device.get_event_data()
else:
event_data = {}
if event == "clear":
hass.bus.async_fire(RESET_DEVICE_FAVORITES, {"token": token})
message = f"HTTP Favorites cleared for {device.slug}"
return web.Response(text=message)
event_data[ATTR_ENTITY_ID] = hass.data[DOMAIN][
DOOR_STATION_EVENT_ENTITY_IDS
].get(event)
hass.bus.async_fire(f"{DOMAIN}_{event}", event_data)
return web.Response(text="OK")
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import urllib
import xbmcplugin
import xbmc
import xbmcgui
import common
import database_tv as tv_db
import database_common
pluginhandle = common.pluginHandle
# 501-POSTER WRAP 503-MLIST3 504-MLIST2 508-FANARTPOSTER
confluence_views = [500, 501, 502, 503, 504, 508]
###################### Television
def list_tv_root():
tv_db.update_tv(False)
cm_u = sys.argv[0] + '?mode=tv&sitemode=list_tvshows_favor_filtered_export&url=""'
cm = [('Export Favorites to Library', 'XBMC.RunPlugin(%s)' % cm_u)]
common.add_directory('Favorites', 'tv', 'list_tvshows_favor_filtered', contextmenu=cm)
cm_u = sys.argv[0] + '?mode=tv&sitemode=list_tvshows_export&url=""'
cm = [('Export All to Library', 'XBMC.RunPlugin(%s)' % cm_u)]
common.add_directory('All Shows', 'tv', 'list_tvshows', contextmenu=cm)
# common.add_directory('Genres', 'tv', 'list_tvshow_types', 'GENRE')
#common.add_directory('Years', 'tv', 'list_tvshow_types', 'YEARS')
#common.add_directory('TV Rating', 'tv', 'list_tvshow_types', 'MPAA')
common.add_directory('Actors', 'tv', 'list_tvshow_types', 'ACTORS')
#common.add_directory('Watched', 'tv', 'list_tvshows_watched_filtered')
xbmcplugin.endOfDirectory(pluginhandle)
def list_tvshow_types(type=False):
if not type:
type = common.args.url
if type == 'GENRE':
mode = 'list_tvshows_genre_filtered'
items = tv_db.get_types('genres')
elif type == 'YEARS':
mode = 'list_tvshows_years_filtered'
items = tv_db.get_types('year')
elif type == 'MPAA':
mode = 'list_tvshows_mpaa_filtered'
items = tv_db.get_types('mpaa')
elif type == 'ACTORS':
mode = 'list_tvshows_actors_filtered'
items = tv_db.get_types('actors')
for item in items:
common.add_directory(item, 'tv', mode, item)
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.endOfDirectory(pluginhandle)
def list_tvshows_genre_filtered():
list_tvshows(export=False, genrefilter=common.args.url)
def list_tvshows_years_filtered():
list_tvshows(export=False, yearfilter=common.args.url)
def list_tvshows_mpaa_filtered():
list_tvshows(export=False, mpaafilter=common.args.url)
def list_tvshows_creators_filtered():
list_tvshows(export=False, creatorfilter=common.args.url)
def list_tvshows_favor_filtered_export():
list_tvshows_favor_filtered(export=True)
def list_tvshows_favor_filtered():
list_tvshows(export=False, favorfilter=True)
def list_tvshows_export():
list_tvshows(export=True)
def list_tvshows(export=False, mpaafilter=False, genrefilter=False, creatorfilter=False, yearfilter=False,
favorfilter=False):
if export:
import xbmclibrary
added_folders = xbmclibrary.setup_library()
shows = tv_db.get_series(favorfilter=favorfilter).fetchall()
total = len(shows)
for showdata in shows:
if export:
xbmclibrary.export_series(showdata)
else:
_add_series_item(showdata, total)
if export:
xbmclibrary.complete_export(added_folders)
else:
xbmcplugin.setContent(pluginhandle, 'tvshows')
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE)
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)
# xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_MPAA_RATING)
#xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE)
xbmcplugin.endOfDirectory(pluginhandle)
viewenable = common.get_setting("viewenable")
if viewenable == 'true':
view = int(common.get_setting("showview"))
xbmc.executebuiltin("Container.SetViewMode(" + str(confluence_views[view]) + ")")
def _add_series_item(data, total=0):
fanart = database_common.get_thumb(data['content_id'])
poster = database_common.get_poster(data['content_id'])
total_episodes = tv_db.get_series_episode_count(data['content_id'])
watched_episodes = tv_db.get_series_episode_count(data['content_id'], 'watched')
total_seasons = tv_db.get_series_season_count(data['content_id'])
labels = {
'title': data['title'],
'tvshowtitle': data['title'],
'plot': data['plot'],
'studio': data['studio'],
'episode': total_episodes,
'year': tv_db.get_series_year(data['content_id']),
'trailer': data['trailer']
}
if data['directors']:
labels['director'] = ' / '.join(data['directors'].split(','))
if data['genres']:
labels['genres'] = ' / '.join(data['genres'].split(','))
if data['actors']:
labels['cast'] = data['actors'].split(',')
item = xbmcgui.ListItem(data['title'], iconImage=poster, thumbnailImage=poster)
item.setInfo(type='Video', infoLabels=labels)
item.setProperty('fanart_image', fanart)
item.setProperty('TVShowThumb', poster)
item.setProperty('TotalSeasons', str(total_seasons))
item.setProperty('TotalEpisodes', str(total_episodes))
item.setProperty('WatchedEpisodes', str(watched_episodes))
item.setProperty('UnWatchedEpisodes', str(total_episodes - watched_episodes))
contextmenu = []
if data['favor']:
cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=unfavor_series&title={1}'.format(data['content_id'],
urllib.unquote_plus(
data['title']))
contextmenu.append((common.localise(39006).format(database_common.SERVICE_NAME), 'XBMC.RunPlugin(%s)' % cm_u))
else:
cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=favor_series&title={1}'.format(data['content_id'],
urllib.unquote_plus(
data['title']))
contextmenu.append((common.localise(39007).format(database_common.SERVICE_NAME), 'XBMC.RunPlugin(%s)' % cm_u))
if data['trailer']:
cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=play_trailer&title={1}&series_id={2}'.format(
data['trailer'], data['title'], data['content_id'])
contextmenu.append(('Play trailer', 'XBMC.RunPlugin(%s)' % cm_u))
contextmenu.append(('TV Show Information', 'XBMC.Action(Info)'))
item.addContextMenuItems(contextmenu)
u = sys.argv[0] + '?url={0}&mode=tv&sitemode=list_tv_seasons'.format(data['content_id'])
xbmcplugin.addDirectoryItem(pluginhandle, url=u, listitem=item, isFolder=True, totalItems=total)
def list_tv_seasons():
series_id = common.args.url
seasons = tv_db.get_seasons(series_id).fetchall()
total = len(seasons)
for season in seasons:
_add_season_item(season, total)
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.setContent(pluginhandle, 'tvshows')
xbmcplugin.endOfDirectory(pluginhandle)
viewenable = common.get_setting("viewenable")
if viewenable == 'true':
view = int(common.get_setting("seasonview"))
xbmc.executebuiltin("Container.SetViewMode(" + str(confluence_views[view]) + ")")
def _add_season_item(data, total=0):
fanart = database_common.get_thumb(data['series_content_id'])
poster = database_common.get_poster(data['series_content_id'])
total_episodes = tv_db.get_season_episode_count(data['series_content_id'])
watched_episodes = tv_db.get_season_episode_count(data['series_content_id'], 'watched')
labels = {
'title': data['title'],
'tvshowtitle': data['series_title'],
'studio': data['studio'],
'season': data['order_rank'],
'episode': total_episodes,
'year': tv_db.get_season_year(data['content_id'])
}
if data['directors']:
labels['director'] = ' / '.join(data['directors'].split(','))
if data['genres']:
labels['genres'] = ' / '.join(data['genres'].split(','))
if data['actors']:
labels['cast'] = data['actors'].split(',')
item = xbmcgui.ListItem(data['title'], iconImage=poster, thumbnailImage=poster)
item.setInfo(type='Video', infoLabels=labels)
item.setProperty('fanart_image', fanart)
item.setProperty('TVShowThumb', poster)
item.setProperty('TotalEpisodes', str(total_episodes))
item.setProperty('WatchedEpisodes', str(watched_episodes))
item.setProperty('UnWatchedEpisodes', str(total_episodes - watched_episodes))
u = sys.argv[0] + '?url={0}&mode=tv&sitemode=list_episodes'.format(data['content_id'])
xbmcplugin.addDirectoryItem(pluginhandle, url=u, listitem=item, isFolder=True, totalItems=total)
def list_episodes(export=False):
season_id = common.args.url
episodes = tv_db.get_episodes(season_id).fetchall()
total = len(episodes)
for episode in episodes:
_add_episode_item(episode, total)
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
    xbmcplugin.setContent(pluginhandle, 'episodes')
xbmcplugin.endOfDirectory(pluginhandle)
viewenable = common.get_setting("viewenable")
if viewenable == 'true':
view = int(common.get_setting("episodeview"))
xbmc.executebuiltin("Container.SetViewMode(" + str(confluence_views[view]) + ")")
def _add_episode_item(data, total):
fanart = database_common.get_thumb(data['content_id'])
poster = database_common.get_poster(data['series_id'])
labels = {
'title': data['title'],
'sorttitle': data['title_sort'],
'tvshowtitle': data['series_title'],
'plot': data['plot'],
'studio': data['studio'],
'season': data['season_num'],
'episode': str(data['order_rank'])[-2:],
'year': data['year'],
'duration': data['duration'],
'playcount': data['playcount']
}
if data['mpaa']:
labels['mpaa'] = 'Rated ' + data['mpaa']
if data['directors']:
labels['director'] = ' / '.join(data['directors'].split(','))
if data['genres']:
labels['genres'] = ' / '.join(data['genres'].split(','))
if data['actors']:
labels['cast'] = data['actors'].split(',')
item = xbmcgui.ListItem(data['title'], data['mpaa'], iconImage=fanart, thumbnailImage=fanart)
item.setInfo(type='Video', infoLabels=labels)
item.setProperty('fanart_image', fanart)
item.setProperty('TVShowThumb', poster)
try:
if data['is_hd']:
item.addStreamInfo('video', {'codec': 'h264', 'width': 1280, 'height': 720, 'duration': data['duration']})
else:
item.addStreamInfo('video', {'codec': 'h264', 'width': 720, 'height': 400, 'duration': data['duration']})
if data['audio_type'] == '5.1 Surround':
item.addStreamInfo('audio', {'codec': 'aac', 'channels': 6})
else:
item.addStreamInfo('audio', {'codec': 'aac', 'channels': 2})
if data['cc_available']:
item.addStreamInfo('subtitle', {'language': 'en'})
except:
pass
contextmenu = []
if data['playcount'] > 0:
cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=unwatch_episode'.format(data['content_id'])
contextmenu.append(('Mark as unwatched', 'XBMC.RunPlugin(%s)' % cm_u))
else:
cm_u = sys.argv[0] + '?url={0}&mode=tv&sitemode=watch_episode'.format(data['content_id'])
contextmenu.append(('Mark as watched', 'XBMC.RunPlugin(%s)' % cm_u))
contextmenu.append(('Episode Information', 'XBMC.Action(Info)'))
item.addContextMenuItems(contextmenu)
play_url = database_common.get_play_url(data['media_id'])
u = sys.argv[0] + '?url={0}&mode=tv&sitemode=play_movie&content_id={1}'.format(play_url, data['content_id'])
xbmcplugin.addDirectoryItem(pluginhandle, url=u, listitem=item, isFolder=False, totalItems=total)
def play_movie():
url = common.args.url
content_id = int(common.args.content_id)
if tv_db.watch_episode(content_id) > 0:
common.refresh_menu()
common.play_url(url)
##########################################
# Context Menu Links
##########################################
def refresh_db():
tv_db.update_tv(True)
def play_trailer():
url = common.args.url
title = common.args.title
series_id = common.args.series_id
poster = database_common.get_poster(series_id)
item = xbmcgui.ListItem(label=title, iconImage=poster, thumbnailImage=poster, path=url)
player = xbmc.Player()
player.play(url, item)
def favor_series():
content_id = common.args.url
if tv_db.favor_series(content_id) > 0:
common.notification('Added ' + urllib.unquote_plus(common.args.title) + ' to favorites')
common.refresh_menu()
else:
        common.notification('Error adding series to favorites', isError=True)
def unfavor_series():
content_id = common.args.url
if tv_db.unfavor_series(content_id) > 0:
common.notification('Removed ' + urllib.unquote_plus(common.args.title) + ' from favorites')
common.refresh_menu()
else:
        common.notification('Error removing series from favorites', isError=True)
def watch_episode():
content_id = common.args.url
if tv_db.watch_episode(content_id) > 0:
common.refresh_menu()
else:
common.notification('Could not update watch count', isError=True)
def unwatch_episode():
content_id = common.args.url
tv_db.unwatch_episode(content_id)
common.refresh_menu()
|
|
from itertools import product
import datetime
import os.path as op
import numpy as np
from numpy.testing import (assert_array_equal, assert_equal, assert_allclose)
import pytest
import matplotlib.pyplot as plt
import mne
from mne import (Epochs, read_events, pick_types, create_info, EpochsArray,
Info, Transform)
from mne.io import read_raw_fif
from mne.utils import (requires_h5py, requires_pandas, grand_average,
catch_logging)
from mne.time_frequency.tfr import (morlet, tfr_morlet, _make_dpss,
tfr_multitaper, AverageTFR, read_tfrs,
write_tfrs, combine_tfr, cwt, _compute_tfr,
EpochsTFR)
from mne.time_frequency import tfr_array_multitaper, tfr_array_morlet
from mne.viz.utils import _fake_click
from mne.tests.test_epochs import assert_metadata_equal
data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_path, 'test_raw.fif')
event_fname = op.join(data_path, 'test-eve.fif')
raw_ctf_fname = op.join(data_path, 'test_ctf_raw.fif')
def test_tfr_ctf():
"""Test that TFRs can be calculated on CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = mne.make_fixed_length_events(raw, duration=0.5)
epochs = mne.Epochs(raw, events)
for method in (tfr_multitaper, tfr_morlet):
method(epochs, [10], 1) # smoke test
def test_morlet():
"""Test morlet with and without zero mean."""
Wz = morlet(1000, [10], 2., zero_mean=True)
W = morlet(1000, [10], 2., zero_mean=False)
assert (np.abs(np.mean(np.real(Wz[0]))) < 1e-5)
assert (np.abs(np.mean(np.real(W[0]))) > 1e-3)
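# The two assertions above contrast the zero-mean and non-zero-mean wavelets:
# with zero_mean=True the real part of the wavelet has (approximately) zero
# mean, so the first value is tiny; without it the mean is clearly non-zero.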
def test_time_frequency():
"""Test time-frequency transform (PSD and ITC)."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
include = []
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=include, exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
times = epochs.times
nave = len(data)
epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)
freqs = np.arange(6, 20, 5) # define frequencies of interest
n_cycles = freqs / 4.
# Test first with a single epoch
power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
# Now compute evoked
evoked = epochs.average()
pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True, decim=slice(0, 2))
# Test picks argument and average parameter
pytest.raises(ValueError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=True, average=False)
power_picks, itc_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, picks=picks, average=True)
epochs_power_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=False, picks=picks, average=False)
power_picks_avg = epochs_power_picks.average()
# the actual data arrays here are equivalent, too...
assert_allclose(power.data, power_picks.data)
assert_allclose(power.data, power_picks_avg.data)
assert_allclose(itc.data, itc_picks.data)
# test on evoked
power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
return_itc=False)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
pytest.raises(AssertionError, assert_allclose,
power.data, power_evoked.data)
# complex output
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
return_itc=False, average=True, output="complex")
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
output="complex", average=False, return_itc=True)
epochs_power_complex = tfr_morlet(epochs, freqs, n_cycles,
output="complex", average=False,
return_itc=False)
epochs_amplitude_2 = abs(epochs_power_complex)
epochs_amplitude_3 = epochs_amplitude_2.copy()
epochs_amplitude_3.data[:] = np.inf # test that it's actually copied
# test that the power computed via `complex` is equivalent to power
# computed within the method.
assert_allclose(epochs_amplitude_2.data**2, epochs_power_picks.data)
print(itc) # test repr
print(itc.ch_names) # test property
itc += power # test add
itc -= power # test sub
ret = itc * 23 # test mult
    itc = ret / 23  # test div
power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
assert 'meg' in power
assert 'grad' in power
assert 'mag' not in power
assert 'eeg' not in power
assert power.nave == nave
assert itc.nave == nave
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (power_.data.shape == (len(picks), len(freqs), 2))
assert (power_.data.shape == itc_.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
# grand average
itc2 = itc.copy()
itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop
gave = grand_average([itc2, itc])
assert gave.data.shape == (itc2.data.shape[0] - 1,
itc2.data.shape[1],
itc2.data.shape[2])
assert itc2.ch_names[1:] == gave.ch_names
assert gave.nave == 2
itc2.drop_channels(itc2.info["bads"])
assert_allclose(gave.data, itc2.data)
itc2.data = np.ones(itc2.data.shape)
itc.data = np.zeros(itc.data.shape)
itc2.nave = 2
itc.nave = 1
itc.drop_channels([itc.ch_names[0]])
combined_itc = combine_tfr([itc2, itc])
assert_allclose(combined_itc.data,
np.ones(combined_itc.data.shape) * 2 / 3)
# more tests
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
return_itc=True)
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,
return_itc=False)
tfr_data = tfr.data[0]
assert (tfr_data.shape == (len(picks), len(freqs), len(times)))
tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,
decim=slice(0, 2), average=False,
return_itc=False).data[0]
assert (tfr2.shape == (len(picks), len(freqs), 2))
single_power = tfr_morlet(epochs, freqs, 2, average=False,
return_itc=False).data
single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),
average=False, return_itc=False).data
single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),
average=False, return_itc=False).data
single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),
average=False, return_itc=False).data
assert_allclose(np.mean(single_power, axis=0), power.data)
assert_allclose(np.mean(single_power2, axis=0), power.data[:, :, :2])
assert_allclose(np.mean(single_power3, axis=0), power.data[:, :, 1:3])
assert_allclose(np.mean(single_power4, axis=0), power.data[:, :, 2:4])
power_pick = power.pick_channels(power.ch_names[:10:2])
assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
power_drop = power.drop_channels(power.ch_names[1:10:2])
assert_equal(power_drop.ch_names, power_pick.ch_names)
assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))
power_pick, power_drop = mne.equalize_channels([power_pick, power_drop])
assert_equal(power_pick.ch_names, power_drop.ch_names)
assert_equal(power_pick.data.shape, power_drop.data.shape)
# Test decimation:
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in [2, 3, 8, 9]:
for use_fft in [True, False]:
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
use_fft=use_fft, return_itc=True,
decim=decim)
assert_equal(power.data.shape[2],
np.ceil(float(len(times)) / decim))
freqs = list(range(50, 55))
decim = 2
_, n_chan, n_time = data.shape
tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,
return_itc=False).data[0]
assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))
# Test cwt modes
Ws = morlet(512, [10, 20], n_cycles=2)
pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
for use_fft in [True, False]:
for mode in ['same', 'valid', 'full']:
cwt(data[0], Ws, use_fft=use_fft, mode=mode)
# Test invalid frequency arguments
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_morlet(epochs, freqs=np.arange(0, 3), n_cycles=7)
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_morlet(epochs, freqs=np.arange(-4, -1), n_cycles=7)
# Test decim parameter checks
pytest.raises(TypeError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, use_fft=True, return_itc=True,
decim='decim')
# When convolving in time, wavelets must not be longer than the data
pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws,
use_fft=False)
with pytest.warns(UserWarning, match='one of the wavelets.*is longer'):
cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True)
# Check for off-by-one errors when using wavelets with an even number of
# samples
psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full')
assert_equal(psd.shape, (2, 1, 420))
def test_dpsswavelet():
"""Test DPSS tapers."""
freqs = np.arange(5, 25, 3)
Ws = _make_dpss(1000, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0,
zero_mean=True)
assert (len(Ws) == 3) # 3 tapers expected
# Check that zero mean is true
assert (np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
assert (len(Ws[0]) == len(freqs)) # As many wavelets as asked for
@pytest.mark.slowtest
def test_tfr_multitaper():
"""Test tfr_multitaper."""
sfreq = 200.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # Second long epochs
n_epochs = 3
seed = 42
rng = np.random.RandomState(seed)
noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
dat = noise + signal
reject = dict(grad=4000.)
events = np.empty((n_epochs, 3), int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
reject=reject)
freqs = np.arange(35, 70, 5, dtype=np.float64)
power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0)
power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0, decim=slice(0, 2))
picks = np.arange(len(ch_names))
power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2.,
time_bandwidth=4.0, picks=picks)
power_epochs = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False)
power_averaged = power_epochs.average()
power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False).average()
print(power_evoked) # test repr for EpochsTFR
# Test channel picking
power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002'])
assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200))
assert_equal(power_epochs_picked.ch_names, ['SIM0001'])
pytest.raises(ValueError, tfr_multitaper, epochs,
freqs=freqs, n_cycles=freqs / 2.,
return_itc=True, average=False)
# test picks argument
assert_allclose(power.data, power_picks.data)
assert_allclose(power.data, power_averaged.data)
assert_allclose(power.times, power_epochs.times)
assert_allclose(power.times, power_averaged.times)
assert_equal(power.nave, power_averaged.nave)
assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
assert_allclose(itc.data, itc_picks.data)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
pytest.raises(AssertionError, assert_allclose,
power.data, power_evoked.data)
tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
assert (tmax > 0.3 and tmax < 0.7)
assert not np.any(itc.data < 0.)
assert (fmax > 40 and fmax < 60)
assert (power2.data.shape == (len(picks), len(freqs), 2))
assert (power2.data.shape == itc2.data.shape)
# Test decim parameter checks and compatibility between wavelets length
# and instance length in the time dimension.
pytest.raises(TypeError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))
pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=1000, time_bandwidth=4.0)
# Test invalid frequency arguments
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_multitaper(epochs, freqs=np.arange(0, 3), n_cycles=7)
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_multitaper(epochs, freqs=np.arange(-4, -1), n_cycles=7)
def test_crop():
"""Test TFR cropping."""
data = np.zeros((3, 4, 5))
times = np.array([.1, .2, .3, .4, .5])
freqs = np.array([.10, .20, .30, .40])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.crop(tmin=0.2)
assert_array_equal(tfr.times, [0.2, 0.3, 0.4, 0.5])
assert tfr.data.ndim == 3
assert tfr.data.shape[-1] == 4
tfr.crop(fmax=0.3)
assert_array_equal(tfr.freqs, [0.1, 0.2, 0.3])
assert tfr.data.ndim == 3
assert tfr.data.shape[-2] == 3
tfr.crop(tmin=0.3, tmax=0.4, fmin=0.1, fmax=0.2)
assert_array_equal(tfr.times, [0.3, 0.4])
assert tfr.data.ndim == 3
assert tfr.data.shape[-1] == 2
assert_array_equal(tfr.freqs, [0.1, 0.2])
assert tfr.data.shape[-2] == 2
@requires_h5py
@requires_pandas
def test_io(tmpdir):
"""Test TFR IO capacities."""
from pandas import DataFrame
tempdir = str(tmpdir)
fname = op.join(tempdir, 'test-tfr.h5')
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
info['meas_date'] = datetime.datetime(year=2020, month=2, day=5,
tzinfo=datetime.timezone.utc)
info._check_consistency()
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.save(fname)
tfr2 = read_tfrs(fname, condition='test')
assert isinstance(tfr2.info, Info)
assert isinstance(tfr2.info['dev_head_t'], Transform)
assert_array_equal(tfr.data, tfr2.data)
assert_array_equal(tfr.times, tfr2.times)
assert_array_equal(tfr.freqs, tfr2.freqs)
assert_equal(tfr.comment, tfr2.comment)
assert_equal(tfr.nave, tfr2.nave)
pytest.raises(IOError, tfr.save, fname)
tfr.comment = None
# test old meas_date
info['meas_date'] = (1, 2)
tfr.save(fname, overwrite=True)
assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
tfr.comment = 'test-A'
tfr2.comment = 'test-B'
fname = op.join(tempdir, 'test2-tfr.h5')
write_tfrs(fname, [tfr, tfr2])
tfr3 = read_tfrs(fname, condition='test-A')
assert_equal(tfr.comment, tfr3.comment)
assert (isinstance(tfr.info, mne.Info))
tfrs = read_tfrs(fname, condition=None)
assert_equal(len(tfrs), 2)
tfr4 = tfrs[1]
assert_equal(tfr2.comment, tfr4.comment)
pytest.raises(ValueError, read_tfrs, fname, condition='nonono')
# Test save of EpochsTFR.
n_events = 5
data = np.zeros((n_events, 3, 2, 3))
# create fake metadata
rng = np.random.RandomState(42)
rt = np.round(rng.uniform(size=(n_events,)), 3)
trialtypes = np.array(['face', 'place'])
trial = trialtypes[(rng.uniform(size=(n_events,)) > .5).astype(int)]
meta = DataFrame(dict(RT=rt, Trial=trial))
# fake events and event_id
events = np.zeros([n_events, 3])
events[:, 0] = np.arange(n_events)
events[:, 2] = np.ones(n_events)
event_id = {'a/b': 1}
# fake selection
n_dropped_epochs = 3
selection = np.arange(n_events + n_dropped_epochs)[n_dropped_epochs:]
drop_log = tuple([('IGNORED',) for i in range(n_dropped_epochs)] +
[() for i in range(n_events)])
tfr = EpochsTFR(info, data=data, times=times, freqs=freqs,
comment='test', method='crazy-tfr', events=events,
event_id=event_id, selection=selection, drop_log=drop_log,
metadata=meta)
fname_save = fname
tfr.save(fname_save, True)
fname_write = op.join(tempdir, 'test3-tfr.h5')
write_tfrs(fname_write, tfr, overwrite=True)
for fname in [fname_save, fname_write]:
read_tfr = read_tfrs(fname)[0]
assert_array_equal(tfr.data, read_tfr.data)
assert_metadata_equal(tfr.metadata, read_tfr.metadata)
assert_array_equal(tfr.events, read_tfr.events)
assert tfr.event_id == read_tfr.event_id
assert_array_equal(tfr.selection, read_tfr.selection)
assert tfr.drop_log == read_tfr.drop_log
with pytest.raises(NotImplementedError, match='condition not supported'):
tfr = read_tfrs(fname, condition='a')
def test_init_EpochsTFR():
"""Test __init__ for EpochsTFR."""
# Create fake data:
data = np.zeros((3, 3, 3, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20, .30])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
data_x = data[:, :, :, 0]
with pytest.raises(ValueError, match='data should be 4d. Got 3'):
tfr = EpochsTFR(info, data=data_x, times=times, freqs=freqs)
data_x = data[:, :-1, :, :]
with pytest.raises(ValueError, match="channels and data size don't"):
tfr = EpochsTFR(info, data=data_x, times=times, freqs=freqs)
times_x = times[:-1]
with pytest.raises(ValueError, match="times and data size don't match"):
tfr = EpochsTFR(info, data=data, times=times_x, freqs=freqs)
freqs_x = freqs[:-1]
with pytest.raises(ValueError, match="frequencies and data size don't"):
tfr = EpochsTFR(info, data=data, times=times_x, freqs=freqs_x)
    del tfr
def test_plot():
"""Test TFR plotting."""
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
# test title=auto, combine=None, and correct length of figure list
picks = [1, 2]
figs = tfr.plot(picks, title='auto', colorbar=False,
mask=np.ones(tfr.data.shape[1:], bool))
assert len(figs) == len(picks)
assert 'MEG' in figs[0].texts[0].get_text()
plt.close('all')
# test combine and title keyword
figs = tfr.plot(picks, title='title', colorbar=False, combine='rms',
mask=np.ones(tfr.data.shape[1:], bool))
assert len(figs) == 1
assert figs[0].texts[0].get_text() == 'title'
figs = tfr.plot(picks, title='auto', colorbar=False, combine='mean',
mask=np.ones(tfr.data.shape[1:], bool))
assert len(figs) == 1
assert figs[0].texts[0].get_text() == 'Mean of 2 sensors'
with pytest.raises(ValueError, match='combine must be None'):
tfr.plot(picks, colorbar=False, combine='something',
mask=np.ones(tfr.data.shape[1:], bool))
plt.close('all')
# test axes argument - first with list of axes
ax = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0))
figs = tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
assert len(figs) == len([ax, ax2, ax3])
# and as a single axes
figs = tfr.plot(picks=[0], axes=ax)
assert len(figs) == 1
plt.close('all')
# and invalid inputs
with pytest.raises(ValueError, match='axes must be None'):
tfr.plot(picks, colorbar=False, axes={},
mask=np.ones(tfr.data.shape[1:], bool))
# different number of axes and picks should throw a RuntimeError
with pytest.raises(RuntimeError, match='There must be an axes'):
tfr.plot(picks=[0], colorbar=False, axes=[ax, ax2],
mask=np.ones(tfr.data.shape[1:], bool))
tfr.plot_topo(picks=[1, 2])
plt.close('all')
# interactive mode on by default
fig = tfr.plot(picks=[1], cmap='RdBu_r')[0]
fig.canvas.key_press_event('up')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('pagedown')
cbar = fig.get_axes()[0].CB # Fake dragging with mouse.
ax = cbar.cbar.ax
_fake_click(fig, ax, (0.1, 0.1))
_fake_click(fig, ax, (0.1, 0.2), kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
_fake_click(fig, ax, (0.1, 0.1), button=3)
_fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
plt.close('all')
def test_plot_joint():
"""Test TFR joint plotting."""
raw = read_raw_fif(raw_fname)
times = np.linspace(-0.1, 0.1, 200)
n_freqs = 3
nave = 1
rng = np.random.RandomState(42)
data = rng.randn(len(raw.ch_names), n_freqs, len(times))
tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
topomap_args = {'res': 8, 'contours': 0, 'sensors': False}
for combine in ('mean', 'rms'):
with catch_logging() as log:
tfr.plot_joint(title='auto', colorbar=True,
combine=combine, topomap_args=topomap_args,
verbose='debug')
plt.close('all')
log = log.getvalue()
assert 'Plotting topomap for grad data' in log
# check various timefreqs
for timefreqs in (
{(tfr.times[0], tfr.freqs[1]): (0.1, 0.5),
(tfr.times[-1], tfr.freqs[-1]): (0.2, 0.6)},
[(tfr.times[1], tfr.freqs[1])]):
tfr.plot_joint(timefreqs=timefreqs, topomap_args=topomap_args)
plt.close('all')
# test bad timefreqs
timefreqs = ([(-100, 1)], tfr.times[1], [1],
[(tfr.times[1], tfr.freqs[1], tfr.freqs[1])])
for these_timefreqs in timefreqs:
pytest.raises(ValueError, tfr.plot_joint, these_timefreqs)
# test that the object is not internally modified
tfr_orig = tfr.copy()
tfr.plot_joint(baseline=(0, None), exclude=[tfr.ch_names[0]],
topomap_args=topomap_args)
plt.close('all')
assert_array_equal(tfr.data, tfr_orig.data)
assert set(tfr.ch_names) == set(tfr_orig.ch_names)
assert set(tfr.times) == set(tfr_orig.times)
# test tfr with picked channels
tfr.pick_channels(tfr.ch_names[:-1])
tfr.plot_joint(title='auto', colorbar=True, topomap_args=topomap_args)
def test_add_channels():
"""Test tfr splitting / re-appending channel types."""
data = np.zeros((6, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(
['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)
tfr_meg = tfr.copy().pick_types(meg=True)
tfr_stim = tfr.copy().pick_types(meg=False, stim=True)
tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])
assert all(ch in tfr_new.ch_names
for ch in tfr_stim.ch_names + tfr_meg.ch_names)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg])
have_all = all(ch in tfr_new.ch_names
for ch in tfr.ch_names if ch != 'STIM 001')
assert have_all
assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
assert all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names)
# Now test errors
tfr_badsf = tfr_eeg.copy()
tfr_badsf.info['sfreq'] = 3.1415927
tfr_eeg = tfr_eeg.crop(-.1, .1)
pytest.raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
pytest.raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
pytest.raises(ValueError, tfr_meg.add_channels, [tfr_meg])
pytest.raises(TypeError, tfr_meg.add_channels, tfr_badsf)
def test_compute_tfr():
"""Test _compute_tfr function."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=[], exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
sfreq = epochs.info['sfreq']
freqs = np.arange(10, 20, 3).astype(float)
# Check all combination of options
for func, use_fft, zero_mean, output in product(
(tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True),
('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')):
# Check exception
if (func == tfr_array_multitaper) and (output == 'phase'):
pytest.raises(NotImplementedError, func, data, sfreq=sfreq,
freqs=freqs, output=output)
continue
# Check runs
out = func(data, sfreq=sfreq, freqs=freqs, use_fft=use_fft,
zero_mean=zero_mean, n_cycles=2., output=output)
# Check shapes
shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
if ('avg' in output) or ('itc' in output):
assert_array_equal(shape[1:], out.shape)
else:
assert_array_equal(shape, out.shape)
# Check types
if output in ('complex', 'avg_power_itc'):
assert_equal(np.complex128, out.dtype)
else:
assert_equal(np.float64, out.dtype)
assert (np.all(np.isfinite(out)))
# Check errors params
for _data in (None, 'foo', data[0]):
pytest.raises(ValueError, _compute_tfr, _data, freqs, sfreq)
for _freqs in (None, 'foo', [[0]]):
pytest.raises(ValueError, _compute_tfr, data, _freqs, sfreq)
for _sfreq in (None, 'foo'):
pytest.raises(ValueError, _compute_tfr, data, freqs, _sfreq)
for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
for value in (None, 'foo'):
kwargs = {key: value} # FIXME pep8
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
**kwargs)
with pytest.raises(ValueError, match='above Nyquist'):
_compute_tfr(data, [sfreq], sfreq)
# No time_bandwidth param in morlet
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
method='morlet', time_bandwidth=1)
# No phase in multitaper XXX Check ?
pytest.raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
method='multitaper', output='phase')
# Inter-trial coherence tests
out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
assert np.sum(out >= 1) == 0
assert np.sum(out <= 0) == 0
# Check decim shapes
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
_decim = slice(None, None, decim) if isinstance(decim, int) else decim
n_time = len(np.arange(data.shape[2])[_decim])
shape = np.r_[data.shape[:2], len(freqs), n_time]
for method in ('multitaper', 'morlet'):
# Single trials
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
n_cycles=2.)
assert_array_equal(shape, out.shape)
# Averages
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
output='avg_power', n_cycles=2.)
assert_array_equal(shape[1:], out.shape)
@pytest.mark.parametrize('method', ('multitaper', 'morlet'))
@pytest.mark.parametrize('decim', (1, slice(1, None, 2), 3))
def test_compute_tfr_correct(method, decim):
"""Test that TFR actually gets us our freq back."""
sfreq = 1000.
t = np.arange(1000) / sfreq
f = 50.
data = np.sin(2 * np.pi * 50. * t)
data *= np.hanning(data.size)
data = data[np.newaxis, np.newaxis]
freqs = np.arange(10, 111, 10)
assert f in freqs
tfr = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
n_cycles=2)[0, 0]
assert freqs[np.argmax(np.abs(tfr).mean(-1))] == f
def test_averaging_epochsTFR():
"""Test that EpochsTFR averaging methods work."""
# Setup for reading the raw data
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
freqs = np.arange(6, 20, 5) # define frequencies of interest
n_cycles = freqs / 4.
raw = read_raw_fif(raw_fname)
# only pick a few events for speed
events = read_events(event_fname)[:4]
include = []
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=include, exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
# Obtain EpochsTFR
power = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
average=False, use_fft=True,
return_itc=False)
# Test average methods
for func, method in zip(
[np.mean, np.median, np.mean],
['mean', 'median', lambda x: np.mean(x, axis=0)]):
avgpower = power.average(method=method)
np.testing.assert_array_equal(func(power.data, axis=0),
avgpower.data)
with pytest.raises(RuntimeError, match='You passed a function that '
'resulted in data'):
power.average(method=np.mean)
@requires_pandas
def test_getitem_epochsTFR():
"""Test GetEpochsMixin in the context of EpochsTFR."""
from pandas import DataFrame
# Setup for reading the raw data and select a few trials
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
# create fake data, test with and without dropping epochs
for n_drop_epochs in [0, 2]:
n_events = 12
# create fake metadata
rng = np.random.RandomState(42)
rt = rng.uniform(size=(n_events,))
trialtypes = np.array(['face', 'place'])
trial = trialtypes[(rng.uniform(size=(n_events,)) > .5).astype(int)]
meta = DataFrame(dict(RT=rt, Trial=trial))
event_id = dict(a=1, b=2, c=3, d=4)
epochs = Epochs(raw, events[:n_events], event_id=event_id,
metadata=meta, decim=1)
epochs.drop(np.arange(n_drop_epochs))
n_events -= n_drop_epochs
freqs = np.arange(12., 17., 2.) # define frequencies of interest
n_cycles = freqs / 2. # 0.5 second time windows for all frequencies
# Choose time x (full) bandwidth product
time_bandwidth = 4.0
# With 0.5 s time windows, this gives 8 Hz smoothing
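        # (full bandwidth = time_bandwidth / window length = 4.0 / 0.5 s = 8 Hz)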
kwargs = dict(freqs=freqs, n_cycles=n_cycles, use_fft=True,
time_bandwidth=time_bandwidth, return_itc=False,
average=False, n_jobs=1)
power = tfr_multitaper(epochs, **kwargs)
# Check that power and epochs metadata is the same
assert_metadata_equal(epochs.metadata, power.metadata)
assert_metadata_equal(epochs[::2].metadata, power[::2].metadata)
assert_metadata_equal(epochs['RT < .5'].metadata,
power['RT < .5'].metadata)
assert_array_equal(epochs.selection, power.selection)
assert epochs.drop_log == power.drop_log
# Check that get power is functioning
assert_array_equal(power[3:6].data, power.data[3:6])
assert_array_equal(power[3:6].events, power.events[3:6])
assert_array_equal(epochs.selection[3:6], power.selection[3:6])
indx_check = (power.metadata['Trial'] == 'face')
try:
indx_check = indx_check.to_numpy()
except Exception:
pass # older Pandas
indx_check = indx_check.nonzero()
assert_array_equal(power['Trial == "face"'].events,
power.events[indx_check])
assert_array_equal(power['Trial == "face"'].data,
power.data[indx_check])
# Check that the wrong Key generates a Key Error for Metadata search
with pytest.raises(KeyError):
power['Trialz == "place"']
# Test length function
assert len(power) == n_events
assert len(power[3:6]) == 3
# Test iteration function
for ind, power_ep in enumerate(power):
assert_array_equal(power_ep, power.data[ind])
if ind == 5:
break
# Test that current state is maintained
assert_array_equal(power.next(), power.data[ind + 1])
# Check decim affects sfreq
power_decim = tfr_multitaper(epochs, decim=2, **kwargs)
assert power.info['sfreq'] / 2. == power_decim.info['sfreq']
@requires_pandas
def test_to_data_frame():
"""Test EpochsTFR Pandas exporter."""
# Create fake EpochsTFR data:
n_epos = 3
ch_names = ['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004']
n_picks = len(ch_names)
ch_types = ['eeg'] * n_picks
n_freqs = 5
n_times = 6
data = np.random.rand(n_epos, n_picks, n_freqs, n_times)
times = np.arange(6)
srate = 1000.
freqs = np.arange(5)
events = np.zeros((n_epos, 3), dtype=int)
events[:, 0] = np.arange(n_epos)
events[:, 2] = np.arange(5, 5 + n_epos)
event_id = {k: v for v, k in zip(events[:, 2], ['ha', 'he', 'hu'])}
info = mne.create_info(ch_names, srate, ch_types)
tfr = mne.time_frequency.EpochsTFR(info, data, times, freqs,
events=events, event_id=event_id)
# test index checking
with pytest.raises(ValueError, match='options. Valid index options are'):
tfr.to_data_frame(index=['foo', 'bar'])
with pytest.raises(ValueError, match='"qux" is not a valid option'):
tfr.to_data_frame(index='qux')
with pytest.raises(TypeError, match='index must be `None` or a string '):
tfr.to_data_frame(index=np.arange(400))
# test wide format
df_wide = tfr.to_data_frame()
assert all(np.in1d(tfr.ch_names, df_wide.columns))
assert all(np.in1d(['time', 'condition', 'freq', 'epoch'],
df_wide.columns))
# test long format
df_long = tfr.to_data_frame(long_format=True)
expected = ('condition', 'epoch', 'freq', 'time', 'channel', 'ch_type',
'value')
assert set(expected) == set(df_long.columns)
assert set(tfr.ch_names) == set(df_long['channel'])
    assert len(df_long) == tfr.data.size
# test long format w/ index
df_long = tfr.to_data_frame(long_format=True, index=['freq'])
del df_wide, df_long
# test whether data is in correct shape
df = tfr.to_data_frame(index=['condition', 'epoch', 'freq', 'time'])
data = tfr.data
assert_array_equal(df.values[:, 0],
data[:, 0, :, :].reshape(1, -1).squeeze())
# compare arbitrary observation:
assert df.loc[('he', slice(None), freqs[1], times[2] * srate),
ch_names[3]].iloc[0] == data[1, 3, 1, 2]
# Check also for AverageTFR:
tfr = tfr.average()
with pytest.raises(ValueError, match='options. Valid index options are'):
tfr.to_data_frame(index=['epoch', 'condition'])
with pytest.raises(ValueError, match='"epoch" is not a valid option'):
tfr.to_data_frame(index='epoch')
with pytest.raises(TypeError, match='index must be `None` or a string '):
tfr.to_data_frame(index=np.arange(400))
# test wide format
df_wide = tfr.to_data_frame()
assert all(np.in1d(tfr.ch_names, df_wide.columns))
assert all(np.in1d(['time', 'freq'], df_wide.columns))
# test long format
df_long = tfr.to_data_frame(long_format=True)
expected = ('freq', 'time', 'channel', 'ch_type', 'value')
assert set(expected) == set(df_long.columns)
assert set(tfr.ch_names) == set(df_long['channel'])
    assert len(df_long) == tfr.data.size
# test long format w/ index
df_long = tfr.to_data_frame(long_format=True, index=['freq'])
del df_wide, df_long
# test whether data is in correct shape
df = tfr.to_data_frame(index=['freq', 'time'])
data = tfr.data
assert_array_equal(df.values[:, 0],
data[0, :, :].reshape(1, -1).squeeze())
# compare arbitrary observation:
assert df.loc[(freqs[1], times[2] * srate), ch_names[3]] == \
data[3, 1, 2]
@requires_pandas
@pytest.mark.parametrize('index', ('time', ['condition', 'time', 'freq'],
['freq', 'time'], ['time', 'freq'], None))
def test_to_data_frame_index(index):
"""Test index creation in epochs Pandas exporter."""
# Create fake EpochsTFR data:
n_epos = 3
ch_names = ['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004']
n_picks = len(ch_names)
ch_types = ['eeg'] * n_picks
n_freqs = 5
n_times = 6
data = np.random.rand(n_epos, n_picks, n_freqs, n_times)
times = np.arange(6)
freqs = np.arange(5)
events = np.zeros((n_epos, 3), dtype=int)
events[:, 0] = np.arange(n_epos)
events[:, 2] = np.arange(5, 8)
event_id = {k: v for v, k in zip(events[:, 2], ['ha', 'he', 'hu'])}
info = mne.create_info(ch_names, 1000., ch_types)
tfr = mne.time_frequency.EpochsTFR(info, data, times, freqs,
events=events, event_id=event_id)
df = tfr.to_data_frame(picks=[0, 2, 3], index=index)
# test index order/hierarchy preservation
if not isinstance(index, list):
index = [index]
assert (df.index.names == index)
# test that non-indexed data were present as columns
non_index = list(set(['condition', 'time', 'freq', 'epoch']) - set(index))
if len(non_index):
assert all(np.in1d(non_index, df.columns))
@requires_pandas
@pytest.mark.parametrize('time_format', (None, 'ms', 'timedelta'))
def test_to_data_frame_time_format(time_format):
"""Test time conversion in epochs Pandas exporter."""
from pandas import Timedelta
n_epos = 3
ch_names = ['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004']
n_picks = len(ch_names)
ch_types = ['eeg'] * n_picks
n_freqs = 5
n_times = 6
data = np.random.rand(n_epos, n_picks, n_freqs, n_times)
times = np.arange(6)
freqs = np.arange(5)
events = np.zeros((n_epos, 3), dtype=int)
events[:, 0] = np.arange(n_epos)
events[:, 2] = np.arange(5, 8)
event_id = {k: v for v, k in zip(events[:, 2], ['ha', 'he', 'hu'])}
info = mne.create_info(ch_names, 1000., ch_types)
tfr = mne.time_frequency.EpochsTFR(info, data, times, freqs,
events=events, event_id=event_id)
# test time_format
df = tfr.to_data_frame(time_format=time_format)
dtypes = {None: np.float64, 'ms': np.int64, 'timedelta': Timedelta}
assert isinstance(df['time'].iloc[0], dtypes[time_format])
|
|
"""
Calculates mold growth indication from temperature and humidity.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mold_indicator/
"""
import logging
import math
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.util as util
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_CRITICAL_TEMP = 'Est. Crit. Temp'
ATTR_DEWPOINT = 'Dewpoint'
CONF_CALIBRATION_FACTOR = 'calibration_factor'
CONF_INDOOR_HUMIDITY = 'indoor_humidity_sensor'
CONF_INDOOR_TEMP = 'indoor_temp_sensor'
CONF_OUTDOOR_TEMP = 'outdoor_temp_sensor'
DEFAULT_NAME = 'Mold Indicator'
MAGNUS_K2 = 17.62
MAGNUS_K3 = 243.12
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_INDOOR_TEMP): cv.entity_id,
vol.Required(CONF_OUTDOOR_TEMP): cv.entity_id,
vol.Required(CONF_INDOOR_HUMIDITY): cv.entity_id,
vol.Optional(CONF_CALIBRATION_FACTOR): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup MoldIndicator sensor."""
name = config.get(CONF_NAME, DEFAULT_NAME)
indoor_temp_sensor = config.get(CONF_INDOOR_TEMP)
outdoor_temp_sensor = config.get(CONF_OUTDOOR_TEMP)
indoor_humidity_sensor = config.get(CONF_INDOOR_HUMIDITY)
calib_factor = config.get(CONF_CALIBRATION_FACTOR)
add_devices([MoldIndicator(
hass, name, indoor_temp_sensor, outdoor_temp_sensor,
indoor_humidity_sensor, calib_factor)])
class MoldIndicator(Entity):
"""Represents a MoldIndication sensor."""
def __init__(self, hass, name, indoor_temp_sensor, outdoor_temp_sensor,
indoor_humidity_sensor, calib_factor):
"""Initialize the sensor."""
self._state = None
self._name = name
self._indoor_temp_sensor = indoor_temp_sensor
self._indoor_humidity_sensor = indoor_humidity_sensor
self._outdoor_temp_sensor = outdoor_temp_sensor
self._calib_factor = calib_factor
self._is_metric = hass.config.units.is_metric
self._dewpoint = None
self._indoor_temp = None
self._outdoor_temp = None
self._indoor_hum = None
self._crit_temp = None
track_state_change(hass, indoor_temp_sensor, self._sensor_changed)
track_state_change(hass, outdoor_temp_sensor, self._sensor_changed)
track_state_change(hass, indoor_humidity_sensor, self._sensor_changed)
# Read initial state
indoor_temp = hass.states.get(indoor_temp_sensor)
outdoor_temp = hass.states.get(outdoor_temp_sensor)
indoor_hum = hass.states.get(indoor_humidity_sensor)
if indoor_temp:
self._indoor_temp = MoldIndicator._update_temp_sensor(indoor_temp)
if outdoor_temp:
self._outdoor_temp = MoldIndicator._update_temp_sensor(
outdoor_temp)
if indoor_hum:
self._indoor_hum = MoldIndicator._update_hum_sensor(indoor_hum)
self.update()
@staticmethod
def _update_temp_sensor(state):
"""Parse temperature sensor value."""
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
temp = util.convert(state.state, float)
if temp is None:
_LOGGER.error('Unable to parse sensor temperature: %s',
state.state)
return None
# convert to celsius if necessary
if unit == TEMP_FAHRENHEIT:
return util.temperature.fahrenheit_to_celsius(temp)
elif unit == TEMP_CELSIUS:
return temp
else:
_LOGGER.error("Temp sensor has unsupported unit: %s"
" (allowed: %s, %s)",
unit, TEMP_CELSIUS, TEMP_FAHRENHEIT)
return None
@staticmethod
def _update_hum_sensor(state):
"""Parse humidity sensor value."""
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
hum = util.convert(state.state, float)
if hum is None:
_LOGGER.error('Unable to parse sensor humidity: %s',
state.state)
return None
if unit != '%':
_LOGGER.error("Humidity sensor has unsupported unit: %s %s",
unit, " (allowed: %)")
if hum > 100 or hum < 0:
_LOGGER.error("Humidity sensor out of range: %s %s", hum,
" (allowed: 0-100%)")
return hum
def update(self):
"""Calculate latest state."""
# check all sensors
if None in (self._indoor_temp, self._indoor_hum, self._outdoor_temp):
return
# re-calculate dewpoint and mold indicator
self._calc_dewpoint()
self._calc_moldindicator()
def _sensor_changed(self, entity_id, old_state, new_state):
"""Called when sensor values change."""
if new_state is None:
return
if entity_id == self._indoor_temp_sensor:
self._indoor_temp = MoldIndicator._update_temp_sensor(new_state)
elif entity_id == self._outdoor_temp_sensor:
self._outdoor_temp = MoldIndicator._update_temp_sensor(new_state)
elif entity_id == self._indoor_humidity_sensor:
self._indoor_hum = MoldIndicator._update_hum_sensor(new_state)
self.update()
self.update_ha_state()
def _calc_dewpoint(self):
"""Calculate the dewpoint for the indoor air."""
# use magnus approximation to calculate the dew point
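        # Magnus formula: with alpha = K2*T/(K3+T) and beta = K2*K3/(K3+T),
        # the dew point is Td = K3 * (alpha + ln(RH/100)) / (beta - ln(RH/100))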
alpha = MAGNUS_K2 * self._indoor_temp / (MAGNUS_K3 + self._indoor_temp)
beta = MAGNUS_K2 * MAGNUS_K3 / (MAGNUS_K3 + self._indoor_temp)
if self._indoor_hum == 0:
self._dewpoint = -50 # not defined, assume very low value
else:
self._dewpoint = \
MAGNUS_K3 * (alpha + math.log(self._indoor_hum / 100.0)) / \
(beta - math.log(self._indoor_hum / 100.0))
_LOGGER.debug("Dewpoint: %f " + TEMP_CELSIUS, self._dewpoint)
def _calc_moldindicator(self):
"""Calculate the humidity at the (cold) calibration point."""
if None in (self._dewpoint, self._calib_factor) or \
self._calib_factor == 0:
_LOGGER.debug("Invalid inputs - dewpoint: %s,"
" calibration-factor: %s",
self._dewpoint, self._calib_factor)
self._state = None
return
# first calculate the approximate temperature at the calibration point
self._crit_temp = \
self._outdoor_temp + (self._indoor_temp - self._outdoor_temp) / \
self._calib_factor
_LOGGER.debug("Estimated Critical Temperature: %f " +
TEMP_CELSIUS, self._crit_temp)
# Then calculate the humidity at this point
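        # Inverting the Magnus formula at the critical temperature gives
        # RH_crit = 100 * exp((Td*beta - K3*alpha) / (Td + K3))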
alpha = MAGNUS_K2 * self._crit_temp / (MAGNUS_K3 + self._crit_temp)
beta = MAGNUS_K2 * MAGNUS_K3 / (MAGNUS_K3 + self._crit_temp)
crit_humidity = \
math.exp(
(self._dewpoint * beta - MAGNUS_K3 * alpha) /
(self._dewpoint + MAGNUS_K3)) * 100.0
# check bounds and format
if crit_humidity > 100:
self._state = '100'
elif crit_humidity < 0:
self._state = '0'
else:
self._state = '{0:d}'.format(int(crit_humidity))
_LOGGER.debug('Mold indicator humidity: %s ', self._state)
@property
def should_poll(self):
"""Polling needed."""
return False
@property
def name(self):
"""Return the name."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return '%'
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
if self._is_metric:
return {
ATTR_DEWPOINT: self._dewpoint,
ATTR_CRITICAL_TEMP: self._crit_temp,
}
else:
return {
ATTR_DEWPOINT:
util.temperature.celsius_to_fahrenheit(self._dewpoint),
ATTR_CRITICAL_TEMP:
util.temperature.celsius_to_fahrenheit(self._crit_temp),
}
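# The helper below is an illustrative, self-contained sketch (not part of the
# platform): it repeats the Magnus dew-point calculation used in
# MoldIndicator._calc_dewpoint so it can be sanity-checked in a REPL. The
# default arguments are arbitrary example values.
def _example_magnus_dewpoint(temp=21.0, rel_hum=50.0):
    """Return the Magnus-approximation dew point for temp (deg C), rel_hum (%)."""
    if rel_hum == 0:
        return -50  # same fallback as MoldIndicator for an undefined dew point
    alpha = MAGNUS_K2 * temp / (MAGNUS_K3 + temp)
    beta = MAGNUS_K2 * MAGNUS_K3 / (MAGNUS_K3 + temp)
    return MAGNUS_K3 * (alpha + math.log(rel_hum / 100.0)) / \
        (beta - math.log(rel_hum / 100.0))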
|
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Rainer Gericke
# =============================================================================
#
# Demo program for UAZBUS simulation.
#
# The vehicle reference frame has Z up, X towards the front of the vehicle, and
# Y pointing to the left.
# All units SI.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as irr
import pychrono.vehicle as veh
import math
import os
# =============================================================================
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# Initial vehicle location and orientation
initLoc = chrono.ChVectorD(0, 0, 0.4)
initRot = chrono.ChQuaternionD(1, 0, 0, 0)
# Visualization type for vehicle parts (PRIMITIVES, MESH, or NONE)
chassis_vis_type = veh.VisualizationType_MESH
suspension_vis_type = veh.VisualizationType_PRIMITIVES
steering_vis_type = veh.VisualizationType_PRIMITIVES
wheel_vis_type = veh.VisualizationType_MESH
tire_vis_type = veh.VisualizationType_MESH
# Type of tire model (RIGID, TMEASY, PAC02)
tire_model = veh.TireModelType_PAC02
# Point on chassis tracked by the camera
trackPoint = chrono.ChVectorD(0.0, 0.0, 1.75)
# Simulation step sizes
step_size = 1e-3
tire_step_size = step_size
# Simulation end time
tend = 15
# Time interval between two render frames
render_step_size = 1.0 / 50 # FPS = 50
# Output directories
out_dir = "./UAZBUS"
# =============================================================================
print( "Copyright (c) 2017 projectchrono.org\n")
# --------------
# Create systems
# --------------
# Create the vehicle, set parameters, and initialize
uaz = veh.UAZBUS()
uaz.SetContactMethod(chrono.ChContactMethod_NSC)
uaz.SetChassisFixed(False)
uaz.SetInitPosition(chrono.ChCoordsysD(initLoc, initRot))
uaz.SetTireType(tire_model)
uaz.SetTireStepSize(tire_step_size)
uaz.SetInitFwdVel(0.0)
uaz.Initialize()
uaz.SetChassisVisualizationType(chassis_vis_type)
uaz.SetSuspensionVisualizationType(suspension_vis_type)
uaz.SetSteeringVisualizationType(steering_vis_type)
uaz.SetWheelVisualizationType(wheel_vis_type)
uaz.SetTireVisualizationType(tire_vis_type)
suspF = veh.CastToChToeBarLeafspringAxle(uaz.GetVehicle().GetSuspension(0))
leftAngle = suspF.GetKingpinAngleLeft()
rightAngle = suspF.GetKingpinAngleRight()
springFL = suspF.GetSpring(veh.LEFT)
shockFL = suspF.GetShock(veh.RIGHT)
print( "Spring rest length front: " + str(springFL.GetRestLength() ) + "\n")
print( "Shock rest length front: " + str(shockFL.GetRestLength() ) + "\n")
suspR = veh.CastToChLeafspringAxle(uaz.GetVehicle().GetSuspension(1))
springRL = suspR.GetSpring(veh.LEFT)
shockRL = suspR.GetShock(veh.RIGHT)
print( "Spring rest length rear: " + str(springRL.GetRestLength() ) + "\n" )
print( "Shock rest length rear: " + str(shockRL.GetRestLength() ) + "\n" )
print("Vehicle mass: " + str( uaz.GetVehicle().GetVehicleMass() ) + "\n")
print("Vehicle mass (with tires): " + str(uaz.GetTotalMass() ) + "\n")
# ------------------
# Create the terrain
# ------------------
terrain = veh.RigidTerrain(uaz.GetSystem())
patch_mat = chrono.ChMaterialSurfaceNSC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
patch = terrain.AddPatch(patch_mat,
chrono.ChVectorD(0, 0, 0), chrono.ChVectorD(0, 0, 1),
600, 600)
patch.SetColor(chrono.ChColor(0.8, 0.8, 1.0))
patch.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 1200, 1200)
terrain.Initialize()
# -------------------------------------
# Create the vehicle Irrlicht interface
# Create the driver system
# -------------------------------------
app = veh.ChWheeledVehicleIrrApp(uaz.GetVehicle(), 'UAZBUS')
app.AddTypicalLights()
app.SetChaseCamera(trackPoint, 6.0, 0.5)
app.SetTimestep(step_size)
app.AssetBindAll()
app.AssetUpdateAll()
# Create the interactive driver system
driver = veh.ChIrrGuiDriver(app)
# Set the time response for steering and throttle keyboard inputs.
steering_time = 1.0 # time to go from 0 to +1 (or from 0 to -1)
throttle_time = 1.0 # time to go from 0 to +1
braking_time = 0.3 # time to go from 0 to +1
driver.SetSteeringDelta(render_step_size / steering_time)
driver.SetThrottleDelta(render_step_size / throttle_time)
driver.SetBrakingDelta(render_step_size / braking_time)
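# With render_step_size = 1/50 s, each delta above is 0.02, so a held key ramps
# an input from 0 to 1 over the corresponding time defined above (at 50 FPS).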
driver.Initialize()
# -----------------
# Initialize output
# -----------------
#if not os.path.isdir(out_dir):
# os.makedirs(out_dir)
#assert os.path.isdir(out_dir), "Error creating directory "
# ---------------
# Simulation loop
# ---------------
render_steps = math.ceil(render_step_size / step_size)
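# (1/50 s per frame) / (1e-3 s per step) = 20 simulation steps per rendered frame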
step_number = 0
render_frame = 0
maxKingpinAngle = 0.0
realtime_timer = chrono.ChRealtimeStepTimer()
while (app.GetDevice().run()) :
time = uaz.GetSystem().GetChTime()
# Render scene
if (step_number % render_steps == 0) :
app.BeginScene(True, True, irr.SColor(255, 140, 161, 192))
app.DrawAll()
app.EndScene()
# Collect output data from modules (for inter-module communication)
driver_inputs = driver.GetInputs()
# Update modules (process inputs from other modules)
driver.Synchronize(time)
terrain.Synchronize(time)
uaz.Synchronize(time, driver_inputs, terrain)
app.Synchronize(driver.GetInputModeAsString(), driver_inputs)
    # Test for validity of kingpin angles (max. allowed by UAZ: 27 deg)
suspF = veh.CastToChToeBarLeafspringAxle(uaz.GetVehicle().GetSuspension(0))
leftAngle = suspF.GetKingpinAngleLeft() * 180.0 / chrono.CH_C_PI
rightAngle = suspF.GetKingpinAngleRight() * 180.0 / chrono.CH_C_PI
if abs(leftAngle) > maxKingpinAngle :
maxKingpinAngle = abs(leftAngle)
if abs(rightAngle) > maxKingpinAngle :
maxKingpinAngle = abs(rightAngle)
# Advance simulation for one timestep for all modules
driver.Advance(step_size)
terrain.Advance(step_size)
uaz.Advance(step_size)
app.Advance(step_size)
# Increment frame number
step_number += 1
# Spin in place for real time to catch up
realtime_timer.Spin(step_size)
print( "Maximum Kingpin Angle = " + str(maxKingpinAngle ) + " deg \n" )
|
|
"""
Classes and subroutines dealing with network connections and related topics.
"""
from __future__ import with_statement
from functools import wraps
import getpass
import re
import threading
import select
import socket
import sys
from fabric.auth import get_password, set_password
from fabric.utils import abort, handle_prompt_abort
try:
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import ssh
except ImportError, e:
import traceback
traceback.print_exc()
print >> sys.stderr, """
There was a problem importing our SSH library (see traceback above).
Please make sure all dependencies are installed and importable.
""".rstrip()
sys.exit(1)
host_pattern = r'((?P<user>.+)@)?(?P<host>[^:]+)(:(?P<port>\d+))?'
host_regex = re.compile(host_pattern)
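# For example, '[email protected]:222' yields user='user', host='example.com',
# port='222'; the user and port groups are optional.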
class HostConnectionCache(dict):
"""
Dict subclass allowing for caching of host connections/clients.
This subclass does not offer any extra methods, but will intelligently
create new client connections when keys are requested, or return previously
created connections instead.
Key values are the same as host specifiers throughout Fabric: optional
username + ``@``, mandatory hostname, optional ``:`` + port number.
Examples:
* ``example.com`` - typical Internet host address.
* ``firewall`` - atypical, but still legal, local host address.
* ``[email protected]`` - with specific username attached.
* ``[email protected]:222`` - with specific nonstandard port attached.
When the username is not given, ``env.user`` is used. ``env.user``
defaults to the currently running user at startup but may be overwritten by
user code or by specifying a command-line flag.
Note that differing explicit usernames for the same hostname will result in
multiple client connections being made. For example, specifying
``[email protected]`` will create a connection to ``example.com``, logged
in as ``user1``; later specifying ``[email protected]`` will create a new,
2nd connection as ``user2``.
The same applies to ports: specifying two different ports will result in
two different connections to the same host being made. If no port is given,
22 is assumed, so ``example.com`` is equivalent to ``example.com:22``.
"""
def __getitem__(self, key):
# Normalize given key (i.e. obtain username and port, if not given)
user, host, port = normalize(key)
# Recombine for use as a key.
real_key = join_host_strings(user, host, port)
# If not found, create new connection and store it
if real_key not in self:
self[real_key] = connect(user, host, port)
# Return the value either way
return dict.__getitem__(self, real_key)
def __setitem__(self, key, value):
return dict.__setitem__(self, normalize_to_string(key), value)
def __delitem__(self, key):
return dict.__delitem__(self, normalize_to_string(key))
def __contains__(self, key):
return dict.__contains__(self, normalize_to_string(key))
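# Illustrative usage sketch (assuming a reachable host and a configured env):
# equivalent host strings normalize to the same key, so the cached client is
# reused instead of opening a second connection.
#
#     connections = HostConnectionCache()
#     client = connections['[email protected]:22']  # connects and caches
#     client is connections['[email protected]']    # True: same normalized key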
def normalize(host_string, omit_port=False):
"""
Normalizes a given host string, returning explicit host, user, port.
If ``omit_port`` is given and is True, only the host and user are returned.
"""
from fabric.state import env
# Gracefully handle "empty" input by returning empty output
if not host_string:
return ('', '') if omit_port else ('', '', '')
# Get user, host and port separately
r = host_regex.match(host_string).groupdict()
# Add any necessary defaults in
user = r['user'] or env.get('user')
host = r['host']
port = r['port'] or '22'
if omit_port:
return user, host
return user, host, port
def to_dict(host_string):
user, host, port = normalize(host_string)
return {
'user': user, 'host': host, 'port': port, 'host_string': host_string
}
def from_dict(arg):
return join_host_strings(arg['user'], arg['host'], arg['port'])
def denormalize(host_string):
"""
Strips out default values for the given host string.
If the user part is the default user, it is removed;
if the port is port 22, it also is removed.
"""
    from fabric.state import env
r = host_regex.match(host_string).groupdict()
user = ''
if r['user'] is not None and r['user'] != env.user:
user = r['user'] + '@'
port = ''
if r['port'] is not None and r['port'] != '22':
port = ':' + r['port']
return user + r['host'] + port
def join_host_strings(user, host, port=None):
"""
Turns user/host/port strings into ``user@host:port`` combined string.
This function is not responsible for handling missing user/port strings;
for that, see the ``normalize`` function.
If ``port`` is omitted, the returned string will be of the form
``user@host``.
"""
port_string = ''
if port:
port_string = ":%s" % port
return "%s@%s%s" % (user, host, port_string)
def normalize_to_string(host_string):
"""
normalize() returns a tuple; this returns another valid host string.
"""
return join_host_strings(*normalize(host_string))
def connect(user, host, port):
"""
Create and return a new SSHClient instance connected to given host.
"""
    from fabric.state import env
#
# Initialization
#
# Init client
client = ssh.SSHClient()
# Load known host keys (e.g. ~/.ssh/known_hosts) unless user says not to.
if not env.disable_known_hosts:
client.load_system_host_keys()
# Unless user specified not to, accept/add new, unknown host keys
if not env.reject_unknown_hosts:
client.set_missing_host_key_policy(ssh.AutoAddPolicy())
#
# Connection attempt loop
#
# Initialize loop variables
connected = False
password = get_password()
# Loop until successful connect (keep prompting for new password)
while not connected:
# Attempt connection
try:
client.connect(
hostname=host,
port=int(port),
username=user,
password=password,
key_filename=env.key_filename,
timeout=10,
allow_agent=not env.no_agent,
look_for_keys=not env.no_keys
)
connected = True
# set a keepalive if desired
if env.keepalive:
client.get_transport().set_keepalive(env.keepalive)
return client
# BadHostKeyException corresponds to key mismatch, i.e. what on the
# command line results in the big banner error about man-in-the-middle
# attacks.
except ssh.BadHostKeyException:
abort("Host key for %s did not match pre-existing key! Server's"
" key was changed recently, or possible man-in-the-middle"
"attack." % env.host)
# Prompt for new password to try on auth failure
except (
ssh.AuthenticationException,
ssh.PasswordRequiredException,
ssh.SSHException
), e:
# For whatever reason, empty password + no ssh key or agent results
# in an SSHException instead of an AuthenticationException. Since
# it's difficult to do otherwise, we must assume empty password +
# SSHException == auth exception. Conversely: if we get
# SSHException and there *was* a password -- it is probably
# something non auth related, and should be sent upwards.
if e.__class__ is ssh.SSHException and password:
abort(str(e))
# Otherwise, assume an auth exception, and prompt for new/better
# password.
# The 'ssh' library doesn't handle prompting for locked private
# keys (i.e. keys with a passphrase and not loaded into an agent)
# so we have to detect this and tweak our prompt slightly.
# (Otherwise, however, the logic flow is the same, because
# ssh's connect() method overrides the password argument to be
# either the login password OR the private key passphrase. Meh.)
#
# NOTE: This will come up if you normally use a
# passphrase-protected private key with ssh-agent, and enter an
# incorrect remote username, because ssh.connect:
# * Tries the agent first, which will fail as you gave the wrong
# username, so obviously any loaded keys aren't gonna work for a
# nonexistent remote account;
# * Then tries the on-disk key file, which is passphrased;
# * Realizes there's no password to try unlocking that key with,
# because you didn't enter a password, because you're using
# ssh-agent;
# * In this condition (trying a key file, password is None)
# ssh raises PasswordRequiredException.
text = None
if e.__class__ is ssh.PasswordRequiredException:
# NOTE: we can't easily say WHICH key's passphrase is needed,
# because ssh doesn't provide us with that info, and
# env.key_filename may be a list of keys, so we can't know
# which one raised the exception. Best not to try.
prompt = "[%s] Passphrase for private key"
text = prompt % env.host_string
password = prompt_for_password(text)
# Update env.password, env.passwords if empty
set_password(password)
# Ctrl-D / Ctrl-C for exit
except (EOFError, TypeError):
# Print a newline (in case user was sitting at prompt)
print('')
sys.exit(0)
# Handle timeouts
except socket.timeout:
abort('Timed out trying to connect to %s' % host)
# Handle DNS error / name lookup failure
except socket.gaierror:
abort('Name lookup failed for %s' % host)
# Handle generic network-related errors
# NOTE: In 2.6, socket.error subclasses IOError
except socket.error, e:
abort('Low level socket error connecting to host %s: %s' % (
host, e[1])
)
def prompt_for_password(prompt=None, no_colon=False, stream=None):
"""
Prompts for and returns a new password if required; otherwise, returns
None.
A trailing colon is appended unless ``no_colon`` is True.
If the user supplies an empty password, the user will be re-prompted until
they enter a non-empty password.
``prompt_for_password`` autogenerates the user prompt based on the current
host being connected to. To override this, specify a string value for
``prompt``.
``stream`` is the stream the prompt will be printed to; if not given,
defaults to ``sys.stderr``.
"""
from fabric.state import env
handle_prompt_abort("a connection or sudo password")
stream = stream or sys.stderr
# Construct prompt
default = "[%s] Login password" % env.host_string
password_prompt = prompt if (prompt is not None) else default
if not no_colon:
password_prompt += ": "
# Get new password value
new_password = getpass.getpass(password_prompt, stream)
# Otherwise, loop until user gives us a non-empty password (to prevent
# returning the empty string, and to avoid unnecessary network overhead.)
while not new_password:
print("Sorry, you can't enter an empty password. Please try again.")
new_password = getpass.getpass(password_prompt, stream)
return new_password
def needs_host(func):
"""
Prompt user for value of ``env.host_string`` when ``env.host_string`` is
empty.
This decorator is basically a safety net for silly users who forgot to
specify the host/host list in one way or another. It should be used to wrap
operations which require a network connection.
Due to how we execute commands per-host in ``main()``, it's not possible to
specify multiple hosts at this point in time, so only a single host will be
prompted for.
Because this decorator sets ``env.host_string``, it will prompt once (and
only once) per command. As ``main()`` clears ``env.host_string`` between
commands, this decorator will also end up prompting the user once per
command (in the case where multiple commands have no hosts set, of course.)
"""
from fabric.state import env
@wraps(func)
def host_prompting_wrapper(*args, **kwargs):
while not env.get('host_string', False):
handle_prompt_abort("the target host connection string")
host_string = raw_input("No hosts found. Please specify (single)"
" host string for connection: ")
env.update(to_dict(host_string))
return func(*args, **kwargs)
return host_prompting_wrapper
def disconnect_all():
"""
Disconnect from all currently connected servers.
Used at the end of ``fab``'s main loop, and also intended for use by
library users.
"""
from fabric.state import connections, output
# Explicitly disconnect from all servers
for key in connections.keys():
if output.status:
print "Disconnecting from %s..." % denormalize(key),
connections[key].close()
del connections[key]
if output.status:
print "done."
|
|
import numpy as np
import pandas as pd
import CoolProp as CP
import matplotlib.pyplot as plt
import warnings
from copy import copy
from scipy.optimize import newton
from prf.state import *
__all__ = ['Point', 'Curve', 'convert_to_base_units']
class Point:
"""Point.
A point in the compressor map that can be defined in different ways.
Parameters
----------
speed : float
Speed in 1/s.
flow_v or flow_m : float
Volumetric or mass flow.
suc, disch : prf.State, prf.State
Suction and discharge states for the point.
suc, head, eff : prf.State, float, float
Suction state, polytropic head and polytropic efficiency.
suc, head, power : prf.State, float, float
Suction state, polytropic head and gas power.
suc, eff, vol_ratio : prf.State, float, float
        Suction state, polytropic efficiency and volume ratio.
Returns
-------
Point : prf.Point
A point in the compressor map.
"""
@convert_to_base_units
def __init__(self, *args, **kwargs):
# TODO create dictionary with optional inputs
self.suc = kwargs.get('suc')
# dummy state used to avoid copying states
self._dummy_state = copy(self.suc)
try:
self.speed = kwargs['speed']
if 'flow_m' not in kwargs:
self.flow_v = kwargs['flow_v']
self.flow_m = self.flow_v * self.suc.rhomass()
else:
self.flow_m = kwargs['flow_m']
self.flow_v = self.flow_m / self.suc.rhomass()
except KeyError as err:
raise Exception('Argument not provided', err.args[0]) from err
self.disch = kwargs.get('disch')
self.head = kwargs.get('head')
self.eff = kwargs.get('eff')
self.power = kwargs.get('power')
self.volume_ratio = kwargs.get('volume_ratio')
if self.suc and self.disch is not None:
self.calc_from_suc_disch(self.suc, self.disch)
elif self.suc and self.head and self.eff is not None:
self.calc_from_suc_head_eff(self.suc, self.head, self.eff)
elif self.suc and self.head and self.power is not None:
self.calc_from_suc_head_power(self.suc, self.head, self.power)
elif self.suc and self.eff and self.volume_ratio is not None:
self.calc_from_suc_eff_vol_ratio(self.suc, self.eff, self.volume_ratio)
else:
raise KeyError('Argument not provided')
if self.volume_ratio is None:
self.volume_ratio = self.suc.rhomass() / self.disch.rhomass()
self.mach_comparison = kwargs.get('mach_comparison')
self.reynolds_comparison = kwargs.get('reynolds_comparison')
self.volume_ratio_comparison = kwargs.get('volume_ratio_comparison')
def __repr__(self):
return (
'\nPoint: '
+ '\n Volume flow: {:10.5} m^3 / s'.format(self.flow_v)
            + '\n Head : {:10.5} J / kg'.format(self.head)
+ '\n Efficiency : {:10.5} %'.format(100 * self.eff)
+ '\n Power : {:10.5} W'.format(self.power)
)
def calc_from_suc_disch(self, suc, disch):
self.head = self.head_pol_schultz()
self.eff = self.eff_pol_schultz()
self.power = self.power_calc()
def calc_from_suc_head_eff(self, suc, head, eff):
"""Point from suction, head and efficiency.
This function will construct a point given its suction, head and
efficiency. Discharge state is calculated by an iterative process
where the discharge pressure is initially defined based on an
isentropic compression. After defining the pressure, polytropic
head is calculated and compared with the given head. A new pressure
is defined and the process is repeated.
Parameters
----------
suc : state
Suction state.
head : float
Polytropic head.
eff : float
Polytropic efficiency.
Returns
-------
"""
# calculate discharge state from suction, head and efficiency
h_suc = suc.hmass()
h_disch = head/eff + h_suc
# first disch state will consider an isentropic compression
s_disch = suc.smass()
disch = State.define(fluid=suc.fluid_dict(), h=h_disch, s=s_disch)
if disch.not_defined():
raise ValueError(f'state not defined: {disch}')
def update_pressure(p):
disch.update(CP.HmassP_INPUTS, h_disch, p)
new_head = self.head_pol_schultz(suc, disch)
return new_head - head
# with newton we have a risk of falling outside a reasonable state
# region. #TODO evaluate pressure interval to use brent method
newton(update_pressure, disch.p(), tol=1e-4)
self.disch = disch
self.calc_from_suc_disch(suc, disch)
def calc_from_suc_head_power(self, suc, head, power):
# calculate efficiency
self.eff = self.flow_m * head / power
self.calc_from_suc_head_eff(suc, head, self.eff)
def calc_from_suc_eff_vol_ratio(self, suc, eff, volume_ratio):
# from volume ratio calculate discharge rhomass
d_disch = suc.rhomass() / volume_ratio
# first disch state will consider an isentropic compression
s_disch = suc.smass()
disch = State.define(fluid=suc.fluid_dict(), d=d_disch, s=s_disch)
def update_pressure(p):
disch.update(CP.DmassP_INPUTS, disch.rhomass(), p)
new_eff = self.eff_pol_schultz(suc=suc, disch=disch)
return new_eff - eff
newton(update_pressure, disch.p(), tol=1e-4)
self.disch = disch
self.calc_from_suc_disch(suc, disch)
def n_exp(self, suc=None, disch=None):
"""Polytropic exponent.
Calculates the polytropic exponent given a suction and a discharge state.
Parameters
----------
suc : State
Suction state.
disch : State
Discharge state.
Returns
-------
n_exp : float
Polytropic exponent.
Examples
--------
"""
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
ps = suc.p()
vs = 1 / suc.rhomass()
pd = disch.p()
vd = 1 / disch.rhomass()
return np.log(pd/ps)/np.log(vs/vd)
def head_pol(self, suc=None, disch=None):
"""Polytropic head.
Calculates the polytropic head given a suction and a discharge state.
Parameters
----------
suc : State
Suction state.
disch : State
Discharge state.
Returns
-------
head_pol : float
Polytropic head.
Examples
--------
"""
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
n = self.n_exp(suc, disch)
p2 = disch.p()
v2 = 1 / disch.rhomass()
p1 = suc.p()
v1 = 1 / suc.rhomass()
return (n/(n-1))*(p2*v2 - p1*v1)
def eff_pol(self, suc=None, disch=None):
"""Polytropic efficiency.
Calculates the polytropic efficiency given suction and discharge state.
Parameters
----------
suc : State
Suction state.
disch : State
Discharge state.
Returns
-------
eff_pol : float
            Polytropic efficiency.
Examples
--------
"""
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
wp = self.head_pol(suc, disch)
dh = disch.hmass() - suc.hmass()
return wp/dh
def head_isen(self, suc=None, disch=None):
"""Isentropic head.
Calculates the Isentropic head given a suction and a discharge state.
Parameters
----------
suc : State
Suction state.
disch : State
Discharge state.
Returns
-------
head_isen : float
Isentropic head.
Examples
--------
>>> fluid ={'CarbonDioxide': 0.76064,
... 'R134a': 0.23581,
... 'Nitrogen': 0.00284,
... 'Oxygen': 0.00071}
>>> suc = State.define(fluid, 183900, 291.5)
>>> disch = State.define(fluid, 590200, 380.7)
>>> head_isen(suc, disch) # doctest: +ELLIPSIS
53166.296...
"""
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
# define state to isentropic discharge using dummy state
disch_s = self._dummy_state
disch_s.update(CP.PSmass_INPUTS, disch.p(), suc.smass())
return self.head_pol(suc, disch_s)
def eff_isen(self, suc=None, disch=None):
"""Isentropic efficiency.
Calculates the Isentropic efficiency given a suction and a discharge state.
Parameters
----------
suc : State
Suction state.
disch : State
Discharge state.
Returns
-------
        eff_isen : float
Isentropic efficiency.
Examples
--------
>>> fluid ={'CarbonDioxide': 0.76064,
... 'R134a': 0.23581,
... 'Nitrogen': 0.00284,
... 'Oxygen': 0.00071}
>>> suc = State.define(fluid, 183900, 291.5)
>>> disch = State.define(fluid, 590200, 380.7)
>>> ef_isen(suc, disch) # doctest: +ELLIPSIS
0.684...
"""
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
ws = self.head_isen(suc, disch)
dh = disch.hmass() - suc.hmass()
return ws/dh
def schultz_f(self, suc=None, disch=None):
"""Schultz factor.
Calculates the Schultz factor given a suction and discharge state.
This factor is used to correct the polytropic head as per PTC 10.
Parameters
----------
suc : State
Suction state.
disch : State
Discharge state.
Returns
-------
        schultz_f : float
            Schultz correction factor.
Examples
--------
>>> fluid ={'CarbonDioxide': 0.76064,
... 'R134a': 0.23581,
... 'Nitrogen': 0.00284,
... 'Oxygen': 0.00071}
>>> suc = State.define(fluid, 183900, 291.5)
>>> disch = State.define(fluid, 590200, 380.7)
>>> schultz_f(suc, disch) # doctest: +ELLIPSIS
1.001...
"""
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
# define state to isentropic discharge using dummy state
disch_s = self._dummy_state
disch_s.update(CP.PSmass_INPUTS, disch.p(), suc.smass())
h2s_h1 = disch_s.hmass() - suc.hmass()
h_isen = self.head_isen(suc, disch)
return h2s_h1/h_isen
def head_pol_schultz(self, suc=None, disch=None):
"""Polytropic head corrected by the Schultz factor.
Calculates the polytropic head corrected by the Schultz factor
given a suction and a discharge state.
Parameters
----------
suc : State
Suction state.
disch : State
Discharge state.
Returns
-------
head_pol_schultz : float
Polytropic head corrected by the Schultz factor.
Examples
--------
>>> fluid ={'CarbonDioxide': 0.76064,
... 'R134a': 0.23581,
... 'Nitrogen': 0.00284,
... 'Oxygen': 0.00071}
>>> suc = State.define(fluid, 183900, 291.5)
>>> disch = State.define(fluid, 590200, 380.7)
>>> head_pol_schultz(suc, disch) # doctest: +ELLIPSIS
55377.434...
"""
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
f = self.schultz_f(suc, disch)
head = self.head_pol(suc, disch)
return f * head
def eff_pol_schultz(self, suc=None, disch=None):
if suc is None:
suc = self.suc
if disch is None:
disch = self.disch
wp = self.head_pol_schultz(suc, disch)
dh = disch.hmass() - suc.hmass()
return wp/dh
def power_calc(self, flow_m=None, head=None, eff=None):
"""Power.
Calculate the power consumption.
Parameters
----------
flow_m : float
Mass flow.
head : float
Head.
eff : float
Polytropic efficiency.
Returns
-------
power : float
"""
if flow_m is None:
flow_m = self.flow_m
if head is None:
head = self.head
if eff is None:
eff = self.eff
return flow_m * head / eff
# TODO add head Mallen
# TODO add head Huntington
# TODO add head reference
# TODO add power
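# Illustrative usage sketch (hypothetical operating data; State.define is used
# with the same call style as the doctests above, and speed/flow_m are
# arbitrary example values):
#
#     fluid = {'CarbonDioxide': 0.76064, 'R134a': 0.23581,
#              'Nitrogen': 0.00284, 'Oxygen': 0.00071}
#     suc = State.define(fluid, 183900, 291.5)
#     disch = State.define(fluid, 590200, 380.7)
#     point = Point(suc=suc, disch=disch, speed=1000., flow_m=2.5)
#     point.head, point.eff, point.power  # filled in by calc_from_suc_disch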
class InterpolatedCurve(np.poly1d):
"""Auxiliary class to create interpolated curve.
    This class inherits from np.poly1d, changing the polynomial degree to a
    property so that its value can be changed after instantiation.
Plots are also added in this class.
"""
def __init__(self, x, y, deg=3, ylabel=None):
"""
Auxiliary function to create an interpolated curve
        for the various point attributes.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points (x[i], y[i]).
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
Returns
-------
interpolated_curve : np.poly1d
Interpolated curve using np.poly1d function.
"""
        # store the sample points and fit a polynomial of the requested degree
self.x = x
self.y = y
self._deg = deg
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.args = np.polyfit(self.x, self.y, self._deg)
self.ylabel = ylabel
super().__init__(self.args)
@property
def deg(self):
return self._deg
@deg.setter
def deg(self, value):
self._deg = value
self.__init__(self.x, self.y, self._deg, self.ylabel)
def plot(self, ax=None, plot_kws=None):
if ax is None:
ax = plt.gca()
if plot_kws is None:
plot_kws = dict()
flow = np.linspace(self.x[0], self.x[-1], 20)
ax.plot(flow, self(flow), **plot_kws)
ax.set_xlabel('Volumetric flow $(m^3 / s)$')
ax.set_ylabel(self.ylabel)
return ax
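# A minimal usage sketch (hedged; the x and y values below are illustrative):
#
#   curve = InterpolatedCurve([1.0, 2.0, 3.0, 4.0], [10.0, 9.0, 7.0, 4.0],
#                             deg=2, ylabel='Head $(J / kg)$')
#   curve(2.5)       # evaluate the fitted polynomial at flow = 2.5 m**3/s
#   curve.deg = 3    # refit with a different polynomial degree
#   ax = curve.plot()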
class Curve:
"""Curve.
A curve is a collection of points that share the same suction
state and the same speed.
Parameters
----------
points : list
        List of points sharing the same suction state and speed.
"""
def __init__(self, points):
# for one single point:
if not isinstance(points, list):
p0 = points
p1 = Point(suc=p0.suc, eff=p0.eff, volume_ratio=p0.volume_ratio,
speed=p0.speed, flow_m=p0.flow_m+1)
points = [p0, p1]
self.points = sorted(points, key=lambda point: point.flow_v)
# set each point as attribute
for i, p in enumerate(self.points):
setattr(self, 'point_' + str(i), p)
# get one point to extract attributes
self._point0 = self.points[0]
self.suc = self._point0.suc
self.speed = self._point0.speed
# attributes from each point
self.flow_v = [p.flow_v for p in self.points]
self.flow_m = [p.flow_m for p in self.points]
self.suc_p = [p.suc.p() for p in self.points]
self.suc_T = [p.suc.T() for p in self.points]
self.disch_p = [p.disch.p() for p in self.points]
self.disch_T = [p.disch.T() for p in self.points]
self.head = [p.head for p in self.points]
self.eff = [p.eff for p in self.points]
self.power = [p.power for p in self.points]
# interpolated curves
self.suc_p_curve = InterpolatedCurve(self.flow_v, self.suc_p,
ylabel='Pressure $(Pa)$')
self.suc_T_curve = InterpolatedCurve(self.flow_v, self.suc_T,
ylabel='Temperature $(K)$')
self.disch_p_curve = InterpolatedCurve(self.flow_v, self.disch_p,
ylabel='Pressure $(Pa)$')
self.disch_T_curve = InterpolatedCurve(self.flow_v, self.disch_T,
ylabel='Temperature $(K)$')
self.head_curve = InterpolatedCurve(self.flow_v, self.head,
ylabel='Head $(J / kg)$')
self.eff_curve = InterpolatedCurve(self.flow_v, self.eff,
ylabel='Efficiency')
self.power_curve = InterpolatedCurve(self.flow_v, self.power,
ylabel='Power $(W)$')
def __getitem__(self, item):
return self.points[item]
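# A minimal usage sketch (hedged; `point_a`, `point_b` and `point_c` stand for
# Point objects built elsewhere with the same suction state and speed):
#
#   curve = Curve([point_a, point_b, point_c])
#   curve.head_curve(0.5)         # interpolated head at a flow of 0.5 m**3/s
#   ax = curve.eff_curve.plot()   # efficiency vs. volumetric flow
#
# Passing a single Point is also accepted; a second point is synthesized so
# that the polynomial fit has at least two samples.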
|
|
# Copyright (c) 2020 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Sequences of :class:`~nutils.points.Points`.'''
from . import types, numeric
from .points import Points
from typing import Tuple, Sequence, Iterable, Iterator, Optional, Union, overload
import abc, itertools, numpy
class PointsSequence(types.Singleton):
'''Abstract base class for a sequence of :class:`~nutils.points.Points`.
Parameters
----------
ndims : :class:`int`
The dimension of the point coordinates.
Attributes
----------
ndims : :class:`int`
The dimension of the point coordinates.
Notes
-----
Subclasses must implement :meth:`__len__` and :meth:`get`.
'''
__slots__ = 'ndims'
__cache__ = 'npoints', 'tri', 'hull'
@staticmethod
def from_iter(value: Iterable[Points], ndims: int) -> 'PointsSequence':
'''Create a :class:`PointsSequence` from an iterator.
Parameters
----------
value : iterable of :class:`~nutils.points.Points` objects
ndims : :class:`int`
Returns
-------
sequence : :class:`PointsSequence`
'''
value = tuple(value)
if not all(item.ndims == ndims for item in value):
raise ValueError('not all `Points` in the sequence have ndims equal to {}'.format(ndims))
if len(value) == 0:
return _Empty(ndims)
elif all(item == value[0] for item in value[1:]):
return _Uniform(value[0], len(value))
else:
return _Plain(value, ndims)
@staticmethod
def uniform(value: Points, length: int) -> 'PointsSequence':
'''Create a uniform :class:`PointsSequence`.
Parameters
----------
value : :class:`~nutils.points.Points`
length : :class:`int`
Returns
-------
sequence : :class:`PointsSequence`
'''
if length < 0:
raise ValueError('expected nonnegative `length` but got {}'.format(length))
elif length == 0:
return _Empty(value.ndims)
else:
return _Uniform(value, length)
@staticmethod
def empty(ndims: int) -> 'PointsSequence':
'''Create an empty :class:`PointsSequence`.
Parameters
----------
ndims : :class:`int`
Returns
-------
sequence : :class:`PointsSequence`
'''
if ndims < 0:
raise ValueError('expected nonnegative `ndims` but got {}'.format(ndims))
else:
return _Empty(ndims)
def __init__(self, ndims: int) -> None:
self.ndims = ndims
super().__init__()
@property
def npoints(self) -> int:
'''The total number of points in this sequence.'''
return sum(p.npoints for p in self)
def __bool__(self) -> bool:
'''Return ``bool(self)``.'''
return bool(len(self))
@abc.abstractmethod
def __len__(self) -> int:
'''Return ``len(self)``.'''
raise NotImplementedError
def __iter__(self) -> Iterator[Points]:
'''Implement ``iter(self)``.'''
return map(self.get, range(len(self)))
@overload
def __getitem__(self, index: int) -> Points:
...
@overload
def __getitem__(self, index: Union[slice, numpy.ndarray]) -> 'PointsSequence':
...
def __getitem__(self, index):
'''Return ``self[index]``.'''
if numeric.isint(index):
return self.get(index)
elif isinstance(index, slice):
index = range(len(self))[index]
if index == range(len(self)):
return self
return self.take(numpy.arange(index.start, index.stop, index.step))
elif numeric.isintarray(index):
return self.take(index)
elif numeric.isboolarray(index):
return self.compress(index)
else:
raise IndexError('invalid index: {}'.format(index))
def __add__(self, other: 'PointsSequence') -> 'PointsSequence':
'''Return ``self+other``.'''
if isinstance(other, PointsSequence):
return self.chain(other)
else:
return NotImplemented
@overload
def __mul__(self, other: int) -> Points:
...
@overload
def __mul__(self, other: 'PointsSequence') -> 'PointsSequence':
...
def __mul__(self, other):
'''Return ``self*other``.'''
if numeric.isint(other):
return self.repeat(other)
elif isinstance(other, PointsSequence):
return self.product(other)
else:
return NotImplemented
@abc.abstractmethod
def get(self, index: int) -> Points:
'''Return the points at ``index``.
Parameters
----------
index : :class:`int`
Returns
-------
points: :class:`~nutils.points.Points`
The points at ``index``.
'''
raise NotImplementedError
def take(self, indices: numpy.ndarray) -> 'PointsSequence':
'''Return a selection of this sequence.
Parameters
----------
indices : :class:`numpy.ndarray`, ndim: 1, dtype: int
The indices of points of this sequence to select.
Returns
-------
    sequence: :class:`PointsSequence`
The sequence of selected points.
'''
_check_take(len(self), indices)
if len(indices) == 0:
return _Empty(self.ndims)
elif len(indices) == 1:
return _Uniform(self.get(indices[0]), 1)
else:
return _Take(self, types.frozenarray(indices))
def compress(self, mask: numpy.ndarray) -> 'PointsSequence':
'''Return a selection of this sequence.
Parameters
----------
mask : :class:`numpy.ndarray`, ndim: 1, dtype: bool
A boolean mask of points of this sequence to select.
Returns
-------
sequence: :class:`PointsSequence`
The sequence of selected points.
'''
_check_compress(len(self), mask)
return self.take(numpy.nonzero(mask)[0])
def repeat(self, count: int) -> 'PointsSequence':
'''Return this sequence repeated ``count`` times.
Parameters
----------
count : :class:`int`
Returns
-------
sequence : :class:`PointsSequence`
This sequence repeated ``count`` times.
'''
_check_repeat(count)
if count == 0:
return _Empty(self.ndims)
elif count == 1:
return self
else:
return _Repeat(self, count)
def product(self, other: 'PointsSequence') -> 'PointsSequence':
'''Return the product of this sequence with another sequence.
Parameters
----------
other : :class:`PointsSequence`
Returns
-------
sequence : :class:`PointsSequence`
This product sequence.
'''
return _Product(self, other)
def chain(self, other: 'PointsSequence') -> 'PointsSequence':
'''Return the chained sequence of this sequence with ``other``.
Parameters
----------
other : :class:`PointsSequence`
Returns
-------
sequence : :class:`PointsSequence`
The chained sequence.
'''
if other.ndims != self.ndims:
raise ValueError('expected a `PointsSequence` with ndims={} but got {}'.format(self.ndims, other.ndims))
if not other:
return self
elif not self:
return other
else:
selfitems = list(_unchain(self))
otheritems = list(_unchain(other))
# Since `self` and `other` are already properly merged, it suffices to
# merge the tail of `self` with the head of `other`. Both `selfitems` and
# `otheritems` cannot be empty by the above tests.
merged = _merge_chain(selfitems[-1], otheritems[0])
if merged:
return _balanced_chain(selfitems[:-1] + [merged] + otheritems[1:])
else:
return _balanced_chain(selfitems + otheritems)
@property
def tri(self) -> types.frozenarray:
'''Triangulation of interior.
A two-dimensional integer array with ``ndims+1`` columns, of which every
row defines a simplex by mapping vertices into the list of points.
'''
tri = []
offset = 0
for points in self:
tri.append(points.tri + offset)
offset += points.npoints
return types.frozenarray(numpy.concatenate(tri) if tri else numpy.zeros((0,self.ndims+1), int), copy=False)
@property
def hull(self) -> types.frozenarray:
'''Triangulation of the exterior hull.
A two-dimensional integer array with ``ndims`` columns, of which every row
defines a simplex by mapping vertices into the list of points. Note that
the hull often does contain internal element boundaries as the
triangulations originating from separate elements are disconnected.
'''
hull = []
offset = 0
for points in self:
hull.append(points.hull + offset)
offset += points.npoints
return types.frozenarray(numpy.concatenate(hull) if hull else numpy.zeros((0,self.ndims), int), copy=False)
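# A minimal usage sketch (hedged; `pnts_a` and `pnts_b` stand for concrete
# :class:`~nutils.points.Points` instances obtained elsewhere):
#
#   seq = PointsSequence.from_iter([pnts_a, pnts_b, pnts_a], ndims=pnts_a.ndims)
#   seq.npoints        # total number of points over all entries
#   seq[0]             # -> pnts_a
#   (seq + seq).tri    # concatenated triangulation, indices offset per entry
#
# `from_iter` collapses to a uniform representation when all items compare
# equal, and `chain`/`repeat` keep the representation compact via `_merge_chain`.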
class _Empty(PointsSequence):
__slots__ = ()
def __len__(self) -> int:
return 0
def get(self, index: int) -> Points:
raise IndexError('sequence index out of range')
class _Plain(PointsSequence):
__slots__ = 'items'
def __init__(self, items: Tuple[Points, ...], ndims: int) -> None:
assert len(items), 'inefficient; this should have been `_Empty`'
assert not all(item == items[0] for item in items), 'inefficient; this should have been `_Uniform`'
assert all(item.ndims == ndims for item in items), 'not all items have ndims equal to {}'.format(ndims)
self.items = items
super().__init__(ndims)
def __len__(self) -> int:
return len(self.items)
def __iter__(self) -> Iterator[Points]:
return iter(self.items)
def get(self, index: int) -> Points:
return self.items[index]
class _Uniform(PointsSequence):
__slots__ = 'item', 'length'
__cache__ = 'tri', 'hull'
def __init__(self, item, length):
assert length >= 0, 'length should be nonnegative'
assert length > 0, 'inefficient; this should have been `_Empty`'
self.item = item
self.length = length
super().__init__(item.ndims)
@property
def npoints(self) -> int:
return self.item.npoints * self.length
def __len__(self) -> int:
return self.length
def __iter__(self) -> Iterator[Points]:
return itertools.repeat(self.item, len(self))
def get(self, index: int) -> Points:
numeric.normdim(len(self), index)
return self.item
def take(self, indices: numpy.ndarray) -> PointsSequence:
_check_take(len(self), indices)
return PointsSequence.uniform(self.item, len(indices))
def compress(self, mask: numpy.ndarray) -> PointsSequence:
_check_compress(len(self), mask)
return PointsSequence.uniform(self.item, mask.sum())
def repeat(self, count: int) -> PointsSequence:
_check_repeat(count)
if count == 0:
return _Empty(self.ndims)
else:
return PointsSequence.uniform(self.item, len(self) * count)
def product(self, other: PointsSequence) -> PointsSequence:
if isinstance(other, _Uniform):
return PointsSequence.uniform(self.item * other.item, len(self) * len(other))
else:
return super().product(other)
def _mk_indices(self, item: numpy.ndarray) -> types.frozenarray:
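    # Tile `item` (a per-Points index array such as `tri` or `hull`) once per
    # entry of the sequence, offset each copy by that entry's starting point
    # index, and flatten the copies into a single (len(self)*rows, cols) array.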
npoints = self.item.npoints
ind = item[None] + numpy.arange(0, len(self)*npoints, npoints)[:,None,None]
ind = ind.reshape(len(self)*item.shape[0], item.shape[1])
return types.frozenarray(ind, copy=False)
@property
def tri(self) -> types.frozenarray:
return self._mk_indices(self.item.tri)
@property
def hull(self) -> types.frozenarray:
return self._mk_indices(self.item.hull)
class _Take(PointsSequence):
__slots__ = 'parent', 'indices'
def __init__(self, parent, indices):
_check_take(len(parent), indices)
assert len(indices) > 1, 'inefficient; this should have been `_Empty` or `_Uniform`'
assert not isinstance(parent, _Uniform), 'inefficient; this should have been `_Uniform`'
self.parent = parent
self.indices = indices
super().__init__(parent.ndims)
def __len__(self) -> int:
return len(self.indices)
def __iter__(self) -> Iterator[Points]:
return map(self.parent.get, self.indices)
def get(self, index: int) -> Points:
return self.parent.get(self.indices[index])
def take(self, indices: numpy.ndarray) -> PointsSequence:
_check_take(len(self), indices)
return self.parent.take(numpy.take(self.indices, indices))
def compress(self, mask: numpy.ndarray) -> PointsSequence:
_check_compress(len(self), mask)
return self.parent.take(numpy.compress(mask, self.indices))
class _Repeat(PointsSequence):
__slots__ = 'parent', 'count'
__cache__ = 'tri', 'hull'
def __init__(self, parent, count):
assert count >= 0, 'count should be nonnegative'
assert count > 0, 'inefficient; this should have been `_Empty`'
assert not isinstance(parent, _Uniform), 'inefficient; this should have been `_Uniform`'
self.parent = parent
self.count = count
super().__init__(parent.ndims)
@property
def npoints(self) -> int:
return self.parent.npoints * self.count
def __len__(self) -> int:
return len(self.parent) * self.count
def __iter__(self) -> Iterator[Points]:
for i in range(self.count):
yield from self.parent
def get(self, index: int) -> Points:
return self.parent.get(numeric.normdim(len(self), index) % len(self.parent))
def repeat(self, count: int) -> PointsSequence:
_check_repeat(count)
if count == 0:
return _Empty(self.ndims)
else:
return _Repeat(self.parent, self.count * count)
def _mk_indices(self, parent: numpy.ndarray) -> types.frozenarray:
npoints = self.parent.npoints
ind = parent[None] + numpy.arange(0, self.count*npoints, npoints)[:,None,None]
ind = ind.reshape(self.count*parent.shape[0], parent.shape[1])
return types.frozenarray(ind, copy=False)
@property
def tri(self) -> types.frozenarray:
return self._mk_indices(self.parent.tri)
@property
def hull(self) -> types.frozenarray:
return self._mk_indices(self.parent.hull)
class _Product(PointsSequence):
__slots__ = 'sequence1', 'sequence2'
@types.apply_annotations
def __init__(self, sequence1, sequence2):
assert not (isinstance(sequence1, _Uniform) and isinstance(sequence2, _Uniform)), 'inefficient; this should have been `_Uniform`'
self.sequence1 = sequence1
self.sequence2 = sequence2
super().__init__(sequence1.ndims + sequence2.ndims)
@property
def npoints(self) -> int:
return self.sequence1.npoints * self.sequence2.npoints
def __len__(self) -> int:
return len(self.sequence1) * len(self.sequence2)
def __iter__(self) -> Iterator[Points]:
return (item1.product(item2) for item1 in self.sequence1 for item2 in self.sequence2)
def get(self, index: int) -> Points:
index1, index2 = divmod(numeric.normdim(len(self), index), len(self.sequence2))
return self.sequence1.get(index1).product(self.sequence2.get(index2))
def product(self, other: PointsSequence) -> PointsSequence:
return self.sequence1.product(self.sequence2.product(other))
class _Chain(PointsSequence):
__slots__ = 'sequence1', 'sequence2'
__cache__ = 'tri', 'hull'
def __init__(self, sequence1, sequence2):
assert sequence1.ndims == sequence2.ndims, 'cannot chain sequences with different ndims'
assert sequence1 and sequence2, 'inefficient; at least one of the sequences is empty'
assert not _merge_chain(sequence1, sequence2), 'inefficient; this should have been `_Uniform` or `_Repeat`'
self.sequence1 = sequence1
self.sequence2 = sequence2
super().__init__(sequence1.ndims)
@property
def npoints(self) -> int:
return self.sequence1.npoints + self.sequence2.npoints
def __len__(self) -> int:
return len(self.sequence1) + len(self.sequence2)
def __iter__(self) -> Iterator[Points]:
return itertools.chain(self.sequence1, self.sequence2)
def get(self, index: int) -> Points:
index = numeric.normdim(len(self), index)
n = len(self.sequence1)
if index < n:
return self.sequence1.get(index)
else:
return self.sequence2.get(index - n)
def take(self, indices: numpy.ndarray) -> PointsSequence:
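    # Split the requested indices into those addressing `sequence1` and those
    # addressing `sequence2` (shifted down by len(sequence1)), take from each
    # part separately and chain the two results.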
_check_take(len(self), indices)
n = len(self.sequence1)
mask = numpy.less(indices, n)
return self.sequence1.take(numpy.compress(mask, indices)).chain(self.sequence2.take(numpy.compress(~mask, indices) - n))
def compress(self, mask: numpy.ndarray) -> PointsSequence:
_check_compress(len(self), mask)
n = len(self.sequence1)
return self.sequence1.compress(mask[:n]).chain(self.sequence2.compress(mask[n:]))
@property
def tri(self) -> types.frozenarray:
tri1 = self.sequence1.tri
tri2 = self.sequence2.tri
return types.frozenarray(numpy.concatenate([tri1, tri2 + self.sequence1.npoints]), copy=False)
@property
def hull(self) -> types.frozenarray:
hull1 = self.sequence1.hull
hull2 = self.sequence2.hull
return types.frozenarray(numpy.concatenate([hull1, hull2 + self.sequence1.npoints]), copy=False)
def _unchain(seq: PointsSequence) -> Iterator[PointsSequence]:
if isinstance(seq, _Chain):
yield from _unchain(seq.sequence1)
yield from _unchain(seq.sequence2)
elif seq: # skip empty sequences
yield seq
def _balanced_chain(items: Sequence[PointsSequence]) -> PointsSequence:
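  # Recursively split `items` at the index that best balances the number of
  # sequence entries on either side, then chain the two halves; halves that
  # can be merged (see `_merge_chain`) are collapsed instead of chained.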
assert items
if len(items) == 1:
return items[0]
else:
c = numpy.cumsum([0]+list(map(len, items)))
i = numpy.argmin(abs(c[1:-1] - c[-1]/2)) + 1
a = _balanced_chain(items[:i])
b = _balanced_chain(items[i:])
return _merge_chain(a, b) or _Chain(a, b)
def _merge_chain(a: PointsSequence, b: PointsSequence) -> Optional[PointsSequence]: # type: ignore[return]
if a == b:
return a.repeat(2)
if isinstance(a, _Uniform) and isinstance(b, _Uniform) and a.item == b.item:
return _Uniform(a.item, len(a) + len(b))
if isinstance(a, _Repeat):
if isinstance(b, _Repeat) and a.parent == b.parent:
return a.parent.repeat(a.count + b.count)
elif a.parent == b:
return a.parent.repeat(a.count + 1)
elif isinstance(b, _Repeat) and b.parent == a:
return b.parent.repeat(b.count + 1)
def _check_repeat(count):
if count < 0:
raise ValueError('expected nonnegative `count` but got {}'.format(count))
def _check_take(length, indices):
if not numeric.isintarray(indices):
raise IndexError('expected an array of integers')
if not indices.ndim == 1:
raise IndexError('expected an array with dimension 1 but got {}'.format(indices.ndim))
if len(indices) and not (0 <= indices.min() and indices.max() < length):
raise IndexError('`indices` out of range')
def _check_compress(length, mask):
if not numeric.isboolarray(mask):
raise IndexError('expected an array of booleans')
if not mask.ndim == 1:
raise IndexError('expected an array with dimension 1 but got {}'.format(mask.ndim))
if len(mask) != length:
raise IndexError('expected an array with length {} but got {}'.format(length, len(mask)))
# vim:sw=2:sts=2:et
|
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import sys
import time
import unittest
import imath
import arnold
import IECore
import IECoreScene
import IECoreImage
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
import GafferImage
import GafferArnold
class InteractiveArnoldRenderTest( GafferSceneTest.InteractiveRenderTest ) :
interactiveRenderNodeClass = GafferArnold.InteractiveArnoldRender
# Arnold outputs licensing warnings that would cause failures
failureMessageLevel = IECore.MessageHandler.Level.Error
def testTwoRenders( self ) :
s = Gaffer.ScriptNode()
s["s"] = GafferScene.Sphere()
s["o"] = GafferScene.Outputs()
s["o"].addOutput(
"beauty",
IECoreScene.Output(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelySphere",
}
)
)
s["o"]["in"].setInput( s["s"]["out"] )
s["r"] = self._createInteractiveRender()
s["r"]["in"].setInput( s["o"]["out"] )
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 0.5 )
image = IECoreImage.ImageDisplayDriver.storedImage( "myLovelySphere" )
self.assertTrue( isinstance( image, IECoreImage.ImagePrimitive ) )
# Try to start a second render while the first is running.
s["o2"] = GafferScene.Outputs()
s["o2"].addOutput(
"beauty",
IECoreScene.Output(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelySphere2",
}
)
)
s["o2"]["in"].setInput( s["s"]["out"] )
s["r2"] = self._createInteractiveRender( failOnError = False )
s["r2"]["in"].setInput( s["o2"]["out"] )
if [ int( v ) for v in arnold.AiGetVersion()[:3] ] >= [ 7, 0, 0 ] :
s["r2"]["state"].setValue( s["r"].State.Running )
time.sleep( 0.5 )
image = IECoreImage.ImageDisplayDriver.storedImage( "myLovelySphere2" )
self.assertTrue( isinstance( image, IECoreImage.ImagePrimitive ) )
else :
# Only one universe available, so second render will fail.
try :
defaultHandler = IECore.MessageHandler.getDefaultHandler()
messageHandler = IECore.CapturingMessageHandler()
IECore.MessageHandler.setDefaultHandler( messageHandler )
s["r2"]["state"].setValue( s["r"].State.Running )
finally :
IECore.MessageHandler.setDefaultHandler( defaultHandler )
messages = s["r2"]["messages"].getValue().value
self.assertEqual( len( messages ), 1 )
self.assertEqual( messages[0].message, "Arnold is already in use" )
self.assertEqual( len( messageHandler.messages ), 1 )
self.assertEqual( messageHandler.messages[0].message, "Arnold is already in use" )
def testEditSubdivisionAttributes( self ) :
script = Gaffer.ScriptNode()
script["cube"] = GafferScene.Cube()
script["cube"]["dimensions"].setValue( imath.V3f( 2 ) )
script["meshType"] = GafferScene.MeshType()
script["meshType"]["in"].setInput( script["cube"]["out"] )
script["meshType"]["meshType"].setValue( "catmullClark" )
script["attributes"] = GafferArnold.ArnoldAttributes()
script["attributes"]["in"].setInput( script["meshType"]["out"] )
script["attributes"]["attributes"]["subdivIterations"]["enabled"].setValue( True )
script["catalogue"] = GafferImage.Catalogue()
script["outputs"] = GafferScene.Outputs()
script["outputs"].addOutput(
"beauty",
IECoreScene.Output(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ClientDisplayDriver",
"displayHost" : "localhost",
"displayPort" : str( script['catalogue'].displayDriverServer().portNumber() ),
"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
}
)
)
script["outputs"]["in"].setInput( script["attributes"]["out"] )
script["imageStats"] = GafferImage.ImageStats()
script["imageStats"]["in"].setInput( script["catalogue"]["out"] )
script["imageStats"]["channels"].setValue( IECore.StringVectorData( [ "R", "G", "B", "A" ] ) )
script["imageStats"]["area"].setValue( imath.Box2i( imath.V2i( 0 ), imath.V2i( 640, 480 ) ) )
script["options"] = GafferScene.StandardOptions()
script["options"]["in"].setInput( script["outputs"]["out"] )
script["options"]["options"]["filmFit"]["enabled"].setValue( True )
script["options"]["options"]["filmFit"]["value"].setValue( IECoreScene.Camera.FilmFit.Fit )
script["render"] = self._createInteractiveRender()
script["render"]["in"].setInput( script["options"]["out"] )
# Render the cube with one level of subdivision. Check we get roughly the
# alpha coverage we expect.
script["render"]["state"].setValue( script["render"].State.Running )
self.uiThreadCallHandler.waitFor( 1 )
self.assertAlmostEqual( script["imageStats"]["average"][3].getValue(), 0.381, delta = 0.001 )
# Now up the number of subdivision levels. The alpha coverage should
# increase as the shape tends towards the limit surface.
script["attributes"]["attributes"]["subdivIterations"]["value"].setValue( 4 )
self.uiThreadCallHandler.waitFor( 1 )
self.assertAlmostEqual( script["imageStats"]["average"][3].getValue(), 0.424, delta = 0.001 )
script["render"]["state"].setValue( script["render"].State.Stopped )
def testLightLinkingAfterParameterUpdates( self ) :
s = Gaffer.ScriptNode()
s["catalogue"] = GafferImage.Catalogue()
s["s"] = GafferScene.Sphere()
s["PathFilter"] = GafferScene.PathFilter( "PathFilter" )
s["PathFilter"]["paths"].setValue( IECore.StringVectorData( [ '/sphere' ] ) )
s["ShaderAssignment"] = GafferScene.ShaderAssignment( "ShaderAssignment" )
s["ShaderAssignment"]["in"].setInput( s["s"]["out"] )
s["ShaderAssignment"]["filter"].setInput( s["PathFilter"]["out"] )
s["lambert"], _ = self._createMatteShader()
s["ShaderAssignment"]["shader"].setInput( s["lambert"]["out"] )
s["StandardAttributes"] = GafferScene.StandardAttributes( "StandardAttributes" )
s["StandardAttributes"]["attributes"]["linkedLights"]["enabled"].setValue( True )
s["StandardAttributes"]["attributes"]["linkedLights"]["value"].setValue( "defaultLights" )
s["StandardAttributes"]["filter"].setInput( s["PathFilter"]["out"] )
s["StandardAttributes"]["in"].setInput( s["ShaderAssignment"]["out"] )
s["Light"] = GafferArnold.ArnoldLight( "skydome_light" )
s["Light"].loadShader( "skydome_light" )
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 2 )
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["StandardAttributes"]["out"] )
s["group"]["in"][1].setInput( s["Light"]["out"] )
s["group"]["in"][2].setInput( s["c"]["out"] )
s["o"] = GafferScene.Outputs()
s["o"].addOutput(
"beauty",
IECoreScene.Output(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ClientDisplayDriver",
"displayHost" : "localhost",
"displayPort" : str( s['catalogue'].displayDriverServer().portNumber() ),
"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
}
)
)
s["o"]["in"].setInput( s["group"]["out"] )
s["so"] = GafferScene.StandardOptions()
s["so"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["so"]["options"]["renderCamera"]["enabled"].setValue( True )
s["so"]["in"].setInput( s["o"]["out"] )
s["r"] = self._createInteractiveRender()
s["r"]["in"].setInput( s["so"]["out"] )
# Start rendering and make sure the light is linked to the sphere
s["r"]["state"].setValue( s["r"].State.Running )
self.uiThreadCallHandler.waitFor( 1.0 )
self.assertAlmostEqual(
self._color4fAtUV( s["catalogue"], imath.V2f( 0.5 ) ).r,
1,
delta = 0.01
)
# Change a value on the light. The light should still be linked to the sphere
# and we should get the same result as before.
s["Light"]['parameters']['shadow_density'].setValue( 0.0 )
self.uiThreadCallHandler.waitFor( 1.0 )
self.assertAlmostEqual(
self._color4fAtUV( s["catalogue"], imath.V2f( 0.5 ) ).r,
1,
delta = 0.01
)
s["r"]["state"].setValue( s["r"].State.Stopped )
def testQuadLightTextureEdits( self ) :
# Quad light texture edits don't currently update correctly in Arnold.
# Check that our workaround is working
s = Gaffer.ScriptNode()
s["catalogue"] = GafferImage.Catalogue()
s["s"] = GafferScene.Sphere()
s["PathFilter"] = GafferScene.PathFilter( "PathFilter" )
s["PathFilter"]["paths"].setValue( IECore.StringVectorData( [ '/sphere' ] ) )
s["ShaderAssignment"] = GafferScene.ShaderAssignment( "ShaderAssignment" )
s["ShaderAssignment"]["in"].setInput( s["s"]["out"] )
s["ShaderAssignment"]["filter"].setInput( s["PathFilter"]["out"] )
s["lambert"], _ = self._createMatteShader()
s["ShaderAssignment"]["shader"].setInput( s["lambert"]["out"] )
s["Tex"] = GafferArnold.ArnoldShader( "image" )
s["Tex"].loadShader( "image" )
s["Tex"]["parameters"]["filename"].setValue( os.path.join( os.path.dirname( __file__ ), "images", "sphereLightBake.exr" ) )
s["Tex"]["parameters"]["multiply"].setValue( imath.Color3f( 1, 0, 0 ) )
s["Light"] = GafferArnold.ArnoldLight( "quad_light" )
s["Light"].loadShader( "quad_light" )
s["Light"]["transform"]["translate"]["z"].setValue( 2 )
s["Light"]["parameters"]["color"].setInput( s["Tex"]["out"] )
s["Light"]["parameters"]["exposure"].setValue( 4 )
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 2 )
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["ShaderAssignment"]["out"] )
s["group"]["in"][1].setInput( s["Light"]["out"] )
s["group"]["in"][2].setInput( s["c"]["out"] )
s["o"] = GafferScene.Outputs()
s["o"].addOutput(
"beauty",
IECoreScene.Output(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ClientDisplayDriver",
"displayHost" : "localhost",
"displayPort" : str( s['catalogue'].displayDriverServer().portNumber() ),
"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
}
)
)
s["o"]["in"].setInput( s["group"]["out"] )
s["so"] = GafferScene.StandardOptions()
s["so"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["so"]["options"]["renderCamera"]["enabled"].setValue( True )
s["so"]["in"].setInput( s["o"]["out"] )
s["r"] = self._createInteractiveRender()
s["r"]["in"].setInput( s["so"]["out"] )
# Start rendering and make sure the light is linked to the sphere
s["r"]["state"].setValue( s["r"].State.Running )
self.uiThreadCallHandler.waitFor( 1.0 )
initialColor = self._color4fAtUV( s["catalogue"], imath.V2f( 0.5 ) )
self.assertAlmostEqual( initialColor.r, 0.09, delta = 0.013 )
self.assertAlmostEqual( initialColor.g, 0, delta = 0.01 )
# Edit texture network and make sure the changes take effect
s["Tex"]["parameters"]["multiply"].setValue( imath.Color3f( 0, 1, 0 ) )
self.uiThreadCallHandler.waitFor( 1.0 )
updateColor = self._color4fAtUV( s["catalogue"], imath.V2f( 0.5 ) )
self.assertAlmostEqual( updateColor.r, 0, delta = 0.01 )
self.assertAlmostEqual( updateColor.g, 0.06, delta = 0.022 )
s["r"]["state"].setValue( s["r"].State.Stopped )
def _createConstantShader( self ) :
shader = GafferArnold.ArnoldShader()
shader.loadShader( "flat" )
return shader, shader["parameters"]["color"]
def _createMatteShader( self ) :
shader = GafferArnold.ArnoldShader()
shader.loadShader( "lambert" )
shader["parameters"]["Kd"].setValue( 1 )
return shader, shader["parameters"]["Kd_color"]
def _createTraceSetShader( self ) :
# It's currently pretty ugly how we need to disable the trace set when it is left empty,
# to match the behaviour expected by GafferSceneTest.InteractiveRenderTest.
# Would be somewhat cleaner if we had the primaryInput metadata on trace_set
# available, so we could just put an expression on it to disable it when no trace set is given,
# but it doesn't seem very safe to do a metadata load in the middle of the tests
shaderBox = Gaffer.Box()
shader = GafferArnold.ArnoldShader("shader")
shader.loadShader( "standard_surface" )
shader["parameters"]["base"].setValue( 1 )
shader["parameters"]["base_color"].setValue( imath.Color3f( 1 ) )
shader["parameters"]["specular_roughness"].setValue( 0 )
shader["parameters"]["metalness"].setValue( 1 )
shader["parameters"]["specular_IOR"].setValue( 100 )
#return shader, Gaffer.StringPlug( "unused" )
traceSetShader = GafferArnold.ArnoldShader("traceSetShader")
traceSetShader.loadShader( "trace_set" )
traceSetShader["parameters"]["passthrough"].setInput( shader["out"] )
switchShader = GafferArnold.ArnoldShader("switchShader")
switchShader.loadShader( "switch_shader" )
switchShader["parameters"]["input0"].setInput( shader["out"] )
switchShader["parameters"]["input1"].setInput( traceSetShader["out"] )
shaderBox.addChild( shader )
shaderBox.addChild( traceSetShader )
shaderBox.addChild( switchShader )
shaderBox["enableExpression"] = Gaffer.Expression()
shaderBox["enableExpression"].setExpression( 'parent.switchShader.parameters.index = parent.traceSetShader.parameters.trace_set != ""', "OSL" )
Gaffer.PlugAlgo.promote( switchShader["out"] )
return shaderBox, traceSetShader["parameters"]["trace_set"]
def _cameraVisibilityAttribute( self ) :
return "ai:visibility:camera"
def _traceDepthOptions( self ) :
return "ai:GI_specular_depth", "ai:GI_diffuse_depth", "ai:GI_transmission_depth"
def _createPointLight( self ) :
light = GafferArnold.ArnoldLight()
light.loadShader( "point_light" )
return light, light["parameters"]["color"]
def _createSpotLight( self ) :
light = GafferArnold.ArnoldLight()
light.loadShader( "spot_light" )
return light, light["parameters"]["color"]
def _createLightFilter( self ) :
lightFilter = GafferArnold.ArnoldLightFilter()
lightFilter.loadShader( "light_blocker" )
return lightFilter, lightFilter["parameters"]["density"]
def _createGobo( self ) :
gobo = GafferArnold.ArnoldShader()
gobo.loadShader( "gobo" )
return gobo, gobo["parameters"]["slidemap"]
if __name__ == "__main__":
unittest.main()
|
|
"""
Tests for the Feature Provider
"""
import numpy as np
import os
import sys
import unittest
import warnings
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
sys.path.insert(0, path)
import senses.dataproviders.featureprovider as fp # pylint: disable=locally-disabled, import-error
class TestFeatureProvider(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.root = os.path.abspath("test_data_directory")
self.sample_rate = 24_000
self.nchannels = 1
self.bytewidth = 2
self.provider = fp.FeatureProvider(self.root, sample_rate=self.sample_rate, nchannels=self.nchannels, bytewidth=self.bytewidth)
def _reset(self):
"""
Resets the iterator.
"""
self.provider = fp.FeatureProvider(self.root, sample_rate=self.sample_rate, nchannels=self.nchannels, bytewidth=self.bytewidth)
def _label_fn(self, fpath):
"""
Returns 0 if fpath contains 'babies', otherwise 1.
"""
if "babies" in fpath:
return 0
else:
return 1
def test_generate_one_fft(self):
"""
Tests yielding a single FFT of 45ms.
"""
ffts = [(f, label) for f, label in self.provider.generate_n_ffts(n=1, ms=45, label_fn=self._label_fn)]
self.assertEqual(len(ffts), 1)
fft, _label = ffts[0]
self.assertEqual(fft.shape, (541,))
def test_generate_more_than_one_fft(self):
"""
Tests yielding more than one FFT of 34ms each.
"""
ffts = [(f, label) for f, label in self.provider.generate_n_ffts(n=2, ms=34, label_fn=self._label_fn)]
self.assertEqual(len(ffts), 2)
for fft, _label in ffts:
self.assertEqual(fft.shape, (409,))
def test_generate_fft_minibatch(self):
"""
Tests yielding several minibatches of labeled FFT data.
"""
batchsize = 16
batches = [batch for batch in self.provider.generate_n_fft_batches(n=5, batchsize=batchsize, ms=45, label_fn=self._label_fn)]
self.assertEqual(len(batches), 5)
data_batch, label_batch = batches[0]
nbins = 541
self.assertEqual(data_batch.shape, (batchsize, nbins))
labels_that_are_ones = np.where(label_batch == 1)[0]
labels_that_are_zeros = np.where(label_batch == 0)[0]
self.assertEqual(len(labels_that_are_ones) + len(labels_that_are_zeros), len(label_batch))
def test_generate_all_ffts(self):
"""
Test yielding all the FFTs in the dataset.
"""
batchsize = 16
ms = 45
min_ffts_expected = 3 * 60 * 1000 / ms # (minutes * sec/min * ms/sec) / ms/fft
max_ffts_expected = 5 * 60 * 1000 / ms
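        # With ms = 45 this works out to between 4000 and roughly 6667 FFTs in
        # total, i.e. the test data is assumed to hold 3 to 5 minutes of audio.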
batches = [b for b in self.provider.generate_n_fft_batches(n=None, batchsize=batchsize, ms=ms, label_fn=self._label_fn)]
self.assertGreaterEqual(len(batches), min_ffts_expected // batchsize)
self.assertLessEqual(len(batches), max_ffts_expected // batchsize)
self._reset()
ffts = [f for f in self.provider.generate_n_ffts(n=None, ms=ms, label_fn=self._label_fn)]
self.assertGreaterEqual(len(ffts), min_ffts_expected)
self.assertLessEqual(len(ffts), max_ffts_expected)
def test_generate_ffts_forever(self):
"""
Test yielding all the FFTs in the dataset forever.
"""
batchsize = 16
broke = False
ms = 45
total_segs_to_yield = 5 * 60 * 1000 / ms
total_batches_to_yield = int(total_segs_to_yield / batchsize)
batches = []
for i, b in enumerate(self.provider.generate_n_fft_batches(n=None, batchsize=batchsize, ms=ms, label_fn=self._label_fn, forever=True)):
if i >= total_batches_to_yield:
broke = True
break
else:
batches.append(b)
self.assertTrue(broke)
self.assertEqual(len(batches), total_batches_to_yield)
def test_generate_all_sequences(self):
"""
Tests getting all the data in sequence form.
"""
batchsize = 16
ms = 45
min_sequences_expected = 3 * 60 * 1000 / ms # (minutes * sec/min * ms/sec) / ms/fft
max_sequences_expected = 5 * 60 * 1000 / ms
batches = [b for b in self.provider.generate_n_sequence_batches(n=None, batchsize=batchsize, ms=ms, label_fn=self._label_fn)]
self.assertGreaterEqual(len(batches), min_sequences_expected // batchsize)
self.assertLessEqual(len(batches), max_sequences_expected // batchsize)
self._reset()
seqs = [f for f in self.provider.generate_n_sequences(n=None, ms=ms, label_fn=self._label_fn)]
self.assertGreaterEqual(len(seqs), min_sequences_expected)
self.assertLessEqual(len(seqs), max_sequences_expected)
def test_generate_n_sequences(self):
"""
Tests yielding n sequences.
"""
ms = 45
n = 217
seqs = [s for s in self.provider.generate_n_sequences(n=n, ms=ms, label_fn=self._label_fn)]
self.assertEqual(len(seqs), n)
def test_generate_sequence_minibatch(self):
"""
Tests yielding several minibatches of time-domain data.
"""
batchsize = 32
ms = 34
n = 5
batches = [batch for batch in self.provider.generate_n_sequence_batches(n=n, batchsize=batchsize, ms=ms, label_fn=self._label_fn)]
self.assertEqual(len(batches), n)
data_batch, label_batch = batches[0]
datapoints = int(self.sample_rate * self.nchannels * (ms / 1000))
self.assertEqual(data_batch.shape, (batchsize, datapoints))
labels_that_are_ones = np.where(label_batch == 1)[0]
labels_that_are_zeros = np.where(label_batch == 0)[0]
self.assertEqual(len(labels_that_are_ones) + len(labels_that_are_zeros), len(label_batch))
def test_generate_all_spectrograms(self):
"""
Tests yielding all the spectrograms in the dataset.
"""
batchsize = 16
ms = 45
nwindows = 10
wndl = ms / nwindows
overlap = 0.5
min_specs_expected = 3 * 60 * 1000 / ms # (minutes * sec/min * ms/sec) / ms/fft
max_specs_expected = 5 * 60 * 1000 / ms
batches = [batch for batch in self.provider.generate_n_spectrogram_batches(n=None, batchsize=batchsize, ms=ms, label_fn=self._label_fn, window_length_ms=wndl, overlap=overlap)]
self.assertGreaterEqual(len(batches), min_specs_expected // batchsize)
self.assertLessEqual(len(batches), max_specs_expected // batchsize)
self._reset()
seqs = [f for f in self.provider.generate_n_spectrograms(n=None, ms=ms, label_fn=self._label_fn, window_length_ms=wndl, overlap=overlap)]
self.assertGreaterEqual(len(seqs), min_specs_expected)
self.assertLessEqual(len(seqs), max_specs_expected)
def test_generate_sequences_forever(self):
"""
Test yielding all the sequences in the dataset forever.
"""
batchsize = 16
broke = False
ms = 45
total_seqs_to_yield = 5 * 60 * 1000 / ms
total_batches_to_yield = int(total_seqs_to_yield / batchsize)
batches = []
for i, b in enumerate(self.provider.generate_n_sequence_batches(n=None, batchsize=batchsize, ms=ms, label_fn=self._label_fn, forever=True)):
if i >= total_batches_to_yield:
broke = True
break
else:
batches.append(b)
self.assertTrue(broke)
self.assertEqual(len(batches), total_batches_to_yield)
def test_generate_spectrograms_forever(self):
"""
Test yielding all the spectrograms in the dataset forever.
"""
broke = False
batchsize = 16
ms = 45
nwindows = 10
wndl = ms / nwindows
overlap = 0.5
total_segs_to_yield = 5 * 60 * 1000 / ms
total_batches_to_yield = int(total_segs_to_yield / batchsize)
batches = []
for i, b in enumerate(self.provider.generate_n_spectrogram_batches(n=None, batchsize=batchsize, ms=ms, label_fn=self._label_fn, window_length_ms=wndl, overlap=overlap, forever=True)):
if i >= total_batches_to_yield:
broke = True
break
else:
batches.append(b)
self.assertTrue(broke)
self.assertEqual(len(batches), total_batches_to_yield)
def test_generate_spectrogram_minibatch(self):
"""
Tests yielding several minibatches of spectrogram data.
"""
batchsize = 12
ms = 340
n = 3
nwindows = 10
window_length_ms = ms / nwindows
overlap = 0.5
batches = [batch for batch in self.provider.generate_n_spectrogram_batches(n=n, batchsize=batchsize, ms=ms, label_fn=self._label_fn, window_length_ms=window_length_ms, overlap=overlap, expand_dims=True)]
self.assertEqual(len(batches), n)
data_batch, label_batch = batches[0]
ntimebins = nwindows * (1/overlap) - 1 # with a 50% overlap
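        # e.g. nwindows = 10 and overlap = 0.5 give 10 * (1 / 0.5) - 1 = 19 time bins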
nbins = 409
self.assertEqual(data_batch.shape, (batchsize, nbins, ntimebins, 1))
labels_that_are_ones = np.where(label_batch == 1)[0]
labels_that_are_zeros = np.where(label_batch == 0)[0]
self.assertEqual(len(labels_that_are_ones) + len(labels_that_are_zeros), len(label_batch))
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseReshapeTest(test.TestCase):
def _SparseTensorPlaceholder(self):
return sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64),
array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_2x3x4(self):
ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1],
[1, 1, 3], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensorValue(ind, val, shape)
def testStaticShapeInfoPreserved(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_5x6())
self.assertAllEqual((5, 6), sp_input.get_shape())
sp_output = sparse_ops.sparse_reshape(sp_input, shape=(1, 5, 2, 3))
self.assertAllEqual((1, 5, 2, 3), sp_output.get_shape())
def testSameShape(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])
output_val = sess.run(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testFeedSameShape(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [5, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testWorksWellWithTfShape(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
shape = array_ops.shape(sp_input) # tf.shape generates int32 output
sp_output = sparse_ops.sparse_reshape(sp_input, shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testFeedSameShapeWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [-1, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
def testFeedNewShapeSameRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, 10])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
def testFeedNewShapeSameRankWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
def testUpRank(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [2, 3, 5])
output_val = sess.run(sp_output)
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
def testFeedUpRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
def testFeedUpRankWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, -1, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
def testFeedDownRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, 4])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
def testFeedDownRankWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
def testFeedMultipleInferredDims(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1, -1])
with self.assertRaisesOpError("only one output shape size may be -1"):
sess.run(sp_output, {sp_input: input_val})
def testProvideStaticallyMismatchedSizes(self):
input_val = self._SparseTensorValue_5x6()
sp_input = sparse_tensor.SparseTensor.from_value(input_val)
with self.assertRaisesRegexp(ValueError, "Cannot reshape"):
sparse_ops.sparse_reshape(sp_input, [4, 7])
def testFeedMismatchedSizes(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, 7])
with self.assertRaisesOpError(
"Input to reshape is a tensor with 30 dense values"):
sess.run(sp_output, {sp_input: input_val})
def testFeedMismatchedSizesWithInferredDim(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1])
with self.assertRaisesOpError("requested shape requires a multiple"):
sess.run(sp_output, {sp_input: input_val})
def testFeedPartialShapes(self):
with self.test_session(use_gpu=False):
# Incorporate new rank into shape information if known
sp_input = self._SparseTensorPlaceholder()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Incorporate known shape information about input indices in output
# indices
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Even if new_shape has no shape information, we know the ranks of
# output indices and shape
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
new_shape = array_ops.placeholder(dtypes.int64)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [None])
def testFeedDenseReshapeSemantics(self):
with self.test_session(use_gpu=False) as sess:
# Compute a random rank-5 initial shape and new shape, randomly sparsify
# it, and check that the output of SparseReshape has the same semantics
# as a dense reshape.
factors = np.array([2] * 4 + [3] * 4 + [5] * 4) # 810k total elements
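      # 2**4 * 3**4 * 5**4 = 16 * 81 * 625 = 810,000 dense elements in total.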
orig_rank = np.random.randint(2, 7)
orig_map = np.random.randint(orig_rank, size=factors.shape)
orig_shape = [np.prod(factors[orig_map == d]) for d in range(orig_rank)]
new_rank = np.random.randint(2, 7)
new_map = np.random.randint(new_rank, size=factors.shape)
new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]
orig_dense = np.random.uniform(size=orig_shape)
orig_indices = np.transpose(np.nonzero(orig_dense < 0.5))
orig_values = orig_dense[orig_dense < 0.5]
new_dense = np.reshape(orig_dense, new_shape)
new_indices = np.transpose(np.nonzero(new_dense < 0.5))
new_values = new_dense[new_dense < 0.5]
sp_input = self._SparseTensorPlaceholder()
input_val = sparse_tensor.SparseTensorValue(orig_indices, orig_values,
orig_shape)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, new_indices)
self.assertAllEqual(output_val.values, new_values)
self.assertAllEqual(output_val.dense_shape, new_shape)
if __name__ == "__main__":
test.main()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import logging
import os
import re
import shutil
import subprocess
import time
import traceback
from contextlib import contextmanager
from textwrap import dedent
from pex.pex_info import PexInfo
from six import StringIO
from six.moves import configparser
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.python_setup import PythonRepos, PythonSetup
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.tasks.python_task import PythonTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError, TestFailedTaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.target import Target
from pants.task.testrunner_task_mixin import TestRunnerTaskMixin
from pants.util.contextutil import (environment_as, temporary_dir, temporary_file,
temporary_file_path)
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.process_handler import SubprocessProcessHandler
from pants.util.strutil import safe_shlex_split
# Initialize logging, since tests do not run via pants_exe (where it is usually done).
logging.basicConfig()
logger = logging.getLogger(__name__)
class PythonTestResult(object):
@staticmethod
def exception():
return PythonTestResult('EXCEPTION')
@staticmethod
def rc(value):
return PythonTestResult('SUCCESS' if value == 0 else 'FAILURE', rc=value)
def with_failed_targets(self, failed_targets):
return PythonTestResult(self._msg, self._rc, failed_targets)
def __init__(self, msg, rc=None, failed_targets=None):
self._rc = rc
self._msg = msg
self._failed_targets = failed_targets or []
def __str__(self):
return self._msg
@property
def success(self):
return self._rc == 0
@property
def failed_targets(self):
return self._failed_targets
class PytestRun(TestRunnerTaskMixin, PythonTask):
_TESTING_TARGETS = [
# Note: the requirement restrictions on pytest and pytest-cov match those in requirements.txt,
# to avoid confusion when debugging pants tests.
# TODO: make these an option, so any pants install base can pick their pytest version.
PythonRequirement('pytest>=2.6,<2.7'),
# NB, pytest-timeout 1.0.0 introduces a conflicting pytest>=2.8.0 requirement, see:
# https://github.com/pantsbuild/pants/issues/2566
PythonRequirement('pytest-timeout<1.0.0'),
PythonRequirement('pytest-cov>=1.8,<1.9'),
PythonRequirement('unittest2', version_filter=lambda py, pl: py.startswith('2')),
PythonRequirement('unittest2py3k', version_filter=lambda py, pl: py.startswith('3'))
]
@classmethod
def global_subsystems(cls):
return super(PytestRun, cls).global_subsystems() + (PythonSetup, PythonRepos)
@classmethod
def register_options(cls, register):
super(PytestRun, cls).register_options(register)
register('--fast', action='store_true', default=True,
help='Run all tests in a single chroot. If turned off, each test target will '
'create a new chroot, which will be much slower, but more correct, as the'
'isolation verifies that all dependencies are correctly declared.')
register('--fail-slow', action='store_true', default=False,
help='Do not fail fast on the first test failure in a suite; instead run all tests '
'and report errors only after all tests complete.')
register('--junit-xml-dir', metavar='<DIR>',
help='Specifying a directory causes junit xml results files to be emitted under '
'that dir for each test run.')
register('--profile', metavar='<FILE>',
help="Specifying a file path causes tests to be profiled with the profiling data "
"emitted to that file (prefix). Note that tests may run in a different cwd, so "
"it's best to use an absolute path to make it easy to find the subprocess "
"profiles later.")
register('--options', action='append', help='Pass these options to pytest.')
register('--coverage',
help='Emit coverage information for specified paths/modules. Value has two forms: '
'"module:list,of,modules" or "path:list,of,paths"')
register('--coverage-output-dir', metavar='<DIR>', default=None,
help='Directory to emit coverage reports to.'
'If not specified, a default within dist is used.')
register('--shard',
help='Subset of tests to run, in the form M/N, 0 <= M < N. For example, 1/3 means '
'run tests number 2, 5, 8, 11, ...')
@classmethod
def supports_passthru_args(cls):
return True
def __init__(self, *args, **kwargs):
super(PytestRun, self).__init__(*args, **kwargs)
def _test_target_filter(self):
def target_filter(target):
return isinstance(target, PythonTests)
return target_filter
def _validate_target(self, target):
pass
def _execute(self, all_targets):
test_targets = self._get_test_targets()
if test_targets:
self.context.release_lock()
with self.context.new_workunit(name='run',
labels=[WorkUnitLabel.TOOL, WorkUnitLabel.TEST]) as workunit:
# pytest uses py.io.terminalwriter for output. That class detects the terminal
# width and attempts to use all of it. However we capture and indent the console
# output, leading to weird-looking line wraps. So we trick the detection code
# into thinking the terminal window is narrower than it is.
cols = os.environ.get('COLUMNS', 80)
with environment_as(COLUMNS=str(int(cols) - 30)):
self.run_tests(test_targets, workunit)
def run_tests(self, targets, workunit):
if self.get_options().fast:
result = self._do_run_tests(targets, workunit)
if not result.success:
raise TestFailedTaskError(failed_targets=result.failed_targets)
else:
results = {}
# Coverage often throws errors despite tests succeeding, so force failsoft in that case.
fail_hard = not self.get_options().fail_slow and not self.get_options().coverage
for target in targets:
rv = self._do_run_tests([target], workunit)
results[target] = rv
if not rv.success and fail_hard:
break
for target in sorted(results):
self.context.log.info('{0:80}.....{1:>10}'.format(target.id, str(results[target])))
failed_targets = [target for target, rv in results.items() if not rv.success]
if failed_targets:
raise TestFailedTaskError(failed_targets=failed_targets)
class InvalidShardSpecification(TaskError):
"""Indicates an invalid `--shard` option."""
@contextmanager
def _maybe_shard(self):
shard_spec = self.get_options().shard
if not shard_spec:
yield []
return
components = shard_spec.split('/', 1)
if len(components) != 2:
raise self.InvalidShardSpecification("Invalid shard specification '{}', should be of form: "
"[shard index]/[total shards]".format(shard_spec))
def ensure_int(item):
try:
return int(item)
except ValueError:
raise self.InvalidShardSpecification("Invalid shard specification '{}', item {} is not an "
"int".format(shard_spec, item))
shard = ensure_int(components[0])
total = ensure_int(components[1])
if not (0 <= shard and shard < total):
raise self.InvalidShardSpecification("Invalid shard specification '{}', shard must "
"be >= 0 and < {}".format(shard_spec, total))
if total < 2:
yield []
return
with temporary_dir() as tmp:
path = os.path.join(tmp, 'conftest.py')
with open(path, 'w') as fp:
fp.write(dedent("""
def pytest_report_header(config):
return 'shard: {shard} of {total} (0-based shard numbering)'
def pytest_collection_modifyitems(session, config, items):
total_count = len(items)
removed = 0
for i, item in enumerate(list(items)):
if i % {total} != {shard}:
del items[i - removed]
removed += 1
reporter = config.pluginmanager.getplugin('terminalreporter')
reporter.write_line('Only executing {{}} of {{}} total tests in shard {shard} of '
'{total}'.format(total_count - removed, total_count),
bold=True, invert=True, yellow=True)
""".format(shard=shard, total=total)))
yield [path]
@contextmanager
def _maybe_emit_junit_xml(self, targets):
args = []
xml_base = self.get_options().junit_xml_dir
if xml_base and targets:
xml_base = os.path.realpath(xml_base)
xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + '.xml')
safe_mkdir(os.path.dirname(xml_path))
args.append('--junitxml={}'.format(xml_path))
yield args
DEFAULT_COVERAGE_CONFIG = dedent(b"""
[run]
branch = True
timid = True
[report]
exclude_lines =
def __repr__
raise NotImplementedError
""")
@staticmethod
def _format_string_list(values):
# The coverage rc ini files accept "Multi-valued strings" - ie: lists of strings - denoted by
# indenting values on multiple lines like so:
# [section]
# name =
# value1
# value2
#
# See http://nedbatchelder.com/code/coverage/config.html for details.
return '\n\t{values}'.format(values='\n\t'.join(values))
@property
def _debug(self):
return self.get_options().level == 'debug'
def _generate_coverage_config(self, source_mappings):
# For the benefit of macos testing, add the 'real' path to the directory as an equivalent.
def add_realpath(path):
realpath = os.path.realpath(path)
if realpath != canonical and realpath not in alternates:
realpaths.add(realpath)
cp = configparser.SafeConfigParser()
cp.readfp(StringIO(self.DEFAULT_COVERAGE_CONFIG))
# We use the source_mappings to setup the `combine` coverage command to transform paths in
# coverage data files into canonical form.
# See the "[paths]" entry here: http://nedbatchelder.com/code/coverage/config.html for details.
cp.add_section('paths')
for canonical, alternates in source_mappings.items():
key = canonical.replace(os.sep, '.')
realpaths = set()
add_realpath(canonical)
for path in alternates:
add_realpath(path)
cp.set('paths',
key,
self._format_string_list([canonical] + list(alternates) + list(realpaths)))
# See the debug options here: http://nedbatchelder.com/code/coverage/cmd.html#cmd-run-debug
if self._debug:
debug_options = self._format_string_list([
# Dumps the coverage config realized values.
'config',
# Logs which files are skipped or traced and why.
'trace'])
cp.set('run', 'debug', debug_options)
return cp
@contextmanager
def _cov_setup(self, targets, chroot, coverage_modules=None):
def compute_coverage_modules(target):
if target.coverage:
return target.coverage
else:
# This makes the assumption that tests/python/<target> will be testing src/python/<target>.
# Note in particular that this doesn't work for pants' own tests, as those are under
# the top level package 'pants_tests', rather than just 'pants'.
# TODO(John Sirois): consider failing fast if there is no explicit coverage scheme; but also
# consider supporting configuration of a global scheme whether that be parallel
# dirs/packages or some arbitrary function that can be registered that takes a test target
# and hands back the source packages or paths under test.
return set(os.path.dirname(source).replace(os.sep, '.')
for source in target.sources_relative_to_source_root())
if coverage_modules is None:
coverage_modules = set(itertools.chain(*[compute_coverage_modules(t) for t in targets]))
# Hack in turning off pytest_cov reporting to the console - we want to control this ourselves.
# Take the approach of registering a plugin that replaces the pycov plugin's
# `pytest_terminal_summary` callback with a noop.
with temporary_dir() as plugin_root:
plugin_root = os.path.realpath(plugin_root)
with safe_open(os.path.join(plugin_root, 'pants_reporter.py'), 'w') as fp:
fp.write(dedent("""
def pytest_configure(__multicall__, config):
# This executes the rest of the pytest_configures ensuring the `pytest_cov` plugin is
# registered so we can grab it below.
__multicall__.execute()
pycov = config.pluginmanager.getplugin('_cov')
# Squelch console reporting
pycov.pytest_terminal_summary = lambda *args, **kwargs: None
"""))
pythonpath = os.environ.get('PYTHONPATH')
existing_pythonpath = pythonpath.split(os.pathsep) if pythonpath else []
with environment_as(PYTHONPATH=os.pathsep.join(existing_pythonpath + [plugin_root])):
def is_python_lib(tgt):
return tgt.has_sources('.py') and not isinstance(tgt, PythonTests)
source_mappings = {}
for target in targets:
libs = (tgt for tgt in target.closure() if is_python_lib(tgt))
for lib in libs:
source_mappings[lib.target_base] = [chroot]
cp = self._generate_coverage_config(source_mappings=source_mappings)
with temporary_file() as fp:
cp.write(fp)
fp.close()
coverage_rc = fp.name
args = ['-p', 'pants_reporter', '-p', 'pytest_cov', '--cov-config', coverage_rc]
for module in coverage_modules:
args.extend(['--cov', module])
yield args, coverage_rc
@contextmanager
def _maybe_emit_coverage_data(self, targets, chroot, pex, workunit):
coverage = self.get_options().coverage
if coverage is None:
yield []
return
def read_coverage_list(prefix):
return coverage[len(prefix):].split(',')
coverage_modules = None
if coverage.startswith('modules:'):
# NB: pytest-cov maps these modules to the `[run] sources` config. So for
# `modules:pants.base,pants.util` the config emitted has:
# [run]
# source =
# pants.base
# pants.util
#
# Now even though these are not paths, coverage sees the dots and switches to a module
# prefix-matching mode. Unfortunately, neither wildcards nor top-level module prefixes
# like `pants.` serve to engage this module prefix-matching as one might hope. It
# appears that `pants.` is treated as a path and `pants.*` is treated as a literal
# module prefix name.
coverage_modules = read_coverage_list('modules:')
elif coverage.startswith('paths:'):
coverage_modules = []
for path in read_coverage_list('paths:'):
if not os.path.exists(path) and not os.path.isabs(path):
# Look for the source in the PEX chroot since it's not available from the CWD.
path = os.path.join(chroot, path)
coverage_modules.append(path)
with self._cov_setup(targets,
chroot,
coverage_modules=coverage_modules) as (args, coverage_rc):
try:
yield args
finally:
with environment_as(PEX_MODULE='coverage.cmdline:main'):
def pex_run(args):
return self._pex_run(pex, workunit, args=args)
# On failures or timeouts, the .coverage file won't be written.
if not os.path.exists('.coverage'):
logger.warning('No .coverage file was found! Skipping coverage reporting.')
else:
# Normalize .coverage.raw paths using combine and `paths` config in the rc file.
# This swaps the /tmp pex chroot source paths for the local original source paths
# the pex was generated from and which the user understands.
shutil.move('.coverage', '.coverage.raw')
pex_run(args=['combine', '--rcfile', coverage_rc])
pex_run(args=['report', '-i', '--rcfile', coverage_rc])
# TODO(wickman): If coverage is enabled and we are not using fast mode, write an
# intermediate .html that points to each of the coverage reports generated and
# webbrowser.open to that page.
# TODO(John Sirois): Possibly apply the same logic to the console report. In fact,
# consider combining coverage files from all runs in this Tasks's execute and then
# producing just 1 console and 1 html report whether or not the tests are run in fast
# mode.
if self.get_options().coverage_output_dir:
target_dir = self.get_options().coverage_output_dir
else:
relpath = Target.maybe_readable_identify(targets)
pants_distdir = self.context.options.for_global_scope().pants_distdir
target_dir = os.path.join(pants_distdir, 'coverage', relpath)
safe_mkdir(target_dir)
pex_run(args=['html', '-i', '--rcfile', coverage_rc, '-d', target_dir])
coverage_xml = os.path.join(target_dir, 'coverage.xml')
pex_run(args=['xml', '-i', '--rcfile', coverage_rc, '-o', coverage_xml])
@contextmanager
def _test_runner(self, targets, workunit):
interpreter = self.select_interpreter_for_targets(targets)
pex_info = PexInfo.default()
pex_info.entry_point = 'pytest'
chroot = self.cached_chroot(interpreter=interpreter,
pex_info=pex_info,
targets=targets,
platforms=('current',),
extra_requirements=self._TESTING_TARGETS)
pex = chroot.pex()
with self._maybe_shard() as shard_args:
with self._maybe_emit_junit_xml(targets) as junit_args:
with self._maybe_emit_coverage_data(targets,
chroot.path(),
pex,
workunit) as coverage_args:
yield pex, shard_args + junit_args + coverage_args
def _do_run_tests_with_args(self, pex, workunit, args):
try:
# The pytest runner we use accepts a --pdb argument that will launch an interactive pdb
# session on any test failure. In order to support use of this pass-through flag we must
# turn off stdin buffering that otherwise occurs. Setting the PYTHONUNBUFFERED env var to
# any value achieves this in python2.7. We'll need a different solution when we support
# running pants under CPython 3 which does not unbuffer stdin using this trick.
env = {
'PYTHONUNBUFFERED': '1',
}
profile = self.get_options().profile
if profile:
env['PEX_PROFILE_FILENAME'] = '{0}.subprocess.{1:.6f}'.format(profile, time.time())
with environment_as(**env):
rc = self._spawn_and_wait(pex, workunit, args=args, setsid=True)
return PythonTestResult.rc(rc)
except TestFailedTaskError:
# _spawn_and_wait wraps the test runner in a timeout, so it could
# fail with a TestFailedTaskError. We can't just set PythonTestResult
# to a failure because the resultslog doesn't have all the failures
# when tests are killed with a timeout. Therefore we need to re-raise
# here.
raise
except Exception:
self.context.log.error('Failed to run test!')
self.context.log.info(traceback.format_exc())
return PythonTestResult.exception()
# Pattern for lines such as ones below. The second one is from a test inside a class.
# F testprojects/tests/python/pants/constants_only/test_fail.py::test_boom
# F testprojects/tests/python/pants/constants_only/test_fail.py::TestClassName::test_boom
# 'E' is here as well to catch test errors, not just test failures.
RESULTLOG_FAILED_PATTERN = re.compile(r'[EF] +(.+?)::(.+)')
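# For example, matching the first sample line above:
#   m = RESULTLOG_FAILED_PATTERN.match(
#       'F testprojects/tests/python/pants/constants_only/test_fail.py::test_boom')
#   m.groups() == ('testprojects/tests/python/pants/constants_only/test_fail.py', 'test_boom')
# Only the file path (group 1) is used below to map failures back to their targets.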
@classmethod
def _get_failed_targets_from_resultlogs(cls, filename, targets):
with open(filename, 'r') as fp:
lines = fp.readlines()
failed_files = {
m.groups()[0] for m in map(cls.RESULTLOG_FAILED_PATTERN.match, lines) if m and m.groups()
}
failed_targets = set()
for failed_file in failed_files:
failed_targets.update(
t for t in targets if failed_file in t.sources_relative_to_buildroot()
)
return list(failed_targets)
def _do_run_tests(self, targets, workunit):
def _extract_resultlog_filename(args):
resultlogs = [arg[arg.find('=') + 1:] for arg in args if arg.startswith('--resultlog=')]
if resultlogs:
return resultlogs[0]
else:
try:
return args[args.index('--resultlog') + 1]
except IndexError:
self.context.log.error('--resultlog specified without an argument')
return None
except ValueError:
return None
if not targets:
return PythonTestResult.rc(0)
sources = list(itertools.chain(*[t.sources_relative_to_buildroot() for t in targets]))
if not sources:
return PythonTestResult.rc(0)
with self._test_runner(targets, workunit) as (pex, test_args):
def run_and_analyze(resultlog_path):
result = self._do_run_tests_with_args(pex, workunit, args)
failed_targets = self._get_failed_targets_from_resultlogs(resultlog_path, targets)
return result.with_failed_targets(failed_targets)
# N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
# top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
# from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
args = ['--confcutdir', get_buildroot()]
if self._debug:
args.extend(['-s'])
if self.get_options().colors:
args.extend(['--color', 'yes'])
for options in self.get_options().options + self.get_passthru_args():
args.extend(safe_shlex_split(options))
args.extend(test_args)
args.extend(sources)
# The user might have already specified the resultlog option. In such case, reuse it.
resultlog_arg = _extract_resultlog_filename(args)
if resultlog_arg:
return run_and_analyze(resultlog_arg)
else:
with temporary_file_path() as resultlog_path:
args.insert(0, '--resultlog={0}'.format(resultlog_path))
return run_and_analyze(resultlog_path)
def _pex_run(self, pex, workunit, args, setsid=False):
process = self._spawn(pex, workunit, args, setsid=setsid)
return process.wait()
def _spawn(self, pex, workunit, args, setsid=False):
# NB: We don't use pex.run(...) here since it makes a point of running in a clean environment,
# scrubbing all `PEX_*` environment overrides and we use overrides when running pexes in this
# task.
process = subprocess.Popen(pex.cmdline(args),
preexec_fn=os.setsid if setsid else None,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
return SubprocessProcessHandler(process)
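# Illustrative sketch only -- the helper below is hypothetical and is not used by PytestRun.
# It restates the selection rule of the conftest generated in `_maybe_shard` above: a
# `--shard=M/N` spec keeps exactly the collected items whose 0-based index i satisfies
# i % N == M.
def _select_shard(items, shard, total):
  """Return the items that shard `shard` of `total` shards would run (0-based sharding)."""
  return [item for i, item in enumerate(items) if i % total == shard]
# e.g. _select_shard(list(range(12)), 1, 3) == [1, 4, 7, 10], i.e. "tests number 2, 5, 8, 11"
# as described in the --shard help text.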
|
|
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: Jackson Lee 9/29/14
This script reads in a tab-delimited combined coverage file produced by
consolidate_coverage.py (phylogeny, protein classifications, and annotations)
and uses the Biopython KGML libraries to generate graphics of KEGG pathways.
Input file:
Phylogeny Organism Protein Classification RPKM1 RPKM2 ...
2 H. Monster Function|K00003 4211.629513 ...
2 H. Monster Function|K00012 2752.574388
3 ... ...
Output
A series of KEGG pathway map graphics (one PDF per pathway) for each bin at each time point
--------------------------------------------------------------------------------
usage: generate_kgml_graphics.py -i in.file -d out.directory
"""
#-------------------------------------------------------------------------------
#
#
#Code from: http://armchairbiology.blogspot.co.uk/2013/02/keggwatch-part-iii.html
#Generating KEGG maps example 2
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from string import strip
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import pandas as pd
import os
import bisect
from numpy import log10, arange
import KGML_parser
from KGML_scrape import retrieve_KEGG_pathway
from KGML_vis import KGMLCanvas
from Bio.Graphics import ColorSpiral
# List of 2010 IDs for metabolic pathways
metabolic = ["ko00010", "ko00020", "ko00030", "ko00040", "ko00051", "ko00052",
"ko00053", "ko00061", "ko00062", "ko00071", "ko00072", "ko00100",
"ko00120", "ko00121", "ko00130", "ko00140", "ko00190", "ko00195",
"ko00196", "ko00230", "ko00231", "ko00232", "ko00240", "ko00250",
"ko00253", "ko00260", "ko00270", "ko00280", "ko00281", "ko00290",
"ko00300", "ko00310", "ko00311", "ko00312", "ko00330", "ko00331",
"ko00340", "ko00350", "ko00351", "ko00360", "ko00361", "ko00362",
"ko00363", "ko00364", "ko00380", "ko00400", "ko00401", "ko00402",
"ko00410", "ko00430", "ko00440", "ko00450", "ko00460", "ko00471",
"ko00472", "ko00473", "ko00480", "ko00500", "ko00510", "ko00511",
"ko00512", "ko00513", "ko00514", "ko00520", "ko00521", "ko00522",
"ko00523", "ko00524", "ko00531", "ko00532", "ko00533", "ko00534",
"ko00540", "ko00550", "ko00561", "ko00562", "ko00563", "ko00564",
"ko00565", "ko00590", "ko00591", "ko00592", "ko00600", "ko00601",
"ko00603", "ko00604", "ko00620", "ko00621", "ko00622", "ko00623",
"ko00624", "ko00625", "ko00626", "ko00627", "ko00630", "ko00633",
"ko00640", "ko00642", "ko00643", "ko00650", "ko00660", "ko00670",
"ko00680", "ko00710", "ko00720", "ko00730", "ko00740", "ko00750",
"ko00760", "ko00770", "ko00780", "ko00785", "ko00790", "ko00791",
"ko00830", "ko00860", "ko00900", "ko00901", "ko00902", "ko00903",
"ko00904", "ko00905", "ko00906", "ko00908", "ko00909", "ko00910",
"ko00920", "ko00930", "ko00940", "ko00941", "ko00942", "ko00943",
"ko00944", "ko00945", "ko00950", "ko00960", "ko00965", "ko00966",
"ko00970", "ko00980", "ko00981", "ko00982", "ko00983", "ko01040",
"ko01051", "ko01053", "ko01055", "ko01056", "ko01057", "ko01058",
"ko01100", "ko01110", "ko01120", "ko04070"]
# List of 2010 IDs for non-metabolic pathways
non_metabolic = ["ko02010", "ko02020", "ko02030", "ko02040", "ko02060",
"ko03008", "ko03010", "ko03013", "ko03015", "ko03018",
"ko03020", "ko03022", "ko03030", "ko03040", "ko03050",
"ko03060", "ko03070", "ko03320", "ko03410", "ko03420",
"ko03430", "ko03440", "ko03450", "ko04010", "ko04011",
"ko04012", "ko04013", "ko04020", "ko04060", "ko04062",
"ko04070", "ko04075", "ko04080", "ko04110", "ko04111",
"ko04112", "ko04113", "ko04114", "ko04115", "ko04120",
"ko04122", "ko04130", "ko04140", "ko04141", "ko04142",
"ko04144", "ko04145", "ko04146", "ko04150", "ko04210",
"ko04260", "ko04270", "ko04310", "ko04320", "ko04330",
"ko04340", "ko04350", "ko04360", "ko04370", "ko04380",
"ko04510", "ko04512", "ko04514", "ko04520", "ko04530",
"ko04540", "ko04610", "ko04612", "ko04614", "ko04620",
"ko04621", "ko04622", "ko04623", "ko04626", "ko04630",
"ko04640", "ko04650", "ko04660", "ko04662", "ko04664",
"ko04666", "ko04670", "ko04672", "ko04710", "ko04711",
"ko04712", "ko04720", "ko04722", "ko04730", "ko04740",
"ko04742", "ko04744", "ko04745", "ko04810", "ko04910",
"ko04912", "ko04914", "ko04916", "ko04920", "ko04930",
"ko04940", "ko04950", "ko04960", "ko04961", "ko04962",
"ko04964", "ko04966", "ko04970", "ko04971", "ko04972",
"ko04973", "ko04974", "ko04975", "ko04976", "ko04977",
"ko04978", "ko05010", "ko05012", "ko05014", "ko05016",
"ko05020", "ko05100", "ko05110", "ko05111", "ko05120",
"ko05130", "ko05131", "ko05140", "ko05142", "ko05143",
"ko05144", "ko05145", "ko05146", "ko05150", "ko05152",
"ko05160", "ko05162", "ko05200", "ko05210", "ko05211",
"ko05212", "ko05213", "ko05214", "ko05215", "ko05216",
"ko05217", "ko05218", "ko05219", "ko05220", "ko05221",
"ko05222", "ko05223", "ko05310", "ko05320", "ko05322",
"ko05323", "ko05330", "ko05332", "ko05340", "ko05410",
"ko05412", "ko05414", "ko05416"]
#all_kegg = metabolic + non_metabolic
#essential
all_kegg = ["ko00010", "ko00020", "ko00030", "ko00040", "ko00051", "ko00052", "ko00053", "ko00061", "ko00071", "ko00190", "ko00195", "ko00196", "ko00230", "ko00240", "ko00250", "ko00260", "ko00270", "ko00500", "ko00510", "ko00520", "ko00562", "ko00620", "ko00625", "ko00630", "ko00640", "ko00650", "ko00660", "ko00680", "ko00710", "ko00720", "ko00910", "ko00920", "ko01100", "ko01110", "ko01120", "ko02010", "ko02020", "ko02060", "ko03070", "ko04710"]
#for bin 27
#all_kegg = ["ko00010", "ko00020", "ko00030", "ko00190", "ko00195", "ko00620", "ko00630", "ko00640", "ko00650", "ko00660", "ko00680", "ko00720", "ko00910", "ko00920", "ko01100", "ko01110", "ko01120", "ko02010", "ko02020", "ko03070", "ko04122"]
#bare set
#all_kegg = ["ko00010", "ko00020", "ko01100", "ko01110", "ko01120"]
#-------------------------------------------------------------------------------
#function declarations
#-------------------------------------------------------------------------------
#Body
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "generate_kgml_graphics.py -i in.file -d \
out.directory",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input_file", action="store",
dest="inputfilename", help="text input file")
parser.add_argument("-d", "--output_directory", action="store",
dest="outputdirectory", help="text output file")
parser.add_argument("-K", "--KEGG_directory", action="store",
dest="KEGGdirectory", help="path to KEGG kgml files")
options = parser.parse_args()
mandatories = ["inputfilename","outputdirectory", "KEGGdirectory"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
outputdirectory = options.outputdirectory
inputfilename = options.inputfilename
keggdir = options.KEGGdirectory
if not os.path.exists(outputdirectory):
os.makedirs(outputdirectory)
else:
print "\nError: Directory exists!\n"
parser.print_help()
exit(-1)
print "Reading in datafile..."
with open(inputfilename,'U') as infile:
combined = pd.read_csv(infile, header=0, sep='\t')
infile.close()
combined.columns = ["Phylogeny", "Organism", "Protein Classification"] + combined.columns.tolist()[3:]
combined["Protein Classification"] = combined["Protein Classification"].str.replace('^.*\|', '')
rpkm_columns = combined.columns[3:]
log10_columns = [column_name + '_log10' for column_name in rpkm_columns]
combined[log10_columns] = combined[rpkm_columns].applymap(lambda x: log10(float(x)) if float(x) > 0 else 0)
bin_list = list(combined.Phylogeny.unique())
bin_list.sort()
#cs = ColorSpiral(a=2, b=0.2, v_init=0.85, v_final=0.5, jitter=0.03)
print "Generating graphics..."
for bin in bin_list:
working_df = combined[combined.Phylogeny == bin]
os.makedirs(outputdirectory + '/' + str(bin))
#.reindex(index='Protein Classification')
for timepoint, label in zip(log10_columns,rpkm_columns):
# find rpkm ranges and set color palette
min_rpkm = working_df[working_df[timepoint] != 0][timepoint].min()
max_rpkm = working_df[working_df[timepoint] != 0][timepoint].max()
cutoff_rpkm = working_df[working_df[timepoint] != 0][timepoint].median()
color_range = arange(min_rpkm, max_rpkm, (max_rpkm-min_rpkm)/100)
color_dict = ColorSpiral.get_color_dict(color_range, a=6, b=0.7, v_init=0.7, v_final=0.55, jitter=0.00)
print 'Generating ' + outputdirectory + '/' + str(bin) + '/' + str(bin) + '.' + label
for map in all_kegg:
outfilename = outputdirectory + '/' + str(bin) + '/' + str(bin) + '.' + label + '.' + map + '.pdf'
#print 'Opening ' + keggdir + '/' + map + '.kgml'
pathway = KGML_parser.read(open(keggdir + '/' + map + '.kgml', 'U'))
kgml_map = KGMLCanvas(pathway, show_maps=False)
kgml_map.fontsize = 9
special_maps = ['ko01100','ko01110','ko01120']
if pathway.name.split('path:')[1] in special_maps:
entries = [e for e in pathway.orthologs]
for entry in entries:
ename = entry.name.split('ko:')[1:]
ename = [i[:6].lower() for i in ename]
erpkm = working_df.loc[working_df["Protein Classification"].isin(ename),label].sum()
if erpkm >= 0:
erpkm = log10(erpkm)
if erpkm < min_rpkm:
#print oname
for g in entry.graphics:
g.fgcolor = '#CCCCCC'
g.width = .4
else:
for g in entry.graphics:
g.width = 2
if erpkm > cutoff_rpkm:
for g in entry.graphics:
g.width = 10
kgml_map.show_colorbar_legend = False
kgml_map.import_imagemap = False
kgml_map.show_maps = True
else:
kgml_map.set_colorbar_legend(minmax=['1e%.2f' % min_rpkm,'1e%.2f' % max_rpkm], wh_dims = [60.0, 5.0], xypos= [35.0, 5.0], color_dict=color_dict)
orthologs = [e for e in pathway.orthologs]
for ortholog in orthologs:
oname = ortholog.name.split('ko:')[1:]
oname = [i[:6].lower() for i in oname]
orpkm = working_df.loc[working_df["Protein Classification"].isin(oname),label].sum()
if orpkm != 0:
orpkm = log10(orpkm)
if orpkm > max_rpkm:
orpkm = max_rpkm
if orpkm <= 0:
orpkm = min_rpkm
if bisect.bisect_left(color_range, orpkm) > len(color_range)-1:
ocolor = color_dict[color_range[-1]]
else:
ocolor = color_dict[color_range[bisect.bisect_left(color_range, orpkm)]]
for element in ortholog.graphics:
element.bgcolor = ocolor
# set figure display attributes
kgml_map.import_imagemap = True
#kgml_map.show_maps = True
kgml_map.show_maps = False
#kgml_map.show_orthologs = False
kgml_map.draw_relations = False
kgml_map.show_compounds = False
#kgml_map.show_genes = False
# And rendering elements as an overlay
#kgml_map.show_compounds = True
kgml_map.show_genes = True
kgml_map.show_orthologs = True
# Default settings are for the KGML elements only
kgml_map.draw(outfilename)
print "Done!"
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes for reading/manipulating/writing the main sections
of FEFF input file(feff.inp), namely HEADER, ATOMS, POTENTIAL and the program
control tags.
XANES and EXAFS input files, are available, for non-spin case at this time.
"""
import re
import warnings
from operator import itemgetter
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from tabulate import tabulate
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string import str_delimited
__author__ = "Alan Dozier, Kiran Mathew"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "April 7, 2013"
# **Non-exhaustive** list of valid Feff.inp tags
VALID_FEFF_TAGS = (
"CONTROL",
"PRINT",
"ATOMS",
"POTENTIALS",
"RECIPROCAL",
"REAL",
"MARKER",
"LATTICE",
"TITLE",
"RMULTIPLIER",
"SGROUP",
"COORDINATES",
"EQUIVALENCE",
"CIF",
"CGRID",
"CFAVERAGE",
"OVERLAP",
"EXAFS",
"XANES",
"ELNES",
"EXELFS",
"LDOS",
"ELLIPTICITY",
"MULTIPOLE",
"POLARIZATION",
"RHOZZP",
"DANES",
"FPRIME",
"NRIXS",
"XES",
"XNCD",
"XMCD",
"XNCDCONTROL",
"END",
"KMESH",
"PRINT",
"EGRID",
"DIMS",
"AFOLP",
"EDGE",
"COMPTON",
"DANES",
"FPRIME",
"MDFF",
"HOLE",
"COREHOLE",
"S02",
"CHBROAD",
"EXCHANGE",
"FOLP",
"NOHOLE",
"RGRID",
"SCF",
"UNFREEZEF",
"CHSHIFT",
"DEBYE",
"INTERSTITIAL",
"CHWIDTH",
"EGAP",
"EPS0",
"EXTPOT",
"ION",
"JUMPRM",
"EXPOT",
"SPIN",
"LJMAX",
"LDEC",
"MPSE",
"PLASMON",
"RPHASES",
"RSIGMA",
"PMBSE",
"TDLDA",
"FMS",
"DEBYA",
"OPCONS",
"PREP",
"RESTART",
"SCREEN",
"SETE",
"STRFACTORS",
"BANDSTRUCTURE",
"RPATH",
"NLEG",
"PCRITERIA",
"SYMMETRY",
"SS",
"CRITERIA",
"IORDER",
"NSTAR",
"ABSOLUTE",
"CORRECTIONS",
"SIG2",
"SIG3",
"MBCONV",
"SFCONV",
"RCONV",
"SELF",
"SFSE",
"MAGIC",
"TARGET",
"STRFAC",
)
class Header(MSONable):
"""
Creates Header for the FEFF input file.
Has the following format::
* This feff.inp file generated by pymatgen, www.materialsproject.org
TITLE comment:
TITLE Source: CoO19128.cif
TITLE Structure Summary: (Co2 O2)
TITLE Reduced formula: CoO
TITLE space group: P1, space number: 1
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.0 90.0 120.0
TITLE sites: 4
* 1 Co 0.666666 0.333332 0.496324
* 2 Co 0.333333 0.666667 0.996324
* 3 O 0.666666 0.333332 0.878676
* 4 O 0.333333 0.666667 0.378675
"""
def __init__(self, struct, source="", comment=""):
"""
Args:
struct: Structure object, See pymatgen.core.structure.Structure.
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: Comment for first header line
"""
if struct.is_ordered:
self.struct = struct
self.source = source
sym = SpacegroupAnalyzer(struct)
data = sym.get_symmetry_dataset()
self.space_number = data["number"]
self.space_group = data["international"]
self.comment = comment or "None given"
else:
raise ValueError("Structure with partial occupancies cannot be converted into atomic coordinates!")
@staticmethod
def from_cif_file(cif_file, source="", comment=""):
"""
Static method to create Header object from cif_file
Args:
cif_file: cif_file path and name
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: User comment that goes in header
Returns:
Header Object
"""
r = CifParser(cif_file)
structure = r.get_structures()[0]
return Header(structure, source, comment)
@property
def structure_symmetry(self):
"""
Returns space number and space group
Returns:
Space number and space group list
"""
return self.space_group, self.space_number
@property
def formula(self):
"""
Formula of structure
"""
return self.struct.composition.formula
@staticmethod
def from_file(filename):
"""
Returns Header object from file
"""
hs = Header.header_string_from_file(filename)
return Header.from_string(hs)
@staticmethod
def header_string_from_file(filename="feff.inp"):
"""
Reads the header string from either a HEADER file or a feff.inp file.
Will also read a header from a non-pymatgen generated feff.inp file.
Args:
filename: File name containing the Header data.
Returns:
Reads header string.
"""
with zopen(filename, "r") as fobject:
f = fobject.readlines()
feff_header_str = []
ln = 0
# Checks to see if generated by pymatgen
try:
feffpmg = f[0].find("pymatgen")
if feffpmg == -1:
feffpmg = False
except IndexError:
feffpmg = False
# Reads pymatgen generated header or feff.inp file
if feffpmg:
nsites = int(f[8].split()[2])
for line in f:
ln += 1
if ln <= nsites + 9:
feff_header_str.append(line)
else:
# Reads the header from a feff.inp file from an unknown source
end = 0
for line in f:
if (line[0] == "*" or line[0] == "T") and end == 0:
feff_header_str.append(line.replace("\r", ""))
else:
end = 1
return "".join(feff_header_str)
@staticmethod
def from_string(header_str):
"""
Reads Header string and returns Header object if header was
generated by pymatgen.
Note: checks to see if the header was generated by pymatgen; if not, it is
impossible to reconstruct the Structure object, so a Header object cannot be
created and a ValueError is raised.
Args:
header_str: pymatgen generated feff.inp header
Returns:
Header object.
"""
lines = tuple(clean_lines(header_str.split("\n"), False))
comment1 = lines[0]
feffpmg = comment1.find("pymatgen")
if feffpmg == -1:
feffpmg = False
if feffpmg:
comment2 = " ".join(lines[1].split()[2:])
source = " ".join(lines[2].split()[2:])
basis_vec = lines[6].split(":")[-1].split()
# a, b, c
a = float(basis_vec[0])
b = float(basis_vec[1])
c = float(basis_vec[2])
lengths = [a, b, c]
# alpha, beta, gamma
basis_ang = lines[7].split(":")[-1].split()
alpha = float(basis_ang[0])
beta = float(basis_ang[1])
gamma = float(basis_ang[2])
angles = [alpha, beta, gamma]
lattice = Lattice.from_parameters(*lengths, *angles)
natoms = int(lines[8].split(":")[-1].split()[0])
atomic_symbols = []
for i in range(9, 9 + natoms):
atomic_symbols.append(lines[i].split()[2])
# read the atomic coordinates
coords = []
for i in range(natoms):
toks = lines[i + 9].split()
coords.append([float(s) for s in toks[3:]])
struct = Structure(lattice, atomic_symbols, coords, False, False, False)
h = Header(struct, source, comment2)
return h
raise ValueError("Header not generated by pymatgen, cannot return header object")
def __str__(self):
"""
String representation of Header.
"""
def to_s(x):
return f"{x:0.6f}"
output = [
"* This FEFF.inp file generated by pymatgen",
"".join(["TITLE comment: ", self.comment]),
"".join(["TITLE Source: ", self.source]),
f"TITLE Structure Summary: {self.struct.composition.formula}",
f"TITLE Reduced formula: {self.struct.composition.reduced_formula}",
f"TITLE space group: ({self.space_group}), space number: ({self.space_number})",
f"TITLE abc:{' '.join([to_s(i).rjust(10) for i in self.struct.lattice.abc])}",
f"TITLE angles:{' '.join([to_s(i).rjust(10) for i in self.struct.lattice.angles])}",
f"TITLE sites: {self.struct.num_sites}",
]
for i, site in enumerate(self.struct):
output.append(
" ".join(
[
"*",
str(i + 1),
site.species_string,
" ".join([to_s(j).rjust(12) for j in site.frac_coords]),
]
)
)
return "\n".join(output)
def write_file(self, filename="HEADER"):
"""
Writes Header into filename on disk.
Args:
filename: Filename and path for file to be written to disk
"""
with open(filename, "w") as f:
f.write(str(self) + "\n")
class Atoms(MSONable):
"""
Atomic cluster centered around the absorbing atom.
"""
def __init__(self, struct, absorbing_atom, radius):
"""
Args:
struct (Structure): input structure
absorbing_atom (str/int): Symbol for absorbing atom or site index
radius (float): radius of the atom cluster in Angstroms.
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be converted into atomic coordinates!")
self.absorbing_atom, self.center_index = get_absorbing_atom_symbol_index(absorbing_atom, struct)
self.radius = radius
self._cluster = self._set_cluster()
def _set_cluster(self):
"""
Compute and set the cluster of atoms as a Molecule object. The site
coordinates are translated such that the absorbing atom (aka central
atom) is at the origin.
Returns:
Molecule
"""
center = self.struct[self.center_index].coords
sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
symbols = [self.absorbing_atom]
coords = [[0, 0, 0]]
for i, site_dist in enumerate(sphere):
site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
symbols.append(site_symbol)
coords.append(site_dist[0].coords - center)
return Molecule(symbols, coords)
@property
def cluster(self):
"""
Returns the atomic cluster as a Molecule object.
"""
return self._cluster
@staticmethod
def atoms_string_from_file(filename):
"""
Reads atomic shells from file such as feff.inp or ATOMS file
The lines are arranged as follows:
x y z ipot Atom Symbol Distance Number
with distance being the shell radius and ipot an integer identifying
the potential used.
Args:
filename: File name containing atomic coord data.
Returns:
Atoms string.
"""
with zopen(filename, "rt") as fobject:
f = fobject.readlines()
coords = 0
atoms_str = []
for line in f:
if coords == 0:
find_atoms = line.find("ATOMS")
if find_atoms >= 0:
coords = 1
if coords == 1 and "END" not in line:
atoms_str.append(line.replace("\r", ""))
return "".join(atoms_str)
@staticmethod
def cluster_from_file(filename):
"""
Parse the feff input file and return the atomic cluster as a Molecule
object.
Args:
filename (str): path the feff input file
Returns:
Molecule: the atomic cluster as Molecule object. The absorbing atom
is the one at the origin.
"""
atoms_string = Atoms.atoms_string_from_file(filename)
line_list = [l.split() for l in atoms_string.splitlines()[3:]]
coords = []
symbols = []
for l in line_list:
if l:
coords.append([float(i) for i in l[:3]])
symbols.append(l[4])
return Molecule(symbols, coords)
def get_lines(self):
"""
Returns a list of string representations of the atomic configuration
information(x, y, z, ipot, atom_symbol, distance, id).
Returns:
list: list of strings, sorted by the distance from the absorbing
atom.
"""
lines = [
[
f"{self._cluster[0].x:f}",
f"{self._cluster[0].y:f}",
f"{self._cluster[0].z:f}",
0,
self.absorbing_atom,
"0.0",
0,
]
]
for i, site in enumerate(self._cluster[1:]):
site_symbol = re.sub(r"[^aA-zZ]+", "", site.species_string)
ipot = self.pot_dict[site_symbol]
lines.append(
[
f"{site.x:f}",
f"{site.y:f}",
f"{site.z:f}",
ipot,
site_symbol,
f"{self._cluster.get_distance(0, i + 1):f}",
i + 1,
]
)
return sorted(lines, key=itemgetter(5))
def __str__(self):
"""
String representation of Atoms file.
"""
lines_sorted = self.get_lines()
# TODO: remove the formatting and update the unittests
lines_formatted = str(
tabulate(
lines_sorted,
headers=["* x", "y", "z", "ipot", "Atom", "Distance", "Number"],
)
)
atom_list = lines_formatted.replace("--", "**")
return "".join(["ATOMS\n", atom_list, "\nEND\n"])
def write_file(self, filename="ATOMS"):
"""
Write Atoms list to file.
Args:
filename: path for file to be written
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Tags(dict):
"""
FEFF control parameters.
"""
def __init__(self, params=None):
"""
Args:
params: A set of input parameters as a dictionary.
"""
super().__init__()
if params:
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair. Warns if parameter is not in list of valid
Feff tags. Also cleans the parameter and val by stripping leading and
trailing white spaces.
Arg:
key: dict key value
value: value associated with key in dictionary
"""
if key.strip().upper() not in VALID_FEFF_TAGS:
warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
super().__setitem__(
key.strip(),
Tags.proc_val(key.strip(), val.strip()) if isinstance(val, str) else val,
)
def as_dict(self):
"""
Dict representation.
Returns:
Dictionary of parameters from fefftags object
"""
tags_dict = dict(self)
tags_dict["@module"] = self.__class__.__module__
tags_dict["@class"] = self.__class__.__name__
return tags_dict
@staticmethod
def from_dict(d):
"""
Creates Tags object from a dictionary.
Args:
d: Dict of feff parameters and values.
Returns:
Tags object
"""
i = Tags()
for k, v in d.items():
if k not in ("@module", "@class"):
i[k] = v
return i
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the Tags. The reason why this
method is different from the __str__ method is to provide options
for pretty printing.
Args:
sort_keys: Set to True to sort the Feff parameters alphabetically.
Defaults to False.
pretty: Set to True for pretty aligned output. Defaults to False.
Returns:
String representation of Tags.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if isinstance(self[k], dict):
if k in ["ELNES", "EXELFS"]:
lines.append([k, self._stringify_val(self[k]["ENERGY"])])
beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
beam_energy_list = beam_energy.split()
if int(beam_energy_list[1]) == 0: # aver=0, specific beam direction
lines.append([beam_energy])
lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
else:
# no cross terms for orientation averaged spectrum
beam_energy_list[2] = str(0)
lines.append([self._stringify_val(beam_energy_list)])
lines.append([self._stringify_val(self[k]["ANGLES"])])
lines.append([self._stringify_val(self[k]["MESH"])])
lines.append([self._stringify_val(self[k]["POSITION"])])
else:
lines.append([k, self._stringify_val(self[k])])
if pretty:
return tabulate(lines)
return str_delimited(lines, None, " ")
@staticmethod
def _stringify_val(val):
"""
Convert the given value to string.
"""
if isinstance(val, list):
return " ".join([str(i) for i in val])
return str(val)
def __str__(self):
return self.get_string()
def write_file(self, filename="PARAMETERS"):
"""
Write Tags to a Feff parameter tag file.
Args:
filename: filename and path to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__() + "\n")
@staticmethod
def from_file(filename="feff.inp"):
"""
Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
Args:
filename: Filename for either PARAMETER or feff.inp file
Returns:
Feff_tag object
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
params = {}
eels_params = []
ieels = -1
ieels_max = -1
for i, line in enumerate(lines):
m = re.match(r"([A-Z]+\d*\d*)\s*(.*)", line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Tags.proc_val(key, val)
if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
if key in ["ELNES", "EXELFS"]:
ieels = i
ieels_max = ieels + 5
else:
params[key] = val
if ieels >= 0:
if ieels <= i <= ieels_max:
if i == ieels + 1:
if int(line.split()[1]) == 1:
ieels_max -= 1
eels_params.append(line)
if eels_params:
if len(eels_params) == 6:
eels_keys = [
"BEAM_ENERGY",
"BEAM_DIRECTION",
"ANGLES",
"MESH",
"POSITION",
]
else:
eels_keys = ["BEAM_ENERGY", "ANGLES", "MESH", "POSITION"]
eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
for k, v in zip(eels_keys, eels_params[1:]):
eels_dict[k] = str(v)
params[str(eels_params[0].split()[0])] = eels_dict
return Tags(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert Feff parameters to proper types, e.g.
integers, floats, lists, etc.
Args:
key: Feff parameter key
val: Actual value of Feff parameter.
"""
list_type_keys = list(VALID_FEFF_TAGS)
del list_type_keys[list_type_keys.index("ELNES")]
del list_type_keys[list_type_keys.index("EXELFS")]
boolean_type_keys = ()
float_type_keys = ("S02", "EXAFS", "RPATH")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
return int(numstr)
try:
if key.lower() == "cif":
m = re.search(r"\w+.cif", val)
return m.group(0)
if key in list_type_keys:
output = []
toks = re.split(r"\s+", val)
for tok in toks:
m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok)
if m:
output.extend([smart_int_or_float(m.group(2))] * int(m.group(1)))
else:
output.append(smart_int_or_float(tok))
return output
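# For example (illustrative input): proc_val("SCF", "3*1.0 2") -> [1.0, 1.0, 1.0, 2];
# the N*value shorthand handled above expands to N repeated values.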
if key in boolean_type_keys:
m = re.search(r"^\W+([TtFf])", val)
if m:
return m.group(1) in ["T", "t"]
raise ValueError(key + " should be a boolean type!")
if key in float_type_keys:
return float(val)
except ValueError:
return val.capitalize()
return val.capitalize()
def diff(self, other):
"""
Diff function. Compares two PARAMETER files and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other: The other PARAMETER dictionary to compare to.
Returns:
Dict of the format {"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different} Note that the
parameters are return as full dictionaries of values.
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"FEFF_TAGS1": v1, "FEFF_TAGS2": "Default"}
elif v1 != other[k1]:
different_param[k1] = {"FEFF_TAGS1": v1, "FEFF_TAGS2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"FEFF_TAGS1": "Default", "FEFF_TAGS2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another Tags object to this object
Facilitates the use of "standard" Tags
"""
params = dict(self)
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Tags have conflicting values!")
params[k] = v
return Tags(params)
class Potential(MSONable):
"""
FEFF atomic potential.
"""
def __init__(self, struct, absorbing_atom):
"""
Args:
struct (Structure): Structure object.
absorbing_atom (str/int): Absorbing atom symbol or site index
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be converted into atomic coordinates!")
self.absorbing_atom, _ = get_absorbing_atom_symbol_index(absorbing_atom, struct)
@staticmethod
def pot_string_from_file(filename="feff.inp"):
"""
Reads Potential parameters from a feff.inp or FEFFPOT file.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Args:
filename: file name containing potential data.
Returns:
FEFFPOT string.
"""
with zopen(filename, "rt") as f_object:
f = f_object.readlines()
ln = -1
pot_str = ["POTENTIALS\n"]
pot_tag = -1
pot_data = 0
pot_data_over = 1
sep_line_pattern = [
re.compile("ipot.*Z.*tag.*lmax1.*lmax2.*spinph"),
re.compile("^[*]+.*[*]+$"),
]
for line in f:
if pot_data_over == 1:
ln += 1
if pot_tag == -1:
pot_tag = line.find("POTENTIALS")
ln = 0
if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
try:
if len(sep_line_pattern[0].findall(line)) > 0 or len(sep_line_pattern[1].findall(line)) > 0:
pot_str.append(line)
elif int(line.split()[0]) == pot_data:
pot_data += 1
pot_str.append(line.replace("\r", ""))
except (ValueError, IndexError):
if pot_data > 0:
pot_data_over = 0
return "".join(pot_str).rstrip("\n")
@staticmethod
def pot_dict_from_string(pot_data):
"""
Creates atomic symbol/potential number dictionary
forward and reverse
Arg:
pot_data: potential data in string format
Returns:
forward and reverse atom symbol and potential number dictionaries.
"""
pot_dict = {}
pot_dict_reverse = {}
begin = 0
ln = -1
for line in pot_data.split("\n"):
try:
if begin == 0 and line.split()[0] == "0":
begin += 1
ln = 0
if begin == 1:
ln += 1
if ln > 0:
atom = line.split()[2]
index = int(line.split()[0])
pot_dict[atom] = index
pot_dict_reverse[index] = atom
except (ValueError, IndexError):
pass
return pot_dict, pot_dict_reverse
def __str__(self):
"""
Returns a string representation of potential parameters to be used in
the feff.inp file,
determined from structure object.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Returns:
String representation of Atomic Coordinate Shells.
"""
central_element = Element(self.absorbing_atom)
ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, 0.0001, 0]]
for el, amt in self.struct.composition.items():
ipot = self.pot_dict[el.symbol]
ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])
ipot_sorted = sorted(ipotrow, key=itemgetter(0))
ipotrow = str(
tabulate(
ipot_sorted,
headers=[
"*ipot",
"Z",
"tag",
"lmax1",
"lmax2",
"xnatph(stoichometry)",
"spinph",
],
)
)
ipotlist = ipotrow.replace("--", "**")
ipotlist = "".join(["POTENTIALS\n", ipotlist])
return ipotlist
def write_file(self, filename="POTENTIALS"):
"""
Write to file.
Args:
filename: filename and path to write potential file to.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Paths(MSONable):
"""
Set FEFF scattering paths('paths.dat' file used by the 'genfmt' module).
"""
def __init__(self, atoms, paths, degeneracies=None):
"""
Args:
atoms (Atoms): Atoms object
paths (list(list)): list of paths. Each path is a list of atom indices in the atomic
cluster(the molecular cluster created by Atoms class).
e.g. [[0, 1, 2], [5, 9, 4, 1]] -> 2 paths: one with 3 legs and the other with 4 legs.
degeneracies (list): list of degeneracies, one for each path. Set to 1 if not specified.
"""
self.atoms = atoms
self.paths = paths
self.degeneracies = degeneracies or [1] * len(paths)
assert len(self.degeneracies) == len(self.paths)
def __str__(self):
lines = ["PATH", "---------------"]
# max possible, to avoid name collision count down from max value.
path_index = 9999
for i, legs in enumerate(self.paths):
lines.append(f"{path_index} {len(legs)} {self.degeneracies[i]}")
lines.append("x y z ipot label")
for l in legs:
coords = self.atoms.cluster[l].coords.tolist()
tmp = "{:.6f} {:.6f} {:.6f}".format(*tuple(coords))
element = str(self.atoms.cluster[l].specie.name)
# the potential index for the absorbing atom(the one at the cluster origin) is 0
potential = 0 if np.linalg.norm(coords) <= 1e-6 else self.atoms.pot_dict[element]
tmp = f"{tmp} {potential} {element}"
lines.append(tmp)
path_index -= 1
return "\n".join(lines)
def write_file(self, filename="paths.dat"):
"""
Write paths.dat.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class FeffParserError(Exception):
"""
Exception class for FEFF parsing.
Raised when a FEFF input file has problems that prevent it from being parsed.
"""
def get_atom_map(structure):
"""
Returns a dict that maps each atomic symbol to a unique integer starting
from 1.
Args:
structure (Structure)
Returns:
dict
"""
syms = [site.specie.symbol for site in structure]
unique_pot_atoms = []
[unique_pot_atoms.append(i) for i in syms if not unique_pot_atoms.count(i)]
atom_map = {}
for i, atom in enumerate(unique_pot_atoms):
atom_map[atom] = i + 1
return atom_map
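# For example, for a structure whose sites are [Co, Co, O, O] (hypothetical), get_atom_map
# returns {"Co": 1, "O": 2}: potentials are numbered by order of first appearance, from 1.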
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
"""
Return the absorbing atom symbol and site index in the given structure.
Args:
absorbing_atom (str/int): symbol or site index
structure (Structure)
Returns:
str, int: symbol and site index
"""
if isinstance(absorbing_atom, str):
return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]
if isinstance(absorbing_atom, int):
return str(structure[absorbing_atom].specie), absorbing_atom
raise ValueError("absorbing_atom must be either specie symbol or site index")
|
|
#!/usr/bin/env python3
import ssl
import socket
from time import sleep
from functools import reduce
MODE_COMMAND = 0
MODE_DATA = 1
DEBUG_HEXDUMP = 0b0001
DEBUG_COMMAND = 0b0010
DEBUG_PROTOCOL = 0b0100
DEBUG_ALL = 0b1111
RECOVER_TIME = 1
_FRAME_COUNT = 9
_HIGHEST_BIT = 0x7F
_FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
_PAYLOADSIZE = 54
_PAYLOADMAP = {
# Sonnenkraft SKSC2 HE 0x4214
# Offset, size, factor
'temp1': (0, 2, 0.1), # Temperature S1
'temp2': (2, 2, 0.1), # Temperature S2
'temp3': (4, 2, 0.1), # Temperature S3
'temp4': (6, 2, 0.1), # Temperature S4
'rel1': (8, 1, 1), # Relais 1
'rel2': (9, 1, 1), # Relais 2
'error': (10, 1, 1), # Error mask
'rel1oph': (12, 2, 1), # Operating hours Relais 1
'rel2oph': (14, 2, 1), # Operating hours Relais 2
'heat': (16, 6, 1), # Amount of heat
'temp5': (24, 2, 0.1), # Temperature VFD1
'flow5': (26, 2, 1), # Volumetric flow rate VFD1
'voltage': (32, 1, 0.1), # Voltage
}
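# Each entry above is (offset, size, factor): the decoder reads `size` little-endian bytes
# at `offset` from the de-septet'ed payload (see _getbytes / _parsepayload below) and scales
# the raw count by `factor`; temperature fields are additionally sign-extended via two's
# complement.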
class _TERM:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def _hexdump(src, length=16):
result = []
for i in range(0, len(src), length):
s = src[i:i + length]
hexa = ' '.join(["%02X" % x for x in s])
result.append("%04X %-*s \n" % (i, length * 3, hexa))
return "Len %iB\n%s" % (len(src), ''.join(result))
class VBUSException(Exception):
def __init__(self, *args):
super().__init__(*args)
class VBUSResponse(object):
"""
A response object that is generated by
the VBUSConnection when in COMMAND mode.
"""
def __init__(self, line):
assert len(line) > 2
self.positive = str(chr(line[0])) == '+'
# Convert byte-object to string
str_line = ''
for b in line:
str_line += str(chr(b))
print('< ', str_line)
self.type = str_line
class VBUSConnection(object):
def __init__(self, host, port=7053, password="", debugmode=0b0000):
"""
:param host: The IP/DNS of the vbus host
:param port: The port the vbus is listening to
:param password: The optional password. Use "" or None for no password
:param debugmode: The debug flags to use
:type host: str
:type port: int
:type password: str
:type debugmode: int
"""
password = "" if password in [None, False] else password
assert isinstance(port, int)
assert isinstance(host, str)
assert isinstance(password, str)
assert isinstance(debugmode, int)
self.host = host
self.port = port
self.password = password or False
self.debugmode = debugmode
self._mode = MODE_COMMAND
self._sock = None
self._buffer = []
def connect(self, sslsock=False):
"""
Connects to the VBUS. It will try to authenticate
if a password has been set.
:raise VBUSException:
:type sslsock: bool
:param sslsock: Use ssl?
"""
assert not self._sock
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if sslsock: # Unlikely that we'll ever connect to the VBUS using an ssl socket but "why not?"
self._sock = ssl.wrap_socket(self._sock)
self._sock.connect((self.host, self.port))
assert VBUSResponse(self._lrecv()).type == "+HELLO"
if self.password:
if not self.authenticate():
raise VBUSException("Could not authenticate")
def authenticate(self):
"""
Authenticate with the server using the set password. This
will return whether the authentication attempt was accepted.
:rtype : bool
"""
assert self.password
assert self._mode == MODE_COMMAND
self._lsend("PASS %s" % self.password)
resp = VBUSResponse(self._lrecv())
return resp.positive
def data(self, payloadmap=_PAYLOADMAP, framecount=_FRAME_COUNT, payloadsize=_PAYLOADSIZE):
"""
Listen to the server and get some data.
:param payloadmap:
:param payloadsize: The expected payload size in bytes
:param framecount: The number of expected frames
:return: The requested data
:type payloadmap: dict
:type payloadsize: int
:type framecount: int
:rtype : dict
"""
payloadmap = payloadmap.copy()
#assert isinstance(payloadmap, dict)
assert isinstance(payloadsize, int)
assert self._sock
if self._mode != MODE_DATA:
self._lsend("DATA")
resp = VBUSResponse(self._lrecv())
if not resp.positive:
raise VBUSException("Could create a data stream: %s" % resp.message)
self._mode = MODE_DATA
while True:
for data in self._brecv().split(b"\xaa"):
if len(data) > 5:
# Wait till we get the correct protocol
if self._getbytes(data, 4, 5) != 0x10:
continue
if self.debugmode & DEBUG_PROTOCOL:
print('-----------------------------------------')
print("Src: 0X%02X" % self._getbytes(data, 0, 2))
print("Dst: 0X%02X" % self._getbytes(data, 2, 4))
print('Protocol version:', hex(data[4]))
if len(data) > 8:
if self.debugmode & DEBUG_PROTOCOL:
print("Cmd: 0X%02X" % self._getbytes(data, 5, 7))
# Are we getting a payload?
                        if self._getbytes(data, 5, 7) != 0x100:
continue
if self.debugmode & DEBUG_PROTOCOL:
print("Source map: 0X%02X" % self._getbytes(data, 2, 4))
# Is the checksum valid?
                        if self._checksum(data[0:8]) != data[8]:
if self.debugmode & DEBUG_PROTOCOL:
print("Invalid checksum: got %d expected %d" % \
(self._checksum(data[0:8]), data[8]))
continue
# Check payload length
frames = data[7]
if self.debugmode & DEBUG_PROTOCOL:
print('Frames:', frames)
p_end = 9 + (6 * frames)
payload = data[9:p_end]
                        if len(payload) != 6 * frames:
if self.debugmode & DEBUG_PROTOCOL:
print("Unexpected payload length: %i != %i" % \
(len(payload), 6 * frames))
continue
r = self._parsepayload(payload, payloadmap, payloadsize, framecount)
if r:
print(r)
return r
# The vbus freaks out when you send too many requests
# This can be solved by just waiting
sleep(RECOVER_TIME)
def getmode(self):
"""
Returns the current mode
"""
return self._mode
def _parsepayload(self, payload, payloadmap, payloadsize, framecount):
data = []
        # Payload size check (currently disabled by the trailing `and False`)
        if len(payload) != payloadsize and False:
            if self.debugmode & DEBUG_PROTOCOL:
                print("Payload size mismatch: expected %i got %i" % (payloadsize, len(payload)))
return None
        if any(i > _HIGHEST_BIT for i in payload):
            if self.debugmode & DEBUG_PROTOCOL:
                print("Found byte above the highest allowed value, discarding payload")
print(' '.join(
"%02X" % i if i <= _HIGHEST_BIT else "%s%02X%s" % (_TERM.RED, i, _TERM.END)
for i in payload
))
return None
if (len(payload) / 6) != framecount:
if self.debugmode & DEBUG_PROTOCOL:
print("Invalid frame count %d (%d)" % (framecount, len(payload) / 6))
return None
for i in range(int(len(payload) / 6)):
frame = payload[i * 6:i * 6 + 6]
if self.debugmode & DEBUG_PROTOCOL:
print("Frame %i: %s" % (i, ' '.join("%02X" % i for i in frame)))
# Check frame checksum
            if frame[5] != self._checksum(frame[:-1]):
if self.debugmode & DEBUG_PROTOCOL:
print("Frame checksum mismatch: ", frame[5], self._checksum(frame[:-1]))
return None
septet = frame[4]
for j in range(4):
if septet & (1 << j):
data.append(frame[j] | 0x80)
else:
data.append(frame[j])
vals = {}
for i, rng in payloadmap.items():
vals[i] = self._getbytes(data, rng[0], rng[0] + rng[1])
# Temperatures can be negative (using two's complement)
if i.startswith('temp'):
bits = (rng[1]) * 8
if vals[i] >= 1 << (bits - 1):
vals[i] -= 1 << bits
# Apply factor
vals[i] = vals[i] * rng[2]
if self.debugmode & DEBUG_PROTOCOL:
print(vals)
return vals
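    # Illustrative payloadmap layout (an assumption inferred from how
    # _parsepayload indexes it): each entry maps a value name to a
    # (byte offset, byte length, scale factor) tuple, e.g.
    #     {'temp1': (0, 2, 0.1), 'pump_speed': (8, 1, 1)}
    # Names starting with 'temp' are decoded as signed two's-complement values
    # before the scale factor is applied.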
@staticmethod
def _checksum(data):
crc = 0x7F
for d in data:
crc = (crc - d) & 0x7F
return crc
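    # The checksum is 0x7F minus the byte sum, truncated to 7 bits, so a frame
    # is considered valid when _checksum(frame[:-1]) == frame[-1].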
@staticmethod
def _getbytes(data, begin, end):
return sum([b << (i * 8) for i, b in enumerate(data[begin:end])])
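    # Bytes are combined little-endian (least significant byte first), e.g.
    # _getbytes([0x10, 0x27], 0, 2) == 0x2710 == 10000.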
def _lrecv(self):
c = b''
s = b''
while c != b'\n':
c = self._sock.recv(1)
            if c == b'':
break
if c != b'\n':
s += c
if self.debugmode & DEBUG_COMMAND:
print("< %s" % s)
return s
def _brecv(self, n=1024):
d = self._sock.recv(n)
while d.count(b"\xaa") < 4:
d += self._sock.recv(n)
if self.debugmode & DEBUG_HEXDUMP:
print(_hexdump(d))
return d
def _lsend(self, s):
if self.debugmode & DEBUG_COMMAND:
print("> " + s)
msg = s + "\r\n"
self._sock.send(msg.encode("utf-8"))
def _bsend(self, s):
if self.debugmode & DEBUG_HEXDUMP:
print(_hexdump(s))
self._sock.send(s)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.transfers.gcs_to_gcs import WILDCARD, GCSToGCSOperator
TASK_ID = 'test-gcs-to-gcs-operator'
TEST_BUCKET = 'test-bucket'
PREFIX = 'TEST'
SOURCE_OBJECTS_NO_FILE = ['']
SOURCE_OBJECTS_TWO_EMPTY_STRING = ['', '']
SOURCE_OBJECTS_SINGLE_FILE = ['test_object/file1.txt']
SOURCE_OBJECTS_MULTIPLE_FILES = ['test_object/file1.txt', 'test_object/file2.txt']
SOURCE_OBJECTS_LIST = ['test_object/file1.txt', 'test_object/file2.txt', 'test_object/file3.json']
SOURCE_OBJECT_WILDCARD_PREFIX = '*test_object'
SOURCE_OBJECT_WILDCARD_SUFFIX = 'test_object*'
SOURCE_OBJECT_WILDCARD_MIDDLE = 'test*object'
SOURCE_OBJECT_WILDCARD_FILENAME = 'test_object*.txt'
SOURCE_OBJECT_NO_WILDCARD = 'test_object.txt'
SOURCE_OBJECT_MULTIPLE_WILDCARDS = 'csv/*/test_*.csv'
DESTINATION_BUCKET = 'archive'
DESTINATION_OBJECT = 'foo/bar'
DESTINATION_OBJECT_PREFIX = 'foo/bar'
SOURCE_FILES_LIST = [
'test_object/file1.txt',
'test_object/file2.txt',
'test_object/file3.json',
]
DELIMITER = '.json'
MOD_TIME_1 = datetime(2016, 1, 1)
MOD_TIME_2 = datetime(2019, 1, 1)
class TestGoogleCloudStorageToCloudStorageOperator(unittest.TestCase):
"""
Tests the three use-cases for the wildcard operator. These are
no_prefix: *test_object
no_suffix: test_object*
prefix_and_suffix: test*object
Also tests the destination_object as prefix when the wildcard is used.
"""
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_no_prefix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_PREFIX,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="", delimiter="test_object")
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_no_suffix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="test_object", delimiter="")
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_replace_flag_false(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
replace=False,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object", delimiter=""),
mock.call(DESTINATION_BUCKET, prefix="test_object", delimiter=""),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_prefix_and_suffix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_MIDDLE,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="test", delimiter="object")
# copy with wildcard
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'foo/bar/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'foo/bar/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_destination_object_retained_prefix(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object=f'{DESTINATION_OBJECT_PREFIX}/{SOURCE_OBJECT_WILDCARD_SUFFIX[:-1]}',
)
operator.execute(None)
mock_calls_retained = [
mock.call(
TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'foo/bar/test_object/file1.txt'
),
mock.call(
TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'foo/bar/test_object/file2.txt'
),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_retained)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_without_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_empty_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object='',
)
operator.execute(None)
mock_calls_empty = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, '/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, '/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_empty)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_last_modified_time(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=None,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_last_modified_time_with_all_true_cond(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
mock_hook.return_value.is_updated_after.side_effect = [True, True, True]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_last_modified_time_with_one_true_cond(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
mock_hook.return_value.is_updated_after.side_effect = [True, False, False]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_no_last_modified_time(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=None,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_no_prefix_with_last_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_after.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_no_prefix_with_maximum_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_before.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
maximum_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_exe_last_modified_time_and_maximum_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_between.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
maximum_modified_time=MOD_TIME_2,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_no_prefix_with_no_last_modified_time(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=None,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_no_prefix_with_last_modified_time_with_false_cond(self, mock_hook):
mock_hook.return_value.is_updated_after.return_value = False
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_not_called()
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_is_older_than_with_true_cond(self, mock_hook):
mock_hook.return_value.is_older_than.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
maximum_modified_time=MOD_TIME_2,
is_older_than=3600,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_more_than_1_wildcard(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_MULTIPLE_WILDCARDS,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
)
total_wildcards = operator.source_object.count(WILDCARD)
error_msg = f"Only one wildcard '[*]' is allowed in source_object parameter. Found {total_wildcards}"
with pytest.raises(AirflowException, match=error_msg):
operator.execute(None)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_with_empty_destination_bucket(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=None,
destination_object=DESTINATION_OBJECT_PREFIX,
)
with mock.patch.object(operator.log, 'warning') as mock_warn:
operator.execute(None)
mock_warn.assert_called_once_with(
'destination_bucket is None. Defaulting it to source_bucket (%s)', TEST_BUCKET
)
assert operator.destination_bucket == operator.source_bucket
# Tests the use of delimiter and source object as list
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_empty_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_NO_FILE
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix='', delimiter=None)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_raises_exception_with_two_empty_list_inside_source_objects(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_TWO_EMPTY_STRING
)
with pytest.raises(AirflowException, match="You can't have two empty strings inside source_object"):
operator.execute(None)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_single_item_in_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_SINGLE_FILE
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(
TEST_BUCKET, prefix=SOURCE_OBJECTS_SINGLE_FILE[0], delimiter=None
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_multiple_items_in_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_MULTIPLE_FILES
)
operator.execute(None)
mock_hook.return_value.list.assert_has_calls(
[
mock.call(TEST_BUCKET, prefix='test_object/file1.txt', delimiter=None),
mock.call(TEST_BUCKET, prefix='test_object/file2.txt', delimiter=None),
],
any_order=True,
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_a_delimiter(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_NO_FILE,
delimiter=DELIMITER,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix='', delimiter=DELIMITER)
# COPY
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_delimiter_and_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = ['test_object/file3.json']
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_LIST,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT,
delimiter=DELIMITER,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file3.json', DESTINATION_BUCKET, DESTINATION_OBJECT),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_different_delimiter_and_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = ['test_object/file1.txt', 'test_object/file2.txt']
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_LIST,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT,
delimiter='.txt',
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, "test_object/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_no_destination_bucket_and_no_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_LIST
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', TEST_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', TEST_BUCKET, 'test_object/file2.txt'),
mock.call(TEST_BUCKET, 'test_object/file3.json', TEST_BUCKET, 'test_object/file3.json'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_last_modified_time_with_all_true_cond_no_file(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
mock_hook.return_value.is_updated_after.side_effect = [True, True, True]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_NO_FILE,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
mock.call(TEST_BUCKET, 'test_object/file3.json', DESTINATION_BUCKET, 'test_object/file3.json'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_replace_flag_false_with_destination_object(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
replace=False,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object", delimiter=""),
mock.call(DESTINATION_BUCKET, prefix="foo/bar", delimiter=""),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_source_object_required_flag_true(self, mock_hook):
mock_hook.return_value.exists.return_value = False
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_SINGLE_FILE,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
source_object_required=True,
)
with pytest.raises(
AirflowException, match=f"{SOURCE_OBJECTS_SINGLE_FILE} does not exist in bucket {TEST_BUCKET}"
):
operator.execute(None)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import os
import shutil
from six import StringIO
import socket
import subprocess
import tempfile
import time
import traceback
from oslo_utils import uuidutils
from selenium.webdriver.common import action_chains
from selenium.webdriver.common import by
from selenium.webdriver.common import keys
import testtools
import xvfbwrapper
from horizon.test import webdriver
from openstack_dashboard.test.integration_tests import config
from openstack_dashboard.test.integration_tests.pages import loginpage
from openstack_dashboard.test.integration_tests.regions import messages
from openstack_dashboard.test.integration_tests.video_recorder import \
VideoRecorder
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
IS_SELENIUM_HEADLESS = os.environ.get('SELENIUM_HEADLESS', False)
ROOT_PATH = os.path.dirname(os.path.abspath(config.__file__))
if not subprocess.call('which xdpyinfo > /dev/null 2>&1', shell=True):
SCREEN_SIZE = subprocess.check_output('xdpyinfo | grep dimensions',
shell=True).split()[1].split('x')
else:
SCREEN_SIZE = (None, None)
LOGGER.info("X11 isn't installed. Should use xvfb to run tests.")
def gen_random_resource_name(resource="", timestamp=True):
"""Generate random resource name using uuid and timestamp.
    Input fields are usually limited to 255 or 80 characters, hence they
    provide enough space for quite long resource names. If the maximum
    field length is more restricted, consider passing a shorter resource
    argument or omitting the timestamp by setting the timestamp argument
    to False.
"""
fields = ["horizon"]
if resource:
fields.append(resource)
if timestamp:
tstamp = time.strftime("%d-%m-%H-%M-%S")
fields.append(tstamp)
fields.append(uuidutils.generate_uuid(dashed=False))
return "_".join(fields)
@contextlib.contextmanager
def gen_temporary_file(name='', suffix='.qcow2', size=10485760):
"""Generate temporary file with provided parameters.
:param name: file name except the extension /suffix
:param suffix: file extension/suffix
:param size: size of the file to create, bytes are generated randomly
:return: path to the generated file
"""
with tempfile.NamedTemporaryFile(prefix=name, suffix=suffix) as tmp_file:
tmp_file.write(os.urandom(size))
yield tmp_file.name
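# Usage sketch (illustrative; `create_image` is a hypothetical consumer):
#     with gen_temporary_file(name='image', size=1024) as file_path:
#         create_image(file_path)
# The file is removed automatically when the context manager exits.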
class AssertsMixin(object):
def assertSequenceTrue(self, actual):
return self.assertEqual(list(actual), [True] * len(actual))
def assertSequenceFalse(self, actual):
return self.assertEqual(list(actual), [False] * len(actual))
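# Illustrative use of AssertsMixin: given statuses = [is_active(s) for s in servers]
# (hypothetical helpers), assertSequenceTrue(statuses) passes only when every
# element is True, and assertSequenceFalse(statuses) only when every element is False.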
class BaseTestCase(testtools.TestCase):
CONFIG = config.get_config()
def setUp(self):
if not os.environ.get('INTEGRATION_TESTS', False):
raise self.skipException(
"The INTEGRATION_TESTS env variable is not set.")
self._configure_log()
self.addOnException(
lambda exc_info: setattr(self, '_need_attach_test_log', True))
def cleanup():
if getattr(self, '_need_attach_test_log', None):
self._attach_test_log()
self.addCleanup(cleanup)
width, height = SCREEN_SIZE
display = '0.0'
# Start a virtual display server for running the tests headless.
if IS_SELENIUM_HEADLESS:
width, height = 1920, 1080
self.vdisplay = xvfbwrapper.Xvfb(width=width, height=height)
args = []
# workaround for memory leak in Xvfb taken from:
# http://blog.jeffterrace.com/2012/07/xvfb-memory-leak-workaround.html
args.append("-noreset")
# disables X access control
args.append("-ac")
if hasattr(self.vdisplay, 'extra_xvfb_args'):
# xvfbwrapper 0.2.8 or newer
self.vdisplay.extra_xvfb_args.extend(args)
else:
self.vdisplay.xvfb_cmd.extend(args)
self.vdisplay.start()
display = self.vdisplay.new_display
self.addCleanup(self.vdisplay.stop)
self.video_recorder = VideoRecorder(width, height, display=display)
self.video_recorder.start()
self.addOnException(
lambda exc_info: setattr(self, '_need_attach_video', True))
def cleanup():
self.video_recorder.stop()
if getattr(self, '_need_attach_video', None):
self._attach_video()
else:
self.video_recorder.clear()
self.addCleanup(cleanup)
# Increase the default Python socket timeout from nothing
# to something that will cope with slow webdriver startup times.
# This *just* affects the communication between this test process
# and the webdriver.
socket.setdefaulttimeout(60)
# Start the Selenium webdriver and setup configuration.
desired_capabilities = dict(webdriver.desired_capabilities)
desired_capabilities['loggingPrefs'] = {'browser': 'ALL'}
self.driver = webdriver.WebDriverWrapper(
desired_capabilities=desired_capabilities
)
if self.CONFIG.selenium.maximize_browser:
self.driver.maximize_window()
if IS_SELENIUM_HEADLESS: # force full screen in xvfb
self.driver.set_window_size(width, height)
self.driver.implicitly_wait(self.CONFIG.selenium.implicit_wait)
self.driver.set_page_load_timeout(
self.CONFIG.selenium.page_timeout)
self.addOnException(self._attach_page_source)
self.addOnException(self._attach_screenshot)
self.addOnException(
lambda exc_info: setattr(self, '_need_attach_browser_log', True))
def cleanup():
if getattr(self, '_need_attach_browser_log', None):
self._attach_browser_log()
self.driver.quit()
self.addCleanup(cleanup)
super(BaseTestCase, self).setUp()
def addOnException(self, exception_handler):
def wrapped_handler(exc_info):
if issubclass(exc_info[0], testtools.testcase.TestSkipped):
return
return exception_handler(exc_info)
super(BaseTestCase, self).addOnException(wrapped_handler)
def _configure_log(self):
"""Configure log to capture test logs include selenium logs in order
to attach them if test will be broken.
"""
LOGGER.handlers[:] = [] # clear other handlers to set target handler
self._log_buffer = StringIO()
stream_handler = logging.StreamHandler(stream=self._log_buffer)
stream_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stream_handler.setFormatter(formatter)
LOGGER.addHandler(stream_handler)
@property
def _test_report_dir(self):
report_dir = os.path.join(ROOT_PATH, 'test_reports',
'{}.{}'.format(self.__class__.__name__,
self._testMethodName))
if not os.path.isdir(report_dir):
os.makedirs(report_dir)
return report_dir
def _attach_page_source(self, exc_info):
source_path = os.path.join(self._test_report_dir, 'page.html')
with self.log_exception("Attach page source"):
with open(source_path, 'w') as f:
f.write(self._get_page_html_source())
def _attach_screenshot(self, exc_info):
screen_path = os.path.join(self._test_report_dir, 'screenshot.png')
with self.log_exception("Attach screenshot"):
self.driver.get_screenshot_as_file(screen_path)
def _attach_video(self, exc_info=None):
with self.log_exception("Attach video"):
if not os.path.isfile(self.video_recorder.file_path):
LOGGER.warn("Can't find video {!r}".format(
self.video_recorder.file_path))
return
shutil.move(self.video_recorder.file_path,
os.path.join(self._test_report_dir, 'video.mp4'))
def _attach_browser_log(self, exc_info=None):
browser_log_path = os.path.join(self._test_report_dir, 'browser.log')
with self.log_exception("Attach browser log"):
with open(browser_log_path, 'w') as f:
f.write(
self._unwrap_browser_log(self.driver.get_log('browser')))
def _attach_test_log(self, exc_info=None):
test_log_path = os.path.join(self._test_report_dir, 'test.log')
with self.log_exception("Attach test log"):
with open(test_log_path, 'w') as f:
f.write(self._log_buffer.getvalue().encode('utf-8'))
@contextlib.contextmanager
def log_exception(self, label):
try:
yield
except Exception:
self.addDetail(
label, testtools.content.text_content(traceback.format_exc()))
@staticmethod
def _unwrap_browser_log(_log):
def rec(log):
if isinstance(log, dict):
return log['message'].encode('utf-8')
elif isinstance(log, list):
return '\n'.join([rec(item) for item in log])
else:
return log.encode('utf-8')
return rec(_log)
def zoom_out(self, times=3):
"""Zooming out prevents different elements being driven out of xvfb
        viewport (which in Selenium>=2.50.1 prevents interaction with them).
"""
html = self.driver.find_element(by.By.TAG_NAME, 'html')
html.send_keys(keys.Keys.NULL)
zoom_out_keys = (keys.Keys.SUBTRACT,) * times
action_chains.ActionChains(self.driver).key_down(
keys.Keys.CONTROL).send_keys(*zoom_out_keys).key_up(
keys.Keys.CONTROL).perform()
def _get_page_html_source(self):
"""Gets html page source.
self.driver.page_source is not used on purpose because it does not
display html code generated/changed by javascript.
"""
html_elem = self.driver.find_element_by_tag_name("html")
return html_elem.get_attribute("innerHTML").encode("utf-8")
class TestCase(BaseTestCase, AssertsMixin):
TEST_USER_NAME = BaseTestCase.CONFIG.identity.username
TEST_PASSWORD = BaseTestCase.CONFIG.identity.password
HOME_PROJECT = BaseTestCase.CONFIG.identity.home_project
def setUp(self):
super(TestCase, self).setUp()
self.login_pg = loginpage.LoginPage(self.driver, self.CONFIG)
self.login_pg.go_to_login_page()
# TODO(schipiga): lets check that tests work without viewport changing,
# otherwise will uncomment.
# self.zoom_out()
self.home_pg = self.login_pg.login(self.TEST_USER_NAME,
self.TEST_PASSWORD)
self.home_pg.change_project(self.HOME_PROJECT)
self.assertTrue(
self.home_pg.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.home_pg.find_message_and_dismiss(messages.ERROR))
def cleanup():
if self.home_pg.is_logged_in:
self.home_pg.go_to_home_page()
self.home_pg.log_out()
self.addCleanup(cleanup)
class AdminTestCase(TestCase, AssertsMixin):
TEST_USER_NAME = TestCase.CONFIG.identity.admin_username
TEST_PASSWORD = TestCase.CONFIG.identity.admin_password
HOME_PROJECT = BaseTestCase.CONFIG.identity.admin_home_project
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import requests
import googleapiclient.discovery
import google.oauth2.credentials
import os
from frappe import _
from googleapiclient.errors import HttpError
from frappe.model.document import Document
from frappe.utils import get_request_site_address
from frappe.utils.background_jobs import enqueue
from six.moves.urllib.parse import quote
from apiclient.http import MediaFileUpload
from frappe.utils import get_backups_path, get_bench_path
from frappe.utils.backups import new_backup
from frappe.integrations.doctype.google_settings.google_settings import get_auth_url
from frappe.integrations.offsite_backup_utils import get_latest_backup_file, send_email, validate_file_size
SCOPES = "https://www.googleapis.com/auth/drive"
class GoogleDrive(Document):
def validate(self):
doc_before_save = self.get_doc_before_save()
if doc_before_save and doc_before_save.backup_folder_name != self.backup_folder_name:
self.backup_folder_id = ''
def get_access_token(self):
google_settings = frappe.get_doc("Google Settings")
if not google_settings.enable:
frappe.throw(_("Google Integration is disabled."))
if not self.refresh_token:
button_label = frappe.bold(_("Allow Google Drive Access"))
raise frappe.ValidationError(_("Click on {0} to generate Refresh Token.").format(button_label))
data = {
"client_id": google_settings.client_id,
"client_secret": google_settings.get_password(fieldname="client_secret", raise_exception=False),
"refresh_token": self.get_password(fieldname="refresh_token", raise_exception=False),
"grant_type": "refresh_token",
"scope": SCOPES
}
try:
r = requests.post(get_auth_url(), data=data).json()
except requests.exceptions.HTTPError:
button_label = frappe.bold(_("Allow Google Drive Access"))
frappe.throw(_("Something went wrong during the token generation. Click on {0} to generate a new one.").format(button_label))
return r.get("access_token")
@frappe.whitelist()
def authorize_access(reauthorize=None):
"""
	If there is no authorization code, get one from Google, then exchange it for a refresh token.
	The refresh token is stored on the Google Drive doc once the authorization code is obtained.
"""
google_settings = frappe.get_doc("Google Settings")
google_drive = frappe.get_doc("Google Drive")
redirect_uri = get_request_site_address(True) + "?cmd=frappe.integrations.doctype.google_drive.google_drive.google_callback"
if not google_drive.authorization_code or reauthorize:
if reauthorize:
frappe.db.set_value("Google Drive", None, "backup_folder_id", "")
return get_authentication_url(client_id=google_settings.client_id, redirect_uri=redirect_uri)
else:
try:
data = {
"code": google_drive.authorization_code,
"client_id": google_settings.client_id,
"client_secret": google_settings.get_password(fieldname="client_secret", raise_exception=False),
"redirect_uri": redirect_uri,
"grant_type": "authorization_code"
}
r = requests.post(get_auth_url(), data=data).json()
if "refresh_token" in r:
frappe.db.set_value("Google Drive", google_drive.name, "refresh_token", r.get("refresh_token"))
frappe.db.commit()
frappe.local.response["type"] = "redirect"
frappe.local.response["location"] = "/desk#Form/{0}".format(quote("Google Drive"))
frappe.msgprint(_("Google Drive has been configured."))
except Exception as e:
frappe.throw(e)
def get_authentication_url(client_id, redirect_uri):
return {
"url": "https://accounts.google.com/o/oauth2/v2/auth?access_type=offline&response_type=code&prompt=consent&client_id={}&include_granted_scopes=true&scope={}&redirect_uri={}".format(client_id, SCOPES, redirect_uri)
}
@frappe.whitelist()
def google_callback(code=None):
"""
Authorization code is sent to callback as per the API configuration
"""
frappe.db.set_value("Google Drive", None, "authorization_code", code)
frappe.db.commit()
authorize_access()
def get_google_drive_object():
"""
Returns an object of Google Drive.
"""
google_settings = frappe.get_doc("Google Settings")
account = frappe.get_doc("Google Drive")
credentials_dict = {
"token": account.get_access_token(),
"refresh_token": account.get_password(fieldname="refresh_token", raise_exception=False),
"token_uri": get_auth_url(),
"client_id": google_settings.client_id,
"client_secret": google_settings.get_password(fieldname="client_secret", raise_exception=False),
"scopes": "https://www.googleapis.com/auth/drive/v3"
}
credentials = google.oauth2.credentials.Credentials(**credentials_dict)
google_drive = googleapiclient.discovery.build("drive", "v3", credentials=credentials)
return google_drive, account
def check_for_folder_in_google_drive():
"""Checks if folder exists in Google Drive else create it."""
def _create_folder_in_google_drive(google_drive, account):
file_metadata = {
"name": account.backup_folder_name,
"mimeType": "application/vnd.google-apps.folder"
}
try:
folder = google_drive.files().create(body=file_metadata, fields="id").execute()
frappe.db.set_value("Google Drive", None, "backup_folder_id", folder.get("id"))
frappe.db.commit()
except HttpError as e:
frappe.throw(_("Google Drive - Could not create folder in Google Drive - Error Code {0}").format(e))
google_drive, account = get_google_drive_object()
if account.backup_folder_id:
return
backup_folder_exists = False
try:
google_drive_folders = google_drive.files().list(q="mimeType='application/vnd.google-apps.folder'").execute()
except HttpError as e:
frappe.throw(_("Google Drive - Could not find folder in Google Drive - Error Code {0}").format(e))
for f in google_drive_folders.get("files"):
if f.get("name") == account.backup_folder_name:
frappe.db.set_value("Google Drive", None, "backup_folder_id", f.get("id"))
frappe.db.commit()
backup_folder_exists = True
break
if not backup_folder_exists:
_create_folder_in_google_drive(google_drive, account)
@frappe.whitelist()
def take_backup():
"""Enqueue longjob for taking backup to Google Drive"""
enqueue("frappe.integrations.doctype.google_drive.google_drive.upload_system_backup_to_google_drive", queue='long', timeout=1500)
frappe.msgprint(_("Queued for backup. It may take a few minutes to an hour."))
def upload_system_backup_to_google_drive():
"""
Upload system backup to Google Drive
"""
# Get Google Drive Object
google_drive, account = get_google_drive_object()
# Check if folder exists in Google Drive
check_for_folder_in_google_drive()
account.load_from_db()
validate_file_size()
if frappe.flags.create_new_backup:
set_progress(1, "Backing up Data.")
backup = new_backup()
file_urls = []
file_urls.append(backup.backup_path_db)
file_urls.append(backup.backup_path_conf)
if account.file_backup:
file_urls.append(backup.backup_path_files)
file_urls.append(backup.backup_path_private_files)
else:
file_urls = get_latest_backup_file(with_files=account.file_backup)
for fileurl in file_urls:
if not fileurl:
continue
file_metadata = {
"name": fileurl,
"parents": [account.backup_folder_id]
}
try:
media = MediaFileUpload(get_absolute_path(filename=fileurl), mimetype="application/gzip", resumable=True)
except IOError as e:
frappe.throw(_("Google Drive - Could not locate - {0}").format(e))
try:
set_progress(2, "Uploading backup to Google Drive.")
google_drive.files().create(body=file_metadata, media_body=media, fields="id").execute()
except HttpError as e:
send_email(False, "Google Drive", "Google Drive", "email", error_status=e)
set_progress(3, "Uploading successful.")
frappe.db.set_value("Google Drive", None, "last_backup_on", frappe.utils.now_datetime())
send_email(True, "Google Drive", "Google Drive", "email")
return _("Google Drive Backup Successful.")
def daily_backup():
drive_settings = frappe.db.get_singles_dict('Google Drive')
if drive_settings.enable and drive_settings.frequency == "Daily":
upload_system_backup_to_google_drive()
def weekly_backup():
drive_settings = frappe.db.get_singles_dict('Google Drive')
if drive_settings.enable and drive_settings.frequency == "Weekly":
upload_system_backup_to_google_drive()
def get_absolute_path(filename):
file_path = os.path.join(get_backups_path()[2:], os.path.basename(filename))
return "{0}/sites/{1}".format(get_bench_path(), file_path)
def set_progress(progress, message):
frappe.publish_realtime("upload_to_google_drive", dict(progress=progress, total=3, message=message), user=frappe.session.user)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from importlib import import_module
import itertools
import traceback
from django.apps import apps
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_migrate_signal, emit_pre_migrate_signal
from django.db import connections, router, transaction, DEFAULT_DB_ALIAS
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.state import ProjectState
from django.db.migrations.autodetector import MigrationAutodetector
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument('app_label', nargs='?',
help='App label of an application to synchronize the state.')
parser.add_argument('migration_name', nargs='?',
help=(
'Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.'
),
)
parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.')
parser.add_argument('--fake', action='store_true', dest='fake', default=False,
help='Mark migrations as run without actually running them')
parser.add_argument('--list', '-l', action='store_true', dest='list', default=False,
help='Show a list of all known migrations and which are applied')
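    # Invocation sketch (illustrative) for the arguments defined above:
    #     python manage.py migrate                    # migrate everything
    #     python manage.py migrate myapp              # migrate one app to its latest migration
    #     python manage.py migrate myapp 0002 --fake  # mark migrations up to 0002 as applied
    #     python manage.py migrate --list             # show known migrations and their status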
def handle(self, *args, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
self.show_traceback = options.get('traceback')
self.load_initial_data = options.get('load_initial_data')
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options.get('database')
connection = connections[db]
# If they asked for a migration listing, quit main execution flow and show it
if options.get("list", False):
return self.show_migration_list(connection, [options['app_label']] if options['app_label'] else None)
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
run_syncdb = False
target_app_labels_only = True
if options['app_label'] and options['migration_name']:
app_label, migration_name = options['app_label'], options['migration_name']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations (you cannot selectively "
"sync unmigrated apps)" % app_label
)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
app_label = options['app_label']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations (you cannot selectively "
"sync unmigrated apps)" % app_label
)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
run_syncdb = True
plan = executor.migration_plan(targets)
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb and executor.loader.unmigrated_apps:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(executor.loader.unmigrated_apps))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(set(a for a, n in targets)) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(
" Unapply all migrations: ") + "%s" % (targets[0][0], )
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
# Run the syncdb phase.
# If you ever manage to get rid of this, I owe you many, many drinks.
# Note that pre_migrate is called from inside here, as it needs
# the list of models about to be installed.
if run_syncdb and executor.loader.unmigrated_apps:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
created_models = self.sync_apps(connection, executor.loader.unmigrated_apps)
else:
created_models = []
# The test runner requires us to flush after a syncdb but before migrations,
# so do that here.
if options.get("test_flush", False):
call_command(
'flush',
verbosity=max(self.verbosity - 1, 0),
interactive=False,
database=db,
reset_sequences=False,
inhibit_post_migrate=True,
)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
            # If there are changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(
" Your models have changes that are not yet reflected "
"in a migration, and so won't be applied."
))
self.stdout.write(self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
))
else:
executor.migrate(targets, plan, fake=options.get("fake", False))
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(created_models, self.verbosity, self.interactive, connection.alias)
def migration_progress_callback(self, action, migration, fake=False):
if self.verbosity >= 1:
if action == "apply_start":
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED"))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
elif action == "unapply_start":
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED"))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
def sync_apps(self, connection, app_labels):
"Runs the old syncdb-style operation on a list of app_labels."
cursor = connection.cursor()
try:
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names(cursor)
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=True))
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
# Note that if a model is unmanaged we short-circuit and never try to install it
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
create_models = set(itertools.chain(*manifest.values()))
emit_pre_migrate_signal(create_models, self.verbosity, self.interactive, connection.alias)
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with transaction.atomic(using=connection.alias, savepoint=connection.features.can_rollback_ddl):
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model\n" % (app_name, model._meta.object_name)
)
sql, references = connection.creation.sql_create_model(model, no_style(), seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(
connection.creation.sql_for_pending_references(
refto, no_style(), pending_references,
)
)
sql.extend(
connection.creation.sql_for_pending_references(
model, no_style(), pending_references
)
)
if self.verbosity >= 1 and sql:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
finally:
cursor.close()
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
try:
# Install custom SQL for the app (but only if this
# is a model we've just created)
if self.verbosity >= 1:
self.stdout.write(" Installing custom SQL...\n")
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, no_style(), connection)
if custom_sql:
if self.verbosity >= 2:
self.stdout.write(
" Installing custom SQL for %s.%s model\n" %
(app_name, model._meta.object_name)
)
try:
with transaction.atomic(using=connection.alias):
for sql in custom_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(
" Failed to install custom SQL for %s.%s model: %s\n"
% (app_name, model._meta.object_name, e)
)
if self.show_traceback:
traceback.print_exc()
else:
if self.verbosity >= 3:
self.stdout.write(
" No custom SQL for %s.%s model\n" %
(app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(" Installing indexes...\n")
# Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, no_style())
if index_sql:
if self.verbosity >= 2:
self.stdout.write(
" Installing index for %s.%s model\n" %
(app_name, model._meta.object_name)
)
savepoint = connection.features.can_rollback_ddl
try:
with transaction.atomic(using=connection.alias, savepoint=savepoint):
for sql in index_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(
" Failed to install index for %s.%s model: %s\n" %
(app_name, model._meta.object_name, e)
)
finally:
cursor.close()
# Load initial_data fixtures (unless that has been disabled)
if self.load_initial_data:
for app_label in app_labels:
call_command(
'loaddata', 'initial_data', verbosity=self.verbosity,
database=connection.alias, skip_validation=True,
app_label=app_label, hide_empty=True,
)
return created_models
def show_migration_list(self, connection, app_names=None):
"""
Shows a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
invalid_apps = []
for app_name in app_names:
if app_name not in loader.migrated_apps:
invalid_apps.append(app_name)
if invalid_apps:
raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
# If we didn't print anything, then a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.MIGRATE_FAILURE)
|
|
# -*- coding: utf-8 -*-
"""
Package :mod:`~xrt.backends.raycing` provides the internal backend of xrt. It
defines beam sources in the module :mod:`~xrt.backends.raycing.sources`,
rectangular and round apertures in :mod:`~xrt.backends.raycing.apertures`,
optical elements in :mod:`~xrt.backends.raycing.oes`, material properties
(essentially reflectivity, transmittivity and absorption coefficient) for
interfaces and crystals in :mod:`~xrt.backends.raycing.materials` and screens
in :mod:`~xrt.backends.raycing.screens`.
.. _scriptingRaycing:
Coordinate systems
------------------
The following coordinate systems are considered (always right-handed):
1) *The global coordinate system*. It is arbitrary (user-defined) with one
requirement driven by code simplification: Z-axis is vertical. For example,
the system origin of Alba synchrotron is in the center of the ring at the
ground level with Y-axis northward, Z upright and the units in mm.
.. note::
The positions of all optical elements, sources, screens etc. are given
in the global coordinate system. This feature simplifies the beamline
alignment when 3D CAD models are available.
2) *The local systems*.
a) *of the beamline*. The local Y direction (the direction of the source)
is determined by *azimuth* parameter of
:class:`~xrt.backends.raycing.BeamLine` -- the angle measured cw from the
global Y axis. The local beamline Z is also vertical and upward. The
local beamline X is to the right. At *azimuth* = 0 the global system and
the local beamline system are parallel to each other. In most of the
supplied examples the global system and the local beamline system
coincide.
b) *of an optical element*. The origin is on the optical surface. Z is
out-of-surface. At pitch, roll and yaw all zeros the local oe system
and the local beamline system are parallel to each other.
.. note::
Pitch, roll and yaw rotations (correspondingly: Rx, Ry and Rz) are
defined relative to the local axes of the optical element. The local
axes rotate together with the optical element!
.. note::
The rotations are done in the following default sequence: yaw, roll,
pitch. It can be changed by the user for any particular optical
element. Sometimes it is necessary to define misalignment angles in
addition to the positional angles. Because rotations do not commute,
an extra set of angles may become unavoidable, which are applied
after the positional rotations.
See :class:`~xrt.backends.raycing.oes.OE`.
The user-supplied functions for the surface height (z) and the normal as
functions of (x, y) are defined in the local oe system.
c) *of other beamline elements: sources, apertures, screens*. Z is upward
and Y is along the beam line. The origin is given by the user. Usually it
is on the original beam line.
xrt sequentially transforms beams (instances of
:class:`~xrt.backends.raycing.sources.Beam`) -- containers of arrays which hold
beam properties for each ray. Geometrical beam properties such as *x, y, z*
(ray origins) and *a, b, c* (directional cosines) as well as polarization
characteristics depend on the above coordinate systems. Therefore, beams are
usually represented by two different objects: one in the global and one in a
local system.
.. imagezoom:: _images/axes.png
Units
-----
For the internal calculations, lengths are assumed to be in mm, although for
reflection geometries and simple Bragg cases (thick crystals) this convention
is not used. Angles are unitless (radians). Energy is in eV.
For plotting, the user may select units and conversion factors. The latter are
usually automatically deduced from the units.
Beam categories
---------------
xrt discriminates rays by several categories:
a) ``good``: reflected within the working optical surface;
b) ``out``: reflected outside of the working optical surface, i.e. outside of
a metal stripe on a mirror;
c) ``over``: propagated over the surface without intersection;
d) ``dead``: arrived below the optical surface and thus absorbed by the OE.
This distinction simplifies the adjustment of entrance and exit slits. The
user supplies `physical` and `optical` limits, where the latter is used to
define the ``out`` category (for rays between `physical` and `optical` limits).
An alarm is triggered if the fraction of dead rays exceeds a specified level.
Scripting in python
-------------------
The user of :mod:`~xrt.backends.raycing` must do the following:
1) Instantiate class :class:`~xrt.backends.raycing.BeamLine` and fill it with
sources, optical elements, screens etc.
2) Create a module-level function that returns a dictionary of beams -- the
instances of :class:`~xrt.backends.raycing.sources.Beam`. Assign this
function to the module variable `xrt.backends.raycing.run.run_process`.
The beams should be obtained by the methods shine() of a source, expose() of
a screen, reflect() or multiple_reflect() of an optical element, propagate()
of an aperture.
3) Use the keys in this dictionary for creating the plots (instances of
:class:`~xrt.plotter.XYCPlot`). Note that at the time of instantiation the
plots are just empty placeholders for the future 2D and 1D histograms.
4) Run :func:`~xrt.runner.run_ray_tracing()` function for the created plots.
Additionally, the user may define a generator that will run a loop of ray
tracing for changing geometry (mimics a real scan) or for different material
properties etc. The generator should modify the beamline elements and output
file names of the plots before *yield*. After the *yield* the plots are ready
and the generator may use their fields, e.g. *intensity* or *dE* or *dy* or
others to prepare a scan plot. Typically, this sequence is within a loop; after
the loop the user may prepare the final scan plot using matplotlib
functionality. The generator is given to :func:`~xrt.runner.run_ray_tracing()`
as a parameter.
See the supplied examples."""
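# A minimal scripting sketch of the above workflow, assuming a simple source
# and a screen (the class names `GeometricSource` and `Screen` and all
# parameter values below are illustrative; consult the supplied examples for
# working scripts):
#
#     import xrt.backends.raycing as raycing
#     import xrt.backends.raycing.sources as rsources
#     import xrt.backends.raycing.screens as rscreens
#     import xrt.backends.raycing.run as rrun
#     import xrt.plotter as xrtplot
#     import xrt.runner as xrtrun
#
#     beamLine = raycing.BeamLine(azimuth=0., height=0.)
#     rsources.GeometricSource(beamLine, 'source', center=(0, 0, 0))
#     rscreens.Screen(beamLine, 'screen', center=(0, 10000, 0))
#
#     def run_process(beamLine):
#         beamSource = beamLine.sources[0].shine()
#         beamScreen = beamLine.screens[0].expose(beamSource)
#         return {'screen': beamScreen}
#     rrun.run_process = run_process
#
#     plot = xrtplot.XYCPlot('screen')
#     xrtrun.run_ray_tracing([plot], repeats=10, beamLine=beamLine)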
from __future__ import print_function
import types
import sys
import numpy as np
from itertools import compress
from collections import OrderedDict
import re
import copy
import inspect
__module__ = "raycing"
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "26 Mar 2016"
_DEBUG_ = True # If False, exceptions inside the module are ignored
_VERBOSITY_ = 10 # [0-100] Regulates the level of diagnostics printout
try: # for Python 3 compatibility:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
unicode = str
basestring = (str, bytes)
else:
# 'unicode' exists, must be Python 2
unicode = unicode
basestring = basestring
from .physconsts import SIE0 # analysis:ignore
stateGood, stateOut, stateOver = 1, 2, 3
zEps = 1e-12 # mm: target accuracy in z while searching for intersection
misalignmentTolerated = 0.1 # for automatic checking of oe center position
accuracyInPosition = 0.1 # accuracy for positioning of oe
dt = 1e-5 # mm: margin around OE within which the intersection is searched
ds = 0. # mm: margin used in multiple reflections
nrays = 100000
maxIteration = 100 # max number of iterations while searching for intersection
maxHalfSizeOfOE = 1000.
maxDepthOfOE = 100.
# maxZDeviationAtOE = 100.
# colors of the rays in a 0-10 range (red-violet)
hueGood = 3.
hueOut = 8.
hueOver = 1.6
hueDead = 0.2
hueMin = 0.
hueMax = 10.
targetOpenCL = 'auto'
precisionOpenCL = 'auto'
#targetOpenCL = (0, 0)
#precisionOpenCL = 'float32'
allBeamFields = ('energy', 'x', 'xprime', 'y', 'z', 'zprime', 'xzprime',
'a', 'b', 'path', 'phase_shift', 'reflection_number', 'order',
'circular_polarization_rate', 'polarization_degree',
'polarization_psi', 'ratio_ellipse_axes', 's', 'r',
'theta', 'phi', 'incidence_angle',
'elevation_d', 'elevation_x', 'elevation_y', 'elevation_z',
'Ep_amp', 'Ep_phase', 'Es_amp', 'Es_phase')
def is_sequence(arg):
"""Checks whether *arg* is a sequence."""
result = (not hasattr(arg, "strip") and hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
if result:
try:
arg[0]
except IndexError:
result = False
if result:
result = not isinstance(arg, (basestring, unicode))
return result
def distance_xy(p1, p2):
"""Calculates 2D distance between p1 and p2. p1 and p2 are vectors of
length >= 2."""
return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)**0.5
def distance_xyz(p1, p2):
"""Calculates 2D distance between p1 and p2. p1 and p2 are vectors of
length >= 3."""
return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)**0.5
def rotate_x(y, z, cosangle, sinangle):
"""3D rotaion around *x* (pitch). *y* and *z* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *yNew, zNew*."""
return cosangle*y - sinangle*z, sinangle*y + cosangle*z
def rotate_y(x, z, cosangle, sinangle):
"""3D rotaion around *y* (roll). *x* and *z* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *xNew, zNew*."""
return cosangle*x + sinangle*z, -sinangle*x + cosangle*z
def rotate_z(x, y, cosangle, sinangle):
"""3D rotaion around *z*. *x* and *y* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *xNew, yNew*."""
return cosangle*x - sinangle*y, sinangle*x + cosangle*y
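# For example (values approximate due to floating point), rotating the point
# (x, y) = (1, 0) by +90 deg about z maps the x axis onto the y axis:
#
#     cA, sA = np.cos(np.pi/2), np.sin(np.pi/2)
#     rotate_z(1.0, 0.0, cA, sA)  # -> (~0.0, 1.0)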
def rotate_beam(beam, indarr=None, rotationSequence='RzRyRx',
pitch=0, roll=0, yaw=0, skip_xyz=False, skip_abc=False,
is2ndXtal=False):
"""Rotates the *beam* indexed by *indarr* by the angles *yaw, roll, pitch*
in the sequence given by *rotationSequence*. A leading '-' symbol of
    *rotationSequence* reverses the sequence.
"""
angles = {'z': yaw, 'y': roll, 'x': pitch}
rotates = {'z': rotate_z, 'y': rotate_y, 'x': rotate_x}
if not skip_xyz:
coords1 = {'z': beam.x, 'y': beam.x, 'x': beam.y}
coords2 = {'z': beam.y, 'y': beam.z, 'x': beam.z}
if not skip_abc:
vcomps1 = {'z': beam.a, 'y': beam.a, 'x': beam.b}
vcomps2 = {'z': beam.b, 'y': beam.c, 'x': beam.c}
if rotationSequence[0] == '-':
seq = rotationSequence[6] + rotationSequence[4] + rotationSequence[2]
else:
seq = rotationSequence[1] + rotationSequence[3] + rotationSequence[5]
for s in seq:
angle, rotate = angles[s], rotates[s]
if not skip_xyz:
c1, c2 = coords1[s], coords2[s]
if not skip_abc:
v1, v2 = vcomps1[s], vcomps2[s]
if angle != 0:
cA = np.cos(angle)
sA = np.sin(angle)
if indarr is None:
indarr = slice(None)
if not skip_xyz:
c1[indarr], c2[indarr] = rotate(c1[indarr], c2[indarr], cA, sA)
if not skip_abc:
v1[indarr], v2[indarr] = rotate(v1[indarr], v2[indarr], cA, sA)
def rotate_xyz(x, y, z, indarr=None, rotationSequence='RzRyRx',
pitch=0, roll=0, yaw=0):
"""Rotates the arrays *x*, *y* and *z* indexed by *indarr* by the angles
*yaw, roll, pitch* in the sequence given by *rotationSequence*. A leading
    '-' symbol of *rotationSequence* reverses the sequence.
"""
angles = {'z': yaw, 'y': roll, 'x': pitch}
rotates = {'z': rotate_z, 'y': rotate_y, 'x': rotate_x}
coords1 = {'z': x, 'y': x, 'x': y}
coords2 = {'z': y, 'y': z, 'x': z}
if rotationSequence[0] == '-':
seq = rotationSequence[6] + rotationSequence[4] + rotationSequence[2]
else:
seq = rotationSequence[1] + rotationSequence[3] + rotationSequence[5]
for s in seq:
angle, rotate = angles[s], rotates[s]
c1, c2 = coords1[s], coords2[s]
if angle != 0:
cA = np.cos(angle)
sA = np.sin(angle)
if indarr is None:
indarr = slice(None)
c1[indarr], c2[indarr] = rotate(c1[indarr], c2[indarr], cA, sA)
return x, y, z
def rotate_point(point, rotationSequence='RzRyRx', pitch=0, roll=0, yaw=0):
"""Rotates the *point* (3-sequence) by the angles *yaw, roll, pitch*
in the sequence given by *rotationSequence*. A leading '-' symbol of
    *rotationSequence* reverses the sequence.
"""
angles = {'z': yaw, 'y': roll, 'x': pitch}
rotates = {'z': rotate_z, 'y': rotate_y, 'x': rotate_x}
ind1 = {'z': 0, 'y': 0, 'x': 1}
ind2 = {'z': 1, 'y': 2, 'x': 2}
newp = [coord for coord in point]
if rotationSequence[0] == '-':
seq = rotationSequence[6] + rotationSequence[4] + rotationSequence[2]
else:
seq = rotationSequence[1] + rotationSequence[3] + rotationSequence[5]
for s in seq:
angle, rotate = angles[s], rotates[s]
if angle != 0:
cA = np.cos(angle)
sA = np.sin(angle)
newp[ind1[s]], newp[ind2[s]] = rotate(
newp[ind1[s]], newp[ind2[s]], cA, sA)
return newp
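# Because rotations do not commute, the order encoded in *rotationSequence*
# matters. For instance (values approximate), with the default 'RzRyRx' a
# point on the y axis rotated by yaw = pitch = 90 deg ends up on -x, whereas
# the reversed sequence '-RzRyRx' (pitch first, yaw last) sends it to +z:
#
#     rotate_point([0, 1, 0], yaw=np.pi/2, pitch=np.pi/2)             # -> [~-1, 0, 0]
#     rotate_point([0, 1, 0], '-RzRyRx', yaw=np.pi/2, pitch=np.pi/2)  # -> [0, 0, ~1]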
def global_to_virgin_local(bl, beam, lo, center=None, part=None):
"""Transforms *beam* from the global to the virgin (i.e. with pitch, roll
and yaw all zeros) local system. The resulting local beam is *lo*. If
    *center* is provided, the rotation Rz is about it, otherwise it is about the
    origin of *beam*. The beam arrays can be sliced by the *part* indexing array.
*bl* is an instance of :class:`BeamLine`"""
if part is None:
part = np.ones(beam.x.shape, dtype=np.bool)
if center is None:
center = [0, 0, 0]
lo.x[part] = beam.x[part] - center[0]
lo.y[part] = beam.y[part] - center[1]
lo.z[part] = beam.z[part] - center[2]
if isinstance(bl, BeamLine):
a0, b0 = bl.sinAzimuth, bl.cosAzimuth
if a0 == 0:
lo.a[part] = beam.a[part]
lo.b[part] = beam.b[part]
else:
lo.x[part], lo.y[part] = rotate_z(lo.x[part], lo.y[part], b0, a0)
lo.a[part], lo.b[part] = \
rotate_z(beam.a[part], beam.b[part], b0, a0)
lo.c[part] = beam.c[part] # unchanged
elif isinstance(bl, (list, tuple)):
lx, ly, lz = bl
xyz = lo.x[part], lo.y[part], lo.z[part]
lo.x[part], lo.y[part], lo.z[part] = (
sum(c*b for c, b in zip(lx, xyz)),
sum(c*b for c, b in zip(ly, xyz)),
sum(c*b for c, b in zip(lz, xyz)))
abc = beam.a[part], beam.b[part], beam.c[part]
lo.a[part], lo.b[part], lo.c[part] = (
sum(c*b for c, b in zip(lx, abc)),
sum(c*b for c, b in zip(ly, abc)),
sum(c*b for c, b in zip(lz, abc)))
def virgin_local_to_global(bl, vlb, center=None, part=None,
skip_xyz=False, skip_abc=False, is2ndXtal=False):
"""Transforms *vlb* from the virgin (i.e. with pitch, roll and yaw all
zeros) local to the global system and overwrites the result to *vlb*. If
    *center* is provided, the rotation Rz is about it, otherwise it is about the
    origin of *vlb*. The beam arrays can be sliced by the *part* indexing array.
*bl* is an instance of :class:`BeamLine`"""
if part is None:
part = np.ones(vlb.x.shape, dtype=np.bool)
a0, b0 = bl.sinAzimuth, bl.cosAzimuth
if a0 != 0:
if not skip_abc:
vlb.a[part], vlb.b[part] = rotate_z(
vlb.a[part], vlb.b[part], b0, -a0)
if not skip_xyz:
vlb.x[part], vlb.y[part] = rotate_z(
vlb.x[part], vlb.y[part], b0, -a0)
if (center is not None) and (not skip_xyz):
vlb.x[part] += center[0]
vlb.y[part] += center[1]
vlb.z[part] += center[2]
def xyz_from_xz(bl, x=None, z=None):
if isinstance(x, basestring) and isinstance(z, basestring):
return 'auto'
if isinstance(x, (list, tuple, np.ndarray)):
norm = sum([xc**2 for xc in x])**0.5
retx = [xc/norm for xc in x]
else:
retx = bl.cosAzimuth, -bl.sinAzimuth, 0.
if isinstance(z, (list, tuple, np.ndarray)):
norm = sum([zc**2 for zc in z])**0.5
retz = [zc/norm for zc in z]
else:
retz = 0., 0., 1.
xdotz = np.dot(retx, retz)
if abs(xdotz) > 1e-8:
print('x and z must be orthogonal, got xz={0:.4e}'.format(xdotz))
rety = np.cross(retz, retx)
return [retx, rety, retz]
def check_alarm(self, incoming, beam):
"""Appends an alarm string to the list of beamline alarms if the alarm
condition is fulfilled."""
incomingSum = incoming.sum()
if incomingSum > 0:
badSum = (beam.state == self.lostNum).sum()
ratio = float(badSum)/incomingSum
if ratio > self.alarmLevel:
            alarmStr = ('{0}{1} absorbs {2:.2%} of rays ' +
'at {3:.0%} alarm level!').format(
'Alarm! ', self.name, ratio, self.alarmLevel)
self.bl.alarms.append(alarmStr)
else:
self.bl.alarms.append('no incident rays to {0}!'.format(self.name))
def get_energy(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.E
def get_x(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.x
def get_y(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.y
def get_z(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.z
def get_s(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.s
def get_phi(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.phi
def get_r(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.r
def get_a(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.a
def get_b(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.b
def get_xprime(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.a / beam.b
def get_zprime(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.c / beam.b
def get_xzprime(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return (beam.a**2 + beam.c**2)**0.5 / beam.b
def get_path(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.path
def get_order(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.order if hasattr(beam, 'order') else beam.state
def get_reflection_number(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.nRefl if hasattr(beam, 'nRefl') else beam.state
def get_elevation_d(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationD
# if hasattr(beam, 'elevationD') else np.zeros_like(beam.x)
def get_elevation_x(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationX # if hasattr(beam, 'elevationX') else beam.x
def get_elevation_y(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationY # if hasattr(beam, 'elevationY') else beam.y
def get_elevation_z(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationZ # if hasattr(beam, 'elevationZ') else beam.z
def get_Es_amp(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.abs(beam.Es)
def get_Ep_amp(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.abs(beam.Ep)
def get_Es_phase(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.angle(beam.Es)
# return np.arctan2(beam.Es.imag, beam.Es.real)
def get_Ep_phase(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.angle(beam.Ep)
# return np.arctan2(beam.Ep.imag, beam.Ep.real)
def get_polarization_degree(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
II = (beam.Jss + beam.Jpp)
II[II <= 0] = 1.
pd = np.sqrt((beam.Jss-beam.Jpp)**2 + 4.*abs(beam.Jsp)**2) / II
pd[II <= 0] = 0.
return pd
def get_ratio_ellipse_axes(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
dI2 = (beam.Jss - beam.Jpp)**2
return 2. * beam.Jsp.imag /\
(np.sqrt(dI2 + 4*abs(beam.Jsp)**2) + np.sqrt(dI2 + 4*beam.Jsp.real**2))
def get_circular_polarization_rate(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
II = (beam.Jss + beam.Jpp)
II[II <= 0] = 1.
cpr = 2. * beam.Jsp.imag / II
cpr[II <= 0] = 0.
return cpr
def get_polarization_psi(beam):
"""Angle between the semimajor axis of the polarization ellipse relative to
the s polarization. Used for retrieving data for x-, y- or c-axis of a
plot."""
# return 0.5 * np.arctan2(2.*beam.Jsp.real, beam.Jss-beam.Jpp) * 180 / np.pi
return 0.5 * np.arctan2(2.*beam.Jsp.real, beam.Jss-beam.Jpp)
def get_phase_shift(beam): # in units of pi!
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.angle(beam.Jsp) / np.pi
def get_incidence_angle(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.theta if hasattr(beam, 'theta') else np.zeros_like(beam.x)
get_theta = get_incidence_angle
def get_output(plot, beamsReturnedBy_run_process):
"""Used by :mod:`multipro` for creating images of *plot* - instance of
:class:`XYCPlot`. *beamsReturnedBy_run_process* is a dictionary of
:class:`Beam` instances returned by user-defined :func:`run_process`.
:func:`get_output` creates an indexing array corresponding to the requested
properties of rays in *plot*. It also calculates the number of rays with
various properties defined in `raycing` backend.
"""
beam = beamsReturnedBy_run_process[plot.beam]
if plot.beamState is None:
beamState = beam.state
else:
beamState = beamsReturnedBy_run_process[plot.beamState].state
nrays = len(beam.x)
locAlive = (beamState > 0).sum()
part = np.zeros(nrays, dtype=np.bool)
locGood = 0
locOut = 0
locOver = 0
locDead = 0
for rayFlag in plot.rayFlag:
locPart = beamState == rayFlag
if rayFlag == 1:
locGood = locPart.sum()
if rayFlag == 2:
locOut = locPart.sum()
if rayFlag == 3:
locOver = locPart.sum()
if rayFlag < 0:
locDead += locPart.sum()
part = part | locPart
if hasattr(beam, 'accepted'):
locAccepted = beam.accepted
locAcceptedE = beam.acceptedE
locSeeded = beam.seeded
locSeededI = beam.seededI
else:
locAccepted = 0
locAcceptedE = 0
locSeeded = 0
locSeededI = 0
if hasattr(beam, 'displayAsAbsorbedPower'):
plot.displayAsAbsorbedPower = True
if isinstance(plot.xaxis.data, types.FunctionType):
x = plot.xaxis.data(beam) * plot.xaxis.factor
elif isinstance(plot.xaxis.data, np.ndarray):
x = plot.xaxis.data * plot.xaxis.factor
else:
raise ValueError('cannot find data for x!')
if isinstance(plot.yaxis.data, types.FunctionType):
y = plot.yaxis.data(beam) * plot.yaxis.factor
elif isinstance(plot.yaxis.data, np.ndarray):
y = plot.yaxis.data * plot.yaxis.factor
else:
raise ValueError('cannot find data for y!')
if plot.caxis.useCategory:
cData = np.zeros_like(beamState)
cData[beamState == stateGood] = hueGood
cData[beamState == stateOut] = hueOut
cData[beamState == stateOver] = hueOver
cData[beamState < 0] = hueDead
intensity = np.ones_like(x)
flux = intensity
else:
if plot.beamC is None:
beamC = beam
else:
beamC = beamsReturnedBy_run_process[plot.beamC]
if isinstance(plot.caxis.data, types.FunctionType):
cData = plot.caxis.data(beamC) * plot.caxis.factor
elif isinstance(plot.caxis.data, np.ndarray):
cData = plot.caxis.data * plot.caxis.factor
else:
raise ValueError('cannot find data for cData!')
if plot.fluxKind.startswith('power'):
intensity = ((beam.Jss + beam.Jpp) *
beam.E * beam.accepted / beam.seeded * SIE0)
elif plot.fluxKind.startswith('s'):
intensity = beam.Jss
elif plot.fluxKind.startswith('p'):
intensity = beam.Jpp
elif plot.fluxKind.startswith('+-45'):
intensity = 2*beam.Jsp.real
elif plot.fluxKind.startswith('left-right'):
intensity = 2*beam.Jsp.imag
elif plot.fluxKind.startswith('E'):
if plot.fluxKind.startswith('Es'):
intensity = beam.Es
flux = beam.Jss
elif plot.fluxKind.startswith('Ep'):
intensity = beam.Ep
flux = beam.Jpp
else:
intensity = beam.Es + beam.Ep
flux = beam.Jss + beam.Jpp
else:
intensity = beam.Jss + beam.Jpp
if not plot.fluxKind.startswith('E'):
flux = intensity
return x[part], y[part], intensity[part], flux[part], cData[part], nrays,\
locAlive, locGood, locOut, locOver, locDead,\
locAccepted, locAcceptedE, locSeeded, locSeededI
def auto_units_angle(angle, defaultFactor=1.):
if isinstance(angle, basestring):
if len(re.findall("auto", angle)) > 0:
return angle
elif len(re.findall("mrad", angle)) > 0:
return float(angle.split("m")[0].strip())*1e-3
elif len(re.findall("urad", angle)) > 0:
return float(angle.split("u")[0].strip())*1e-6
elif len(re.findall("nrad", angle)) > 0:
return float(angle.split("n")[0].strip())*1e-9
elif len(re.findall("rad", angle)) > 0:
return float(angle.split("r")[0].strip())
elif len(re.findall("deg", angle)) > 0:
return np.radians(float(angle.split("d")[0].strip()))
else:
print("Could not identify the units")
return angle
elif angle is None or isinstance(angle, list):
return angle
else:
return angle * defaultFactor
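# A few illustrative conversions (the unit is recognized from its substring,
# so the numeric part must come first):
#
#     auto_units_angle('0.5 mrad')  # -> 5e-4 (rad)
#     auto_units_angle('30 deg')    # -> ~0.5236 (rad)
#     auto_units_angle('auto')      # -> 'auto' (passed through unchanged)
#     auto_units_angle(1.0)         # -> 1.0 (numbers are scaled by *defaultFactor*)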
def append_to_flow(meth, bOut, frame):
oe = meth.__self__
if oe.bl is None:
return
if oe.bl.flowSource != 'legacy':
return
argValues = inspect.getargvalues(frame)
fdoc = re.findall(r"Returned values:.*", meth.__doc__)
if fdoc:
fdoc = fdoc[0].replace("Returned values: ", '').split(',')
if 'needNewGlobal' in argValues.args[1:]:
if argValues.locals['needNewGlobal']:
fdoc.insert(0, 'beamGlobal')
kwArgsIn = OrderedDict()
kwArgsOut = OrderedDict()
for arg in argValues.args[1:]:
if str(arg) == 'beam':
kwArgsIn[arg] = id(argValues.locals[arg])
else:
kwArgsIn[arg] = argValues.locals[arg]
for outstr, outbm in zip(list(fdoc), bOut):
kwArgsOut[outstr.strip()] = id(outbm)
oe.bl.flow.append([oe.name, meth.__func__, kwArgsIn, kwArgsOut])
def is_auto_align_required(oe):
needAutoAlign = False
for autoParam in ["_center", "_pitch", "_bragg"]:
naParam = autoParam.strip("_")
if hasattr(oe, autoParam) and hasattr(oe, naParam):
if str(getattr(oe, autoParam)) == str(getattr(oe, naParam)):
if _VERBOSITY_ > 20:
print(autoParam, str(getattr(oe, autoParam)),
naParam, str(getattr(oe, naParam)))
needAutoAlign = True
if _VERBOSITY_ > 10:
print("{0}.{1} requires auto-calculation".format(
oe.name, naParam))
return needAutoAlign
def set_name(elementClass, name):
if name not in [None, 'None', '']:
elementClass.name = name
elif not hasattr(elementClass, 'name'):
elementClass.name = '{0}{1}'.format(elementClass.__class__.__name__,
elementClass.ordinalNum)
class BeamLine(object):
u"""
Container class for beamline components. It also defines the beam line
direction and height."""
class aBeam(object):
def __init__(self):
for prop in ['a', 'b', 'c', 'x', 'y', 'z', 'E']:
setattr(self, prop, np.zeros(2))
def __init__(self, azimuth=0., height=0., alignE='auto'):
u"""
*azimuth*: float
Is counted in cw direction from the global Y axis. At
*azimuth* = 0 the local Y coincides with the global Y.
*height*: float
Beamline height in the global system.
*alignE*: float or 'auto'
Energy for automatic alignment in [eV]. If 'auto', alignment energy
is defined as the middle of the Source energy range.
Plays a role if the *pitch* or *bragg* parameters of the energy
dispersive optical elements were set to 'auto'.
"""
self.azimuth = azimuth
# self.sinAzimuth = np.sin(azimuth) # a0
# self.cosAzimuth = np.cos(azimuth) # b0
self.height = height
self.alignE = alignE
self.sources = []
self.oes = []
self.slits = []
self.screens = []
self.alarms = []
self.name = ''
self.oesDict = OrderedDict()
self.flow = []
self.materialsDict = OrderedDict()
self.beamsDict = OrderedDict()
self.flowSource = 'legacy'
self.forceAlign = False
self.beamsRevDict = OrderedDict()
self.beamsRevDictUsed = {}
self.blViewer = None
self.statusSignal = None
@property
def azimuth(self):
return self._azimuth
@azimuth.setter
def azimuth(self, value):
self._azimuth = value
self.sinAzimuth = np.sin(value)
self.cosAzimuth = np.cos(value)
def orient_along_global_Y(self, center='auto'):
if center == 'auto':
center0 = self.sources[0].center
a0, b0 = self.sinAzimuth, self.cosAzimuth
for oe in self.sources + self.oes + self.slits + self.screens:
newC = [c-c0 for c, c0 in zip(oe.center, center0)]
newC[0], newC[1] = rotate_z(newC[0], newC[1], b0, a0)
oe.center = newC
if hasattr(oe, 'jack1'):
oe.jack1 = [c-c0 for c, c0 in zip(oe.jack1, center0)]
oe.jack1[0], oe.jack1[1] = \
rotate_z(oe.jack1[0], oe.jack1[1], b0, a0)
if hasattr(oe, 'jack2'):
oe.jack2 = [c-c0 for c, c0 in zip(oe.jack2, center0)]
oe.jack2[0], oe.jack2[1] = \
rotate_z(oe.jack2[0], oe.jack2[1], b0, a0)
if hasattr(oe, 'jack3'):
oe.jack3 = [c-c0 for c, c0 in zip(oe.jack3, center0)]
oe.jack3[0], oe.jack3[1] = \
rotate_z(oe.jack3[0], oe.jack3[1], b0, a0)
self.azimuth = 0
def prepare_flow(self):
def _warning(v1=None, v2=None):
if v1 is None or v2 is None:
addw = ""
else:
addw = "\nThis beam has been used for {0} and is attempted"\
" for {1}.".format(v1, v2)
print("Warning: the flow seems corrupt. Make sure each propagation"
" method assigns returned beams to local variables." + addw)
if self.flowSource != 'legacy':
return
frame = inspect.currentframe()
localsDict = frame.f_back.f_locals
globalsDict = frame.f_back.f_globals
for objectName, memObject in globalsDict.items():
if len(re.findall('raycing.materials', str(type(memObject)))) > 0:
self.materialsDict[objectName] = memObject
for objectName, memObject in localsDict.items():
if len(re.findall('sources_beams.Beam', str(type(memObject)))) > 0:
self.beamsDict[objectName] = memObject
self.beamsRevDict[id(memObject)] = objectName
if objectName == 'outDict':
for odObjectName, odMemObject in memObject.items():
self.beamsDict[odObjectName] = odMemObject
self.beamsRevDict[id(odMemObject)] = odObjectName
if self.flow is not None and len(self.beamsRevDict) > 0:
for segment in self.flow:
for iseg in [2, 3]:
for argName, argVal in segment[iseg].items():
if len(re.findall('beam', str(argName))) > 0:
if iseg == 3:
if argVal in self.beamsRevDictUsed:
_warning(self.beamsRevDictUsed[argVal],
segment[0])
self.beamsRevDictUsed[argVal] = segment[0]
try:
segment[iseg][argName] =\
self.beamsRevDict[argVal]
except KeyError:
segment[iseg][argName] = 'beamTmp'
_warning()
self.flowSource = 'prepared_to_run'
def auto_align(self, oe, beam):
if self.flowSource == 'Qook':
self.forceAlign = True
if not (self.forceAlign or is_auto_align_required(oe)):
return
autoCenter = [False] * 3
autoPitch = autoBragg = False
alignE = self._alignE if hasattr(self, '_alignE') else self.alignE
if hasattr(oe, '_center'):
autoCenter = [x == 'auto' for x in oe._center]
if hasattr(oe, '_pitch'):
try:
if isinstance(oe._pitch, (list, tuple)):
alignE = float(oe._pitch[-1])
autoPitch = True
except:
print("Automatic Bragg angle calculation failed.")
raise
if hasattr(oe, '_bragg'):
try:
if isinstance(oe._bragg, (list, tuple)):
alignE = float(oe._bragg[-1])
autoBragg = True
except:
print("Automatic Bragg angle calculation failed.")
raise
if any(autoCenter) or autoPitch or autoBragg:
good = (beam.state == 1) | (beam.state == 2)
if self.flowSource == 'Qook':
beam.state[0] = 1
# beam.E[0] = alignE
intensity = beam.Jss[good] + beam.Jpp[good]
totalI = np.sum(intensity)
inBeam = self.aBeam()
for fieldName in ['x', 'y', 'z', 'a', 'b', 'c']:
field = getattr(beam, fieldName)
if totalI == 0:
fNorm = 1.
else:
fNorm = np.sum(field[good] * intensity) / totalI
try:
setattr(inBeam, fieldName,
np.ones(2) * fNorm)
if self.flowSource == 'Qook':
field[0] = fNorm
setattr(inBeam, fieldName, field)
except:
print("Cannot find direction for automatic alignment.")
raise
dirNorm = np.sqrt(inBeam.a[0]**2 + inBeam.b[0]**2 + inBeam.c[0]**2)
inBeam.a[0] /= dirNorm
inBeam.b[0] /= dirNorm
inBeam.c[0] /= dirNorm
if self.flowSource == 'Qook':
beam.a[0] /= dirNorm
beam.b[0] /= dirNorm
beam.c[0] /= dirNorm
if any(autoCenter):
bStartC = np.array([inBeam.x[0], inBeam.y[0], inBeam.z[0]])
bStartDir = np.array([inBeam.a[0], inBeam.b[0], inBeam.c[0]])
fixedCoord = np.where(np.invert(np.array(autoCenter)))[0]
autoCoord = np.where(autoCenter)[0]
for dim in fixedCoord:
if np.abs(bStartDir[dim]) > 1e-3:
plNorm = np.squeeze(np.identity(3)[dim, :])
newCenter = bStartC - (np.dot(
bStartC, plNorm) - oe.center[dim]) /\
np.dot(bStartDir, plNorm) * bStartDir
if np.linalg.norm(newCenter - bStartC) > 0:
break
for dim in autoCoord:
oe.center[dim] = newCenter[dim]
if _VERBOSITY_ > 0:
print(oe.name, "center:", oe.center)
if autoBragg or autoPitch:
if self.flowSource == 'Qook':
inBeam.E[0] = alignE
try:
if is_sequence(oe.material):
mat = oe.material[oe.curSurface]
else:
mat = oe.material
braggT = mat.get_Bragg_angle(alignE)
alphaT = 0.
lauePitch = 0.
if mat.kind == 'multilayer':
braggT += -mat.get_dtheta(alignE)
else:
alphaT = 0 if oe.alpha is None else oe.alpha
braggT += -mat.get_dtheta(alignE, alphaT)
if mat.geom.startswith('Laue'):
lauePitch = 0.5 * np.pi
loBeam = copy.deepcopy(inBeam) # Beam(copyFrom=inBeam)
global_to_virgin_local(self, inBeam, loBeam, center=oe.center)
rotate_beam(loBeam, roll=-(oe.positionRoll + oe.roll),
yaw=-oe.yaw, pitch=0)
theta0 = np.arctan2(-loBeam.c[0], loBeam.b[0])
th2pitch = np.sqrt(1. - loBeam.a[0]**2)
targetPitch = np.arcsin(np.sin(braggT) / th2pitch) - theta0
targetPitch += alphaT + lauePitch
if autoBragg:
if autoPitch:
oe.pitch = 0
oe.bragg = targetPitch - oe.pitch
if _VERBOSITY_ > 0:
print("{0}: Bragg={1} at E={2}".format(
oe.name, oe.bragg, alignE))
else: # autoPitch
oe.pitch = targetPitch
if _VERBOSITY_ > 0:
print(oe.name, "pitch:", oe.pitch)
except Exception as e:
if _DEBUG_:
raise e
else:
pass
def propagate_flow(self, startFrom=0, signal=None):
if self.oesDict is None or self.flow is None:
return
totalStages = len(self.flow[startFrom:])
for iseg, segment in enumerate(self.flow[startFrom:]):
segOE = self.oesDict[segment[0]][0]
fArgs = OrderedDict()
for inArg in segment[2].items():
if inArg[0].startswith('beam'):
if inArg[1] is None:
inBeam = None
break
fArgs[inArg[0]] = self.beamsDict[inArg[1]]
inBeam = fArgs['beam']
else:
fArgs[inArg[0]] = inArg[1]
try:
if inBeam is None:
continue
except NameError:
pass
            try:  # protection against incorrect propagation parameters
if signal is not None:
signalStr = "Propagation: {0} {1}(), %p% done.".format(
str(segment[0]),
str(segment[1]).split(".")[-1].strip(">").split(
" ")[0])
signal.emit((float(iseg+1)/float(totalStages), signalStr))
self.statusSignal =\
[signal, iseg+1, totalStages, signalStr]
except:
pass
try:
outBeams = segment[1](segOE, **fArgs)
except:
if _DEBUG_:
raise
else:
continue
if isinstance(outBeams, tuple):
for outBeam, beamName in zip(list(outBeams),
list(segment[3].values())):
self.beamsDict[beamName] = outBeam
else:
self.beamsDict[str(list(segment[3].values())[0])] = outBeams
def glow(self, scale=[], centerAt='', startFrom=0, colorAxis=None,
colorAxisLimits=None, generator=None, generatorArgs=[]):
if generator is not None:
gen = generator(*generatorArgs)
try:
if sys.version_info < (3, 1):
gen.next()
else:
next(gen)
except StopIteration:
return
try:
from ...gui import xrtGlow as xrtglow
except ImportError:
print("cannot import xrtGlow")
return
from .run import run_process
run_process(self)
if self.blViewer is None:
app = xrtglow.qt.QApplication(sys.argv)
rayPath = self.export_to_glow()
self.blViewer = xrtglow.xrtGlow(rayPath)
self.blViewer.generator = generator
self.blViewer.generatorArgs = generatorArgs
self.blViewer.customGlWidget.generator = generator
self.blViewer.setWindowTitle("xrtGlow")
self.blViewer.startFrom = startFrom
self.blViewer.bl = self
if scale:
try:
self.blViewer.updateScaleFromGL(scale)
except:
pass
if centerAt:
try:
self.blViewer.centerEl(centerAt)
except:
pass
if colorAxis:
try:
colorCB = self.blViewer.colorControls[0]
colorCB.setCurrentIndex(colorCB.findText(colorAxis))
except:
pass
if colorAxisLimits:
try:
self.blViewer.customGlWidget.colorMin,\
self.blViewer.customGlWidget.colorMax = colorAxisLimits
self.blViewer.changeColorAxis(None, newLimits=True)
except:
pass
self.blViewer.show()
sys.exit(app.exec_())
else:
self.blViewer.show()
def export_to_glow(self, signal=None):
def calc_weighted_center(beam):
good = (beam.state == 1) | (beam.state == 2)
intensity = beam.Jss[good] + beam.Jpp[good]
totalI = np.sum(intensity)
if totalI == 0:
beam.wCenter = np.array([0., 0., 0.])
else:
beam.wCenter = np.array(
[np.sum(beam.x[good] * intensity),
np.sum(beam.y[good] * intensity),
np.sum(beam.z[good] * intensity)]) /\
totalI
if self.flow is not None:
beamDict = OrderedDict()
rayPath = []
outputBeamMatch = OrderedDict()
oesDict = OrderedDict()
totalStages = len(self.flow)
for iseg, segment in enumerate(self.flow):
try:
if signal is not None:
signalStr = "Processing {0} beams, %p% done.".format(
str(segment[0]))
signal.emit((float(iseg+1) / float(totalStages),
signalStr))
except:
if _DEBUG_:
raise
else:
pass
try:
methStr = str(segment[1])
oeStr = segment[0]
segOE = self.oesDict[oeStr][0]
if segOE is None: # Protection from non-initialized OEs
continue
oesDict[oeStr] = self.oesDict[oeStr]
if 'beam' in segment[2].keys():
if str(segment[2]['beam']) == 'None':
continue
tmpBeamName = segment[2]['beam']
beamDict[tmpBeamName] = copy.deepcopy(
self.beamsDict[tmpBeamName])
if 'beamGlobal' in segment[3].keys():
outputBeamMatch[segment[3]['beamGlobal']] = oeStr
if len(re.findall('raycing.sou',
str(type(segOE)).lower())):
gBeamName = segment[3]['beamGlobal']
beamDict[gBeamName] = self.beamsDict[gBeamName]
rayPath.append([oeStr, gBeamName, None, None])
elif len(re.findall(('expose'), methStr)) > 0 and\
len(re.findall(('expose_global'), methStr)) == 0:
gBeam = self.oesDict[oeStr][0].expose_global(
self.beamsDict[tmpBeamName])
gBeamName = '{}toGlobal'.format(
segment[3]['beamLocal'])
beamDict[gBeamName] = gBeam
rayPath.append([outputBeamMatch[tmpBeamName],
tmpBeamName, oeStr, gBeamName])
elif len(re.findall(('double'), methStr)) +\
len(re.findall(('multiple'), methStr)) > 0:
lBeam1Name = segment[3]['beamLocal1']
gBeam = copy.deepcopy(self.beamsDict[lBeam1Name])
segOE.local_to_global(gBeam)
g1BeamName = '{}toGlobal'.format(lBeam1Name)
beamDict[g1BeamName] = gBeam
rayPath.append([outputBeamMatch[tmpBeamName],
tmpBeamName, oeStr, g1BeamName])
gBeamName = segment[3]['beamGlobal']
beamDict[gBeamName] = self.beamsDict[gBeamName]
rayPath.append([oeStr, g1BeamName,
oeStr, gBeamName])
elif len(re.findall(('propagate'), methStr)) > 0:
if 'beamGlobal' in segment[3].keys():
lBeam1Name = segment[3]['beamGlobal']
gBeamName = lBeam1Name
else:
lBeam1Name = segment[3]['beamLocal']
gBeamName = '{}toGlobal'.format(lBeam1Name)
gBeam = copy.deepcopy(self.beamsDict[lBeam1Name])
segOE.local_to_global(gBeam)
beamDict[gBeamName] = gBeam
rayPath.append([outputBeamMatch[tmpBeamName],
tmpBeamName, oeStr, gBeamName])
else:
gBeamName = segment[3]['beamGlobal']
beamDict[gBeamName] = self.beamsDict[gBeamName]
rayPath.append([outputBeamMatch[tmpBeamName],
tmpBeamName, oeStr, gBeamName])
except:
if _DEBUG_:
raise
else:
continue
totalBeams = len(beamDict)
for itBeam, tBeam in enumerate(beamDict.values()):
if signal is not None:
try:
signalStr = "Calculating trajectory, %p% done."
signal.emit((float(itBeam+1)/float(totalBeams), signalStr))
except:
if _DEBUG_:
raise
else:
pass
if tBeam is not None:
calc_weighted_center(tBeam)
return [rayPath, beamDict, oesDict]
|
|
import os
import shutil
import tempfile
import uuid
import bcolz
import numpy as np
from bquery import ctable_ext
def rm_file_or_dir(path, ignore_errors=True):
"""
    Helper function to remove the file or directory at the given path.
    Parameters
    ----------
    path : str
        The file or directory path to remove.
    ignore_errors : bool, optional
        Passed to ``shutil.rmtree`` when removing a directory tree.
Returns
-------
"""
if os.path.exists(path):
if os.path.isdir(path):
if os.path.islink(path):
os.unlink(path)
else:
shutil.rmtree(path, ignore_errors=ignore_errors)
else:
if os.path.islink(path):
os.unlink(path)
else:
os.remove(path)
class ctable(bcolz.ctable):
def __init__(self, *args, **kwargs):
super(ctable, self).__init__(*args, **kwargs)
# check autocaching
if self.rootdir and kwargs.get('auto_cache') is True:
# explicit auto_cache
self.auto_cache = True
elif self.rootdir and kwargs.get('auto_cache') is None and kwargs.get('mode') != 'r':
# implicit auto_cache
self.auto_cache = True
else:
self.auto_cache = False
self.auto_cache = True # debug
self._dir_clean_list = []
@staticmethod
def create_group_base_name(col_list):
group_name = '_'.join(sorted(col_list))
return group_name
def cache_valid(self, col):
"""
Checks whether the column has a factorization that exists and is not older than the source
:param col:
:return:
"""
cache_valid = False
if self.rootdir:
col_org_file_check = self[col].rootdir + '/__attrs__'
col_values_file_check = self[col].rootdir + '.values/__attrs__'
cache_valid = os.path.exists(col_org_file_check) and os.path.exists(col_values_file_check)
return cache_valid
def group_cache_valid(self, col_list):
"""
        Checks whether the column group has a factorization that exists and is not older than the source
        :param col_list: the list of columns that define the group
:return:
"""
cache_valid = False
if self.rootdir:
col_values_file_check = os.path.join(self.rootdir, self.create_group_base_name(col_list)) + \
'.values/__attrs__'
exists_group_index = os.path.exists(col_values_file_check)
missing_col_check = [1 for col in col_list if not os.path.exists(self[col].rootdir + '/__attrs__')]
cache_valid = (exists_group_index and not missing_col_check)
return cache_valid
def cache_factor(self, col_list, refresh=False):
"""
        Existing todo: these should be hidden helper carrays,
        i.e. not normal columns that a user would normally see.
        The factor (label index) carray is as long as the original carray
        (and therefore the rest of the table), but the (unique) values
        carray is only as long as the number of unique values.
:param col_list:
:param refresh:
:return:
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
if not isinstance(col_list, list):
col_list = [col_list]
if refresh:
kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x]
for kill_dir in kill_list:
rm_file_or_dir(os.path.join(self.rootdir, kill_dir))
for col in col_list:
# create cache if needed
if refresh or not self.cache_valid(col):
# todo: also add locking mechanism here
# create directories
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir = col_rootdir + '.values'
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
try:
# create factor
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir_tmp, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
finally:
rm_file_or_dir(col_factor_rootdir, ignore_errors=True)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
try:
# create values
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype),
rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
finally:
rm_file_or_dir(col_values_rootdir, ignore_errors=True)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
def unique(self, col_or_col_list):
"""
        Return a list of unique values of a column, or a list of such lists when a list of columns is given
:param col_or_col_list: a column or a list of columns
:return:
"""
if isinstance(col_or_col_list, list):
col_is_list = True
col_list = col_or_col_list
else:
col_is_list = False
col_list = [col_or_col_list]
output = []
for col in col_list:
if self.auto_cache or self.cache_valid(col):
# create factorization cache
if not self.cache_valid(col):
self.cache_factor([col])
# retrieve values from existing disk-based factorization
col_values_rootdir = self[col].rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
values = list(carray_values)
else:
# factorize on-the-fly
_, values = ctable_ext.factorize(self[col])
values = values.values()
output.append(values)
if not col_is_list:
output = output[0]
return output
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols, agg_ops,
dtype_dict, bool_arr=None):
'''Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
            carray_factor: carray holding, for each row in the table, a reference to its unique group index
            groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
            agg_ops (list): list of tuples of the form: (input_col_name, output_col_name, agg_op)
                input_col_name (string): name of the column to act on
                output_col_name (string): name of the column to output to
                agg_op (string): aggregation operation to perform
bool_arr: a boolean array containing the filter
'''
# this creates the groupby columns
for col in groupby_cols:
result_array = ctable_ext.groupby_value(self[col], carray_factor,
nr_groups, skip_key)
if bool_arr is not None:
result_array = np.delete(result_array, skip_key)
ct_agg.addcol(result_array, name=col)
del result_array
# this creates the aggregation columns
for input_col_name, output_col_name, agg_op in agg_ops:
input_col = self[input_col_name]
output_col_dtype = dtype_dict[output_col_name]
input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
if agg_op == 'sum':
ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'mean':
ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'std':
ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count':
ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'count_distinct':
ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
elif agg_op == 'sorted_count_distinct':
ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
skip_key, input_buffer, output_buffer)
else:
raise KeyError('Unknown aggregation operation ' + str(agg_op))
if bool_arr is not None:
output_buffer = np.delete(output_buffer, skip_key)
ct_agg.addcol(output_buffer, name=output_col_name)
del output_buffer
ct_agg.delcol('tmp_col_bquery__')
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
"""
Aggregate the ctable
groupby_cols: a list of columns to groupby over
agg_list: the aggregation operations, which can be:
- a list of column names (output has same name and sum is performed)
['m1', 'm2', ...]
- a list of lists, each list contains input column name and operation
[['m1', 'sum'], ['m2', 'mean'], ...]
- a list of lists, each list contains input column name, operation and
output column name
[['m1', 'sum', 'm1_sum'], ['m1', 'mean', 'm1_mean'], ...]
Currently supported aggregation operations are:
- 'sum'
- 'count'
- 'count_na'
- 'count_distinct'
- 'sorted_count_distinct', data should have been
previously presorted
- 'mean', arithmetic mean (average)
- 'std', standard deviation
boolarr: to be added (filtering the groupby factorization input)
rootdir: the aggregation ctable rootdir
"""
try:
carray_factor, nr_groups, skip_key = \
self.make_group_index(groupby_cols, bool_arr)
# check if the bool_arr actually filters
if bool_arr is not None and np.all(bool_arr):
bool_arr = None
if bool_arr is None:
expectedlen = nr_groups
else:
expectedlen = nr_groups - 1
ct_agg, dtype_dict, agg_ops = \
self.create_agg_ctable(groupby_cols, agg_list, expectedlen, rootdir)
# perform aggregation
self.aggregate_groups(ct_agg, nr_groups, skip_key,
carray_factor, groupby_cols,
agg_ops, dtype_dict,
bool_arr=bool_arr)
finally:
# clean up everything that was used
self.clean_tmp_rootdir()
return ct_agg
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
"""
        Factorizes all columns that are used in the groupby.
        It uses cached carrays if available; if a cache does not exist yet and
        auto_cache is enabled, the cache carrays are created first.
"""
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
if self.auto_cache or self.cache_valid(col):
# create factorization cache if needed
if not self.cache_valid(col):
self.cache_factor([col])
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
col_carray_factor = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_carray_values = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
col_carray_factor, values = ctable_ext.factorize(self[col])
col_carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype))
factor_list.append(col_carray_factor)
values_list.append(col_carray_values)
return factor_list, values_list
@staticmethod
def _int_array_hash(input_list):
"""
A function to calculate a hash value of multiple integer values, not used at the moment
Parameters
----------
input_list
Returns
-------
"""
list_len = len(input_list)
arr_len = len(input_list[0])
mult_arr = np.full(arr_len, 1000003, dtype=np.long)
value_arr = np.full(arr_len, 0x345678, dtype=np.long)
for i, current_arr in enumerate(input_list):
index = list_len - i - 1
value_arr ^= current_arr
value_arr *= mult_arr
mult_arr += (82520 + index + index)
value_arr += 97531
result_carray = bcolz.carray(value_arr)
del value_arr
return result_carray
def create_group_column_factor(self, factor_list, groupby_cols, cache=False):
"""
Create a unique, factorized column out of several individual columns
Parameters
----------
factor_list
groupby_cols
cache
Returns
-------
"""
if not self.rootdir:
# in-memory scenario
input_rootdir = None
col_rootdir = None
col_factor_rootdir = None
col_values_rootdir = None
col_factor_rootdir_tmp = None
col_values_rootdir_tmp = None
else:
# temporary
input_rootdir = tempfile.mkdtemp(prefix='bcolz-')
col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-')
# create combination of groupby columns
group_array = bcolz.zeros(0, dtype=np.int64, expectedlen=len(self), rootdir=input_rootdir, mode='w')
factor_table = bcolz.ctable(factor_list, names=groupby_cols)
ctable_iter = factor_table.iter(outcols=groupby_cols, out_flavor=tuple)
ctable_ext.create_group_index(ctable_iter, len(groupby_cols), group_array)
# now factorize the results
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w')
carray_factor, values = ctable_ext.factorize(group_array, labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(np.fromiter(values.values(), dtype=np.int64), rootdir=col_values_rootdir_tmp, mode='w')
carray_values.flush()
del group_array
if cache:
# clean up the temporary file
rm_file_or_dir(input_rootdir, ignore_errors=True)
if cache:
# official end destination
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
lock_file = col_rootdir + '.lock'
# only works for linux
if not os.path.exists(lock_file):
uid = str(uuid.uuid4())
try:
with open(lock_file, 'a+') as fn:
fn.write(uid + '\n')
with open(lock_file, 'r') as fn:
temp = fn.read().splitlines()
if temp[0] == uid:
lock = True
else:
lock = False
del temp
except:
lock = False
else:
lock = False
if lock:
rm_file_or_dir(col_factor_rootdir, ignore_errors=False)
shutil.move(col_factor_rootdir_tmp, col_factor_rootdir)
carray_factor = bcolz.carray(rootdir=col_factor_rootdir, mode='r')
rm_file_or_dir(col_values_rootdir, ignore_errors=False)
shutil.move(col_values_rootdir_tmp, col_values_rootdir)
carray_values = bcolz.carray(rootdir=col_values_rootdir, mode='r')
else:
# another process has a lock, we will work with our current files and clean up later
self._dir_clean_list.append(col_factor_rootdir)
self._dir_clean_list.append(col_values_rootdir)
return carray_factor, carray_values
def make_group_index(self, groupby_cols, bool_arr):
'''Create unique groups for groupby loop
Args:
            groupby_cols: the list of columns to group over
            bool_arr: optional boolean filter array (or None)
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key)
'''
factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
# create unique groups for groupby loop
if len(factor_list) == 0:
# no columns to groupby over, so directly aggregate the measure
# columns to 1 total
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
carray_values = ['Total']
elif len(factor_list) == 1:
# single column groupby, the groupby output column
# here is 1:1 to the values
carray_factor = factor_list[0]
carray_values = values_list[0]
else:
# multi column groupby
# first combine the factorized columns to single values
if self.group_cache_valid(col_list=groupby_cols):
# there is a group cache that we can use
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
col_values_rootdir = col_rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir)
else:
# create a brand new groupby col combination
carray_factor, carray_values = \
self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
nr_groups = len(carray_values)
skip_key = None
if bool_arr is not None:
# make all non relevant combinations -1
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.eval(
'(factor + 1) * bool - 1',
user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
# now check how many unique values there are left
tmp_rootdir = self.create_tmp_rootdir()
labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
carray_factor, values = ctable_ext.factorize(carray_factor, labels)
# values might contain one value too much (-1) (no direct lookup
# possible because values is a reversed dict)
filter_check = \
[key for key, value in values.items() if value == -1]
if filter_check:
skip_key = filter_check[0]
# the new nr of groups depends on the outcome after filtering
nr_groups = len(values)
        # using nr_groups as a total length might be one off due to the skip_key
# (skipping a row in aggregation)
# but that is okay normally
if skip_key is None:
# if we shouldn't skip a row, set it at the first row after the total number of groups
skip_key = nr_groups
return carray_factor, nr_groups, skip_key
def create_tmp_rootdir(self):
"""
create a rootdir that we can destroy later again
Returns
-------
"""
if self.rootdir:
tmp_rootdir = tempfile.mkdtemp(prefix='bcolz-')
self._dir_clean_list.append(tmp_rootdir)
else:
tmp_rootdir = None
return tmp_rootdir
def clean_tmp_rootdir(self):
"""
clean up all used temporary rootdirs
Returns
-------
"""
for tmp_rootdir in list(self._dir_clean_list):
rm_file_or_dir(tmp_rootdir)
self._dir_clean_list.remove(tmp_rootdir)
def create_agg_ctable(self, groupby_cols, agg_list, expectedlen, rootdir):
        '''Create a container for the output table, a dictionary describing its
columns and a list of tuples describing aggregation
operations to perform.
Args:
groupby_cols (list): a list of columns to groupby over
agg_list (list): the aggregation operations (see groupby for more info)
expectedlen (int): expected length of output table
rootdir (string): the directory to write the table to
Returns:
ctable: A table in the correct format for containing the output of
the specified aggregation operations.
dict: (dtype_dict) dictionary describing columns to create
list: (agg_ops) list of tuples of the form:
(input_col_name, output_col_name, agg_op)
input_col_name (string): name of the column to act on
output_col_name (string): name of the column to output to
                        agg_op (string): aggregation operation to perform
'''
dtype_dict = {}
# include all the groupby columns
for col in groupby_cols:
dtype_dict[col] = self[col].dtype
agg_ops_list = ['sum', 'count', 'count_distinct', 'sorted_count_distinct', 'mean', 'std']
agg_ops = []
for agg_info in agg_list:
if not isinstance(agg_info, list):
# example: ['m1', 'm2', ...]
                # default operation (sum) and default output column name (same as input)
output_col_name = agg_info
input_col_name = agg_info
agg_op = 'sum'
else:
input_col_name = agg_info[0]
agg_op = agg_info[1]
if len(agg_info) == 2:
                    # example: [['m1', 'sum'], ['m2', 'mean'], ...]
# default output column name
output_col_name = input_col_name
else:
                    # example: [['m1', 'sum', 'mnew1'], ['m1', 'mean', 'mnew2'], ...]
# fully specified
output_col_name = agg_info[2]
if agg_op not in agg_ops_list:
raise NotImplementedError(
'Unknown Aggregation Type: ' + str(agg_op))
# choose output column dtype based on aggregation operation and
# input column dtype
# TODO: check if the aggregation columns is numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
if agg_op in ('count', 'count_distinct', 'sorted_count_distinct'):
output_col_dtype = np.dtype(np.int64)
elif agg_op in ('mean', 'std'):
output_col_dtype = np.dtype(np.float64)
else:
output_col_dtype = self[input_col_name].dtype
dtype_dict[output_col_name] = output_col_dtype
# save output
agg_ops.append((input_col_name, output_col_name, agg_op))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(expectedlen, [('tmp_col_bquery__', np.bool)]),
expectedlen=expectedlen,
rootdir=rootdir)
return ct_agg, dtype_dict, agg_ops
def where_terms(self, term_list, cache=False):
"""
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
        :param cache: if True, back the boolean filter by a temporary on-disk rootdir
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
col_list = []
op_list = []
value_list = []
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
if filter_operator in ['==', 'eq']:
op_id = 1
elif filter_operator in ['!=', 'neq']:
op_id = 2
elif filter_operator in ['in']:
op_id = 3
elif filter_operator in ['nin', 'not in']:
op_id = 4
elif filter_operator in ['>']:
op_id = 5
elif filter_operator in ['>=']:
op_id = 6
elif filter_operator in ['<']:
op_id = 7
elif filter_operator in ['<=']:
op_id = 8
else:
raise KeyError(unicode(filter_operator) + ' is not an accepted operator for filtering')
if op_id in [3, 4]:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
if op_id == 3:
op_id = 1
else:
op_id = 2
filter_value = filter_value[0]
else:
filter_value = set(filter_value)
# prepare input for filter creation
col_list.append(filter_col)
op_list.append(op_id)
value_list.append(filter_value)
# rootdir
if cache:
# nb: this directory is not destroyed until the end of the groupby
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
# create boolean array and fill it
boolarr = bcolz.carray(np.ones(0, dtype=np.bool), expectedlen=self.len, rootdir=rootdir, mode='w')
ctable_iter = self[col_list].iter(out_flavor='tuple')
ctable_ext.apply_where_terms(ctable_iter, op_list, value_list, boolarr)
return boolarr
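    # Illustrative sketch only (not part of the class API): a term list of the
    # shape consumed by where_terms() above; column names are hypothetical.
    #
    #   terms = [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
    #   bool_arr = ct.where_terms(terms)   # boolean carray, one entry per row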
def where_terms_factorization_check(self, term_list):
"""
check for where terms if they are applicable
Create a boolean array where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
valid = True
for term in term_list:
# get terms
filter_col = term[0]
filter_operator = term[1].lower().strip(' ')
filter_value = term[2]
# check values
if filter_col not in self.cols:
raise KeyError(unicode(filter_col) + ' not in table')
col_values_rootdir = os.path.join(self.rootdir, filter_col + '.values')
if not os.path.exists(col_values_rootdir):
# no factorization available
break
col_carray = bcolz.carray(rootdir=col_values_rootdir, mode='r')
col_values = set(col_carray)
if filter_operator in ['in', 'not in', 'nin']:
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
# optimize lists of 1 value
if len(filter_value) == 1:
filter_value = filter_value[0]
if filter_operator == 'in':
filter_operator = '=='
else:
filter_operator = '!='
else:
filter_value = set(filter_value)
            if filter_operator in ['==', 'eq']:
                valid = filter_value in col_values
            elif filter_operator in ['!=', 'neq']:
                valid = any(val != filter_value for val in col_values)
            elif filter_operator in ['in']:
                valid = any(val in col_values for val in filter_value)
            elif filter_operator in ['nin', 'not in']:
                valid = any(val not in filter_value for val in col_values)
            elif filter_operator in ['>']:
                valid = any(val > filter_value for val in col_values)
            elif filter_operator in ['>=']:
                valid = any(val >= filter_value for val in col_values)
            elif filter_operator in ['<']:
                valid = any(val < filter_value for val in col_values)
            elif filter_operator in ['<=']:
                valid = any(val <= filter_value for val in col_values)
            else:
                raise KeyError(str(filter_operator) + ' is not an accepted operator for filtering')
# if one of the filters is blocking, we can stop
if not valid:
break
return valid
def is_in_ordered_subgroups(self, basket_col=None, bool_arr=None,
_max_len_subgroup=1000):
"""
Expands the filter using a specified column
Parameters
----------
basket_col
bool_arr
_max_len_subgroup
Returns
-------
"""
assert basket_col is not None
if bool_arr is None:
return None
if self.auto_cache and bool_arr.rootdir is not None:
rootdir = self.create_tmp_rootdir()
else:
rootdir = None
return \
ctable_ext.is_in_ordered_subgroups(
self[basket_col], bool_arr=bool_arr, rootdir=rootdir,
_max_len_subgroup=_max_len_subgroup)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grouping dataset transformations."""
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import structured_function
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.group_by_reducer")
def group_by_reducer(key_func, reducer):
"""A transformation that groups elements and performs a reduction.
  This transformation maps each element of a dataset to a key using `key_func` and
groups the elements by key. The `reducer` is used to process each group; its
`init_func` is used to initialize state for each group when it is created, the
`reduce_func` is used to update the state every time an element is mapped to
the matching group, and the `finalize_func` is used to map the final state to
an output value.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByReducerDataset(dataset, key_func, reducer)
return _apply_fn
@deprecation.deprecated(None, "Use `tf.data.Dataset.group_by_window(...)`.")
@tf_export("data.experimental.group_by_window")
def group_by_window(key_func,
reduce_func,
window_size=None,
window_size_func=None):
"""A transformation that groups windows of elements by key and reduces them.
This transformation maps each consecutive element in a dataset to a key
using `key_func` and groups the elements by key. It then applies
`reduce_func` to at most `window_size_func(key)` elements matching the same
key. All except the final window for each key will contain
`window_size_func(key)` elements; the final window may be smaller.
You may provide either a constant `window_size` or a window size determined by
the key through `window_size_func`.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reduce_func: A function mapping a key and a dataset of up to `window_size`
consecutive elements matching that key to another dataset.
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements matching the same key to combine in a single
batch, which will be passed to `reduce_func`. Mutually exclusive with
`window_size_func`.
window_size_func: A function mapping a key to a `tf.int64` scalar
`tf.Tensor`, representing the number of consecutive elements matching
the same key to combine in a single batch, which will be passed to
`reduce_func`. Mutually exclusive with `window_size`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
passed.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset.group_by_window(
key_func=key_func,
reduce_func=reduce_func,
window_size=window_size,
window_size_func=window_size_func)
return _apply_fn
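# A minimal usage sketch of the transformation above (illustrative only, not
# part of this module). It assumes the public `tensorflow` package is
# importable as `tf`; the expected output is noted in the comments.
def _group_by_window_usage_sketch():
  import tensorflow as tf  # pylint: disable=g-import-not-at-top
  ds = tf.data.Dataset.range(10)
  ds = ds.apply(
      tf.data.experimental.group_by_window(
          key_func=lambda x: x % 2,  # Group elements into even/odd keys.
          reduce_func=lambda key, window: window.batch(5),
          window_size=5))
  # Yields two batches: [0 2 4 6 8] and [1 3 5 7 9].
  return [batch.numpy() for batch in ds]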
@deprecation.deprecated(None,
"Use `tf.data.Dataset.bucket_by_sequence_length(...)`.")
@tf_export("data.experimental.bucket_by_sequence_length")
def bucket_by_sequence_length(element_length_func,
bucket_boundaries,
bucket_batch_sizes,
padded_shapes=None,
padding_values=None,
pad_to_bucket_boundary=False,
no_padding=False,
drop_remainder=False):
"""A transformation that buckets elements in a `Dataset` by length.
Elements of the `Dataset` are grouped together by length and then are padded
and batched.
This is useful for sequence tasks in which the elements have variable length.
Grouping together elements that have similar lengths reduces the total
fraction of padding in a batch which increases training step efficiency.
Below is an example to bucketize the input data to the 3 buckets
"[0, 3), [3, 5), [5, inf)" based on sequence length, with batch size 2.
>>> elements = [
... [0], [1, 2, 3, 4], [5, 6, 7],
... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, tf.int64, output_shapes=[None])
>>> dataset = dataset.apply(
... tf.data.experimental.bucket_by_sequence_length(
... element_length_func=lambda elem: tf.shape(elem)[0],
... bucket_boundaries=[3, 5],
... bucket_batch_sizes=[2, 2, 2]))
>>> for elem in dataset.as_numpy_iterator():
... print(elem)
[[1 2 3 4]
[5 6 7 0]]
[[ 7 8 9 10 11 0]
[13 14 15 16 19 20]]
[[ 0 0]
[21 22]]
  You can also pad each element up to the bucket boundary and choose the value
  used for padding. The example below uses `-1` as the padding value and shows
  the input data being bucketized into the two buckets "[0, 3], [4, 6]".
>>> elements = [
... [0], [1, 2, 3, 4], [5, 6, 7],
... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, tf.int32, output_shapes=[None])
>>> dataset = dataset.apply(
... tf.data.experimental.bucket_by_sequence_length(
... element_length_func=lambda elem: tf.shape(elem)[0],
... bucket_boundaries=[4, 7],
... bucket_batch_sizes=[2, 2, 2],
... pad_to_bucket_boundary=True,
... padding_values=-1))
>>> for elem in dataset.as_numpy_iterator():
... print(elem)
[[ 0 -1 -1]
[ 5 6 7]]
[[ 1 2 3 4 -1 -1]
[ 7 8 9 10 11 -1]]
[[21 22 -1]]
[[13 14 15 16 19 20]]
  When the `pad_to_bucket_boundary` option is used, it is not always possible
  to maintain the bucket batch size. You can drop the batches that do not reach
  the bucket batch size with the `drop_remainder` option. Using the same input
  data as in the above example you get the following result.
>>> elements = [
... [0], [1, 2, 3, 4], [5, 6, 7],
... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
>>> dataset = tf.data.Dataset.from_generator(
... lambda: elements, tf.int32, output_shapes=[None])
>>> dataset = dataset.apply(
... tf.data.experimental.bucket_by_sequence_length(
... element_length_func=lambda elem: tf.shape(elem)[0],
... bucket_boundaries=[4, 7],
... bucket_batch_sizes=[2, 2, 2],
... pad_to_bucket_boundary=True,
... padding_values=-1,
... drop_remainder=True))
>>> for elem in dataset.as_numpy_iterator():
... print(elem)
[[ 0 -1 -1]
[ 5 6 7]]
[[ 1 2 3 4 -1 -1]
[ 7 8 9 10 11 -1]]
Args:
element_length_func: function from element in `Dataset` to `tf.int32`,
determines the length of the element, which will determine the bucket it
goes into.
bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
`tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
`tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
bucket), and caller must ensure that the source `Dataset` does not contain
any elements with length longer than `max(bucket_boundaries)`.
no_padding: `bool`, indicates whether to pad the batch features (features
need to be either of type `tf.sparse.SparseTensor` or of same shape).
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
"""
def _apply_fn(dataset):
return dataset.bucket_by_sequence_length(
element_length_func=element_length_func,
bucket_boundaries=bucket_boundaries,
bucket_batch_sizes=bucket_batch_sizes,
padded_shapes=padded_shapes,
padding_values=padding_values,
pad_to_bucket_boundary=pad_to_bucket_boundary,
no_padding=no_padding,
drop_remainder=drop_remainder)
return _apply_fn
class _GroupByReducerDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that groups its input and performs a reduction."""
def __init__(self, input_dataset, key_func, reducer):
"""See `group_by_reducer()` for details."""
self._input_dataset = input_dataset
self._make_key_func(key_func, input_dataset)
self._make_init_func(reducer.init_func)
self._make_reduce_func(reducer.reduce_func, input_dataset)
self._make_finalize_func(reducer.finalize_func)
variant_tensor = ged_ops.experimental_group_by_reducer_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._key_func.function.captured_inputs,
self._init_func.function.captured_inputs,
self._reduce_func.function.captured_inputs,
self._finalize_func.function.captured_inputs,
key_func=self._key_func.function,
init_func=self._init_func.function,
reduce_func=self._reduce_func.function,
finalize_func=self._finalize_func.function,
**self._flat_structure)
super(_GroupByReducerDataset, self).__init__(input_dataset, variant_tensor)
def _make_key_func(self, key_func, input_dataset):
"""Make wrapping defun for key_func."""
self._key_func = structured_function.StructuredFunctionWrapper(
key_func, self._transformation_name(), dataset=input_dataset)
if not self._key_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)):
raise ValueError(
f"Invalid `key_func`. Expected `key_func` to return a scalar "
f"tf.int64 tensor, but instead `key_func` has output "
f"types={self._key_func.output_types} "
f"and shapes={self._key_func.output_shapes}."
)
def _make_init_func(self, init_func):
"""Make wrapping defun for init_func."""
self._init_func = structured_function.StructuredFunctionWrapper(
init_func,
self._transformation_name(),
input_structure=tensor_spec.TensorSpec([], dtypes.int64))
def _make_reduce_func(self, reduce_func, input_dataset):
"""Make wrapping defun for reduce_func."""
# Iteratively rerun the reduce function until reaching a fixed point on
# `self._state_structure`.
self._state_structure = self._init_func.output_structure
state_types = self._init_func.output_types
state_shapes = self._init_func.output_shapes
state_classes = self._init_func.output_classes
need_to_rerun = True
while need_to_rerun:
wrapped_func = structured_function.StructuredFunctionWrapper(
reduce_func,
self._transformation_name(),
input_structure=(self._state_structure, input_dataset.element_spec),
add_to_graph=False)
# Extract and validate class information from the returned values.
for new_state_class, state_class in zip(
nest.flatten(wrapped_func.output_classes),
nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
f"Invalid `reducer`. The output class of the "
f"`reducer.reduce_func` {wrapped_func.output_classes}, "
f"does not match the class of the reduce state "
f"{self._state_classes}.")
# Extract and validate type information from the returned values.
for new_state_type, state_type in zip(
nest.flatten(wrapped_func.output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
f"Invalid `reducer`. The element types for the new state "
f"{wrapped_func.output_types} do not match the element types "
f"of the old state {self._init_func.output_types}."
)
# Extract shape information from the returned values.
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(wrapped_func.output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
state_shapes = nest.pack_sequence_as(
self._init_func.output_shapes, weakened_state_shapes)
self._state_structure = structure.convert_legacy_structure(
state_types, state_shapes, state_classes)
self._reduce_func = wrapped_func
self._reduce_func.function.add_to_graph(ops.get_default_graph())
def _make_finalize_func(self, finalize_func):
"""Make wrapping defun for finalize_func."""
self._finalize_func = structured_function.StructuredFunctionWrapper(
finalize_func,
self._transformation_name(),
input_structure=self._state_structure)
@property
def element_spec(self):
return self._finalize_func.output_structure
def _functions(self):
return [
self._key_func, self._init_func, self._reduce_func, self._finalize_func
]
def _transformation_name(self):
return "tf.data.experimental.group_by_reducer()"
@tf_export("data.experimental.Reducer")
class Reducer(object):
"""A reducer is used for reducing a set of elements.
A reducer is represented as a tuple of the three functions:
- init_func - to define initial value: key => initial state
  - reduce_func - operation to perform on values with the same key: (old state, input) => new state
- finalize_func - value to return in the end: state => result
For example,
```
def init_func(_):
return (0.0, 0.0)
def reduce_func(state, value):
return (state[0] + value['features'], state[1] + 1)
def finalize_func(s, n):
return s / n
reducer = tf.data.experimental.Reducer(init_func, reduce_func, finalize_func)
```
"""
def __init__(self, init_func, reduce_func, finalize_func):
self._init_func = init_func
self._reduce_func = reduce_func
self._finalize_func = finalize_func
@property
def init_func(self):
return self._init_func
@property
def reduce_func(self):
return self._reduce_func
@property
def finalize_func(self):
return self._finalize_func
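# A minimal usage sketch for `Reducer` combined with `group_by_reducer`
# (illustrative only, not part of this module). It assumes the public
# `tensorflow` package is importable as `tf` and mirrors the per-key mean
# example from the class docstring; the sample values are assumptions.
def _reducer_usage_sketch():
  import tensorflow as tf  # pylint: disable=g-import-not-at-top
  ds = tf.data.Dataset.from_tensor_slices(
      {"label": [0, 0, 1, 1], "features": [1.0, 3.0, 10.0, 20.0]})
  reducer = tf.data.experimental.Reducer(
      init_func=lambda _: (0.0, 0.0),  # (running sum, running count)
      reduce_func=lambda state, value: (state[0] + value["features"],
                                        state[1] + 1),
      finalize_func=lambda s, n: s / n)  # mean = sum / count
  ds = ds.apply(
      tf.data.experimental.group_by_reducer(
          key_func=lambda x: tf.cast(x["label"], tf.int64), reducer=reducer))
  # Yields one mean per key: 2.0 for label 0 and 15.0 for label 1.
  return [value.numpy() for value in ds]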
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for support of portable globes."""
import json
import os
import shlex
import subprocess
import sys
import time
import urlparse
import xml.sax.saxutils as saxutils
import distutils.dir_util
import distutils.errors
import errors
BYTES_PER_MEGABYTE = 1024.0 * 1024.0
NAME_TEMPLATE = "%s_%s"
class OsCommandError(Exception):
"""Thrown if os command fails."""
pass
# TODO: consider to use a lib like bleach that is specifically
# aimed at foiling XSS attacks.
# Additional characters that need to be escaped for HTML, defined in a
# dictionary mapping each character to its escape string.
# xml.sax.saxutils.escape() takes care of &, < and >.
_HTML_ESCAPE_TABLE = {
    '"': "&quot;",
    "'": "&apos;",
    "`": "&#96;",
    "|": "&#124;",
}
def HtmlEscape(text):
"""Escapes a string for HTML.
Args:
text: source string that needs to be escaped for HTML.
Returns:
HTML escaped string.
"""
if not text:
return text
return saxutils.escape(text, _HTML_ESCAPE_TABLE)
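# Illustrative sketch only (not used by the module): double quotes, backticks
# and pipes are escaped through _HTML_ESCAPE_TABLE, in addition to the '&',
# '<' and '>' handled by saxutils.escape(). The sample string is an assumption.
def _HtmlEscapeExample():
  """Returns the HTML-escaped form of a sample string."""
  return HtmlEscape('say "hi" & <b>')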
def FileSize(file_path):
"""Returns size of file in megabytes."""
return os.path.getsize(file_path) / BYTES_PER_MEGABYTE
def SizeAsString(size):
"""Converts megabyte float to a string."""
if size < 1000.0:
return "%0.2fMB" % size
size /= 1024.0
if size < 1000.0:
return "%0.2fGB" % size
else:
return "%0.2fTB" % (size / 1024.0)
def FileSizeAsString(file_path):
"""Returns size of file as a string."""
return SizeAsString(FileSize(file_path))
def DirectorySize(directory):
"""Returns size of directory in megabytes."""
directory_size = 0
if os.path.isdir(directory):
for (path, unused_dirs, files) in os.walk(directory):
for file_name in files:
file_path = os.path.join(path, file_name)
directory_size += os.path.getsize(file_path)
return directory_size / BYTES_PER_MEGABYTE
def DirectorySizeAsString(directory):
"""Returns size of directory as a string."""
return SizeAsString(DirectorySize(directory))
def CreateDirectory(directory):
"""Create entire directory path."""
if os.path.exists(directory):
return
try:
os.makedirs(directory)
except OSError:
PrintAndLog("Raising error: Cannot create directory \'%s\'" % directory)
raise
def CopyDirectory(source, destination, logger):
"""Copy from source to destination, which will be created if it does not exist."""
cmd = "Copying %s to %s" % (source, destination)
PrintAndLog(cmd, logger)
try:
distutils.dir_util.copy_tree(source, destination)
except distutils.errors.DistutilsFileError:
PrintAndLog("Raising error: Cannot copy to directory %s" % destination)
raise
def DiskSpace(path):
"""Returns remaining disk space in Megabytes."""
mount_info = os.statvfs(path)
return mount_info.f_bsize * mount_info.f_bavail / BYTES_PER_MEGABYTE
def Uid():
"""Returns a uid for identifying a globe building sequence."""
return "%d_%f" % (os.getpid(), time.time())
def GlobesToText(globes, template, sort_item, reverse=False, is_text=False):
"""Fills in globe template for each globe and returns as array of strings."""
result = []
# If it is text, sort the lower case version of the text.
if is_text:
items = sorted(globes.iteritems(),
key=lambda globe_pair: globe_pair[1][sort_item].lower(),
reverse=reverse)
# If it is NOT text, use default less than comparison.
else:
items = sorted(globes.iteritems(),
key=lambda globe_pair: globe_pair[1][sort_item],
reverse=reverse)
for [unused_key, globe] in iter(items):
next_entry = template
for [globe_term, globe_value] in globe.iteritems():
replace_item = "[$%s]" % globe_term.upper()
if globe_term == "globe" or globe_term == "info_loaded":
pass
elif globe_term == "size":
next_entry = next_entry.replace(replace_item, SizeAsString(globe_value))
else:
next_entry = next_entry.replace(replace_item, globe_value)
result.append(next_entry)
return result
def GlobeNameReplaceParams(globe_name):
"""Returns a single replacement parameter for the globe name."""
return {"[$GLOBE_NAME]": globe_name}
def ReplaceParams(text, replace_params):
"""Replace keys with values in the given text."""
for (key, value) in replace_params.iteritems():
text = text.replace(key, value)
return text
def OutputFile(file_name, replace_params):
"""Outputs a file to standard out with the globe name replaced."""
fp = open(file_name)
text = fp.read()
fp.close()
print ReplaceParams(text, replace_params)
def CreateInfoFile(path, description):
"""Create globe info file."""
content = "Portable Globe\n"
content += GmTimeStamp()
content += "\n%s" % TimeStamp()
content += "Globe description: %s\n" % description
CreateFile(path, content)
def CreateFile(path, content):
"""Create globe info file."""
try:
fp = open(path, "w")
fp.write(content)
fp.close()
except IOError as error:
print error
sys.exit(1)
def TimeStamp():
"""Create timestamp based on local time."""
return time.strftime("%Y-%m-%d %H:%M:%S\n", time.localtime())
def GmTimeStamp():
"""Create timestamp based on Greenwich Mean Time."""
return time.strftime("%Y-%m-%d %H:%M:%S GMT\n", time.gmtime())
def ConvertToQtNode(level, col, row):
"""Converts col, row, and level to corresponding qtnode string."""
qtnode = "0"
half_ndim = 1 << (level - 1)
for unused_ in xrange(level):
if row >= half_ndim and col < half_ndim:
qtnode += "0"
row -= half_ndim
elif row >= half_ndim and col >= half_ndim:
qtnode += "1"
row -= half_ndim
col -= half_ndim
elif row < half_ndim and col >= half_ndim:
qtnode += "2"
col -= half_ndim
else:
qtnode += "3"
half_ndim >>= 1
return qtnode
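# Illustrative sketch only (not used by the module): one worked tile address
# for the conversion above. The specific level/column/row values are assumed.
def _ConvertToQtNodeExample():
  """Returns True if the worked example matches the conversion above."""
  # Level 2, column 3, row 2: the first step takes branch "1" (row and col
  # both in the upper half), the second takes branch "2" (only col is),
  # giving the quadtree path "012".
  return ConvertToQtNode(2, 3, 2) == "012"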
def JsBoolString(bool_value):
"""Write boolean value as javascript boolean."""
if bool_value:
return "true"
else:
return "false"
def WriteHeader(content_type="text/html"):
"""Output header for web page."""
  # The explicit "\n" plus the newline added by print yield the blank line
  # that terminates the HTTP headers.
print "Content-Type: %s\n" % content_type
def ExecuteCmd(os_cmd, logger, dry_run=False):
"""Execute os command and log results.
Runs command, waits until it finishes, then analyses the return code, and
reports either "SUCCESS" or "FAILED".
Use if output of command is not desired, otherwise it should be redirected
to a file or use RunCmd below.
Args:
os_cmd: Linux shell command to execute.
logger: Logger responsible for outputting log messages.
dry_run: Whether command should only be printed but not run.
Throws:
OsCommandError
"""
PrintAndLog("Executing: %s" % os_cmd, logger)
if dry_run:
PrintAndLog("-- dry run --", logger)
return
try:
if isinstance(os_cmd, str):
os_cmd = shlex.split(os_cmd)
p = subprocess.Popen(os_cmd, shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
err_data = p.communicate()[1]
return_code = p.returncode
if return_code != 0:
PrintAndLog("Raising error: %s (return code %d)\n"
% (err_data, return_code), logger)
raise OsCommandError()
else:
PrintAndLog("SUCCESS", logger, None)
except Exception, e:
PrintAndLog("FAILED: %s" % e.__str__(), logger)
raise OsCommandError()
def ExecuteCmdInBackground(os_cmd, logger):
"""Execute os command in the background and log results.
Runs command in the background and returns immediately without waiting for
the execution to finish.
Use if the command will take longer time to finish than request timeout.
Args:
os_cmd: Linux shell command to execute.
logger: Logger responsible for outputting log messages.
Throws:
OsCommandError
"""
PrintAndLog("Executing in background: %s" % os_cmd, logger)
try:
subprocess.Popen(os_cmd + " &", shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except Exception, e:
PrintAndLog("FAILED: %s" % e.__str__(), logger)
raise OsCommandError()
def RunCmd(os_cmd):
"""Execute os command and return list of results and errors.
Runs command, waits until it finishes, then returns the output of execution
(if succeeded) or error information (if failed).
Use if output of command is needed.
Args:
os_cmd: Linux shell command to execute.
Returns:
Array of result lines.
"""
try:
if isinstance(os_cmd, str):
os_cmd = shlex.split(os_cmd)
p = subprocess.Popen(os_cmd, shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# capture stdout/stderr into memory variables
# NOTE: communicate() can only be called one time
# per call of Popen as after the first call
# stdout/stderr pipe handles are closed
results, err_data = p.communicate()
return_code = p.returncode
if return_code != 0:
results = "{0} (return code {1})".format(err_data, return_code)
return ["", results]
else:
return results.split("\n")
except Exception, e:
# print "FAILURE: %s" % e.__str__()
return ["", e.__str__()]
def PrintAndLog(msg, logger=None, prefix="\n"):
if prefix:
print "%s%s" % (prefix, msg)
else:
print msg
if logger:
logger.Log(msg)
def GetDbrootInfoJson(globe, name):
"""Get dbroot info as a json string.
Args:
globe: portable_globe object.
name: name of portable globe
Returns:
Dbroot info in Json formatted string.
"""
dbroot_info = {"name": name,
"has_imagery": globe.HasImagery(),
"has_terrain": globe.HasTerrain(),
"is_proto_imagery": globe.IsProtoImagery(),
}
return json.dumps(dbroot_info)
def NormalizeTargetPath(target):
"""Normalizes the target path.
Adds leading slash if needed, strips ending slashes.
Args:
target: The target path (fusion db publish point).
Returns:
Normalized target path.
"""
if not target:
return target
target = target.strip()
target = target.rstrip("/")
if not target:
return target
if target[0] != "/":
target = "/{0}".format(target)
return target
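# Illustrative sketch only (not used by the module): a few expected
# normalizations for the helper above. The sample paths are assumptions.
def _NormalizeTargetPathExamples():
  """Returns True if the sample paths normalize as described."""
  return (NormalizeTargetPath("portable/") == "/portable" and
          NormalizeTargetPath("/a/b//") == "/a/b" and
          NormalizeTargetPath("") == "")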
def GetServerAndPathFromUrl(url):
"""Gets a server and a path from the url.
Args:
url: the URL.
Returns:
tuple (server, path). The server is 'scheme://host:port'.
The path can be empty string.
Raises:
InvalidValueError: when the url is not valid.
"""
server = ""
path = ""
url_obj = urlparse.urlparse(url)
if url_obj.scheme and url_obj.netloc and url_obj.path:
server = "{0}://{1}".format(url_obj.scheme, url_obj.netloc)
path = url_obj.path
elif url_obj.scheme and url_obj.netloc:
server = "{0}://{1}".format(url_obj.scheme, url_obj.netloc)
elif url_obj.path:
path = url_obj.path
else:
raise errors.InvalidValueError("Invalid URL: %s" % url)
return (server, path)
def IsProcessRunningForGlobe(tool_name, base_dir):
"""Checks whether specified job is running for portable.
Checks if process is running by detecting it in the output returned by
executing "ps -ef | grep base_dir".
Args:
tool_name: tool name to check if it is present in list of running
processes.
base_dir: base directory for corresponding portable.
Returns:
whether specified job is running.
"""
ps_cmd = "ps -ef"
grep_cmd = "grep \"%s\"" % base_dir
ps_subprocess = subprocess.Popen(shlex.split(ps_cmd),
shell=False,
stdout=subprocess.PIPE)
grep_subprocess = subprocess.Popen(shlex.split(grep_cmd),
shell=False,
stdin=ps_subprocess.stdout,
stdout=subprocess.PIPE)
ps_subprocess.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
procs = grep_subprocess.communicate()[0]
if procs:
procs = procs.split("/n")
for proc in procs:
if proc.find(tool_name) > 0:
return True
return False
class Log(object):
"""Simple logger class."""
def __init__(self, log_file, enabled=True):
self.log_file_ = log_file
self.enabled_ = enabled
def CheckLogFolder(self):
return os.path.exists(os.path.dirname(self.log_file_))
def Clear(self):
"""Clear the log file."""
if not self.CheckLogFolder():
return
fp = open(self.log_file_, "w")
fp.close()
def Log(self, message):
"""Log message to cutter log."""
if not self.enabled_ or not self.CheckLogFolder():
return
fp = open(self.log_file_, "a")
fp.write("%s" % TimeStamp())
fp.write("%s\n" % message)
fp.close()
|
|
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import datetime
import Queue
from orderer import ab_pb2, ab_pb2_grpc
from common import common_pb2
import bdd_test_util
import bootstrap_util
import bdd_grpc_util
from grpc.beta import implementations
from grpc.framework.interfaces.face.face import AbortionError
from grpc.beta.interfaces import StatusCode
# The default chain ID when the system is statically bootstrapped for testing
TEST_CHAIN_ID = "testchainid"
def _defaultDataFunction(index):
payload = common_pb2.Payload(
header = common_pb2.Header(
chainHeader = common_pb2.ChainHeader(
chainID = TEST_CHAIN_ID,
type = common_pb2.ENDORSER_TRANSACTION,
),
signatureHeader = common_pb2.SignatureHeader(),
),
data = str("BDD test: {0}".format(datetime.datetime.utcnow())),
)
envelope = common_pb2.Envelope(
payload = payload.SerializeToString()
)
return envelope
class StreamHelper:
def __init__(self):
self.streamClosed = False
self.sendQueue = Queue.Queue()
self.receivedMessages = []
self.replyGenerator = None
def setReplyGenerator(self, replyGenerator):
assert self.replyGenerator == None, "reply generator already set!!"
self.replyGenerator = replyGenerator
def createSendGenerator(self, timeout = 2):
while True:
try:
nextMsg = self.sendQueue.get(True, timeout)
if nextMsg:
yield nextMsg
else:
#None indicates desire to close send
return
except Queue.Empty:
return
def readMessage(self):
for reply in self.readMessages(1):
return reply
assert False, "Received no messages"
def readMessages(self, expectedCount):
msgsReceived = []
counter = 0
try:
for reply in self.replyGenerator:
counter += 1
#print("received reply: {0}, counter = {1}".format(reply, counter))
msgsReceived.append(reply)
if counter == int(expectedCount):
break
except AbortionError as networkError:
self.handleNetworkError(networkError)
return msgsReceived
def handleNetworkError(self, networkError):
if networkError.code == StatusCode.OUT_OF_RANGE and networkError.details == "EOF":
print("Error received and ignored: {0}".format(networkError))
print()
self.streamClosed = True
else:
raise Exception("Unexpected NetworkError: {0}".format(networkError))
class DeliverStreamHelper(StreamHelper):
def __init__(self, ordererStub, entity, directory, nodeAdminTuple, timeout = 110):
StreamHelper.__init__(self)
self.nodeAdminTuple = nodeAdminTuple
self.directory = directory
self.entity = entity
# Set the UpdateMessage and start the stream
sendGenerator = self.createSendGenerator(timeout)
self.replyGenerator = ordererStub.Deliver(sendGenerator, timeout + 1)
def createSeekInfo(self, chainID, start = 'Oldest', end = 'Newest', behavior = 'FAIL_IF_NOT_READY'):
seekInfo = ab_pb2.SeekInfo(
start = seekPosition(start),
stop = seekPosition(end),
behavior = ab_pb2.SeekInfo.SeekBehavior.Value(behavior),
)
print("SeekInfo = {0}".format(seekInfo))
print("")
return seekInfo
def seekToRange(self, chainID = TEST_CHAIN_ID, start = 'Oldest', end = 'Newest'):
seekInfo = self.createSeekInfo(start = start, end = end, chainID = chainID)
envelope = bootstrap_util.createEnvelopeForMsg(directory=self.directory, chainId=chainID, msg=seekInfo, typeAsString="DELIVER_SEEK_INFO", nodeAdminTuple=self.nodeAdminTuple)
self.sendQueue.put(envelope)
def getBlocks(self):
blocks = []
try:
while True:
reply = self.readMessage()
if reply.HasField("block"):
blocks.append(reply.block)
#print("received reply: {0}, len(blocks) = {1}".format(reply, len(blocks)))
else:
if reply.status != common_pb2.SUCCESS:
print("Got error: {0}".format(reply.status))
print("Done receiving blocks")
break
except Exception as e:
print("getBlocks got error: {0}".format(e) )
return blocks
class UserRegistration:
def __init__(self, userName, directory):
self.userName= userName
self.directory = directory
self.tags = {}
# Dictionary of composeService->atomic broadcast grpc Stub
self.atomicBroadcastStubsDict = {}
# composeService->StreamHelper
self.abDeliversStreamHelperDict = {}
def getUserName(self):
return self.userName
def closeStreams(self):
for compose_service, deliverStreamHelper in self.abDeliversStreamHelperDict.iteritems():
deliverStreamHelper.sendQueue.put(None)
def connectToDeliverFunction(self, context, composeService, certAlias, nodeAdminTuple, timeout=1):
'Connect to the deliver function and drain messages to associated orderer queue'
assert not composeService in self.abDeliversStreamHelperDict, "Already connected to deliver stream on {0}".format(composeService)
streamHelper = DeliverStreamHelper(directory=self.directory,
ordererStub=self.getABStubForComposeService(context=context,
composeService=composeService),
entity=self, nodeAdminTuple=nodeAdminTuple)
self.abDeliversStreamHelperDict[composeService] = streamHelper
return streamHelper
def getDelivererStreamHelper(self, context, composeService):
assert composeService in self.abDeliversStreamHelperDict, "NOT connected to deliver stream on {0}".format(composeService)
return self.abDeliversStreamHelperDict[composeService]
def broadcastMessages(self, context, numMsgsToBroadcast, composeService, chainID=TEST_CHAIN_ID, dataFunc=_defaultDataFunction):
abStub = self.getABStubForComposeService(context, composeService)
replyGenerator = abStub.Broadcast(generateBroadcastMessages(chainID=chainID, numToGenerate = int(numMsgsToBroadcast), dataFunc=dataFunc), 2)
counter = 0
try:
for reply in replyGenerator:
counter += 1
print("{0} received reply: {1}, counter = {2}".format(self.getUserName(), reply, counter))
if counter == int(numMsgsToBroadcast):
break
except Exception as e:
print("Got error: {0}".format(e) )
print("Got error")
print("Done")
assert counter == int(numMsgsToBroadcast), "counter = {0}, expected {1}".format(counter, numMsgsToBroadcast)
def getABStubForComposeService(self, context, composeService):
'Return a Stub for the supplied composeService, will cache'
if composeService in self.atomicBroadcastStubsDict:
return self.atomicBroadcastStubsDict[composeService]
# Get the IP address of the server that the user registered on
root_certificates = self.directory.getTrustedRootsForOrdererNetworkAsPEM()
# ipAddress = "{0}:{1}".format(*bdd_test_util.getPortHostMapping(context.compose_containers, composeService, 7050))
ipAddress = bdd_test_util.ipFromContainerNamePart(composeService, context.compose_containers)
print("ipAddress in getABStubForComposeService == {0}".format(ipAddress))
channel = bdd_grpc_util.getGRPCChannel(ipAddress=ipAddress, port=7050, root_certificates=root_certificates, ssl_target_name_override=composeService)
# channel = getGRPCChannel(*bdd_test_util.getPortHostMapping(context.compose_containers, composeService, 7050))
newABStub = ab_pb2_grpc.AtomicBroadcastStub(channel)
self.atomicBroadcastStubsDict[composeService] = newABStub
return newABStub
# Registers a user on a specific composeService
def registerUser(context, secretMsg, composeService):
userName = secretMsg['enrollId']
if 'ordererUsers' in context:
pass
else:
context.ordererUsers = {}
if userName in context.ordererUsers:
raise Exception("Orderer user already registered: {0}".format(userName))
userRegistration = UserRegistration(secretMsg)
context.ordererUsers[userName] = userRegistration
return userRegistration
def getUserRegistration(context, enrollId):
userRegistration = None
if 'ordererUsers' in context:
pass
else:
        context.ordererUsers = {}
if enrollId in context.ordererUsers:
userRegistration = context.ordererUsers[enrollId]
else:
raise Exception("Orderer user has not been registered: {0}".format(enrollId))
return userRegistration
def seekPosition(position):
if position == 'Oldest':
return ab_pb2.SeekPosition(oldest = ab_pb2.SeekOldest())
elif position == 'Newest':
return ab_pb2.SeekPosition(newest = ab_pb2.SeekNewest())
else:
return ab_pb2.SeekPosition(specified = ab_pb2.SeekSpecified(number = position))
def convertSeek(utfString):
try:
return int(utfString)
except ValueError:
return str(utfString)
def createSeekInfo(chainID = TEST_CHAIN_ID, start = 'Oldest', end = 'Newest', behavior = 'FAIL_IF_NOT_READY'):
return common_pb2.Envelope(
payload = common_pb2.Payload(
header = common_pb2.Header(
channel_header = common_pb2.ChannelHeader( channel_id = chainID ).SerializeToString(),
signature_header = common_pb2.SignatureHeader().SerializeToString(),
),
data = ab_pb2.SeekInfo(
start = seekPosition(start),
stop = seekPosition(end),
behavior = ab_pb2.SeekInfo.SeekBehavior.Value(behavior),
).SerializeToString(),
).SerializeToString(),
)
def generateBroadcastMessages(chainID = TEST_CHAIN_ID, numToGenerate = 3, timeToHoldOpen = 1, dataFunc =_defaultDataFunction):
messages = []
for i in range(0, numToGenerate):
messages.append(dataFunc(i))
for msg in messages:
yield msg
time.sleep(timeToHoldOpen)
|
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Demonstrate basic message operations in Amazon Simple Queue Service (Amazon SQS).
"""
# snippet-start:[python.example_code.sqs.message_wrapper_imports]
import logging
import sys
import boto3
from botocore.exceptions import ClientError
import queue_wrapper
logger = logging.getLogger(__name__)
sqs = boto3.resource('sqs')
# snippet-end:[python.example_code.sqs.message_wrapper_imports]
# snippet-start:[python.example_code.sqs.SendMessage]
def send_message(queue, message_body, message_attributes=None):
"""
Send a message to an Amazon SQS queue.
:param queue: The queue that receives the message.
:param message_body: The body text of the message.
:param message_attributes: Custom attributes of the message. These are key-value
pairs that can be whatever you want.
:return: The response from SQS that contains the assigned message ID.
"""
if not message_attributes:
message_attributes = {}
try:
response = queue.send_message(
MessageBody=message_body,
MessageAttributes=message_attributes
)
except ClientError as error:
logger.exception("Send message failed: %s", message_body)
raise error
else:
return response
# snippet-end:[python.example_code.sqs.SendMessage]
# snippet-start:[python.example_code.sqs.SendMessageBatch]
def send_messages(queue, messages):
"""
Send a batch of messages in a single request to an SQS queue.
This request may return overall success even when some messages were not sent.
The caller must inspect the Successful and Failed lists in the response and
resend any failed messages.
:param queue: The queue to receive the messages.
:param messages: The messages to send to the queue. These are simplified to
contain only the message body and attributes.
:return: The response from SQS that contains the list of successful and failed
messages.
"""
try:
entries = [{
'Id': str(ind),
'MessageBody': msg['body'],
'MessageAttributes': msg['attributes']
} for ind, msg in enumerate(messages)]
response = queue.send_messages(Entries=entries)
if 'Successful' in response:
for msg_meta in response['Successful']:
logger.info(
"Message sent: %s: %s",
msg_meta['MessageId'],
messages[int(msg_meta['Id'])]['body']
)
if 'Failed' in response:
for msg_meta in response['Failed']:
logger.warning(
"Failed to send: %s: %s",
msg_meta['MessageId'],
messages[int(msg_meta['Id'])]['body']
)
except ClientError as error:
logger.exception("Send messages failed to queue: %s", queue)
raise error
else:
return response
# snippet-end:[python.example_code.sqs.SendMessageBatch]
# snippet-start:[python.example_code.sqs.ReceiveMessage]
def receive_messages(queue, max_number, wait_time):
"""
Receive a batch of messages in a single request from an SQS queue.
:param queue: The queue from which to receive messages.
:param max_number: The maximum number of messages to receive. The actual number
of messages received might be less.
:param wait_time: The maximum time to wait (in seconds) before returning. When
this number is greater than zero, long polling is used. This
can result in reduced costs and fewer false empty responses.
:return: The list of Message objects received. These each contain the body
of the message and metadata and custom attributes.
"""
try:
messages = queue.receive_messages(
MessageAttributeNames=['All'],
MaxNumberOfMessages=max_number,
WaitTimeSeconds=wait_time
)
for msg in messages:
logger.info("Received message: %s: %s", msg.message_id, msg.body)
except ClientError as error:
logger.exception("Couldn't receive messages from queue: %s", queue)
raise error
else:
return messages
# snippet-end:[python.example_code.sqs.ReceiveMessage]
# snippet-start:[python.example_code.sqs.DeleteMessage]
def delete_message(message):
"""
Delete a message from a queue. Clients must delete messages after they
are received and processed to remove them from the queue.
:param message: The message to delete. The message's queue URL is contained in
the message's metadata.
:return: None
"""
try:
message.delete()
logger.info("Deleted message: %s", message.message_id)
except ClientError as error:
logger.exception("Couldn't delete message: %s", message.message_id)
raise error
# snippet-end:[python.example_code.sqs.DeleteMessage]
# snippet-start:[python.example_code.sqs.DeleteMessageBatch]
def delete_messages(queue, messages):
"""
Delete a batch of messages from a queue in a single request.
:param queue: The queue from which to delete the messages.
:param messages: The list of messages to delete.
:return: The response from SQS that contains the list of successful and failed
message deletions.
"""
try:
entries = [{
'Id': str(ind),
'ReceiptHandle': msg.receipt_handle
} for ind, msg in enumerate(messages)]
response = queue.delete_messages(Entries=entries)
if 'Successful' in response:
for msg_meta in response['Successful']:
logger.info("Deleted %s", messages[int(msg_meta['Id'])].receipt_handle)
if 'Failed' in response:
for msg_meta in response['Failed']:
logger.warning(
"Could not delete %s",
messages[int(msg_meta['Id'])].receipt_handle
)
except ClientError:
logger.exception("Couldn't delete messages from queue %s", queue)
else:
return response
# snippet-end:[python.example_code.sqs.DeleteMessageBatch]
# snippet-start:[python.example_code.sqs.Scenario_SendReceiveBatch]
def usage_demo():
"""
Shows how to:
* Read the lines from this Python file and send the lines in
batches of 10 as messages to a queue.
* Receive the messages in batches until the queue is empty.
* Reassemble the lines of the file and verify they match the original file.
"""
def pack_message(msg_path, msg_body, msg_line):
return {
'body': msg_body,
'attributes': {
'path': {'StringValue': msg_path, 'DataType': 'String'},
'line': {'StringValue': str(msg_line), 'DataType': 'String'}
}
}
def unpack_message(msg):
return (msg.message_attributes['path']['StringValue'],
msg.body,
int(msg.message_attributes['line']['StringValue']))
print('-'*88)
print("Welcome to the Amazon Simple Queue Service (Amazon SQS) demo!")
print('-'*88)
queue = queue_wrapper.create_queue('sqs-usage-demo-message-wrapper')
with open(__file__) as file:
lines = file.readlines()
line = 0
batch_size = 10
received_lines = [None]*len(lines)
print(f"Sending file lines in batches of {batch_size} as messages.")
while line < len(lines):
messages = [pack_message(__file__, lines[index], index)
for index in range(line, min(line + batch_size, len(lines)))]
line = line + batch_size
send_messages(queue, messages)
print('.', end='')
sys.stdout.flush()
print(f"Done. Sent {len(lines) - 1} messages.")
print(f"Receiving, handling, and deleting messages in batches of {batch_size}.")
more_messages = True
while more_messages:
received_messages = receive_messages(queue, batch_size, 2)
print('.', end='')
sys.stdout.flush()
for message in received_messages:
path, body, line = unpack_message(message)
received_lines[line] = body
if received_messages:
delete_messages(queue, received_messages)
else:
more_messages = False
print('Done.')
if all([lines[index] == received_lines[index] for index in range(len(lines))]):
print(f"Successfully reassembled all file lines!")
else:
print(f"Uh oh, some lines were missed!")
queue.delete()
print("Thanks for watching!")
print('-'*88)
# snippet-end:[python.example_code.sqs.Scenario_SendReceiveBatch]
if __name__ == '__main__':
usage_demo()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for applying Pruning / Compression to tensors.
Examples:
# Get compression object.
compression_obj = get_matrix_compression_object(hparams, global_step)
# Creating a compressed tensor.
compressed_tensor = apply_matrix_compression(compression_obj, matrix_tensor)
# Create an update op.
  update_op = get_matrix_compression_update_op(compression_obj)
# Group update_op with train_op, and used the grouped op in place of train_op.
train_op = tf.group(train_op, update_op)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import py_utils
from lingvo.core import symbolic
import tensorflow.compat.v1 as tf
from graph_compression.compression_lib import compression_wrapper_py2 as compression_wrapper
from model_pruning.python import pruning
UPDATE_OP_COLLECTION = 'update_op'
CompressionOptions = compression_wrapper.CompressionOptions
UpdateOptions = compression_wrapper.UpdateOptions
def get_matrix_compression_object(hparams, # pylint:disable=invalid-name
global_step=None,
sparsity=None):
"""Returns a pruning/compression object.
Args:
    hparams: Pruning spec as defined in pruning.py;
global_step: A tensorflow variable that is used for scheduling
pruning/compression;
sparsity: A tensorflow scalar variable storing the sparsity.
Returns:
A Pruning or compression_lib.compression_op.ApplyCompression object.
"""
if global_step is None:
train_global_step = tf.train.get_global_step()
if train_global_step is None:
global_step = 0
else:
global_step = tf.cast(train_global_step, tf.int32)
if hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient']:
return pruning.Pruning(hparams, global_step, sparsity)
else:
return compression_wrapper.get_apply_compression(hparams,
global_step=global_step)
def apply_matrix_compression(matrix_compression_obj, # pylint:disable=invalid-name
weight,
scope='',
spec=None):
"""Apply pruning/compression to a weight tensor.
For pruning, this is equivalent to apply_mask; for compression, this is
equivalent to apply_compression.
Args:
matrix_compression_obj: A Pruning or
compression_lib.compression_op.ApplyCompression object;
weight: input weight tensor;
scope: the current variable scope. Defaults to ''.
spec: spec to use for the compression op.
Returns:
A TF node that represents the masked weight tensor if pruning_indicator is
True, and the compressed version of weight tensor if pruning_indicator is
False.
"""
if isinstance(matrix_compression_obj, pruning.Pruning):
prune_option = matrix_compression_obj.matrix_compression_spec.prune_option
return pruning.apply_mask(x=weight, scope=scope, prune_option=prune_option)
else:
compressed_matrix = matrix_compression_obj.apply_compression(
weight, scope, spec)
hparams = matrix_compression_obj.get_spec()
if hparams.use_collection:
tf.add_to_collection(UPDATE_OP_COLLECTION,
matrix_compression_obj.all_update_op())
return compressed_matrix
def apply_customized_matrix_compression(matrix_compression_obj, # pylint:disable=invalid-name
weight_params_fn,
weight_init_obj,
layer_obj,
weight_name,
weight_shape,
weight_dtype,
scope_name='pruning_interface',
spec=None):
"""Apply pruning or compression to a lingvo layer.
This provides a unified interface to perform pruning or compression for a
lingvo layer.
Args:
matrix_compression_obj: A Pruning or
compression_lib.lingvo_compression_op.ApplyCompression object;
weight_params_fn: functional handle to create model parameters;
weight_init_obj: a weight initialization object;
layer_obj: a layer object in the lingvo package, weight matrix of this
layer is pruned or compressed;
weight_name: name of the tensor that is compressed, str;
weight_shape: shape of the weight matrix;
weight_dtype: data type of the weight matrix;
    scope_name: TensorFlow scope for creating relevant variables.
spec: spec to use for the compression op.
Returns:
None.
"""
if isinstance(matrix_compression_obj, pruning.Pruning):
prune_option = matrix_compression_obj.matrix_compression_spec.prune_option
with tf.variable_scope(scope_name):
# Create mask and threshold variable and add them to pruning collection.
mask_pc = weight_params_fn(weight_shape, weight_init_obj.Constant(1.0),
weight_dtype)
threshold_pc = weight_params_fn([], weight_init_obj.Constant(0.0),
tf.float32)
layer_obj.CreateVariable('mask', mask_pc, trainable=False)
layer_obj.CreateVariable('threshold', threshold_pc, trainable=False)
if layer_obj.vars.mask not in tf.get_collection(pruning.MASK_COLLECTION):
tf.add_to_collection(pruning.WEIGHT_COLLECTION,
getattr(layer_obj.vars, weight_name))
tf.add_to_collection(pruning.MASK_COLLECTION, layer_obj.vars.mask)
tf.add_to_collection(pruning.THRESHOLD_COLLECTION,
layer_obj.vars.threshold)
if prune_option in ['first_order_gradient', 'second_order_gradient']:
grad_pc = weight_params_fn(weight_shape, weight_init_obj.Constant(0.0),
weight_dtype)
layer_obj.CreateVariable('gradient', grad_pc, trainable=False)
layer_obj.CreateVariable('old_weight', grad_pc, trainable=False)
layer_obj.CreateVariable('old_old_weight', grad_pc, trainable=False)
tf.add_to_collection(pruning.WEIGHT_GRADIENT_COLLECTION,
layer_obj.vars.gradient)
tf.add_to_collection(pruning.OLD_WEIGHT_COLLECTION,
layer_obj.vars.old_weight)
tf.add_to_collection(pruning.OLD_OLD_WEIGHT_COLLECTION,
layer_obj.vars.old_old_weight)
else:
matrix_compression_obj.customized_apply_compression(
getattr(layer_obj.vars, weight_name, None), layer_obj, weight_params_fn,
weight_init_obj, scope=scope_name, spec=spec,
a_matrix_tfvar_shape=weight_shape)
hparams = matrix_compression_obj.get_spec()
if hparams.use_collection:
tf.add_to_collection(UPDATE_OP_COLLECTION,
matrix_compression_obj.all_update_op())
def apply_pruning(pruning_obj, # pylint:disable=invalid-name
pruning_hparams,
weight_params_fn, weight_init_obj, layerobj,
wm_pc, dtype):
"""Apply pruning to an lingvo layer.
Args:
pruning_obj: a Pruning object;
pruning_hparams: a Pruning hparams object;
weight_params_fn: functional handle to create model parameters;
weight_init_obj: a weight initialization object;
layerobj: a layer object in the lingvo package;
wm_pc: weight matrix;
dtype: data type of the weight matrix.
Returns:
pruning_obj as passed in or a compression_obj.
"""
  # Pruning options that correspond to the pruning operations in model_pruning.
if pruning_hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient']:
mask_pc = weight_params_fn(wm_pc.shape, weight_init_obj.Constant(1.0),
dtype)
threshold_pc = weight_params_fn([], weight_init_obj.Constant(0.0),
tf.float32)
layerobj.CreateVariable('mask', mask_pc, trainable=False)
layerobj.CreateVariable('threshold', threshold_pc, trainable=False)
if layerobj.vars.mask not in tf.get_collection(pruning.MASK_COLLECTION):
tf.add_to_collection(pruning.WEIGHT_COLLECTION, layerobj.vars.wm)
tf.add_to_collection(pruning.MASK_COLLECTION, layerobj.vars.mask)
tf.add_to_collection(pruning.THRESHOLD_COLLECTION,
layerobj.vars.threshold)
return pruning_obj
else: # TODO(wanxin): add model_compression options.
return pruning_obj
def get_pruning_update(pruning_obj, pruning_hparams): # pylint:disable=invalid-name
"""Return pruning mask update op.
Note: clients are encouraged to use get_matrix_compression_update_op instead,
which has the same functionality as this function, but supports compression
too.
Args:
pruning_obj: a Pruning object;
pruning_hparams: a Pruning hparams object.
Returns:
a mask_update_op if the prune_option of the pruning_obj is 'weight',
'first_order_gradient', or 'second_order_gradient'.
Raises:
NotImplementedError if the prune_option of the pruning_obj is not 'weight',
'first_order_gradient', or 'second_order_gradient'.
"""
if pruning_hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient']:
return pruning_obj.conditional_mask_update_op()
else:
raise NotImplementedError()
def get_matrix_compression_update_op(matrix_compression_obj): # pylint:disable=invalid-name
"""Return pruning/compression update op.
  For pruning, this returns a conditional_mask_update_op; for compression, this
returns an ApplyCompression.all_update_op.
Args:
matrix_compression_obj: a Pruning or a compression_lib.ApplyCompression
object;
Returns:
a mask_update_op if the prune_option of the pruning_obj is 'weight',
'first_order_gradient', or 'second_order_gradient'; or an
ApplyCompression.all_update_op otherwise.
Raises:
NotImplementedError if the prune_option of the pruning_obj is not 'weight',
'first_order_gradient', or 'second_order_gradient' and update_option is not
0; in this case, the compression should be applied by calling
compression_obj.run_update_step(session=session).
"""
hparams = matrix_compression_obj.get_spec()
if hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient']:
return matrix_compression_obj.conditional_mask_update_op()
elif (hparams.update_option == UpdateOptions.TF_UPDATE or
hparams.update_option
== UpdateOptions.TF_AND_PYTHON_UPDATE):
# 'update_option' == TF_UPDATE means matrix compression, for which we can
# return an update op here. 'update_option' == PYTHON_UPDATE means
# dictionary learning, for which we cannot return an update op here, and
# need to explicitly call run_update_step(),
# see graph_compression/compression_lib/compression_op.py for more details.
if hparams.use_collection:
# If use_collection is True, then update_ops are retrieved from
# UPDATE_OP_COLLECTION, to ensure the same behavior as pruning.
update_ops = tf.get_collection(UPDATE_OP_COLLECTION)
return tf.group(*update_ops)
else:
return matrix_compression_obj.all_update_op()
else:
raise NotImplementedError()
def run_update_step(matrix_compression_obj, session, step_number=None): # pylint:disable=invalid-name
"""This the update step that needs to be called periodically."""
hparams = matrix_compression_obj.get_spec()
if (hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient'] or
hparams.update_option == UpdateOptions.TF_UPDATE):
update_op = get_matrix_compression_update_op(matrix_compression_obj)
session.run(update_op)
else:
matrix_compression_obj.run_update_step(session, step_number)
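# Usage sketch (illustrative only; `sess` and `global_step_value` are assumed
# to exist in the caller's training loop and are not defined in this module):
#
#   run_update_step(matrix_compression_obj, sess, step_number=global_step_value)
#
# For pruning and TF-based compression this runs the grouped TF update op
# returned by get_matrix_compression_update_op(); for python-update options
# (e.g. dictionary learning) it falls back to
# matrix_compression_obj.run_update_step(session, step_number).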
def add_compression_summaries(matrix_compression_obj): # pylint:disable=invalid-name
"""Add compression summaries.
Args:
matrix_compression_obj: a Pruning or a compression_lib.ApplyCompression
object.
Returns:
None
"""
if isinstance(matrix_compression_obj, pruning.Pruning):
matrix_compression_obj.add_pruning_summaries()
def flat_embedding_lookup(emb_table, flat_ids, vocab_size, # pylint:disable=invalid-name
matmul_axis=1,
fprop_mode='matmul'):
"""Performs embedding lookup operation.
Args:
emb_table: tf.Tensor containing the embedding vectors.
flat_ids: tf.Tensor of shape (number_ids,).
vocab_size: vocabulary size of the embedding table, int.
matmul_axis: the axis of flat_ids that is used for matmul, int.
fprop_mode: embedding lookup option, should be 'matmul' or 'gather'.
Returns:
Embedding lookup result.
"""
if fprop_mode == 'matmul':
lhs = tf.equal(
tf.expand_dims(flat_ids, matmul_axis),
tf.range(vocab_size, dtype=flat_ids.dtype))
return tf.matmul(tf.cast(lhs, emb_table.dtype), emb_table)
elif fprop_mode == 'gather':
return tf.nn.embedding_lookup(emb_table, flat_ids)
else:
raise ValueError(
'flat_embedding_lookup(): fprop_mode {} is not supported.'.format(
fprop_mode))
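# Example sketch (hypothetical tensors, not part of the library API): for an
# embedding table of shape [vocab_size, emb_dim], 'matmul' mode builds a
# one-hot comparison of flat_ids against tf.range(vocab_size) and multiplies
# it with the table (TPU friendly), while 'gather' mode calls
# tf.nn.embedding_lookup directly; both return the same values.
#
#   emb_table = tf.reshape(tf.range(8.0), [4, 2])   # vocab_size=4, emb_dim=2
#   ids = tf.constant([0, 3])
#   a = flat_embedding_lookup(emb_table, ids, vocab_size=4, fprop_mode='matmul')
#   b = flat_embedding_lookup(emb_table, ids, vocab_size=4, fprop_mode='gather')
#   # a and b both evaluate to [[0., 1.], [6., 7.]]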
class PruningOp(object):
"""A pruning op object.
This class encapsulates the methods that are needed for pruning (and
compression) so that both pruning and compression can be called in lingvo
using the same API.
"""
_pruning_hparams_dict = {}
_global_step = None
_pruning_obj = None
_pruning_hparams = None
@classmethod
def Setup(cls, pruning_hparams_dict, global_step): # pylint:disable=invalid-name
"""Set up the pruning op with pruning hyperparameters and global step.
Args:
pruning_hparams_dict: a dict containing pruning hyperparameters;
global_step: global step in TensorFlow.
"""
if cls._pruning_obj is not None:
pass
assert pruning_hparams_dict is not None
assert isinstance(pruning_hparams_dict, dict)
cls._pruning_hparams_dict = pruning_hparams_dict
cls._global_step = global_step
cls._pruning_hparams = pruning.get_pruning_hparams().override_from_dict(
pruning_hparams_dict)
cls._pruning_obj = get_matrix_compression_object(
cls._pruning_hparams, global_step=global_step)
add_compression_summaries(cls._pruning_obj)
@classmethod
def ApplyPruning(cls, pruning_hparams_dict, layerobj, weight_name, wm_pc, # pylint:disable=invalid-name
dtype, scope):
if not cls._pruning_obj:
train_global_step = py_utils.GetGlobalStep()
if train_global_step is None:
train_global_step = 0
cls.Setup(pruning_hparams_dict, global_step=train_global_step)
compression_op_spec = pruning.get_pruning_hparams().override_from_dict(
pruning_hparams_dict)
return apply_customized_matrix_compression(cls._pruning_obj,
py_utils.WeightParams,
py_utils.WeightInit,
layerobj, weight_name,
wm_pc.shape, dtype, scope,
compression_op_spec)
@classmethod
def GetMixResult(cls, theta, concat, lstmobj): # pylint:disable=invalid-name
"""Compute the mix result.
Args:
theta: a theta object in the LSTM cells;
concat: Tensor, concat of previous output and current state vector;
lstmobj: a LSTM cell object.
Returns:
result Tensor.
Raises:
NotImplementedError if prune_option is not 'weight',
'first_order_gradient', or 'second_order_gradient'.
"""
if cls._pruning_hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient'
]:
return tf.matmul(
concat,
lstmobj.QWeight(tf.multiply(theta.wm, theta.mask, 'masked_weight')))
elif cls._pruning_obj:
return lstmobj.compression_op.get_mix_operator(theta, concat)
else:
raise NotImplementedError()
@classmethod
def GetMatmulResult(cls,
a,
b,
softmax_layer_obj,
transpose_a=False,
transpose_b=False): # pylint:disable=invalid-name
"""Compute the compressed result of matmul(a,b).
Args:
a: a tensor of rank 2;
b: a tensor of rank 2;
softmax_layer_obj: a SimpleFullSoftmax layer object;
transpose_a: whether to transpose a before matmul;
transpose_b: whether to transpose b before matmul.
Returns:
result Tensor.
Raises:
NotImplementedError if prune_option is not 'weight',
'first_order_gradient', or 'second_order_gradient'
and pruning_obj is None.
"""
if cls._pruning_obj:
# current implementation works for num_shards = 1 in SimpleFullSoftmax.
return softmax_layer_obj.compression_ops[-1].get_matmul_operator(
a, b, softmax_layer_obj, transpose_a, transpose_b)
else:
raise NotImplementedError()
@classmethod
def GetEinSumResult(cls, inputs, proj_obj):
"""Compute the einsum result.
Args:
inputs: the left operand of the matmul operation.
proj_obj: the ProjectionLayer object from where get_einsum_operator
is called.
Returns:
result Tensor.
Raises:
NotImplementedError if pruning_obj is None.
"""
if cls._pruning_obj:
return proj_obj.compression_op.get_einsum_operator(
inputs, proj_obj)
else:
raise NotImplementedError()
@classmethod
def GetProjectLastDim(cls, inputs, weight, input_dim, output_dim, proj_obj):
"""Linear projection on the last dim of the input tensor along with pruning.
This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
tensor by using Einsum for the compute.
Args:
inputs: An input Tensor, the last dimension of which is input_dim.
weight: A weight matrix with shape [input_dim, output_dim].
input_dim: An integer or a symbolic dim, the last dimension of the inputs.
output_dim: An integer or a symbolic dim, the last dimension of the
outputs.
proj_obj: a ProjectionLayer object.
Returns:
An output Tensor of the same rank as inputs, the last dimension is
output_dim.
"""
theta = proj_obj.theta
p = proj_obj.params
input_dim = int(
symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim
) else input_dim)
output_dim = int(
symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
) else output_dim)
if (py_utils.use_tpu() and inputs.shape is not None and
inputs.shape.rank is not None and inputs.shape.rank < 26):
# Avoids reshape if feasible and uses Einsum.
if inputs.shape.rank == 2:
outputs = tf.matmul(inputs, weight)
else:
outputs = cls.GetEinSumResult(inputs, proj_obj)
else:
if p.pruning_hparams_dict[
'compression_option'] == CompressionOptions.MIXED_BLOCK_COMPRESSION:
# can directly call GetEinSumResult as it doesn't use einsum operator
# for this compression option.
outputs = cls.GetEinSumResult(inputs, proj_obj)
elif p.pruning_hparams_dict[
'compression_option'] == CompressionOptions.INPUTOUTPUT_COMPRESSION and p.pruning_hparams_dict[
'compress_input']:
blocked_inputs = tf.reshape(
inputs,
py_utils.ToStaticShape(
[-1, p.pruning_hparams_dict['input_block_size']]))
compressed_inputs = tf.reshape(
py_utils.Matmul(blocked_inputs, theta.b_matrix_tfvar),
py_utils.ToStaticShape([
-1, input_dim //
p.pruning_hparams_dict['input_compression_factor']
]))
else:
compressed_inputs = tf.reshape(inputs,
py_utils.ToStaticShape([-1, input_dim]))
if p.pruning_hparams_dict[
'compression_option'] == CompressionOptions.BLOCK_COMPRESSION:
if p.pruning_hparams_dict['block_method'] == 'mask':
intermediate_result = py_utils.Matmul(
compressed_inputs,
tf.multiply(theta.c_matrix_tfvar, theta.c_mask_tfvar))
elif p.pruning_hparams_dict['block_method'] == 'loop':
num_blocks = p.pruning_hparams_dict['block_compression_factor']
input_splitted = tf.split(compressed_inputs, num_blocks, axis=-1)
output_splitted = []
for i, input_i in enumerate(input_splitted):
output_splitted.append(
py_utils.Matmul(input_i, theta.c_matrix_tfvar[i, :, :]))
intermediate_result = tf.concat(output_splitted, axis=-1)
else:
intermediate_result = py_utils.Matmul(compressed_inputs,
theta.c_matrix_tfvar)
if p.pruning_hparams_dict[
'compression_option'] == CompressionOptions.INPUTOUTPUT_COMPRESSION and p.pruning_hparams_dict[
'compress_output']:
blocked_intermediate_result = tf.reshape(
intermediate_result,
py_utils.ToStaticShape([
-1, p.pruning_hparams_dict['output_block_size'] //
p.pruning_hparams_dict['output_compression_factor']
]))
outputs = py_utils.Matmul(blocked_intermediate_result,
theta.d_matrix_tfvar)
else:
outputs = intermediate_result
outputs = tf.reshape(
outputs,
tf.concat([
tf.cast(py_utils.GetShape(inputs)[:-1], tf.int32),
py_utils.ToStaticShape([output_dim])
],
axis=0))
return outputs
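# Illustrative shape walk-through (hypothetical sizes, not from the library):
# with inputs of shape [batch, time, input_dim] = [8, 16, 512] and
# output_dim = 256, the TPU/Einsum branch above keeps the leading dims and
# returns [8, 16, 256]; the compression branches instead flatten to
# [-1, input_dim] (optionally block-compressing the input first), matmul
# against the compressed weight factors, and reshape back to [8, 16, 256]
# in the final tf.reshape.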
@classmethod
def GetLastCompressionOp(cls):
if not cls._pruning_obj:
raise NotImplementedError()
elif cls._pruning_hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient'
]:
# choosing pruning instead of compression.
return None
else:
return cls._pruning_obj.get_last_compression_op()
@classmethod
def GetEmbeddingLookupResult(cls, theta, flat_ids,
fprop_mode='gather',
layer_obj=None):
if cls._pruning_hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient'
]:
return flat_embedding_lookup(
emb_table=tf.multiply(theta.wm, theta.mask),
flat_ids=flat_ids,
vocab_size=theta.wm.shape[0],
fprop_mode=fprop_mode)
elif cls._pruning_obj:
assert layer_obj
return layer_obj.compression_op.get_embedding_lookup_operator(
theta, flat_ids, fprop_mode)
else:
raise NotImplementedError()
@classmethod
def GetPruningUpdate(cls): # pylint:disable=invalid-name
# for pruning, it returns pruning_obj.conditional_mask_update_op()
return get_matrix_compression_update_op(cls._pruning_obj)
@classmethod
def ApplyTensorflowUpdate(cls): # pylint:disable=invalid-name
if not cls._pruning_obj:
return False
hparams = cls._pruning_obj.get_spec()
return (hparams.prune_option in [
'weight', 'first_order_gradient', 'second_order_gradient'
] or hparams.update_option == UpdateOptions.TF_UPDATE or
hparams.update_option
== UpdateOptions.TF_AND_PYTHON_UPDATE)
@classmethod
def ApplyPythonUpdate(cls): # pylint:disable=invalid-name
if not cls._pruning_obj:
return False
hparams = cls._pruning_obj.get_spec()
return (hparams.update_option
== UpdateOptions.PYTHON_UPDATE or
hparams.update_option
== UpdateOptions.TF_AND_PYTHON_UPDATE)
@classmethod
def ApplyTensorflowAndPythonUpdate(cls): # pylint:disable=invalid-name
"""Returns True if both Tensorflow and Python updates need to run."""
if not cls._pruning_obj:
return False
hparams = cls._pruning_obj.get_spec()
return (hparams.update_option ==
UpdateOptions.TF_AND_PYTHON_UPDATE)
@classmethod
def RunPythonUpdate(cls, session, global_step): # pylint:disable=invalid-name
run_update_step(cls._pruning_obj, session, global_step)
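# Rough usage sketch (illustrative; `layer`, `weight_name`, `w_params`,
# `hparams_dict`, `scope`, `sess` and `global_step_value` are placeholders,
# not defined in this module):
#
#   PruningOp.Setup(hparams_dict, global_step=py_utils.GetGlobalStep())
#   PruningOp.ApplyPruning(hparams_dict, layer, weight_name, w_params,
#                          dtype=tf.float32, scope=scope)
#   ...
#   # inside the training loop, after the train op:
#   if PruningOp.ApplyTensorflowUpdate():
#     sess.run(PruningOp.GetPruningUpdate())
#   if PruningOp.ApplyPythonUpdate():
#     PruningOp.RunPythonUpdate(sess, global_step_value)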
|
|
# Copyright (c) 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import eventlet.queue
import functools
from oslo_log import log as logging
from oslo_utils import excutils
from heat.common import exception
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import sync_point
from heat.objects import resource as resource_objects
from heat.rpc import api as rpc_api
from heat.rpc import listener_client
LOG = logging.getLogger(__name__)
class CancelOperation(BaseException):
"""Exception to cancel an in-progress operation on a resource.
This exception is raised when operations on a resource are cancelled.
"""
def __init__(self):
return super(CancelOperation, self).__init__('user triggered cancel')
class CheckResource(object):
def __init__(self,
engine_id,
rpc_client,
thread_group_mgr,
msg_queue,
input_data):
self.engine_id = engine_id
self._rpc_client = rpc_client
self.thread_group_mgr = thread_group_mgr
self.msg_queue = msg_queue
self.input_data = input_data
def _stale_resource_needs_retry(self, cnxt, rsrc, prev_template_id):
"""Determine whether a resource needs retrying after failure to lock.
Return True if we need to retry the check operation because of a
failure to acquire the lock. This can be either because the engine
holding the lock is no longer working, or because no other engine had
locked the resource and the data was just out of date.
In the former case, the lock will be stolen and the resource status
changed to FAILED.
"""
fields = {'current_template_id', 'engine_id'}
rs_obj = resource_objects.Resource.get_obj(cnxt,
rsrc.id,
refresh=True,
fields=fields)
if rs_obj.engine_id not in (None, self.engine_id):
if not listener_client.EngineListenerClient(
rs_obj.engine_id).is_alive(cnxt):
# steal the lock.
rs_obj.update_and_save({'engine_id': None})
# set the resource state as failed
status_reason = ('Worker went down '
'during resource %s' % rsrc.action)
rsrc.state_set(rsrc.action,
rsrc.FAILED,
six.text_type(status_reason))
return True
elif (rs_obj.engine_id is None and
rs_obj.current_template_id == prev_template_id):
LOG.debug('Resource id=%d stale; retrying check', rsrc.id)
return True
LOG.debug('Resource id=%d modified by another traversal', rsrc.id)
return False
def _handle_resource_failure(self, cnxt, is_update, rsrc_id,
stack, failure_reason):
failure_handled = stack.mark_failed(failure_reason)
if not failure_handled:
# Another concurrent update has taken over. But there is a
# possibility for that update to be waiting for this rsrc to
# complete, hence retrigger current rsrc for latest traversal.
self._retrigger_new_traversal(cnxt, stack.current_traversal,
is_update,
stack.id, rsrc_id)
def _retrigger_new_traversal(self, cnxt, current_traversal, is_update,
stack_id, rsrc_id):
latest_stack = parser.Stack.load(cnxt, stack_id=stack_id,
force_reload=True)
if current_traversal != latest_stack.current_traversal:
self.retrigger_check_resource(cnxt, is_update, rsrc_id,
latest_stack)
def _handle_stack_timeout(self, cnxt, stack):
failure_reason = u'Timed out'
stack.mark_failed(failure_reason)
def _handle_resource_replacement(self, cnxt,
current_traversal, new_tmpl_id, requires,
rsrc, stack, adopt_stack_data):
"""Create a replacement resource and trigger a check on it."""
try:
new_res_id = rsrc.make_replacement(new_tmpl_id, requires)
except exception.UpdateInProgress:
LOG.info("No replacement created - "
"resource already locked by new traversal")
return
if new_res_id is None:
LOG.info("No replacement created - "
"new traversal already in progress")
self._retrigger_new_traversal(cnxt, current_traversal, True,
stack.id, rsrc.id)
return
LOG.info("Replacing resource with new id %s", new_res_id)
rpc_data = sync_point.serialize_input_data(self.input_data)
self._rpc_client.check_resource(cnxt,
new_res_id,
current_traversal,
rpc_data, True,
adopt_stack_data)
def _do_check_resource(self, cnxt, current_traversal, tmpl, resource_data,
is_update, rsrc, stack, adopt_stack_data):
prev_template_id = rsrc.current_template_id
try:
if is_update:
requires = set(d.primary_key for d in resource_data.values()
if d is not None)
try:
check_resource_update(rsrc, tmpl.id, requires,
self.engine_id,
stack, self.msg_queue)
except resource.UpdateReplace:
self._handle_resource_replacement(cnxt, current_traversal,
tmpl.id, requires,
rsrc, stack,
adopt_stack_data)
return False
else:
check_resource_cleanup(rsrc, tmpl.id, self.engine_id,
stack.time_remaining(), self.msg_queue)
return True
except exception.UpdateInProgress:
if self._stale_resource_needs_retry(cnxt, rsrc, prev_template_id):
rpc_data = sync_point.serialize_input_data(self.input_data)
self._rpc_client.check_resource(cnxt,
rsrc.id,
current_traversal,
rpc_data, is_update,
adopt_stack_data)
except exception.ResourceFailure as ex:
action = ex.action or rsrc.action
reason = 'Resource %s failed: %s' % (action,
six.text_type(ex))
self._handle_resource_failure(cnxt, is_update, rsrc.id,
stack, reason)
except scheduler.Timeout:
self._handle_resource_failure(cnxt, is_update, rsrc.id,
stack, u'Timed out')
except CancelOperation as ex:
# Stack is already marked FAILED, so we just need to retrigger
# in case a new traversal has started and is waiting on us.
self._retrigger_new_traversal(cnxt, current_traversal, is_update,
stack.id, rsrc.id)
return False
def retrigger_check_resource(self, cnxt, is_update, resource_id, stack):
current_traversal = stack.current_traversal
graph = stack.convergence_dependencies.graph()
key = (resource_id, is_update)
if is_update:
# When a re-trigger is received for an update in the latest traversal,
# first check if the update key is present in the graph;
# if not, the latest traversal is waiting for a delete.
if (resource_id, is_update) not in graph:
key = (resource_id, not is_update)
else:
# When a re-trigger is received for a delete in the latest traversal,
# first check if the update key is present in the graph;
# if so, the latest traversal is waiting for an update.
if (resource_id, True) in graph:
# not is_update evaluates to True below, which means update
key = (resource_id, not is_update)
LOG.info('Re-trigger resource: (%(key1)s, %(key2)s)',
{'key1': key[0], 'key2': key[1]})
predecessors = set(graph[key])
try:
propagate_check_resource(cnxt, self._rpc_client, resource_id,
current_traversal, predecessors, key,
None, key[1], None)
except exception.EntityNotFound as e:
if e.entity != "Sync Point":
raise
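# Illustrative example (hypothetical ids, not from the original module): if
# the latest traversal's graph only contains the cleanup node (42, False)
# for a resource, a re-trigger received as (42, True) is flipped to
# (42, False) before looking up its predecessors; symmetrically, an
# is_update=False re-trigger is flipped to (42, True) when the graph is
# waiting on the update node.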
def _initiate_propagate_resource(self, cnxt, resource_id,
current_traversal, is_update, rsrc,
stack):
deps = stack.convergence_dependencies
graph = deps.graph()
graph_key = parser.ConvergenceNode(resource_id, is_update)
if graph_key not in graph and rsrc.replaces is not None:
# If we are a replacement, impersonate the replaced resource for
# the purposes of calculating whether subsequent resources are
# ready, since everybody has to work from the same version of the
# graph. Our real resource ID is sent in the input_data, so the
# dependencies will get updated to point to this resource in time
# for the next traversal.
graph_key = parser.ConvergenceNode(rsrc.replaces, is_update)
def _get_input_data(req_node, input_forward_data=None):
if req_node.is_update:
if input_forward_data is None:
return rsrc.node_data().as_dict()
else:
# do not re-resolve attrs
return input_forward_data
else:
# Don't send data if initiating clean-up for self i.e.
# initiating delete of a replaced resource
if req_node.rsrc_id != graph_key.rsrc_id:
# send replaced resource as needed_by if it exists
return (rsrc.replaced_by
if rsrc.replaced_by is not None
else resource_id)
return None
try:
input_forward_data = None
for req_node in sorted(deps.required_by(graph_key),
key=lambda n: n.is_update):
input_data = _get_input_data(req_node, input_forward_data)
if req_node.is_update:
input_forward_data = input_data
propagate_check_resource(
cnxt, self._rpc_client, req_node.rsrc_id,
current_traversal, set(graph[req_node]),
graph_key, input_data, req_node.is_update,
stack.adopt_stack_data)
if is_update:
if input_forward_data is None:
# we haven't resolved attribute data for the resource,
# so clear any old attributes so they may be re-resolved
rsrc.clear_stored_attributes()
else:
rsrc.store_attributes()
check_stack_complete(cnxt, stack, current_traversal,
graph_key.rsrc_id, deps, graph_key.is_update)
except exception.EntityNotFound as e:
if e.entity == "Sync Point":
# Reload the stack to determine the current traversal, and
# check the SyncPoint for the current node to determine if
# it is ready. If it is, then retrigger the current node
# with the appropriate data for the latest traversal.
stack = parser.Stack.load(cnxt, stack_id=rsrc.stack.id,
force_reload=True)
if current_traversal == stack.current_traversal:
LOG.debug('[%s] Traversal sync point missing.',
current_traversal)
return
self.retrigger_check_resource(cnxt, is_update,
resource_id, stack)
else:
raise
def check(self, cnxt, resource_id, current_traversal,
resource_data, is_update, adopt_stack_data,
rsrc, stack):
"""Process a node in the dependency graph.
The node may be associated with either an update or a cleanup of its
associated resource.
"""
if stack.has_timed_out():
self._handle_stack_timeout(cnxt, stack)
return
tmpl = stack.t
stack.adopt_stack_data = adopt_stack_data
stack.thread_group_mgr = self.thread_group_mgr
if is_update:
if (rsrc.replaced_by is not None and
rsrc.current_template_id != tmpl.id):
LOG.debug('Resource %s with id %s already replaced by %s; '
'not checking',
rsrc.name, resource_id, rsrc.replaced_by)
return
try:
check_resource_done = self._do_check_resource(cnxt,
current_traversal,
tmpl, resource_data,
is_update,
rsrc, stack,
adopt_stack_data)
if check_resource_done:
# initiate check on next set of resources from graph
self._initiate_propagate_resource(cnxt, resource_id,
current_traversal, is_update,
rsrc, stack)
except BaseException as exc:
with excutils.save_and_reraise_exception():
msg = six.text_type(exc)
LOG.exception("Unexpected exception in resource check.")
self._handle_resource_failure(cnxt, is_update, rsrc.id,
stack, msg)
def load_resource(cnxt, resource_id, resource_data,
current_traversal, is_update):
try:
return resource.Resource.load(cnxt, resource_id, current_traversal,
is_update, resource_data)
except (exception.ResourceNotFound, exception.NotFound):
# can be ignored
return None, None, None
def check_stack_complete(cnxt, stack, current_traversal, sender_id, deps,
is_update):
"""Mark the stack complete if the update is complete.
Complete is currently in the sense that all desired resources are in
service, not that superfluous ones have been cleaned up.
"""
roots = set(deps.roots())
if (sender_id, is_update) not in roots:
return
def mark_complete(stack_id, data):
stack.mark_complete()
sender_key = (sender_id, is_update)
sync_point.sync(cnxt, stack.id, current_traversal, True,
mark_complete, roots, {sender_key: None})
def propagate_check_resource(cnxt, rpc_client, next_res_id,
current_traversal, predecessors, sender_key,
sender_data, is_update, adopt_stack_data):
"""Trigger processing of node if all of its dependencies are satisfied."""
def do_check(entity_id, data):
rpc_client.check_resource(cnxt, entity_id, current_traversal,
data, is_update, adopt_stack_data)
sync_point.sync(cnxt, next_res_id, current_traversal,
is_update, do_check, predecessors,
{sender_key: sender_data})
def _check_for_message(msg_queue):
if msg_queue is None:
return
try:
message = msg_queue.get_nowait()
except eventlet.queue.Empty:
return
if message == rpc_api.THREAD_CANCEL:
raise CancelOperation
LOG.error('Unknown message "%s" received', message)
def check_resource_update(rsrc, template_id, requires, engine_id,
stack, msg_queue):
"""Create or update the Resource if appropriate."""
check_message = functools.partial(_check_for_message, msg_queue)
if rsrc.action == resource.Resource.INIT:
rsrc.create_convergence(template_id, requires, engine_id,
stack.time_remaining(), check_message)
else:
rsrc.update_convergence(template_id, requires, engine_id,
stack.time_remaining(), stack,
check_message)
def check_resource_cleanup(rsrc, template_id, engine_id,
timeout, msg_queue):
"""Delete the Resource if appropriate."""
check_message = functools.partial(_check_for_message, msg_queue)
rsrc.delete_convergence(template_id, engine_id, timeout,
check_message)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import util
from telemetry.page import gtest_test_results
from telemetry.page import page_test_results
from telemetry.page.actions import all_page_actions
from telemetry.page.actions import page_action
def _GetActionFromData(action_data):
action_name = action_data['action']
action = all_page_actions.FindClassWithName(action_name)
if not action:
logging.critical('Could not find an action named %s.', action_name)
logging.critical('Check the page set for a typo and check the error '
'log for possible Python loading/compilation errors.')
raise Exception('Action "%s" not found.' % action_name)
return action(action_data)
def GetCompoundActionFromPage(page, action_name):
if not action_name:
return []
action_data_list = getattr(page, action_name)
if not isinstance(action_data_list, list):
action_data_list = [action_data_list]
action_list = []
for subaction_data in action_data_list:
subaction_name = subaction_data['action']
if hasattr(page, subaction_name):
subaction = GetCompoundActionFromPage(page, subaction_name)
else:
subaction = [_GetActionFromData(subaction_data)]
action_list += subaction * subaction_data.get('repeat', 1)
return action_list
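# Illustrative sketch (hypothetical page attributes and action name, not from
# this module): given a page object with
#
#   page.smoothness = [{'action': 'scroll', 'repeat': 2}]
#
# GetCompoundActionFromPage(page, 'smoothness') returns two instances of the
# action class registered under 'scroll'; if an entry's 'action' value names
# another attribute on the page, that attribute is expanded recursively
# instead of being looked up via all_page_actions.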
class Failure(Exception):
"""Exception that can be thrown from PageMeasurement to indicate an
undesired but designed-for problem."""
pass
class PageTest(object):
"""A class styled on unittest.TestCase for creating page-specific tests."""
def __init__(self,
test_method_name,
action_name_to_run='',
needs_browser_restart_after_each_run=False,
discard_first_result=False,
clear_cache_before_each_run=False):
self.options = None
try:
self._test_method = getattr(self, test_method_name)
except AttributeError:
raise ValueError('No such method %s.%s' % (
self.__class__, test_method_name)) # pylint: disable=E1101
self._action_name_to_run = action_name_to_run
self._needs_browser_restart_after_each_run = (
needs_browser_restart_after_each_run)
self._discard_first_result = discard_first_result
self._clear_cache_before_each_run = clear_cache_before_each_run
@property
def discard_first_result(self):
"""When set to True, the first run of the test is discarded. This is
useful for cases where it's desirable to have some test resource cached so
the first run of the test can warm things up. """
return self._discard_first_result
@property
def clear_cache_before_each_run(self):
"""When set to True, the browser's disk and memory cache will be cleared
before each run."""
return self._clear_cache_before_each_run
def NeedsBrowserRestartAfterEachRun(self, tab): # pylint: disable=W0613
"""Override to specify browser restart after each run."""
return self._needs_browser_restart_after_each_run
def AddCommandLineOptions(self, parser):
"""Override to expose command-line options for this test.
The provided parser is an optparse.OptionParser instance and accepts all
normal optparse options. The parsed options are available in Run as
self.options."""
pass
def CustomizeBrowserOptions(self, options):
"""Override to add test-specific options to the BrowserOptions object"""
pass
def CustomizeBrowserOptionsForPage(self, page, options):
"""Add options specific to the test and the given page."""
if not self.CanRunForPage(page):
return
for action in GetCompoundActionFromPage(page, self._action_name_to_run):
action.CustomizeBrowserOptions(options)
def SetUpBrowser(self, browser):
"""Override to customize the browser right after it has launched."""
pass
def CanRunForPage(self, page): #pylint: disable=W0613
"""Override to customize if the test can be ran for the given page."""
return True
def WillRunPageSet(self, tab):
"""Override to do operations before the page set is navigated."""
pass
def DidRunPageSet(self, tab, results):
"""Override to do operations after page set is completed, but before browser
is torn down."""
pass
def DidStartHTTPServer(self, tab):
"""Override to do operations after the HTTP server is started."""
pass
def WillNavigateToPage(self, page, tab):
"""Override to do operations before the page is navigated."""
pass
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated, but before
any waiting for completion has occurred."""
pass
def WillRunAction(self, page, tab, action):
"""Override to do operations before running the action on the page."""
pass
def DidRunAction(self, page, tab, action):
"""Override to do operations after running the action on the page."""
pass
def CreatePageSet(self, args, options): # pylint: disable=W0613
"""Override to make this test generate its own page set instead of
allowing arbitrary page sets entered from the command-line."""
return None
def AddOutputOptions(self, parser):
parser.add_option('--output-format',
default=self.output_format_choices[0],
choices=self.output_format_choices,
help='Output format. Defaults to "%%default". '
'Can be %s.' % ', '.join(self.output_format_choices))
@property
def output_format_choices(self):
"""Allowed output formats. The default is the first item in the list."""
return ['gtest', 'none']
def PrepareResults(self, options):
if not hasattr(options, 'output_format'):
options.output_format = self.output_format_choices[0]
if options.output_format == 'gtest':
return gtest_test_results.GTestTestResults()
elif options.output_format == 'none':
return page_test_results.PageTestResults()
else:
# Should never be reached. The parser enforces the choices.
raise Exception('Invalid --output-format "%s". Valid choices are: %s'
% (options.output_format,
', '.join(self.output_format_choices)))
def Run(self, options, page, tab, results):
self.options = options
compound_action = GetCompoundActionFromPage(page, self._action_name_to_run)
self._RunCompoundAction(page, tab, compound_action)
try:
self._test_method(page, tab, results)
finally:
self.options = None
def _RunCompoundAction(self, page, tab, actions):
for i, action in enumerate(actions):
prev_action = actions[i - 1] if i > 0 else None
next_action = actions[i + 1] if i < len(actions) - 1 else None
if (action.RunsPreviousAction() and
next_action and next_action.RunsPreviousAction()):
raise page_action.PageActionFailed('Consecutive actions cannot both '
'have RunsPreviousAction() == True.')
if not (next_action and next_action.RunsPreviousAction()):
action.WillRunAction(page, tab)
self.WillRunAction(page, tab, action)
try:
action.RunAction(page, tab, prev_action)
finally:
self.DidRunAction(page, tab, action)
# Closing the connections periodically is needed; otherwise we won't be
# able to open enough sockets, and the pages will time out.
util.CloseConnections(tab)
@property
def action_name_to_run(self):
return self._action_name_to_run
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from tornado.escape import utf8, _unicode
from tornado import gen
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado import httputil
from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults
from tornado.log import gen_log
from tornado import stack_context
from tornado.tcpclient import TCPClient
from tornado.util import PY3
import base64
import collections
import copy
import functools
import re
import socket
import sys
from io import BytesIO
if PY3:
import urllib.parse as urlparse
else:
import urlparse
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine.
ssl = None
class SimpleAsyncHTTPClient(AsyncHTTPClient):
"""Non-blocking HTTP client with no external dependencies.
This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
Some features found in the curl-based AsyncHTTPClient are not yet
supported. In particular, proxies are not supported, connections
are not reused, and callers cannot select the network interface to be
used.
"""
def initialize(self, max_clients=10,
hostname_mapping=None, max_buffer_size=104857600,
resolver=None, defaults=None, max_header_size=None,
max_body_size=None):
"""Creates a AsyncHTTPClient.
Only a single AsyncHTTPClient instance exists per IOLoop
in order to provide limitations on the number of pending connections.
``force_instance=True`` may be used to suppress this behavior.
Note that because of this implicit reuse, unless ``force_instance``
is used, only the first call to the constructor actually uses
its arguments. It is recommended to use the ``configure`` method
instead of the constructor to ensure that arguments take effect.
``max_clients`` is the number of concurrent requests that can be
in progress; when this limit is reached additional requests will be
queued. Note that time spent waiting in this queue still counts
against the ``request_timeout``.
``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
It can be used to make local DNS changes when modifying system-wide
settings like ``/etc/hosts`` is not possible or desirable (e.g. in
unittests).
``max_buffer_size`` (default 100MB) is the number of bytes
that can be read into memory at once. ``max_body_size``
(defaults to ``max_buffer_size``) is the largest response body
that the client will accept. Without a
``streaming_callback``, the smaller of these two limits
applies; with a ``streaming_callback`` only ``max_body_size``
does.
.. versionchanged:: 4.2
Added the ``max_body_size`` argument.
"""
super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
self.max_clients = max_clients
self.queue = collections.deque()
self.active = {}
self.waiting = {}
self.max_buffer_size = max_buffer_size
self.max_header_size = max_header_size
self.max_body_size = max_body_size
# TCPClient could create a Resolver for us, but we have to do it
# ourselves to support hostname_mapping.
if resolver:
self.resolver = resolver
self.own_resolver = False
else:
self.resolver = Resolver()
self.own_resolver = True
if hostname_mapping is not None:
self.resolver = OverrideResolver(resolver=self.resolver,
mapping=hostname_mapping)
self.tcp_client = TCPClient(resolver=self.resolver)
def close(self):
super(SimpleAsyncHTTPClient, self).close()
if self.own_resolver:
self.resolver.close()
self.tcp_client.close()
def fetch_impl(self, request, callback):
key = object()
self.queue.append((key, request, callback))
if not len(self.active) < self.max_clients:
timeout_handle = self.io_loop.add_timeout(
self.io_loop.time() + min(request.connect_timeout,
request.request_timeout),
functools.partial(self._on_timeout, key, "in request queue"))
else:
timeout_handle = None
self.waiting[key] = (request, callback, timeout_handle)
self._process_queue()
if self.queue:
gen_log.debug("max_clients limit reached, request queued. "
"%d active, %d queued requests." % (
len(self.active), len(self.queue)))
def _process_queue(self):
with stack_context.NullContext():
while self.queue and len(self.active) < self.max_clients:
key, request, callback = self.queue.popleft()
if key not in self.waiting:
continue
self._remove_timeout(key)
self.active[key] = (request, callback)
release_callback = functools.partial(self._release_fetch, key)
self._handle_request(request, release_callback, callback)
def _connection_class(self):
return _HTTPConnection
def _handle_request(self, request, release_callback, final_callback):
self._connection_class()(
self, request, release_callback,
final_callback, self.max_buffer_size, self.tcp_client,
self.max_header_size, self.max_body_size)
def _release_fetch(self, key):
del self.active[key]
self._process_queue()
def _remove_timeout(self, key):
if key in self.waiting:
request, callback, timeout_handle = self.waiting[key]
if timeout_handle is not None:
self.io_loop.remove_timeout(timeout_handle)
del self.waiting[key]
def _on_timeout(self, key, info=None):
"""Timeout callback of request.
Construct a timeout HTTPResponse when a timeout occurs.
:arg object key: A simple object to mark the request.
:arg string info: More detailed timeout information.
"""
request, callback, timeout_handle = self.waiting[key]
self.queue.remove((key, request, callback))
error_message = "Timeout {0}".format(info) if info else "Timeout"
timeout_response = HTTPResponse(
request, 599, error=HTTPError(599, error_message),
request_time=self.io_loop.time() - request.start_time)
self.io_loop.add_callback(callback, timeout_response)
del self.waiting[key]
class _HTTPConnection(httputil.HTTPMessageDelegate):
_SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])
def __init__(self, client, request, release_callback,
final_callback, max_buffer_size, tcp_client,
max_header_size, max_body_size):
self.io_loop = IOLoop.current()
self.start_time = self.io_loop.time()
self.client = client
self.request = request
self.release_callback = release_callback
self.final_callback = final_callback
self.max_buffer_size = max_buffer_size
self.tcp_client = tcp_client
self.max_header_size = max_header_size
self.max_body_size = max_body_size
self.code = None
self.headers = None
self.chunks = []
self._decompressor = None
# Timeout handle returned by IOLoop.add_timeout
self._timeout = None
self._sockaddr = None
with stack_context.ExceptionStackContext(self._handle_exception):
self.parsed = urlparse.urlsplit(_unicode(self.request.url))
if self.parsed.scheme not in ("http", "https"):
raise ValueError("Unsupported url scheme: %s" %
self.request.url)
# urlsplit results have hostname and port results, but they
# didn't support ipv6 literals until python 2.7.
netloc = self.parsed.netloc
if "@" in netloc:
userpass, _, netloc = netloc.rpartition("@")
host, port = httputil.split_host_and_port(netloc)
if port is None:
port = 443 if self.parsed.scheme == "https" else 80
if re.match(r'^\[.*\]$', host):
# raw ipv6 addresses in urls are enclosed in brackets
host = host[1:-1]
self.parsed_hostname = host # save final host for _on_connect
if request.allow_ipv6 is False:
af = socket.AF_INET
else:
af = socket.AF_UNSPEC
ssl_options = self._get_ssl_options(self.parsed.scheme)
timeout = min(self.request.connect_timeout, self.request.request_timeout)
if timeout:
self._timeout = self.io_loop.add_timeout(
self.start_time + timeout,
stack_context.wrap(functools.partial(self._on_timeout, "while connecting")))
self.tcp_client.connect(host, port, af=af,
ssl_options=ssl_options,
max_buffer_size=self.max_buffer_size,
callback=self._on_connect)
def _get_ssl_options(self, scheme):
if scheme == "https":
if self.request.ssl_options is not None:
return self.request.ssl_options
# If we are using the defaults, don't construct a
# new SSLContext.
if (self.request.validate_cert and
self.request.ca_certs is None and
self.request.client_cert is None and
self.request.client_key is None):
return _client_ssl_defaults
ssl_ctx = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH,
cafile=self.request.ca_certs)
if not self.request.validate_cert:
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
if self.request.client_cert is not None:
ssl_ctx.load_cert_chain(self.request.client_cert,
self.request.client_key)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# See netutil.ssl_options_to_context
ssl_ctx.options |= ssl.OP_NO_COMPRESSION
return ssl_ctx
return None
def _on_timeout(self, info=None):
"""Timeout callback of _HTTPConnection instance.
Raise a timeout HTTPError when a timeout occurs.
:arg string info: More detailed timeout information.
"""
self._timeout = None
error_message = "Timeout {0}".format(info) if info else "Timeout"
if self.final_callback is not None:
raise HTTPError(599, error_message)
def _remove_timeout(self):
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def _on_connect(self, stream):
if self.final_callback is None:
# final_callback is cleared if we've hit our timeout.
stream.close()
return
self.stream = stream
self.stream.set_close_callback(self.on_connection_close)
self._remove_timeout()
if self.final_callback is None:
return
if self.request.request_timeout:
self._timeout = self.io_loop.add_timeout(
self.start_time + self.request.request_timeout,
stack_context.wrap(functools.partial(self._on_timeout, "during request")))
if (self.request.method not in self._SUPPORTED_METHODS and
not self.request.allow_nonstandard_methods):
raise KeyError("unknown method %s" % self.request.method)
for key in ('network_interface',
'proxy_host', 'proxy_port',
'proxy_username', 'proxy_password',
'proxy_auth_mode'):
if getattr(self.request, key, None):
raise NotImplementedError('%s not supported' % key)
if "Connection" not in self.request.headers:
self.request.headers["Connection"] = "close"
if "Host" not in self.request.headers:
if '@' in self.parsed.netloc:
self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1]
else:
self.request.headers["Host"] = self.parsed.netloc
username, password = None, None
if self.parsed.username is not None:
username, password = self.parsed.username, self.parsed.password
elif self.request.auth_username is not None:
username = self.request.auth_username
password = self.request.auth_password or ''
if username is not None:
if self.request.auth_mode not in (None, "basic"):
raise ValueError("unsupported auth_mode %s",
self.request.auth_mode)
auth = utf8(username) + b":" + utf8(password)
self.request.headers["Authorization"] = (b"Basic " +
base64.b64encode(auth))
if self.request.user_agent:
self.request.headers["User-Agent"] = self.request.user_agent
if not self.request.allow_nonstandard_methods:
# Some HTTP methods nearly always have bodies while others
# almost never do. Fail in this case unless the user has
# opted out of sanity checks with allow_nonstandard_methods.
body_expected = self.request.method in ("POST", "PATCH", "PUT")
body_present = (self.request.body is not None or
self.request.body_producer is not None)
if ((body_expected and not body_present) or
(body_present and not body_expected)):
raise ValueError(
'Body must %sbe None for method %s (unless '
'allow_nonstandard_methods is true)' %
('not ' if body_expected else '', self.request.method))
if self.request.expect_100_continue:
self.request.headers["Expect"] = "100-continue"
if self.request.body is not None:
# When body_producer is used the caller is responsible for
# setting Content-Length (or else chunked encoding will be used).
self.request.headers["Content-Length"] = str(len(
self.request.body))
if (self.request.method == "POST" and
"Content-Type" not in self.request.headers):
self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
if self.request.decompress_response:
self.request.headers["Accept-Encoding"] = "gzip"
req_path = ((self.parsed.path or '/') +
(('?' + self.parsed.query) if self.parsed.query else ''))
self.connection = self._create_connection(stream)
start_line = httputil.RequestStartLine(self.request.method,
req_path, '')
self.connection.write_headers(start_line, self.request.headers)
if self.request.expect_100_continue:
self._read_response()
else:
self._write_body(True)
def _create_connection(self, stream):
stream.set_nodelay(True)
connection = HTTP1Connection(
stream, True,
HTTP1ConnectionParameters(
no_keep_alive=True,
max_header_size=self.max_header_size,
max_body_size=self.max_body_size,
decompress=self.request.decompress_response),
self._sockaddr)
return connection
def _write_body(self, start_read):
if self.request.body is not None:
self.connection.write(self.request.body)
elif self.request.body_producer is not None:
fut = self.request.body_producer(self.connection.write)
if fut is not None:
fut = gen.convert_yielded(fut)
def on_body_written(fut):
fut.result()
self.connection.finish()
if start_read:
self._read_response()
self.io_loop.add_future(fut, on_body_written)
return
self.connection.finish()
if start_read:
self._read_response()
def _read_response(self):
# Ensure that any exception raised in read_response ends up in our
# stack context.
self.io_loop.add_future(
self.connection.read_response(self),
lambda f: f.result())
def _release(self):
if self.release_callback is not None:
release_callback = self.release_callback
self.release_callback = None
release_callback()
def _run_callback(self, response):
self._release()
if self.final_callback is not None:
final_callback = self.final_callback
self.final_callback = None
self.io_loop.add_callback(final_callback, response)
def _handle_exception(self, typ, value, tb):
if self.final_callback:
self._remove_timeout()
if isinstance(value, StreamClosedError):
if value.real_error is None:
value = HTTPError(599, "Stream closed")
else:
value = value.real_error
self._run_callback(HTTPResponse(self.request, 599, error=value,
request_time=self.io_loop.time() - self.start_time,
))
if hasattr(self, "stream"):
# TODO: this may cause a StreamClosedError to be raised
# by the connection's Future. Should we cancel the
# connection more gracefully?
self.stream.close()
return True
else:
# If our callback has already been called, we are probably
# catching an exception that is not caused by us but rather
# some child of our callback. Rather than drop it on the floor,
# pass it along, unless it's just the stream being closed.
return isinstance(value, StreamClosedError)
def on_connection_close(self):
if self.final_callback is not None:
message = "Connection closed"
if self.stream.error:
raise self.stream.error
try:
raise HTTPError(599, message)
except HTTPError:
self._handle_exception(*sys.exc_info())
def headers_received(self, first_line, headers):
if self.request.expect_100_continue and first_line.code == 100:
self._write_body(False)
return
self.code = first_line.code
self.reason = first_line.reason
self.headers = headers
if self._should_follow_redirect():
return
if self.request.header_callback is not None:
# Reassemble the start line.
self.request.header_callback('%s %s %s\r\n' % first_line)
for k, v in self.headers.get_all():
self.request.header_callback("%s: %s\r\n" % (k, v))
self.request.header_callback('\r\n')
def _should_follow_redirect(self):
return (self.request.follow_redirects and
self.request.max_redirects > 0 and
self.code in (301, 302, 303, 307, 308))
def finish(self):
data = b''.join(self.chunks)
self._remove_timeout()
original_request = getattr(self.request, "original_request",
self.request)
if self._should_follow_redirect():
assert isinstance(self.request, _RequestProxy)
new_request = copy.copy(self.request.request)
new_request.url = urlparse.urljoin(self.request.url,
self.headers["Location"])
new_request.max_redirects = self.request.max_redirects - 1
del new_request.headers["Host"]
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
# Client SHOULD make a GET request after a 303.
# According to the spec, 302 should be followed by the same
# method as the original request, but in practice browsers
# treat 302 the same as 303, and many servers use 302 for
# compatibility with pre-HTTP/1.1 user agents which don't
# understand the 303 status.
if self.code in (302, 303):
new_request.method = "GET"
new_request.body = None
for h in ["Content-Length", "Content-Type",
"Content-Encoding", "Transfer-Encoding"]:
try:
del self.request.headers[h]
except KeyError:
pass
new_request.original_request = original_request
final_callback = self.final_callback
self.final_callback = None
self._release()
self.client.fetch(new_request, final_callback)
self._on_end_request()
return
if self.request.streaming_callback:
buffer = BytesIO()
else:
buffer = BytesIO(data) # TODO: don't require one big string?
response = HTTPResponse(original_request,
self.code, reason=getattr(self, 'reason', None),
headers=self.headers,
request_time=self.io_loop.time() - self.start_time,
buffer=buffer,
effective_url=self.request.url)
self._run_callback(response)
self._on_end_request()
def _on_end_request(self):
self.stream.close()
def data_received(self, chunk):
if self._should_follow_redirect():
# We're going to follow a redirect so just discard the body.
return
if self.request.streaming_callback is not None:
self.request.streaming_callback(chunk)
else:
self.chunks.append(chunk)
if __name__ == "__main__":
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
main()
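# Minimal usage sketch (illustrative; assumes a reachable URL and uses the
# callback-style fetch available in this Tornado version):
#
#   from tornado.httpclient import AsyncHTTPClient
#   from tornado.ioloop import IOLoop
#
#   AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=20)
#
#   def handle_response(response):
#       print(response.code, len(response.body))
#       IOLoop.current().stop()
#
#   AsyncHTTPClient().fetch("http://example.com/", handle_response)
#   IOLoop.current().start()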
|
|
from functools import wraps
from flask import Blueprint, render_template, abort, g
from flask.ext.admin import babel
from flask.ext.admin._compat import with_metaclass
from flask.ext.admin import helpers as h
# For compatibility reasons import MenuLink
from flask.ext.admin.menu import MenuCategory, MenuView, MenuLink
def expose(url='/', methods=('GET',)):
"""
Use this decorator to expose views in your view classes.
:param url:
Relative URL for the view
:param methods:
Allowed HTTP methods. By default only GET is allowed.
"""
def wrap(f):
if not hasattr(f, '_urls'):
f._urls = []
f._urls.append((url, methods))
return f
return wrap
def expose_plugview(url='/'):
"""
Decorator to expose Flask's pluggable view classes
(``flask.views.View`` or ``flask.views.MethodView``).
:param url:
Relative URL for the view
.. versionadded:: 1.0.4
"""
def wrap(v):
handler = expose(url, v.methods)
if hasattr(v, 'as_view'):
return handler(v.as_view(v.__name__))
else:
return handler(v)
return wrap
# Base views
def _wrap_view(f):
@wraps(f)
def inner(self, *args, **kwargs):
# Store current admin view
h.set_current_view(self)
# Check if administrative piece is accessible
abort = self._handle_view(f.__name__, **kwargs)
if abort is not None:
return abort
return f(self, *args, **kwargs)
return inner
class AdminViewMeta(type):
"""
View metaclass.
Does some precalculations (like getting the list of view methods from the
class) to avoid calculating them for each view class instance.
"""
def __init__(cls, classname, bases, fields):
type.__init__(cls, classname, bases, fields)
# Gather exposed views
cls._urls = []
cls._default_view = None
for p in dir(cls):
attr = getattr(cls, p)
if hasattr(attr, '_urls'):
# Collect methods
for url, methods in attr._urls:
cls._urls.append((url, p, methods))
if url == '/':
cls._default_view = p
# Wrap views
setattr(cls, p, _wrap_view(attr))
class BaseViewClass(object):
pass
class BaseView(with_metaclass(AdminViewMeta, BaseViewClass)):
"""
Base administrative view.
Derive from this class to implement your administrative interface piece. For example::
class MyView(BaseView):
@expose('/')
def index(self):
return 'Hello World!'
"""
@property
def _template_args(self):
"""
Extra template arguments.
If you need to pass some extra parameters to the template,
you can override a particular view function, contribute the
arguments you want to pass to the template and call the parent view.
These arguments are local for this request and will be discarded
in the next request.
Any value passed through ``_template_args`` will override whatever
the parent view function passed to the template.
For example::
class MyAdmin(ModelView):
@expose('/')
def index(self):
self._template_args['name'] = 'foobar'
self._template_args['code'] = '12345'
return super(MyAdmin, self).index()
"""
args = getattr(g, '_admin_template_args', None)
if args is None:
args = g._admin_template_args = dict()
return args
def __init__(self, name=None, category=None, endpoint=None, url=None,
static_folder=None, static_url_path=None):
"""
Constructor.
:param name:
Name of this view. If not provided, will default to the class name.
:param category:
View category. If not provided, this view will be shown as a top-level menu item. Otherwise, it will
be in a submenu.
:param endpoint:
Base endpoint name for the view. For example, if there's a view method called "index" and
endpoint is set to "myadmin", you can use `url_for('myadmin.index')` to get the URL to the
view method. Defaults to the class name in lower case.
:param url:
Base URL. If provided, affects how URLs are generated. For example, if the url parameter
is "test", the resulting URL will look like "/admin/test/". If not provided, will
use endpoint as a base url. However, if URL starts with '/', absolute path is assumed
and '/admin/' prefix won't be applied.
:param static_folder:
Path to the folder with static files for this view; passed to the generated blueprint.
:param static_url_path:
Static URL Path. If provided, this specifies the path to the static url directory.
"""
self.name = name
self.category = category
self.endpoint = endpoint
self.url = url
self.static_folder = static_folder
self.static_url_path = static_url_path
# Initialized from create_blueprint
self.admin = None
self.blueprint = None
# Default view
if self._default_view is None:
raise Exception(u'Attempted to instantiate admin view %s without default view' % self.__class__.__name__)
def create_blueprint(self, admin):
"""
Create Flask blueprint.
"""
# Store admin instance
self.admin = admin
# If endpoint name is not provided, get it from the class name
if self.endpoint is None:
self.endpoint = self.__class__.__name__.lower()
# If the static_url_path is not provided, use the admin's
if not self.static_url_path:
self.static_url_path = admin.static_url_path
# If url is not provided, generate it from endpoint name
if self.url is None:
if self.admin.url != '/':
self.url = '%s/%s' % (self.admin.url, self.endpoint)
else:
if self == admin.index_view:
self.url = '/'
else:
self.url = '/%s' % self.endpoint
else:
if not self.url.startswith('/'):
self.url = '%s/%s' % (self.admin.url, self.url)
# If we're working from the root of the site, set prefix to None
if self.url == '/':
self.url = None
# If name is not provided, use the prettified class name
if self.name is None:
self.name = self._prettify_class_name(self.__class__.__name__)
# Create blueprint and register rules
self.blueprint = Blueprint(self.endpoint, __name__,
url_prefix=self.url,
subdomain=self.admin.subdomain,
template_folder='templates',
static_folder=self.static_folder,
static_url_path=self.static_url_path)
for url, name, methods in self._urls:
self.blueprint.add_url_rule(url,
name,
getattr(self, name),
methods=methods)
return self.blueprint
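# Illustrative examples of the URL generation above (hypothetical values):
# with admin.url == '/admin' and endpoint 'users', a view created without an
# explicit url gets the prefix '/admin/users'; passing url='/users' (leading
# slash) keeps the absolute path as-is; and a resulting url of '/' is turned
# into a prefix of None so the blueprint mounts at the application root.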
def render(self, template, **kwargs):
"""
Render template
:param template:
Template path to render
:param kwargs:
Template arguments
"""
# Store self as admin_view
kwargs['admin_view'] = self
kwargs['admin_base_template'] = self.admin.base_template
# Provide i18n support even if flask-babel is not installed
# or enabled.
kwargs['_gettext'] = babel.gettext
kwargs['_ngettext'] = babel.ngettext
kwargs['h'] = h
# Contribute extra arguments
kwargs.update(self._template_args)
return render_template(template, **kwargs)
def _prettify_class_name(self, name):
"""
Split words in PascalCase string into separate words.
:param name:
String to prettify
"""
return h.prettify_class_name(name)
def is_visible(self):
"""
Override this method if you want to dynamically hide or show administrative views
in the Flask-Admin menu structure.
By default, the item is visible in the menu.
Please note that an item should be both visible and accessible to be displayed in the menu.
"""
return True
def is_accessible(self):
"""
Override this method to add permission checks.
Flask-Admin does not make any assumptions about the authentication system used in your application, so it is
up to you to implement it.
By default, it will allow access for everyone.
"""
return True
def _handle_view(self, name, **kwargs):
"""
This method will be executed before calling any view method.
By default, it will check if the admin view is accessible, and if it is not,
it will return an HTTP 403 error.
:param name:
View function name
:param kwargs:
View function arguments
"""
if not self.is_accessible():
return abort(403)
@property
def _debug(self):
if not self.admin or not self.admin.app:
return False
return self.admin.app.debug
class AdminIndexView(BaseView):
"""
Default administrative interface index page when visiting the ``/admin/`` URL.
It can be overridden by passing your own view class to the ``Admin`` constructor::
class MyHomeView(AdminIndexView):
@expose('/')
def index(self):
arg1 = 'Hello'
return render_template('adminhome.html', arg1=arg1)
admin = Admin(index_view=MyHomeView())
Default values for the index page are:
* If a name is not provided, 'Home' will be used.
* If an endpoint is not provided, will default to ``admin``
* Default URL route is ``/admin``.
* Automatically associates with static folder.
* Default template is ``admin/index.html``
"""
def __init__(self, name=None, category=None,
endpoint=None, url=None,
template='admin/index.html'):
super(AdminIndexView, self).__init__(name or babel.lazy_gettext('Home'),
category,
endpoint or 'admin',
url or '/admin',
'static')
self._template = template
@expose()
def index(self):
return self.render(self._template)
class Admin(object):
"""
Collection of the admin views. Also manages menu structure.
"""
def __init__(self, app=None, name=None,
url=None, subdomain=None,
index_view=None,
translations_path=None,
endpoint=None,
static_url_path=None,
base_template=None):
"""
Constructor.
:param app:
Flask application object
:param name:
Application name. Will be displayed in the main menu and as a page title. Defaults to "Admin"
:param url:
Base URL
:param subdomain:
Subdomain to use
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param translations_path:
Location of the translation message catalogs. By default will use the translations
shipped with Flask-Admin.
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
:param static_url_path:
Static URL Path. If provided, this specifies the default path to the static url directory for
all its views. Can be overridden in view configuration.
:param base_template:
Override base HTML template for all static views. Defaults to `admin/base.html`.
"""
self.app = app
self.translations_path = translations_path
self._views = []
self._menu = []
self._menu_categories = dict()
self._menu_links = []
if name is None:
name = 'Admin'
self.name = name
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
self.static_url_path = static_url_path
self.subdomain = subdomain
self.base_template = base_template or 'admin/base.html'
# Add predefined index view
self.add_view(self.index_view)
# Register with application
if app is not None:
self._init_extension()
def add_view(self, view):
"""
Add a view to the collection.
:param view:
View to add.
"""
# Add to views
self._views.append(view)
# If app was provided in constructor, register view with Flask app
if self.app is not None:
self.app.register_blueprint(view.create_blueprint(self))
self._add_view_to_menu(view)
def add_link(self, link):
"""
Add link to menu links collection.
:param link:
Link to add.
"""
if link.category:
self._add_menu_item(link, link.category)
else:
self._menu_links.append(link)
def _add_menu_item(self, menu_item, target_category):
"""
        Add a menu item to the menu tree
        :param menu_item:
            Menu item to add
        :param target_category:
            Category name to add the item under
"""
if target_category:
category = self._menu_categories.get(target_category)
if category is None:
category = MenuCategory(target_category)
self._menu_categories[target_category] = category
self._menu.append(category)
category.add_child(menu_item)
else:
self._menu.append(menu_item)
def _add_view_to_menu(self, view):
self._add_menu_item(MenuView(view.name, view), view.category)
def init_app(self, app):
"""
Register all views with the Flask application.
:param app:
Flask application instance
"""
self.app = app
self._init_extension()
# Register views
for view in self._views:
app.register_blueprint(view.create_blueprint(self))
def _init_extension(self):
if not hasattr(self.app, 'extensions'):
self.app.extensions = dict()
admins = self.app.extensions.get('admin', [])
for p in admins:
if p.endpoint == self.endpoint:
raise Exception(u'Cannot have two Admin() instances with same'
u' endpoint name.')
if p.url == self.url and p.subdomain == self.subdomain:
raise Exception(u'Cannot assign two Admin() instances with same'
u' URL and subdomain to the same application.')
admins.append(self)
self.app.extensions['admin'] = admins
def menu(self):
"""
Return the menu hierarchy.
"""
return self._menu
def menu_links(self):
"""
Return menu links.
"""
return self._menu_links
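# A minimal, illustrative sketch of how the classes above are typically wired
# together. It is never called; names such as ``HelloView`` and the template
# path are assumptions made for this example only.
def _example_admin_setup():
    from flask import Flask
    class HelloView(BaseView):
        @expose('/')
        def index(self):
            # render() injects admin_view, the gettext helpers and the
            # configured base template into the template context.
            return self.render('admin/hello.html')
        def is_accessible(self):
            # _handle_view() aborts with HTTP 403 when this returns False.
            return True
    app = Flask(__name__)
    # Passing app registers the index view's blueprint immediately.
    admin = Admin(app, name='Example')
    # create_blueprint() is called for the view and it is added to the menu.
    admin.add_view(HelloView(name='Hello', category='Examples'))
    return admin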
|
|
#!/usr/bin/python
#
# sslsniff Captures data on read/recv or write/send functions of OpenSSL,
# GnuTLS and NSS
# For Linux, uses BCC, eBPF.
#
# USAGE: sslsniff.py [-h] [-p PID] [-u UID] [-x] [-c COMM] [-o] [-g] [-n] [-d]
# [--hexdump] [--max-buffer-size SIZE] [-l] [--handshake]
#
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 12-Aug-2016 Adrian Lopez Created this.
# 13-Aug-2016 Mark Drayton Fix SSL_Read
# 17-Aug-2016 Adrian Lopez Capture GnuTLS and add options
#
from __future__ import print_function
from bcc import BPF
import argparse
import binascii
import textwrap
import os.path
# arguments
examples = """examples:
./sslsniff # sniff OpenSSL and GnuTLS functions
./sslsniff -p 181 # sniff PID 181 only
./sslsniff -u 1000 # sniff only UID 1000
./sslsniff -c curl # sniff curl command only
./sslsniff --no-openssl # don't show OpenSSL calls
./sslsniff --no-gnutls # don't show GnuTLS calls
./sslsniff --no-nss # don't show NSS calls
./sslsniff --hexdump # show data as hex instead of trying to decode it as UTF-8
./sslsniff -x # show process UID and TID
./sslsniff -l # show function latency
./sslsniff -l --handshake # show SSL handshake latency
./sslsniff --extra-lib openssl:/path/libssl.so.1.1 # sniff extra library
"""
def ssllib_type(input_str):
valid_types = frozenset(['openssl', 'gnutls', 'nss'])
try:
lib_type, lib_path = input_str.split(':', 1)
except ValueError:
raise argparse.ArgumentTypeError("Invalid SSL library param: %r" % input_str)
if lib_type not in valid_types:
raise argparse.ArgumentTypeError("Invalid SSL library type: %r" % lib_type)
if not os.path.isfile(lib_path):
raise argparse.ArgumentTypeError("Invalid library path: %r" % lib_path)
return lib_type, lib_path
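# For example (paths illustrative), "--extra-lib openssl:/usr/lib/libssl.so.1.1"
# is parsed by ssllib_type() into the tuple ('openssl', '/usr/lib/libssl.so.1.1'),
# provided the file exists; the tuple is later dispatched through LIB_TRACERS.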
parser = argparse.ArgumentParser(
description="Sniff SSL data",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-p", "--pid", type=int, help="sniff this PID only.")
parser.add_argument("-u", "--uid", type=int, default=None,
help="sniff this UID only.")
parser.add_argument("-x", "--extra", action="store_true",
help="show extra fields (UID, TID)")
parser.add_argument("-c", "--comm",
help="sniff only commands matching string.")
parser.add_argument("-o", "--no-openssl", action="store_false", dest="openssl",
help="do not show OpenSSL calls.")
parser.add_argument("-g", "--no-gnutls", action="store_false", dest="gnutls",
help="do not show GnuTLS calls.")
parser.add_argument("-n", "--no-nss", action="store_false", dest="nss",
help="do not show NSS calls.")
parser.add_argument('-d', '--debug', dest='debug', action='count', default=0,
help='debug mode.')
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("--hexdump", action="store_true", dest="hexdump",
help="show data as hexdump instead of trying to decode it as UTF-8")
parser.add_argument('--max-buffer-size', type=int, default=8192,
help='Size of captured buffer')
parser.add_argument("-l", "--latency", action="store_true",
help="show function latency")
parser.add_argument("--handshake", action="store_true",
help="show SSL handshake latency, enabled only if latency option is on.")
parser.add_argument("--extra-lib", type=ssllib_type, action='append',
help="Intercept calls from extra library (format: lib_type:lib_path)")
args = parser.parse_args()
prog = """
#include <linux/ptrace.h>
#include <linux/sched.h> /* For TASK_COMM_LEN */
#define MAX_BUF_SIZE __MAX_BUF_SIZE__
struct probe_SSL_data_t {
u64 timestamp_ns;
u64 delta_ns;
u32 pid;
u32 tid;
u32 uid;
u32 len;
int buf_filled;
int rw;
char comm[TASK_COMM_LEN];
u8 buf[MAX_BUF_SIZE];
};
#define BASE_EVENT_SIZE ((size_t)(&((struct probe_SSL_data_t*)0)->buf))
#define EVENT_SIZE(X) (BASE_EVENT_SIZE + ((size_t)(X)))
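/* BASE_EVENT_SIZE is the size of the event header (everything before buf);
 * EVENT_SIZE(X) adds X payload bytes so that perf_submit() below only copies
 * the portion of buf that was actually filled. */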
BPF_PERCPU_ARRAY(ssl_data, struct probe_SSL_data_t, 1);
BPF_PERF_OUTPUT(perf_SSL_rw);
BPF_HASH(start_ns, u32);
BPF_HASH(bufs, u32, u64);
int probe_SSL_rw_enter(struct pt_regs *ctx, void *ssl, void *buf, int num) {
int ret;
u32 zero = 0;
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = pid_tgid;
u32 uid = bpf_get_current_uid_gid();
u64 ts = bpf_ktime_get_ns();
PID_FILTER
UID_FILTER
bufs.update(&tid, (u64*)&buf);
start_ns.update(&tid, &ts);
return 0;
}
static int SSL_exit(struct pt_regs *ctx, int rw) {
int ret;
u32 zero = 0;
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = (u32)pid_tgid;
u32 uid = bpf_get_current_uid_gid();
u64 ts = bpf_ktime_get_ns();
PID_FILTER
UID_FILTER
u64 *bufp = bufs.lookup(&tid);
if (bufp == 0)
return 0;
u64 *tsp = start_ns.lookup(&tid);
if (tsp == 0)
return 0;
int len = PT_REGS_RC(ctx);
if (len <= 0) // no data
return 0;
struct probe_SSL_data_t *data = ssl_data.lookup(&zero);
if (!data)
return 0;
data->timestamp_ns = ts;
data->delta_ns = ts - *tsp;
data->pid = pid;
data->tid = tid;
data->uid = uid;
data->len = (u32)len;
data->buf_filled = 0;
data->rw = rw;
u32 buf_copy_size = min((size_t)MAX_BUF_SIZE, (size_t)len);
bpf_get_current_comm(&data->comm, sizeof(data->comm));
if (bufp != 0)
ret = bpf_probe_read_user(&data->buf, buf_copy_size, (char *)*bufp);
bufs.delete(&tid);
start_ns.delete(&tid);
if (!ret)
data->buf_filled = 1;
else
buf_copy_size = 0;
perf_SSL_rw.perf_submit(ctx, data, EVENT_SIZE(buf_copy_size));
return 0;
}
int probe_SSL_read_exit(struct pt_regs *ctx) {
return (SSL_exit(ctx, 0));
}
int probe_SSL_write_exit(struct pt_regs *ctx) {
return (SSL_exit(ctx, 1));
}
BPF_PERF_OUTPUT(perf_SSL_do_handshake);
int probe_SSL_do_handshake_enter(struct pt_regs *ctx, void *ssl) {
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = (u32)pid_tgid;
u64 ts = bpf_ktime_get_ns();
PID_FILTER
UID_FILTER
start_ns.update(&tid, &ts);
return 0;
}
int probe_SSL_do_handshake_exit(struct pt_regs *ctx) {
u32 zero = 0;
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = (u32)pid_tgid;
u32 uid = bpf_get_current_uid_gid();
u64 ts = bpf_ktime_get_ns();
int ret;
PID_FILTER
UID_FILTER
u64 *tsp = start_ns.lookup(&tid);
if (tsp == 0)
return 0;
ret = PT_REGS_RC(ctx);
if (ret <= 0) // handshake failed
return 0;
struct probe_SSL_data_t *data = ssl_data.lookup(&zero);
if (!data)
return 0;
data->timestamp_ns = ts;
data->delta_ns = ts - *tsp;
data->pid = pid;
data->tid = tid;
data->uid = uid;
data->len = ret;
data->buf_filled = 0;
data->rw = 2;
bpf_get_current_comm(&data->comm, sizeof(data->comm));
start_ns.delete(&tid);
perf_SSL_do_handshake.perf_submit(ctx, data, EVENT_SIZE(0));
return 0;
}
"""
if args.pid:
prog = prog.replace('PID_FILTER', 'if (pid != %d) { return 0; }' % args.pid)
else:
prog = prog.replace('PID_FILTER', '')
if args.uid is not None:
prog = prog.replace('UID_FILTER', 'if (uid != %d) { return 0; }' % args.uid)
else:
prog = prog.replace('UID_FILTER', '')
prog = prog.replace('__MAX_BUF_SIZE__', str(args.max_buffer_size))
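# For example (values illustrative), running with "-p 1234 -u 1000" turns the
# placeholders above into:
#   PID_FILTER -> "if (pid != 1234) { return 0; }"
#   UID_FILTER -> "if (uid != 1000) { return 0; }"
# and __MAX_BUF_SIZE__ becomes the value of --max-buffer-size (8192 by default).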
if args.debug or args.ebpf:
print(prog)
if args.ebpf:
exit()
b = BPF(text=prog)
# It looks like SSL_read's arguments aren't available in a return probe so you
# need to stash the buffer address in a map on the function entry and read it
# on its exit (Mark Drayton)
#
def attach_openssl(lib):
b.attach_uprobe(name=lib, sym="SSL_write",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="SSL_write",
fn_name="probe_SSL_write_exit", pid=args.pid or -1)
b.attach_uprobe(name=lib, sym="SSL_read",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="SSL_read",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
if args.latency and args.handshake:
b.attach_uprobe(name="ssl", sym="SSL_do_handshake",
fn_name="probe_SSL_do_handshake_enter", pid=args.pid or -1)
b.attach_uretprobe(name="ssl", sym="SSL_do_handshake",
fn_name="probe_SSL_do_handshake_exit", pid=args.pid or -1)
def attach_gnutls(lib):
b.attach_uprobe(name=lib, sym="gnutls_record_send",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="gnutls_record_send",
fn_name="probe_SSL_write_exit", pid=args.pid or -1)
b.attach_uprobe(name=lib, sym="gnutls_record_recv",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="gnutls_record_recv",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
def attach_nss(lib):
b.attach_uprobe(name=lib, sym="PR_Write",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="PR_Write",
fn_name="probe_SSL_write_exit", pid=args.pid or -1)
b.attach_uprobe(name=lib, sym="PR_Send",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="PR_Send",
fn_name="probe_SSL_write_exit", pid=args.pid or -1)
b.attach_uprobe(name=lib, sym="PR_Read",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="PR_Read",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
b.attach_uprobe(name=lib, sym="PR_Recv",
fn_name="probe_SSL_rw_enter", pid=args.pid or -1)
b.attach_uretprobe(name=lib, sym="PR_Recv",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
LIB_TRACERS = {
"openssl": attach_openssl,
"gnutls": attach_gnutls,
"nss": attach_nss,
}
if args.openssl:
attach_openssl("ssl")
if args.gnutls:
attach_gnutls("gnutls")
if args.nss:
attach_nss("nspr4")
if args.extra_lib:
for lib_type, lib_path in args.extra_lib:
LIB_TRACERS[lib_type](lib_path)
# define output data structure in Python
# header
header = "%-12s %-18s %-16s %-7s %-6s" % ("FUNC", "TIME(s)", "COMM", "PID", "LEN")
if args.extra:
header += " %-7s %-7s" % ("UID", "TID")
if args.latency:
header += " %-7s" % ("LAT(ms)")
print(header)
# process event
start = 0
def print_event_rw(cpu, data, size):
print_event(cpu, data, size, "perf_SSL_rw")
def print_event_handshake(cpu, data, size):
print_event(cpu, data, size, "perf_SSL_do_handshake")
def print_event(cpu, data, size, evt):
global start
event = b[evt].event(data)
if event.len <= args.max_buffer_size:
buf_size = event.len
else:
buf_size = args.max_buffer_size
if event.buf_filled == 1:
buf = bytearray(event.buf[:buf_size])
else:
buf_size = 0
buf = b""
# Filter events by command
if args.comm:
if not args.comm == event.comm.decode('utf-8', 'replace'):
return
if start == 0:
start = event.timestamp_ns
time_s = (float(event.timestamp_ns - start)) / 1000000000
lat_str = "%.3f" % (event.delta_ns / 1000000) if event.delta_ns else "N/A"
s_mark = "-" * 5 + " DATA " + "-" * 5
e_mark = "-" * 5 + " END DATA " + "-" * 5
truncated_bytes = event.len - buf_size
if truncated_bytes > 0:
e_mark = "-" * 5 + " END DATA (TRUNCATED, " + str(truncated_bytes) + \
" bytes lost) " + "-" * 5
base_fmt = "%(func)-12s %(time)-18.9f %(comm)-16s %(pid)-7d %(len)-6d"
if args.extra:
base_fmt += " %(uid)-7d %(tid)-7d"
if args.latency:
base_fmt += " %(lat)-7s"
fmt = ''.join([base_fmt, "\n%(begin)s\n%(data)s\n%(end)s\n\n"])
if args.hexdump:
unwrapped_data = binascii.hexlify(buf)
data = textwrap.fill(unwrapped_data.decode('utf-8', 'replace'), width=32)
else:
data = buf.decode('utf-8', 'replace')
rw_event = {
0: "READ/RECV",
1: "WRITE/SEND",
2: "HANDSHAKE"
}
fmt_data = {
'func': rw_event[event.rw],
'time': time_s,
'lat': lat_str,
'comm': event.comm.decode('utf-8', 'replace'),
'pid': event.pid,
'tid': event.tid,
'uid': event.uid,
'len': event.len,
'begin': s_mark,
'end': e_mark,
'data': data
}
# use base_fmt if no buf filled
if buf_size == 0:
print(base_fmt % fmt_data)
else:
print(fmt % fmt_data)
b["perf_SSL_rw"].open_perf_buffer(print_event_rw)
b["perf_SSL_do_handshake"].open_perf_buffer(print_event_handshake)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import os
import sys
# Local Imports
from resource_management import get_bare_principal
from status_params import *
from resource_management import format_stack_version, Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
def configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol):
"""
Return a dictionary of additional configs to merge if Atlas HA is enabled.
:param atlas_hosts: List of hostnames that contain Atlas
:param metadata_port: Port number
:param is_atlas_ha_enabled: None, True, or False
:param metadata_protocol: http or https
:return: Dictionary with additional configs to merge to application-properties if HA is enabled.
"""
additional_props = {}
if atlas_hosts is None or len(atlas_hosts) == 0 or metadata_port is None:
return additional_props
    # Sort to guarantee each host sees the same values, assuming they are restarted at the same time.
atlas_hosts = sorted(atlas_hosts)
# E.g., id1,id2,id3,...,idn
_server_id_list = ["id" + str(i) for i in range(1, len(atlas_hosts) + 1)]
atlas_server_ids = ",".join(_server_id_list)
additional_props["atlas.server.ids"] = atlas_server_ids
i = 0
for curr_hostname in atlas_hosts:
id = _server_id_list[i]
prop_name = "atlas.server.address." + id
prop_value = curr_hostname + ":" + metadata_port
additional_props[prop_name] = prop_value
if "atlas.rest.address" in additional_props:
additional_props["atlas.rest.address"] += "," + metadata_protocol + "://" + prop_value
else:
additional_props["atlas.rest.address"] = metadata_protocol + "://" + prop_value
i += 1
# This may override the existing property
if i == 1 or (i > 1 and is_atlas_ha_enabled is False):
additional_props["atlas.server.ha.enabled"] = "false"
elif i > 1:
additional_props["atlas.server.ha.enabled"] = "true"
return additional_props
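# Illustrative example (hostnames made up): configs_for_ha(['host1', 'host2'],
# '21000', None, 'http') returns
#   {'atlas.server.ids': 'id1,id2',
#    'atlas.server.address.id1': 'host1:21000',
#    'atlas.server.address.id2': 'host2:21000',
#    'atlas.rest.address': 'http://host1:21000,http://host2:21000',
#    'atlas.server.ha.enabled': 'true'}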
# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
java_version = expect("/hostLevelParams/java_version", int)
zk_root = default('/configurations/application-properties/atlas.server.ha.zookeeper.zkroot', '/apache_atlas')
stack_supports_zk_security = True
atlas_kafka_group_id = default('/configurations/application-properties/atlas.kafka.hook.group.id', None)
if security_enabled:
_hostname_lowercase = config['hostname'].lower()
_atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
# stack version
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
metadata_home = format('{stack_root}/atlas-server')
metadata_bin = format("{metadata_home}/bin")
python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
metadata_start_script = format("{metadata_bin}/atlas_start.py")
metadata_stop_script = format("{metadata_bin}/atlas_stop.py")
# metadata local directory structure
log_dir = config['configurations']['atlas-env']['metadata_log_dir']
# service locations
hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf") if 'HADOOP_HOME' in os.environ else '/etc/hadoop/conf'
# some commands may need to supply the JAAS location when running as atlas
atlas_jaas_file = format("{conf_dir}/atlas_jaas.conf")
# user
user_group = config['configurations']['cluster-env']['user_group']
# metadata env
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
env_sh_template = config['configurations']['atlas-env']['content']
# credential provider
credential_provider = format( "jceks://file@{conf_dir}/atlas-site.jceks")
# command line args
ssl_enabled = default("/configurations/application-properties/atlas.enableTLS", False)
http_port = default("/configurations/application-properties/atlas.server.http.port", "21000")
https_port = default("/configurations/application-properties/atlas.server.https.port", "21443")
if ssl_enabled:
metadata_port = https_port
metadata_protocol = 'https'
else:
metadata_port = http_port
metadata_protocol = 'http'
metadata_host = config['hostname']
atlas_hosts = sorted(default('/clusterHostInfo/atlas_server_hosts', []))
metadata_server_host = atlas_hosts[0] if len(atlas_hosts) > 0 else "UNKNOWN_HOST"
# application properties
application_properties = dict(config['configurations']['application-properties'])
application_properties["atlas.server.bind.address"] = metadata_host
# trimming knox_key
if 'atlas.sso.knox.publicKey' in application_properties:
knox_key = application_properties['atlas.sso.knox.publicKey']
knox_key_without_new_line = knox_key.replace("\n","")
application_properties['atlas.sso.knox.publicKey'] = knox_key_without_new_line
if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
metadata_server_url = application_properties["atlas.rest.address"]
else:
# In HDP 2.3 and 2.4 the property was computed and saved to the local config but did not exist in the database.
metadata_server_url = format('{metadata_protocol}://{metadata_server_host}:{metadata_port}')
application_properties["atlas.rest.address"] = metadata_server_url
# Atlas HA should populate
# atlas.server.ids = id1,id2,...,idn
# atlas.server.address.id# = host#:port
# The user should not have to modify this property, but overriding it to False is still allowed when multiple Atlas servers exist
# This can be None, True, or False
is_atlas_ha_enabled = default("/configurations/application-properties/atlas.server.ha.enabled", None)
additional_ha_props = configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol)
for k,v in additional_ha_props.iteritems():
application_properties[k] = v
metadata_env_content = config['configurations']['atlas-env']['content']
metadata_opts = config['configurations']['atlas-env']['metadata_opts']
metadata_classpath = config['configurations']['atlas-env']['metadata_classpath']
data_dir = format("{stack_root}/atlas-server/data")
expanded_war_dir = os.environ['METADATA_EXPANDED_WEBAPP_DIR'] if 'METADATA_EXPANDED_WEBAPP_DIR' in os.environ else format("{stack_root}/atlas-server/server/webapp")
metadata_log4j_content = config['configurations']['atlas-log4j']['content']
metadata_solrconfig_content = default("/configurations/atlas-solrconfig/content", None)
atlas_log_level = config['configurations']['atlas-log4j']['atlas_log_level']
audit_log_level = config['configurations']['atlas-log4j']['audit_log_level']
atlas_log_max_backup_size = default("/configurations/atlas-log4j/atlas_log_max_backup_size", 256)
atlas_log_number_of_backup_files = default("/configurations/atlas-log4j/atlas_log_number_of_backup_files", 20)
# smoke test
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smoke_test_password = 'smoke'
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
security_check_status_file = format('{log_dir}/security_check.status')
# hbase
hbase_conf_dir = "/etc/hbase/conf"
atlas_search_backend = default("/configurations/application-properties/atlas.graph.index.search.backend", "")
search_backend_solr = atlas_search_backend.startswith('solr')
# infra solr
infra_solr_znode = default("/configurations/infra-solr-env/infra_solr_znode", None)
infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
infra_solr_replication_factor = 2 if len(infra_solr_hosts) > 1 else 1
atlas_solr_shards = default("/configurations/atlas-env/atlas_solr-shards", 1)
has_infra_solr = len(infra_solr_hosts) > 0
infra_solr_role_atlas = default('configurations/infra-solr-security-json/infra_solr_role_atlas', 'atlas_user')
infra_solr_role_dev = default('configurations/infra-solr-security-json/infra_solr_role_dev', 'dev')
infra_solr_role_ranger_audit = default('configurations/infra-solr-security-json/infra_solr_role_ranger_audit', 'ranger_audit_user')
# zookeeper
zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
# get comma separated lists of zookeeper hosts from clusterHostInfo
index = 0
zookeeper_quorum = ""
for host in zookeeper_hosts:
zookeeper_host = host
if zookeeper_port is not None:
zookeeper_host = host + ":" + str(zookeeper_port)
zookeeper_quorum += zookeeper_host
index += 1
if index < len(zookeeper_hosts):
zookeeper_quorum += ","
stack_supports_atlas_hdfs_site_on_namenode_ha = True
atlas_server_xmx = default("configurations/atlas-env/atlas_server_xmx", 2048)
atlas_server_max_new_size = default("configurations/atlas-env/atlas_server_max_new_size", 614)
hbase_master_hosts = default('/clusterHostInfo/hbase_master_hosts', [])
has_hbase_master = not len(hbase_master_hosts) == 0
atlas_hbase_setup = format("{exec_tmp_dir}/atlas_hbase_setup.rb")
atlas_hbase_secure_setup = format("{exec_tmp_dir}/atlas_hbase_secure_setup.rb")
atlas_kafka_setup = format("{exec_tmp_dir}/atlas_kafka_acl.sh")
atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
hbase_user_keytab = default('/configurations/hbase-env/hbase_user_keytab', None)
hbase_principal_name = default('/configurations/hbase-env/hbase_principal_name', None)
# ToDo: Kafka port to Atlas
# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
hosts_with_kafka = default('/clusterHostInfo/kafka_broker_hosts', [])
host_with_kafka = hostname in hosts_with_kafka
ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
has_ranger_tagsync = len(ranger_tagsync_hosts) > 0
rangertagsync_user = "rangertagsync"
kafka_keytab = default('/configurations/kafka-env/kafka_keytab', None)
kafka_principal_name = default('/configurations/kafka-env/kafka_principal_name', None)
default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
if True: #check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_home = os.path.join(stack_root, "kafka")
kafka_conf_dir = os.path.join(kafka_home, "config")
kafka_zk_endpoint = default("/configurations/kafka-broker/zookeeper.connect", None)
kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
(config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
if security_enabled and 'kafka_principal_name' in config['configurations']['kafka-env']:
_hostname_lowercase = config['hostname'].lower()
_kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
kafka_jaas_principal = _kafka_principal_name.replace('_HOST', _hostname_lowercase)
kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
kafka_kerberos_params = "-Djava.security.auth.login.config={0}/kafka_jaas.conf".format(kafka_conf_dir)
else:
kafka_kerberos_params = ''
kafka_jaas_principal = None
kafka_keytab_path = None
namenode_host = set(default("/clusterHostInfo/namenode_host", []))
has_namenode = not len(namenode_host) == 0
# ranger altas plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
retry_enabled = default("/commandParams/command_retry_enabled", False)
stack_supports_atlas_ranger_plugin = True
stack_supports_ranger_kerberos = True
# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
# ranger atlas plugin enabled property
enable_ranger_atlas = default("/configurations/ranger-atlas-plugin-properties/ranger-atlas-plugin-enabled", "No")
enable_ranger_atlas = True if enable_ranger_atlas.lower() == "yes" else False
# ranger hbase plugin enabled property
enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
if stack_supports_atlas_ranger_plugin and enable_ranger_atlas:
# for create_hdfs_directory
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
#create partial functions with common arguments for every HdfsResource call
#to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user = hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
# ranger atlas service/repository name
repo_name = str(config['clusterName']) + '_atlas'
repo_name_value = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
ssl_keystore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']
ssl_truststore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
xa_audit_hdfs_is_enabled = default('/configurations/ranger-atlas-audit/xasecure.audit.destination.hdfs', False)
# get ranger policy url
policymgr_mgr_url = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
downloaded_custom_connector = None
driver_curl_source = None
driver_curl_target = None
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_atlas:
external_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_admin_username', 'admin')
external_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_admin_password', 'admin')
external_ranger_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
external_ranger_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = external_admin_password
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
ranger_plugin_properties = config['configurations']['ranger-atlas-plugin-properties']
ranger_atlas_audit = config['configurations']['ranger-atlas-audit']
ranger_atlas_audit_attrs = config['configuration_attributes']['ranger-atlas-audit']
ranger_atlas_security = config['configurations']['ranger-atlas-security']
ranger_atlas_security_attrs = config['configuration_attributes']['ranger-atlas-security']
ranger_atlas_policymgr_ssl = config['configurations']['ranger-atlas-policymgr-ssl']
ranger_atlas_policymgr_ssl_attrs = config['configuration_attributes']['ranger-atlas-policymgr-ssl']
policy_user = config['configurations']['ranger-atlas-plugin-properties']['policy_user']
atlas_repository_configuration = {
'username' : config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
'password' : unicode(config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
'atlas.rest.address' : metadata_server_url,
'commonNameForCertificate' : config['configurations']['ranger-atlas-plugin-properties']['common.name.for.certificate'],
'ambari.service.check.user' : policy_user
}
custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
if len(custom_ranger_service_config) > 0:
atlas_repository_configuration.update(custom_ranger_service_config)
if security_enabled:
atlas_repository_configuration['policy.download.auth.users'] = metadata_user
atlas_repository_configuration['tag.download.auth.users'] = metadata_user
atlas_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': atlas_repository_configuration,
'description': 'atlas repo',
'name': repo_name,
'type': 'atlas',
}
# ranger atlas plugin section end
# atlas admin login username password
atlas_admin_username = config['configurations']['atlas-env']['atlas.admin.username']
atlas_admin_password = config['configurations']['atlas-env']['atlas.admin.password']
|
|
from datetime import datetime
from functools import partial
import numpy as np
from unittest.mock import Mock
from unittest.mock import call
from unittest.mock import patch
from dateutil.parser import parse
import pytest
class TestFit:
@pytest.fixture
def fit(self):
from palladium.fit import fit
return fit
@pytest.fixture
def dataset_loader(self):
dataset_loader = Mock()
dataset_loader.return_value = Mock(), Mock()
return dataset_loader
def test_it(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
X, y = object(), object()
dataset_loader_train.return_value = X, y
result = fit(dataset_loader_train, model, model_persister)
assert result is model
dataset_loader_train.assert_called_with()
model.fit.assert_called_with(X, y)
model_persister.write.assert_called_with(model)
model_persister.activate.assert_called_with(
model_persister.write.return_value)
def test_no_persist(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
X, y = object(), object()
dataset_loader_train.return_value = X, y
result = fit(dataset_loader_train, model, model_persister,
persist=False)
assert result is model
dataset_loader_train.assert_called_with()
model.fit.assert_called_with(X, y)
assert model_persister.call_count == 0
def test_evaluate_no_test_dataset(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
X, y = object(), object()
dataset_loader_train.return_value = X, y
result = fit(dataset_loader_train, model, model_persister,
evaluate=True)
assert result is model
dataset_loader_train.assert_called_with()
model.fit.assert_called_with(X, y)
assert model.score.call_count == 1
model.score.assert_called_with(X, y)
model_persister.write.assert_called_with(model)
def test_evaluate_with_test_dataset(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
dataset_loader_test = Mock()
X, y, X_test, y_test = object(), object(), object(), object()
dataset_loader_train.return_value = X, y
dataset_loader_test.return_value = X_test, y_test
result = fit(dataset_loader_train, model, model_persister,
dataset_loader_test=dataset_loader_test,
evaluate=True)
assert result is model
dataset_loader_train.assert_called_with()
dataset_loader_test.assert_called_with()
model.fit.assert_called_with(X, y)
assert model.score.call_count == 2
assert model.score.mock_calls[0] == call(X, y)
assert model.score.mock_calls[1] == call(X_test, y_test)
model_persister.write.assert_called_with(model)
def test_evaluate_annotations(self, fit, dataset_loader):
model = Mock()
model.score.side_effect = [0.9, 0.8]
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=Mock(),
dataset_loader_test=dataset_loader,
persist_if_better_than=0.9,
)
assert result.__metadata__['score_train'] == 0.9
assert result.__metadata__['score_test'] == 0.8
def test_persist_if_better_than(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
dataset_loader_test=dataset_loader,
persist_if_better_than=0.9,
)
assert result is model
assert model_persister.write.call_count == 1
def test_persist_if_better_than_false(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
dataset_loader_test=dataset_loader,
persist_if_better_than=0.91,
)
assert result is model
assert model_persister.write.call_count == 0
def test_persist_if_better_than_persist_false(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
persist=False,
dataset_loader_test=dataset_loader,
persist_if_better_than=0.9,
)
assert result is model
assert model_persister.write.call_count == 0
def test_persist_if_better_than_no_dataset_test(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
with pytest.raises(ValueError):
fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
dataset_loader_test=None,
persist_if_better_than=0.9,
)
def test_activate_no_persist(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
persist=False,
)
assert result is model
        assert model_persister.activate.call_count == 0
def test_timestamp(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
def persist(model):
assert 'train_timestamp' in model.__metadata__
model_persister.write.side_effect = persist
before_fit = datetime.now()
result = fit(
dataset_loader,
model,
model_persister,
)
after_fit = datetime.now()
assert result is model
timestamp = parse(model.__metadata__['train_timestamp'])
assert before_fit < timestamp < after_fit
model_persister.write.assert_called_with(model)
def test_activate():
from palladium.fit import activate
persister = Mock()
activate(persister, 2)
persister.activate.assert_called_with(2)
def test_delete():
from palladium.fit import delete
persister = Mock()
delete(persister, 2)
persister.delete.assert_called_with(2)
class TestGridSearch:
@pytest.fixture
def grid_search(self):
from palladium.fit import grid_search
return grid_search
def test_it(self, grid_search):
model, dataset_loader_train = Mock(), Mock()
grid_search_params = {'verbose': 4}
X, y = object(), object()
dataset_loader_train.return_value = X, y
scores = [
Mock(mean_validation_score=0.1),
Mock(mean_validation_score=0.2),
]
with patch('palladium.fit.GridSearchCV') as GridSearchCV:
GridSearchCV().grid_scores_ = scores
result = grid_search(
dataset_loader_train, model, grid_search_params)
assert result == list(reversed(scores))
dataset_loader_train.assert_called_with()
GridSearchCV.assert_called_with(model, refit=False, verbose=4)
GridSearchCV().fit.assert_called_with(X, y)
def test_no_score_method_raises(self, grid_search):
model, dataset_loader_train = Mock(spec=['fit', 'predict']), Mock()
dataset_loader_train.return_value = object(), object()
with pytest.raises(ValueError):
grid_search(dataset_loader_train, model, {})
def test_grid_search(self, grid_search):
model, dataset_loader_train = Mock(), Mock()
dataset_loader_train.return_value = (
np.random.random((10, 10)), np.random.random(10))
CVIterator = Mock()
def cv_iterator(n, p):
return CVIterator(n=n, p=p)
grid_search_params = {'cv': partial(cv_iterator, p=2)}
scores = [
Mock(mean_validation_score=0.1),
Mock(mean_validation_score=0.2),
]
with patch('palladium.fit.GridSearchCV') as GridSearchCV:
GridSearchCV().grid_scores_ = scores
grid_search(dataset_loader_train, model, grid_search_params)
GridSearchCV.assert_called_with(model, refit=False,
cv=CVIterator.return_value)
CVIterator.assert_called_with(n=10, p=2)
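# A minimal sketch (not a test) of calling the fit() entry point exercised
# above; the loader/model/persister objects are illustrative placeholders:
#
#   from palladium.fit import fit
#   model = fit(
#       dataset_loader_train=train_loader,   # callable returning (X, y)
#       model=estimator,                     # object with fit()/score()
#       model_persister=persister,           # object with write()/activate()
#       dataset_loader_test=test_loader,
#       evaluate=True,                       # also records train/test scores
#   )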
|
|
import datetime
import itertools
import logging
import os
import six
import sys
import threading
from collections import deque
from pkg_resources import parse_version
from rbtools.api.errors import APIError
from rbtools.clients import SCMClient, RepositoryInfo
from rbtools.clients.errors import InvalidRevisionSpecError
from rbtools.utils.checks import check_gnu_diff, check_install
from rbtools.utils.filesystem import make_tempfile
from rbtools.utils.process import die, execute
# This specific import is necessary to handle the paths for
# cygwin enabled machines.
if (sys.platform.startswith('win')
or sys.platform.startswith('cygwin')):
import ntpath as cpath
else:
import posixpath as cpath
class get_elements_from_label_thread(threading.Thread):
def __init__(self, threadID, dir_name, label, elements):
self.threadID = threadID
self.dir_name = dir_name
self.elements = elements
# Remove any trailing vobstag not supported by cleartool find.
try:
label, vobstag = label.rsplit('@', 1)
except:
pass
self.label = label
if sys.platform.startswith('win'):
self.cc_xpn = '%CLEARCASE_XPN%'
else:
self.cc_xpn = '$CLEARCASE_XPN'
threading.Thread.__init__(self)
def run(self):
"""Returns a dictionnary of ClearCase elements (oid + version)
belonging to a label and identified by path.
"""
output = execute(
['cleartool', 'find', self.dir_name, '-version',
'lbtype(%s)' % self.label, '-exec',
r'cleartool describe -fmt "%On\t%En\t%Vn\n" ' + self.cc_xpn],
extra_ignore_errors=(1,), with_errors=False)
for line in output.split('\n'):
# Does not process empty lines.
if not line:
continue
oid, path, version = line.split('\t', 2)
self.elements[path] = {
'oid': oid,
'version': version,
}
class ClearCaseClient(SCMClient):
"""
    A wrapper around the ClearCase command-line tools that fetches repository
    information and generates compatible diffs.
    This client assumes that Cygwin is installed on Windows.
"""
name = 'ClearCase'
viewtype = None
supports_patch_revert = True
REVISION_ACTIVITY_BASE = '--rbtools-activity-base'
REVISION_ACTIVITY_PREFIX = 'activity:'
REVISION_BRANCH_BASE = '--rbtools-branch-base'
REVISION_BRANCH_PREFIX = 'brtype:'
REVISION_CHECKEDOUT_BASE = '--rbtools-checkedout-base'
REVISION_CHECKEDOUT_CHANGESET = '--rbtools-checkedout-changeset'
REVISION_FILES = '--rbtools-files'
REVISION_LABEL_BASE = '--rbtools-label-base'
REVISION_LABEL_PREFIX = 'lbtype:'
def __init__(self, **kwargs):
super(ClearCaseClient, self).__init__(**kwargs)
def get_repository_info(self):
"""Returns information on the Clear Case repository.
This will first check if the cleartool command is installed and in the
path, and that the current working directory is inside of the view.
"""
if not check_install(['cleartool', 'help']):
logging.debug('Unable to execute "cleartool help": skipping '
'ClearCase')
return None
viewname = execute(["cleartool", "pwv", "-short"]).strip()
if viewname.startswith('** NONE'):
return None
# Now that we know it's ClearCase, make sure we have GNU diff
# installed, and error out if we don't.
check_gnu_diff()
property_lines = execute(
["cleartool", "lsview", "-full", "-properties", "-cview"],
split_lines=True)
for line in property_lines:
properties = line.split(' ')
if properties[0] == 'Properties:':
# Determine the view type and check if it's supported.
#
# Specifically check if webview was listed in properties
# because webview types also list the 'snapshot'
# entry in properties.
if 'webview' in properties:
die("Webviews are not supported. You can use rbt commands"
" only in dynamic or snapshot views.")
if 'dynamic' in properties:
self.viewtype = 'dynamic'
else:
self.viewtype = 'snapshot'
break
# Find current VOB's tag
vobstag = execute(["cleartool", "describe", "-short", "vob:."],
ignore_errors=True).strip()
if "Error: " in vobstag:
die("To generate diff run rbt inside vob.")
root_path = execute(["cleartool", "pwv", "-root"],
ignore_errors=True).strip()
if "Error: " in root_path:
die("To generate diff run rbt inside view.")
        # From the current working directory, cut the path down to the VOB.
        # A VOB tag contains a backslash character before the VOB name.
        # The first character of a VOB tag like '\new_proj' must not be
        # treated as a newline escape but as two separate characters:
        # a backslash and the letter 'n'.
cwd = os.getcwd()
base_path = cwd[:len(root_path) + len(vobstag)]
return ClearCaseRepositoryInfo(path=base_path,
base_path=base_path,
vobstag=vobstag,
supports_parent_diffs=False)
def _determine_branch_path(self, version_path):
"""Determine branch path of revision.
"""
branch_path, number = cpath.split(version_path)
return branch_path
def _list_checkedout(self, path):
"""List all checked out elements in current view below path.
        Run the cleartool command twice because -recurse finds checked-out
        elements under path (but not path itself), whereas -directory only
        detects whether the path directory itself is checked out.
"""
checkedout_elements = []
for option in ['-recurse', '-directory']:
# We ignore return code 1 in order to omit files that ClearCase
# cannot read.
output = execute(['cleartool', 'lscheckout', option, '-cview',
'-fmt', r'%En@@%Vn\n', path],
split_lines=True,
extra_ignore_errors=(1,),
with_errors=False)
if output:
checkedout_elements.extend(output)
logging.debug(output)
return checkedout_elements
def _is_a_label(self, label, vobstag=None):
"""Return True when label is a valid ClearCase lbtype.
        Raise an error when the expected vobstag does not match.
"""
label_vobstag = None
# Try to find any vobstag.
try:
label, label_vobstag = label.rsplit('@', 1)
except:
pass
        # Make sure the label is prefixed with lbtype:, as required by cleartool describe.
if not label.startswith(self.REVISION_LABEL_PREFIX):
label = '%s%s' % (self.REVISION_LABEL_PREFIX, label)
        # If a vobstag was given, check that it matches the one extracted from
        # the label; otherwise raise an exception.
if vobstag and label_vobstag and label_vobstag != vobstag:
raise Exception('label vobstag %s does not match expected vobstag '
'%s' % (label_vobstag, vobstag))
# Finally check if label exists in database, otherwise quit. Ignore
# return code 1, it means label does not exist.
output = execute(['cleartool', 'describe', '-short', label],
extra_ignore_errors=(1,),
with_errors=False)
return bool(output)
def _get_tmp_label(self):
"""Generate a string that will be used to set a ClearCase label."""
now = datetime.datetime.now()
temporary_label = 'Current_%d_%d_%d_%d_%d_%d_%d' % (
now.year, now.month, now.day, now.hour, now.minute, now.second,
now.microsecond)
return temporary_label
def _set_label(self, label, path):
"""Set a ClearCase label on elements seen under path."""
checkedout_elements = self._list_checkedout(path)
if checkedout_elements:
raise Exception(
'ClearCase backend cannot set label when some elements are '
'checked out:\n%s' % ''.join(checkedout_elements))
# First create label in vob database.
execute(['cleartool', 'mklbtype', '-c', 'label created for rbtools',
label],
with_errors=True)
# We ignore return code 1 in order to omit files that ClearCase cannot
# read.
recursive_option = ''
if cpath.isdir(path):
recursive_option = '-recurse'
# Apply label to path.
execute(['cleartool', 'mklabel', '-nc', recursive_option, label, path],
extra_ignore_errors=(1,),
with_errors=False)
def _remove_label(self, label):
"""Remove a ClearCase label from vob database.
It will remove all references of this label on elements.
"""
        # Make sure the label is prefixed with lbtype:.
if not label.startswith(self.REVISION_LABEL_PREFIX):
label = '%s%s' % (self.REVISION_LABEL_PREFIX, label)
# Label exists so remove it.
execute(['cleartool', 'rmtype', '-rmall', '-force', label],
with_errors=True)
def _determine_version(self, version_path):
"""Determine numeric version of revision.
CHECKEDOUT is marked as infinity to be treated
always as highest possible version of file.
CHECKEDOUT, in ClearCase, is something like HEAD.
"""
branch, number = cpath.split(version_path)
if number == 'CHECKEDOUT':
return float('inf')
return int(number)
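    # For example, _determine_version('/main/dev/7') returns 7 and
    # _determine_version('/main/CHECKEDOUT') returns float('inf').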
def _construct_extended_path(self, path, version):
"""Combine extended_path from path and version.
CHECKEDOUT must be removed becasue this one version
doesn't exists in MVFS (ClearCase dynamic view file
system). Only way to get content of checked out file
is to use filename only."""
if not version or version.endswith('CHECKEDOUT'):
return path
return "%s@@%s" % (path, version)
def _construct_revision(self, branch_path, version_number):
"""Combine revision from branch_path and version_number."""
return cpath.join(branch_path, version_number)
def parse_revision_spec(self, revisions):
"""Parses the given revision spec.
The 'revisions' argument is a list of revisions as specified by the
user. Items in the list do not necessarily represent a single revision,
since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
SCMTool-specific overrides of this method are expected to deal with
such syntaxes.
This will return a dictionary with the following keys:
'base': A revision to use as the base of the resulting diff.
'tip': A revision to use as the tip of the resulting diff.
These will be used to generate the diffs to upload to Review Board (or
print).
        There are many different ways to generate diffs for ClearCase, because
there are so many different workflows. This method serves more as a way
to validate the passed-in arguments than actually parsing them in the
way that other clients do.
"""
n_revs = len(revisions)
if n_revs == 0:
return {
'base': self.REVISION_CHECKEDOUT_BASE,
'tip': self.REVISION_CHECKEDOUT_CHANGESET,
}
elif n_revs == 1:
if revisions[0].startswith(self.REVISION_ACTIVITY_PREFIX):
return {
'base': self.REVISION_ACTIVITY_BASE,
'tip': revisions[0][len(self.REVISION_ACTIVITY_PREFIX):],
}
if revisions[0].startswith(self.REVISION_BRANCH_PREFIX):
return {
'base': self.REVISION_BRANCH_BASE,
'tip': revisions[0][len(self.REVISION_BRANCH_PREFIX):],
}
if revisions[0].startswith(self.REVISION_LABEL_PREFIX):
return {
'base': self.REVISION_LABEL_BASE,
                    'tip': [revisions[0][len(self.REVISION_LABEL_PREFIX):]],
}
# TODO:
# stream:streamname[@pvob] => review changes in this UCM stream
# (UCM "branch")
# baseline:baseline[@pvob] => review changes between this baseline
# and the working directory
elif n_revs == 2:
if self.viewtype != 'dynamic':
die('To generate a diff using multiple revisions, you must '
'use a dynamic view.')
if (revisions[0].startswith(self.REVISION_LABEL_PREFIX) and
revisions[1].startswith(self.REVISION_LABEL_PREFIX)):
return {
'base': self.REVISION_LABEL_BASE,
                    'tip': [x[len(self.REVISION_LABEL_PREFIX):]
for x in revisions],
}
# TODO:
# baseline:baseline1[@pvob] baseline:baseline2[@pvob]
# => review changes between these two
# baselines
pass
pairs = []
for r in revisions:
p = r.split(':')
if len(p) != 2:
raise InvalidRevisionSpecError(
'"%s" is not a valid file@revision pair' % r)
pairs.append(p)
return {
'base': self.REVISION_FILES,
'tip': pairs,
}
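    # Illustrative examples of the mapping above (names made up):
    #   []                        -> checked-out changeset of the current view
    #   ['activity:my_activity']  -> {'base': REVISION_ACTIVITY_BASE,
    #                                 'tip': 'my_activity'}
    #   ['lbtype:LABEL1', 'lbtype:LABEL2']
    #                             -> {'base': REVISION_LABEL_BASE,
    #                                 'tip': ['LABEL1', 'LABEL2']}
    #   ['foo.c:/main/2', 'bar.c:/main/4']
    #                             -> {'base': REVISION_FILES,
    #                                 'tip': [['foo.c', '/main/2'],
    #                                         ['bar.c', '/main/4']]}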
def _sanitize_activity_changeset(self, changeset):
"""Return changeset containing non-binary, branched file versions.
A UCM activity changeset contains all file revisions created/touched
during this activity. File revisions are ordered earlier versions first
in the format:
changelist = [
<path>@@<branch_path>/<version_number>, ...,
<path>@@<branch_path>/<version_number>
]
<path> is relative path to file
<branch_path> is clearcase specific branch path to file revision
<version number> is the version number of the file in <branch_path>.
        A UCM activity changeset can contain changes from different VOBs;
        however, Review Board only supports changes from a single repository
        at a time, so changes made outside of the current vobstag will be
        ignored.
"""
changelist = {}
# Maybe we should be able to access repository_info without calling
# cleartool again.
repository_info = self.get_repository_info()
for change in changeset:
path, current = change.split('@@')
# If a file isn't in the correct vob, then ignore it.
if path.find("%s/" % (repository_info.vobstag,)) == -1:
logging.debug("Vobstag does not match, so ignore changes on %s"
% path)
continue
version_number = self._determine_version(current)
if path not in changelist:
changelist[path] = {
'highest': version_number,
'lowest': version_number,
'current': current,
}
if version_number == 0:
die("Unexepected version_number=0 in activity changeset")
elif version_number > changelist[path]['highest']:
changelist[path]['highest'] = version_number
changelist[path]['current'] = current
elif version_number < changelist[path]['lowest']:
changelist[path]['lowest'] = version_number
# Convert to list
changeranges = []
for path, version in six.iteritems(changelist):
            # The previous version is the predecessor of the lowest, i.e. its
            # version number decreased by 1.
branch_path = self._determine_branch_path(version['current'])
prev_version_number = str(int(version['lowest']) - 1)
version['previous'] = self._construct_revision(branch_path,
prev_version_number)
changeranges.append(
(self._construct_extended_path(path, version['previous']),
self._construct_extended_path(path, version['current']))
)
return changeranges
def _sanitize_branch_changeset(self, changeset):
"""Return changeset containing non-binary, branched file versions.
        The changeset contains only the first and last version of each file made on the branch.
"""
changelist = {}
for path, previous, current in changeset:
version_number = self._determine_version(current)
if path not in changelist:
changelist[path] = {
'highest': version_number,
'current': current,
'previous': previous
}
if version_number == 0:
                # The previous version of version 0 on a branch is the base version.
changelist[path]['previous'] = previous
elif version_number > changelist[path]['highest']:
changelist[path]['highest'] = version_number
changelist[path]['current'] = current
# Convert to list
changeranges = []
for path, version in six.iteritems(changelist):
changeranges.append(
(self._construct_extended_path(path, version['previous']),
self._construct_extended_path(path, version['current']))
)
return changeranges
def _sanitize_checkedout_changeset(self, changeset):
"""Return changeset containing non-binary, checkdout file versions."""
changeranges = []
for path, previous, current in changeset:
changeranges.append(
(self._construct_extended_path(path, previous),
self._construct_extended_path(path, current))
)
return changeranges
def _sanitize_version_0_file(self, file_revision):
"""Replace file version with Predecessor version when
version is 0 except for /main/0."""
# There is no predecessor for @@/main/0, so keep current revision.
if file_revision.endswith("@@/main/0"):
return file_revision
if file_revision.endswith("/0"):
logging.debug("Found file %s with version 0", file_revision)
file_revision = execute(["cleartool",
"describe",
"-fmt", "%En@@%PSn",
file_revision])
logging.debug("Sanitized with predecessor, new file: %s",
file_revision)
return file_revision
def _sanitize_version_0_changeset(self, changeset):
"""Return changeset sanitized of its <branch>/0 version.
Indeed this predecessor (equal to <branch>/0) should already be
available from previous vob synchro in multi-site context.
"""
sanitized_changeset = []
for old_file, new_file in changeset:
# This should not happen for new file but it is safer to sanitize
# both file revisions.
sanitized_changeset.append(
(self._sanitize_version_0_file(old_file),
self._sanitize_version_0_file(new_file)))
return sanitized_changeset
def _directory_content(self, path):
"""Return directory content ready for saving to tempfile."""
# Get the absolute path of each element located in path, but only
# clearcase elements => -vob_only
output = execute(["cleartool", "ls", "-short", "-nxname", "-vob_only",
path])
lines = output.splitlines(True)
content = []
# The previous command returns absolute file paths but only file names
# are required.
for absolute_path in lines:
short_path = os.path.basename(absolute_path.strip())
content.append(short_path)
return ''.join([
'%s\n' % s
for s in sorted(content)])
def _construct_changeset(self, output):
return [
info.split('\t')
for info in output.strip().split('\n')
]
def _get_checkedout_changeset(self):
"""Return information about the checked out changeset.
        This function returns the kind of element, the path to the file,
        and the previous and current file versions.
"""
changeset = []
        # We ignore return code 1 in order to omit files that ClearCase can't
# read.
output = execute(['cleartool',
'lscheckout',
'-all',
'-cview',
'-me',
'-fmt',
r'%En\t%PVn\t%Vn\n'],
extra_ignore_errors=(1,),
with_errors=False)
if output:
changeset = self._construct_changeset(output)
return self._sanitize_checkedout_changeset(changeset)
def _get_activity_changeset(self, activity):
"""Returns information about the versions changed on a branch.
This takes into account the changes attached to this activity
(including rebase changes) in all vobs of the current view.
"""
changeset = []
# Get list of revisions and get the diff of each one. Return code 1 is
# ignored in order to omit files that ClearCase can't read.
output = execute(['cleartool',
'lsactivity',
'-fmt',
'%[versions]p',
activity],
extra_ignore_errors=(1,),
with_errors=False)
if output:
            # The UCM activity changeset is split by spaces, not EOL, so we
# cannot reuse self._construct_changeset here.
changeset = output.split()
return self._sanitize_activity_changeset(changeset)
def _get_branch_changeset(self, branch):
"""Returns information about the versions changed on a branch.
This takes into account the changes on the branch owned by the
current user in all vobs of the current view.
"""
changeset = []
        # We ignore return code 1 in order to omit files that ClearCase can't
# read.
if sys.platform.startswith('win'):
CLEARCASE_XPN = '%CLEARCASE_XPN%'
else:
CLEARCASE_XPN = '$CLEARCASE_XPN'
output = execute(
[
"cleartool",
"find",
"-all",
"-version",
"brtype(%s)" % branch,
"-exec",
'cleartool descr -fmt "%%En\t%%PVn\t%%Vn\n" %s' % CLEARCASE_XPN
],
extra_ignore_errors=(1,),
with_errors=False)
if output:
changeset = self._construct_changeset(output)
return self._sanitize_branch_changeset(changeset)
def _get_label_changeset(self, labels):
"""Returns information about the versions changed between labels.
        This takes into account the changes done between labels and restricts
        the analysis to the current working directory. A ClearCase label
        belongs to a unique VOB.
"""
changeset = []
tmp_labels = []
# Initialize comparison_path to current working directory.
# TODO: support another argument to manage a different comparison path.
comparison_path = os.getcwd()
error_message = None
try:
            # Unless the user has provided two labels, set a temporary label on
            # the currently seen version of the comparison_path directory; it
            # will be used to process the changeset. ClearCase can easily
            # identify each file and the associated version belonging to a
            # label.
if len(labels) == 1:
tmp_lb = self._get_tmp_label()
tmp_labels.append(tmp_lb)
self._set_label(tmp_lb, comparison_path)
labels.append(tmp_lb)
label_count = len(labels)
if label_count != 2:
raise Exception(
'ClearCase label comparison does not support %d labels'
% label_count)
            # Now that we have two labels for comparison, check that both are valid.
repository_info = self.get_repository_info()
for label in labels:
if not self._is_a_label(label, repository_info.vobstag):
raise Exception(
'ClearCase label %s is not a valid label' % label)
previous_label, current_label = labels
logging.debug('Comparison between labels %s and %s on %s' %
(previous_label, current_label, comparison_path))
            # List the ClearCase element paths and versions belonging to the
            # previous and current labels; the element path is the key of each dict.
previous_elements = {}
current_elements = {}
previous_label_elements_thread = get_elements_from_label_thread(
1, comparison_path, previous_label, previous_elements)
previous_label_elements_thread.start()
current_label_elements_thread = get_elements_from_label_thread(
2, comparison_path, current_label, current_elements)
current_label_elements_thread.start()
previous_label_elements_thread.join()
current_label_elements_thread.join()
seen = []
changelist = {}
# Iterate on each ClearCase path in order to find respective
# previous and current version.
for path in itertools.chain(previous_elements.keys(),
current_elements.keys()):
if path in seen:
continue
seen.append(path)
# Initialize previous and current version to "/main/0"
changelist[path] = {
'previous': '/main/0',
'current': '/main/0',
}
if path in current_elements:
changelist[path]['current'] = \
current_elements[path]['version']
if path in previous_elements:
changelist[path]['previous'] = \
previous_elements[path]['version']
logging.debug('path: %s\nprevious: %s\ncurrent: %s\n' %
(path,
changelist[path]['previous'],
changelist[path]['current']))
# Prevent adding identical version to comparison.
if changelist[path]['current'] == changelist[path]['previous']:
continue
changeset.append(
(self._construct_extended_path(
path,
changelist[path]['previous']),
self._construct_extended_path(
path,
changelist[path]['current'])))
except Exception as e:
error_message = str(e)
finally:
# Delete all temporary labels.
for lb in tmp_labels:
if self._is_a_label(lb):
self._remove_label(lb)
if error_message:
die('Label comparison failed because:\n%s' % error_message)
return changeset
def diff(self, revisions, include_files=[], exclude_patterns=[],
extra_args=[]):
if include_files:
raise Exception(
'The ClearCase backend does not currently support the '
'-I/--include parameter. To diff for specific files, pass in '
'file@revision1:file@revision2 pairs as arguments')
if revisions['tip'] == self.REVISION_CHECKEDOUT_CHANGESET:
changeset = self._get_checkedout_changeset()
return self._do_diff(changeset)
elif revisions['base'] == self.REVISION_ACTIVITY_BASE:
changeset = self._get_activity_changeset(revisions['tip'])
return self._do_diff(changeset)
elif revisions['base'] == self.REVISION_BRANCH_BASE:
changeset = self._get_branch_changeset(revisions['tip'])
return self._do_diff(changeset)
elif revisions['base'] == self.REVISION_LABEL_BASE:
changeset = self._get_label_changeset(revisions['tip'])
return self._do_diff(changeset)
elif revisions['base'] == self.REVISION_FILES:
include_files = revisions['tip']
return self._do_diff(include_files)
else:
assert False
def _diff_files(self, old_file, new_file):
"""Return unified diff for file.
Most effective and reliable way is use gnu diff.
"""
        # In a snapshot view, diff can't access historical ClearCase file
        # versions, so copy the ClearCase files to a tempdir with
        # 'cleartool get -to dest-pname pname' and diff the temporary copies.
if self.viewtype == 'snapshot':
# Create temporary file first.
tmp_old_file = make_tempfile()
tmp_new_file = make_tempfile()
# Delete so cleartool can write to them.
try:
os.remove(tmp_old_file)
except OSError:
pass
try:
os.remove(tmp_new_file)
except OSError:
pass
execute(["cleartool", "get", "-to", tmp_old_file, old_file])
execute(["cleartool", "get", "-to", tmp_new_file, new_file])
diff_cmd = ["diff", "-uN", tmp_old_file, tmp_new_file]
else:
diff_cmd = ["diff", "-uN", old_file, new_file]
dl = execute(diff_cmd, extra_ignore_errors=(1, 2),
translate_newlines=False)
# Replace temporary file name in diff with the one in snapshot view.
if self.viewtype == "snapshot":
dl = dl.replace(tmp_old_file, old_file)
dl = dl.replace(tmp_new_file, new_file)
        # If the input file has ^M characters at the end of lines, ignore them.
dl = dl.replace('\r\r\n', '\r\n')
dl = dl.splitlines(True)
# Special handling for the output of the diff tool on binary files:
# diff outputs "Files a and b differ"
# and the code below expects the output to start with
# "Binary files "
if (len(dl) == 1 and
dl[0].startswith('Files %s and %s differ' % (old_file, new_file))):
dl = ['Binary files %s and %s differ\n' % (old_file, new_file)]
        # We need the OIDs of the files to translate them to paths in the
        # Review Board repository.
old_oid = execute(["cleartool", "describe", "-fmt", "%On", old_file])
new_oid = execute(["cleartool", "describe", "-fmt", "%On", new_file])
if dl == [] or dl[0].startswith("Binary files "):
if dl == []:
dl = ["File %s in your changeset is unmodified\n" % new_file]
dl.insert(0, "==== %s %s ====\n" % (old_oid, new_oid))
dl.append('\n')
else:
dl.insert(2, "==== %s %s ====\n" % (old_oid, new_oid))
return dl
def _diff_directories(self, old_dir, new_dir):
"""Return uniffied diff between two directories content.
Function save two version's content of directory to temp
files and treate them as casual diff between two files.
"""
old_content = self._directory_content(old_dir)
new_content = self._directory_content(new_dir)
old_tmp = make_tempfile(content=old_content)
new_tmp = make_tempfile(content=new_content)
diff_cmd = ["diff", "-uN", old_tmp, new_tmp]
dl = execute(diff_cmd,
extra_ignore_errors=(1, 2),
translate_newlines=False,
split_lines=True)
# Replace temporary filenames with real directory names and add ids
if dl:
dl[0] = dl[0].replace(old_tmp, old_dir)
dl[1] = dl[1].replace(new_tmp, new_dir)
old_oid = execute(["cleartool", "describe", "-fmt", "%On",
old_dir])
new_oid = execute(["cleartool", "describe", "-fmt", "%On",
new_dir])
dl.insert(2, "==== %s %s ====\n" % (old_oid, new_oid))
return dl
def _do_diff(self, changeset):
"""Generates a unified diff for all files in the changeset."""
# Sanitize all changesets of version 0 before processing
changeset = self._sanitize_version_0_changeset(changeset)
diff = []
for old_file, new_file in changeset:
dl = []
# cpath.isdir does not work for snapshot views but this
# information can be found using `cleartool describe`.
if self.viewtype == 'snapshot':
# ClearCase object path is file path + @@
object_path = new_file.split('@@')[0] + '@@'
output = execute(["cleartool", "describe", "-fmt", "%m",
object_path])
object_kind = output.strip()
isdir = object_kind == 'directory element'
else:
isdir = cpath.isdir(new_file)
if isdir:
dl = self._diff_directories(old_file, new_file)
elif cpath.exists(new_file) or self.viewtype == 'snapshot':
dl = self._diff_files(old_file, new_file)
else:
logging.error("File %s does not exist or access is denied."
% new_file)
continue
if dl:
diff.append(''.join(dl))
return {
'diff': ''.join(diff),
}
class ClearCaseRepositoryInfo(RepositoryInfo):
"""
A representation of a ClearCase source code repository. This version knows
how to find a matching repository on the server even if the URLs differ.
"""
def __init__(self, path, base_path, vobstag, supports_parent_diffs=False):
RepositoryInfo.__init__(self, path, base_path,
supports_parent_diffs=supports_parent_diffs)
self.vobstag = vobstag
def find_server_repository_info(self, server):
"""
The point of this function is to find a repository on the server that
matches self, even if the paths aren't the same. (For example, if self
uses an 'http' path, but the server uses a 'file' path for the same
repository.) It does this by comparing VOB's name and uuid. If the
repositories use the same path, you'll get back self, otherwise you'll
get a different ClearCaseRepositoryInfo object (with a different path).
"""
# Find VOB's family uuid based on VOB's tag
uuid = self._get_vobs_uuid(self.vobstag)
logging.debug("Repository's %s uuid is %r" % (self.vobstag, uuid))
# To reduce HTTP requests (_get_repository_info call), we build an
# ordered list of ClearCase repositories starting with the ones that
# have a matching vobstag.
repository_scan_order = deque()
        # Reduce the list of repositories to ClearCase ones only, ordering them
        # so that repositories whose name matches the vobstag come first.
for repository in server.get_repositories(tool='ClearCase').all_items:
# Ignore non-ClearCase repositories.
if repository['tool'] != 'ClearCase':
continue
# Add repos where the vobstag matches at the beginning and others
# at the end.
if repository['name'] == self.vobstag:
repository_scan_order.appendleft(repository)
else:
repository_scan_order.append(repository)
# Now try to find a matching uuid
for repository in repository_scan_order:
repo_name = repository['name']
try:
info = repository.get_info()
except APIError as e:
# If the current repository is not publicly accessible and the
# current user has no explicit access to it, the server will
# return error_code 101 and http_status 403.
if not (e.error_code == 101 and e.http_status == 403):
# We can safely ignore this repository unless the VOB tag
# matches.
if repo_name == self.vobstag:
die('You do not have permission to access this '
'repository.')
continue
else:
# Bubble up any other errors
raise e
if not info or uuid != info['uuid']:
continue
path = info['repopath']
logging.debug('Matching repository uuid:%s with path:%s',
uuid, path)
return ClearCaseRepositoryInfo(path=path, base_path=path,
vobstag=self.vobstag)
        # We didn't find the uuid, but if the server version is >= 1.5.3
        # we can try to use the VOB's name, hoping it is better
        # than the current VOB's path.
if parse_version(server.rb_version) >= parse_version('1.5.3'):
self.path = cpath.split(self.vobstag)[1]
# We didn't find a matching repository on the server.
# We'll just return self and hope for the best.
return self
def _get_vobs_uuid(self, vobstag):
"""Return family uuid of VOB."""
property_lines = execute(["cleartool", "lsvob", "-long", vobstag],
split_lines=True)
for line in property_lines:
if line.startswith('Vob family uuid:'):
return line.split(' ')[-1].rstrip()
def _get_repository_info(self, server, repository):
try:
return server.get_repository_info(repository['id'])
except APIError as e:
# If the server couldn't fetch the repository info, it will return
# code 210. Ignore those.
# Other more serious errors should still be raised, though.
if e.error_code == 210:
return None
raise e
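# A minimal standalone sketch (not used by the classes above) of the repository
# scan ordering applied in find_server_repository_info: entries whose name
# matches the vobstag are moved to the front so they are queried first. The
# plain dicts used here stand in for the API repository resources.
def _order_repositories_by_vobstag(repositories, vobstag):
    """Return a deque with vobstag-matching repositories placed first."""
    from collections import deque
    scan_order = deque()
    for repository in repositories:
        if repository['name'] == vobstag:
            scan_order.appendleft(repository)
        else:
            scan_order.append(repository)
    return scan_order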
|
|
from myhdl import *
import numpy as np
from math import e, pi, log
import matplotlib.pyplot as plt
t_state = enum('INIT', 'DATA_IN', 'COMPUTE', 'COMPUTE_INDEX', 'COMPUTE_MULT', 'DATA_OUT')
#########################CHANGES NEEDED IF N!=8###############################
def FFT(clk, reset, start, data_valid,
#ADD PORTS HERE
#In_real
xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7,
#In_imag
xi0, xi1, xi2, xi3, xi4, xi5, xi6, xi7,
#Out_real
zr0, zr1, zr2, zr3, zr4, zr5, zr6, zr7,
#Out_imag
zi0, zi1, zi2, zi3, zi4, zi5, zi6, zi7,
N_points, Q):
##############################################################################
points = N_points
half_points = points>>1
logpoints = int(log(points,2))
n_bits = len(xr0)
#Declaration of the buffers used to go through the Butterfly
xr_buf = [Signal(modbv(0, min=-2**(n_bits-1), max=2**(n_bits-1))) for i in
range(points)]
xi_buf = [Signal(modbv(0, min=-2**(n_bits-1), max=2**(n_bits-1))) for i in
range(points)]
state = Signal(t_state.INIT)
#Level of FFT --> total number of levels = log(points)
level = Signal(intbv(0, min=0, max= logpoints+1))
#Step to get the right pair, depends on the level
step = Signal(intbv(1, min=1, max=2**logpoints+1))
compute_index = Signal(intbv(0, min=0, max=points))
fft_index = Signal(intbv(0, min=0, max= half_points+1))
xr_buf_idx = Signal(intbv(0)[5:])
increm = Signal(intbv(0)[7:])
#Signals for twiddles
ur = Signal(modbv(0,min=-2**(31), max=2**(31)))
ui = Signal(modbv(0,min=-2**(31), max=2**(31)))
#Signals for products
prod01 = Signal(modbv(0,min=-2**(31), max=2**(31)))
prod02 = Signal(modbv(0,min=-2**(31), max=2**(31)))
prod11 = Signal(modbv(0,min=-2**(31), max=2**(31)))
prod12 = Signal(modbv(0,min=-2**(31), max=2**(31)))
    #Used to index the twiddles in the W arrays once flattened into tuples
level_tuple = Signal(intbv(0, min=0, max= (logpoints)*points+1))
#####
#Prepare the twiddles as follows (Example for N=8):
    #        W = [ W8^0  W8^0  W8^0  W8^0
    #              W8^0  W8^0  W8^2  W8^2
    #              W8^0  W8^1  W8^2  W8^3 ]
    #    where WN^k = exp(-2j*pi*k/N)
#########################CHANGES NEEDED IF N!=8###############################
#For 8 points:
tw_index = [[0,0,0,0],
[0,0,2,2],
[0,1,2,3]]
##############################################################################
W = np.ones([logpoints,points>>1])+np.ones([logpoints,points>>1])*1j
Wr = np.zeros([logpoints, points>>1], np.int32)
Wi = np.zeros([logpoints, points>>1], np.int32)
Tw = np.zeros([logpoints, points>>1], np.int32)
for k in range(logpoints):
#index=modbv(0, min=0, max=points>>1)
for i in range(points>>1):
index = tw_index[k][i]
W[k][i] = e**(-2j*pi*int(index)/points)
#Tw[k][i] = index
#index+=points>>(k+1)
Wr[k][i] = W[k][i].real * 2**Q
Wi[k][i] = W[k][i].imag * 2**Q
Wr = tuple(map(int, Wr.reshape((1,logpoints*(points>>1)))[0]))
Wi = tuple(map(int, Wi.reshape((1,logpoints*(points>>1)))[0]))
#####
@always_seq(clk.posedge, reset)
def compute():
if state == t_state.INIT:
data_valid.next = 0
if start == 1:
state.next = t_state.DATA_IN
#fill the buffers in the correct input order of the Butterfly:
#Example (N=8): xr[1] --> xr[4] (001 --> 100)
elif state == t_state.DATA_IN:
state.next = t_state.COMPUTE_INDEX
#########################CHANGES NEEDED IF N!=8###############################
xr_buf[0].next = xr0
xr_buf[4].next = xr1
xr_buf[2].next = xr2
xr_buf[6].next = xr3
xr_buf[1].next = xr4
xr_buf[5].next = xr5
xr_buf[3].next = xr6
xr_buf[7].next = xr7
xi_buf[0].next = xi0
xi_buf[4].next = xi1
xi_buf[2].next = xi2
xi_buf[6].next = xi3
xi_buf[1].next = xi4
xi_buf[5].next = xi5
xi_buf[3].next = xi6
xi_buf[7].next = xi7
##############################################################################
        #To avoid a critical path exceeding the timing constraints, 3 states
        #are used to execute the FFT
        #State1 : Prepare the indices
elif state == t_state.COMPUTE_INDEX:
increm.next = step+step
if level < level.max-1:
#print('level %d of %d'%(level, level.max-2))
#print('step: %d' %step)
#print('increm: %d' %increm)
if fft_index < fft_index.max-1:
ur.next = Wr[level_tuple+fft_index]
ui.next = Wi[level_tuple+fft_index]
xr_buf_idx.next = compute_index+step
state.next =t_state.COMPUTE_MULT
else:
compute_index.next = 0
level.next=level+1
fft_index.next=0
level_tuple.next=level_tuple+half_points
step.next=step+step
#print('------------NEXT LEVEL--------------')
else:
state.next = t_state.DATA_OUT
#State2 : Compute the products
elif state == t_state.COMPUTE_MULT:
prod01.next = xr_buf[xr_buf_idx]*ur
prod02.next = xi_buf[xr_buf_idx]*ui
prod11.next = xr_buf[xr_buf_idx]*ui
prod12.next = xi_buf[xr_buf_idx]*ur
state.next = t_state.COMPUTE
#State3 : Compute the new FFT value
elif state == t_state.COMPUTE:
#print('W = %d + i(%d)'%(ur, ui))
#print('computing: x[%d] & x[%d]' %(compute_index, compute_index+step))
prod0 = modbv(prod01 - prod02, min=-2**31, max=2**31)
prod1 = modbv(prod11 + prod12, min=-2**31, max=2**31)
xr_buf[compute_index].next = xr_buf[compute_index] + prod0[32:16]
xi_buf[compute_index].next = xi_buf[compute_index] + prod1[32:16]
xr_buf[compute_index+step].next = xr_buf[compute_index] - \
prod0[32:16]
xi_buf[compute_index+step].next = xi_buf[compute_index] - \
prod1[32:16]
#print('xr[%d] = %d'%(compute_index, xr_buf[compute_index]+prod0))
#print('xi[%d] = %d'%(compute_index, xi_buf[compute_index]+prod1))
#print('xr[%d] = %d'%(compute_index+step,
# xr_buf[compute_index+step]-prod0))
#print('xi[%d] = %d'%(compute_index+step,
# xi_buf[compute_index+step]-prod1))
compute_index.next = (compute_index+increm)%(points-1)
fft_index.next=fft_index+1
state.next = t_state.COMPUTE_INDEX
#Assign the buffers to the outputs
elif state == t_state.DATA_OUT:
data_valid.next = 1
#########################CHANGES NEEDED IF N!=8###############################
zr0.next = xr_buf[0]
zr1.next = xr_buf[1]
zr2.next = xr_buf[2]
zr3.next = xr_buf[3]
zr4.next = xr_buf[4]
zr5.next = xr_buf[5]
zr6.next = xr_buf[6]
zr7.next = xr_buf[7]
zi0.next = xi_buf[0]
zi1.next = xi_buf[1]
zi2.next = xi_buf[2]
zi3.next = xi_buf[3]
zi4.next = xi_buf[4]
zi5.next = xi_buf[5]
zi6.next = xi_buf[6]
zi7.next = xi_buf[7]
##############################################################################
level.next = 0
level_tuple.next = 0
step.next = 1
state.next = t_state.INIT
return compute
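#A small helper sketch (not used by the generator above) illustrating the
#bit-reversed input ordering hard-coded in the DATA_IN state for N=8:
#input index 1 (001b) lands in buffer 4 (100b), 3 (011b) in 6 (110b), etc.
def _bit_reverse_index(i, logpoints):
    """Return the value of i with its logpoints lowest bits reversed."""
    r = 0
    for _ in range(logpoints):
        r = (r << 1) | (i & 1)
        i >>= 1
    return r
#For N=8 (logpoints=3): [_bit_reverse_index(i, 3) for i in range(8)]
#gives [0, 4, 2, 6, 1, 5, 3, 7], matching the xr_buf/xi_buf assignments above.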
def compile_FFT():
n_bits=16
Q=16
clk = Signal(bool(0))
reset = ResetSignal(0, active=1, async=True)
start = Signal(bool(0))
data_valid = Signal(bool(0))
#########################CHANGES NEEDED IF N!=8###############################
N_points=8
xr0 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xr1 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xr2 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xr3 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xr4 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xr5 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xr6 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xr7 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi0 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi1 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi2 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi3 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi4 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi5 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi6 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
xi7 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr0 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr1 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr2 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr3 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr4 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr5 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr6 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zr7 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi0 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi1 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi2 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi3 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi4 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi5 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi6 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
zi7 = Signal(intbv(0, min=-2**(n_bits-1), max=2**(n_bits-1)))
toVHDL(FFT, clk, reset, start, data_valid,
xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7,
xi0, xi1, xi2, xi3, xi4, xi5, xi6, xi7,
zr0, zr1, zr2, zr3, zr4, zr5, zr6, zr7,
zi0, zi1, zi2, zi3, zi4, zi5, zi6, zi7,
N_points, Q)
##############################################################################
#SIMULATION
def FFT_tb():
HALF_PERIOD = delay(5)
n_bits=16
Q=16
clk = Signal(bool(0))
reset = ResetSignal(0, active=1, async=True)
start = Signal(bool(0))
data_valid = Signal(bool(0))
#########################CHANGES NEEDED IF N!=8###############################
N_points=8
[xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7,
xi0, xi1, xi2, xi3, xi4, xi5, xi6, xi7,
zr0, zr1, zr2, zr3, zr4, zr5, zr6, zr7,
zi0, zi1, zi2, zi3, zi4, zi5, zi6, zi7] = [Signal(intbv(0,
min=-2**(n_bits-1), max=2**(n_bits-1))) for i in range(N_points*4)]
    #Can ONLY be used to simplify the simulation
fft_in_bus = [xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7]
fft_re_bus = [zr0, zr1, zr2, zr3, zr4, zr5, zr6, zr7]
fft_im_bus = [zi0, zi1, zi2, zi3, zi4, zi5, zi6, zi7]
DUT = FFT(clk, reset, start, data_valid,
xr0, xr1, xr2, xr3, xr4, xr5, xr6, xr7,
xi0, xi1, xi2, xi3, xi4, xi5, xi6, xi7,
zr0, zr1, zr2, zr3, zr4, zr5, zr6, zr7,
zi0, zi1, zi2, zi3, zi4, zi5, zi6, zi7,
N_points, Q)
##############################################################################
@always(HALF_PERIOD)
def clockGen():
clk.next = not clk
raw = [-3, -2, -1, 0, 0, 1, 2, 3]
@instance
def tb_stim():
reset.next = True
yield clk.posedge
reset.next = False
i = 0
for sig in fft_in_bus:
sig.next = raw[i]
i+=1
yield clk.posedge
yield clk.negedge
yield clk.negedge
start.next = 1
yield clk.negedge
start.next = 0
for i in range(50):
yield clk.negedge
X = np.zeros(len(fft_im_bus), dtype='complex')
x = np.zeros(len(fft_im_bus))
for i in range(len(x)):
X[i] = fft_re_bus[i] + fft_im_bus[i]*1j
x[i] = fft_in_bus[i]
X_np = np.fft.fftn(x)
print(X_np)
plt.plot(np.abs((X)))
plt.plot(np.abs(X_np))
plt.show()
raise StopSimulation
return DUT, clockGen, tb_stim
if __name__ == "__main__":
compile_FFT()
sim = Simulation(traceSignals(FFT_tb))
sim.run()
|
|
# -*- coding: utf-8 -*-
"""
args
~~~~
"""
import os
import sys
from sys import argv
from glob import glob
from collections import OrderedDict
# Python 3
if sys.version_info[0] == 3:
string_type = str
else:
string_type = basestring
def _expand_path(path):
"""Expands directories and globs in given path."""
paths = []
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if os.path.isdir(path):
for (dir, dirs, files) in os.walk(path):
for file in files:
paths.append(os.path.join(dir, file))
else:
paths.extend(glob(path))
return paths
def _is_collection(obj):
"""Tests if an object is a collection. Strings don't count."""
if isinstance(obj, string_type):
return False
return hasattr(obj, '__getitem__')
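# A minimal sketch (illustrative only; not part of the public API) showing how
# the two helpers above are meant to combine: every non-collection argument is
# expanded into the concrete file paths it refers to.
def _example_collect_files(arguments):
    """Expand each string argument into the file paths it matches."""
    collected = []
    for arg in arguments:
        if not _is_collection(arg):
            collected.extend(_expand_path(arg))
    return collected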
class ArgsList(object):
"""CLI Argument management."""
def __init__(self, args=None, no_argv=False):
if not args:
if not no_argv:
self._args = argv[1:]
else:
self._args = []
else:
self._args = args
def __len__(self):
return len(self._args)
def __repr__(self):
return '<args %s>' % (repr(self._args))
def __getitem__(self, i):
try:
return self.all[i]
except IndexError:
return None
def __contains__(self, x):
return self.first(x) is not None
def get(self, x):
"""Returns argument at given index, else none."""
try:
return self.all[x]
except IndexError:
return None
def get_with(self, x):
"""Returns first argument that contains given string."""
return self.all[self.first_with(x)]
def remove(self, x):
"""Removes given arg (or list thereof) from Args object."""
def _remove(x):
found = self.first(x)
if found is not None:
self._args.pop(found)
if _is_collection(x):
for item in x:
                _remove(item)
else:
_remove(x)
def pop(self, x):
"""Removes and Returns value at given index, else none."""
try:
return self._args.pop(x)
except IndexError:
return None
def any_contain(self, x):
"""Tests if given string is contained in any stored argument."""
        return self.first_with(x) is not None
def contains(self, x):
"""Tests if given object is in arguments list.
Accepts strings and lists of strings."""
return self.__contains__(x)
def first(self, x):
"""Returns first found index of given value (or list of values)."""
def _find(x):
try:
return self.all.index(str(x))
except ValueError:
return None
if _is_collection(x):
for item in x:
found = _find(item)
if found is not None:
return found
return None
else:
return _find(x)
def first_with(self, x):
"""Returns first found index containing value (or list of values)."""
def _find(x):
try:
for arg in self.all:
if x in arg:
return self.all.index(arg)
except ValueError:
return None
if _is_collection(x):
for item in x:
found = _find(item)
                if found is not None:
return found
return None
else:
return _find(x)
def first_without(self, x):
"""Returns first found index not containing value
(or list of values).
"""
def _find(x):
try:
for arg in self.all:
if x not in arg:
return self.all.index(arg)
except ValueError:
return None
if _is_collection(x):
for item in x:
found = _find(item)
                if found is not None:
return found
return None
else:
return _find(x)
def start_with(self, x):
"""Returns all arguments beginning with given string
(or list thereof).
"""
_args = []
for arg in self.all:
if _is_collection(x):
for _x in x:
                    if arg.startswith(_x):
_args.append(arg)
break
else:
if arg.startswith(x):
_args.append(arg)
return ArgsList(_args, no_argv=True)
def contains_at(self, x, index):
"""Tests if given [list of] string is at given index."""
try:
if _is_collection(x):
for _x in x:
if (_x in self.all[index]) or (_x == self.all[index]):
return True
                return False
else:
return (x in self.all[index])
except IndexError:
return False
def has(self, x):
"""Returns true if argument exists at given index.
Accepts: integer.
"""
try:
self.all[x]
return True
except IndexError:
return False
def value_after(self, x):
"""Returns value of argument after given found argument
(or list thereof).
"""
try:
try:
i = self.all.index(x)
except ValueError:
return None
return self.all[i + 1]
except IndexError:
return None
@property
def grouped(self):
"""Extracts --flag groups from argument list.
        Returns {flag: ArgsList, ...}; arguments before any flag are stored under '_'.
"""
collection = OrderedDict(_=ArgsList(no_argv=True))
_current_group = None
for arg in self.all:
if arg.startswith('-'):
_current_group = arg
collection.setdefault(arg, ArgsList(no_argv=True))
else:
if _current_group:
collection[_current_group]._args.append(arg)
else:
collection['_']._args.append(arg)
return collection
@property
def last(self):
"""Returns last argument."""
try:
return self.all[-1]
except IndexError:
return None
@property
def all(self):
"""Returns all arguments."""
return self._args
def all_with(self, x):
"""Returns all arguments containing given string (or list thereof)."""
_args = []
for arg in self.all:
if _is_collection(x):
for _x in x:
if _x in arg:
_args.append(arg)
break
else:
if x in arg:
_args.append(arg)
return ArgsList(_args, no_argv=True)
def all_without(self, x):
"""Returns all arguments not containing given string
(or list thereof).
"""
_args = []
for arg in self.all:
if _is_collection(x):
for _x in x:
if _x not in arg:
_args.append(arg)
break
else:
if x not in arg:
_args.append(arg)
return ArgsList(_args, no_argv=True)
@property
def flags(self):
"""Returns Arg object including only flagged arguments."""
return self.start_with('-')
@property
def not_flags(self):
"""Returns Arg object excluding flagged arguments."""
return self.all_without('-')
@property
def files(self, absolute=False):
"""Returns an expanded list of all valid paths that were passed in."""
_paths = []
for arg in self.all:
for path in _expand_path(arg):
if os.path.exists(path):
if absolute:
_paths.append(os.path.abspath(path))
else:
_paths.append(path)
return _paths
@property
def not_files(self):
"""Returns a list of all arguments that aren't files/globs."""
_args = []
for arg in self.all:
if not len(_expand_path(arg)):
if not os.path.exists(arg):
_args.append(arg)
return ArgsList(_args, no_argv=True)
@property
def copy(self):
"""Returns a copy of Args object for temporary manipulation."""
return ArgsList(self.all)
@property
def assignments(self):
"""Extracts assignment values from assignments."""
collection = OrderedDict()
for arg in self.all:
if '=' in arg:
collection.setdefault(
arg.split('=', 1)[0], ArgsList(no_argv=True))
collection[arg.split('=', 1)[0]]._args.append(
arg.split('=', 1)[1])
return collection
args = ArgsList()
get = args.get
get_with = args.get_with
remove = args.remove
pop = args.pop
any_contain = args.any_contain
contains = args.contains
first = args.first
first_with = args.first_with
first_without = args.first_without
start_with = args.start_with
contains_at = args.contains_at
has = args.has
value_after = args.value_after
grouped = args.grouped
last = args.last
all = args.all
all_with = args.all_with
all_without = args.all_without
flags = args.flags
not_flags = args.not_flags
files = args.files
not_files = args.not_files
copy = args.copy
assignments = args.assignments
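# A minimal usage sketch of the module-level shortcuts above, assuming a
# hypothetical invocation such as ``python prog.py --verbose input.txt``.
def _example_usage():
    """Return (flag arguments, non-flag arguments) for the current argv."""
    return (flags.all, not_flags.all)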
|
|
# -*- coding: utf-8 -*-
import unittest
import copy
from music21 import key
from music21 import metadata
from music21 import interval
from music21 import pitch
from music21 import chord
from music21 import stream
from music21 import harmony
from music21 import scale
from music21.alpha.theoryAnalysis import theoryAnalyzer
import random
#---------------
def reduction(sc):
reductionStream = sc.chordify()
for c in reductionStream.flat.getElementsByClass('Chord'):
c.closedPosition(forceOctave=4, inPlace=True)
c.removeRedundantPitches(inPlace=True)
c.annotateIntervals()
return reductionStream
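# A minimal usage sketch for the reduction helper above; it reuses the chorale
# excerpt that appears in the doctests further down, so the corpus work name is
# only an example.
def exampleReduction():
    '''Illustrative only: chordify a short chorale excerpt and return it.'''
    from music21 import corpus
    piece = corpus.parse('bwv7.7').measures(0, 3)
    return reduction(piece)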
#---------------
def generateIntervals(numIntervals,kind = None, octaveSpacing = None):
if kind in ['anyChords','majorChords','diatonicTriads','diatonicTriadInversions']:
return generateChords(numIntervals,kind)
sc = stream.Stream()
for i in range(numIntervals):
loPs = pitch.Pitch("C3").ps
hiPs = pitch.Pitch("C#5").ps
startPs = random.randrange(loPs,hiPs)
startPitch = pitch.Pitch(ps=startPs)
numHalfSteps = random.randrange(-19,20)
intv = interval.ChromaticInterval(numHalfSteps)
if kind == 'consonant':
invType = random.choice(['m3','M3','P5','m6','M6','P8'])
intv = interval.Interval(invType)
elif kind == 'noAugDim':
invType = random.choice(['m2','M2','m3','M3','P4','P5','m6','M6','m7','M7'])
intv = interval.Interval(invType)
elif kind == 'dissonant':
invType = random.choice(['m2','M2','m3','M3','P4','P5','m6','M6','m7','M7'])
intv = interval.Interval(invType)
endPitch = intv.transposePitch(startPitch)
if kind == 'diatonic':
startPitch = pitch.Pitch(random.choice('abcdefg'))
endPitch = pitch.Pitch(random.choice('abcdefg'))
if octaveSpacing is not None:
startPitch.octave = 4
endPitch.octave = 4 - octaveSpacing
c = chord.Chord([startPitch,endPitch])
c.volume.velocity = 127
c.quarterLength=2
sc.append(c)
c = chord.Chord(['C','G'])
c.quarterLength=2
sc.append(c)
return sc
traidInversions = [[1,3,5],[1,3,6],[1,4,6]]
def generateChords(numChords,kind=''):
'''
Randomly generate a score of chords for use with the perceived dissonances
    app. These chords may be dissonant or consonant. If kind == 'diatonicTriads',
    only diatonic triads will be generated.
>>> sc = alpha.webapps.commands.generateChords(4,'diatonicTriads')
>>> a = alpha.webapps.commands.runPerceivedDissonanceAnalysis(sc,[1.2,3.2,5.2])
>>> chords = a['fullScore']['stream'].flat.getElementsByClass('Chord')
>>> chords[0].color != None
True
>>> chords[1].color != None
True
>>> chords[2].color != None
True
>>> chords[3].color in [None, '#cc3300']
True
>>> sc2 = alpha.webapps.commands.generateChords(4)
>>> a = alpha.webapps.commands.runPerceivedDissonanceAnalysis(sc2,[1.2,3.2])
>>> chords = a['fullScore']['stream'].flat.getElementsByClass('Chord')
>>> chords[0].color != None
True
>>> chords[1].color != None
True
>>> chords[2].color in [None, '#cc3300']
True
>>> chords[3].color in [None, '#cc3300']
True
'''
sc = stream.Score()
p = stream.Part()
scl = scale.MajorScale('C')
#possibleChordTypes = [l[0] for l in harmony.CHORD_TYPES.values()]
possibleChordTypes =['53','63','64']
if kind == 'diatonicTriads':
for i in range(numChords):
startDegree = random.randrange(0,8)
inversion = random.randrange(0,3)
chordPitches = []
#testDegrees = [d+startDegree-1 for d in traidInversions[inversion] ]
chordPitches = [scl.pitchFromDegree(d+startDegree-1) for d in traidInversions[inversion] ]
chordType = possibleChordTypes[random.randrange(0,len(possibleChordTypes))]
c = chord.Chord(chordPitches)
c.quarterLength=2
p.append(c)
p.makeMeasures(inPlace=True)
sc.append(p)
return sc
else:
for i in range(numChords):
loPs = pitch.Pitch("C4").ps
hiPs = pitch.Pitch("C#5").ps
startPs = random.randrange(loPs,hiPs)
startPitch = pitch.Pitch(ps=startPs)
startPitchName = startPitch.name
chordType = possibleChordTypes[random.randrange(0,len(possibleChordTypes))]
c = harmony.ChordSymbol(startPitchName+','+chordType)
c.writeAsChord = True
c.quarterLength=2
c.volume.velocity = 127
p.append(c)
p.makeMeasures(inPlace=True)
sc.append(p)
return sc
def runPerceivedDissonanceAnalysis(scoreIn, offsetList, keyStr=None):
'''
Perceived Dissonances: Demo app for NEMCOG meeting, April 28 2012
    A webapp for determining the accuracy of aural identification of dissonances:
    the user listens to a piece of music and clicks when they think they hear a dissonance. This
    information is then passed to this method, which compares the score to the list of offsets corresponding
    to when the user clicked. Music21 then identifies the dissonant vertical slices and outputs the results as a
    dictionary including the score, colored by vertical slices of interest as below:
    Green: both music21 and the user identified as dissonant
    Blue: only the user identified as dissonant
    Red: only music21 identified as dissonant
    This example runs two analyses: the first is a comparison with the unmodified score and the user's offsets, the second
    with the passing tones and neighbor tones of the score removed. Results are returned as nested dictionaries of the
following form:
{fullScore , nonharmonicTonesRemovedScore}
each of which is a dictionary containing these keys:
{'stream', 'numUserIdentified', 'numMusic21Identified', 'numBothIdentified', 'accuracy', 'romans', 'key'}
>>> piece = corpus.parse('bwv7.7').measures(0,3)
>>> offsetList = [
... 1.19166,
... 2.364166,
... 3.604166,
... 4.58083,
... 6.13166,
... 8.804166,
... 10.14833,
... 11.700833,
... ]
>>> analysisDict = alpha.webapps.commands.runPerceivedDissonanceAnalysis(piece, offsetList)
>>> a = analysisDict['fullScore']
>>> a['numMusic21Identified']
7
>>> a['numBothIdentified']
3
>>> a['numUserIdentified']
8
>>> a['romans']
['v43', 'iio65', 'bVIIb73']
>>> b = analysisDict['nonharmonicTonesRemovedScore']
>>> b['numMusic21Identified']
5
>>> b['numBothIdentified']
2
>>> b['accuracy']
40.0
Return dictionary.
'''
withoutNonharmonictonesScore = copy.deepcopy(scoreIn)
theoryAnalyzer.removePassingTones(withoutNonharmonictonesScore)
theoryAnalyzer.removeNeighborTones(withoutNonharmonictonesScore)
withoutNonharmonictonesScore.sliceByGreatestDivisor(addTies=True, inPlace=True)
withoutNonharmonictonesScore.stripTies(inPlace=True, matchByPitch=True, retainContainers=False)
dissonanceAnalysisDict = {'fullScore': determineDissonantIdentificationAccuracy(scoreIn, offsetList,keyStr), \
'nonharmonicTonesRemovedScore':determineDissonantIdentificationAccuracy(withoutNonharmonictonesScore, offsetList,keyStr)}
return dissonanceAnalysisDict
def _withinRange(dataList, lowLim, upperLim):
'''helper function: returns true if there exists a number in dataList
    for which the inequality lowLim <= number < upperLim holds
>>> alpha.webapps.commands._withinRange([1,5.5,8], 2,3)
False
>>> alpha.webapps.commands._withinRange([1,5.5,8], 4,6)
True
'''
dataList.sort()
for index, offset in enumerate(dataList):
if lowLim <= offset and offset < upperLim:
return True
return False
def determineDissonantIdentificationAccuracy(scoreIn, offsetList, keyStr=None):
'''
    Runs a comparison on the score to identify dissonances, then compares them to the user's offsetList of identified
dissonances. The score is colored according to the results, and appropriate information is returned
as a dictionary. See runPerceivedDissonanceAnalysis for full details and an example.
*Color key*
    * Green: both music21 and the user identified this as a dissonant vertical slice
    * Red: only music21 identified this as a dissonant vertical slice
    * Blue: only the user identified this as a dissonant vertical slice
>>> s = stream.Score()
>>> p = stream.Part()
>>> c1 = chord.Chord(['C3','E3','G3'])
>>> c1.isConsonant()
True
>>> p.append(c1)
>>> c2 = chord.Chord(['C3','B3','D#'])
>>> c2.isConsonant()
False
>>> p.append(c2)
>>> c3 = chord.Chord(['D3','F#3','A'])
>>> c3.isConsonant()
True
>>> p.append(c3)
>>> c4 = chord.Chord(['B-4','F#4','A-3'])
>>> c4.isConsonant()
False
>>> p.append(c4)
>>> p.makeMeasures(inPlace=True)
>>> s.append(p)
>>> aData = alpha.webapps.commands.determineDissonantIdentificationAccuracy(s, [2.3, 3.2])
>>> chords = aData['stream'].flat.getElementsByClass('Chord')
>>> chords[0].color == None #BLACK (by default)
True
>>> chords[1].color #RED
'#cc3300'
>>> chords[2].color #BLUE
'#0033cc'
>>> chords[3].color #GREEN
'#00cc33'
'''
from music21 import roman
score = scoreIn.sliceByGreatestDivisor(addTies=True)
vsList = theoryAnalyzer.getVerticalities(score)
user = len(offsetList)
music21VS = 0
both = 0
romanFigureList = []
if keyStr == None:
pieceKey = scoreIn.analyze('key')
else:
pieceKey = key.Key(keyStr)
for (vsNum, vs) in enumerate(vsList):
currentVSOffset = vs.offset(leftAlign=False)
if vsNum + 1 == len(vsList):
nextVSOffset = scoreIn.highestTime
else:
nextVSOffset = vsList[vsNum+1].offset(leftAlign=False)
if not vs.isConsonant(): #music21 recognizes this as a dissonant vertical slice
music21VS+=1
if _withinRange(offsetList, currentVSOffset, nextVSOffset):
vs.color = '#00cc33' # the user also recognizes this as a dissonant vertical slice GREEN
both+=1
c = vs.getChord()
romanFigureList.append(roman.romanNumeralFromChord(c, pieceKey).figure)
else:
vs.color = '#cc3300' #the user did not recognize as a dissonant vertical slice RED
else: #music21 did not recognize this as a dissonant vertical slice
if _withinRange(offsetList, currentVSOffset, nextVSOffset):
vs.color = '#0033cc' #the user recognized it as a dissonant vertical slice BLUE
score.insert(metadata.Metadata())
score.metadata.composer = scoreIn.metadata.composer
score.metadata.movementName = scoreIn.metadata.movementName
analysisData = {'stream': score, 'numUserIdentified': user, 'numMusic21Identified':music21VS, 'numBothIdentified':both, 'accuracy': both*100.0/music21VS if music21VS!= 0 else 100, 'romans': romanFigureList, 'key': pieceKey}
return analysisData
## Shortcuts - temporary procedures used for re-implementation of hackday demo. Will be moved
## to new home or removed when commandList can accommodate more complex structures (arrays, for loops...)
def createMensuralCanon(sc):
'''
Implements music21 example of creating a mensural canon
'''
melody = sc.parts[0].flat.notesAndRests
canonStream = stream.Score()
for scalar, t in [(1, 'p1'), (2, 'p-5'), (.5, 'p-11'), (1.5, -24)]:
part = melody.augmentOrDiminish(scalar)
part.transpose(t, inPlace=True)
canonStream.insert(0, part)
return canonStream
def correctChordSymbols(worksheet, studentResponse):
'''Written for hackday demo: accepts as parameters a stream with chord symbols (the worksheet)
and the student's attempt to write out the pitches for each chord symbol of the worksheet.
    The student's work is returned with annotations, and the percentage correct is also returned.
>>> worksheet = stream.Stream()
>>> worksheet.append(harmony.ChordSymbol('C'))
>>> worksheet.append(harmony.ChordSymbol('G7'))
>>> worksheet.append(harmony.ChordSymbol('B-'))
>>> worksheet.append(harmony.ChordSymbol('D7/A'))
>>> studentResponse = stream.Stream()
>>> studentResponse.append(clef.TrebleClef())
>>> studentResponse.append(chord.Chord(['C','E','G']))
>>> studentResponse.append(chord.Chord(['G', 'B', 'D5', 'F5']))
>>> studentResponse.append(chord.Chord(['B-', 'C']))
>>> studentResponse.append(chord.Chord(['D4', 'F#4', 'A4', 'C5']))
>>> newScore, percentCorrect = alpha.webapps.commands.correctChordSymbols(
... worksheet, studentResponse)
>>> for x in newScore.notes:
... x.lyric
':)'
':)'
'PITCHES'
'INVERSION'
>>> percentCorrect
50.0
Return object.
'''
numCorrect = 0
chords1 = worksheet.flat.getElementsByClass(harmony.ChordSymbol)
totalNumChords = len(chords1)
chords2 = studentResponse.flat.notes
isCorrect = False
for chord1, chord2 in zip(chords1, chords2):
if chord1 not in studentResponse:
studentResponse.insertAndShift(chord2.offset, chord1)
if not('Chord' in chord2.classes):
chord2.lyric = "NOT A CHORD"
continue
newPitches = []
for x in chord2.pitches:
newPitches.append(str(x.name))
for pitch in chord1:
if pitch.name in newPitches:
isCorrect = True
else:
isCorrect = False
break
if isCorrect == True:
newPitches1 = []
for y in chord1.pitches:
newPitches1.append(str(y.name))
p = chord1.sortDiatonicAscending()
o = chord2.sortDiatonicAscending()
a = []
b = []
for d in p.pitches:
a.append(str(d.name))
for k in o.pitches:
b.append(str(k.name))
if a != b:
chord2.lyric = "INVERSION"
else:
numCorrect = numCorrect + 1
chord2.lyric = ":)"
if isCorrect == False:
chord2.lyric = "PITCHES"
percentCorrect = numCorrect*1.0/totalNumChords * 100
return (studentResponse, percentCorrect) #student's corrected score
def checkLeadSheetPitches(worksheet, returnType=''):
'''
checker routine for hack day demo lead sheet chord symbols exercise. Accepts
a stream with both the chord symbols and student's chords, and returns the corrected
stream. if returnType=answerkey, the score is returned with the leadsheet pitches realized
>>> worksheet = stream.Stream()
>>> worksheet.append(harmony.ChordSymbol('C'))
>>> worksheet.append(harmony.ChordSymbol('G7'))
>>> worksheet.append(harmony.ChordSymbol('B'))
>>> worksheet.append(harmony.ChordSymbol('D7/A'))
>>> answerKey = alpha.webapps.commands.checkLeadSheetPitches( worksheet, returnType = 'answerkey' )
>>> for x in answerKey.notes:
... [str(p) for p in x.pitches]
['C3', 'E3', 'G3']
['G2', 'B2', 'D3', 'F3']
['B2', 'D#3', 'F#3']
['A2', 'C3', 'D3', 'F#3']
'''
#nicePiece = sc
#incorrectPiece = sc
#incorrectPiece = messageconverter.parse('C:\Users\sample.xml')
#sopranoLine = nicePiece.getElementsByClass(stream.Part)[0]
#chordLine = nicePiece.getElementsByClass(stream.Part)[1]
#chordLine.show('text')
#bassLine = nicePiece.part(2)
studentsAnswers = worksheet.flat.getElementsByClass(chord.Chord)
answerKey = worksheet.flat.getElementsByClass(harmony.ChordSymbol)
correctedAssignment, unused_numCorrect = correctChordSymbols(answerKey, studentsAnswers)
if returnType == 'answerkey':
for chordSymbol in answerKey:
chordSymbol.writeAsChord = True
#message = 'answer key displayed'
return answerKey
else:
#message = 'you got '+str(numCorrect)+' percent correct'
return correctedAssignment
def colorAllNotes(sc, color):
'''
Iterate through all notes and change their color to the given color -
used for testing color rendering in noteflight
'''
for n in sc.flat.getElementsByClass('Note'):
n.color = color
return sc
def colorAllChords(sc, color):
'''
Iterate through all chords and change their color to the given color -
used for testing color rendering in noteflight
'''
for c in sc.flat.getElementsByClass('Chord'):
c.color = color
return sc
def writeMIDIFileToServer(sc):
'''
    Write the score to a MIDI file under the web server's document root and
    return the URL path at which the file can be retrieved.
'''
# For now, the document root is hard coded, future implementations could
# try to use environment variables
#documentRoot = environ['DOCUMENT_ROOT']
#documentRoot = '/Library/WebServer/Documents'
documentRoot = '/Library/Server/Web/Data/Sites/Default'
urlPath = "/music21/OutputFiles/cognitionEx.mid"
writePath = documentRoot + urlPath
sc.write('mid',writePath)
return urlPath
#------------------------------------------------------------------------
# Tests
class Test(unittest.TestCase):
def runTest(self):
pass
if __name__ == "__main__":
import music21
music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof
|
|
#############################################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#############################################################################################
import sys
import random
import shutil
import numpy
from numpy import sign, where, array, ones
import parse
import utils
from poim import compute_poims
import shogun
from shogun.Kernel import GaussianKernel, WeightedDegreePositionStringKernel
from shogun.Kernel import WeightedDegreeStringKernel
from shogun.Kernel import LinearKernel, PolyKernel, LocalAlignmentStringKernel
from shogun.Kernel import LocalityImprovedStringKernel
from shogun.Kernel import CommWordStringKernel, WeightedCommWordStringKernel, CommUlongStringKernel
from shogun.Kernel import CombinedKernel
from shogun.Kernel import SLOWBUTMEMEFFICIENT
from shogun.Kernel import AvgDiagKernelNormalizer
from shogun.Features import RealFeatures, BinaryLabels, StringCharFeatures, DNA, StringWordFeatures, StringUlongFeatures, PROTEIN
from shogun.Features import CombinedFeatures
from shogun.Classifier import LibSVM,GPBTSVM
DefaultSVM = LibSVM
try:
from shogun.Classifier import SVMLight
LinAddSVM = SVMLight
LinearSVM = SVMLight
except:
LinAddSVM = GPBTSVM
LinearSVM = LibSVM
from shogun.Preprocessor import SortWordString, SortUlongString
from utils import calcprc, calcroc, accuracy
from utils import getPartitionedSet, getCurrentSplit
import plots
import re
from poim import reshape_normalize_contribs, compute_weight_mass
################################################################################
def non_atcg_convert(seq, nuc_con):
""" Converts Non ATCG characters from DNA sequence """
if nuc_con == '':sys.stderr.write("usage: Provide a choice for non ACGT nucleotide conversion [T|A|C|G|R|Y|N] at last\n");sys.exit(-1)
if re.match(r'[^ATCGRYN]', nuc_con):sys.stderr.write("usage: Conversion nucleotide choice -"+ nuc_con +"- failed. pick one from [T|A|C|G|R|Y|N]\n");sys.exit(-1)
nuc_con = nuc_con.upper()
mod_seq = []
for i in range(len(seq)):
if re.search(r'[^ACTG]', seq[i], re.IGNORECASE):
if nuc_con == 'A' or nuc_con == 'T' or nuc_con == 'C' or nuc_con == 'G':
seq[i] = re.sub(r'[^ATCG|actg]', nuc_con, seq[i])
seq[i] = seq[i].upper()
mod_seq.append(seq[i])
continue
if nuc_con == 'N':(nucleotide, line) = ('ATCG', '')
if nuc_con == 'R':(nucleotide, line) = ('AG', '')
if nuc_con == 'Y':(nucleotide, line) = ('TC', '')
for single_nuc in seq[i]:
if re.match(r'[^ACGT]', single_nuc, re.IGNORECASE):
line += random.choice(nucleotide)
else:
line += single_nuc.upper()
mod_seq.append(line)
else:
seq[i] = seq[i].upper()
mod_seq.append(seq[i])
return mod_seq
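# A short illustration (hypothetical sequences) of the conversion above:
# with nuc_con='A' every non-ACGT character is forced to 'A', while
# nuc_con='N' replaces each ambiguous character with a random ACGT base.
def _example_non_atcg_usage():
    """Illustrative only; returns cleaned copies of two toy reads."""
    reads = ['acgtn', 'ACGTX']
    forced = non_atcg_convert(list(reads), 'A')
    randomized = non_atcg_convert(list(reads), 'N')
    return forced, randomized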
def non_aminoacid_converter(seq, amino_con):
""" Converts Non amino acid characters from protein sequence """
if amino_con == '':sys.stderr.write("usage: Provide a choice for replacing non amino acid characters\n");sys.exit(-1)
flag = 0
if len(amino_con)>1:
if amino_con != 'random':flag = 1
else:
if re.match(r'[^GPAVLIMCFYWHKRQNEDST]', amino_con, re.IGNORECASE):flag = 1
if flag == 1:sys.stderr.write("usage: Replace aminoacid chioce -"+ amino_con +"- failed. Pick a valid aminoacid single letter code/random\n");sys.exit(-1)
amino_con = amino_con.upper()
opt_seq = []
for i in range(len(seq)):
if re.search(r'[^GPAVLIMCFYWHKRQNEDST]', seq[i], re.IGNORECASE):
if amino_con == 'RANDOM':
aminoacid = 'GPAVLIMCFYWHKRQNEDST'
line = ''
for single_amino in seq[i]:
if re.match(r'[^GPAVLIMCFYWHKRQNEDST]', single_amino, re.IGNORECASE):
r_amino = random.choice(aminoacid)
line += r_amino
else:
single_amino = single_amino.upper()
line += single_amino
opt_seq.append(line)
else:
seq[i] = re.sub(r'[^GPAVLIMCFYWHKRQNEDST|gpavlimcfywhkrqnedst]', amino_con, seq[i])
seq[i] = seq[i].upper()
opt_seq.append(seq[i])
else:
seq[i] = seq[i].upper()
opt_seq.append(seq[i])
return opt_seq
# helper functions
def create_features(kname, examples, kparam, train_mode, preproc, seq_source, nuc_con):
"""Converts numpy arrays or sequences into shogun features"""
if kname == 'gauss' or kname == 'linear' or kname == 'poly':
examples = numpy.array(examples)
feats = RealFeatures(examples)
elif kname == 'wd' or kname == 'localalign' or kname == 'localimprove':
if seq_source == 'dna':
examples = non_atcg_convert(examples, nuc_con)
feats = StringCharFeatures(examples, DNA)
elif seq_source == 'protein':
examples = non_aminoacid_converter(examples, nuc_con)
feats = StringCharFeatures(examples, PROTEIN)
else:
sys.stderr.write("Sequence source -"+seq_source+"- is invalid. select [dna|protein]\n")
sys.exit(-1)
elif kname == 'spec' or kname == 'cumspec':
if seq_source == 'dna':
examples = non_atcg_convert(examples, nuc_con)
feats = StringCharFeatures(examples, DNA)
elif seq_source == 'protein':
examples = non_aminoacid_converter(examples, nuc_con)
feats = StringCharFeatures(examples, PROTEIN)
else:
sys.stderr.write("Sequence source -"+seq_source+"- is invalid. select [dna|protein]\n")
sys.exit(-1)
wf = StringUlongFeatures( feats.get_alphabet() )
wf.obtain_from_char(feats, kparam['degree']-1, kparam['degree'], 0, kname=='cumspec')
del feats
if train_mode:
preproc = SortUlongString()
preproc.init(wf)
wf.add_preprocessor(preproc)
ret = wf.apply_preprocessor()
#assert(ret)
feats = wf
elif kname == 'spec2' or kname == 'cumspec2':
# spectrum kernel on two sequences
feats = {}
feats['combined'] = CombinedFeatures()
reversed = kname=='cumspec2'
(ex0,ex1) = zip(*examples)
f0 = StringCharFeatures(list(ex0), DNA)
wf = StringWordFeatures(f0.get_alphabet())
wf.obtain_from_char(f0, kparam['degree']-1, kparam['degree'], 0, reversed)
del f0
if train_mode:
preproc = SortWordString()
preproc.init(wf)
wf.add_preprocessor(preproc)
ret = wf.apply_preprocessor()
assert(ret)
feats['combined'].append_feature_obj(wf)
feats['f0'] = wf
f1 = StringCharFeatures(list(ex1), DNA)
wf = StringWordFeatures( f1.get_alphabet() )
wf.obtain_from_char(f1, kparam['degree']-1, kparam['degree'], 0, reversed)
del f1
if train_mode:
preproc = SortWordString()
preproc.init(wf)
wf.add_preprocessor(preproc)
ret = wf.apply_preprocessor()
assert(ret)
feats['combined'].append_feature_obj(wf)
feats['f1'] = wf
else:
print 'Unknown kernel %s' % kname
return (feats,preproc)
def create_kernel(kname,kparam,feats_train):
"""Call the corresponding constructor for the kernel"""
if kname == 'gauss':
kernel = GaussianKernel(feats_train, feats_train, kparam['width'])
elif kname == 'linear':
kernel = LinearKernel(feats_train, feats_train)
kernel.set_normalizer(AvgDiagKernelNormalizer(kparam['scale']))
elif kname == 'poly':
kernel = PolyKernel(feats_train, feats_train, kparam['degree'], kparam['inhomogene'], kparam['normal'])
elif kname == 'wd':
kernel=WeightedDegreePositionStringKernel(feats_train, feats_train, kparam['degree'])
kernel.set_normalizer(AvgDiagKernelNormalizer(float(kparam['seqlength'])))
kernel.set_shifts(kparam['shift']*numpy.ones(kparam['seqlength'],dtype=numpy.int32))
#kernel=WeightedDegreeStringKernel(feats_train, feats_train, kparam['degree'])
elif kname == 'spec':
kernel = CommUlongStringKernel(feats_train, feats_train)
elif kname == 'cumspec':
kernel = WeightedCommWordStringKernel(feats_train, feats_train)
kernel.set_weights(numpy.ones(kparam['degree']))
elif kname == 'spec2':
kernel = CombinedKernel()
k0 = CommWordStringKernel(feats_train['f0'], feats_train['f0'])
k0.io.disable_progress()
kernel.append_kernel(k0)
k1 = CommWordStringKernel(feats_train['f1'], feats_train['f1'])
k1.io.disable_progress()
kernel.append_kernel(k1)
elif kname == 'cumspec2':
kernel = CombinedKernel()
k0 = WeightedCommWordStringKernel(feats_train['f0'], feats_train['f0'])
k0.set_weights(numpy.ones(kparam['degree']))
k0.io.disable_progress()
kernel.append_kernel(k0)
k1 = WeightedCommWordStringKernel(feats_train['f1'], feats_train['f1'])
k1.set_weights(numpy.ones(kparam['degree']))
k1.io.disable_progress()
kernel.append_kernel(k1)
elif kname == 'localalign':
kernel = LocalAlignmentStringKernel(feats_train, feats_train)
elif kname == 'localimprove':
kernel = LocalityImprovedStringKernel(feats_train, feats_train, kparam['length'],\
kparam['indeg'], kparam['outdeg'])
else:
print 'Unknown kernel %s' % kname
kernel.set_cache_size(32)
return kernel
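# A minimal sketch of the kparam dictionary consumed by create_kernel for the
# weighted-degree ('wd') kernel; the concrete values are hypothetical, and
# 'seqlength' is normally filled in by train() from the training examples.
def _example_wd_kparam():
    """Illustrative only: the keys create_kernel('wd', ...) reads."""
    return {'degree': 20, 'shift': 0, 'seqlength': 100}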
def create_combined_kernel(kname, kparam, examples, train_mode, preproc, seq_source, nuc_con):
"""A wrapper for creating combined kernels.
kname, kparam and examples are lists.
"""
    num_kernels = len(kname)
    feats = {}
    feats['combined'] = CombinedFeatures()
kernel = CombinedKernel()
for kix in xrange(num_kernels):
cur_kname = '%s%d' % (kname[kix],kix)
        (cur_feats, cur_preproc) = create_features(kname[kix], examples[kix], kparam[kix], train_mode, preproc, seq_source, nuc_con)
feats[cur_kname] = cur_feats
cur_kernel = create_kernel(kname[kix], kparam[kix], cur_feats)
kernel.append_kernel(cur_kernel)
return (feats,kernel)
def model2str(kparam,C,kp,shownames=True):
"""Generates a string describing the model parameters"""
if kparam["modelsel_name"]==None or len(kparam["modelsel_params"])==1:
if shownames:
str="\tC=%1.1f" % C
else:
str="\t%1.2f" % C
else:
if type(kp)==type(int(0)):
if shownames:
str="\tC=%1.1f\t%s=%i" %(C, kparam["modelsel_name"], kp)
else:
str="\t%1.1f\t%i" %(C, kp)
else:
if shownames:
str="\tC=%1.1f\t%s=%1.2f" %(C, kparam["modelsel_name"], kp)
else:
str="\t%1.1f\t%1.2f" %(C, kp)
return str
def train(trainex,trainlab,C,kname,kparam,seq_source,nuc_con):
"""Trains a SVM with the given kernel"""
(feats_train, preproc) = create_features(kname,trainex, kparam, True, None, seq_source, nuc_con)
if kname == 'wd':
kparam['seqlength'] = len(trainex[0])
kernel = create_kernel(kname,kparam,feats_train)
if kname == 'spec2' or kname == 'cumspec2':
kernel.init(feats_train['combined'], feats_train['combined'])
else:
kernel.init(feats_train, feats_train)
kernel.io.disable_progress()
kernel.set_optimization_type(SLOWBUTMEMEFFICIENT)
labels = BinaryLabels(numpy.array(trainlab,numpy.double))
# libsvm is fine for most kernels
if kname in ('wd', 'spec', 'cumspec', 'spec2', 'cumspec2'):
# for the string kernels there exist specific optimizations that are only effective when using
# a LinAdd SVM implementation (e.g. SVM-light or GPBT-SVM)
SVMClass = LinAddSVM
elif kname == 'linear':
SVMClass = LinearSVM
else:
SVMClass=DefaultSVM
svm = SVMClass(C, kernel, labels)
svm.io.disable_progress()
svm.set_batch_computation_enabled(True)
svm.set_linadd_enabled(True)
svm.set_epsilon(1e-5)
svm.parallel.set_num_threads(svm.parallel.get_num_cpus())
svm.train()
return (svm, kernel, feats_train, preproc)
def train_and_test(trainex,trainlab,testex,C,kname,kparam, seq_source, nuc_con):
"""Trains a SVM with the given kernel, and predict on the test examples"""
(svm, kernel, feats_train, preproc) = train(trainex,trainlab,C,kname,kparam,seq_source,nuc_con)
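    # Build the test features with the preprocessor fitted during training so
    # train and test data are transformed consistently.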
(feats_test, preproc) = create_features(kname, testex, kparam, False, preproc, seq_source, nuc_con)
if kname == 'spec2' or kname == 'cumspec2':
for feats in feats_train.values():
feats.io.disable_progress()
for feats in feats_test.values():
feats.io.disable_progress()
kernel.init(feats_train['combined'], feats_test['combined'])
else:
feats_train.io.disable_progress()
feats_test.io.disable_progress()
kernel.init(feats_train, feats_test)
kernel.set_optimization_type(SLOWBUTMEMEFFICIENT)
output = svm.apply().get_labels()
return output
def crossvalidation(cv, kname, kparam, C, all_examples, all_labels, seq_source, nuc_con):
    """Perform cross validation using an SVM
    cv -- the number of folds
    kname, kparam -- the kernel name and its parameters
    C -- the SVM regularization constant
    all_examples, all_labels -- the dataset, assumed to be compatible with the kernel
    """
print 'Using %i-fold crossvalidation' % cv
partitions = getPartitionedSet(len(all_labels), cv)
error = []
sum_accuracy = 0.0
sum_roc = 0.0
all_outputs=[0.0] * len(all_labels)
all_split=[-1] * len(all_labels)
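    # For each fold: train on the remaining folds, predict on the held-out
    # fold, and write each prediction back at its original example index so
    # the outputs stay aligned with all_labels.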
for repetition in xrange(cv):
XT, LT, XTE, LTE = getCurrentSplit(repetition, partitions, all_labels, all_examples)
numpos = len(where(array(LTE)>0)[0])
svmout = train_and_test(XT, LT, XTE, C, kname, kparam, seq_source, nuc_con)
for i in xrange(len(svmout)):
all_outputs[partitions[repetition][i]] = svmout[i]
            all_split[partitions[repetition][i]] = repetition
return (all_outputs, all_split)
def evaluate(predictions, splitassignments, labels, roc_fname=None, prc_fname=None):
"""Evaluate prediction results
"""
res_str = ""
cv = 1
if splitassignments!=None:
for split in splitassignments:
if split+1>cv:
cv=int(split+1)
if cv>1:
res_str = "Evaluating on %i examples in %i splits\n" % (len(labels),cv)
else:
res_str = "Evaluating on %i examples\n" % len(labels)
    output_splits = [[] for _ in xrange(cv)]
    label_splits = [[] for _ in xrange(cv)]
for i in xrange(0,len(labels)):
if cv>1:
split=int(splitassignments[i])
else:
split=0
output_splits[split].append(predictions[i])
label_splits[split].append(labels[i])
error = []
sum_accuracy = 0.0
sum_roc = 0.0
sum_prc = 0.0
for split in xrange(cv):
res_str += 'Split %d\n' % (split+1)
        LTE = label_splits[split]
svmout = output_splits[split]
numpos=0
for l in LTE:
if l==1:
numpos+=1
istwoclass = numpos>0 and numpos<len(LTE)
res_str += ' number of positive examples = %i\n' % numpos
res_str += ' number of negative examples = %i\n' % (len(LTE)-numpos)
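        # ROC/PRC curves are only meaningful when the split contains both
        # positive and negative examples.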
if istwoclass:
auROC = calcroc(svmout,LTE)
res_str += ' Area under ROC curve = %2.1f %%\n' % (100.0*auROC)
sum_roc += auROC
if roc_fname!=None:
if split!=cv-1:
plots.plotroc(svmout, LTE, split==cv-1, None, "ROC curve of SVM, split %i" % (split+1))
else:
plots.plotroc(svmout, LTE, split==cv-1, roc_fname, "ROC curve of SVM, split %i" % (split+1))
auPRC = calcprc(svmout,LTE)
res_str += ' Area under PRC curve = %2.1f %%\n' % (100.0*auPRC)
sum_prc += auPRC
if prc_fname!=None:
if split!=cv-1:
plots.plotprc(svmout, LTE, None, "PRC curve of SVM, split %i" % (split+1))
else:
plots.plotprc(svmout, LTE, prc_fname, "PRC curve of SVM, split %i" % (split+1))
acc = accuracy(svmout, LTE)
res_str += ' accuracy (at threshold 0) = %2.1f %% \n' % (100.0*acc)
sum_accuracy += acc
numpos=0
for l in labels:
if l==1:
numpos+=1
mean_roc = sum_roc/cv
mean_prc = sum_prc/cv
mean_acc = sum_accuracy/cv
res_str += 'Averages\n'
res_str += ' number of positive examples = %i\n' % round(numpos/cv)
res_str += ' number of negative examples = %i\n' % round((len(labels)-numpos)/cv)
res_str += ' Area under ROC curve = %2.1f %%\n' % (100.0*mean_roc)
res_str += ' Area under PRC curve = %2.1f %%\n' % (100.0*mean_prc)
res_str += ' accuracy (at threshold 0) = %2.1f %% \n' % (100.0*mean_acc)
return (res_str,mean_roc,mean_prc,mean_acc)
def svm_cv(argv):
"""A top level script to parse input parameters and run cross validation"""
assert(argv[1]=='cv')
if len(argv)<5:sys.stderr.write("usage: %s cv repeat C kernelname [kernelparameters] [arff|fasta] inputfiles outputfile [dna|protein] non(nucleotide|amino)converter \n" % argv[0]);sys.exit(-1)
# parse input parameters
cv = int(argv[2])
C = float(argv[3])
(kernelname,kparam,argv_rest) = parse.parse_kernel_param(argv[4:],False)
(examples,labels,argv_rest) = parse.parse_input_file_train(kernelname, argv_rest)
(seq_source, nuc_con) = ('', '')
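    # String kernels (spec/wd) additionally require the sequence type
    # ([dna|protein]) and a converter for non-nucleotide/non-amino-acid
    # symbols; the other kernels only take the output file name.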
if kernelname == 'spec' or kernelname == 'wd':
if len(argv_rest)<1:sys.stderr.write("outputfile [dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<2:sys.stderr.write("[dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<3:
if argv_rest[-1] == 'dna':
sys.stderr.write("non-nucleotide converter like [A|T|C|G|R|Y|N] is missing. Cannot continue.\n")
sys.exit(-1)
elif argv_rest[-1] == 'protein':
sys.stderr.write("non-amino acid converter like [G|P|A|V|L|I|M|C|F|Y|W|H|K|R|Q|N|E|D|S|T|random] is missing. Cannot continue.\n")
sys.exit(-1)
else:
sys.stderr.write("Here expect FASTA sequence type as [dna|protein] instead of -"+ argv_rest[-1] +"- Cannot continue.\n")
sys.exit(-1)
if len(argv_rest)>3:sys.stderr.write("Too many arguments\n");sys.exit(-1)
seq_source = argv_rest[1]
nuc_con = argv_rest[2]
if kernelname == 'linear' or kernelname == 'gauss' or kernelname == 'poly':
if len(argv_rest)<1:sys.stderr.write("outputfile misssing\n");sys.exit(-1)
if len(argv_rest)>1:sys.stderr.write("Too many arguments\n");sys.exit(-1)
outfilename = argv_rest[0]
utils.check_params(kparam, C, len(examples[0]))
# run cross-validation
(all_outputs, all_split) = crossvalidation(cv, kernelname, kparam, C, examples, labels, seq_source, nuc_con)
try:
f = open(outfilename, 'w+')
except:
        sys.stderr.write('Failed to open the output file at ' + outfilename + '. Cannot continue.\n')
sys.exit(-1)
res_str = '#example\toutput\tsplit\n'
f.write(res_str)
for ix in xrange(len(all_outputs)):
res_str = '%d\t%2.7f\t%d\n' % (ix,all_outputs[ix],all_split[ix])
f.write(res_str)
f.close()
def svm_modelsel(argv):
"""A top level script to parse input parameters and run model selection"""
assert(argv[1]=='modelsel')
if len(argv)<5:sys.stderr.write("usage: %s modelsel repeat Cs kernelname [kernelparameters] [arff|fasta] inputfiles outputfile [dna|protein] non(nucleotide|amino)converter\n" % argv[0]);sys.exit(-1)
# parse input parameters
cv = int(argv[2])
Cs = parse.parse_float_list(argv[3])
(kernelname,kparam,argv_rest) = parse.parse_kernel_param(argv[4:], True)
(examples,labels,argv_rest) = parse.parse_input_file_train(kernelname, argv_rest)
(seq_source, nuc_con) = ('', '')
if kernelname == 'spec' or kernelname == 'wd':
if len(argv_rest)<1:sys.stderr.write("outputfile [dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<2:sys.stderr.write("[dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<3:
if argv_rest[-1] == 'dna':
sys.stderr.write("non-nucleotide converter like [A|T|C|G|R|Y|N] is missing. Cannot continue.\n")
sys.exit(-1)
elif argv_rest[-1] == 'protein':
sys.stderr.write("non-amino acid converter like [G|P|A|V|L|I|M|C|F|Y|W|H|K|R|Q|N|E|D|S|T|random] is missing. Cannot continue.\n")
sys.exit(-1)
else:
sys.stderr.write("Here expect FASTA sequence type as [dna|protein] instead of -"+ argv_rest[-1] +"- Cannot continue.\n")
sys.exit(-1)
if len(argv_rest)>3:sys.stderr.write("Too many arguments\n");sys.exit(-1)
seq_source = argv_rest[1]
nuc_con = argv_rest[2]
if kernelname == 'linear' or kernelname == 'gauss' or kernelname== 'poly':
if len(argv_rest)<1:sys.stderr.write("outputfile missing\n");sys.exit(-1)
if len(argv_rest)>1:sys.stderr.write("Too many arguments\n");sys.exit(-1)
outfilename = argv_rest[0]
# run cross-validation
    mean_rocs = []
    mean_prcs = []
    mean_accs = []
    all_Cs = []
    all_kparam = []
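    # Grid search: evaluate every C (and, if requested, every value of the
    # selected kernel parameter) by cross-validation and record the mean
    # ROC/PRC/accuracy for each combination.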
if kparam["modelsel_name"]==None:
for C in Cs:
utils.check_params(kparam, C, len(examples[0]))
(all_outputs, all_split) = crossvalidation(cv, kernelname, kparam, C, examples, labels, seq_source, nuc_con)
(res_str, mean_roc, mean_prc, mean_acc) = evaluate(all_outputs, all_split, labels)
mean_rocs.append(mean_roc)
mean_prcs.append(mean_prc)
mean_accs.append(mean_acc)
all_Cs.append(C)
all_kparam.append(None)
else: # also optimize one kernel parameter
for C in Cs:
for kp in kparam["modelsel_params"]:
kparam[kparam["modelsel_name"]] = kp
utils.check_params(kparam, C, len(examples[0]))
(all_outputs, all_split) = crossvalidation(cv, kernelname, kparam, C, examples, labels, seq_source, nuc_con)
(res_str, mean_roc, mean_prc, mean_acc) = evaluate(all_outputs, all_split, labels)
mean_rocs.append(mean_roc)
mean_prcs.append(mean_prc)
mean_accs.append(mean_acc)
all_Cs.append(C)
all_kparam.append(kp)
max_roc=numpy.max(numpy.array(mean_rocs))
max_prc=numpy.max(numpy.array(mean_prcs))
max_acc=numpy.max(numpy.array(mean_accs))
try:
f = open(outfilename, 'w+')
except:
        sys.stderr.write('Failed to open the output file at ' + outfilename + '. Cannot continue.\n')
sys.exit(-1)
if kparam["modelsel_name"]==None or len(kparam["modelsel_params"])==1:
detail_str = "\tC\tROC\tPRC\tAccuracy (at threshold 0)\n"
else:
detail_str = "\tC\t%s\tROC\tPRC\tAccuracy (at threshold 0)\n" % kparam["modelsel_name"]
best_roc_str=''
best_prc_str=''
best_acc_str=''
for i in xrange(len(all_Cs)):
# determine the best parameter combinations
if mean_rocs[i]==max_roc:
rocsym='+'
best_roc_str+=model2str(kparam, all_Cs[i], all_kparam[i])+'\n'
else:
rocsym=' '
if mean_prcs[i]==max_prc:
prcsym='+'
best_prc_str+=model2str(kparam, all_Cs[i], all_kparam[i])+'\n'
else:
prcsym=' '
if mean_accs[i]==max_acc:
accsym='+'
best_acc_str+=model2str(kparam, all_Cs[i], all_kparam[i])+'\n'
else:
accsym=' '
detail_str+=model2str(kparam, all_Cs[i], all_kparam[i], False)+'\t'
if kparam["modelsel_name"]==None or len(kparam["modelsel_params"])==1:
detail_str += '%c%2.1f%%\t%c%2.1f%%\t%c%2.1f%%\n' % (rocsym, 100*mean_rocs[i], prcsym, 100*mean_prcs[i], accsym, 100*mean_accs[i])
else:
detail_str += '%c%2.1f%%\t%c%2.1f%%\t%c%2.1f%%\n' % (rocsym, 100*mean_rocs[i], prcsym, 100*mean_prcs[i], accsym, 100*mean_accs[i])
f.write('Best model(s) according to ROC measure:\n%s' % best_roc_str)
f.write('\nBest model(s) according to PRC measure:\n%s' % best_prc_str)
f.write('\nBest model(s) according to accuracy measure:\n%s' % best_acc_str)
f.write('\nDetailed results:\n')
f.write(detail_str)
f.close()
def svm_pred(argv):
"""A top level script to parse input parameters and train and predict"""
assert(argv[1]=='pred')
if len(argv)<6:sys.stderr.write("usage: %s pred C kernelname kernelparameters [arff|fasta] inputfiles outputfile [dna|protein] non(nucleotide|amino)converter\n" % argv[0]);sys.exit(-1)
# parse input parameters
C = float(argv[2])
(kernelname,kparam,argv_rest) = parse.parse_kernel_param(argv[3:],False)
(trainex, trainlab, testex, argv_rest) = parse.parse_input_file_train_test(kernelname, argv_rest)
(seq_source, nuc_con) = ('', '')
if kernelname == 'spec' or kernelname == 'wd' or kernelname == 'localimprove' or kernelname == 'localalign':
if len(argv_rest)<1:sys.stderr.write("outputfile [dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<2:sys.stderr.write("[dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<3:
if argv_rest[-1] == 'dna':
sys.stderr.write("non-nucleotide converter like [A|T|C|G|R|Y|N] is missing. Cannot continue.\n")
sys.exit(-1)
elif argv_rest[-1] == 'protein':
sys.stderr.write("non-amino acid converter like [G|P|A|V|L|I|M|C|F|Y|W|H|K|R|Q|N|E|D|S|T|random] is missing. Cannot continue.\n")
sys.exit(-1)
else:
sys.stderr.write("Here expect FASTA sequence type as [dna|protein] instead of -"+ argv_rest[-1] +"- Cannot continue.\n")
sys.exit(-1)
if len(argv_rest)>3:sys.stderr.write("Too many arguments\n");sys.exit(-1)
seq_source = argv_rest[1]
nuc_con = argv_rest[2]
if kernelname == 'linear' or kernelname== 'poly' or kernelname == 'gauss':
if len(argv_rest)<1:sys.stderr.write("outputfile missing\n");sys.exit(-1)
if len(argv_rest)>1:sys.stderr.write("Too many arguments\n");sys.exit(-1)
outfilename = argv_rest[0]
utils.check_params(kparam, C, len(trainex[0]))
# run training and testing
svmout = train_and_test(trainex, trainlab, testex, C, kernelname, kparam, seq_source, nuc_con)
# write output file
try:
f = open(outfilename,'w')
except:
        sys.stderr.write('Failed to open the output file at ' + outfilename + '. Cannot continue.\n')
sys.exit(-1)
res_str = '#example\toutput\n'
f.write(res_str)
for ix in xrange(len(svmout)):
res_str = str(ix)+'\t'+str(svmout[ix])+'\n'
f.write(res_str)
f.close()
def svm_eval(argv):
"""A top level script to parse input parameters and evaluate"""
assert(argv[1]=='eval')
if len(argv)<6:sys.stderr.write("usage: %s eval predictionfile [arff|fasta] inputfiles outputfile [roc|prc figure.png]\n" % argv[0]);sys.exit(-1)
# parse input parameters
(predictions, splitassignments) = parse.parse_prediction_file(argv[2])
(trainex, trainlab, argv_rest) = parse.parse_input_file_train(None, argv[3:])
if len(argv_rest)<1:sys.stderr.write("Output file missing\n");sys.exit(-1)
if len(argv_rest)>3:sys.stderr.write("Too many arguments\n");sys.exit(-1)
outfilename = argv_rest[0]
roc_fname = None
prc_fname = None
if len(argv_rest)>2:
if argv_rest[1]=='roc':
roc_fname=argv_rest[2]
elif argv_rest[1]=='prc':
prc_fname=argv_rest[2]
else:
sys.stderr.write('Usage: [roc|prc]')
sys.exit(-1)
# run training and testing
(res_str,mean_roc,mean_prc,mean_acc) = evaluate(predictions, splitassignments, trainlab, roc_fname, prc_fname)
# write output file
try:
f = open(outfilename,'w')
except:
        sys.stderr.write('Failed to open the output file at ' + outfilename + '. Cannot continue.\n')
sys.exit(-1)
f.write(res_str)
f.close()
def svm_poim(argv):
"""A top level script to parse input parameters and plot poims"""
assert(argv[1]=='poim')
if len(argv)<7:sys.stderr.write("usage: %s poim C poimdegree wd [kernelparameters] [arff|fasta] inputfiles poim.png [dna|protein] non(nucleotide|amino)converter\n" % argv[0]);sys.exit(-1)
# parse input parameters
C = float(argv[2])
poimdegree = int(argv[3])
(kernelname,kparam,argv_rest) = parse.parse_kernel_param(argv[4:], False)
(examples,labels,argv_rest) = parse.parse_input_file_train(kernelname, argv_rest)
if len(argv_rest)<1:sys.stderr.write("poim.png [dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<2:sys.stderr.write("[dna|protein] non(nucleotide|amino)converter are missing\n");sys.exit(-1)
if len(argv_rest)<3:
if argv_rest[-1] == 'dna':
sys.stderr.write("non-nucleotide converter like [A|T|C|G|R|Y|N] is missing. Cannot continue.\n")
sys.exit(-1)
elif argv_rest[-1] == 'protein':
sys.stderr.write("non-amino acid converter like [G|P|A|V|L|I|M|C|F|Y|W|H|K|R|Q|N|E|D|S|T|random] is missing. Cannot continue.\n")
sys.exit(-1)
else:
sys.stderr.write("Here expect FASTA sequence type as [dna|protein] instead of -"+ argv_rest[-1] +"- Cannot continue.\n")
sys.exit(-1)
if len(argv_rest)>3:sys.stderr.write("Too many arguments\n");sys.exit(-1)
poimfilename = argv_rest[0]
seq_source = argv_rest[1]
nuc_con = argv_rest[2]
utils.check_params(kparam, C, len(examples[0]))
# train svm and compute POIMs
(svm, kernel, feats_train, preproc) = train(examples,labels,C,kernelname,kparam,seq_source,nuc_con)
(poim, max_poim, diff_poim, poim_totalmass) = compute_poims(svm, kernel, poimdegree, len(examples[0]))
# plot poims
plots.plot_poims(poimfilename, poim, max_poim, diff_poim, poim_totalmass, poimdegree, len(examples[0]))
|
|
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Context Validator Tests."""
import random
import unittest
import linear_algebra
import sympy
import context_validator
from context_validator import ValidationError
from parallel_accel.shared import schemas
def get_acyclic_graph(num_discretes, depth, num_params):
"""Returns a valid acyclic_graph for the given number of discretes."""
if depth * num_discretes < num_params:
raise Exception("Can only have as many parameters as building_blocks.")
acyclic_graph = linear_algebra.Graph()
discretes = linear_algebra.GridSpace.rect(1, num_discretes)
if num_params < 1:
for d in range(depth):
for q in discretes:
acyclic_graph += linear_algebra.flip_x_axis(q)
else:
params = [sympy.Symbol(f"s_{n}") for n in range(num_params)]
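        # Tile the symbol list so there is at least one symbol available for
        # every parameterized building block in the depth x num_discretes grid.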
params *= (depth * num_discretes + num_params) // num_params
for d in range(depth):
for q, s in zip(
discretes, params[d * num_discretes : (d + 1) * num_discretes]
):
acyclic_graph += linear_algebra.flip_x_axis(q) ** s
return acyclic_graph
def get_operators(num_discretes, num_ops, num_terms):
"""Returns a valid list of operators for the given number of discretes."""
    if 2 ** (num_discretes + 1) <= num_terms:
        raise Exception("No more than 2**(num_discretes+1) terms are possible.")
operators = []
discretes = linear_algebra.GridSpace.rect(1, num_discretes)
for _ in range(num_ops):
this_op = linear_algebra.ProbBasisAxisSum()
for term_num in range(num_terms):
term = random.random() * linear_algebra.I(discretes[0])
temp_term_num = int(term_num)
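            # Interpret term_num as a bit mask over the discretes: terms with
            # term_num <= 2**num_discretes apply flip_x_axis on the set bits;
            # larger terms halve the mask once and apply flip_y_axis instead.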
if term_num <= 2 ** num_discretes:
for i in range(num_discretes):
if temp_term_num % 2:
term *= linear_algebra.flip_x_axis(discretes[i])
temp_term_num //= 2
else:
temp_term_num //= 2
for i in range(num_discretes):
if temp_term_num % 2:
term *= linear_algebra.flip_y_axis(discretes[i])
temp_term_num //= 2
this_op += term
operators.append(this_op)
return operators
def get_param_resolver(num_params):
"""Returns a valid param_resolver for the given number of discretes."""
params = {f"s_{n}": n for n in range(num_params)}
return linear_algebra.ParamResolver(params)
class ValidatorTestCase(unittest.TestCase):
def validate_pass(self):
self.validator.validate()
def validate_fail(self):
with self.assertRaises(ValidationError):
self.validator.validate()
class BaseValidatorTest(ValidatorTestCase):
@classmethod
def setUpClass(cls):
if cls is BaseValidatorTest:
raise unittest.SkipTest("Skip Base Tests")
super(BaseValidatorTest, cls).setUpClass()
def test_valid_context(self):
for discretes in range(
self.validator.min_num_discretes, self.validator.max_num_discretes + 1
):
with self.subTest(discretes=discretes):
self.context.acyclic_graph = get_acyclic_graph(discretes, 10, 10)
self.validate_pass()
def test_max_num_discretes(self):
num_discretes = self.validator.max_num_discretes + 1
self.context.acyclic_graph = get_acyclic_graph(num_discretes, 10, 10)
self.validate_fail()
def test_max_depth(self):
self.context.acyclic_graph = get_acyclic_graph(
10, self.validator.acyclic_graph_depth_limit + 1, 10
)
self.validate_fail()
def test_terminal_observation(self):
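        # Observations may only appear as the final layer: validation passes
        # with terminal observations, then fails once more building blocks are
        # appended after them.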
self.context.acyclic_graph.append(
[linear_algebra.measure(q) for q in self.context.acyclic_graph.all_discretes()]
)
self.validate_pass()
self.context.acyclic_graph.append(
[linear_algebra.flip_x_axis(q) for q in self.context.acyclic_graph.all_discretes()]
)
self.validate_fail()
def test_num_params(self):
self.context.acyclic_graph = get_acyclic_graph(
10, 4000, self.validator.param_resolver_limit + 1
)
self.context.param_resolver = get_param_resolver(
self.validator.param_resolver_limit + 1
)
self.validate_fail()
def test_non_matching_params(self):
self.context.acyclic_graph = get_acyclic_graph(10, 10, 0)
self.validate_fail()
def test_bad_building_block(self):
bad_op = linear_algebra.PhasedXZGate(
x_exponent=1, z_exponent=1, axis_phase_exponent=1
)
self.context.acyclic_graph.append(
[bad_op(q) for q in self.context.acyclic_graph.all_discretes()]
)
self.validate_fail()
def test_bad_building_block_symbol_exp(self):
bad_exp = sympy.Symbol("a") * sympy.Symbol("b")
bad_op = linear_algebra.flip_x_axis ** bad_exp
self.context.acyclic_graph.append(
[bad_op(q) for q in self.context.acyclic_graph.all_discretes()]
)
self.validate_fail()
def test_bad_building_block_num_params(self):
bad_op = linear_algebra.rotate_on_xy_plane(theta=sympy.Symbol("a"), phi=sympy.Symbol("b"))
discretes = list(self.context.acyclic_graph.all_discretes())
self.context.acyclic_graph.append([bad_op(discretes[0], discretes[1])])
self.validate_fail()
class SampleValidatorTest(BaseValidatorTest):
context_schema = schemas.SampleJobContext
def setUp(self) -> None:
acyclic_graph = get_acyclic_graph(10, 10, 10)
param_resolver = get_param_resolver(10)
self.context = self.context_schema(
acyclic_graph=acyclic_graph, param_resolver=param_resolver
)
self.validator = context_validator.SampleValidator(self.context)
def test_bad_num_reps(self):
self.context.repetitions = self.validator.repetition_limit + 1
self.validate_fail()
class ExpectationValidatorTest(BaseValidatorTest):
context_schema = schemas.ExpectationJobContext
def setUp(self) -> None:
acyclic_graph = get_acyclic_graph(21, 10, 10)
param_resolver = get_param_resolver(10)
operators = get_operators(21, 4, 10)
self.context = self.context_schema(
acyclic_graph=acyclic_graph, param_resolver=param_resolver, operators=operators
)
self.validator = context_validator.ExpectationValidator(self.context)
def test_operator_num(self):
self.context.operators = get_operators(21, 10, 1)
self.validate_fail()
def test_operator_terms(self):
self.context.operators = get_operators(21, 1, 22)
self.validate_fail()
class SampleSweepValidatorTest(SampleValidatorTest):
context_schema = schemas.SampleSweepJobContext
def setUp(self) -> None:
acyclic_graph = get_acyclic_graph(10, 10, 10)
params = [get_param_resolver(10)]
self.context = self.context_schema(acyclic_graph=acyclic_graph, params=params)
self.validator = context_validator.SampleSweepValidator(self.context)
def test_num_params(self):
self.context.acyclic_graph = get_acyclic_graph(
10, 4000, self.validator.param_resolver_limit + 1
)
self.context.params = [
get_param_resolver(self.validator.param_resolver_limit),
get_param_resolver(self.validator.param_resolver_limit + 1),
]
self.validate_fail()
def test_non_matching_params(self):
self.context.acyclic_graph = get_acyclic_graph(10, 10, 0)
self.validate_fail()
def test_num_sweepables(self):
self.context.params = [get_param_resolver(10) for _ in range(11)]
self.validate_fail()
class ExpectationSweepValidatorTest(ExpectationValidatorTest):
context_schema = schemas.ExpectationSweepJobContext
def setUp(self) -> None:
acyclic_graph = get_acyclic_graph(21, 10, 10)
params = [get_param_resolver(10)]
operators = get_operators(21, 4, 10)
self.context = self.context_schema(
acyclic_graph=acyclic_graph, params=params, operators=operators
)
self.validator = context_validator.ExpectationSweepValidator(
self.context
)
def test_num_params(self):
self.context.acyclic_graph = get_acyclic_graph(
10, 4000, self.validator.param_resolver_limit + 1
)
self.context.params = [
get_param_resolver(self.validator.param_resolver_limit),
get_param_resolver(self.validator.param_resolver_limit + 1),
]
self.validate_fail()
def test_non_matching_params(self):
self.context.acyclic_graph = get_acyclic_graph(10, 10, 0)
self.validate_fail()
def test_num_sweepables(self):
self.context.params = [get_param_resolver(10) for _ in range(11)]
self.validate_fail()
class SweepBatchValidatorTest(ValidatorTestCase):
context_schema = schemas.SampleBatchJobContext
def setUp(self) -> None:
batch_size = 10
acyclic_graphs = [get_acyclic_graph(10, 10, 10) for _ in range(batch_size)]
params = [
[get_param_resolver(10) for _ in range(batch_size)]
for _ in range(batch_size)
]
repetitions = [1] * batch_size
self.context = self.context_schema(
acyclic_graphs=acyclic_graphs, params=params, repetitions=repetitions
)
self.validator = context_validator.SampleBatchValidator(self.context)
def test_valid(self):
self.validate_pass()
def test_num_batches(self):
batch_size = 11
self.context.acyclic_graphs = [
get_acyclic_graph(21, 10, 10) for _ in range(batch_size)
]
self.context.params = [
[get_param_resolver(10) for _ in range(batch_size)]
for _ in range(batch_size)
]
self.validate_fail()
class ExpectationBatchValidatorTest(ValidatorTestCase):
context_schema = schemas.ExpectationBatchJobContext
def setUp(self) -> None:
batch_size = 10
acyclic_graphs = [get_acyclic_graph(21, 10, 10) for _ in range(batch_size)]
params = [
[get_param_resolver(10) for _ in range(batch_size)]
for _ in range(batch_size)
]
operators = [get_operators(21, 4, 4) for _ in range(batch_size)]
self.context = self.context_schema(
acyclic_graphs=acyclic_graphs, params=params, operators=operators
)
self.validator = context_validator.ExpectationBatchValidator(
self.context
)
def test_valid(self):
self.validate_pass()
def test_num_batches(self):
batch_size = 11
self.context.acyclic_graphs = [
get_acyclic_graph(21, 10, 10) for _ in range(batch_size)
]
self.context.params = [
[get_param_resolver(10) for _ in range(batch_size)]
for _ in range(batch_size)
]
self.context.operators = [
get_operators(21, 4, 4) for _ in range(batch_size)
]
self.validate_fail()
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import contextlib
import copy
import functools
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import versionutils
import six
from nova import availability_zones
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import context as nova_context
from nova.db import base
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import image
from nova import manager
from nova import network
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova import profiler
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def targets_cell(fn):
"""Wrap a method and automatically target the instance's cell.
This decorates a method with signature func(self, context, instance, ...)
and automatically targets the context with the instance's cell
mapping. It does this by looking up the InstanceMapping.
"""
@functools.wraps(fn)
def wrapper(self, context, *args, **kwargs):
instance = kwargs.get('instance') or args[0]
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
LOG.error(_LE('InstanceMapping not found, unable to target cell'),
instance=instance)
im = None
else:
LOG.debug('Targeting cell %(cell)s for conductor method %(meth)s',
{'cell': im.cell_mapping.identity,
'meth': fn.__name__})
# NOTE(danms): Target our context to the cell for the rest of
# this request, so that none of the subsequent code needs to
# care about it.
nova_context.set_target_cell(context, im.cell_mapping)
return fn(self, context, *args, **kwargs)
return wrapper
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='3.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.compute_task_mgr = ComputeTaskManager()
self.additional_endpoints.append(self.compute_task_mgr)
# NOTE(hanlind): This can be removed in version 4.0 of the RPC API
def provider_fw_rule_get_all(self, context):
# NOTE(hanlind): Simulate an empty db result for compat reasons.
return []
def _object_dispatch(self, target, method, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(*args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objclass = nova_object.NovaObject.obj_class_from_name(
objname, object_versions[objname])
args = tuple([context] + list(args))
result = self._object_dispatch(objclass, objmethod, args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
# NOTE(hanlind): Do not convert older than requested objects,
# see bug #1596119.
if isinstance(result, nova_object.NovaObject):
target_version = object_versions[objname]
requested_version = versionutils.convert_version_to_tuple(
target_version)
actual_version = versionutils.convert_version_to_tuple(
result.VERSION)
do_backport = requested_version < actual_version
other_major_version = requested_version[0] != actual_version[0]
if do_backport or other_major_version:
result = result.obj_to_primitive(
target_version=target_version,
version_manifest=object_versions)
return result
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport_versions(self, context, objinst, object_versions):
target = object_versions[objinst.obj_name()]
LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s',
{'obj': objinst.obj_name(),
'ver': target,
'manifest': ','.join(
['%s=%s' % (name, ver)
for name, ver in object_versions.items()])})
return objinst.obj_to_primitive(target_version=target,
version_manifest=object_versions)
def reset(self):
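        # Drop the cached minimum service version so subsequent version checks
        # re-read it from the database.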
objects.Service.clear_min_version_cache()
@contextlib.contextmanager
def try_target_cell(context, cell):
"""If cell is not None call func with context.target_cell.
This is a method to help during the transition period. Currently
various mappings may not exist if a deployment has not migrated to
cellsv2. If there is no mapping call the func as normal, otherwise
call it in a target_cell context.
"""
if cell:
with nova_context.target_cell(context, cell) as cell_context:
yield cell_context
else:
yield context
@contextlib.contextmanager
def obj_target_cell(obj, cell):
"""Run with object's context set to a specific cell"""
with try_target_cell(obj._context, cell) as target:
with obj.obj_alternate_context(target):
yield
@profiler.trace_cls("rpc")
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.16')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.network_api = network.API()
self.servicegroup_api = servicegroup.API()
self.scheduler_client = scheduler_client.SchedulerClient()
self.notifier = rpc.get_notifier('compute', CONF.host)
def reset(self):
LOG.info(_LI('Reloading compute RPC API'))
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
# TODO(tdurakov): remove `live` parameter here on compute task api RPC
# version bump to 2.x
@messaging.expected_exceptions(
exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.ComputeHostNotFound,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.UnsupportedPolicyException)
@targets_cell
@wrap_instance_event(prefix='conductor')
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True, request_spec=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE: Remove this when we drop support for v1 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations, clean_shutdown, request_spec)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations, clean_shutdown, request_spec):
image = utils.get_image_from_system_metadata(
instance.system_metadata)
# NOTE(sbauza): If a reschedule occurs when prep_resize(), then
# it only provides filter_properties legacy dict back to the
# conductor with no RequestSpec part of the payload.
if not request_spec:
# Make sure we hydrate a new RequestSpec object with the new flavor
# and not the nested one from the instance
request_spec = objects.RequestSpec.from_components(
context, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone)
else:
            # NOTE(sbauza): A resize means a new flavor, so we need to update
            # the original RequestSpec object to make sure the scheduler
            # verifies the right one and not the original flavor
request_spec.flavor = flavor
task = self._build_cold_migrate_task(context, instance, flavor,
request_spec,
reservations, clean_shutdown)
# TODO(sbauza): Provide directly the RequestSpec object once
# _set_vm_state_and_notify() accepts it
legacy_spec = request_spec.to_legacy_request_spec_dict()
try:
task.execute()
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
# if the flavor IDs match, it's migrate; otherwise resize
if flavor.id == instance.instance_type_id:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, legacy_spec)
# NOTE(sbauza): Make sure we persist the new flavor in case we had
# a successful scheduler call if and only if nothing bad happened
if request_spec.obj_what_changed():
request_spec.save()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec)
def _cleanup_allocated_networks(
self, context, instance, requested_networks):
try:
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if not (requested_networks and requested_networks.no_allocate):
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE: It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@wrap_instance_event(prefix='conductor')
def live_migrate_instance(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance.uuid, },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec)
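        # Create the migration record up-front with status 'accepted' so it
        # can be flipped to 'error' if the live migration task fails below.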
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
migration.status = 'accepted'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.migration_type = 'live-migration'
if instance.obj_attr_is_set('flavor'):
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance.flavor.id
else:
migration.old_instance_type_id = instance.instance_type_id
migration.new_instance_type_id = instance.instance_type_id
migration.create()
task = self._build_live_migrate_task(context, instance, destination,
block_migration, disk_over_commit,
migration, request_spec)
try:
task.execute()
except (exception.NoValidHost,
exception.ComputeHostNotFound,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.MigrationSchedulerRPCError) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
_set_vm_state(context, instance, ex, instance.vm_state)
migration.status = 'error'
migration.save()
except Exception as ex:
LOG.error(_LE('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.'),
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
_set_vm_state(context, instance, ex, vm_states.ERROR,
instance.task_state)
migration.status = 'error'
migration.save()
raise exception.MigrationError(reason=six.text_type(ex))
def _build_live_migrate_task(self, context, instance, destination,
block_migration, disk_over_commit, migration,
request_spec=None):
return live_migrate.LiveMigrationTask(context, instance,
destination, block_migration,
disk_over_commit, migration,
self.compute_rpcapi,
self.servicegroup_api,
self.scheduler_client,
request_spec)
def _build_cold_migrate_task(self, context, instance, flavor,
request_spec, reservations,
clean_shutdown):
return migrate.MigrationTask(context, instance, flavor,
request_spec,
reservations, clean_shutdown,
self.compute_rpcapi,
self.scheduler_client)
def _destroy_build_request(self, context, instance):
# The BuildRequest needs to be stored until the instance is mapped to
# an instance table. At that point it will never be used again and
# should be deleted.
try:
build_request = objects.BuildRequest.get_by_instance_uuid(context,
instance.uuid)
# TODO(alaski): Sync API updates of the build_request to the
# instance before it is destroyed. Right now only locked_by can
# be updated before this is destroyed.
build_request.destroy()
except exception.BuildRequestNotFound:
with excutils.save_and_reraise_exception() as exc_ctxt:
service_version = objects.Service.get_minimum_version(
context, 'nova-osapi_compute')
if service_version >= 12:
# A BuildRequest was created during the boot process, the
# NotFound exception indicates a delete happened which
# should abort the boot.
pass
else:
LOG.debug('BuildRequest not found for instance %(uuid)s, '
'likely due to an older nova-api service '
'running.', {'uuid': instance.uuid})
exc_ctxt.reraise = False
return
def _populate_instance_mapping(self, context, instance, host):
try:
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# NOTE(alaski): If nova-api is up to date this exception should
# never be hit. But during an upgrade it's possible that an old
# nova-api didn't create an instance_mapping during this boot
# request.
LOG.debug('Instance was not mapped to a cell, likely due '
'to an older nova-api service running.',
instance=instance)
return None
else:
try:
host_mapping = objects.HostMapping.get_by_host(context,
host['host'])
except exception.HostMappingNotFound:
# NOTE(alaski): For now this exception means that a
# deployment has not migrated to cellsv2 and we should
# remove the instance_mapping that has been created.
# Eventually this will indicate a failure to properly map a
# host to a cell and we may want to reschedule.
inst_mapping.destroy()
return None
else:
inst_mapping.cell_mapping = host_mapping.cell_mapping
inst_mapping.save()
return inst_mapping
# NOTE(danms): This is never cell-targeted because it is only used for
# cellsv1 (which does not target cells directly) and n-cpu reschedules
# (which go to the cell conductor and thus are always cell-specific).
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
request_spec = {}
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
request_spec = scheduler_utils.build_request_spec(
context, image, instances)
scheduler_utils.populate_retry(
filter_properties, instances[0].uuid)
hosts = self._schedule_instances(
context, request_spec, filter_properties)
except Exception as exc:
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates,
exc, request_spec)
try:
# If the BuildRequest stays around then instance show/lists
# will pull from it rather than the errored instance.
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
pass
self._cleanup_allocated_networks(
context, instance, requested_networks)
return
for (instance, host) in six.moves.zip(instances, hosts):
instance.availability_zone = (
availability_zones.get_host_availability_zone(context,
host['host']))
try:
# NOTE(danms): This saves the az change above, refreshes our
# instance, and tells us if it has been deleted underneath us
instance.save()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# This is populated in scheduler_utils.populate_retry
num_attempts = local_filter_props.get('retry',
{}).get('num_attempts', 1)
if num_attempts <= 1:
# If this is a reschedule the instance is already mapped to
# this cell and the BuildRequest is already deleted so ignore
# the logic below.
inst_mapping = self._populate_instance_mapping(context,
instance,
host)
try:
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
# This indicates an instance delete has been requested in
# the API. Stop the build, cleanup the instance_mapping and
# potentially the block_device_mappings
# TODO(alaski): Handle block_device_mapping cleanup
if inst_mapping:
inst_mapping.destroy()
return
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _schedule_instances(self, context, request_spec, filter_properties):
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
# TODO(sbauza): Hydrate here the object until we modify the
# scheduler.utils methods to directly use the RequestSpec object
spec_obj = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
hosts = self.scheduler_client.select_destinations(context, spec_obj)
return hosts
@targets_cell
def unshelve_instance(self, context, instance, request_spec=None):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id, show_deleted=False)
else:
raise exception.ImageNotFound(image_id='')
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for image if image_id is None as
# "shelved_image_id" key is not set for volume backed
# instance during the shelve process
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is
# old. We need to mock that the old way
filter_properties = {}
request_spec = scheduler_utils.build_request_spec(
context, image, [instance])
else:
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when _schedule_instances(),
# populate_filter_properties and populate_retry()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
request_spec = request_spec.\
to_legacy_request_spec_dict()
scheduler_utils.populate_retry(filter_properties,
instance.uuid)
hosts = self._schedule_instances(
context, request_spec, filter_properties)
host_state = hosts[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
LOG.warning(_LW("No valid host found for unshelve instance"),
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
LOG.error(_LE("Unshelve attempted but an error "
"has occurred"), instance=instance)
else:
LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED'), instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
@targets_cell
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
request_spec=None):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
node = limits = None
if not host:
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is old
# We need to mock that the old way
filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(
context, image_ref, [instance])
else:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host for avoiding the scheduler to pick it
request_spec.ignore_hosts = request_spec.ignore_hosts or []
request_spec.ignore_hosts.append(instance.host)
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when _schedule_instances() and _set_vm_state_and_notify()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
request_spec = request_spec.to_legacy_request_spec_dict()
try:
hosts = self._schedule_instances(
context, request_spec, filter_properties)
host_dict = hosts.pop(0)
host, node, limits = (host_dict['host'],
host_dict['nodename'],
host_dict['limits'])
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_LW("No valid host found for rebuild"),
instance=instance)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning(_LW("Server with unsupported policy "
"cannot be rebuilt"),
instance=instance)
try:
migration = objects.Migration.get_by_instance_and_status(
context, instance.uuid, 'accepted')
except exception.MigrationNotFoundByStatus:
LOG.debug("No migration record for the rebuild/evacuate "
"request.", instance=instance)
migration = None
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "rebuild.scheduled")
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
migration=migration,
host=host, node=node, limits=limits)
# TODO(avolkov): move method to bdm
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _create_block_device_mapping(self, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance_uuid
bdm.update_or_create()
return instance_block_device_mapping
def _bury_in_cell0(self, context, request_spec, exc,
build_requests=None, instances=None):
"""Ensure all provided build_requests and instances end up in cell0.
Cell0 is the fake cell we schedule dead instances to when we can't
schedule them somewhere real. Requests that don't yet have instances
will get a new instance, created in cell0. Instances that have not yet
been created will be created in cell0. All build requests are destroyed
after we're done. Failure to delete a build request will trigger the
instance deletion, just like the happy path in
schedule_and_build_instances() below.
"""
try:
cell0 = objects.CellMapping.get_by_uuid(
context, objects.CellMapping.CELL0_UUID)
except exception.CellMappingNotFound:
# Not yet setup for cellsv2. Instances will need to be written
# to the configured database. This will become a deployment
# error in Ocata.
LOG.error(_LE('No cell mapping found for cell0 while '
'trying to record scheduling failure. '
'Setup is incomplete.'))
return
build_requests = build_requests or []
instances = instances or []
instances_by_uuid = {inst.uuid: inst for inst in instances}
for build_request in build_requests:
if build_request.instance_uuid not in instances_by_uuid:
# This is an instance object with no matching db entry.
instance = build_request.get_new_instance(context)
instances_by_uuid[instance.uuid] = instance
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
legacy_spec = request_spec.to_legacy_request_spec_dict()
for instance in instances_by_uuid.values():
with obj_target_cell(instance, cell0):
instance.create()
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates,
exc, legacy_spec)
try:
inst_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell0
inst_mapping.save()
except exception.InstanceMappingNotFound:
pass
for build_request in build_requests:
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# Instance was deleted before we finished scheduling
inst = instances_by_uuid[build_request.instance_uuid]
with obj_target_cell(inst, cell0):
inst.destroy()
def schedule_and_build_instances(self, context, build_requests,
request_specs, image,
admin_password, injected_files,
requested_networks, block_device_mapping):
legacy_spec = request_specs[0].to_legacy_request_spec_dict()
try:
hosts = self._schedule_instances(context, legacy_spec,
request_specs[0].to_legacy_filter_properties_dict())
except Exception as exc:
LOG.exception(_LE('Failed to schedule instances'))
self._bury_in_cell0(context, request_specs[0], exc,
build_requests=build_requests)
return
host_mapping_cache = {}
for (build_request, request_spec, host) in six.moves.zip(
build_requests, request_specs, hosts):
filter_props = request_spec.to_legacy_filter_properties_dict()
instance = build_request.get_new_instance(context)
scheduler_utils.populate_retry(filter_props, instance.uuid)
scheduler_utils.populate_filter_properties(filter_props,
host)
# Convert host from the scheduler into a cell record
if host['host'] not in host_mapping_cache:
try:
host_mapping = objects.HostMapping.get_by_host(
context, host['host'])
host_mapping_cache[host['host']] = host_mapping
except exception.HostMappingNotFound as exc:
LOG.error(_LE('No host-to-cell mapping found for selected '
'host %(host)s. Setup is incomplete.'),
{'host': host['host']})
self._bury_in_cell0(context, request_spec, exc,
build_requests=[build_request],
instances=[instance])
continue
else:
host_mapping = host_mapping_cache[host['host']]
cell = host_mapping.cell_mapping
# Before we create the instance, let's make one final check that
# the build request is still around and wasn't deleted by the user
# already.
try:
objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
except exception.BuildRequestNotFound:
# the build request is gone so we're done for this instance
LOG.debug('While scheduling instance, the build request '
'was already deleted.', instance=instance)
continue
else:
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host['host']))
with obj_target_cell(instance, cell):
instance.create()
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="conductor")
with obj_target_cell(instance, cell):
objects.InstanceAction.action_start(
context, instance.uuid, instance_actions.CREATE,
want_result=False)
instance_bdms = self._create_block_device_mapping(
instance.flavor, instance.uuid, block_device_mapping)
# Update mapping for instance. Normally this check is guarded by
# a try/except but if we're here we know that a newer nova-api
# handled the build process and would have created the mapping
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell
inst_mapping.save()
if not self._delete_build_request(
context, build_request, instance, cell, instance_bdms):
# The build request was deleted before/during scheduling so
# the instance is gone and we don't have anything to build for
# this one.
continue
# NOTE(danms): Compute RPC expects security group names or ids
# not objects, so convert this to a list of names until we can
# pass the objects.
legacy_secgroups = [s.identifier
for s in request_spec.security_groups]
with obj_target_cell(instance, cell):
self.compute_rpcapi.build_and_run_instance(
context, instance=instance, image=image,
request_spec=request_spec,
filter_properties=filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=legacy_secgroups,
block_device_mapping=instance_bdms,
host=host['host'], node=host['nodename'],
limits=host['limits'])
def _delete_build_request(self, context, build_request, instance, cell,
instance_bdms):
"""Delete a build request after creating the instance in the cell.
This method handles cleaning up the instance in case the build request
is already deleted by the time we try to delete it.
:param context: the context of the request being handled
        :type context: nova.context.RequestContext
:param build_request: the build request to delete
:type build_request: nova.objects.BuildRequest
:param instance: the instance created from the build_request
:type instance: nova.objects.Instance
:param cell: the cell in which the instance was created
:type cell: nova.objects.CellMapping
:param instance_bdms: list of block device mappings for the instance
:type instance_bdms: nova.objects.BlockDeviceMappingList
:returns: True if the build request was successfully deleted, False if
the build request was already deleted and the instance is now gone.
"""
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# This indicates an instance deletion request has been
# processed, and the build should halt here. Clean up the
# bdm and instance record.
with obj_target_cell(instance, cell):
with compute_utils.notify_about_instance_delete(
self.notifier, context, instance):
try:
instance.destroy()
except exception.InstanceNotFound:
pass
except exception.ObjectActionError:
# NOTE(melwitt): Instance became scheduled during
# the destroy, "host changed". Refresh and re-destroy.
try:
instance.refresh()
instance.destroy()
except exception.InstanceNotFound:
pass
for bdm in instance_bdms:
with obj_target_cell(bdm, cell):
try:
bdm.destroy()
except exception.ObjectActionError:
pass
return False
return True
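# --- Illustration only (not part of nova) -----------------------------------
# A minimal, framework-free sketch of the decision implemented by
# _delete_build_request above: destroying the build request is the commit
# point for the build; if it is already gone, the user deleted the instance
# mid-build, so the instance and its block device mappings are torn down and
# the build is abandoned. The stand-in exception and names are hypothetical.
class _SketchBuildRequestGone(Exception):
    """Stand-in for exception.BuildRequestNotFound."""
def _sketch_delete_build_request(build_request, instance, instance_bdms):
    try:
        build_request.destroy()
    except _SketchBuildRequestGone:
        instance.destroy()
        for bdm in instance_bdms:
            bdm.destroy()
        return False  # build request gone: nothing left to build
    return True  # safe to continue with the build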
|
|
"""Support for Copy Number Variations (CNVs) with GATK4
https://software.broadinstitute.org/gatk/documentation/article?id=11682
https://gatkforums.broadinstitute.org/dsde/discussion/11683/
"""
import glob
import os
import shutil
import numpy as np
import toolz as tz
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.variation import bedutils, vcfutils
def run(items, background=None):
"""Detect copy number variations from batched set of samples using GATK4 CNV calling.
TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller
"""
if not background: background = []
paired = vcfutils.get_paired(items + background)
if paired:
out = _run_paired(paired)
else:
out = items
logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" %
", ".join([dd.get_sample_name(d) for d in items + background]))
return out
def _run_paired(paired):
"""Run somatic variant calling pipeline.
"""
from bcbio.structural import titancna
work_dir = _sv_workdir(paired.tumor_data)
seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data),
work_dir, paired)
call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data)
out = []
if paired.normal_data:
out.append(paired.normal_data)
if "sv" not in paired.tumor_data:
paired.tumor_data["sv"] = []
paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv",
"call_file": call_file,
"vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header,
_seg_to_vcf, paired.tumor_data),
"seg": seg_files["seg"],
"plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)})
out.append(paired.tumor_data)
return out
def call_copy_numbers(seg_file, work_dir, data):
"""Call copy numbers from a normalized and segmented input file.
"""
out_file = os.path.join(work_dir, "%s-call.seg" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "CallCopyRatioSegments",
"-I", seg_file, "-O", tx_out_file]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def plot_model_segments(seg_files, work_dir, data):
"""Diagnostic plots of segmentation and inputs.
"""
from bcbio.heterogeneity import chromhacks
out_file = os.path.join(work_dir, "%s.modeled.png" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
dict_file = utils.splitext_plus(dd.get_ref_file(data))[0] + ".dict"
plot_dict = os.path.join(os.path.dirname(tx_out_file), os.path.basename(dict_file))
with open(dict_file) as in_handle:
with open(plot_dict, "w") as out_handle:
for line in in_handle:
if line.startswith("@SQ"):
cur_chrom = [x.split(":", 1)[1].strip()
for x in line.split("\t") if x.startswith("SN:")][0]
if chromhacks.is_autosomal_or_sex(cur_chrom):
out_handle.write(line)
else:
out_handle.write(line)
params = ["-T", "PlotModeledSegments",
"--denoised-copy-ratios", tz.get_in(["depth", "bins", "normalized"], data),
"--segments", seg_files["final_seg"],
"--allelic-counts", seg_files["tumor_hets"],
"--sequence-dictionary", plot_dict,
"--minimum-contig-length", "10",
"--output-prefix", dd.get_sample_name(data),
"-O", os.path.dirname(tx_out_file)]
_run_with_memory_scaling(params, tx_out_file, data)
return {"seg": out_file}
def model_segments(copy_file, work_dir, paired):
"""Perform segmentation on input copy number log2 ratio file.
"""
out_file = os.path.join(work_dir, "%s.cr.seg" % dd.get_sample_name(paired.tumor_data))
tumor_counts, normal_counts = heterogzygote_counts(paired)
if not utils.file_exists(out_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
params = ["-T", "ModelSegments",
"--denoised-copy-ratios", copy_file,
"--allelic-counts", tumor_counts,
"--output-prefix", dd.get_sample_name(paired.tumor_data),
"-O", os.path.dirname(tx_out_file)]
if normal_counts:
params += ["--normal-allelic-counts", normal_counts]
_run_with_memory_scaling(params, tx_out_file, paired.tumor_data)
for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file),
"%s*" % dd.get_sample_name(paired.tumor_data))):
shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname)))
return {"seg": out_file, "tumor_hets": out_file.replace(".cr.seg", ".hets.tsv"),
"final_seg": out_file.replace(".cr.seg", ".modelFinal.seg")}
def denoise(data, pon, work_dir):
"""Normalize read counts using panel of normal background or GC/mappability
"""
std_file = os.path.join(work_dir, "%s-crstandardized.tsv" % dd.get_sample_name(data))
denoise_file = os.path.join(work_dir, "%s-crdenoised.tsv" % dd.get_sample_name(data))
if not utils.file_exists(std_file):
with file_transaction(data, std_file, denoise_file) as (tx_std_file, tx_denoise_file):
params = ["-T", "DenoiseReadCounts",
"-I", tz.get_in(["depth", "bins", "target"], data),
"--standardized-copy-ratios", tx_std_file,
"--denoised-copy-ratios", tx_denoise_file]
if pon:
params += ["--count-panel-of-normals", pon]
else:
params += ["--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], data)]
_run_with_memory_scaling(params, tx_std_file, data)
return denoise_file if pon else std_file
def create_panel_of_normals(items, group_id, work_dir):
"""Create a panel of normals from one or more background read counts.
"""
out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id))
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
params = ["-T", "CreateReadCountPanelOfNormals",
"-O", tx_out_file,
"--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])]
for data in items:
params += ["-I", tz.get_in(["depth", "bins", "target"], data)]
_run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True)
return out_file
def pon_to_bed(pon_file, out_dir, data):
"""Extract BED intervals from a GATK4 hdf5 panel of normal file.
"""
out_file = os.path.join(out_dir, "%s-intervals.bed" % (utils.splitext_plus(os.path.basename(pon_file))[0]))
if not utils.file_uptodate(out_file, pon_file):
import h5py
with file_transaction(data, out_file) as tx_out_file:
with h5py.File(pon_file, "r") as f:
with open(tx_out_file, "w") as out_handle:
intervals = f["original_data"]["intervals"]
for i in range(len(intervals["transposed_index_start_end"][0])):
chrom = intervals["indexed_contig_names"][intervals["transposed_index_start_end"][0][i]]
if isinstance(chrom, bytes):
chrom = chrom.decode("utf-8")
start = int(intervals["transposed_index_start_end"][1][i]) - 1
end = int(intervals["transposed_index_start_end"][2][i])
out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
return out_file
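# --- Illustration only ------------------------------------------------------
# A small sketch of the coordinate conversion performed by pon_to_bed above:
# GATK interval starts are 1-based inclusive while BED is 0-based half-open,
# so only the start coordinate shifts by one. The helper name is hypothetical.
def _sketch_interval_to_bed_line(chrom, start_1based, end_1based):
    return "%s\t%s\t%s" % (chrom, int(start_1based) - 1, int(end_1based))
# e.g. _sketch_interval_to_bed_line("chr1", 1001, 2000) == "chr1\t1000\t2000"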
def prepare_intervals(data, region_file, work_dir):
"""Prepare interval regions for targeted and gene based regions.
"""
target_file = os.path.join(work_dir, "%s-target.interval_list" % dd.get_sample_name(data))
if not utils.file_uptodate(target_file, region_file):
with file_transaction(data, target_file) as tx_out_file:
params = ["-T", "PreprocessIntervals", "-R", dd.get_ref_file(data),
"--interval-merging-rule", "OVERLAPPING_ONLY",
"-O", tx_out_file]
if dd.get_coverage_interval(data) == "genome":
params += ["--bin-length", "1000", "--padding", "0"]
else:
params += ["-L", region_file, "--bin-length", "0", "--padding", "250"]
_run_with_memory_scaling(params, tx_out_file, data)
return target_file
def annotate_intervals(target_file, data):
"""Provide GC annotated intervals for error correction during panels and denoising.
TODO: include mappability and segmentation duplication inputs
"""
out_file = "%s-gcannotated.tsv" % utils.splitext_plus(target_file)[0]
if not utils.file_uptodate(out_file, target_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "AnnotateIntervals", "-R", dd.get_ref_file(data),
"-L", target_file,
"--interval-merging-rule", "OVERLAPPING_ONLY",
"-O", tx_out_file]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def collect_read_counts(data, work_dir):
"""Count reads in defined bins using CollectReadCounts.
"""
out_file = os.path.join(work_dir, "%s-target-coverage.hdf5" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "CollectReadCounts", "-I", dd.get_align_bam(data),
"-L", tz.get_in(["regions", "bins", "target"], data),
"--interval-merging-rule", "OVERLAPPING_ONLY",
"-O", tx_out_file, "--format", "HDF5"]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def heterogzygote_counts(paired):
"""Provide tumor/normal counts at population heterozyogte sites with CollectAllelicCounts.
"""
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(paired.tumor_data), "structural", "counts"))
key = "germline_het_pon"
het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data)
vr = bedutils.population_variant_regions([x for x in [paired.tumor_data, paired.normal_data] if x])
cur_het_bed = bedutils.intersect_two(het_bed, vr, work_dir, paired.tumor_data)
tumor_counts = _run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.tumor_data)
normal_counts = (_run_collect_allelic_counts(cur_het_bed, key, work_dir, paired.normal_data)
if paired.normal_data else None)
if normal_counts:
tumor_counts, normal_counts = _filter_by_normal(tumor_counts, normal_counts, paired.tumor_data)
return tumor_counts, normal_counts
def _filter_by_normal(tumor_counts, normal_counts, data):
"""Filter count files based on normal frequency and median depth, avoiding high depth regions.
For frequency, restricts normal positions to those between 0.4 and 0.65
For depth, matches approach used in AMBER to try and avoid problematic genomic regions
with high count in the normal:
https://github.com/hartwigmedical/hmftools/tree/master/amber#usage
"""
from bcbio.heterogeneity import bubbletree
fparams = bubbletree.NORMAL_FILTER_PARAMS
tumor_out = "%s-normfilter%s" % utils.splitext_plus(tumor_counts)
normal_out = "%s-normfilter%s" % utils.splitext_plus(normal_counts)
if not utils.file_uptodate(tumor_out, tumor_counts):
with file_transaction(data, tumor_out, normal_out) as (tx_tumor_out, tx_normal_out):
median_depth = _get_normal_median_depth(normal_counts)
min_normal_depth = median_depth * fparams["min_depth_percent"]
max_normal_depth = median_depth * fparams["max_depth_percent"]
with open(tumor_counts) as tumor_handle:
with open(normal_counts) as normal_handle:
with open(tx_tumor_out, "w") as tumor_out_handle:
with open(tx_normal_out, "w") as normal_out_handle:
header = None
for t, n in zip(tumor_handle, normal_handle):
if header is None:
if not n.startswith("@"):
header = n.strip().split()
tumor_out_handle.write(t)
normal_out_handle.write(n)
elif (_normal_passes_depth(header, n, min_normal_depth, max_normal_depth) and
_normal_passes_freq(header, n, fparams)):
tumor_out_handle.write(t)
normal_out_handle.write(n)
return tumor_out, normal_out
def _normal_passes_freq(header, line, fparams):
vals = dict(zip(header, line.strip().split()))
cur_depth = float(vals["REF_COUNT"]) + int(vals["ALT_COUNT"])
if cur_depth > 0:
cur_freq = float(vals["ALT_COUNT"]) / cur_depth
else:
cur_freq = 0.0
return cur_freq >= fparams["min_freq_narrow"] and cur_freq <= fparams["max_freq_narrow"]
def _normal_passes_depth(header, line, min_normal_depth, max_normal_depth):
vals = dict(zip(header, line.strip().split()))
cur_depth = int(vals["REF_COUNT"]) + int(vals["ALT_COUNT"])
return cur_depth >= min_normal_depth and cur_depth <= max_normal_depth
def _get_normal_median_depth(normal_counts):
depths = []
with open(normal_counts) as in_handle:
header = None
for line in in_handle:
if header is None and not line.startswith("@"):
header = line.strip().split()
elif header:
n_vals = dict(zip(header, line.strip().split()))
depths.append(int(n_vals["REF_COUNT"]) + int(n_vals["ALT_COUNT"]))
return np.median(depths)
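# --- Illustration only ------------------------------------------------------
# Sketch of how the normal depth window used in _filter_by_normal is derived
# from the median normal depth. The percentages below are made-up stand-ins;
# the real values come from bubbletree.NORMAL_FILTER_PARAMS at runtime.
def _sketch_depth_window(median_depth, min_depth_percent=0.5, max_depth_percent=1.5):
    return (median_depth * min_depth_percent, median_depth * max_depth_percent)
# e.g. with a median normal depth of 40, positions with 20 <= depth <= 60 pass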
def _run_collect_allelic_counts(pos_file, pos_name, work_dir, data):
"""Counts by alleles for a specific sample and set of positions.
"""
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", "counts"))
out_file = os.path.join(out_dir, "%s-%s-counts.tsv" % (dd.get_sample_name(data), pos_name))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "CollectAllelicCounts", "-L", pos_file, "-I", dd.get_align_bam(data),
"-R", dd.get_ref_file(data), "-O", tx_out_file]
_run_with_memory_scaling(params, tx_out_file, data)
return out_file
def _run_with_memory_scaling(params, tx_out_file, data, ld_preload=False):
num_cores = dd.get_num_cores(data)
memscale = {"magnitude": 0.9 * num_cores, "direction": "increase"} if num_cores > 1 else None
# Ignore tools_off: [gatk4], since it doesn't apply to GATK CNV calling
config = utils.deepish_copy(data["config"])
if "gatk4" in dd.get_tools_off({"config": config}):
config["algorithm"]["tools_off"].remove("gatk4")
broad_runner = broad.runner_from_config(config)
broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, ld_preload=ld_preload)
# ## VCF output
def _get_seg_header(in_handle):
for line in in_handle:
if not line.startswith("@"):
break
return line.strip().split("\t"), in_handle
def _seg_to_vcf(vals):
"""Convert GATK CNV calls seg output to a VCF line.
"""
call_to_cn = {"+": 3, "-": 1}
call_to_type = {"+": "DUP", "-": "DEL"}
if vals["CALL"] not in ["0"]:
info = ["FOLD_CHANGE_LOG=%s" % vals["MEAN_LOG2_COPY_RATIO"],
"PROBES=%s" % vals["NUM_POINTS_COPY_RATIO"],
"SVTYPE=%s" % call_to_type[vals["CALL"]],
"SVLEN=%s" % (int(vals["END"]) - int(vals["START"])),
"END=%s" % vals["END"],
"CN=%s" % call_to_cn[vals["CALL"]]]
return [vals["CONTIG"], vals["START"], ".", "N", "<%s>" % call_to_type[vals["CALL"]], ".",
".", ";".join(info), "GT", "0/1"]
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "gatk-cnv"))
|
|
"""Test the onboarding views."""
import asyncio
import os
from unittest.mock import patch
import pytest
from homeassistant.components import onboarding
from homeassistant.components.onboarding import const, views
from homeassistant.const import HTTP_FORBIDDEN
from homeassistant.helpers import area_registry as ar
from homeassistant.setup import async_setup_component
from . import mock_storage
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI, register_auth_provider
from tests.components.met.conftest import mock_weather # noqa: F401
@pytest.fixture(autouse=True)
def always_mock_weather(mock_weather): # noqa: F811
"""Mock the Met weather provider."""
pass
@pytest.fixture(autouse=True)
def auth_active(hass):
"""Ensure auth is always active."""
hass.loop.run_until_complete(
register_auth_provider(hass, {"type": "homeassistant"})
)
@pytest.fixture(name="rpi")
async def rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "raspberrypi3"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="no_rpi")
async def no_rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "odroid-n2"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="mock_supervisor")
async def mock_supervisor_fixture(hass, aioclient_mock):
"""Mock supervisor."""
aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"})
with patch.dict(os.environ, {"HASSIO": "127.0.0.1"}), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value=True,
), patch(
"homeassistant.components.hassio.HassIO.get_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_host_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_store",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value={"diagnostics": True},
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_ingress_panels",
return_value={"panels": {}},
), patch.dict(
os.environ, {"HASSIO_TOKEN": "123456"}
):
yield
async def test_onboarding_progress(hass, hass_storage, hass_client_no_auth):
"""Test fetching progress."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
with patch.object(views, "STEPS", ["hello", "world"]):
resp = await client.get("/api/onboarding")
assert resp.status == 200
data = await resp.json()
assert len(data) == 2
assert data[0] == {"step": "hello", "done": True}
assert data[1] == {"step": "world", "done": False}
async def test_onboarding_user_already_done(hass, hass_storage, hass_client_no_auth):
"""Test creating a new user when user step already done."""
mock_storage(hass_storage, {"done": [views.STEP_USER]})
with patch.object(onboarding, "STEPS", ["hello", "world"]):
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == HTTP_FORBIDDEN
async def test_onboarding_user(hass, hass_storage, hass_client_no_auth):
"""Test creating a new user."""
assert await async_setup_component(hass, "person", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 200
assert const.STEP_USER in hass_storage[const.DOMAIN]["data"]["done"]
data = await resp.json()
assert "auth_code" in data
users = await hass.auth.async_get_users()
assert len(users) == 1
user = users[0]
assert user.name == "Test Name"
assert len(user.credentials) == 1
assert user.credentials[0].data["username"] == "test-user"
assert len(hass.data["person"][1].async_items()) == 1
# Validate refresh token 1
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Validate created areas
area_registry = ar.async_get(hass)
assert len(area_registry.areas) == 3
assert sorted(area.name for area in area_registry.async_list_areas()) == [
"Bedroom",
"Kitchen",
"Living Room",
]
async def test_onboarding_user_invalid_name(hass, hass_storage, hass_client_no_auth):
"""Test not providing name."""
mock_storage(hass_storage, {"done": []})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 400
async def test_onboarding_user_race(hass, hass_storage, hass_client_no_auth):
"""Test race condition on creating new user."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp1 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 1",
"username": "1-user",
"password": "1-pass",
"language": "en",
},
)
resp2 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 2",
"username": "2-user",
"password": "2-pass",
"language": "es",
},
)
res1, res2 = await asyncio.gather(resp1, resp2)
assert sorted([res1.status, res2.status]) == [200, HTTP_FORBIDDEN]
async def test_onboarding_integration(hass, hass_storage, hass_client, hass_admin_user):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": CLIENT_REDIRECT_URI},
)
assert resp.status == 200
data = await resp.json()
assert "auth_code" in data
# Validate refresh token
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Onboarding refresh token and new refresh token
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 2, user
async def test_onboarding_integration_missing_credential(
hass, hass_storage, hass_client, hass_access_token
):
"""Test that we fail integration step if user is missing credentials."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
refresh_token.credential = None
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": CLIENT_REDIRECT_URI},
)
assert resp.status == 403
async def test_onboarding_integration_invalid_redirect_uri(
hass, hass_storage, hass_client
):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": "http://invalid-redirect.uri"},
)
assert resp.status == 400
# We will still mark the last step as done because there is nothing left.
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
# Only refresh token from onboarding should be there
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 1, user
async def test_onboarding_integration_requires_auth(
hass, hass_storage, hass_client_no_auth
):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/integration", json={"client_id": CLIENT_ID}
)
assert resp.status == 401
async def test_onboarding_core_sets_up_met(hass, hass_storage, hass_client):
"""Test finishing the core step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("weather")) == 1
async def test_onboarding_core_sets_up_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, rpi
):
"""Test that the core step sets up rpi_power on RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert rpi_power_state
async def test_onboarding_core_no_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, no_rpi
):
"""Test that the core step do not set up rpi_power on non RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert not rpi_power_state
async def test_onboarding_analytics(hass, hass_storage, hass_client, hass_admin_user):
"""Test finishing analytics step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post("/api/onboarding/analytics")
assert resp.status == 200
assert const.STEP_ANALYTICS in hass_storage[const.DOMAIN]["data"]["done"]
resp = await client.post("/api/onboarding/analytics")
assert resp.status == 403
|
|
from collections import Counter
import numpy as np
import random
from ..parser import tokenize
import codecs
import os
from itertools import chain
from scipy.spatial import distance
# Data types - Filled in by sub modules
dtypes = {
}
class Dataset(object):
def __init__(self, vocabulary_size=50000, store_data=True):
self.data = []
self.labels = []
self.counter = Counter()
        self.dictionary = dict()
        self.reverse_dictionary = dict()
        # Mapping from sentence text/tokens to data offset, used by add_sentence()
        self.sids = dict()
self.vocabulary_size = vocabulary_size
self.size = 0
self.store_data = store_data
@classmethod
def load(cls, path, dtype="auto"):
if dtype == "auto":
for dt, dcls in dtypes.items():
if dcls.identify(path):
dtype = dt
break
return dtypes[dtype](path)
@classmethod
def identify(cls, path, dtype="auto"):
if dtype == "auto":
for dt, dcls in dtypes.items():
if dcls.identify(path):
dtype = dt
break
return dtypes[dtype]
def train(self):
return self._train
def test(self):
return self._test
def valid(self):
return self._valid
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(chain(*iter(self.data)))
def weight(self, aggregator=sum, metric='cosine'):
distfn = lambda x, y: distance.cdist([x], [y], metric=metric)[0]
for s in self:
if "weights" in s:
return
vec = s["vector"]
vsum = sum(vec)
vlen = len(vec)
s["weights"] = [distfn(vsum / vlen, (vsum - v) / (vlen - 1)) for v in vec]
def add_sentence(self, sid, sent):
if type(sent) == list:
pos = self.add_tokens(sent)
else:
pos = self.add_text(sent)
self.sids[sent] = pos
def add_text(self, text):
return self.add_tokens(tokenize(text))
def add_tokens(self, words):
self.size += len(words)
if self.store_data:
self.data.append(words)
off = len(self.data) - 1
else:
#TODO(mattea): maybe make this more useful? If I find a use for it
off = self.size
return off
def add_label(self, label):
return self.labels.append(label)
def build_dictionary(self):
count = [['UNK', -1]]
count.extend(self.counter.most_common(self.vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
count[0][1] = self.size - sum([x[1] for x in count])
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
self.count = count
self.dictionary = dictionary
self.reverse_dictionary = reverse_dictionary
return count, dictionary, reverse_dictionary
def index_data(self):
dictionary = self.dictionary
for data in self.data:
for wind in range(len(data)):
word = data[wind]
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
data[wind] = index
self.data = np.array(self.data)
self.labels = np.array(self.labels)
def split(self, p=.8, k=None):
if k is None:
if type(p) != list:
p = [p, 1 - p]
else:
p = [1 / k] * k
dlen = len(self.data)
shuf = self.data[:]
        # random.shuffle shuffles in place and returns None
        random.shuffle(shuf)
        spl = []
        off = 0
        for x in p:
            spl.append(shuf[int(dlen * off):int(dlen * (off + x))])
            off += x
self._split = spl
return spl
#def text(self):
# for pair in self.data:
# for word in pair:
# yield word
def wv_text(self):
for itm in self.data:
for word in itm.wv_text():
yield word
def wv_sentences(self):
for itm in self.data:
for sent in itm.wv_sentences():
yield sent
def sentences(self):
for itm in self.data:
for sent in itm.sentences():
yield sent
def vocab(self):
try:
return self._vocab
except AttributeError:
pass
res = Counter()
for sent in self.sentences():
res.update(sent)
self._vocab = res
return res
def wv_vocab(self):
try:
return self._wv_vocab
except AttributeError:
pass
res = Counter()
res.update(self.wv_text())
self._wv_vocab = res
res.total = sum(res.values())
return res
def normalize(self, matcher, df):
for itm in self.data:
itm.normalize(matcher, df)
def vectorize(self, wordvec):
for itm in self.data:
itm.vectorize(wordvec)
def maxShortSentence(self):
l = 0
for pair in self.data:
cl = min(len(pair.s1["wv_tokens"]), len(pair.s2["wv_tokens"]))
if cl > l:
l = cl
return l
@classmethod
def writer(self, *args, **kwargs):
return self._writer(*args, **kwargs)
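# --- Illustration only ------------------------------------------------------
# A minimal sketch of the indexing pipeline provided by Dataset above. The
# base class leaves populating self.counter to callers/subclasses, so the
# sketch updates it by hand before building the dictionary. Names and the
# toy corpus are hypothetical.
def _sketch_dataset_pipeline():
    ds = Dataset(vocabulary_size=10)
    for sent in (["the", "cat", "sat"], ["the", "dog", "sat"]):
        ds.counter.update(sent)
        ds.add_tokens(sent)
    ds.build_dictionary()   # 'UNK' plus the most common tokens
    ds.index_data()         # replaces tokens with integer ids in place
    return ds.split(0.5)    # two random splits of the indexed data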
class MatchWriter(object):
def __init__(self, sf):
self.sh = codecs.open(sf or os.devnull, 'w', 'utf-8')
self.writeheader()
def __enter__(self):
if self.sh:
self.sh.__enter__()
return self
def __exit__(self, ctype, value, traceback):
if self.sh:
self.sh.__exit__(ctype, value, traceback)
def writeheader(self):
print >>self.sh, "\t".join(("QueryID", "UpdateID", "NuggetID", "Start",
"End", "AutoP", "Score", "Label", "Update_Text",
"Nugget_Text"))
def write(self, pair, match):
qid = pair.s1["query_id"]
nugget = pair.s1
update = pair.s2
print >>self.sh, "\t".join((qid, update["id"], nugget["id"],
str(match.start), str(match.end),
str(match.autop), "%g" % match.score,
"%g" % pair.label,
update["text"], nugget["text"]))
class SentencePair(object):
def __init__(self, s1, s2, sid1=None, sid2=None, pid=None, label=None):
self.s1 = {}
self.s2 = {}
if type(s1) == list:
s1toks = s1
s1 = " ".join(s1toks)
elif type(s1) == dict:
self.s1 = s1
else:
s1toks = tokenize(s1)
if type(s2) == list:
s2toks = s2
s2 = " ".join(s2toks)
elif type(s2) == dict:
self.s2 = s2
else:
s2toks = tokenize(s2)
if not self.s1:
self.s1 = {"tokens": s1toks, "text": s1}
self.s2 = {"tokens": s2toks, "text": s2}
if sid1 is None and pid is not None:
sid1 = pid + "_1"
if sid2 is None and pid is not None:
sid2 = pid + "_2"
else:
sid1 = s1["id"]
sid2 = s2["id"]
pid = sid1 + "_" + sid2
s1toks = s1["tokens"]
s2toks = s2["tokens"]
self.sid1 = sid1
self.sid2 = sid2
self.pid = pid
self.label = label
self.__len = len(s1toks) + len(s2toks)
def __str__(self):
return " ".join(self.s1["tokens"]) + " <s1e> " + " ".join(self.s2["tokens"])
def __iter__(self):
for itm in (self.s1, self.s2):
yield itm
def wv_text(self):
for word in chain(self.s1["wv_tokens"], self.s2["wv_tokens"]):
yield word
def wv_sentences(self):
return [self.s1["wv_tokens"], self.s2["wv_tokens"]]
def sentences(self):
return [self.s1["tokens"], self.s2["tokens"]]
def __getitem__(self, index):
s1l = len(self.s1["tokens"])
if s1l > index:
return self.s1["tokens"][index]
else:
return self.s2["tokens"][index - s1l]
def __setitem__(self, index, val):
s1l = len(self.s1["tokens"])
if s1l > index:
self.s1["tokens"][index] = val
else:
self.s2["tokens"][index - s1l] = val
def __len__(self):
return self.__len
def normalize(self, matcher, df):
self.s1["vector"], self.s1["vector_sum"] = matcher.normalize(self.s1["vector"], df)
self.s2["vector"], self.s2["vector_sum"] = matcher.normalize(self.s2["vector"], df)
def vectorize(self, wordvec):
self.s1["vector"], self.s1["wv_tokens"] = wordvec.get_sentvec(self.s1["tokens"])
self.s2["vector"], self.s2["wv_tokens"] = wordvec.get_sentvec(self.s2["tokens"])
|
|
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from dask.async import get_sync
from dask.dataframe.core import _Frame
from dask.dataframe.methods import concat
from dask.dataframe.multi import (align_partitions, merge_indexed_dataframes,
hash_join, concat_indexed_dataframes,
_maybe_align_partitions)
from dask.dataframe.utils import (assert_eq, assert_divisions, make_meta,
has_known_categories, clear_known_categories)
import pytest
def test_align_partitions():
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.repartition(A, [10, 40, 60])
B = pd.DataFrame({'x': [1, 2, 3, 4], 'y': list('abda')},
index=[30, 70, 80, 100])
b = dd.repartition(B, [30, 80, 100])
s = dd.core.Scalar({('s', 0): 10}, 's', 'i8')
(aa, bb), divisions, L = align_partitions(a, b)
def _check(a, b, aa, bb):
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert isinstance(aa, dd.DataFrame)
assert isinstance(bb, dd.DataFrame)
assert_eq(a, aa)
assert_eq(b, bb)
assert divisions == (10, 30, 40, 60, 80, 100)
assert isinstance(L, list)
assert len(divisions) == 1 + len(L)
_check(a, b, aa, bb)
assert L == [[(aa._name, 0), (bb._name, 0)],
[(aa._name, 1), (bb._name, 1)],
[(aa._name, 2), (bb._name, 2)],
[(aa._name, 3), (bb._name, 3)],
[(aa._name, 4), (bb._name, 4)]]
(aa, ss, bb), divisions, L = align_partitions(a, s, b)
_check(a, b, aa, bb)
assert L == [[(aa._name, 0), None, (bb._name, 0)],
[(aa._name, 1), None, (bb._name, 1)],
[(aa._name, 2), None, (bb._name, 2)],
[(aa._name, 3), None, (bb._name, 3)],
[(aa._name, 4), None, (bb._name, 4)]]
assert_eq(ss, 10)
ldf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
rdf = pd.DataFrame({'c': [1, 2, 3, 4, 5, 6, 7],
'd': [7, 6, 5, 4, 3, 2, 1]})
for lhs, rhs in [(dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),
(dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2))]:
(lresult, rresult), div, parts = align_partitions(lhs, rhs)
assert_eq(lresult, ldf)
assert_eq(rresult, rdf)
# different index
ldf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
rdf = pd.DataFrame({'c': [1, 2, 3, 4, 5, 6, 7],
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('fghijkl'))
for lhs, rhs in [(dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),
(dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2))]:
(lresult, rresult), div, parts = align_partitions(lhs, rhs)
assert_eq(lresult, ldf)
assert_eq(rresult, rdf)
def test_align_partitions_unknown_divisions():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
# One known, one unknown
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf2.known_divisions
with pytest.raises(ValueError):
align_partitions(ddf, ddf2)
# Both unknown
ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)
ddf2 = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
assert not ddf2.known_divisions
with pytest.raises(ValueError):
align_partitions(ddf, ddf2)
def test__maybe_align_partitions():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
# Both known, same divisions
ddf = dd.from_pandas(df + 1, npartitions=2)
ddf2 = dd.from_pandas(df, npartitions=2)
a, b = _maybe_align_partitions([ddf, ddf2])
assert a is ddf
assert b is ddf2
# Both unknown, same divisions
ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)
ddf2 = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
assert not ddf2.known_divisions
a, b = _maybe_align_partitions([ddf, ddf2])
assert a is ddf
assert b is ddf2
# Both known, different divisions
ddf = dd.from_pandas(df + 1, npartitions=2)
ddf2 = dd.from_pandas(df, npartitions=3)
a, b = _maybe_align_partitions([ddf, ddf2])
assert a.divisions == b.divisions
# Both unknown, different divisions
ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)
ddf2 = dd.from_pandas(df, npartitions=3, sort=False)
assert not ddf.known_divisions
assert not ddf2.known_divisions
with pytest.raises(ValueError):
_maybe_align_partitions([ddf, ddf2])
# One known, one unknown
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf2.known_divisions
with pytest.raises(ValueError):
_maybe_align_partitions([ddf, ddf2])
def test_merge_indexed_dataframe_to_indexed_dataframe():
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6]},
index=[1, 2, 3, 4, 6, 7])
a = dd.repartition(A, [1, 4, 7])
B = pd.DataFrame({'y': list('abcdef')},
index=[1, 2, 4, 5, 6, 8])
b = dd.repartition(B, [1, 2, 5, 8])
c = merge_indexed_dataframes(a, b, how='left')
assert c.divisions[0] == a.divisions[0]
assert c.divisions[-1] == max(a.divisions + b.divisions)
assert_eq(c, A.join(B))
c = merge_indexed_dataframes(a, b, how='right')
assert c.divisions[0] == b.divisions[0]
assert c.divisions[-1] == b.divisions[-1]
assert_eq(c, A.join(B, how='right'))
c = merge_indexed_dataframes(a, b, how='inner')
assert c.divisions[0] == 1
assert c.divisions[-1] == max(a.divisions + b.divisions)
assert_eq(c.compute(), A.join(B, how='inner'))
c = merge_indexed_dataframes(a, b, how='outer')
assert c.divisions[0] == 1
assert c.divisions[-1] == 8
assert_eq(c.compute(), A.join(B, how='outer'))
assert (sorted(merge_indexed_dataframes(a, b, how='inner').dask) ==
sorted(merge_indexed_dataframes(a, b, how='inner').dask))
assert (sorted(merge_indexed_dataframes(a, b, how='inner').dask) !=
sorted(merge_indexed_dataframes(a, b, how='outer').dask))
def list_eq(aa, bb):
if isinstance(aa, dd.DataFrame):
a = aa.compute(get=get_sync)
else:
a = aa
if isinstance(bb, dd.DataFrame):
b = bb.compute(get=get_sync)
else:
b = bb
tm.assert_index_equal(a.columns, b.columns)
if isinstance(a, pd.DataFrame):
av = a.sort_values(list(a.columns)).values
bv = b.sort_values(list(b.columns)).values
else:
av = a.sort_values().values
bv = b.sort_values().values
tm.assert_numpy_array_equal(av, bv)
@pytest.mark.parametrize('how', ['inner', 'left', 'right', 'outer'])
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_hash_join(how, shuffle):
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
B = pd.DataFrame({'y': [1, 3, 4, 4, 5, 6], 'z': [6, 5, 4, 3, 2, 1]})
b = dd.repartition(B, [0, 2, 5])
c = hash_join(a, 'y', b, 'y', how)
result = c.compute()
expected = pd.merge(A, B, how, 'y')
list_eq(result, expected)
# Different columns and npartitions
c = hash_join(a, 'x', b, 'z', 'outer', npartitions=3, shuffle=shuffle)
assert c.npartitions == 3
result = c.compute()
expected = pd.merge(A, B, 'outer', None, 'x', 'z')
list_eq(result, expected)
assert (hash_join(a, 'y', b, 'y', 'inner', shuffle=shuffle)._name ==
hash_join(a, 'y', b, 'y', 'inner', shuffle=shuffle)._name)
assert (hash_join(a, 'y', b, 'y', 'inner', shuffle=shuffle)._name !=
hash_join(a, 'y', b, 'y', 'outer', shuffle=shuffle)._name)
@pytest.mark.parametrize('join', ['inner', 'outer'])
def test_indexed_concat(join):
A = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7], 'y': list('abcdef')},
index=[1, 2, 3, 4, 6, 7])
a = dd.repartition(A, [1, 4, 7])
B = pd.DataFrame({'x': [10, 20, 40, 50, 60, 80]},
index=[1, 2, 4, 5, 6, 8])
b = dd.repartition(B, [1, 2, 5, 8])
result = concat_indexed_dataframes([a, b], join=join)
expected = pd.concat([A, B], axis=0, join=join)
assert_eq(result, expected)
assert (sorted(concat_indexed_dataframes([a, b], join=join).dask) ==
sorted(concat_indexed_dataframes([a, b], join=join).dask))
assert (sorted(concat_indexed_dataframes([a, b], join='inner').dask) !=
sorted(concat_indexed_dataframes([a, b], join='outer').dask))
@pytest.mark.parametrize('join', ['inner', 'outer'])
def test_concat(join):
pdf1 = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7],
'y': list('abcdef')},
index=[1, 2, 3, 4, 6, 7])
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7],
'y': list('abcdef')},
index=[8, 9, 10, 11, 12, 13])
ddf2 = dd.from_pandas(pdf2, 2)
# different columns
pdf3 = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7],
'z': list('abcdef')},
index=[8, 9, 10, 11, 12, 13])
ddf3 = dd.from_pandas(pdf3, 2)
for (dd1, dd2, pd1, pd2) in [(ddf1, ddf2, pdf1, pdf2),
(ddf1, ddf3, pdf1, pdf3)]:
result = dd.concat([dd1, dd2], join=join)
expected = pd.concat([pd1, pd2], join=join)
assert_eq(result, expected)
# test outer only, inner has a problem on pandas side
for (dd1, dd2, pd1, pd2) in [(ddf1, ddf2, pdf1, pdf2),
(ddf1, ddf3, pdf1, pdf3),
(ddf1.x, ddf2.x, pdf1.x, pdf2.x),
(ddf1.x, ddf3.z, pdf1.x, pdf3.z),
(ddf1.x, ddf2.x, pdf1.x, pdf2.x),
(ddf1.x, ddf3.z, pdf1.x, pdf3.z)]:
result = dd.concat([dd1, dd2])
expected = pd.concat([pd1, pd2])
assert_eq(result, expected)
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_merge(how, shuffle):
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
B = pd.DataFrame({'y': [1, 3, 4, 4, 5, 6], 'z': [6, 5, 4, 3, 2, 1]})
b = dd.repartition(B, [0, 2, 5])
assert_eq(dd.merge(a, b, left_index=True, right_index=True,
how=how, shuffle=shuffle),
pd.merge(A, B, left_index=True, right_index=True, how=how))
result = dd.merge(a, b, on='y', how=how)
list_eq(result, pd.merge(A, B, on='y', how=how))
assert all(d is None for d in result.divisions)
list_eq(dd.merge(a, b, left_on='x', right_on='z', how=how, shuffle=shuffle),
pd.merge(A, B, left_on='x', right_on='z', how=how))
list_eq(dd.merge(a, b, left_on='x', right_on='z', how=how,
suffixes=('1', '2'), shuffle=shuffle),
pd.merge(A, B, left_on='x', right_on='z', how=how,
suffixes=('1', '2')))
list_eq(dd.merge(a, b, how=how, shuffle=shuffle), pd.merge(A, B, how=how))
list_eq(dd.merge(a, B, how=how, shuffle=shuffle), pd.merge(A, B, how=how))
list_eq(dd.merge(A, b, how=how, shuffle=shuffle), pd.merge(A, B, how=how))
list_eq(dd.merge(A, B, how=how, shuffle=shuffle), pd.merge(A, B, how=how))
list_eq(dd.merge(a, b, left_index=True, right_index=True, how=how,
shuffle=shuffle),
pd.merge(A, B, left_index=True, right_index=True, how=how))
list_eq(dd.merge(a, b, left_index=True, right_index=True, how=how,
suffixes=('1', '2'), shuffle=shuffle),
pd.merge(A, B, left_index=True, right_index=True, how=how,
suffixes=('1', '2')))
list_eq(dd.merge(a, b, left_on='x', right_index=True, how=how,
shuffle=shuffle),
pd.merge(A, B, left_on='x', right_index=True, how=how))
list_eq(dd.merge(a, b, left_on='x', right_index=True, how=how,
suffixes=('1', '2'), shuffle=shuffle),
pd.merge(A, B, left_on='x', right_index=True, how=how,
suffixes=('1', '2')))
# pandas result looks buggy
# list_eq(dd.merge(a, B, left_index=True, right_on='y'),
# pd.merge(A, B, left_index=True, right_on='y'))
def test_merge_tasks_passes_through():
a = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
b = pd.DataFrame({'c': [1, 2, 3, 4, 5, 6, 7],
'd': [7, 6, 5, 4, 3, 2, 1]})
aa = dd.from_pandas(a, npartitions=3)
bb = dd.from_pandas(b, npartitions=2)
cc = aa.merge(bb, left_on='a', right_on='d', shuffle='tasks')
assert not any('partd' in k[0] for k in cc.dask)
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
def test_merge_by_index_patterns(how, shuffle):
pdf1l = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
pdf1r = pd.DataFrame({'c': [1, 2, 3, 4, 5, 6, 7],
'd': [7, 6, 5, 4, 3, 2, 1]})
pdf2l = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf2r = pd.DataFrame({'c': [7, 6, 5, 4, 3, 2, 1],
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf3l = pdf2l
pdf3r = pd.DataFrame({'c': [6, 7, 8, 9],
'd': [5, 4, 3, 2]},
index=list('abdg'))
pdf4l = pdf2l
pdf4r = pd.DataFrame({'c': [9, 10, 11, 12],
'd': [5, 4, 3, 2]},
index=list('abdg'))
# completely different index
pdf5l = pd.DataFrame({'a': [1, 1, 2, 2, 3, 3, 4],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('lmnopqr'))
pdf5r = pd.DataFrame({'c': [1, 1, 1, 1],
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf6l = pd.DataFrame({'a': [1, 1, 2, 2, 3, 3, 4],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('cdefghi'))
pdf6r = pd.DataFrame({'c': [1, 2, 1, 2],
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf7l = pd.DataFrame({'a': [1, 1, 2, 2, 3, 3, 4],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf7r = pd.DataFrame({'c': [5, 6, 7, 8],
'd': [5, 4, 3, 2]},
index=list('fghi'))
for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r),
(pdf4l, pdf4r), (pdf5l, pdf5r), (pdf6l, pdf6r),
(pdf7l, pdf7r)]:
for lpart, rpart in [(2, 2), # same partition
(3, 2), # left npartition > right npartition
(2, 3)]: # left npartition < right npartition
ddl = dd.from_pandas(pdl, lpart)
ddr = dd.from_pandas(pdr, rpart)
assert_eq(dd.merge(ddl, ddr, how=how, left_index=True,
right_index=True, shuffle=shuffle),
pd.merge(pdl, pdr, how=how, left_index=True,
right_index=True))
assert_eq(dd.merge(ddr, ddl, how=how, left_index=True,
right_index=True, shuffle=shuffle),
pd.merge(pdr, pdl, how=how, left_index=True,
right_index=True))
assert_eq(dd.merge(ddl, ddr, how=how, left_index=True,
right_index=True, shuffle=shuffle,
indicator=True),
pd.merge(pdl, pdr, how=how, left_index=True,
right_index=True, indicator=True))
assert_eq(dd.merge(ddr, ddl, how=how, left_index=True,
right_index=True, shuffle=shuffle,
indicator=True),
pd.merge(pdr, pdl, how=how, left_index=True,
right_index=True, indicator=True))
assert_eq(ddr.merge(ddl, how=how, left_index=True,
right_index=True, shuffle=shuffle),
pdr.merge(pdl, how=how, left_index=True,
right_index=True))
assert_eq(ddl.merge(ddr, how=how, left_index=True,
right_index=True, shuffle=shuffle),
pdl.merge(pdr, how=how, left_index=True,
right_index=True))
# hash join
list_eq(dd.merge(ddl, ddr, how=how, left_on='a', right_on='c',
shuffle=shuffle),
pd.merge(pdl, pdr, how=how, left_on='a', right_on='c'))
list_eq(dd.merge(ddl, ddr, how=how, left_on='b', right_on='d',
shuffle=shuffle),
pd.merge(pdl, pdr, how=how, left_on='b', right_on='d'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='c', right_on='a',
shuffle=shuffle, indicator=True),
pd.merge(pdr, pdl, how=how, left_on='c', right_on='a',
indicator=True))
list_eq(dd.merge(ddr, ddl, how=how, left_on='d', right_on='b',
shuffle=shuffle, indicator=True),
pd.merge(pdr, pdl, how=how, left_on='d', right_on='b',
indicator=True))
list_eq(dd.merge(ddr, ddl, how=how, left_on='c', right_on='a',
shuffle=shuffle),
pd.merge(pdr, pdl, how=how, left_on='c', right_on='a'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='d', right_on='b',
shuffle=shuffle),
pd.merge(pdr, pdl, how=how, left_on='d', right_on='b'))
list_eq(ddl.merge(ddr, how=how, left_on='a', right_on='c',
shuffle=shuffle),
pdl.merge(pdr, how=how, left_on='a', right_on='c'))
list_eq(ddl.merge(ddr, how=how, left_on='b', right_on='d',
shuffle=shuffle),
pdl.merge(pdr, how=how, left_on='b', right_on='d'))
list_eq(ddr.merge(ddl, how=how, left_on='c', right_on='a',
shuffle=shuffle),
pdr.merge(pdl, how=how, left_on='c', right_on='a'))
list_eq(ddr.merge(ddl, how=how, left_on='d', right_on='b',
shuffle=shuffle),
pdr.merge(pdl, how=how, left_on='d', right_on='b'))
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_join_by_index_patterns(how, shuffle):
# Similar test cases as test_merge_by_index_patterns,
# but columns / index for join have same dtype
pdf1l = pd.DataFrame({'a': list('abcdefg'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf1r = pd.DataFrame({'c': list('abcdefg'),
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf2l = pdf1l
pdf2r = pd.DataFrame({'c': list('gfedcba'),
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf3l = pdf1l
pdf3r = pd.DataFrame({'c': list('abdg'),
'd': [5, 4, 3, 2]},
index=list('abdg'))
pdf4l = pd.DataFrame({'a': list('abcabce'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf4r = pd.DataFrame({'c': list('abda'),
'd': [5, 4, 3, 2]},
index=list('abdg'))
# completely different index
pdf5l = pd.DataFrame({'a': list('lmnopqr'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('lmnopqr'))
pdf5r = pd.DataFrame({'c': list('abcd'),
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf6l = pd.DataFrame({'a': list('cdefghi'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('cdefghi'))
pdf6r = pd.DataFrame({'c': list('abab'),
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf7l = pd.DataFrame({'a': list('aabbccd'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf7r = pd.DataFrame({'c': list('aabb'),
'd': [5, 4, 3, 2]},
index=list('fghi'))
for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r),
(pdf4l, pdf4r), (pdf5l, pdf5r), (pdf6l, pdf6r),
(pdf7l, pdf7r)]:
for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:
ddl = dd.from_pandas(pdl, lpart)
ddr = dd.from_pandas(pdr, rpart)
assert_eq(ddl.join(ddr, how=how, shuffle=shuffle),
pdl.join(pdr, how=how))
assert_eq(ddr.join(ddl, how=how, shuffle=shuffle),
pdr.join(pdl, how=how))
assert_eq(ddl.join(ddr, how=how, lsuffix='l', rsuffix='r',
shuffle=shuffle),
pdl.join(pdr, how=how, lsuffix='l', rsuffix='r'))
assert_eq(ddr.join(ddl, how=how, lsuffix='l', rsuffix='r',
shuffle=shuffle),
pdr.join(pdl, how=how, lsuffix='l', rsuffix='r'))
"""
            # temporarily disabled because pandas may incorrectly raise
# IndexError for empty DataFrame
# https://github.com/pydata/pandas/pull/10826
            list_eq(ddl.join(ddr, how=how, on='a', lsuffix='l', rsuffix='r'),
                    pdl.join(pdr, how=how, on='a', lsuffix='l', rsuffix='r'))
list_eq(ddr.join(ddl, how=how, on='c', lsuffix='l', rsuffix='r'),
pdr.join(pdl, how=how, on='c', lsuffix='l', rsuffix='r'))
# merge with index and columns
list_eq(ddl.merge(ddr, how=how, left_on='a', right_index=True),
pdl.merge(pdr, how=how, left_on='a', right_index=True))
list_eq(ddr.merge(ddl, how=how, left_on='c', right_index=True),
pdr.merge(pdl, how=how, left_on='c', right_index=True))
list_eq(ddl.merge(ddr, how=how, left_index=True, right_on='c'),
pdl.merge(pdr, how=how, left_index=True, right_on='c'))
list_eq(ddr.merge(ddl, how=how, left_index=True, right_on='a'),
pdr.merge(pdl, how=how, left_index=True, right_on='a'))
"""
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_merge_by_multiple_columns(how, shuffle):
pdf1l = pd.DataFrame({'a': list('abcdefghij'),
'b': list('abcdefghij'),
'c': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},
index=list('abcdefghij'))
pdf1r = pd.DataFrame({'d': list('abcdefghij'),
'e': list('abcdefghij'),
'f': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefghij'))
pdf2l = pd.DataFrame({'a': list('abcdeabcde'),
'b': list('abcabcabca'),
'c': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},
index=list('abcdefghij'))
pdf2r = pd.DataFrame({'d': list('edcbaedcba'),
'e': list('aaabbbcccd'),
'f': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]},
index=list('fghijklmno'))
pdf3l = pd.DataFrame({'a': list('aaaaaaaaaa'),
'b': list('aaaaaaaaaa'),
'c': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},
index=list('abcdefghij'))
pdf3r = pd.DataFrame({'d': list('aaabbbccaa'),
'e': list('abbbbbbbbb'),
'f': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]},
index=list('ABCDEFGHIJ'))
for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r)]:
for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:
ddl = dd.from_pandas(pdl, lpart)
ddr = dd.from_pandas(pdr, rpart)
assert_eq(ddl.join(ddr, how=how, shuffle=shuffle),
pdl.join(pdr, how=how))
assert_eq(ddr.join(ddl, how=how, shuffle=shuffle),
pdr.join(pdl, how=how))
assert_eq(dd.merge(ddl, ddr, how=how, left_index=True,
right_index=True, shuffle=shuffle),
pd.merge(pdl, pdr, how=how, left_index=True,
right_index=True))
assert_eq(dd.merge(ddr, ddl, how=how, left_index=True,
right_index=True, shuffle=shuffle),
pd.merge(pdr, pdl, how=how, left_index=True,
right_index=True))
# hash join
list_eq(dd.merge(ddl, ddr, how=how, left_on='a', right_on='d',
shuffle=shuffle),
pd.merge(pdl, pdr, how=how, left_on='a', right_on='d'))
list_eq(dd.merge(ddl, ddr, how=how, left_on='b', right_on='e',
shuffle=shuffle),
pd.merge(pdl, pdr, how=how, left_on='b', right_on='e'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='d', right_on='a',
shuffle=shuffle),
pd.merge(pdr, pdl, how=how, left_on='d', right_on='a'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='e', right_on='b',
shuffle=shuffle),
pd.merge(pdr, pdl, how=how, left_on='e', right_on='b'))
list_eq(dd.merge(ddl, ddr, how=how, left_on=['a', 'b'],
right_on=['d', 'e'], shuffle=shuffle),
pd.merge(pdl, pdr, how=how, left_on=['a', 'b'], right_on=['d', 'e']))
def test_melt():
pdf = pd.DataFrame({'A': list('abcd') * 5,
'B': list('XY') * 10,
'C': np.random.randn(20)})
ddf = dd.from_pandas(pdf, 4)
list_eq(dd.melt(ddf),
pd.melt(pdf))
list_eq(dd.melt(ddf, id_vars='C'),
pd.melt(pdf, id_vars='C'))
list_eq(dd.melt(ddf, value_vars='C'),
pd.melt(pdf, value_vars='C'))
list_eq(dd.melt(ddf, value_vars=['A', 'C'], var_name='myvar'),
pd.melt(pdf, value_vars=['A', 'C'], var_name='myvar'))
list_eq(dd.melt(ddf, id_vars='B', value_vars=['A', 'C'], value_name='myval'),
pd.melt(pdf, id_vars='B', value_vars=['A', 'C'], value_name='myval'))
def test_cheap_inner_merge_with_pandas_object():
a = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
da = dd.from_pandas(a, npartitions=3)
b = pd.DataFrame({'x': [1, 2, 3, 4], 'z': list('abda')})
dc = da.merge(b, on='x', how='inner')
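    # A merge against a plain in-memory pandas frame can be done blockwise,
    # so no shuffle tasks should appear in the resulting graph.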
assert all('shuffle' not in k[0] for k in dc.dask)
list_eq(da.merge(b, on='x', how='inner'),
a.merge(b, on='x', how='inner'))
def test_cheap_single_partition_merge():
a = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
aa = dd.from_pandas(a, npartitions=3)
b = pd.DataFrame({'x': [1, 2, 3, 4], 'z': list('abda')})
bb = dd.from_pandas(b, npartitions=1, sort=False)
cc = aa.merge(bb, on='x', how='inner')
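    # A single-partition dask frame can be broadcast to every partition of the
    # larger frame, so the graph stays small and contains no shuffle tasks.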
assert all('shuffle' not in k[0] for k in cc.dask)
assert len(cc.dask) == len(aa.dask) * 2 + len(bb.dask)
list_eq(aa.merge(bb, on='x', how='inner'),
a.merge(b, on='x', how='inner'))
def test_cheap_single_partition_merge_divisions():
a = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
aa = dd.from_pandas(a, npartitions=3)
b = pd.DataFrame({'x': [1, 2, 3, 4], 'z': list('abda')})
bb = dd.from_pandas(b, npartitions=1, sort=False)
actual = aa.merge(bb, on='x', how='inner')
assert not actual.known_divisions
assert_divisions(actual)
actual = bb.merge(aa, on='x', how='inner')
assert not actual.known_divisions
assert_divisions(actual)
def test_cheap_single_partition_merge_on_index():
a = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
aa = dd.from_pandas(a, npartitions=3)
b = pd.DataFrame({'x': [1, 2, 3, 4], 'z': list('abda')})
bb = dd.from_pandas(b, npartitions=1, sort=False)
actual = aa.merge(bb, left_index=True, right_on='x', how='inner')
expected = a.merge(b, left_index=True, right_on='x', how='inner')
assert actual.known_divisions
assert_eq(actual, expected)
actual = bb.merge(aa, right_index=True, left_on='x', how='inner')
expected = b.merge(a, right_index=True, left_on='x', how='inner')
assert actual.known_divisions
assert_eq(actual, expected)
def test_merge_maintains_columns():
lhs = pd.DataFrame({'A': [1, 2, 3],
'B': list('abc'),
'C': 'foo',
'D': 1.0},
columns=list('DCBA'))
rhs = pd.DataFrame({'G': [4, 5],
'H': 6.0,
'I': 'bar',
'B': list('ab')},
columns=list('GHIB'))
ddf = dd.from_pandas(lhs, npartitions=1)
merged = dd.merge(ddf, rhs, on='B').compute()
assert tuple(merged.columns) == ('D', 'C', 'B', 'A', 'G', 'H', 'I')
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_merge_index_without_divisions(shuffle):
a = pd.DataFrame({'x': [1, 2, 3, 4, 5]}, index=[1, 2, 3, 4, 5])
b = pd.DataFrame({'y': [1, 2, 3, 4, 5]}, index=[5, 4, 3, 2, 1])
aa = dd.from_pandas(a, npartitions=3, sort=False)
bb = dd.from_pandas(b, npartitions=2)
result = aa.join(bb, how='inner', shuffle=shuffle)
expected = a.join(b, how='inner')
assert_eq(result, expected)
def test_half_indexed_dataframe_avoids_shuffle():
a = pd.DataFrame({'x': np.random.randint(100, size=1000)})
b = pd.DataFrame({'y': np.random.randint(100, size=100)},
index=np.random.randint(100, size=100))
aa = dd.from_pandas(a, npartitions=100)
bb = dd.from_pandas(b, npartitions=2)
c = pd.merge(a, b, left_index=True, right_on='y')
cc = dd.merge(aa, bb, left_index=True, right_on='y', shuffle='tasks')
list_eq(c, cc)
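    # With the shuffle avoided, the graph should stay far below the task count
    # that a full shuffle over 100 partitions would require.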
assert len(cc.dask) < 500
def test_errors_for_merge_on_frame_columns():
a = pd.DataFrame({'x': [1, 2, 3, 4, 5]}, index=[1, 2, 3, 4, 5])
b = pd.DataFrame({'y': [1, 2, 3, 4, 5]}, index=[5, 4, 3, 2, 1])
aa = dd.from_pandas(a, npartitions=3, sort=False)
bb = dd.from_pandas(b, npartitions=2)
with pytest.raises(NotImplementedError):
dd.merge(aa, bb, left_on='x', right_on=bb.y)
with pytest.raises(NotImplementedError):
dd.merge(aa, bb, left_on=aa.x, right_on=bb.y)
def test_concat_unknown_divisions():
a = pd.Series([1, 2, 3, 4])
b = pd.Series([4, 3, 2, 1])
aa = dd.from_pandas(a, npartitions=2, sort=False)
bb = dd.from_pandas(b, npartitions=2, sort=False)
assert not aa.known_divisions
assert_eq(pd.concat([a, b], axis=1),
dd.concat([aa, bb], axis=1))
cc = dd.from_pandas(b, npartitions=1, sort=False)
with pytest.raises(ValueError):
dd.concat([aa, cc], axis=1)
def test_concat_unknown_divisions_errors():
a = pd.Series([1, 2, 3, 4, 5, 6])
b = pd.Series([4, 3, 2, 1])
aa = dd.from_pandas(a, npartitions=2, sort=False)
bb = dd.from_pandas(b, npartitions=2, sort=False)
with pytest.raises(ValueError):
dd.concat([aa, bb], axis=1).compute()
def test_concat2():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
meta = make_meta({'a': 'i8', 'b': 'i8'})
a = dd.DataFrame(dsk, 'x', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
b = dd.DataFrame(dsk, 'y', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
meta = make_meta({'b': 'i8', 'c': 'i8'})
c = dd.DataFrame(dsk, 'y', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60],
'd': [70, 80, 90]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10],
'd': [90, 80, 70]},
index=[3, 4, 5])}
meta = make_meta({'b': 'i8', 'c': 'i8', 'd': 'i8'},
index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'y', meta, [0, 3, 5])
cases = [[a, b], [a, c], [a, d]]
assert dd.concat([a]) is a
for case in cases:
result = dd.concat(case)
pdcase = [_c.compute() for _c in case]
assert result.npartitions == case[0].npartitions + case[1].npartitions
assert result.divisions == (None, ) * (result.npartitions + 1)
assert_eq(pd.concat(pdcase), result)
assert set(result.dask) == set(dd.concat(case).dask)
result = dd.concat(case, join='inner')
assert result.npartitions == case[0].npartitions + case[1].npartitions
assert result.divisions == (None, ) * (result.npartitions + 1)
assert_eq(pd.concat(pdcase, join='inner'), result)
assert set(result.dask) == set(dd.concat(case, join='inner').dask)
def test_concat3():
pdf1 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCDE'), index=list('abcdef'))
pdf2 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCFG'), index=list('ghijkl'))
pdf3 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCHI'), index=list('mnopqr'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
result = dd.concat([ddf1, ddf2])
assert result.divisions == ddf1.divisions[:-1] + ddf2.divisions
assert result.npartitions == ddf1.npartitions + ddf2.npartitions
assert_eq(result, pd.concat([pdf1, pdf2]))
assert_eq(dd.concat([ddf1, ddf2], interleave_partitions=True),
pd.concat([pdf1, pdf2]))
result = dd.concat([ddf1, ddf2, ddf3])
assert result.divisions == (ddf1.divisions[:-1] + ddf2.divisions[:-1] +
ddf3.divisions)
assert result.npartitions == (ddf1.npartitions + ddf2.npartitions +
ddf3.npartitions)
assert_eq(result, pd.concat([pdf1, pdf2, pdf3]))
assert_eq(dd.concat([ddf1, ddf2, ddf3], interleave_partitions=True),
pd.concat([pdf1, pdf2, pdf3]))
def test_concat4_interleave_partitions():
pdf1 = pd.DataFrame(np.random.randn(10, 5),
columns=list('ABCDE'), index=list('abcdefghij'))
pdf2 = pd.DataFrame(np.random.randn(13, 5),
columns=list('ABCDE'), index=list('fghijklmnopqr'))
pdf3 = pd.DataFrame(np.random.randn(13, 6),
columns=list('CDEXYZ'), index=list('fghijklmnopqr'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
msg = ('All inputs have known divisions which cannot be '
'concatenated in order. Specify '
'interleave_partitions=True to ignore order')
cases = [[ddf1, ddf1], [ddf1, ddf2], [ddf1, ddf3], [ddf2, ddf1],
[ddf2, ddf3], [ddf3, ddf1], [ddf3, ddf2]]
for case in cases:
pdcase = [c.compute() for c in case]
with tm.assertRaisesRegexp(ValueError, msg):
dd.concat(case)
assert_eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert_eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
msg = "'join' must be 'inner' or 'outer'"
with tm.assertRaisesRegexp(ValueError, msg):
dd.concat([ddf1, ddf1], join='invalid', interleave_partitions=True)
def test_concat5():
pdf1 = pd.DataFrame(np.random.randn(7, 5),
columns=list('ABCDE'), index=list('abcdefg'))
pdf2 = pd.DataFrame(np.random.randn(7, 6),
columns=list('FGHIJK'), index=list('abcdefg'))
pdf3 = pd.DataFrame(np.random.randn(7, 6),
columns=list('FGHIJK'), index=list('cdefghi'))
pdf4 = pd.DataFrame(np.random.randn(7, 5),
columns=list('FGHAB'), index=list('cdefghi'))
pdf5 = pd.DataFrame(np.random.randn(7, 5),
columns=list('FGHAB'), index=list('fklmnop'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
ddf4 = dd.from_pandas(pdf4, 2)
ddf5 = dd.from_pandas(pdf5, 3)
cases = [[ddf1, ddf2], [ddf1, ddf3], [ddf1, ddf4], [ddf1, ddf5],
[ddf3, ddf4], [ddf3, ddf5], [ddf5, ddf1, ddf4], [ddf5, ddf3],
[ddf1.A, ddf4.A], [ddf2.F, ddf3.F], [ddf4.A, ddf5.A],
[ddf1.A, ddf4.F], [ddf2.F, ddf3.H], [ddf4.A, ddf5.B],
[ddf1, ddf4.A], [ddf3.F, ddf2], [ddf5, ddf1.A, ddf2]]
for case in cases:
pdcase = [c.compute() for c in case]
assert_eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert_eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
assert_eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
assert_eq(dd.concat(case, axis=1, join='inner'),
pd.concat(pdcase, axis=1, join='inner'))
# Dask + pandas
cases = [[ddf1, pdf2], [ddf1, pdf3], [pdf1, ddf4],
[pdf1.A, ddf4.A], [ddf2.F, pdf3.F],
[ddf1, pdf4.A], [ddf3.F, pdf2], [ddf2, pdf1, ddf3.F]]
for case in cases:
pdcase = [c.compute() if isinstance(c, _Frame) else c for c in case]
assert_eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert_eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
assert_eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
assert_eq(dd.concat(case, axis=1, join='inner'),
pd.concat(pdcase, axis=1, join='inner'))
@pytest.mark.parametrize('known, cat_index, divisions',
[(True, True, False), (True, False, True),
(True, False, False), (False, True, False),
(False, False, True), (False, False, False)])
def test_concat_categorical(known, cat_index, divisions):
frames = [pd.DataFrame({'w': list('xxxxx'),
'x': np.arange(5),
'y': list('abcbc'),
'z': np.arange(5, dtype='f8')}),
pd.DataFrame({'w': list('yyyyy'),
'x': np.arange(5, 10),
'y': list('abbba'),
'z': np.arange(5, 10, dtype='f8')}),
pd.DataFrame({'w': list('zzzzz'),
'x': np.arange(10, 15),
'y': list('bcbcc'),
'z': np.arange(10, 15, dtype='f8')})]
for df in frames:
df.w = df.w.astype('category')
df.y = df.y.astype('category')
if cat_index:
frames = [df.set_index(df.y) for df in frames]
dframes = [dd.from_pandas(p, npartitions=2, sort=divisions) for p in frames]
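    # When `known` is False, strip the category values from the metadata so the
    # dask collection carries an "unknown categories" dtype.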
if not known:
dframes[0]._meta = clear_known_categories(dframes[0]._meta, ['y'],
index=True)
def check_and_return(ddfs, dfs, join):
sol = concat(dfs, join=join)
res = dd.concat(ddfs, join=join, interleave_partitions=divisions)
assert_eq(res, sol)
if known:
for p in [i.iloc[:0] for i in res._get(res.dask, res._keys())]:
res._meta == p # will error if schemas don't align
assert not cat_index or has_known_categories(res.index) == known
return res
for join in ['inner', 'outer']:
# Frame
res = check_and_return(dframes, frames, join)
assert has_known_categories(res.w)
assert has_known_categories(res.y) == known
# Series
res = check_and_return([i.y for i in dframes],
[i.y for i in frames], join)
assert has_known_categories(res) == known
# Non-cat series with cat index
if cat_index:
res = check_and_return([i.x for i in dframes],
[i.x for i in frames], join)
# Partition missing columns
res = check_and_return([dframes[0][['x', 'y']]] + dframes[1:],
[frames[0][['x', 'y']]] + frames[1:], join)
assert not hasattr(res, 'w') or has_known_categories(res.w)
assert has_known_categories(res.y) == known
def test_append():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]})
df2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]},
index=[6, 7, 8, 9, 10, 11])
df3 = pd.DataFrame({'b': [1, 2, 3, 4, 5, 6],
'c': [1, 2, 3, 4, 5, 6]},
index=[6, 7, 8, 9, 10, 11])
ddf = dd.from_pandas(df, 2)
ddf2 = dd.from_pandas(df2, 2)
ddf3 = dd.from_pandas(df3, 2)
s = pd.Series([7, 8], name=6, index=['a', 'b'])
assert_eq(ddf.append(s), df.append(s))
assert_eq(ddf.append(ddf2), df.append(df2))
assert_eq(ddf.a.append(ddf2.a), df.a.append(df2.a))
# different columns
assert_eq(ddf.append(ddf3), df.append(df3))
assert_eq(ddf.a.append(ddf3.b), df.a.append(df3.b))
# dask + pandas
assert_eq(ddf.append(df2), df.append(df2))
assert_eq(ddf.a.append(df2.a), df.a.append(df2.a))
assert_eq(ddf.append(df3), df.append(df3))
assert_eq(ddf.a.append(df3.b), df.a.append(df3.b))
df4 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]},
index=[4, 5, 6, 7, 8, 9])
ddf4 = dd.from_pandas(df4, 2)
with pytest.raises(ValueError):
ddf.append(ddf4)
def test_append2():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
meta = make_meta({'a': 'i8', 'b': 'i8'})
ddf1 = dd.DataFrame(dsk, 'x', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
ddf2 = dd.DataFrame(dsk, 'y', meta, [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
meta = make_meta({'b': 'i8', 'c': 'i8'})
ddf3 = dd.DataFrame(dsk, 'y', meta, [None, None])
assert_eq(ddf1.append(ddf2), ddf1.compute().append(ddf2.compute()))
assert_eq(ddf2.append(ddf1), ddf2.compute().append(ddf1.compute()))
# Series + DataFrame
assert_eq(ddf1.a.append(ddf2), ddf1.a.compute().append(ddf2.compute()))
assert_eq(ddf2.a.append(ddf1), ddf2.a.compute().append(ddf1.compute()))
# different columns
assert_eq(ddf1.append(ddf3), ddf1.compute().append(ddf3.compute()))
assert_eq(ddf3.append(ddf1), ddf3.compute().append(ddf1.compute()))
# Series + DataFrame
assert_eq(ddf1.a.append(ddf3), ddf1.a.compute().append(ddf3.compute()))
assert_eq(ddf3.b.append(ddf1), ddf3.b.compute().append(ddf1.compute()))
# Dask + pandas
assert_eq(ddf1.append(ddf2.compute()), ddf1.compute().append(ddf2.compute()))
assert_eq(ddf2.append(ddf1.compute()), ddf2.compute().append(ddf1.compute()))
# Series + DataFrame
assert_eq(ddf1.a.append(ddf2.compute()), ddf1.a.compute().append(ddf2.compute()))
assert_eq(ddf2.a.append(ddf1.compute()), ddf2.a.compute().append(ddf1.compute()))
# different columns
assert_eq(ddf1.append(ddf3.compute()), ddf1.compute().append(ddf3.compute()))
assert_eq(ddf3.append(ddf1.compute()), ddf3.compute().append(ddf1.compute()))
# Series + DataFrame
assert_eq(ddf1.a.append(ddf3.compute()), ddf1.a.compute().append(ddf3.compute()))
assert_eq(ddf3.b.append(ddf1.compute()), ddf3.b.compute().append(ddf1.compute()))
def test_append_categorical():
frames = [pd.DataFrame({'x': np.arange(5, 10),
'y': list('abbba'),
'z': np.arange(5, 10, dtype='f8')}),
pd.DataFrame({'x': np.arange(10, 15),
'y': list('bcbcc'),
'z': np.arange(10, 15, dtype='f8')})]
frames2 = []
for df in frames:
df.y = df.y.astype('category')
df2 = df.copy()
df2.y = df2.y.cat.set_categories(list('abc'))
df.index = df.y
frames2.append(df2.set_index(df2.y))
df1, df2 = frames2
for known in [True, False]:
dframes = [dd.from_pandas(p, npartitions=2, sort=False) for p in frames]
if not known:
dframes[0]._meta = clear_known_categories(dframes[0]._meta,
['y'], index=True)
ddf1, ddf2 = dframes
res = ddf1.append(ddf2)
assert_eq(res, df1.append(df2))
assert has_known_categories(res.index) == known
assert has_known_categories(res.y) == known
res = ddf1.y.append(ddf2.y)
assert_eq(res, df1.y.append(df2.y))
assert has_known_categories(res.index) == known
assert has_known_categories(res) == known
res = ddf1.index.append(ddf2.index)
assert_eq(res, df1.index.append(df2.index))
assert has_known_categories(res) == known
|
|
import unittest
from test import support
import binascii
import random
import sys
from test.support import bigmemtest, _1G, _4G
zlib = support.import_module('zlib')
try:
import mmap
except ImportError:
mmap = None
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(b""), zlib.crc32(b"", 0))
self.assertTrue(zlib.crc32(b"abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32(b"", 0), 0)
self.assertEqual(zlib.crc32(b"", 1), 1)
self.assertEqual(zlib.crc32(b"", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(b""), zlib.adler32(b"", 1))
self.assertTrue(zlib.adler32(b"abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32(b"", 0), 0)
self.assertEqual(zlib.adler32(b"", 1), 1)
self.assertEqual(zlib.adler32(b"", 432), 432)
def assertEqual32(self, seen, expected):
# 32-bit values masked -- checksums on 32- vs 64- bit machines
        # This is important if bit 31 (0x80000000) is set.
self.assertEqual(seen & 0x0FFFFFFFF, expected & 0x0FFFFFFFF)
def test_penguins(self):
self.assertEqual32(zlib.crc32(b"penguin", 0), 0x0e5c1a120)
self.assertEqual32(zlib.crc32(b"penguin", 1), 0x43b6aa94)
self.assertEqual32(zlib.adler32(b"penguin", 0), 0x0bcf02f6)
self.assertEqual32(zlib.adler32(b"penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32(b"penguin"), zlib.crc32(b"penguin", 0))
self.assertEqual(zlib.adler32(b"penguin"),zlib.adler32(b"penguin",1))
def test_crc32_adler32_unsigned(self):
foo = b'abcdefghijklmnop'
        # explicitly test that the returned checksums are unsigned
self.assertEqual(zlib.crc32(foo), 2486878355)
self.assertEqual(zlib.crc32(b'spam'), 1138425661)
self.assertEqual(zlib.adler32(foo+foo), 3573550353)
self.assertEqual(zlib.adler32(b'spam'), 72286642)
def test_same_as_binascii_crc32(self):
foo = b'abcdefghijklmnop'
crc = 2486878355
self.assertEqual(binascii.crc32(foo), crc)
self.assertEqual(zlib.crc32(foo), crc)
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
# Issue #10276 - check that inputs >=4GB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
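    # setUp seeks 4 GiB into the test file before writing, producing a sparse
    # file, and mmaps it so the checksums operate on a buffer just over 4 GB.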
def setUp(self):
with open(support.TESTFN, "wb+") as f:
f.seek(_4G)
f.write(b"asdf")
f.flush()
self.mapping = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def tearDown(self):
self.mapping.close()
support.unlink(support.TESTFN)
@unittest.skipUnless(mmap, "mmap() is not available.")
@unittest.skipUnless(sys.maxsize > _4G, "Can't run on a 32-bit system.")
@unittest.skipUnless(support.is_resource_enabled("largefile"),
"May use lots of disk space.")
def test_big_buffer(self):
self.assertEqual(zlib.crc32(self.mapping), 3058686908)
self.assertEqual(zlib.adler32(self.mapping), 82837919)
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_badlevel(self):
# specifying compression level out of range causes an error
        # (but -1 is Z_DEFAULT_COMPRESSION and zlib apparently
        # accepts 0 too)
self.assertRaises(zlib.error, zlib.compress, b'ERROR', 10)
def test_badargs(self):
self.assertRaises(TypeError, zlib.adler32)
self.assertRaises(TypeError, zlib.crc32)
self.assertRaises(TypeError, zlib.compress)
self.assertRaises(TypeError, zlib.decompress)
for arg in (42, None, '', 'abc', (), []):
self.assertRaises(TypeError, zlib.adler32, arg)
self.assertRaises(TypeError, zlib.crc32, arg)
self.assertRaises(TypeError, zlib.compress, arg)
self.assertRaises(TypeError, zlib.decompress, arg)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
# specifying total bits too large causes an error
self.assertRaises(ValueError,
zlib.compressobj, 1, zlib.DEFLATED, zlib.MAX_WBITS + 1)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, -1)
def test_decompressobj_badflush(self):
# verify failure on calling decompressobj.flush with bad params
self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
class BaseCompressTestCase(object):
def check_big_compress_buffer(self, size, compress_func):
_1M = 1024 * 1024
fmt = "%%0%dx" % (2 * _1M)
        # Generate 10MB worth of random data, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
for i in range(10)])
data = data * (size // len(data) + 1)
try:
compress_func(data)
finally:
# Release memory
data = None
def check_big_decompress_buffer(self, size, decompress_func):
data = b'x' * size
try:
compressed = zlib.compress(data, 1)
finally:
# Release memory
data = None
data = decompress_func(compressed)
# Sanity check
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b'x')), 0)
finally:
data = None
class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.compress(bytearray(data)), x)
for ob in x, bytearray(x):
self.assertEqual(zlib.decompress(ob), data)
def test_incomplete_stream(self):
        # A useful error message is given
x = zlib.compress(HAMLET_SCENE)
self.assertRaisesRegex(zlib.error,
"Error -5 while decompressing data: incomplete or truncated stream",
zlib.decompress, x[:-1])
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
compress = lambda s: zlib.compress(s, 1)
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
self.check_big_decompress_buffer(size, zlib.decompress)
@bigmemtest(size=_4G + 100, memuse=1)
def test_length_overflow(self, size):
if size < _4G + 100:
self.skipTest("not enough free memory, need at least 4 GB")
data = b'x' * size
try:
self.assertRaises(OverflowError, zlib.compress, data, 1)
self.assertRaises(OverflowError, zlib.decompress, data)
finally:
data = None
class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
datasrc = HAMLET_SCENE * 128
datazip = zlib.compress(datasrc)
# should compress both bytes and bytearray data
for data in (datasrc, bytearray(datasrc)):
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
self.assertEqual(x1 + x2, datazip)
for v1, v2 in ((x1, x2), (bytearray(x1), bytearray(x2))):
dco = zlib.decompressobj()
y1 = dco.decompress(v1 + v2)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
self.assertIsInstance(dco.unconsumed_tail, bytes)
self.assertIsInstance(dco.unused_data, bytes)
def test_compressoptions(self):
# specify lots of options to compressobj()
level = 2
method = zlib.DEFLATED
wbits = -12
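        # negative wbits selects a raw deflate stream with no zlib header or trailer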
memlevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level, method, wbits, memlevel, strategy)
x1 = co.compress(HAMLET_SCENE)
x2 = co.flush()
dco = zlib.decompressobj(wbits)
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(HAMLET_SCENE, y1 + y2)
def test_compressincremental(self):
# compress object in steps, decompress object as one-shot
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
dco = zlib.decompressobj()
y1 = dco.decompress(b''.join(bufs))
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
# compress object in steps, decompress object in steps
source = source or HAMLET_SCENE
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
decombuf = zlib.decompress(combuf)
# Test type of return value
self.assertIsInstance(decombuf, bytes)
self.assertEqual(data, decombuf)
dco = zlib.decompressobj()
bufs = []
for i in range(0, len(combuf), dcx):
bufs.append(dco.decompress(combuf[i:i+dcx]))
self.assertEqual(b'', dco.unconsumed_tail, ########
"(A) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
if flush:
bufs.append(dco.flush())
else:
while True:
chunk = dco.decompress(b'')
if chunk:
bufs.append(chunk)
else:
break
self.assertEqual(b'', dco.unconsumed_tail, ########
"(B) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
self.assertEqual(data, b''.join(bufs))
# Failure means: "decompressobj with init options failed"
def test_decompincflush(self):
self.test_decompinc(flush=True)
def test_decompimax(self, source=None, cx=256, dcx=64):
# compress in steps, decompress in length-restricted steps
source = source or HAMLET_SCENE
# Check a decompression object with max_length specified
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
#max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, dcx)
self.assertFalse(len(chunk) > dcx,
'chunk too big (%d>%d)' % (len(chunk), dcx))
bufs.append(chunk)
cb = dco.unconsumed_tail
bufs.append(dco.flush())
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlen(self, flush=False):
# Check a decompression object with max_length specified
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
cb = dco.unconsumed_tail
if flush:
bufs.append(dco.flush())
else:
while chunk:
chunk = dco.decompress(b'', max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlenflush(self):
self.test_decompressmaxlen(flush=True)
def test_maxlenmisc(self):
# Misc tests of max_length
dco = zlib.decompressobj()
self.assertRaises(ValueError, dco.decompress, b"", -1)
self.assertEqual(b'', dco.unconsumed_tail)
def test_clear_unconsumed_tail(self):
# Issue #12050: calling decompress() without providing max_length
# should clear the unconsumed_tail attribute.
cdata = b"x\x9cKLJ\x06\x00\x02M\x01" # "abc"
dco = zlib.decompressobj()
ddata = dco.decompress(cdata, 1)
ddata += dco.decompress(dco.unconsumed_tail)
self.assertEqual(dco.unconsumed_tail, b"")
def test_flushes(self):
# Test flush() with the various options, using all the
# different levels in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt
if hasattr(zlib, opt)]
data = HAMLET_SCENE * 8
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
a = obj.compress( data[:3000] )
b = obj.flush( sync )
c = obj.compress( data[3000:] )
d = obj.flush()
self.assertEqual(zlib.decompress(b''.join([a,b,c,d])),
data, ("Decompress failed: flush "
"mode=%i, level=%i") % (sync, level))
del obj
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
if hasattr(zlib, 'Z_SYNC_FLUSH'):
# Testing on 17K of "random" data
# Create compressor and decompressor objects
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
dco = zlib.decompressobj()
# Try 17K of data
# generate random data stream
try:
# In 2.3 and later, WichmannHill is the RNG of the bug report
gen = random.WichmannHill()
except AttributeError:
try:
# 2.2 called it Random
gen = random.Random()
except AttributeError:
# others might simply have a single RNG
gen = random
gen.seed(1)
data = genblock(1, 17 * 1024, generator=gen)
# compress, sync-flush, and decompress
first = co.compress(data)
second = co.flush(zlib.Z_SYNC_FLUSH)
expanded = dco.decompress(first + second)
# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
# (Bug #1083110 -- calling .flush() on decompress objects
# caused a core dump.)
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
self.assertTrue(co.flush()) # Returns a zlib header
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), b"") # Returns nothing
def test_decompress_incomplete_stream(self):
# This is 'foo', deflated
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
# For the record
self.assertEqual(zlib.decompress(x), b'foo')
self.assertRaises(zlib.error, zlib.decompress, x[:-5])
# Omitting the stream end works with decompressor objects
# (see issue #8672).
dco = zlib.decompressobj()
y = dco.decompress(x[:-5])
y += dco.flush()
self.assertEqual(y, b'foo')
if hasattr(zlib.compressobj(), "copy"):
def test_compresscopy(self):
# Test copying a compression object
data0 = HAMLET_SCENE
data1 = bytes(str(HAMLET_SCENE, "ascii").swapcase(), "ascii")
c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
bufs0 = []
bufs0.append(c0.compress(data0))
c1 = c0.copy()
bufs1 = bufs0[:]
bufs0.append(c0.compress(data0))
bufs0.append(c0.flush())
s0 = b''.join(bufs0)
bufs1.append(c1.compress(data1))
bufs1.append(c1.flush())
s1 = b''.join(bufs1)
self.assertEqual(zlib.decompress(s0),data0+data0)
self.assertEqual(zlib.decompress(s1),data0+data1)
def test_badcompresscopy(self):
# Test copying a compression object in an inconsistent state
c = zlib.compressobj()
c.compress(HAMLET_SCENE)
c.flush()
self.assertRaises(ValueError, c.copy)
if hasattr(zlib.decompressobj(), "copy"):
def test_decompresscopy(self):
# Test copying a decompression object
data = HAMLET_SCENE
comp = zlib.compress(data)
# Test type of return value
self.assertIsInstance(comp, bytes)
d0 = zlib.decompressobj()
bufs0 = []
bufs0.append(d0.decompress(comp[:32]))
d1 = d0.copy()
bufs1 = bufs0[:]
bufs0.append(d0.decompress(comp[32:]))
s0 = b''.join(bufs0)
bufs1.append(d1.decompress(comp[32:]))
s1 = b''.join(bufs1)
self.assertEqual(s0,s1)
self.assertEqual(s0,data)
def test_baddecompresscopy(self):
# Test copying a compression object in an inconsistent state
data = zlib.compress(HAMLET_SCENE)
d = zlib.decompressobj()
d.decompress(data)
d.flush()
self.assertRaises(ValueError, d.copy)
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
c = zlib.compressobj(1)
compress = lambda s: c.compress(s) + c.flush()
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
d = zlib.decompressobj()
decompress = lambda s: d.decompress(s) + d.flush()
self.check_big_decompress_buffer(size, decompress)
@bigmemtest(size=_4G + 100, memuse=1)
def test_length_overflow(self, size):
if size < _4G + 100:
self.skipTest("not enough free memory, need at least 4 GB")
data = b'x' * size
c = zlib.compressobj(1)
d = zlib.decompressobj()
try:
self.assertRaises(OverflowError, c.compress, data)
self.assertRaises(OverflowError, d.decompress, data)
finally:
data = None
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
if seed is not None:
generator.seed(seed)
randint = generator.randint
if length < step or step < 2:
step = length
blocks = bytes()
for i in range(0, length, step):
blocks += bytes(randint(0, 255) for x in range(step))
return blocks
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
generator.seed(seed)
sources = source.split('\n')
return [generator.choice(sources) for n in range(number)]
HAMLET_SCENE = b"""
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
def test_main():
support.run_unittest(
ChecksumTestCase,
ChecksumBigBufferTestCase,
ExceptionTestCase,
CompressTestCase,
CompressObjectTestCase
)
if __name__ == "__main__":
unittest.main() # XXX
###test_main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from mock import Mock, call
from preggy import expect
import lxml.html
from holmes.config import Config
from holmes.reviewer import Reviewer
from holmes.validators.css_requests import CSSRequestsValidator
from tests.unit.base import ValidatorTestCase
from tests.fixtures import PageFactory
class TestCSSRequestsValidator(ValidatorTestCase):
def test_can_validate_css_requests_on_globo_html(self):
page = PageFactory.create()
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[],
cache=self.sync_cache
)
reviewer.violation_definitions = {
'total.requests.css': {'default_value': 1},
'total.size.css': {'default_value': 0.0},
}
content = self.get_file('globo.html')
result = {
'url': page.url,
'status': 200,
'content': content,
'html': lxml.html.fromstring(content)
}
reviewer.responses[page.url] = result
reviewer.get_response = Mock(return_value=result)
validator = CSSRequestsValidator(reviewer)
css = {
'url': 'some_style.css',
'status': 200,
'content': '#id{display:none}',
'html': None
}
validator.get_response = Mock(return_value=css)
validator.add_violation = Mock()
validator.review.data = {
'total.requests.css': 7,
'total.size.css.gzipped': 0.05
}
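        # The default limit is 1 CSS request, so 7 requests is 6 over the limit;
        # the 0.05kb gzipped size is compared against the 0.0 default.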
validator.validate()
expect(validator.add_violation.call_args_list).to_include(
call(
key='total.requests.css',
value={'over_limit': 6, 'total_css_files': 7},
points=30
))
expect(validator.add_violation.call_args_list).to_include(
call(
key='total.size.css',
value=0.05,
points=0
))
def test_can_validate_css_requests_zero_requests(self):
page = PageFactory.create()
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[],
cache=self.sync_cache
)
reviewer.violation_definitions = {
'total.requests.css': {'default_value': 1},
'total.size.css': {'default_value': 0.0},
}
content = "<html></html>"
result = {
'url': page.url,
'status': 200,
'content': content,
'html': lxml.html.fromstring(content)
}
reviewer.responses[page.url] = result
reviewer.get_response = Mock(return_value=result)
validator = CSSRequestsValidator(reviewer)
validator.add_violation = Mock()
validator.validate()
expect(validator.add_violation.called).to_be_false()
def test_can_validate_css_requests_empty_html(self):
page = PageFactory.create()
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=Config(),
validators=[],
cache=self.sync_cache
)
reviewer.violation_definitions = {
'total.requests.css': {'default_value': 1},
'total.size.css': {'default_value': 0.0},
}
result = {
'url': page.url,
'status': 200,
'content': None,
'html': None
}
reviewer.responses[page.url] = result
reviewer.get_response = Mock(return_value=result)
validator = CSSRequestsValidator(reviewer)
validator.add_violation = Mock()
validator.validate()
expect(validator.add_violation.called).to_be_false()
def test_can_get_violation_definitions(self):
reviewer = Mock()
validator = CSSRequestsValidator(reviewer)
definitions = validator.get_violation_definitions()
total_size_message = definitions['total.size.css']['description'] % (0.05)
expect(total_size_message).to_equal(
'There\'s 0.05kb of CSS in this page and that adds up to more '
'download time slowing down the page rendering to the user.'
)
requests_css_message = definitions['total.requests.css']['description'] % ({
'total_css_files': 7,
'over_limit': 6
})
expect(requests_css_message).to_equal(
'This page has 7 CSS request (6 over limit). Having too many '
'requests impose a tax in the browser due to handshakes.'
)
expect(definitions).to_length(2)
expect('total.size.css' in definitions).to_be_true()
expect('total.requests.css' in definitions).to_be_true()
def test_get_css_requests(self):
reviewer = Mock()
validator = CSSRequestsValidator(reviewer)
css1 = Mock()
css2 = Mock()
validator.review.data = {
'page.css': [css1, css2]
}
css_requests = validator.get_css_requests()
expect(css_requests).to_equal([css1, css2])
def test_can_get_default_violations_values(self):
config = Config()
config.MAX_CSS_KB_PER_PAGE_AFTER_GZIP = 70
config.MAX_CSS_REQUESTS_PER_PAGE = 5
page = PageFactory.create()
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=config,
validators=[]
)
validator = CSSRequestsValidator(reviewer)
violations_values = validator.get_default_violations_values(config)
expect(violations_values).to_include('total.size.css')
expect(violations_values['total.size.css']).to_length(2)
expect(violations_values['total.size.css']).to_be_like({
'value': config.MAX_CSS_KB_PER_PAGE_AFTER_GZIP,
'description': config.get_description('MAX_CSS_KB_PER_PAGE_AFTER_GZIP')
})
expect(violations_values).to_include('total.requests.css')
expect(violations_values['total.requests.css']).to_length(2)
expect(violations_values['total.requests.css']).to_be_like({
'value': config.MAX_CSS_REQUESTS_PER_PAGE,
'description': config.get_description('MAX_CSS_REQUESTS_PER_PAGE')
})
|
|
# # # # # # # # # # # # # # # # #
# Self-Care Bot 0.3a #
# Designed by Kelly Maere #
# Programmed by Colin Diener #
# # # # # # # # # # # # # # # # #
"""
TODO: Update code to reflect new Telegram API 2.0
Update code to reflect new version of telepot
Remove threading, replace with asynchronous calling
"""
import telepot
from datetime import datetime, time
from time import sleep
from random import randint
import emoji
# To ask people at the same time
import threading
try:
from ConfigParser import SafeConfigParser
except:
from configparser import *
import json
# Get the API key
parser = SafeConfigParser()
parser.read('settings.ini')
api = parser.get('scb_settings', 'api_key')
# Create the Bot, global vars
bot = telepot.Bot(api)
bot.setWebhook()
users = json.loads(parser.get("users","toImport"))
userreply = []
meduser = json.loads(parser.get("users","medUsers"))
runToday = False
# We're good to go!
print('SCB is up and running, adding users to base...')
# Enter their last response
for i in users:
userreply.append(0)
# Messages
greeting = []
water = []
otherdrink = []
eat_yes = []
eat_no = []
med_yes = []
med_no = []
bye = []
greeting.append("Just checking in! Have you eaten today?")
greeting.append("Howdy! You've eaten already, right?")
greeting.append("Hey there! Have you eaten?")
greeting.append(emoji.emojize(":smile: Hello~ Have you eaten today?", use_aliases=True))
water.append("Great! Don't forget to drink enough!")
water.append("That's fantastic, I'm proud of you :)")
water.append("Look at you! You're doing great!")
water.append("Nice! Don't forget to stay hydrated!")
otherdrink.append("That's fine, but you should have some water, too!")
otherdrink.append("That's alright, don't forget to have some water!")
otherdrink.append("You should go have a glass of water while you're thinking of it!")
otherdrink.append("Alright... Don't forget to have some water~")
eat_yes.append(emoji.emojize(":strawberry: Great!", use_aliases=True))
eat_yes.append(emoji.emojize("Good! You're doing great :ok_hand:", use_aliases=True))
eat_yes.append("Remember to eat healthy! You're doing great :)")
eat_yes.append("That's fantastic, keep it up!")
eat_no.append("You really should eat something. Maybe go get something now while you're thinking of it?")
eat_no.append("Home cooking is a cheap alternative to eating out, and it's fun! You should try it!")
eat_no.append("Please do! I love you and want you to be healthy and strong!")
eat_no.append("Aw :( Please eat something... for me?")
med_yes.append(emoji.emojize(":pill: Way to go!", use_aliases=True))
med_yes.append(emoji.emojize(":thumbsup: Good!", use_aliases=True))
med_yes.append("That's great!")
med_yes.append("I'm so proud of you :)")
med_no.append(":( Please take them, they're there to help you and we love you and we don't want you to get hurt somehow...")
med_no.append("Don't forget to take them... We don't want something to happen to you...")
med_no.append("Remember that we love you and we care about you, and that you should take them...")
med_no.append("Hey! Take them! I care about you and I want you to be okay!")
bye.append(emoji.emojize("Alright :revolving_hearts: Don't forget to take care of yourself! You're super important to us :blush:", use_aliases=True))
bye.append("You're doing great. Don't forget, we're all super proud of you and we love you :) See you tomorrow!")
bye.append("I'm so proud of you! Don't forget to take care of yourself <3")
bye.append(emoji.emojize("Alright :revolving_hearts: Don't forget to take care of yourself! You're our sunshine :sunny:", use_aliases=True))
print('All set. Executing...')
# Threading to send to different people!
class ThreadingObj(threading.Thread):
def __init__(self, id):
threading.Thread.__init__(self)
self.id = id
    def run(self):  # thread entry point: run the check-in conversation for this user index
        checkup(self.id, self)
# Function that will actually send the messages
# userid is going to be the person it sends it to
def checkup(counter, thisthread):
no = {'hide_keyboard': True}
yesno = {'keyboard': [['Yes','No']]}
drinks= {'keyboard': [['Water','Tea'],['Coffee','Soda','Nothing/Other']]}
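    # telepot forwards these plain dicts to Telegram as reply_markup objects:
    # a keyboard of suggested answers, or hide_keyboard to remove it again.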
started = datetime.now()
bypass = False
    def checkKill():
        # Rebind the flag defined in checkup(); without nonlocal the assignment
        # below would only create a local and the wait loops could never time out.
        nonlocal bypass
        delta = datetime.now() - started
        if delta.total_seconds() > 72000: # 72000 seconds is 20 hours
        # if delta.total_seconds() > 10:
            bypass = True
            userreply[counter] = 0
    userid = users[counter]
userreply[counter] = 0 # In case they messaged it in between or something
delay = 1 # So that my computer doesn't explode constantly checking
bot.sendMessage(userid, greeting[randint(0,3)], reply_markup=yesno)
while (userreply[counter] == 0 and bypass == False):
sleep(delay)
checkKill()
if bypass == False:
if userreply[counter] == 'Yes':
bot.sendMessage(userid, eat_yes[randint(0,3)], reply_markup=no)
if userreply[counter] == 'No':
bot.sendMessage(userid, eat_no[randint(0,3)], reply_markup=no)
userreply[counter] = 0
bot.sendMessage(userid, "What have you had to drink?", reply_markup=drinks)
while (userreply[counter] == 0 and bypass == False):
sleep(delay)
checkKill()
if bypass == False:
if userreply[counter] == 'Water':
bot.sendMessage(userid, water[randint(0,3)], reply_markup=no)
if userreply[counter] == 'Tea' or userreply[counter] == "Soda" or userreply[counter] == 'Coffee':
bot.sendMessage(userid, otherdrink[randint(0,3)], reply_markup=no)
if userreply[counter] == 'Nothing/Other':
bot.sendMessage(userid, "Don't forget to hydrate with some water!", reply_markup=no)
userreply[counter] = 0
# This is below a bypass statement because it will skip both if bypass is true
if userid in meduser:
bot.sendMessage(userid, "Are you taking your medications?", reply_markup=yesno)
while (userreply[counter] == 0 and bypass == False):
sleep(delay)
checkKill()
if bypass == False:
if userreply[counter] == "Yes":
bot.sendMessage(userid, med_yes[randint(0,3)], reply_markup=no)
if userreply[counter] == "No":
bot.sendMessage(userid, med_no[randint(0,3)], reply_markup=no)
userreply[counter] = 0 # Just in case <3
# Bye!
bot.sendMessage(userid, bye[randint(0,3)], reply_markup=no)
def setup():
    # users and meduser are rebound here, so declare them global; otherwise the
    # refreshed lists would only exist as locals and runme() would keep stale data.
    global users, meduser
    parser.read('settings.ini')
    users = json.loads(parser.get("users","toImport"))
    del userreply[:]
    meduser = json.loads(parser.get("users","medUsers"))
    for i in users:
        userreply.append(0)
    print("User list updated, all set to go.")
def runme():
print('Creating threads, asking questions...')
usercount = 0
for i in users:
thread = ThreadingObj(usercount)
usercount = usercount+1
thread.start()
def writeConfig():
parser.set('users','toImport', json.dumps(users))
parser.set('users','medUsers', json.dumps(meduser))
# Now that we've made the modifications, write to file
with open('settings.ini','w') as configfile:
parser.write(configfile)
def handle(msg):
chat_id = msg['chat']['id']
command = msg['text']
if chat_id in users:
if '/broadcast' in msg['text']:
bmsg = msg['text']
bmsg = bmsg[10:]
for i in users:
bot.sendMessage(i,bmsg)
userreply[users.index(msg['chat']['id'])] = msg['text']
if '/rem' in msg['text']:
userreply.pop(users.index(msg['chat']['id']))
users.remove(chat_id)
            # For good measure, but only if they had opted into med reminders!
            if chat_id in meduser:
                meduser.remove(chat_id)
writeConfig()
print("%s has opted out of the SCB service." % chat_id)
bot.sendMessage(chat_id,"Sorry to see you go. I won't contact you any more. Send me /start if you change your mind<3")
if '/start' in msg['text']:
if chat_id not in users:
userreply.append(0)
users.append(chat_id)
writeConfig()
print("%s has opted into the SCB service." % chat_id)
                bot.sendMessage(chat_id,"Hi! You've been added to my list of people to contact every day. You can send me /rem if you no longer wish to receive messages. Send me /med if you wish to be asked about medications each day.")
if '/med' in msg['text']:
meduser.append(chat_id)
writeConfig()
bot.sendMessage(chat_id,"I'll ask you if you've taken your meds each day :)")
# Listen for commands
bot.message_loop(handle)
# Main scheduling loop: run the check-in once per day between 14:00 and 17:00,
# reset the flag between 20:00 and 23:00, and sleep two hours between checks
while True:
now = datetime.now()
now_time = now.time()
if now_time >= time(14,00) and now_time <= time(17,00) and runToday is False:
setup()
runme() # Call the real function
runToday = True
if now_time >= time(20,00) and now_time <= time(23,00):
runToday = False
sleep(7200) # There are 7200 seconds in 2 hours
|
|
#!/usr/bin/env python
# @package update_storage_schemas.py
# Correct/Update storage schemas\n
# @code
# # Usage example for update_storage_schemas.py
# sudo ./update_storage_schemas.py --path /opt/graphite/whisper --cfg /opt/graphite/conf/schemas
# @endcode
import sys
import os
import logging
import subprocess
import argparse
import re
import time
from multiprocessing import Pool, cpu_count
from configobj import ConfigObj
# Assuming Python 2, we'll want scandir if possible, it's much faster
try:
from scandir import scandir
except ImportError:
from os import listdir as scandir
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
SCHEMA_LIST = {}
# The very basic default retentions
DEFAULT_SCHEMA = {'match': re.compile('.*'),
'retentions': '1m:7d'}
DEBUG = False
DRY_RUN = False
ROOT_PATH = ""
def config_schemas(cfg):
schema_conf = ConfigObj(cfg)
for schema in schema_conf.items():
item = schema[1]['pattern']
if item == '.*':
DEFAULT_SCHEMA['retentions'] = schema[1]['retentions']
else:
if item[0] == '^':
item = item[1:]
SCHEMA_LIST[item] = {'retentions': schema[1]['retentions'],
'match': re.compile(item)}
def _convert_seconds(time):
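    # Convert the duration half of a retention such as '1m:7d' into seconds,
    # e.g. '1m:7d' -> 7 * 86400 = 604800.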
seconds_dict = {'s': 1, 'm': 60, 'h': 3600, 'min': 60,
'd': 86400, 'w': 604800, 'y': 31536000}
(points, time) = time.split(':')
if str.isalpha(time[-1]):
time = int(time[:-1]) * seconds_dict[time[-1]]
return time
def _compare_retention(retention, tmp_path):
    # Convert the target retention into a list of per-archive lengths in seconds
new_retention = [_convert_seconds(item) for item in list(retention)]
info_string = [INFO_BIN, tmp_path]
cur_ret_list = subprocess.Popen(info_string, stdout=subprocess.PIPE)
cur_ret_list = cur_ret_list.communicate()[0].split('\n')
cur_retention = [int(line.split(':')[1]) for line in cur_ret_list
if 'retention' in line]
return cur_retention == new_retention
def _find_metrics(path):
for f in scandir(path):
if f.is_dir(follow_symlinks=False):
for sf in _find_metrics(f.path):
yield sf
else:
if not f.is_file(follow_symlinks=False) or \
not f.name.endswith('.wsp'):
continue
yield f.path
def fix_metric(metric):
if not SCHEMA_LIST:
LOG.error("Didn't initialize schemas!")
return []
if DEBUG:
LOG.info("Testing %s for modification" % metric)
devnull = open(os.devnull, 'w')
command_string = list(BASE_COMMAND) + [metric]
retention = DEFAULT_SCHEMA['retentions']
matching = metric[len(ROOT_PATH):].replace('/', '.')
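    # Turn the file path under ROOT_PATH into a dotted name so the schema
    # regexes can be matched against it.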
for schema, info in SCHEMA_LIST.iteritems():
if info['match'].search(matching):
retention = info['retentions']
break
command_string.extend(list(retention))
if DEBUG:
LOG.info("Created command: %s" % command_string)
if _compare_retention(retention, metric):
LOG.debug('%s has the same retention as before!' % metric)
return [(False, metric)]
if DRY_RUN:
res = 0
else:
LOG.debug('Retention will be %s' % retention)
# record file owner/group and perms to set properly after whisper-resize.py is complete
st = os.stat(metric)
if DEBUG:
res = subprocess.check_call(command_string)
else:
res = subprocess.check_call(command_string,
stdout=devnull)
os.chmod(metric, st.st_mode)
os.chown(metric, st.st_uid, st.st_gid)
devnull.close()
# wait for a second, so we don't kill I/O on the host
time.sleep(SLEEP)
"""
We have manual commands for every failed file from these
errors, so we can just go through each of these errors
after a completed run. There shouldn't be many
"""
if res != 0:
LOG.error('Failed to update schemas for %s' % metric)
LOG.error('Attempted retention: %s' % retention)
LOG.error('Attempted command string: %s' % command_string)
return [(False, metric)]
else:
return [(True, metric)]
def search_and_fix(subdir):
if not SCHEMA_LIST:
LOG.error("Didn't initialize schemas!")
return
fpath = os.path.join(ROOT_PATH, subdir)
pool = Pool(cpu_count())
LOG.info('Creating new storage schemas for metrics under %s ...' % fpath)
results = pool.map(fix_metric, _find_metrics(fpath), 100)
pool.close()
pool.join()
return results
# Parse command line options sent to the script
def cli_opts():
parser = argparse.ArgumentParser("Correct storage settings on multiple whisper files")
parser.add_argument('--cfg', action='store', dest='cfg',
help='The storage-schemas.conf file path',
required=True)
parser.add_argument('--path', action='store', dest='path',
help='The root path to find metrics in',
required=True)
parser.add_argument('--debug', action='store_true', dest='debug',
help='Display debug information',
default=False)
parser.add_argument('--dry-run', action='store_true', dest='dry_run',
help="Don't actually do anything",
default=False)
parser.add_argument('--subdir', action='store', dest='subdir',
help="If you only want to process a particular subdir",
default='')
parser.add_argument('--nobackup', action='store_true', dest='nobackup',
help="Passed through to whisper-resize.py, don't create a backup",
default=False)
parser.add_argument('--aggregate', action='store_true', dest='aggregate',
help="Passed through to whisper-resize.py, roll up values",
default=False)
parser.add_argument('--bindir', action='store', dest='bindir',
help="The root path to whisper-resize.py and whisper-info.py",
default='/opt/graphite/bin')
parser.add_argument('--sleep', action='store', type=float, dest='sleep',
help="Sleep this amount of time in seconds between metric comparisons",
default=0.3)
return parser.parse_args()
if __name__ == '__main__':
i_args = cli_opts()
if os.getenv('USER') != 'root':
print("You must run this script as root!")
sys.exit(1)
if i_args.debug:
LOG.setLevel(logging.DEBUG)
soh = logging.StreamHandler(sys.stdout)
LOG.addHandler(soh)
ROOT_PATH = i_args.path
DEBUG = i_args.debug
DRY_RUN = i_args.dry_run
BINDIR = i_args.bindir
SLEEP = i_args.sleep
RESIZE_BIN = BINDIR + "/whisper-resize.py"
INFO_BIN = BINDIR + "/whisper-info.py"
BASE_COMMAND = [RESIZE_BIN]
if i_args.nobackup:
BASE_COMMAND.append('--nobackup')
if i_args.aggregate:
BASE_COMMAND.append('--aggregate')
config_schemas(i_args.cfg)
search_and_fix(i_args.subdir)
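# Example invocation (illustrative; the script name and paths are assumptions
# for a typical Graphite install):
#
#   sudo python fix_storage_schemas.py \
#       --cfg /opt/graphite/conf/storage-schemas.conf \
#       --path /opt/graphite/storage/whisper/ \
#       --bindir /opt/graphite/bin --dry-run --debug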
|
|
import asyncio
import logging
from PyQt5.QtCore import QObject, Qt
from aiohttp import ClientError
from asyncio import TimeoutError
from sakia.gui.widgets.dialogs import dialog_async_exec, QAsyncFileDialog, QMessageBox
from duniterpy.api.errors import DuniterError
from duniterpy.documents import MalformedDocumentError
from sakia.data.connectors import BmaConnector
from sakia.data.processors import IdentitiesProcessor, NodesProcessor
from sakia.decorators import asyncify
from sakia.errors import NoPeerAvailable
from sakia.helpers import detect_non_printable
from .model import ConnectionConfigModel
from .view import ConnectionConfigView
class ConnectionConfigController(QObject):
"""
    The ConnectionConfigController component
"""
CONNECT = 0
REGISTER = 1
WALLET = 2
PUBKEY = 3
def __init__(self, parent, view, model):
"""
        Constructor of the ConnectionConfigController component
        :param parent: the parent QObject
        :param sakia.gui.dialogs.connection_cfg.view.ConnectionConfigView view: the view
        :param sakia.gui.dialogs.connection_cfg.model.ConnectionConfigModel model: the model
"""
super().__init__(parent)
self.view = view
self.model = model
self.mode = -1
self.step_node = asyncio.Future()
self.step_licence = asyncio.Future()
self.step_key = asyncio.Future()
self.view.button_connect.clicked.connect(
lambda: self.step_node.set_result(ConnectionConfigController.CONNECT))
self.view.button_register.clicked.connect(
lambda: self.step_node.set_result(ConnectionConfigController.REGISTER))
self.view.button_wallet.clicked.connect(
lambda: self.step_node.set_result(ConnectionConfigController.WALLET))
self.view.button_pubkey.clicked.connect(
lambda: self.step_node.set_result(ConnectionConfigController.PUBKEY))
self.view.values_changed.connect(lambda: self.view.button_next.setEnabled(self.check_key()))
self.view.values_changed.connect(lambda: self.view.button_generate.setEnabled(self.check_key()))
self._logger = logging.getLogger('sakia')
@classmethod
def create(cls, parent, app):
"""
        Instantiate a ConnectionConfigController component
        :param sakia.gui.component.controller.ComponentController parent:
        :param sakia.app.Application app:
        :return: a new ConnectionConfigController controller
        :rtype: ConnectionConfigController
"""
view = ConnectionConfigView(parent.view if parent else None)
model = ConnectionConfigModel(None, app, None,
IdentitiesProcessor.instanciate(app))
account_cfg = cls(parent, view, model)
model.setParent(account_cfg)
view.set_license(app.currency)
return account_cfg
@classmethod
def create_connection(cls, parent, app):
"""
        Open a dialog to create a new connection
:param parent:
:param app:
:return:
"""
connection_cfg = cls.create(parent, app)
connection_cfg.view.set_creation_layout(app.currency)
asyncio.ensure_future(connection_cfg.process())
return connection_cfg
def init_nodes_page(self):
self.view.set_steps_buttons_visible(True)
model = self.model.init_nodes_model()
        self.view.tree_peers.customContextMenuRequested.connect(self.show_context_menu)
self.view.set_nodes_model(model)
self.view.button_previous.setEnabled(False)
        self.view.button_next.setText(self.tr("Ok"))
def init_name_page(self):
"""
Initialize an account name page
"""
if self.model.connection:
self.view.set_account_name(self.model.connection.uid)
self.view.button_previous.setEnabled(False)
self.view.button_next.setEnabled(False)
def check_name(self):
return len(self.view.edit_account_name.text()) > 2
async def process(self):
self._logger.debug("Begin process")
if self.model.connection:
self.mode = await self.step_node
else:
while not self.model.connection:
self.mode = await self.step_node
self._logger.debug("Create connection")
try:
self.view.button_connect.setEnabled(False)
self.view.button_register.setEnabled(False)
await self.model.create_connection()
except (ClientError, MalformedDocumentError, ValueError, TimeoutError) as e:
self._logger.debug(str(e))
self.view.display_info(self.tr("Could not connect. Check hostname, ip address or port : <br/>"
+ str(e)))
self.step_node = asyncio.Future()
self.view.button_connect.setEnabled(True)
self.view.button_register.setEnabled(True)
self._logger.debug("Licence step")
self.view.stacked_pages.setCurrentWidget(self.view.page_licence)
self.view.button_accept.clicked.connect(lambda: self.step_licence.set_result(True))
await self.step_licence
self.view.button_accept.disconnect()
self._logger.debug("Key step")
self.view.set_currency(self.model.connection.currency)
connection_identity = None
self.view.button_next.setEnabled(self.check_key())
if self.mode == ConnectionConfigController.REGISTER:
self._logger.debug("Registering mode")
self.view.groupbox_pubkey.hide()
self.view.button_next.clicked.connect(self.check_register)
self.view.stacked_pages.setCurrentWidget(self.view.page_connection)
connection_identity = await self.step_key
elif self.mode == ConnectionConfigController.CONNECT:
self._logger.debug("Connect mode")
self.view.button_next.setText(self.tr("Next"))
self.view.groupbox_pubkey.hide()
self.view.button_next.clicked.connect(self.check_connect)
self.view.stacked_pages.setCurrentWidget(self.view.page_connection)
connection_identity = await self.step_key
elif self.mode == ConnectionConfigController.WALLET:
self._logger.debug("Wallet mode")
self.view.button_next.setText(self.tr("Next"))
self.view.button_next.clicked.connect(self.check_wallet)
self.view.edit_uid.hide()
self.view.label_action.hide()
self.view.groupbox_pubkey.hide()
self.view.stacked_pages.setCurrentWidget(self.view.page_connection)
connection_identity = await self.step_key
elif self.mode == ConnectionConfigController.PUBKEY:
self._logger.debug("Pubkey mode")
self.view.button_next.setText(self.tr("Next"))
self.view.button_next.clicked.connect(self.check_pubkey)
if not self.view.label_action.text().endswith(self.tr(" (Optional)")):
self.view.label_action.setText(self.view.label_action.text() + self.tr(" (Optional)"))
self.view.groupbox_key.hide()
self.view.stacked_pages.setCurrentWidget(self.view.page_connection)
connection_identity = await self.step_key
self.view.stacked_pages.setCurrentWidget(self.view.page_services)
self.view.set_progress_steps(6)
try:
if self.mode == ConnectionConfigController.REGISTER:
self.view.display_info(self.tr("Broadcasting identity..."))
self.view.stream_log("Broadcasting identity...")
result = await self.model.publish_selfcert(connection_identity)
if result[0]:
await self.view.show_success(self.model.notification())
else:
self.view.show_error(self.model.notification(), result[1])
raise StopIteration()
self.view.set_step(1)
if self.mode in (ConnectionConfigController.REGISTER,
ConnectionConfigController.CONNECT,
ConnectionConfigController.PUBKEY) and connection_identity:
self.view.stream_log("Saving identity...")
self.model.connection.blockstamp = connection_identity.blockstamp
self.model.insert_or_update_connection()
self.model.insert_or_update_identity(connection_identity)
self.view.stream_log("Initializing identity informations...")
await self.model.initialize_identity(connection_identity,
log_stream=self.view.stream_log,
progress=self.view.progress)
self.view.stream_log("Initializing certifications informations...")
self.view.set_step(2)
await self.model.initialize_certifications(connection_identity,
log_stream=self.view.stream_log,
progress=self.view.progress)
self.view.set_step(3)
self.view.stream_log("Initializing transactions history...")
transactions = await self.model.initialize_transactions(self.model.connection,
log_stream=self.view.stream_log,
progress=self.view.progress)
self.view.set_step(4)
self.view.stream_log("Initializing dividends history...")
dividends = await self.model.initialize_dividends(self.model.connection, transactions,
log_stream=self.view.stream_log,
progress=self.view.progress)
self.view.set_step(5)
await self.model.initialize_sources(log_stream=self.view.stream_log,
progress=self.view.progress)
self.view.set_step(6)
self._logger.debug("Validate changes")
self.model.insert_or_update_connection()
self.model.app.db.commit()
if self.mode == ConnectionConfigController.REGISTER:
await self.view.show_register_message(self.model.blockchain_parameters())
except (NoPeerAvailable, DuniterError, StopIteration) as e:
if not isinstance(e, StopIteration):
self.view.show_error(self.model.notification(), str(e))
self._logger.debug(str(e))
self.view.stacked_pages.setCurrentWidget(self.view.page_connection)
self.step_node = asyncio.Future()
self.step_node.set_result(self.mode)
self.step_key = asyncio.Future()
self.view.button_next.disconnect()
self.view.edit_uid.show()
asyncio.ensure_future(self.process())
return
self.accept()
def check_key(self):
if self.mode == ConnectionConfigController.PUBKEY:
if len(self.view.edit_pubkey.text()) < 42:
self.view.label_info.setText(self.tr("Forbidden : pubkey is too short"))
return False
if len(self.view.edit_pubkey.text()) > 45:
self.view.label_info.setText(self.tr("Forbidden : pubkey is too long"))
return False
else:
if self.view.edit_password.text() != \
self.view.edit_password_repeat.text():
self.view.label_info.setText(self.tr("Error : passwords are different"))
return False
if self.view.edit_salt.text() != \
self.view.edit_salt_bis.text():
self.view.label_info.setText(self.tr("Error : secret keys are different"))
return False
if detect_non_printable(self.view.edit_salt.text()):
self.view.label_info.setText(self.tr("Forbidden : Invalid characters in salt field"))
return False
if detect_non_printable(self.view.edit_password.text()):
self.view.label_info.setText(
self.tr("Forbidden : Invalid characters in password field"))
return False
if self.model.app.parameters.expert_mode:
self.view.label_info.setText(
self.tr(""))
return True
if len(self.view.edit_salt.text()) < 6:
self.view.label_info.setText(self.tr("Forbidden : salt is too short"))
return False
if len(self.view.edit_password.text()) < 6:
self.view.label_info.setText(self.tr("Forbidden : password is too short"))
return False
self.view.label_info.setText("")
return True
async def action_save_revocation(self):
raw_document, identity = self.model.generate_revocation()
# Testable way of using a QFileDialog
selected_files = await QAsyncFileDialog.get_save_filename(self.view, self.tr("Save a revocation document"),
"", self.tr("All text files (*.txt)"))
if selected_files:
path = selected_files[0]
if not path.endswith('.txt'):
path = "{0}.txt".format(path)
with open(path, 'w') as save_file:
save_file.write(raw_document)
            dialog = QMessageBox(QMessageBox.Information, self.tr("Revocation file"),
                                 self.tr("""<div>Your revocation document has been saved.</div>
<div><b>Please keep it in a safe place.</b></div>
<p>The publication of this document will remove your identity from the network.</p>"""), QMessageBox.Ok)
            dialog.setTextFormat(Qt.RichText)
            await dialog_async_exec(dialog)
return True, identity
return False, identity
@asyncify
async def check_pubkey(self, checked=False):
self._logger.debug("Is valid ? ")
self.view.display_info(self.tr("connecting..."))
try:
self.model.set_pubkey(self.view.edit_pubkey.text(), self.view.scrypt_params)
self.model.set_uid(self.view.edit_uid.text())
if not self.model.key_exists():
try:
registered, found_identity = await self.model.check_registered()
self.view.button_connect.setEnabled(True)
self.view.button_register.setEnabled(True)
if self.view.edit_uid.text():
if registered[0] is False and registered[2] is None:
self.view.display_info(self.tr("Could not find your identity on the network."))
elif registered[0] is False and registered[2]:
self.view.display_info(self.tr("""Your pubkey or UID is different on the network.
Yours : {0}, the network : {1}""".format(registered[1], registered[2])))
else:
self.step_key.set_result(found_identity)
else:
self.step_key.set_result(None)
except DuniterError as e:
self.view.display_info(e.message)
except NoPeerAvailable as e:
self.view.display_info(str(e))
else:
self.view.display_info(self.tr("A connection already exists using this key."))
except NoPeerAvailable:
            self.view.display_info(self.tr("Could not connect. Check node peering entry"))
@asyncify
async def check_wallet(self, checked=False):
self._logger.debug("Is valid ? ")
self.view.display_info(self.tr("connecting..."))
try:
salt = self.view.edit_salt.text()
password = self.view.edit_password.text()
self.model.set_scrypt_infos(salt, password, self.view.scrypt_params)
self.model.set_uid("")
if not self.model.key_exists():
try:
registered, found_identity = await self.model.check_registered()
self.view.button_connect.setEnabled(True)
self.view.button_register.setEnabled(True)
if registered[0] is False and registered[2] is None:
self.step_key.set_result(None)
elif registered[2]:
self.view.display_info(self.tr("""Your pubkey is associated to an identity.
Yours : {0}, the network : {1}""".format(registered[1], registered[2])))
except DuniterError as e:
self.view.display_info(e.message)
except NoPeerAvailable as e:
self.view.display_info(str(e))
else:
self.view.display_info(self.tr("A connection already exists using this key."))
except NoPeerAvailable:
            self.view.display_info(self.tr("Could not connect. Check node peering entry"))
@asyncify
async def check_connect(self, checked=False):
self._logger.debug("Is valid ? ")
self.view.display_info(self.tr("connecting..."))
try:
salt = self.view.edit_salt.text()
password = self.view.edit_password.text()
self.model.set_scrypt_infos(salt, password, self.view.scrypt_params)
self.model.set_uid(self.view.edit_uid.text())
if not self.model.key_exists():
try:
registered, found_identity = await self.model.check_registered()
self.view.button_connect.setEnabled(True)
self.view.button_register.setEnabled(True)
if registered[0] is False and registered[2] is None:
self.view.display_info(self.tr("Could not find your identity on the network."))
elif registered[0] is False and registered[2]:
self.view.display_info(self.tr("""Your pubkey or UID is different on the network.
Yours : {0}, the network : {1}""".format(registered[1], registered[2])))
else:
self.step_key.set_result(found_identity)
except DuniterError as e:
self.view.display_info(e.message)
except NoPeerAvailable as e:
self.view.display_info(str(e))
else:
self.view.display_info(self.tr("A connection already exists using this key."))
except NoPeerAvailable:
            self.view.display_info(self.tr("Could not connect. Check node peering entry"))
@asyncify
async def check_register(self, checked=False):
self._logger.debug("Is valid ? ")
self.view.display_info(self.tr("connecting..."))
try:
salt = self.view.edit_salt.text()
password = self.view.edit_password.text()
self.model.set_scrypt_infos(salt, password, self.view.scrypt_params)
self.model.set_uid(self.view.edit_uid.text())
if not self.model.key_exists():
try:
registered, found_identity = await self.model.check_registered()
if registered[0] is False and registered[2] is None:
result, identity = await self.action_save_revocation()
if result:
self.step_key.set_result(identity)
else:
self.view.display_info("Saving your revocation document on your disk is mandatory.")
elif registered[0] is False and registered[2]:
self.view.display_info(self.tr("""Your pubkey or UID was already found on the network.
Yours : {0}, the network : {1}""".format(registered[1], registered[2])))
else:
self.view.display_info("Your account already exists on the network")
except DuniterError as e:
self.view.display_info(e.message)
except NoPeerAvailable as e:
self.view.display_info(str(e))
else:
self.view.display_info(self.tr("A connection already exists using this key."))
except NoPeerAvailable:
self.view.display_info(self.tr("Could not connect. Check node peering entry"))
@asyncify
async def accept(self):
self.view.accept()
def async_exec(self):
future = asyncio.Future()
self.view.finished.connect(lambda r: future.set_result(r))
self.view.open()
return future
def exec(self):
return self.view.exec()
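# Minimal sketch (not part of sakia) of the pattern used throughout this
# controller: a coroutine awaits an asyncio.Future that a Qt signal resolves,
# which is what lets process() pause until the user clicks a button.
def _wait_for_click_sketch(button):
    """Return a future resolved with True when `button` is clicked (illustrative)."""
    step = asyncio.Future()
    button.clicked.connect(lambda: step.done() or step.set_result(True))
    return step
# Inside a coroutine (assuming a quamash/asyncqt event loop drives asyncio):
#   clicked = await _wait_for_click_sketch(view.button_accept)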
|
|
# Copyright (c) 2015, Vienna University of Technology,
# Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology,
# Department of Geodesy and Geoinformation nor the
# names of its contributors may be used to endorse or promote products #
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import pytesmo.metrics as met
import numpy as np
import numpy.testing as nptest
def test_pearson_conf():
"""
    Test the Pearson correlation confidence interval based on
    the Fisher z-transform
"""
# first example
n = 34
r = 0.654
rl, ru = met.pearson_conf(r, n, c=95)
nptest.assert_almost_equal(rl, 0.406, decimal=3)
nptest.assert_almost_equal(ru, 0.812, decimal=3)
rl, ru = met.pearson_conf(r, n, c=99)
nptest.assert_almost_equal(rl, 0.309, decimal=3)
nptest.assert_almost_equal(ru, 0.8468, decimal=3)
# second example
r = 0.824
n = 300
rl, ru = met.pearson_conf(r, n, c=95)
nptest.assert_almost_equal(rl, 0.784, decimal=3)
nptest.assert_almost_equal(ru, 0.857, decimal=3)
rl, ru = met.pearson_conf(r, n, c=99)
nptest.assert_almost_equal(rl, 0.7697, decimal=3)
nptest.assert_almost_equal(ru, 0.866, decimal=3)
# test numpy arrays as input
r = np.array([0.654, 0.824])
n = np.array([34, 300])
rl, ru = met.pearson_conf(r, n, c=95)
nptest.assert_almost_equal(rl, np.array([0.406, 0.784]), decimal=3)
nptest.assert_almost_equal(ru, np.array([0.812, 0.857]), decimal=3)
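# A minimal reference sketch (not part of pytesmo) of the Fisher z-transform
# interval that pearson_conf is expected to compute; the two-sided normal
# quantiles below are hard-coded assumptions for c=95 and c=99 only.
def _pearson_conf_sketch(r, n, c=95):
    zcrit = {95: 1.959964, 99: 2.575829}[c]  # two-sided normal quantile
    z = np.arctanh(r)  # Fisher z-transform of r
    se = 1.0 / np.sqrt(np.asarray(n) - 3.0)  # standard error of z
    return np.tanh(z - zcrit * se), np.tanh(z + zcrit * se)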
def test_bias():
"""
Test for bias
"""
# example 1
x = np.arange(10)
y = np.arange(10) + 2
b_pred = -2
b_obs = met.bias(x, y)
nptest.assert_equal(b_obs, b_pred)
# example 2
x = np.arange(10)
y = np.arange(20, 30)
b_pred = 20.
b_obs = met.bias(y, x)
nptest.assert_equal(b_obs, b_pred)
def test_aad():
"""
Test for average absolute deviation
"""
# example 1
x = np.arange(10)
y = np.arange(10) + 2
dev_pred = 2.
dev_obs = met.aad(x, y)
nptest.assert_equal(dev_obs, dev_pred)
# example 2, with outlier
x = np.arange(10)
y = np.arange(10) + 2
y[-1] = 201.
dev_pred = 21.
dev_obs = met.aad(x, y)
nptest.assert_equal(dev_obs, dev_pred)
def test_mad():
"""
Test for median absolute deviation
"""
# example 1
x = np.arange(10)
y = np.arange(10) + 2
dev_pred = 2.
dev_obs = met.mad(x, y)
nptest.assert_equal(dev_obs, dev_pred)
# example 2, with outlier
x = np.arange(10)
y = np.arange(10) + 2
y[-1] = 201.
dev_pred = 2.
dev_obs = met.mad(x, y)
nptest.assert_equal(dev_obs, dev_pred)
def test_rmsd():
"""
Test for rmsd
"""
# example 1
x = np.arange(10)
y = np.arange(10) + 2
rmsd_pred = 2.
rmsd_obs = met.rmsd(x, y)
nptest.assert_equal(rmsd_obs, rmsd_pred)
# example 2, with outlier
x = np.arange(10)
y = np.arange(10) + 2
y[-1] = 100.
rmsd_pred = np.sqrt(831.7)
rmsd_obs = met.rmsd(x, y)
nptest.assert_almost_equal(rmsd_obs, rmsd_pred, 6)
def test_mse():
"""
Test for mse
"""
# example 1
x = np.arange(10)
y = np.arange(10) + 2
mse_pred = 4.
mse_bias_pred = 2. ** 2
mse_obs, _, mse_bias, _ = met.mse(x, y)
nptest.assert_equal(mse_obs, mse_pred)
nptest.assert_equal(mse_bias, mse_bias_pred)
# example 2, with outlier
x = np.arange(10)
y = np.arange(10) + 2
y[-1] = 51.
mse_pred = 180.
mse_bias_pred = 36.
mse_obs, _, mse_bias, _ = met.mse(x, y)
nptest.assert_almost_equal(mse_obs, mse_pred, 6)
nptest.assert_almost_equal(mse_bias, mse_bias_pred, 6)
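# Reminder of the decomposition behind the four values unpacked from met.mse
# above (stated as an assumption about the implementation, with r the Pearson
# correlation between x and y):
#   MSE = (mean(x) - mean(y))**2              # mse_bias
#       + (std(x) - std(y))**2                # mse_var
#       + 2 * std(x) * std(y) * (1 - r)       # mse_corr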
def test_rmsd_mse():
"""
Test for rmsd and mse
"""
# example 1
x = np.random.randn(1000)
y = np.random.randn(1000)
rmsd_pred = met.rmsd(x, y)
mse_pred, _, _, _ = met.mse(x, y)
nptest.assert_almost_equal(rmsd_pred ** 2, mse_pred, 6)
def test_tcol_error():
"""
Test the triple collocation error estimation based on
a random signal and error.
Also compare the results to the other method
"""
n = 1000000
signal = np.sin(np.linspace(0, 2 * np.pi, n))
sig_err_x = 0.02
sig_err_y = 0.07
sig_err_z = 0.04
err_pred = np.array((sig_err_x, sig_err_y, sig_err_z))
err_x = np.random.normal(0, sig_err_x, n)
err_y = np.random.normal(0, sig_err_y, n)
err_z = np.random.normal(0, sig_err_z, n)
alpha_y = 0.2
alpha_z = 0.5
beta_y = 0.9
beta_z = 1.6
x = signal + err_x
y = alpha_y + beta_y * (signal + err_y)
z = alpha_z + beta_z * (signal + err_z)
snr, err, beta = met.tcol_snr(x, y, z, ref_ind=0)
# classical triple collocation errors use scaled (removed alpha and beta)
# input arrays
ex, ey, ez = met.tcol_error(signal + err_x, signal + err_y, signal + err_z)
nptest.assert_almost_equal(err, np.array([ex, ey, ez]), decimal=2)
nptest.assert_almost_equal(err_pred, np.array([ex, ey, ez]), decimal=2)
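# Classical triple collocation spelled out (illustrative sketch, assuming
# zero-mean errors uncorrelated with the signal and with each other, and
# inputs already scaled into a common data space as in the test above):
def _tcol_error_sketch(x, y, z):
    e_x = np.sqrt(np.mean((x - y) * (x - z)))
    e_y = np.sqrt(np.mean((y - x) * (y - z)))
    e_z = np.sqrt(np.mean((z - x) * (z - y)))
    return e_x, e_y, e_z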
def test_tcol_snr():
"""
Test the triple collocation based estimation of
signal to noise ratio, absolute errors and rescaling coefficients
"""
n = 1000000
mean_signal = 0.3
sig_signal = 0.2
signal = np.random.normal(mean_signal, sig_signal, n)
sig_err_x = 0.02
sig_err_y = 0.07
sig_err_z = 0.04
err_x = np.random.normal(0, sig_err_x, n)
err_y = np.random.normal(0, sig_err_y, n)
err_z = np.random.normal(0, sig_err_z, n)
alpha_y = 0.2
alpha_z = 0.5
beta_y = 0.9
beta_z = 1.6
x = signal + err_x
y = alpha_y + beta_y * (signal + err_y)
z = alpha_z + beta_z * (signal + err_z)
beta_pred = 1. / np.array((1, beta_y, beta_z))
err_pred = np.array((sig_err_x, sig_err_y, sig_err_z))
snr_pred = np.array(
((sig_signal / sig_err_x), (sig_signal / sig_err_y), (sig_signal / sig_err_z)))
snr, err, beta = met.tcol_snr(x, y, z, ref_ind=0)
nptest.assert_almost_equal(beta, beta_pred, decimal=2)
nptest.assert_almost_equal(err, err_pred, decimal=2)
nptest.assert_almost_equal(np.sqrt(10 ** (snr / 10.)), snr_pred, decimal=1)
|
|
#!/usr/bin/kivy
'''
Showcase of Kivy Features
=========================
This showcases many features of Kivy. You should see a
menu bar across the top with a demonstration area below. The
first demonstration is the accordion layout. You can see, but not
edit, the kv language code for any screen by pressing the bug or
'show source' icon. Scroll through the demonstrations using the
left and right icons in the top right or selecting from the menu
bar.
The file showcase.kv describes the main container, while each demonstration
pane is described in a separate .kv file in the data/screens directory.
The image data/background.png provides the gradient background while the
icons in data/icon directory are used in the control bar. The file
data/faust_github.jpg is used in the Scatter pane. The icons are
from `http://www.gentleface.com/free_icon_set.html` and licensed as
Creative Commons - Attribution and Non-commercial Use Only; they
sell a commercial license.
The file android.txt is used to package the application for use with the
Kivy Launcher Android application. For Android devices, you can
copy/paste this directory into /sdcard/kivy/showcase on your Android device.
'''
from time import time
from kivy.app import App
from os.path import dirname, join
from kivy.lang import Builder
from kivy.properties import NumericProperty, StringProperty, BooleanProperty,\
ListProperty
from kivy.clock import Clock
from kivy.animation import Animation
from kivy.uix.screenmanager import Screen
class ShowcaseScreen(Screen):
fullscreen = BooleanProperty(False)
def add_widget(self, *args):
if 'content' in self.ids:
return self.ids.content.add_widget(*args)
return super(ShowcaseScreen, self).add_widget(*args)
class ShowcaseApp(App):
index = NumericProperty(-1)
current_title = StringProperty()
time = NumericProperty(0)
show_sourcecode = BooleanProperty(False)
sourcecode = StringProperty()
screen_names = ListProperty([])
hierarchy = ListProperty([])
def build(self):
self.title = 'hello world'
Clock.schedule_interval(self._update_clock, 1 / 60.)
self.screens = {}
self.available_screens = sorted([
'Buttons', 'ToggleButton', 'Sliders', 'ProgressBar', 'Switches',
'CheckBoxes', 'TextInputs', 'Accordions', 'FileChoosers',
'Carousel', 'Bubbles', 'CodeInput', 'DropDown', 'Spinner',
'Scatter', 'Splitter', 'TabbedPanel + Layouts', 'RstDocument',
'Popups', 'ScreenManager'])
self.screen_names = self.available_screens
curdir = dirname(__file__)
self.available_screens = [join(curdir, 'data', 'screens',
'{}.kv'.format(fn)) for fn in self.available_screens]
self.go_next_screen()
def on_pause(self):
return True
def on_resume(self):
pass
def on_current_title(self, instance, value):
self.root.ids.spnr.text = value
def go_previous_screen(self):
self.index = (self.index - 1) % len(self.available_screens)
screen = self.load_screen(self.index)
sm = self.root.ids.sm
sm.switch_to(screen, direction='right')
self.current_title = screen.name
self.update_sourcecode()
def go_next_screen(self):
self.index = (self.index + 1) % len(self.available_screens)
screen = self.load_screen(self.index)
sm = self.root.ids.sm
sm.switch_to(screen, direction='left')
self.current_title = screen.name
self.update_sourcecode()
def go_screen(self, idx):
self.index = idx
self.root.ids.sm.switch_to(self.load_screen(idx), direction='left')
self.update_sourcecode()
def go_hierarchy_previous(self):
ahr = self.hierarchy
if len(ahr) == 1:
return
if ahr:
ahr.pop()
if ahr:
idx = ahr.pop()
self.go_screen(idx)
def load_screen(self, index):
if index in self.screens:
return self.screens[index]
screen = Builder.load_file(self.available_screens[index].lower())
self.screens[index] = screen
return screen
def read_sourcecode(self):
fn = self.available_screens[self.index].lower()
with open(fn) as fd:
return fd.read()
def toggle_source_code(self):
self.show_sourcecode = not self.show_sourcecode
if self.show_sourcecode:
height = self.root.height * .3
else:
height = 0
Animation(height=height, d=.3, t='out_quart').start(
self.root.ids.sv)
self.update_sourcecode()
def update_sourcecode(self):
if not self.show_sourcecode:
self.root.ids.sourcecode.focus = False
return
self.root.ids.sourcecode.text = self.read_sourcecode()
self.root.ids.sv.scroll_y = 1
def showcase_floatlayout(self, layout):
def add_button(*t):
if not layout.get_parent_window():
return
if len(layout.children) > 5:
layout.clear_widgets()
layout.add_widget(Builder.load_string('''
#:import random random.random
Button:
size_hint: random(), random()
pos_hint: {'x': random(), 'y': random()}
text:
'size_hint x: {} y: {}\\n pos_hint x: {} y: {}'.format(\
self.size_hint_x, self.size_hint_y, self.pos_hint['x'],\
self.pos_hint['y'])
'''))
Clock.schedule_once(add_button, 1)
Clock.schedule_once(add_button)
def showcase_boxlayout(self, layout):
def add_button(*t):
if not layout.get_parent_window():
return
if len(layout.children) > 5:
layout.orientation = 'vertical'\
if layout.orientation == 'horizontal' else 'horizontal'
layout.clear_widgets()
layout.add_widget(Builder.load_string('''
Button:
text: self.parent.orientation if self.parent else ''
'''))
Clock.schedule_once(add_button, 1)
Clock.schedule_once(add_button)
def showcase_gridlayout(self, layout):
def add_button(*t):
if not layout.get_parent_window():
return
if len(layout.children) > 15:
layout.rows = 3 if layout.rows is None else None
layout.cols = None if layout.rows == 3 else 3
layout.clear_widgets()
layout.add_widget(Builder.load_string('''
Button:
text:
'rows: {}\\ncols: {}'.format(self.parent.rows, self.parent.cols)\
if self.parent else ''
'''))
Clock.schedule_once(add_button, 1)
Clock.schedule_once(add_button)
def showcase_stacklayout(self, layout):
orientations = ('lr-tb', 'tb-lr',
'rl-tb', 'tb-rl',
'lr-bt', 'bt-lr',
'rl-bt', 'bt-rl')
def add_button(*t):
if not layout.get_parent_window():
return
if len(layout.children) > 11:
layout.clear_widgets()
cur_orientation = orientations.index(layout.orientation)
layout.orientation = orientations[cur_orientation - 1]
layout.add_widget(Builder.load_string('''
Button:
text: self.parent.orientation if self.parent else ''
size_hint: .2, .2
'''))
Clock.schedule_once(add_button, 1)
Clock.schedule_once(add_button)
def showcase_anchorlayout(self, layout):
def change_anchor(self, *l):
if not layout.get_parent_window():
return
anchor_x = ('left', 'center', 'right')
anchor_y = ('top', 'center', 'bottom')
if layout.anchor_x == 'left':
layout.anchor_y = anchor_y[anchor_y.index(layout.anchor_y) - 1]
layout.anchor_x = anchor_x[anchor_x.index(layout.anchor_x) - 1]
Clock.schedule_once(change_anchor, 1)
Clock.schedule_once(change_anchor, 1)
def _update_clock(self, dt):
self.time = time()
if __name__ == '__main__':
ShowcaseApp().run()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import re
import sys
import threading
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError(
"Expected op for node %s is different. %s vs %s" % (
node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError(
"Not all expected ops are present. Expected %s, found %s" % (
expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"):
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
ops.reset_default_graph()
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
if not self._tempdir:
self._tempdir = googletest.GetTempDir()
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form
message: the message to validate
"""
if type(expected_message_maybe_ascii) == type(message):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message)
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type " +
type(expected_message_maybe_ascii) + " and " +
type(message))
def assertProtoEqualsVersion(
self, expected, actual, producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
            self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/gpu:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif force_gpu and config.allow_soft_placement:
        # CopyFrom() mutates in place and returns None, so copy explicitly.
        copied = config_pb2.ConfigProto()
        copied.CopyFrom(config)
        copied.allow_soft_placement = False
        config = copied
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(graph=None,
config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
# pylint: disable=broad-except
except Exception as e:
# pylint: enable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail(
"Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
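  # Illustrative usage sketch (not from the original file): run assertions in a
  # worker thread so a failure there fails the test when join() is called.
  #
  #   def testThreadedWork(self):
  #     def work():
  #       self.assertEqual(1 + 1, 2)
  #     t = self.checkedThread(target=work)
  #     t.start()
  #     t.join()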
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: a float value.
f2: a float value.
err: a float value.
"""
self.assertTrue(math.fabs(f1 - f2) < err)
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
for f1, f2 in zip(farray1, farray2):
self.assertNear(f1, f2, err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays have near values.
Args:
a: a numpy ndarray or anything can be converted to one.
b: a numpy ndarray or anything can be converted to one.
rtol: relative tolerance
atol: absolute tolerance
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.abs(a - b) > atol + rtol * np.abs(b)
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
a: a numpy ndarray or anything can be converted to one.
b: a numpy ndarray or anything can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in OpError exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
errors.OpError exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message
op = e.op
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
# pylint: disable=broad-except
except Exception as e:
# pylint: enable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(e)
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
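  # Illustrative usage sketch, reusing the error text from the test_session
  # docstring example above:
  #
  #   with self.assertRaisesOpError("negative input not supported"):
  #     MyOperator(invalid_input).eval()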
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `Device` object.
device2: A string device name or TensorFlow `Device` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
|
|
import datetime
import arrow
from django.db import models
from django.utils.translation import ugettext_lazy as _
from common.models import Address, User, Org
from common.utils import CURRENCY_CODES
from accounts.models import Account
from phonenumber_field.modelfields import PhoneNumberField
from teams.models import Teams
class Invoice(models.Model):
"""Model definition for Invoice."""
INVOICE_STATUS = (
("Draft", "Draft"),
("Sent", "Sent"),
("Paid", "Paid"),
("Pending", "Pending"),
("Cancelled", "Cancel"),
)
invoice_title = models.CharField(_("Invoice Title"), max_length=50)
invoice_number = models.CharField(_("Invoice Number"), max_length=50)
from_address = models.ForeignKey(
Address,
related_name="invoice_from_address",
on_delete=models.SET_NULL,
null=True,
)
to_address = models.ForeignKey(
Address, related_name="invoice_to_address", on_delete=models.SET_NULL, null=True
)
name = models.CharField(_("Name"), max_length=100)
email = models.EmailField(_("Email"))
assigned_to = models.ManyToManyField(User, related_name="invoice_assigned_to")
# quantity is the number of hours worked
quantity = models.PositiveIntegerField(default=0)
# rate is the rate charged
rate = models.DecimalField(default=0, max_digits=12, decimal_places=2)
# total amount is product of rate and quantity
total_amount = models.DecimalField(
blank=True, null=True, max_digits=12, decimal_places=2
)
tax = models.DecimalField(blank=True, null=True, max_digits=12, decimal_places=2)
currency = models.CharField(
max_length=3, choices=CURRENCY_CODES, blank=True, null=True
)
phone = PhoneNumberField(null=True, blank=True)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(
User, related_name="invoice_created_by", on_delete=models.SET_NULL, null=True
)
amount_due = models.DecimalField(
blank=True, null=True, max_digits=12, decimal_places=2
)
amount_paid = models.DecimalField(
blank=True, null=True, max_digits=12, decimal_places=2
)
is_email_sent = models.BooleanField(default=False)
status = models.CharField(choices=INVOICE_STATUS, max_length=15, default="Draft")
details = models.TextField(_("Details"), null=True, blank=True)
due_date = models.DateField(blank=True, null=True)
accounts = models.ManyToManyField(Account, related_name="accounts_invoices")
teams = models.ManyToManyField(Teams, related_name="invoices_teams")
org = models.ForeignKey(
Org, on_delete=models.SET_NULL, null=True, blank=True
)
class Meta:
"""Meta definition for Invoice."""
verbose_name = "Invoice"
verbose_name_plural = "Invoices"
def __str__(self):
"""Unicode representation of Invoice."""
return self.invoice_number
def save(self, *args, **kwargs):
if not self.invoice_number:
self.invoice_number = self.invoice_id_generator()
while Invoice.objects.filter(invoice_number=self.invoice_number).exists():
self.invoice_number = self.invoice_id_generator(
prev_invoice_number=self.invoice_number
)
super(Invoice, self).save(*args, **kwargs)
def invoice_id_generator(self, prev_invoice_number=None):
if prev_invoice_number:
prev_invoice_number += 1
return prev_invoice_number
date = datetime.datetime.now().strftime("%d%m%Y")
return int(date + "0001")
def formatted_total_amount(self):
return self.currency + " " + str(self.total_amount)
def formatted_rate(self):
return str(self.rate) + " " + self.currency
def formatted_total_quantity(self):
return str(self.quantity) + " " + "Hours"
    def is_draft(self):
        return self.status == "Draft"
    def is_sent(self):
        return self.status == "Sent" and not self.is_email_sent
    def is_resent(self):
        return self.status == "Sent" and self.is_email_sent
    def is_paid_or_cancelled(self):
        return self.status in ["Paid", "Cancelled"]
@property
def created_on_arrow(self):
return arrow.get(self.created_on).humanize()
@property
def get_team_users(self):
team_user_ids = list(self.teams.values_list("users__id", flat=True))
return User.objects.filter(id__in=team_user_ids)
@property
def get_team_and_assigned_users(self):
team_user_ids = list(self.teams.values_list("users__id", flat=True))
assigned_user_ids = list(self.assigned_to.values_list("id", flat=True))
user_ids = team_user_ids + assigned_user_ids
return User.objects.filter(id__in=user_ids)
@property
def get_assigned_users_not_in_teams(self):
team_user_ids = list(self.teams.values_list("users__id", flat=True))
assigned_user_ids = list(self.assigned_to.values_list("id", flat=True))
user_ids = set(assigned_user_ids) - set(team_user_ids)
return User.objects.filter(id__in=list(user_ids))
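    # Note: total_amount is stored rather than derived; per the field comments
    # above it is presumably set by the calling code, e.g. (illustrative only):
    #
    #   invoice.total_amount = invoice.rate * invoice.quantity
    #   invoice.save()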
class InvoiceHistory(models.Model):
"""Model definition for InvoiceHistory.
    This model is used to track/keep a record of the updates made to the original invoice object."""
INVOICE_STATUS = (
("Draft", "Draft"),
("Sent", "Sent"),
("Paid", "Paid"),
("Pending", "Pending"),
("Cancelled", "Cancel"),
)
invoice = models.ForeignKey(
Invoice, on_delete=models.CASCADE, related_name="invoice_history"
)
invoice_title = models.CharField(_("Invoice Title"), max_length=50)
invoice_number = models.CharField(_("Invoice Number"), max_length=50)
from_address = models.ForeignKey(
Address,
related_name="invoice_history_from_address",
on_delete=models.SET_NULL,
null=True,
)
to_address = models.ForeignKey(
Address,
related_name="invoice_history_to_address",
on_delete=models.SET_NULL,
null=True,
)
name = models.CharField(_("Name"), max_length=100)
email = models.EmailField(_("Email"))
assigned_to = models.ManyToManyField(
User, related_name="invoice_history_assigned_to"
)
# quantity is the number of hours worked
quantity = models.PositiveIntegerField(default=0)
# rate is the rate charged
rate = models.DecimalField(default=0, max_digits=12, decimal_places=2)
# total amount is product of rate and quantity
total_amount = models.DecimalField(
blank=True, null=True, max_digits=12, decimal_places=2
)
currency = models.CharField(
max_length=3, choices=CURRENCY_CODES, blank=True, null=True
)
phone = PhoneNumberField(null=True, blank=True)
created_on = models.DateTimeField(auto_now_add=True)
# created_by = models.ForeignKey(
# User, related_name='invoice_history_created_by',
# on_delete=models.SET_NULL, null=True)
updated_by = models.ForeignKey(
User,
related_name="invoice_history_created_by",
on_delete=models.SET_NULL,
null=True,
)
amount_due = models.DecimalField(
blank=True, null=True, max_digits=12, decimal_places=2
)
amount_paid = models.DecimalField(
blank=True, null=True, max_digits=12, decimal_places=2
)
is_email_sent = models.BooleanField(default=False)
status = models.CharField(choices=INVOICE_STATUS, max_length=15, default="Draft")
# details or description here stores the fields changed in the original invoice object
details = models.TextField(_("Details"), null=True, blank=True)
due_date = models.DateField(blank=True, null=True)
def __str__(self):
"""Unicode representation of Invoice."""
return self.invoice_number
class Meta:
ordering = ("created_on",)
def formatted_total_amount(self):
return self.currency + " " + str(self.total_amount)
def formatted_rate(self):
return str(self.rate) + " " + self.currency
def formatted_total_quantity(self):
return str(self.quantity) + " " + "Hours"
@property
def created_on_arrow(self):
return arrow.get(self.created_on).humanize()
|
|
# examples of python
print [i for i in range(10) if i % 2 == 0] # [0, 2, 4, 6, 8]
print "---- Tuples"
tup1 = ('physics', 'chemistry', 1997, 2000);
print "tup1[1:5]: ", tup1[1:5]
tuple1, tuple2 = (123, 'xyz'), (456, 'abc')
print "compare tuples: %s " % cmp(tuple1, tuple2); # -1 different
tuple1, tuple2 = (123, 'xyz', 'zara', 'abc'), (456, 700, 200)
print "Max value element : ", max(tuple1); # zara
print "Max value element : ", min(tuple2); # 200
a = (1, 2, 3)
b = a + (4, 5, 6)
c = b[1:]
print "print c tuple:", c # print c tuple: (2, 3, 4, 5, 6)
aList = [123, 'xyz', 'zara', 'abc'];
print type(aList) # <type 'list'>
aTuple = tuple(aList) # convert the list into a tuple
print type(aTuple) # <type 'tuple'>
for i, v in enumerate(['tic', 'tac', 'toe']):
print i, v
print "singleton"
singleton = 'hello',
print type(singleton) # <type 'tuple'>
print "Dictionaries"
tel = {'jack': 4098, 'sape': 4139}
tel['guido'] = 4127
print "dictionary:", tel # {'sape': 4139, 'jack': 4098, 'guido': 4127}
knights = {'gallahad': 'the pure', 'robin': 'the brave'}
for k, v in knights.iteritems():
print k, v
print "Array"
myArray = ['d', [1.0, 2.0, 3.14], 3 ,u'hello \u2641']
myArray.insert(2, 'x')
print "myArray:", myArray
print "myArray:", type(myArray) # myArray: ['d', [1.0, 2.0, 3.14], 'x', 3, u'hello \u2641']
from array import array
myArray2 = array('d', [1.0, 2.0, 3.14])
print "myArray2:", myArray2 # myArray2: array('d', [1.0, 2.0, 3.14])
myArray2.reverse()
print "myArray2 reverse:", myArray2 # myArray2: array('d', [3.14, 2.0, 1.0])
print "myArray2 to string:", myArray2.tostring() # raw byte representation
print "myArray2 to list:", myArray2.tolist() # [3.14, 2.0, 1.0]
print "----List"
a = [66.25, 333, 333, 1, 1234.5]
print a.count(333), a.count(66.25), a.count('x') # 2 1 0
a.insert(2, -1)
a.append(333) # [66.25, 333, -1, 333, 1, 1234.5, 333]
print "Index: %s " % a.index(333) # 1
a.remove(333)
print "Remove 333: %s " % a #[66.25, -1, 333, 1, 1234.5, 333]
a.reverse()
print a # [333, 1234.5, 1, 333, -1, 66.25]
a.sort()
print a #[-1, 1, 66.25, 333, 333, 1234.5]
a.pop() # Remove and return the item at the given position; with no index, the last item is removed and returned
i=iter('abc')
print i.next() # a
print i.next() # b
class MyIterator(object):
def __init__(self, step):
self.step = step
def next(self):
"""Returns the next element."""
if self.step == 0:
raise StopIteration
self.step -= 1
return self.step
def __iter__(self):
"""Returns the iterator itself."""
return self
for el in MyIterator(4):
print el # 3 2 1 0
print 20*"-" + " Fibonacci" + "-"*20
def fibonacci():
a, b = 0, 1
while True:
yield b
a, b = b, a + b
fib = fibonacci()
#for i in range(1,20):
# print fib.next()
print [fib.next() for i in range(10)]
print 20*"-" + " Yield " + "-"*20
#Yield is a keyword that is used like return, except the function will return a generator
def my_generator():
try:
yield 'something'
except ValueError:
yield 'dealing with the exception'
    finally: # runs on any close() or throw() call as well; recommended place to do cleanup
print "ok let's clean"
myGen= my_generator()
#myGen.next() # run in the end
#myGen.throw()
#myGen.close()
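# A minimal sketch (an addition, not in the original notes) showing how next(),
# throw() and close() drive my_generator(); note throw() needs an exception type,
# unlike the commented-out call above.
gen2 = my_generator()
print gen2.next()             # something
print gen2.throw(ValueError)  # dealing with the exception
gen2.close()                  # ok let's clean  (the finally block runs)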
import itertools
print 20*"-" + " Itertools " + "-"*20
horses = [1, 2, 3, 4]
races = itertools.permutations(horses)
print(races) # print object
print(list(itertools.permutations(horses))) # print list
"""
import multitask
import time
def coroutine_1():
for i in range(3):
print 'c1'
yield i
def coroutine_2():
for i in range(3):
print 'c2'
yield i
multitask.add(coroutine_1())
multitask.add(coroutine_2())
multitask.run()
"""
myIter = (x * x for x in range(10) if x % 2 == 0)
for i in myIter:
print i # 0,4, 16, 36, 64
print "Decorators to make function and method wrapping (a function that receives a function and returns an enhanced one)"
class WhatFor(object):
@classmethod
def it(cls):
print 'work with %s' % cls
#it = classmethod(it)
@staticmethod
def uncommon():
print 'I could be a global function'
myWhatFor = WhatFor()
myWhatFor.it()
myWhatFor.uncommon()
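# Hedged sketch (an addition, not part of the original notes): the plain
# function-wrapping decorator described above - it receives a function and
# returns an enhanced one.
def shout(func):
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return wrapper
@shout
def greet(name):
    return 'hello %s' % name
print greet('world') # HELLO WORLD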
print "Meta Descriptor - use one or more methods in the hosting class to perform the task"
class Chainer(object):
def __init__(self, methods, callback=None):
self._methods = methods
self._callback = callback
def __get__(self, instance, klass):
if instance is None:
# only for instances
return self
results = []
for method in self._methods:
results.append(method(instance))
if self._callback is not None:
if not self._callback(instance,method,results):
break
return results
class TextProcessor(object):
def __init__(self, text):
self.text = text
def normalize(self):
if isinstance(self.text, list):
self.text = [t.lower() for t in self.text]
else:
self.text = self.text.lower()
def split(self):
if not isinstance(self.text, list):
self.text = self.text.split()
def treshold(self):
if not isinstance(self.text, list):
if len(self.text) < 2:
self.text = ''
self.text = [w for w in self.text if len(w) > 2]
def logger(instance, method, results):
print 'calling %s' % method.__name__
return True
def add_sequence(name, sequence):
setattr(TextProcessor, name,
Chainer([getattr(TextProcessor, n) for n in sequence], logger))
add_sequence('simple_clean', ('split', 'treshold'))
my = TextProcessor(' My Taylor is Rich ')
my.simple_clean
print my.text # ['Taylor', 'Rich']
print "-"*30 + "Properties"
class MyClass(object):
def __init__(self):
self._my_secret_thing = 1
def _i_get(self):
return self._my_secret_thing
def _i_set(self, value):
self._my_secret_thing = value
def _i_delete(self):
print 'neh!'
my_thing = property(_i_get, _i_set, _i_delete, 'the thing')
myclass = MyClass()
print myclass.my_thing # 1
myclass.my_thing=3
print myclass.my_thing # 3
del myclass.my_thing # neh!
#help(myclass) # Methods defined here: __init__, __dict__, __weakref__, my_thing
print "__new__ is meta-constructor. It is called every time an object has to be instantiated by the class factory"
class MyClass2(object):
def __new__(cls):
print '__new__ called'
return object.__new__(cls) # default factory
def __init__(self):
print '__init__ called'
self.a = 1
insMyClass2 = MyClass2() # __new__ called
# __init__ called
#help(insMyClass2)
class MyClass3(object):
def __new__(cls):
return [1,2,3]
myMyClass3 = MyClass3()
print myMyClass3 # [1,2,3]
print myMyClass3.__new__ # object
#help(myMyClass3.__new__ ) #
myMyClass3.__add__
print myMyClass3 # [1,2,3]
#help(myMyClass3) #
# Meta-programming is very powerful, but remember that it obfuscates the readability of the class design
print "*"*30 + "Super"
class Card(object):
def __init__( self, rank, suit, hard, soft ):
self.rank= rank
self.suit= suit
self.hard= hard
self.soft= soft
class NumberCard( Card ):
def __init__( self, rank, suit ):
        super(NumberCard, self).__init__( str(rank), suit, rank, rank )
def foo(*positional, **kwargs):
print "Positional:", positional
print "Keywords:", kwargs
for key, value in kwargs.iteritems():
print key, value
foo('one', 'two', 'three') # Positional: ('one', 'two', 'three')
# Keywords: {}
foo(a='one', b='two', c='three') # Positional: ()
# Keywords: {'a': 'one', 'c': 'three', 'b': 'two'}
foo('one','two',c='three',d='four') # Positional: ('one', 'two')
# Keywords: {'c': 'three', 'd': 'four'}
def func(a='a', b='b', c='c', **kwargs):
print 'a:%s, b:%s, c:%s' % (a, b, c)
func(**{'a' : 'z', 'b':'q'}) # a:z, b:q, c:c
func(a = 'z', b = 'q') # a:z, b:q, c:c
class MyCard:
def __init__( self, dealer_card, *cards ):
self.dealer_card= dealer_card
self.cards= list(cards)
def __str__( self ):
return "-".join( map(str, self.cards) )
def __repr__(self):
return 'reprruda'
myCard = MyCard(123,'456', 2345, 'abc')
print myCard # 456-2345-abc
print myCard.__repr__ #<bound method MyCard.__repr__ of reprruda>
print hash(myCard) # 8740707558329
print "------------------------ super --------------------"
class Card2(object):
insure= False
def __init__( self, rank, suit, hard, soft ):
self.rank= rank
self.suit= suit
self.hard= hard
self.soft= soft
def __repr__( self ):
return "{__class__.__name__}(suit={suit!r}, rank={rank!r})". format(__class__=self.__class__, **self.__dict__)
def __str__( self ):
return "{rank}{suit}".format(**self.__dict__)
def __eq__( self, other ):
return self.suit == other.suit and self.rank == other.rank
def __hash__( self ):
return hash(self.suit) ^ hash(self.rank)
class AceCard2( Card2 ):
insure= True
def __init__( self, rank, suit ):
super(AceCard2, self).__init__( "A", suit, 1, 11 )
c1 = AceCard2( 1, 'qw' )
c2 = AceCard2( 1, 'qw' )
print "Super class card1: %s \t card2: %s" % (id(c1), id(c2) )
print "Print if inside: ", ( c1 is c2 )
print "------------------------ __lt__ --------------------"
class BlackJackCard_p(object):
def __init__( self, rank, suit ):
self.rank= rank
self.suit= suit
def __lt__( self, other ):
print( "Compare {0} < {1}".format( self, other ) )
return self.rank < other.rank
def __str__( self ):
return "{rank}{suit}".format( **self.__dict__ )
two = BlackJackCard_p( 2, 'l' )
three = BlackJackCard_p( 3, 'l' )
print two < three # True
print two > three # False
class BlackJackCard(object):
def __init__( self, rank, suit, hard, soft ):
self.rank= rank
self.suit= suit
self.hard= hard
self.soft= soft
def __lt__( self, other ):
if not isinstance( other, BlackJackCard ): return NotImplemented
return self.rank < other.rank
def __le__( self, other ):
try:
return self.rank <= other.rank
except AttributeError:
return NotImplemented
def __gt__( self, other ):
if not isinstance( other, BlackJackCard ): return NotImplemented
return self.rank > other.rank
def __ge__( self, other ):
if not isinstance( other, BlackJackCard ): return NotImplemented
return self.rank >= other.rank
def __eq__( self, other ):
if not isinstance( other, BlackJackCard ): return NotImplemented
return self.rank == other.rank and self.suit == other.suit
def __ne__( self, other ):
if not isinstance( other, BlackJackCard ): return NotImplemented
        return self.rank != other.rank or self.suit != other.suit
def __str__( self ):
return "{rank}{suit}".format( **self.__dict__ )
two2 = BlackJackCard( 2, 'l','h','s' )
three2 = BlackJackCard( 3, 'l','h','s' )
print two2 < three2 # True
print two2 == three2 # False
#help(BlackJackCard)
print "This create immutable objects"
class BlackJackCard3( tuple ):
def __getattr__( self, name ):
return self[{'rank':0, 'suit':1, 'hard':2 ,'soft':3}[name]]
def __setattr__( self, name, value ):
raise AttributeError
d = BlackJackCard3( ('A', 'l', 1, 11) )
print d.rank # 'A'
print d.suit # 'l'
#d.hard= 2 # error
""" not working properly
class BlackJackCard4( tuple ):
def __new__( cls, rank, suit, hard, soft ):
return super().__new__( cls, (rank, suit, hard, soft) )
def __getattr__( self, name ):
return self[{'rank':0, 'suit':1, 'hard':2 ,'soft':3}[name]]
def __setattr__( self, name, value ):
raise AttributeError
d = BlackJackCard4( 'A', 'l', 1, 11 )
print d.rank # 'A'
print d.suit # 'l
"""
class Rectangle(object):
def __init__(self, width, height):
self.width = width
self.height = height
self.area = width * height
class Square(Rectangle):
def __init__(self, length):
super(Square, self).__init__(length, length) # super() executes fine now
r = Rectangle(4,5)
print r.area # 20
s = Square(5)
print s.area # 25
# the same as above without super
class Rectangle2(object):
def __init__(self, width, height):
self.width = width
self.height = height
self.area = width * height
class Square2(Rectangle2):
def __init__(self, length):
        Rectangle2.__init__(self, length, length) # call the parent __init__ directly instead of super()
r = Rectangle2(4,5)
print r.area # 20
s = Square2(5)
print s.area # 25
class entryExit(object):
def __init__(self, fu):
self.fu = fu
def __call__(self):
print "Entering", self.fu.__name__
self.fu()
print "Exited", self.fu.__name__
@entryExit
def func1():
print "inside func1()"
@entryExit
def func2():
print "inside func2()"
func1() # Entering func1
# inside func1()
# Exited func1
#func2()
class RateTimeDistance( dict ):
def __init__( self, *args, **kw ):
super(RateTimeDistance, self).__init__( *args, **kw )
self._solve()
def __getattr__( self, name ):
return self.get(name,None)
def __setattr__( self, name, value ):
self[name]= value
self._solve()
def __dir__( self ):
return list(self.keys())
def _solve(self):
if self.rate is not None and self.time is not None:
self['distance'] = self.rate*self.time
elif self.rate is not None and self.distance is not None:
self['time'] = self.distance / self.rate
elif self.time is not None and self.distance is not None:
self['rate'] = self.distance / self.time
rtd= RateTimeDistance( rate=6.3, time=8.25, distance=None )
print( "Rate={rate}, Time={time}, Distance={distance}".format(**rtd ) ) #Rate=6.3, Time=8.25, Distance=51.975
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
class TestListInterface(object):
config = """
templates:
global:
disable: [seen]
tasks:
list_get:
entry_list: test_list
list_1_get:
entry_list: list 1
list_2_get:
entry_list: list 2
test_list_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: test_list
list_1_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: list 1
list_2_add:
mock:
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
accept_all: yes
list_add:
- entry_list: list 2
test_multiple_list_add:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
accept_all: yes
list_add:
- entry_list: list 1
- entry_list: list 2
test_list_accept_with_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
test_list_accept_without_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
remove_on_match: no
test_multiple_list_accept_with_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: list 1
- entry_list: list 2
test_multiple_list_accept_without_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 2', url: "http://mock.url/file2.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: list 1
- entry_list: list 2
remove_on_match: no
test_list_remove:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
accept_all: yes
list_remove:
- entry_list: test_list
test_list_reject:
mock:
- {title: 'title 1', url: "http://mock.url/file1.torrent"}
- {title: 'title 3', url: "http://mock.url/file3.torrent"}
list_match:
from:
- entry_list: test_list
action: reject
add_for_list_queue:
mock:
- {title: 'The 5th Wave', url: "", imdb_id: "tt2304933"}
- {title: 'Drumline', url: "", imdb_id: "tt0303933"}
accept_all: yes
list_add:
- movie_list: test_list_queue
test_list_queue:
mock:
- {title: 'Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD', url: "http://mock.url/Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD.torrent", imdb_id: "tt0303933"}
- {title: 'Drumline 2002 720p BluRay DTS-HD MA 5 1 x264-FuzerHD', url: "http://mock.url/Drumline 2002 720p BluRay DTS-HD MA 5 1 x264-FuzerHD.torrent", imdb_id: "tt0303933"}
- {title: 'Drumline 2002 DVDRip x264-FuzerHD', url: "http://mock.url/Drumline 2002 DVDRip x264-FuzerHD.torrent", imdb_id: "tt0303933"}
list_match:
from:
- movie_list: test_list_queue
single_match: yes
get_for_list_queue:
movie_list: test_list_queue
test_list_clear_start:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
test_list_clear_exit:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
phase: exit
test_list_clear_input:
entry_list: test_list
list_clear:
what:
- entry_list: test_list
phase: input
"""
def test_list_add(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
def test_multiple_list_add(self, execute_task):
task = execute_task('test_multiple_list_add')
assert len(task.entries) == 2
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 2
def test_list_accept_with_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_accept_with_remove')
assert len(task.all_entries) == 3
assert len(task.accepted) == 2
task = execute_task('list_get')
assert len(task.entries) == 0
def test_list_accept_without_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_accept_without_remove')
assert len(task.all_entries) == 3
assert len(task.accepted) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
def test_multiple_list_accept_with_remove(self, execute_task):
task = execute_task('list_1_add')
assert len(task.entries) == 2
task = execute_task('list_2_add')
assert len(task.entries) == 1
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
task = execute_task('test_multiple_list_accept_with_remove')
assert len(task.accepted) == 3
task = execute_task('list_1_get')
assert len(task.entries) == 0
task = execute_task('list_2_get')
assert len(task.entries) == 0
def test_multiple_list_accept_without_remove(self, execute_task):
task = execute_task('list_1_add')
assert len(task.entries) == 2
task = execute_task('list_2_add')
assert len(task.entries) == 1
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
task = execute_task('test_multiple_list_accept_without_remove')
assert len(task.accepted) == 3
task = execute_task('list_1_get')
assert len(task.entries) == 2
task = execute_task('list_2_get')
assert len(task.entries) == 1
def test_list_remove(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_remove')
assert len(task.accepted) == 1
task = execute_task('list_get')
assert len(task.entries) == 1
def test_list_reject(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 2
task = execute_task('test_list_reject')
assert len(task.rejected) == 1
def test_list_queue(self, execute_task):
        # The list queue test is based on movie_list rather than entry_list, since
        # entry_list matching is much stricter and does not fit this scenario
task = execute_task('add_for_list_queue')
assert len(task.entries) == 2
task = execute_task('test_list_queue')
assert len(task.accepted) == 1
assert task.find_entry(title="Drumline 2002 1080p BluRay DTS-HD MA 5 1 x264-FuzerHD")
task = execute_task('get_for_list_queue')
assert len(task.entries) == 1
def test_list_clear_start(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_start')
assert len(task.entries) == 0
def test_list_clear_exit(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_exit')
assert len(task.entries) == 2
task = execute_task('list_get')
assert len(task.entries) == 0
def test_list_clear_input(self, execute_task):
task = execute_task('test_list_add')
assert len(task.entries) == 2
task = execute_task('test_list_clear_input')
assert len(task.entries) == 0
|
|
# Copyright (c) 2019-2020, Manfred Moitzi
# License: MIT License
import math
from ezdxf.math import ConstructionBox, ConstructionLine
class TestTextBox:
def test_defaults(self):
box = ConstructionBox()
assert box.center.isclose((0, 0))
assert box.width == 1
assert box.height == 1
assert box.angle == 0
assert box[0].isclose((-0.5, -0.5))
assert box[1].isclose((+0.5, -0.5))
assert box[2].isclose((+0.5, +0.5))
assert box[3].isclose((-0.5, +0.5))
def test_init(self):
box = ConstructionBox(center=(5, 0.5), width=10, height=1, angle=0)
assert box.center == (5, 0.5)
p1, p2, p3, p4 = box.corners
assert p1.isclose((0, 0))
assert p2.isclose((10, 0))
assert p3.isclose((10, 1))
assert p4.isclose((0, 1))
def test_from_points(self):
box = ConstructionBox.from_points((2, 1), (4, 5))
assert box.center == (3, 3)
assert box.width == 2
assert box.height == 4
assert box.angle == 0
# reverse order, same result
box = ConstructionBox.from_points((4, 5), (2, 1))
assert box.center == (3, 3)
assert box.width == 2
assert box.height == 4
assert box.angle == 0
def test_init_angle_90(self):
box = ConstructionBox(center=(0.5, 5), width=10, height=1, angle=90)
assert box.center == (0.5, 5)
p1, p2, p3, p4 = box.corners
assert p1.isclose((1, 0))
assert p2.isclose((1, 10))
assert p3.isclose((0, 10))
assert p4.isclose((0, 0))
def test_set_center(self):
box = ConstructionBox()
box.center = (0.5, 0.5)
assert box.center.isclose((0.5, 0.5))
assert box[0].isclose((0, 0))
def test_set_width(self):
box = ConstructionBox()
box.width = -3
assert box.width == 3
assert box.center.isclose((0, 0))
assert box[0].isclose((-1.5, -0.5))
assert box[2].isclose((+1.5, +0.5))
def test_set_height(self):
box = ConstructionBox()
box.height = -4
assert box.height == 4
assert box.center.isclose((0, 0))
assert box[0].isclose((-0.5, -2))
assert box[2].isclose((+0.5, +2))
def test_incircle_radius(self):
box = ConstructionBox(width=3, height=7)
assert box.incircle_radius == 1.5
box = ConstructionBox(width=4, height=2)
assert box.incircle_radius == 1
def test_circum_circle_radius(self):
box = ConstructionBox(width=3, height=7)
r = math.hypot(1.5, 3.5)
assert math.isclose(box.circumcircle_radius, r)
box = ConstructionBox(width=17, height=1)
r = math.hypot(0.5, 8.5)
assert math.isclose(box.circumcircle_radius, r)
def test_set_angle(self):
box = ConstructionBox()
box.width = 3
box.angle = 90
assert box.angle == 90
assert box.center.isclose((0, 0))
assert box[0].isclose((+0.5, -1.5))
assert box[2].isclose((-0.5, +1.5))
def test_translate(self):
box = ConstructionBox()
box.translate(3, 4)
assert box.center.isclose((3, 4))
assert box[0].isclose((2.5, 3.5))
assert box[2].isclose((3.5, 4.5))
def test_expand(self):
box = ConstructionBox()
box.expand(2, 3)
assert box.width == 3
assert box.height == 4
assert box[0].isclose((-1.5, -2))
assert box[2].isclose((+1.5, +2))
def test_scale(self):
box = ConstructionBox(width=3, height=4)
box.scale(1.5, 2.5)
assert box.width == 4.5
assert box.height == 10
assert box[0].isclose((-2.25, -5))
assert box[2].isclose((+2.25, +5))
def test_intersect_0(self):
box = ConstructionBox(center=(5, 0.5), width=10, height=1, angle=0)
line = ConstructionLine((0, 2), (1, 2)) # above box
assert len(box.intersect(line)) == 0
def test_intersect_1(self):
box = ConstructionBox(center=(5, 0.5), width=10, height=1, angle=0)
line = ConstructionLine((10, 1), (11, 2)) # touch one corner
result = box.intersect(line)
assert len(result) == 1
assert result[0].isclose((10, 1))
def test_intersect_2(self):
box = ConstructionBox(center=(5, 0.5), width=10, height=1, angle=0)
line = ConstructionLine((5, -1), (5, 2))
result = box.intersect(line)
assert len(result) == 2
assert result[0].isclose((5, 0))
assert result[1].isclose((5, 1))
def test_is_inside_horiz_box(self):
box = ConstructionBox()
assert box.is_inside((0, 0)) is True
# on border is inside
assert box.is_inside((0.5, 0.5)) is True
assert box.is_inside(box[0]) is True
assert box.is_inside(box[1]) is True
assert box.is_inside(box[2]) is True
assert box.is_inside(box[3]) is True
# outside
assert box.is_inside((1, 1)) is False
assert box.is_inside((-1, -1)) is False
assert box.is_inside((-1, +1)) is False
assert box.is_inside((+1, -1)) is False
# outside but on extension lines
assert box.is_inside((1, 0.5)) is False
assert box.is_inside((-1, -0.5)) is False
assert box.is_inside((-1, 0.5)) is False
assert box.is_inside((+1, -0.5)) is False
def test_is_inside_rotated_box(self):
box = ConstructionBox(angle=67)
assert box.is_inside((0, 0)) is True
# on border is inside
assert box.is_inside(box[0]) is True
assert box.is_inside(box[1]) is True
assert box.is_inside(box[2]) is True
assert box.is_inside(box[3]) is True
# outside
assert box.is_inside((1, 1)) is False
assert box.is_inside((-1, -1)) is False
assert box.is_inside((-1, +1)) is False
assert box.is_inside((+1, -1)) is False
def test_any_corner_inside(self):
box1 = ConstructionBox()
# one touching corner
box2 = ConstructionBox(center=(1, 1))
assert box1.is_any_corner_inside(box2) is True
assert box2.is_any_corner_inside(box1) is True
# no overlapping
box2 = ConstructionBox(center=(1.01, 1.01))
assert box1.is_any_corner_inside(box2) is False
assert box2.is_any_corner_inside(box1) is False
# one point of box2 inside of box1
box2 = ConstructionBox(center=(0.5404, 0.5404), angle=45)
assert box1.is_any_corner_inside(box2) is False
assert box2.is_any_corner_inside(box1) is True
# one point of box2 inside of box1
box2 = ConstructionBox(center=(1.177, 0.5152), angle=45)
assert box2.is_any_corner_inside(box1) is True
# no overlapping
box2 = ConstructionBox(center=(1.2091, 0.4669), angle=45)
assert box2.is_any_corner_inside(box1) is False
def test_overlapping_boxes(self):
box1 = ConstructionBox()
assert box1.is_overlapping(box1) is True
box2 = ConstructionBox(width=2, height=2)
# box1 complete inside of box2
assert box1.is_overlapping(box2) is True
assert box2.is_overlapping(box1) is True
# one touching corner
box2 = ConstructionBox(center=(1, 1))
assert box1.is_overlapping(box2) is True
assert box2.is_overlapping(box1) is True
# no overlapping
box2 = ConstructionBox(center=(1.2091, 0.4669), angle=45)
assert box1.is_overlapping(box2) is False
assert box2.is_overlapping(box1) is False
# one point of box2 inside of box1
box2 = ConstructionBox(center=(0.5404, 0.5404), angle=45)
assert box1.is_overlapping(box2) is True
assert box2.is_overlapping(box1) is True
def test_overlapping_crossing_boxes(self):
box1 = ConstructionBox()
# overlapping boxes with corners inside of each other
box2 = ConstructionBox(width=0.1, height=3)
assert box1.is_any_corner_inside(box2) is False
assert box2.is_any_corner_inside(box1) is False
assert box1.is_overlapping(box2) is True
assert box2.is_overlapping(box1) is True
# center2 outside of box1
box2 = ConstructionBox(center=(0.3, 0.708), width=0.18, height=2.88)
assert box1.is_overlapping(box2) is True
assert box2.is_overlapping(box1) is True
# center2 outside of box1, no overlapping
box2 = ConstructionBox(center=(0.6427, 0.6563), width=0.18, height=2.88)
assert box1.is_overlapping(box2) is False
assert box2.is_overlapping(box1) is False
# cutting corner of box1
box2 = ConstructionBox(
center=(0.2639, 0.5721), width=0.18, height=2.88, angle=45
)
assert box1.is_overlapping(box2) is True
assert box2.is_overlapping(box1) is True
def test_issue_2020_01_30():
box = ConstructionBox((0.22499999999999978, -6.15), 1.0, 0.4, 270.0)
start = (-1.4349395363018706e-16, -7.25)
end = (-2.1084952758329149e-16, -6.15)
assert box.is_inside(start) is False
assert box.is_inside(end) is False
|
|
from py2neo import Graph
from django.test import TestCase
from cognitive.apps.atlas.query import (
Node, Task, Condition, Concept, Contrast, search, get, cypher_node,
cypher_relation
)
from cognitive.settings import graph
class NodeTest(TestCase):
def setUp(self):
self.node = Node()
self.node1 = None
self.node2 = None
self.node_name = "test_name"
self.node_properties = {'test_key': 'test_value'}
self.graph = Graph("http://graphdb:7474/db/data/")
def tearDown(self):
if self.node1:
self.node1.delete_related()
self.node1.delete()
if self.node2:
self.node2.delete_related()
self.node2.delete()
def test_create(self):
self.node1 = self.node.create(
name=self.node_name,
properties=self.node_properties)
self.assertEqual(self.node1.properties['name'], self.node_name)
self.assertEqual(
self.node1.properties['test_key'],
self.node_properties['test_key'])
def test_count_delete(self):
count = self.node.count()
self.node1 = self.node.create(
name=self.node_name,
properties=self.node_properties)
self.assertEqual(count + 1, self.node.count())
def test_link(self):
self.node1 = self.node.create(
name=self.node_name,
properties=self.node_properties)
self.node2 = self.node.create(
name=self.node_name,
properties=self.node_properties)
relation = self.node.link(
self.node1.properties['id'],
self.node2.properties['id'],
"GENERIC")
self.assertEqual(
relation.start_node.properties['id'],
self.node1.properties['id'])
self.assertEqual(
relation.end_node.properties['id'],
self.node2.properties['id'])
def test_cypher(self):
self.node1 = self.node.create(
name=self.node_name,
properties=self.node_properties)
self.node2 = self.node.create(
name=self.node_name,
properties=self.node_properties)
relation = self.node.link(
self.node1.properties['id'],
self.node2.properties['id'],
"GENERIC")
result = self.node.cypher(self.node1.properties['id'])
self.assertIn("generic", result['nodes'][0])
self.assertIn(str(self.node1._id), result['nodes'][0])
def test_get_graph(self):
self.node1 = self.node.create(
name=self.node_name,
properties=self.node_properties)
self.node2 = self.node.create(
name=self.node_name,
properties=self.node_properties)
relation = self.node.link(
self.node1.properties['id'],
self.node2.properties['id'],
"GENERIC")
result = self.node.get_graph(self.node1.properties['id'])
self.assertEqual(
result['nodes'][0]['name'],
self.node1.properties['name'])
self.assertEqual(
result['nodes'][1]['name'],
self.node2.properties['name'])
self.assertEqual("GENERIC", result['links'][0]['type'])
def test_all(self):
pass
    def test_search_all_fields(self):
pass
class NodeChildrenTest(TestCase):
def setUp(self):
self.task = Task()
self.node_name = "test_name"
self.node_properties = {'test_key': 'test_value'}
self.graph = Graph("http://graphdb:7474/db/data/")
self.task1 = self.task.create(
name=self.node_name,
properties=self.node_properties)
self.task2 = self.task.create(
name=self.node_name,
properties=self.node_properties)
condition = Condition()
self.cond = condition.create(
name=self.node_name,
properties=self.node_properties)
contrast = Contrast()
self.cont = contrast.create(
name=self.node_name,
properties=self.node_properties)
concept = Concept()
self.con = concept.create(
name=self.node_name,
properties=self.node_properties)
self.task.link(
self.task1.properties['id'],
self.cond.properties['id'],
"HASCONDITION",
endnode_type='condition')
self.task.link(
self.task1.properties['id'],
self.con.properties['id'],
"ASSERTS",
endnode_type='concept')
condition.link(
self.cond.properties['id'],
self.cont.properties['id'],
"HASCONTRAST",
endnode_type='contrast')
concept.link(
self.con.properties['id'],
self.cont.properties['id'],
"MEASUREDBY",
endnode_type='contrast')
def tearDown(self):
self.task1.delete_related()
self.task2.delete_related()
self.cond.delete_related()
self.cont.delete_related()
self.con.delete_related()
self.task1.delete()
self.task2.delete()
self.cond.delete()
self.cont.delete()
self.con.delete()
def test_task_get_contrasts(self):
contrasts = self.task.get_contrasts(self.task1.properties['id'])
self.assertEqual(len(contrasts), 1)
self.assertEqual(
contrasts[0]['contrast_id'],
self.cont.properties['id'])
def test_task_get_conditions(self):
conditions = self.task.get_conditions(self.task1.properties['id'])
self.assertEqual(len(conditions), 1)
self.assertEqual(
conditions[0]['condition_id'],
self.cond.properties['id'])
def test_contrast_get_conditions(self):
contrast = Contrast()
conditions = contrast.get_conditions(self.cont.properties['id'])
self.assertEqual(len(conditions), 1)
self.assertEqual(
conditions[0]['condition_id'],
self.cond.properties['id'])
def test_contrast_get_concepts(self):
contrast = Contrast()
concepts = contrast.get_concepts(self.cont.properties['id'])
self.assertEqual(len(concepts), 1)
self.assertEqual(concepts[0]['concept_id'], self.con.properties['id'])
def test_contrast_get_tasks(self):
contrast = Contrast()
tasks = contrast.get_tasks(self.cont.properties['id'])
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['task_id'], self.task1.properties['id'])
class GraphUtilsTest(TestCase):
def setUp(self):
self.task = Task()
self.node_name = "test_name"
self.node_properties = {'test_key': 'test_value'}
self.graph = Graph("http://graphdb:7474/db/data/")
self.task1 = self.task.create(
name=self.node_name,
properties=self.node_properties)
self.task2 = self.task.create(
name=self.node_name,
properties=self.node_properties)
condition = Condition()
self.cond = condition.create(
name=self.node_name,
properties=self.node_properties)
contrast = Contrast()
self.cont = contrast.create(
name=self.node_name,
properties=self.node_properties)
concept = Concept()
self.con = concept.create(
name=self.node_name,
properties=self.node_properties)
self.task.link(
self.task1.properties['id'],
self.cond.properties['id'],
"HASCONDITION",
endnode_type='condition')
self.task.link(
self.task1.properties['id'],
self.con.properties['id'],
"ASSERTS",
endnode_type='concept')
condition.link(
self.cond.properties['id'],
self.cont.properties['id'],
"HASCONTRAST",
endnode_type='contrast')
concept.link(
self.con.properties['id'],
self.cont.properties['id'],
"MEASUREDBY",
endnode_type='contrast')
def tearDown(self):
self.task1.delete_related()
self.task2.delete_related()
self.cond.delete_related()
self.cont.delete_related()
self.con.delete_related()
self.task1.delete()
self.task2.delete()
self.cond.delete()
self.cont.delete()
self.con.delete()
def test_search(self):
# we create 5 nodes all with the same name, this should find them all
result = search('test_name')
self.assertEqual(len(result), 5)
'''
def test_get(self):
result = get(self.task1.properties['name'])
self.assertEqual(len(result), 1)
'''
''' ignoring gist functions for now
def test_cypher_node(self):
pass
def test_cypher_relation(self):
pass
'''
|
|
"""
*********
Shapefile
*********
Generates a networkx.DiGraph from point and line shapefiles.
"The Esri Shapefile or simply a shapefile is a popular geospatial vector
data format for geographic information systems software. It is developed
and regulated by Esri as a (mostly) open specification for data
interoperability among Esri and other software products."
See http://en.wikipedia.org/wiki/Shapefile for additional information.
"""
# Copyright (C) 2004-2016 by
# Ben Reilly <[email protected]>
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """Ben Reilly ([email protected])"""
__all__ = ['read_shp', 'write_shp']
def read_shp(path, simplify=True, geom_attrs=True):
"""Generates a networkx.DiGraph from shapefiles. Point geometries are
translated into nodes, lines into edges. Coordinate tuples are used as
keys. Attributes are preserved, line geometries are simplified into start
and end coordinates. Accepts a single shapefile or directory of many
shapefiles.
"The Esri Shapefile or simply a shapefile is a popular geospatial vector
data format for geographic information systems software [1]_."
Parameters
----------
path : file or string
File, directory, or filename to read.
simplify: bool
If True, simplify line geometries to start and end coordinates.
If False, and line feature geometry has multiple segments, the
non-geometric attributes for that feature will be repeated for each
edge comprising that feature.
geom_attrs: bool
If True, include the Wkb, Wkt and Json geometry attributes with
each edge.
NOTE: if these attributes are available, write_shp will use them
to write the geometry. If nodes store the underlying coordinates for
the edge geometry as well (as they do when they are read via
        this method) and they change, your geometry will be out of sync.
Returns
-------
G : NetworkX graph
Examples
--------
>>> G=nx.read_shp('test.shp') # doctest: +SKIP
References
----------
.. [1] http://en.wikipedia.org/wiki/Shapefile
"""
try:
from osgeo import ogr
except ImportError:
raise ImportError("read_shp requires OGR: http://www.gdal.org/")
if not isinstance(path, str):
return
net = nx.DiGraph()
shp = ogr.Open(path)
for lyr in shp:
fields = [x.GetName() for x in lyr.schema]
for f in lyr:
flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]
g = f.geometry()
attributes = dict(zip(fields, flddata))
attributes["ShpName"] = lyr.GetName()
# Note: Using layer level geometry type
if g.GetGeometryType() == ogr.wkbPoint:
net.add_node((g.GetPoint_2D(0)), attributes)
elif g.GetGeometryType() in (ogr.wkbLineString,
ogr.wkbMultiLineString):
for edge in edges_from_line(g, attributes, simplify,
geom_attrs):
e1, e2, attr = edge
net.add_edge(e1, e2)
net[e1][e2].update(attr)
else:
raise ImportError("GeometryType {} not supported".
format(g.GetGeometryType()))
return net
def edges_from_line(geom, attrs, simplify=True, geom_attrs=True):
"""
Generate edges for each line in geom
Written as a helper for read_shp
Parameters
----------
geom: ogr line geometry
To be converted into an edge or edges
attrs: dict
Attributes to be associated with all geoms
simplify: bool
If True, simplify the line as in read_shp
geom_attrs: bool
If True, add geom attributes to edge as in read_shp
Returns
-------
edges: generator of edges
each edge is a tuple of form
(node1_coord, node2_coord, attribute_dict)
suitable for expanding into a networkx Graph add_edge call
"""
try:
from osgeo import ogr
except ImportError:
raise ImportError("edges_from_line requires OGR: http://www.gdal.org/")
if geom.GetGeometryType() == ogr.wkbLineString:
if simplify:
edge_attrs = attrs.copy()
last = geom.GetPointCount() - 1
if geom_attrs:
edge_attrs["Wkb"] = geom.ExportToWkb()
edge_attrs["Wkt"] = geom.ExportToWkt()
edge_attrs["Json"] = geom.ExportToJson()
yield (geom.GetPoint_2D(0), geom.GetPoint_2D(last), edge_attrs)
else:
for i in range(0, geom.GetPointCount() - 1):
pt1 = geom.GetPoint_2D(i)
pt2 = geom.GetPoint_2D(i + 1)
edge_attrs = attrs.copy()
if geom_attrs:
segment = ogr.Geometry(ogr.wkbLineString)
segment.AddPoint_2D(pt1[0], pt1[1])
segment.AddPoint_2D(pt2[0], pt2[1])
edge_attrs["Wkb"] = segment.ExportToWkb()
edge_attrs["Wkt"] = segment.ExportToWkt()
edge_attrs["Json"] = segment.ExportToJson()
del segment
yield (pt1, pt2, edge_attrs)
elif geom.GetGeometryType() == ogr.wkbMultiLineString:
for i in range(geom.GetGeometryCount()):
geom_i = geom.GetGeometryRef(i)
for edge in edges_from_line(geom_i, attrs, simplify, geom_attrs):
yield edge
def write_shp(G, outdir):
"""Writes a networkx.DiGraph to two shapefiles, edges and nodes.
Nodes and edges are expected to have a Well Known Binary (Wkb) or
Well Known Text (Wkt) key in order to generate geometries. Also
acceptable are nodes with a numeric tuple key (x,y).
"The Esri Shapefile or simply a shapefile is a popular geospatial vector
data format for geographic information systems software [1]_."
Parameters
----------
outdir : directory path
Output directory for the two shapefiles.
Returns
-------
None
Examples
--------
nx.write_shp(digraph, '/shapefiles') # doctest +SKIP
References
----------
.. [1] http://en.wikipedia.org/wiki/Shapefile
"""
try:
from osgeo import ogr
except ImportError:
raise ImportError("write_shp requires OGR: http://www.gdal.org/")
# easier to debug in python if ogr throws exceptions
ogr.UseExceptions()
def netgeometry(key, data):
if 'Wkb' in data:
geom = ogr.CreateGeometryFromWkb(data['Wkb'])
elif 'Wkt' in data:
geom = ogr.CreateGeometryFromWkt(data['Wkt'])
elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples
geom = ogr.Geometry(ogr.wkbLineString)
_from, _to = key[0], key[1]
try:
geom.SetPoint(0, *_from)
geom.SetPoint(1, *_to)
except TypeError:
# assume user used tuple of int and choked ogr
_ffrom = [float(x) for x in _from]
_fto = [float(x) for x in _to]
geom.SetPoint(0, *_ffrom)
geom.SetPoint(1, *_fto)
else:
geom = ogr.Geometry(ogr.wkbPoint)
try:
geom.SetPoint(0, *key)
except TypeError:
# assume user used tuple of int and choked ogr
fkey = [float(x) for x in key]
geom.SetPoint(0, *fkey)
return geom
# Create_feature with new optional attributes arg (should be dict type)
def create_feature(geometry, lyr, attributes=None):
feature = ogr.Feature(lyr.GetLayerDefn())
        feature.SetGeometry(geometry)
        if attributes is not None:
# Loop through attributes, assigning data to each field
for field, data in attributes.items():
feature.SetField(field, data)
lyr.CreateFeature(feature)
feature.Destroy()
drv = ogr.GetDriverByName("ESRI Shapefile")
shpdir = drv.CreateDataSource(outdir)
# delete pre-existing output first otherwise ogr chokes
try:
shpdir.DeleteLayer("nodes")
except:
pass
nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint)
for n in G:
data = G.node[n]
g = netgeometry(n, data)
create_feature(g, nodes)
try:
shpdir.DeleteLayer("edges")
except:
pass
edges = shpdir.CreateLayer("edges", None, ogr.wkbLineString)
# New edge attribute write support merged into edge loop
fields = {} # storage for field names and their data types
attributes = {} # storage for attribute data (indexed by field names)
# Conversion dict between python and ogr types
OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}
# Edge loop
for e in G.edges(data=True):
data = G.get_edge_data(*e)
g = netgeometry(e, data)
# Loop through attribute data in edges
for key, data in e[2].items():
# Reject spatial data not required for attribute table
if (key != 'Json' and key != 'Wkt' and key != 'Wkb'
and key != 'ShpName'):
# For all edges check/add field and data type to fields dict
if key not in fields:
# Field not in previous edges so add to dict
if type(data) in OGRTypes:
fields[key] = OGRTypes[type(data)]
else:
# Data type not supported, default to string (char 80)
fields[key] = ogr.OFTString
# Create the new field
newfield = ogr.FieldDefn(key, fields[key])
edges.CreateField(newfield)
# Store the data from new field to dict for CreateLayer()
attributes[key] = data
else:
# Field already exists, add data to dict for CreateLayer()
attributes[key] = data
        # Create the feature, passing the new attribute data
create_feature(g, edges, attributes)
nodes, edges = None, None
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import ogr
except:
raise SkipTest("OGR not available")
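# Hedged usage sketch (an addition, not part of the original module): write_shp()
# also accepts plain coordinate-tuple nodes, so a tiny round-trip would look
# roughly like this (paths are placeholders, and OGR must be installed):
#
#     G = nx.DiGraph()
#     G.add_edge((0.0, 0.0), (1.0, 1.0), myattr='example')
#     write_shp(G, '/tmp/shp_out')                 # writes nodes.shp and edges.shp
#     H = read_shp('/tmp/shp_out/edges.shp')       # read the edges back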
|
|
# Copyright 2011-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import re
import netaddr
import six
from . import errors, serializers
from .utils import force_str
class Argument(object):
default = None
serializer = serializers.Identity()
def __init__(self, default=None, help=None, **kwargs):
self.__doc__ = help
if default is not None:
self.default = default
for k, v in kwargs.items():
setattr(self, k, v)
def get_default(self, instance):
if callable(self.default):
return self.default(instance)
return self.default
def contribute_to_class(self, cls):
pass
class Boolean(Argument):
""" Represents a boolean. "1", "yes", "on" and "true" are all considered
to be True boolean values. Anything else is False. """
serializer = serializers.Boolean()
def clean(self, instance, value):
if isinstance(value, six.string_types):
if value.lower() in ("1", "yes", "on", "true"):
return True
else:
return False
return bool(value)
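# Hedged illustration (an addition, not part of the original module) of the
# Boolean semantics described in the docstring above; clean() ignores the
# `instance` argument, so None is passed here.
#
#     Boolean().clean(None, "Yes")   # -> True  (case-insensitive match)
#     Boolean().clean(None, "off")   # -> False (any other string)
#     Boolean().clean(None, 0)       # -> False (non-strings fall back to bool())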
class String(Argument):
""" Represents a string. """
serializer = serializers.String()
min = None
max = None
choices = None
regex = None
def clean(self, instance, value):
if value is None:
return value
# Automatically cast integers, etc to string
if not isinstance(value, (six.binary_type, six.text_type)):
value = str(value)
# Cast to native string type for this python version
value = force_str(value)
if self.min is not None and len(value) < self.min:
raise errors.InvalidParameter(
"Value should be at least {} characters".format(self.min),
)
if self.max is not None and len(value) > self.max:
raise errors.InvalidParameter(
"Value should be at most {} characters".format(self.max),
)
if self.choices is not None and value not in self.choices:
raise errors.InvalidParameter(
"Not a valid value for this field",
)
if self.regex is not None and not re.match(self.regex, value):
raise errors.InvalidParameter(
"Regex validation failed ({})".format(self.regex),
)
return value
class Integer(Argument):
""" Represents an integer argument taken from the source file. This can
throw an :py:exc:errors.ParseError if the passed in value cannot represent
a base-10 integer. """
serializer = serializers.Integer()
def clean(self, instance, value):
if not isinstance(value, int):
try:
value = int(value)
except ValueError:
raise errors.InvalidParameter("%s is not an integer" % value)
return value
class IPAddress(String):
serializer = serializers.String()
def clean(self, instance, value):
try:
return netaddr.IPAddress(value)
except (netaddr.core.AddrFormatError, ValueError):
raise errors.InvalidParameter("{} is not a valid IP Address")
class IPNetwork(String):
serializer = serializers.String()
def clean(self, instance, value):
try:
network = netaddr.IPNetwork(value)
except (netaddr.core.AddrFormatError, ValueError):
raise errors.InvalidParameter("{} is not a valid IP Address")
if value != str(network.cidr):
raise errors.InvalidParameter("{} looks wrong - did you mean {}?".format(value, network.cidr))
return network
class Dict(Argument):
def clean(self, instance, value):
if not isinstance(value, dict):
raise errors.InvalidParameter("{} is not a dictionary")
return value
def default(self, instance):
return {}
class List(Argument):
serializer = None
def __init__(self, list_of=None, **kwargs):
super(List, self).__init__(**kwargs)
self.list_of = list_of
if not self.serializer:
self.serializer = serializers.List(
self.list_of.serializer if self.list_of else serializers.String(),
skip_empty=True,
)
def clean(self, instance, value):
if not isinstance(value, list):
raise errors.InvalidParameter("{} is not a list")
if not self.list_of:
return value
result = []
for entry in value:
result.append(self.list_of.clean(instance, entry))
return result
def default(self, instance):
return []
class Resource(Argument):
serializer = serializers.Identifier()
"""
An argument that represents a resource that we depend on. For example,
    to create an AWS subnet we need an AWS VPC to put it in. You might define such a
subnet as::
from touchdown.core.resource import Resource
from touchdown.core import arguments
class Subnet(Resource):
cidr_block = argument.CidrBlock()
vpc = argument.Resource(VPC)
"""
def __init__(self, resource_class, **kwargs):
super(Resource, self).__init__(**kwargs)
self.resource_class = resource_class
def get_resource_class(self):
if not isinstance(self.resource_class, (list, tuple)):
return tuple([self.resource_class] + self.resource_class.__subclasses__())
return tuple(itertools.chain(
self.resource_class,
*[r.__subclasses__() for r in self.resource_class]
))
def get_default(self, instance):
default = super(Resource, self).get_default(instance)
if isinstance(default, dict):
return self.resource_class(instance, **default)
return default
def clean(self, instance, value):
"""
Every time you assign a value to a Resource argument we validate it is
the correct type. We also mark self as depending on the resource.
"""
if isinstance(value, dict):
for resource_class in self.get_resource_class():
try:
value = resource_class(instance, **value)
break
except errors.InvalidParameter:
continue
else:
raise errors.InvalidParameter("Parameter must be one of {}".format(str(self.get_resource_class())))
elif hasattr(self.resource_class, "wrap"):
value = self.resource_class.wrap(instance, value)
elif not isinstance(value, self.get_resource_class()):
raise errors.InvalidParameter("Parameter must be a {}".format(self.resource_class))
instance.add_dependency(value)
return value
def contribute_to_class(self, cls):
"""
If we mark a resource as being assignable to another resource then it
automatically gains a factory method. Continuing the VPC+Subnet example,
we can now::
some_vpc.add_subnet(cidr_block='192.168.0.1/24')
With this form you don't have to pass the vpc argument (it is done
implicitly).
"""
if isinstance(self.resource_class, six.string_types):
from .resource import ResourceType
if self.resource_class not in ResourceType.__all_resources__:
ResourceType.add_callback(self.resource_class, self.contribute_to_class, cls)
return
self.resource_class = ResourceType.__all_resources__[self.resource_class]
if hasattr(cls, "wrap"):
return
if not hasattr(cls, "resource_name"):
return
argument_name = self.name
def _(self, **kwargs):
arguments = {argument_name: self}
arguments.update(kwargs)
resource = cls(self, **arguments)
self.workspace.add_dependency(resource)
return resource
setattr(self.resource_class, 'add_%s' % cls.resource_name, _)
def _(self, **kwargs):
arguments = {argument_name: self}
arguments.update(kwargs)
arguments['ensure'] = ['never-create', 'never-destroy']
resource = cls(self, **arguments)
self.workspace.add_dependency(resource)
return resource
setattr(self.resource_class, 'get_%s' % cls.resource_name, _)
class ResourceList(List):
def __init__(self, resource_class, **kwargs):
super(ResourceList, self).__init__(
Resource(resource_class),
**kwargs
)
def contribute_to_class(self, cls):
resource_class = self.list_of.resource_class
if isinstance(resource_class, six.string_types):
from .resource import ResourceType
            if resource_class not in ResourceType.__all_resources__:
ResourceType.add_callback(resource_class, self.contribute_to_class, cls)
return
            self.list_of.resource_class = ResourceType.__all_resources__[resource_class]
super(ResourceList, self).contribute_to_class(cls)
|
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple util to grep through network and service definitions.
Examples:
To find out which tokens contain "10.4.3.1" use
$ cgrep.py -i 10.4.3.1
To find out if token 'FOO' includes ip "1.2.3.4" use
$ cgrep.py -t FOO -i 1.2.3.4
To find the difference and union of tokens 'FOO' and 'BAR' use
$ cgrep.py -c FOO BAR
To find the difference of network tokens to which 2 IPs belong use
$ cgrep.py -g 1.1.1.1 2.2.2.2
To find which IPs are in the 'FOO' network token use
$ cgrep.py -o FOO
To find which port & protocol pairs are in a service token 'FOO' use
$ cgrep.py -s FOO
To find which service tokens contain port '22' and protocol 'tcp' use
$ cgrep.py -p 22 tcp
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pprint
from lib import nacaddr
from lib import naming
import logging
def is_valid_ip(arg):
"""Validates a value to be an IP or not.
Args:
arg: potential IP address as a string.
Returns:
arg as IP object (if arg is an IP)
Raises:
Error (if arg is not an IP)
"""
try:
nacaddr.IP(arg)
except:
raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg)
return arg
def cli_options():
"""Builds the argparse options for cgrep.
TODO(robankeny): Move this to flags.
Returns:
parser: the arguments, ready to be parsed.
"""
parser = argparse.ArgumentParser(
description='c[apirca]grep',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-d', '--def', dest='defs',
help='Network Definitions directory location. \n',
default='./def')
# -i and -t can be used together, but not with any other option.
ip_group = parser.add_argument_group()
# take 1 or more IPs
ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip,
help='Return list of definitions containing the '
'IP(s).\nMultiple IPs permitted.')
ip_group.add_argument('-t', '--token', dest='token',
help=('See if an IP is contained within the given '
'token.\nMust be used in conjunction with '
'-i/--ip [addr].'))
exclusive_group = parser.add_mutually_exclusive_group()
# the rest of the arguments are mutually exclusive with each other,
# and -i / -t
exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2,
metavar=('OBJ', 'OBJ'),
help=('Compare the two given network '
'definition tokens'))
exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2,
type=is_valid_ip, metavar=('IP', 'IP'),
help=('Diff the network objects to'
' which the given IP(s) belong'))
exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+',
help=('Return list of IP(s) contained within '
'the given token(s)'))
exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+',
help=('Return list of port(s) contained '
'within given token(s)'))
exclusive_group.add_argument('-p', '--port', dest='port', nargs=2,
metavar=('PORT', 'PROTO'),
help=('Returns a list of tokens containing '
'the given port and protocol'))
return parser
def main(parser):
"""Determines the code path based on the arguments passed.
Args:
parser: the argument parser, but not parsed yet.
"""
options = parser.parse_args()
db = naming.Naming(options.defs)
p = pprint.PrettyPrinter(indent=1, depth=4, width=1).pprint
# if -i and any other option:
if options.ip and any([options.gmp, options.cmp, options.obj, options.svc,
options.port]):
logging.info('You can only use -i with -t or by itself')
# if -i and -t
elif options.token and options.ip:
try:
get_nets([options.token], db)
except naming.UndefinedAddressError:
logging.info("Network group '%s' is not defined!", options.token)
else:
results = compare_ip_token(options, db)
logging.info(results)
# if -t, but not -i; invalid!
elif options.token and not options.ip:
logging.info('You must specify an IP Address with -i [addr]')
# if -i
elif options.ip:
for ip in options.ip:
groups = get_ip_parents(ip, db)
logging.info('Results for IP: %s', ip)
# iterate and print the tokens we found.
for name, networks in groups:
# print the group name [0], and the networks it was in [1]
logging.info('%s %s', name, networks)
elif options.gmp:
common, diff1, diff2 = group_diff(options, db)
print_diff(options.gmp[0], common, diff1, diff2)
logging.info('')
print_diff(options.gmp[1], common, diff2, diff1)
# if -c
elif options.cmp:
meta, results = compare_tokens(options, db)
first_name = meta[0]
second_name = meta[1]
union = meta[2]
logging.info('Union of %s and %s:\n %s\n', first_name, second_name, union)
logging.info('Diff of %s and %s:', first_name, second_name)
for i in results:
logging.info(' ' + i)
logging.info('')
first_obj, sec_obj = options.cmp
if check_encapsulated('network', first_obj, sec_obj, db):
logging.info('%s fully encapsulates %s', sec_obj, first_obj)
else:
logging.info('%s does _not_ fully encapsulate %s', sec_obj, first_obj)
# check the other way around.
if check_encapsulated('network', sec_obj, first_obj, db):
logging.info('%s fully encapsulates %s', first_obj, sec_obj)
else:
logging.info('%s does _not_ fully encapsulate %s', first_obj, sec_obj)
# if -o
elif options.obj:
for obj in options.obj:
try:
token, ips = get_nets([obj], db)[0]
except naming.UndefinedAddressError:
logging.info('%s is an invalid object', obj)
else:
logging.info(token + ':')
# convert list of ip objects to strings and sort them
ips.sort(key=lambda x: int(x.ip))
p([str(x) for x in ips])
# if -s
elif options.svc:
try:
results = get_ports(options.svc, db)
except naming.UndefinedServiceError:
logging.info('%s contains an invalid service object', str(options.svc))
else:
for result in get_ports(options.svc, db):
svc, port = result
logging.info(svc + ':')
p(port)
# if -p
elif options.port:
port, protocol, result = get_services(options, db)
logging.info('%s/%s:', port, protocol)
p(result)
# if nothing is passed
elif not any((options.cmp, options.ip, options.token, options.obj,
options.svc, options.port)):
parser.print_help()
logging.info('')
def check_encapsulated(obj_type, first_obj, second_obj, db):
"""Checks if a network/service object is entirely contained within another.
Args:
obj_type: "network" or "service"
first_obj: The name of the first network/service object
    second_obj: The name of the second network/service object
db: The network and service definitions
Returns:
Error or bool:
ValueError if an invalid object type is passed
True if the first_obj is entirely within second_obj, otherwise False
Raises:
ValueError: When value is not a network or service.
"""
if obj_type == 'network':
# the indexing is to get the list of networks out of the tuple[1] and
# list[0] returned by get_nets
first = get_nets([first_obj], db)[0][1]
second = get_nets([second_obj], db)[0][1]
elif obj_type == 'service':
first = get_ports([first_obj], db)[0][1]
second = get_ports([second_obj], db)[0][1]
else:
raise ValueError("check_encapsulated() currently only supports "
"'network' and 'service' for the obj_type parameter")
# iterates over each object in the first group, and then each obj in the
# second group, making sure each one in the first is contained
# somewhere in the second.
for obj in first:
for sec_obj in second:
if obj in sec_obj:
break
# if we got through every object in the second group, and didn't have
# a match, then the first group is not entirely contained.
else:
return False
# if we got here, then the group was fully contained.
return True
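def _for_else_containment_demo(first, second):
  """Hedged, self-contained sketch (an addition, not part of the original cgrep):
  the same for/else containment pattern used by check_encapsulated() above,
  shown with plain values instead of network/service objects."""
  for obj in first:
    for sec_obj in second:
      if obj == sec_obj:
        break
    else:
      # the inner loop finished without a break: obj was not found in second
      return False
  return True
# _for_else_containment_demo([1, 2], [1, 2, 3]) -> True
# _for_else_containment_demo([1, 4], [1, 2, 3]) -> False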
def print_diff(ip, common, diff1, diff2):
"""Print out the common, added, and removed network objects between 2 IPs.
Args:
ip: the IP being compared against
common: the network objects shared between the two IPs
('ip' and the other passed into options.cmp)
diff1: the network objects present in 'ip' but not in the other IP
passed into options.cmp
diff2: the network objects not present in 'ip' but are present in
the other IP passed into options.cmp
"""
logging.info('IP: %s', ip)
if common:
common = [' {0}'.format(elem) for elem in common]
logging.info('\n'.join(common))
if diff1:
diff = ['+ {0}'.format(elem) for elem in diff1]
logging.info('\n'.join(diff))
if diff2:
diff = ['- {0}'.format(elem) for elem in diff2]
logging.info('\n'.join(diff))
def group_diff(options, db):
"""Diffs two different group objects.
Args:
options: the options sent to the script
db : network and service definitions
Returns:
tuple: the common lines, the differences from 1 to 2,
and the differences from 2 to 1
"""
nested_rvals = []
for ip in options.gmp:
nested_rvals.append(get_ip_parents(ip, db))
# get just the list of groups, stripping out the networks.
group1 = [x[0] for x in nested_rvals[0]]
group2 = [x[0] for x in nested_rvals[1]]
common = list(set(group1) & set(group2))
diff1 = list(set(group1) - set(group2))
diff2 = list(set(group2) - set(group1))
return common, diff1, diff2
def get_ip_parents(ip, db):
"""Gets a list of all network objects that include an IP.
Args:
ip: the IP we're looking for the parents of
db: network and service definitions
Returns:
results: a list of all groups that include the IP, in the format:
[("Group", ["networks", "matched"]), (etc)]
"""
results = []
rval = db.GetIpParents(ip)
for v in rval:
nested = db.GetNetParents(v)
prefix_and_nets = get_nets_and_highest_prefix(ip, v, db)
if nested:
for n in nested:
results.append(('%s -> %s' % (n, v), prefix_and_nets))
else:
results.append((v, prefix_and_nets))
# sort the results by prefix length descending
results = sorted(results, key=lambda x: x[1][0], reverse=True)
# strip out the no longer needed prefix lengths before handing off
for index, group in enumerate(results):
results[index] = (group[0], group[1][1])
return results
def get_nets_and_highest_prefix(ip, net_group, db):
"""Find the highest prefix length in all networks given it contains the IP.
Args:
ip: the IP address contained in net_group
net_group: the name of the network object we'll be looking through
db: network and service definitions
Returns:
highest_prefix_length, networks as tuple
highest_prefix_length : the longest prefix length found,
networks : network objects
"""
highest_prefix_length = 0
networks = []
ip = nacaddr.IP(ip)
# loop through all the networks in the net_group
for net in get_nets([net_group], db)[0][1]:
# find the highest prefix length for the networks that contain the IP
if ip in net:
networks.append(str(net))
if net.prefixlen > highest_prefix_length:
highest_prefix_length = net.prefixlen
return highest_prefix_length, networks
def get_nets(objects, db):
"""Gets a list of all networks that are inside of a network object.
Args:
objects: network objects
db: network and service definitions
Returns:
results : all networks inside a network object
"""
results = []
for obj in objects:
net = db.GetNet(obj)
results.append((obj, net))
return results
def compare_tokens(options, db):
"""Compares to network objects against each other.
Args:
options: the options sent to the script
db: network and service definitions
Returns:
meta, results :
((first object, second object, union of those two),
diff of those two network objects)
"""
t1, t2 = options.cmp
d1 = db.GetNet(t1)
d2 = db.GetNet(t2)
union = list(set(d1 + d2))
meta = (t1, t2, union)
results = []
for el in set(d1 + d2):
el = nacaddr.IP(el)
if el in d1 and el in d2:
results.append(str(el))
elif el in d1:
results.append(str(el))
elif el in d2:
results.append(str(el))
return meta, results
def compare_ip_token(options, db):
"""Looks to see if a network IP is contained in a network object.
Args:
options: the options sent to the script
db: network and service definitions
Returns:
results : end-user string stating the results
"""
token = options.token
results = []
for ip in options.ip:
rval = db.GetIpParents(ip)
if token in rval:
results = '%s is in %s' % (ip, token)
else:
results = '%s is _not_ in %s' % (ip, token)
return results
def get_ports(svc_group, db):
"""Gets the ports and protocols defined in a service group.
Args:
svc_group: a list of strings for each service group
db: network and service definitions
Returns:
results: a list of tuples for each service defined, in the format:
(service name, "<port>/<protocol>")
"""
results = []
for svc in svc_group:
port = db.GetService(svc)
results.append((svc, port))
return results
def get_services(options, db):
"""Finds any services with that include a specific port/protocol pair.
Args:
options: the options sent to the script
db: network and service definitions
Returns:
port, protocol, results as tuple in the format:
(port, protocol, list of the services containing this pair)
"""
results = []
port, protocol = options.port
# swap values if they were passed in wrong order
if port.isalpha() and protocol.isdigit():
port, protocol = protocol, port
results = db.GetPortParents(port, protocol)
return port, protocol, results
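# Worked example of the swap above: if the pair arrives reversed, e.g.
# port='tcp' and protocol='25', the isalpha()/isdigit() test flips them so the
# lookup becomes db.GetPortParents('25', 'tcp') and the function returns
# ('25', 'tcp', [list of services containing 25/tcp]).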
if __name__ == '__main__':
main(cli_options())
|
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
As of zookeeper 3.4.0, the `mntr` admin command is provided for easy parsing of zookeeper stats.
This check first parses the `stat` admin command for a version number.
If the zookeeper version supports `mntr`, it is also parsed.
Duplicate information is reported by both `mntr` and `stat` to keep backwards compatibility.
Example:
`stat` reports: zookeeper.latency.avg
`mntr` reports: zookeeper.avg.latency
If available, make use of the data reported by `mntr`, not `stat`.
The duplicate `stat` reports are only kept for backward compatibility.
Besides the usual zookeeper state of `leader`, `follower`, `observer` and `standalone`,
this check will report three other states:
`down`: the check cannot connect to zookeeper
`inactive`: the zookeeper instance has lost connection to the cluster
`unknown`: an unexpected error has occurred in this check
States can be accessed through the gauge `zookeeper.instances.<state>`,
through the set `zookeeper.instances`, or through the `mode:<state>` tag.
Parses the response from zookeeper's `stat` admin command, which looks like:
```
Zookeeper version: 3.2.2--1, built on 03/16/2010 07:31 GMT
Clients:
/10.42.114.160:32634[1](queued=0,recved=12,sent=0)
/10.37.137.74:21873[1](queued=0,recved=53613,sent=0)
/10.37.137.74:21876[1](queued=0,recved=57436,sent=0)
/10.115.77.32:32990[1](queued=0,recved=16,sent=0)
/10.37.137.74:21891[1](queued=0,recved=55011,sent=0)
/10.37.137.74:21797[1](queued=0,recved=19431,sent=0)
Latency min/avg/max: -10/0/20007
Received: 101032173
Sent: 0
Outstanding: 0
Zxid: 0x1034799c7
Mode: leader
Node count: 487
```
`stat` tested with Zookeeper versions 3.0.0 to 3.4.5
The following is an example of the `mntr` commands output:
```
zk_version 3.4.5-cdh4.4.0--1, built on 09/04/2013 01:46 GMT
zk_avg_latency 0
zk_max_latency 0
zk_min_latency 0
zk_packets_received 4
zk_packets_sent 3
zk_num_alive_connections 1
zk_outstanding_requests 0
zk_server_state standalone
zk_znode_count 4
zk_watch_count 0
zk_ephemerals_count 0
zk_approximate_data_size 27
zk_open_file_descriptor_count 29
zk_max_file_descriptor_count 4096
```
`mntr` tested with ZooKeeper 3.4.5
'''
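# Example instance configuration (a sketch; the keys below are the ones read in
# check(), the values are illustrative):
#
#   instances:
#     - host: localhost
#       port: 2181
#       timeout: 3
#       expected_mode: leader
#       report_instance_mode: true
#       tags:
#         - env:dev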
# stdlib
from collections import defaultdict
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
from StringIO import StringIO
import re
import socket
import struct
# project
from checks import AgentCheck
from util import get_hostname
class ZKConnectionFailure(Exception):
""" Raised when we are unable to connect or get the output of a command. """
pass
class ZKMetric(tuple):
"""
A Zookeeper metric.
Tuple with an optional metric type (default is 'gauge').
"""
def __new__(cls, name, value, m_type="gauge"):
return super(ZKMetric, cls).__new__(cls, [name, value, m_type])
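# Usage sketch: a ZKMetric behaves like a plain (name, value, type) tuple, so
# the parsers below can unpack it directly (metric names are illustrative):
#
#   g = ZKMetric('zookeeper.latency.avg', 0)                      # 'gauge' by default
#   r = ZKMetric('zookeeper.packets_received', 4, m_type='rate')
#   name, value, m_type = r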
class ZookeeperCheck(AgentCheck):
"""
ZooKeeper AgentCheck.
Parse content from the `stat` and `mntr` (if available) commands to retrieve cluster health metrics.
"""
version_pattern = re.compile(r'Zookeeper version: ([^.]+)\.([^.]+)\.([^-]+)', flags=re.I)
SOURCE_TYPE_NAME = 'zookeeper'
STATUS_TYPES = [
'leader',
'follower',
'observer',
'standalone',
'down',
'inactive',
]
# `mntr` information to report as `rate`
_MNTR_RATES = set(
[
'zk_packets_received',
'zk_packets_sent',
]
)
def check(self, instance):
host = instance.get('host', 'localhost')
port = int(instance.get('port', 2181))
timeout = float(instance.get('timeout', 3.0))
expected_mode = (instance.get('expected_mode') or '').strip()
tags = instance.get('tags', [])
cx_args = (host, port, timeout)
sc_tags = ["host:{0}".format(host), "port:{0}".format(port)]
hostname = get_hostname(self.agentConfig)
report_instance_mode = instance.get("report_instance_mode", True)
zk_version = None # parse_stat will parse and set version string
# Send a service check based on the `ruok` response.
# Set instance status to down if not ok.
try:
ruok_out = self._send_command('ruok', *cx_args)
except ZKConnectionFailure:
# The server should not respond at all if it's not OK.
status = AgentCheck.CRITICAL
message = 'No response from `ruok` command'
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
else:
ruok_out.seek(0)
ruok = ruok_out.readline()
if ruok == 'imok':
status = AgentCheck.OK
else:
status = AgentCheck.WARNING
message = u'Response from the server: %s' % ruok
finally:
self.service_check(
'zookeeper.ruok', status, message=message, tags=sc_tags
)
# Read metrics from the `stat` output.
try:
stat_out = self._send_command('stat', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
# Parse the response
metrics, new_tags, mode, zk_version = self.parse_stat(stat_out)
# Write the data
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + new_tags)
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
if expected_mode:
if mode == expected_mode:
status = AgentCheck.OK
message = u"Server is in %s mode" % mode
else:
status = AgentCheck.CRITICAL
message = u"Server is in %s mode but check expects %s mode"\
% (mode, expected_mode)
self.service_check('zookeeper.mode', status, message=message,
tags=sc_tags)
# Read metrics from the `mntr` output
if zk_version and LooseVersion(zk_version) > LooseVersion("3.4.0"):
try:
mntr_out = self._send_command('mntr', *cx_args)
except ZKConnectionFailure:
self.increment('zookeeper.timeouts')
if report_instance_mode:
self.report_instance_mode(hostname, 'down', tags)
raise
except Exception as e:
self.warning(e)
self.increment('zookeeper.datadog_client_exception')
if report_instance_mode:
self.report_instance_mode(hostname, 'unknown', tags)
raise
else:
metrics, mode = self.parse_mntr(mntr_out)
mode_tag = "mode:%s" % mode
if mode != 'inactive':
for metric, value, m_type in metrics:
submit_metric = getattr(self, m_type)
submit_metric(metric, value, tags=tags + [mode_tag])
if report_instance_mode:
self.report_instance_mode(hostname, mode, tags)
def report_instance_mode(self, hostname, mode, tags):
gauges = defaultdict(int)
if mode not in self.STATUS_TYPES:
mode = "unknown"
tags = tags + ['mode:%s' % mode]
self.set('zookeeper.instances', hostname, tags=tags)
gauges[mode] = 1
for k, v in gauges.iteritems():
gauge_name = 'zookeeper.instances.%s' % k
self.gauge(gauge_name, v)
def _send_command(self, command, host, port, timeout):
sock = socket.socket()
sock.settimeout(timeout)
buf = StringIO()
chunk_size = 1024
# try-finally and try-except to stay compatible with python 2.4
try:
try:
# Connect to the zk client port and send the stat command
sock.connect((host, port))
sock.sendall(command)
# Read the response into a StringIO buffer
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads = 1
max_reads = 10000
while chunk:
if num_reads > max_reads:
# Safeguard against an infinite loop
raise Exception("Read %s bytes before exceeding max reads of %s. "
% (buf.tell(), max_reads))
chunk = sock.recv(chunk_size)
buf.write(chunk)
num_reads += 1
except (socket.timeout, socket.error):
raise ZKConnectionFailure()
finally:
sock.close()
return buf
def parse_stat(self, buf):
''' `buf` is a readable file-like object
returns a tuple: (metrics, tags, mode, version)
'''
metrics = []
buf.seek(0)
# Check the version line to make sure we parse the rest of the
# body correctly. Particularly, the Connections val was added in
# >= 3.4.4.
start_line = buf.readline()
match = self.version_pattern.match(start_line)
if match is None:
# Could not parse the version from the `stat` command output; treat as inactive.
return (None, None, "inactive", None)
else:
version_tuple = match.groups()
has_connections_val = version_tuple >= ('3', '4', '4')
version = "%s.%s.%s" % version_tuple
# Clients:
buf.readline() # skip the Clients: header
connections = 0
client_line = buf.readline().strip()
if client_line:
connections += 1
while client_line:
client_line = buf.readline().strip()
if client_line:
connections += 1
# Latency min/avg/max: -10/0/20007
_, value = buf.readline().split(':')
l_min, l_avg, l_max = [int(v) for v in value.strip().split('/')]
metrics.append(ZKMetric('zookeeper.latency.min', l_min))
metrics.append(ZKMetric('zookeeper.latency.avg', l_avg))
metrics.append(ZKMetric('zookeeper.latency.max', l_max))
# Received: 101032173
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_received', long(value.strip())))
# Sent: 1324
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.bytes_sent', long(value.strip())))
if has_connections_val:
# Connections: 1
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.connections', int(value.strip())))
else:
# If the zk version doesn't explicitly give the Connections val,
# use the value we computed from the client list.
metrics.append(ZKMetric('zookeeper.connections', connections))
# Outstanding: 0
_, value = buf.readline().split(':')
# Fixme: This metric name is wrong. It should be removed in a major version of the agent
# See https://github.com/DataDog/dd-agent/issues/1383
metrics.append(ZKMetric('zookeeper.bytes_outstanding', long(value.strip())))
metrics.append(ZKMetric('zookeeper.outstanding_requests', long(value.strip())))
# Zxid: 0x1034799c7
_, value = buf.readline().split(':')
# Parse as a 64 bit hex int
zxid = long(value.strip(), 16)
# convert to bytes
zxid_bytes = struct.pack('>q', zxid)
# the higher order 4 bytes is the epoch
(zxid_epoch,) = struct.unpack('>i', zxid_bytes[0:4])
# the lower order 4 bytes is the count
(zxid_count,) = struct.unpack('>i', zxid_bytes[4:8])
metrics.append(ZKMetric('zookeeper.zxid.epoch', zxid_epoch))
metrics.append(ZKMetric('zookeeper.zxid.count', zxid_count))
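# Worked example of the split above: zxid 0x1034799c7 packs into the 64-bit
# big-endian bytes 00 00 00 01 03 47 99 c7, so the epoch (high 32 bits) is
# 0x1 = 1 and the count (low 32 bits) is 0x034799c7 = 55024071.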
# Mode: leader
_, value = buf.readline().split(':')
mode = value.strip().lower()
tags = [u'mode:' + mode]
# Node count: 487
_, value = buf.readline().split(':')
metrics.append(ZKMetric('zookeeper.nodes', long(value.strip())))
return metrics, tags, mode, version
def parse_mntr(self, buf):
'''
Parse `mntr` command's content.
`buf` is a readable file-like object
Returns: a tuple (metrics, mode)
if mode == 'inactive', metrics will be None
'''
buf.seek(0)
first = buf.readline() # First is version string or error
if first == 'This ZooKeeper instance is not currently serving requests':
return (None, 'inactive')
metrics = []
mode = 'inactive'
for line in buf:
try:
key, value = line.split()
if key == "zk_server_state":
mode = value.lower()
continue
metric_name = self._normalize_metric_label(key)
metric_type = "rate" if key in self._MNTR_RATES else "gauge"
metric_value = int(value)
metrics.append(ZKMetric(metric_name, metric_value, metric_type))
except ValueError:
self.log.warning(
u"Cannot format `mntr` value. key={key}, value{value}".format(
key=key, value=value
)
)
continue
except Exception:
self.log.exception(
u"Unexpected exception occurred while parsing `mntr` command content:\n"
u"{buf}".format(
buf=buf
)
)
return (metrics, mode)
def _normalize_metric_label(self, key):
if re.match('zk', key):
key = key.replace('zk', 'zookeeper', 1)
return key.replace('_', '.', 1)
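# Example of the normalization above: 'zk_avg_latency' becomes
# 'zookeeper_avg_latency' after the first replace and 'zookeeper.avg_latency'
# after the second (only the first '_' is turned into a '.').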
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
class CreateFlavorInfoAction(workflows.Action):
_flavor_id_regex = (r'^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-'
r'[0-9a-fA-F]{4}-[0-9a-fA-F]{12}|[0-9]+|auto$')
_flavor_id_help_text = _("Flavor ID should be UUID4 or integer. "
"Leave this field blank or use 'auto' to set "
"a random UUID4.")
name = forms.CharField(
label=_("Name"),
max_length=255)
flavor_id = forms.RegexField(label=_("ID"),
regex=_flavor_id_regex,
required=False,
initial='auto',
help_text=_flavor_id_help_text)
vcpus = forms.IntegerField(label=_("VCPUs"),
min_value=1)
memory_mb = forms.IntegerField(label=_("RAM (MB)"),
min_value=1)
disk_gb = forms.IntegerField(label=_("Root Disk (GB)"),
min_value=0)
eph_gb = forms.IntegerField(label=_("Ephemeral Disk (GB)"),
required=False,
initial=0,
min_value=0)
swap_mb = forms.IntegerField(label=_("Swap Disk (MB)"),
required=False,
initial=0,
min_value=0)
rxtx_factor = forms.FloatField(label=_("RX/TX Factor"),
required=False,
initial=1,
min_value=1)
class Meta(object):
name = _("Flavor Information")
help_text = _("Flavors define the sizes for RAM, disk, number of "
"cores, and other resources and can be selected when "
"users deploy instances.")
def clean_name(self):
name = self.cleaned_data.get('name').strip()
if not name:
msg = _('Flavor name cannot be empty.')
self._errors['name'] = self.error_class([msg])
return name
def clean(self):
cleaned_data = super(CreateFlavorInfoAction, self).clean()
name = cleaned_data.get('name')
flavor_id = cleaned_data.get('flavor_id')
try:
flavors = api.nova.flavor_list(self.request, None)
except Exception:
flavors = []
msg = _('Unable to get flavor list')
exceptions.check_message(["Connection", "refused"], msg)
raise
if flavors is not None and name is not None:
for flavor in flavors:
if flavor.name.lower() == name.lower():
error_msg = _('The name "%s" is already used by '
'another flavor.') % name
self._errors['name'] = self.error_class([error_msg])
if flavor.id == flavor_id:
error_msg = _('The ID "%s" is already used by '
'another flavor.') % flavor_id
self._errors['flavor_id'] = self.error_class([error_msg])
return cleaned_data
class CreateFlavorInfo(workflows.Step):
action_class = CreateFlavorInfoAction
contributes = ("flavor_id",
"name",
"vcpus",
"memory_mb",
"disk_gb",
"eph_gb",
"swap_mb",
"rxtx_factor")
class FlavorAccessAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(FlavorAccessAction, self).__init__(request, *args, **kwargs)
err_msg = _('Unable to retrieve flavor access list. '
'Please try again later.')
context = args[0]
default_role_field_name = self.get_default_role_field_name()
self.fields[default_role_field_name] = forms.CharField(required=False)
self.fields[default_role_field_name].initial = 'member'
field_name = self.get_member_field_name('member')
self.fields[field_name] = forms.MultipleChoiceField(required=False)
# Get list of available projects.
all_projects = []
try:
all_projects, has_more = api.keystone.tenant_list(request)
except Exception:
exceptions.handle(request, err_msg)
projects_list = [(project.id, project.name)
for project in all_projects]
self.fields[field_name].choices = projects_list
# If we have a POST from the CreateFlavor workflow, the flavor id
# isn't an existing flavor. For the UpdateFlavor case, we don't care
# about the access list for the current flavor anymore as we're about
# to replace it.
if request.method == 'POST':
return
# Get list of flavor projects if the flavor is not public.
flavor = context.get('flavor')
flavor_access = []
try:
if flavor and not flavor.is_public:
flavor_access = [project.tenant_id for project in
context['current_flavor_access']]
except Exception:
exceptions.handle(request, err_msg)
self.fields[field_name].initial = flavor_access
class Meta(object):
name = _("Flavor Access")
slug = "flavor_access"
class FlavorAccess(workflows.UpdateMembersStep):
action_class = FlavorAccessAction
help_text = _("Select the projects where the flavors will be used. If no "
"projects are selected, then the flavor will be available "
"in all projects.")
available_list_title = _("All Projects")
members_list_title = _("Selected Projects")
no_available_text = _("No projects found.")
no_members_text = _("No projects selected. "
"All projects can use the flavor.")
show_roles = False
def contribute(self, data, context):
if data:
member_field_name = self.get_member_field_name('member')
context['flavor_access'] = data.get(member_field_name, [])
return context
class CreateFlavorAccess(FlavorAccess):
contributes = ("flavor_access",)
class UpdateFlavorAccess(FlavorAccess):
depends_on = ("flavor", "current_flavor_access")
contributes = ("flavor_access",)
class CreateFlavor(workflows.Workflow):
slug = "create_flavor"
name = _("Create Flavor")
finalize_button_name = _("Create Flavor")
success_message = _('Created new flavor "%s".')
failure_message = _('Unable to create flavor "%s".')
success_url = "horizon:admin:flavors:index"
default_steps = (CreateFlavorInfo,
CreateFlavorAccess)
def format_status_message(self, message):
return message % self.context['name']
def handle(self, request, data):
flavor_id = data.get('flavor_id') or 'auto'
swap = data.get('swap_mb') or 0
ephemeral = data.get('eph_gb') or 0
flavor_access = data['flavor_access']
is_public = not flavor_access
rxtx_factor = data.get('rxtx_factor') or 1
# Create the flavor
try:
self.object = api.nova.flavor_create(request,
name=data['name'],
memory=data['memory_mb'],
vcpu=data['vcpus'],
disk=data['disk_gb'],
ephemeral=ephemeral,
swap=swap,
flavorid=flavor_id,
is_public=is_public,
rxtx_factor=rxtx_factor)
except Exception:
exceptions.handle(request, _('Unable to create flavor.'))
return False
# Update flavor access if the new flavor is not public
flavor_id = self.object.id
for project in flavor_access:
try:
api.nova.add_tenant_to_flavor(
request, flavor_id, project)
except Exception:
exceptions.handle(
request,
_('Unable to set flavor access for project %s.') % project)
return True
class UpdateFlavor(workflows.Workflow):
slug = "update_flavor"
name = _("Edit Flavor")
finalize_button_name = _("Save")
success_message = _('Modified flavor access of "%s".')
failure_message = _('Unable to modify flavor access of "%s".')
success_url = "horizon:admin:flavors:index"
default_steps = (UpdateFlavorAccess,)
def format_status_message(self, message):
return message % self.context['flavor'].name
def handle(self, request, data):
flavor_projects = data["flavor_access"]
flavor = self.context['flavor']
# Check if the flavor info is not actually changed
try:
if flavor.is_public:
old_flavor_projects = []
else:
old_flavor_projects = [project.tenant_id for project in
self.context['current_flavor_access']]
to_remove = [project for project in old_flavor_projects if project
not in flavor_projects]
to_add = [project for project in flavor_projects if project not in
old_flavor_projects]
for project in to_remove:
api.nova.remove_tenant_from_flavor(request,
flavor.id,
project)
for project in to_add:
api.nova.add_tenant_to_flavor(request,
flavor.id,
project)
return True
except Exception:
# Error message will be shown by the workflow view.
return False
|
|
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off,
rrr, profile) = __common__.init(__name__, '[vr2]')
# Python
from itertools import izip
# Scientific
import pandas as pd
import numpy as np
from numpy.linalg import svd
#from numba import autojit
# HotSpotter
from hscom import helpers
def score_chipmatch_csum(chipmatch):
(_, cx2_fs, _) = chipmatch
cx2_score = np.array([np.sum(fs) for fs in cx2_fs])
return cx2_score
def score_chipmatch_nsum(hs, qcx, chipmatch, qdat):
raise NotImplementedError('nsum')
def score_chipmatch_nunique(hs, qcx, chipmatch, qdat):
raise NotImplementedError('nunique')
def enforce_one_name(hs, cx2_score, chipmatch=None, cx2_chipscore=None):
'this is a hack to make the same name only show up once in the top ranked list'
if chipmatch is not None:
(_, cx2_fs, _) = chipmatch
cx2_chipscore = np.array([np.sum(fs) for fs in cx2_fs])
nx2_cxs = hs.get_nx2_cxs()
cx2_score = np.array(cx2_score)
for nx, cxs in enumerate(nx2_cxs):
if len(cxs) < 2 or nx <= 1:
continue
#print(cxs)
# zero the cxs with the lowest csum score
sortx = cx2_chipscore[cxs].argsort()
cxs_to_zero = np.array(cxs)[sortx[0:-1]]
cx2_score[cxs_to_zero] = 0
return cx2_score
def score_chipmatch_pos(hs, qcx, chipmatch, qdat, rule='borda'):
(cx2_fm, cx2_fs, cx2_fk) = chipmatch
K = qdat.cfg.nn_cfg.K
isWeighted = qdat.cfg.agg_cfg.isWeighted
# Create voting vectors of top K utilities
qfx2_utilities = _chipmatch2_utilities(hs, qcx, chipmatch, K)
# Run Positional Scoring Rule
altx2_score, altx2_tnx = positional_scoring_rule(qfx2_utilities, rule, isWeighted)
# Map alternatives back to chips/names
cx2_score, nx2_score = get_scores_from_altx2_score(hs, qcx, altx2_score, altx2_tnx)
# HACK HACK HACK!!!
#cx2_score = enforce_one_name_per_cscore(hs, cx2_score, chipmatch)
return cx2_score, nx2_score
# chipmatch = qcx2_chipmatch[qcx]
def score_chipmatch_PL(hs, qcx, chipmatch, qdat):
K = qdat.cfg.nn_cfg.K
max_alts = qdat.cfg.agg_cfg.max_alts
isWeighted = qdat.cfg.agg_cfg.isWeighted
# Create voting vectors of top K utilities
qfx2_utilities = _chipmatch2_utilities(hs, qcx, chipmatch, K)
qfx2_utilities = _filter_utilities(qfx2_utilities, max_alts)
# Run the Plackett-Luce model
# 1) create the Plackett-Luce pairwise-breaking matrix
if isWeighted:
PL_matrix, altx2_tnx = _utilities2_weighted_pairwise_breaking(qfx2_utilities)
else:
PL_matrix, altx2_tnx = _utilities2_pairwise_breaking(qfx2_utilities)
# 2) find the gamma vector which minimizes || PL_matrix * gamma || s.t. gamma > 0
gamma = _optimize(PL_matrix)
# Find the probability each alternative is #1
altx2_prob = _PL_score(gamma)
#print('[vote] gamma = %r' % gamma)
#print('[vote] altx2_prob = %r' % altx2_prob)
# Use probabilities as scores
cx2_score, nx2_score = get_scores_from_altx2_score(hs, qcx, altx2_prob, altx2_tnx)
# HACK HACK HACK!!!
#cx2_score = enforce_one_name_per_cscore(hs, cx2_score, chipmatch)
return cx2_score, nx2_score
TMP = []
def _optimize(M):
global TMP
#print('[vote] optimize')
if M.size == 0:
return np.array([])
(u, s, v) = svd(M)
x = np.abs(v[-1])
check = np.abs(M.dot(x)) < 1E-9
if not all(check):
raise Exception('SVD method failed miserably')
#tmp1 = []
#tmp1 += [('[vote] x=%r' % x)]
#tmp1 += [('[vote] M.dot(x).sum() = %r' % M.dot(x).sum())]
#tmp1 += [('[vote] M.dot(np.abs(x)).sum() = %r' % M.dot(np.abs(x)).sum())]
#TMP += [tmp1]
return x
def _PL_score(gamma):
#print('[vote] computing probabilities')
nAlts = len(gamma)
altx2_prob = np.zeros(nAlts)
for ax in xrange(nAlts):
altx2_prob[ax] = gamma[ax] / np.sum(gamma)
#print('[vote] altx2_prob: '+str(altx2_prob))
#print('[vote] sum(prob): '+str(sum(altx2_prob)))
return altx2_prob
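# Worked example for _PL_score: gamma = [2.0, 1.0, 1.0] normalizes to
# altx2_prob = [0.5, 0.25, 0.25], i.e. each alternative's share of the total.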
def get_scores_from_altx2_score(hs, qcx, altx2_prob, altx2_tnx):
nx2_score = np.zeros(len(hs.tables.nx2_name))
cx2_score = np.zeros(len(hs.tables.cx2_cid) + 1)
nx2_cxs = hs.get_nx2_cxs()
for altx, prob in enumerate(altx2_prob):
tnx = altx2_tnx[altx]
if tnx < 0: # account for temporary names
cx2_score[-tnx] = prob
nx2_score[1] += prob
else:
nx2_score[tnx] = prob
for cx in nx2_cxs[tnx]:
if cx == qcx:
continue
cx2_score[cx] = prob
return cx2_score, nx2_score
def _chipmatch2_utilities(hs, qcx, chipmatch, K):
'''
returns qfx2_utilities
fx1 : [(cx_0, tnx_0, fs_0, fk_0), ..., (cx_m, tnx_m, fs_m, fk_m)]
fx2 : [(cx_0, tnx_0, fs_0, fk_0), ..., (cx_m, tnx_m, fs_m, fk_m)]
...
fxN : [(cx_0, tnx_0, fs_0, fk_0), ..., (cx_m, tnx_m, fs_m, fk_m)]
'''
#print('[vote] computing utilities')
cx2_nx = hs.tables.cx2_nx
nQFeats = len(hs.feats.cx2_kpts[qcx])
# Stack the feature matches
(cx2_fm, cx2_fs, cx2_fk) = chipmatch
cxs = np.hstack([[cx] * len(cx2_fm[cx]) for cx in xrange(len(cx2_fm))])
cxs = np.array(cxs, np.int)
fms = np.vstack(cx2_fm)
# Get the individual feature match lists
qfxs = fms[:, 0]
fss = np.hstack(cx2_fs)
fks = np.hstack(cx2_fk)
qfx2_utilities = [[] for _ in xrange(nQFeats)]
for cx, qfx, fk, fs in izip(cxs, qfxs, fks, fss):
nx = cx2_nx[cx]
# Apply temporary uniquish name
tnx = nx if nx >= 2 else -cx
utility = (cx, tnx, fs, fk)
qfx2_utilities[qfx].append(utility)
for qfx in xrange(len(qfx2_utilities)):
utilities = qfx2_utilities[qfx]
utilities = sorted(utilities, key=lambda tup: tup[3])
qfx2_utilities[qfx] = utilities
return qfx2_utilities
def _filter_utilities(qfx2_utilities, max_alts=200):
print('[vote] filtering utilities')
tnxs = [util[1] for utils in qfx2_utilities for util in utils]
if len(tnxs) == 0:
return qfx2_utilities
tnxs = np.array(tnxs)
tnxs_min = tnxs.min()
tnx2_freq = np.bincount(tnxs - tnxs_min)
nAlts = (tnx2_freq > 0).sum()
nRemove = max(0, nAlts - max_alts)
print(' * removing %r/%r alternatives' % (nRemove, nAlts))
if nRemove > 0: # remove least frequent names
most_freq_tnxs = tnx2_freq.argsort()[::-1] + tnxs_min
keep_tnxs = set(most_freq_tnxs[0:max_alts].tolist())
for qfx in xrange(len(qfx2_utilities)):
utils = qfx2_utilities[qfx]
qfx2_utilities[qfx] = [util for util in utils if util[1] in keep_tnxs]
return qfx2_utilities
def _utilities2_pairwise_breaking(qfx2_utilities):
print('[vote] building pairwise matrix')
hstack = np.hstack
cartesian = helpers.cartesian
tnxs = [util[1] for utils in qfx2_utilities for util in utils]
altx2_tnx = pd.unique(tnxs)
tnx2_altx = {nx: altx for altx, nx in enumerate(altx2_tnx)}
nUtilities = len(qfx2_utilities)
nAlts = len(altx2_tnx)
altxs = np.arange(nAlts)
pairwise_mat = np.zeros((nAlts, nAlts))
qfx2_porder = [np.array([tnx2_altx[util[1]] for util in utils])
for utils in qfx2_utilities]
def sum_win(ij):  # pairwise wins accumulate on the off-diagonal
pairwise_mat[ij[0], ij[1]] += 1
def sum_loss(ij):  # pairwise losses accumulate on the loser's diagonal
pairwise_mat[ij[1], ij[1]] -= 1
nVoters = 0
for qfx in xrange(nUtilities):
# partial and complement order over alternatives
porder = pd.unique(qfx2_porder[qfx])
nReport = len(porder)
if nReport == 0:
continue
#sys.stdout.write('.')
corder = np.setdiff1d(altxs, porder)
# pairwise winners and losers
pw_winners = [porder[r:r + 1] for r in xrange(nReport)]
pw_losers = [hstack((corder, porder[r + 1:])) for r in xrange(nReport)]
pw_iter = izip(pw_winners, pw_losers)
pw_votes_ = [cartesian((winner, losers)) for winner, losers in pw_iter]
pw_votes = np.vstack(pw_votes_)
#pw_votes = [(w,l) for votes in pw_votes_ for w,l in votes if w != l]
map(sum_win, iter(pw_votes))
map(sum_loss, iter(pw_votes))
nVoters += 1
#print('')
PLmatrix = pairwise_mat / nVoters
# PLmatrix.sum(0) sums over rows, i.e. gives the column sums.
# For a properly built breaking matrix every column should sum to zero:
# PLmatrix.sum(0) = 0
#print('CheckMat = %r ' % all(np.abs(PLmatrix.sum(0)) < 1E-9))
return PLmatrix, altx2_tnx
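# Tiny worked example of the breaking above: with alternatives {A, B} and one
# voter whose partial order reports only A, corder = [B], so the single vote
# (A, B) adds +1 at [A, B] and -1 at [B, B]; after dividing by nVoters every
# column of PLmatrix sums to zero, which is what the CheckMat line verifies.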
def _get_alts_from_utilities(qfx2_utilities):
# get temp name indexes
tnxs = [util[1] for utils in qfx2_utilities for util in utils]
altx2_tnx = pd.unique(tnxs)
tnx2_altx = {nx: altx for altx, nx in enumerate(altx2_tnx)}
nUtilities = len(qfx2_utilities)
nAlts = len(altx2_tnx)
altxs = np.arange(nAlts)
return tnxs, altx2_tnx, tnx2_altx, nUtilities, nAlts, altxs
def _utilities2_weighted_pairwise_breaking(qfx2_utilities):
print('[vote] building pairwise matrix')
tnxs, altx2_tnx, tnx2_altx, nUtilities, nAlts, altxs = _get_alts_from_utilities(qfx2_utilities)
pairwise_mat = np.zeros((nAlts, nAlts))
# agent to alternative vote vectors
qfx2_porder = [np.array([tnx2_altx[util[1]] for util in utils]) for utils in qfx2_utilities]
# agent to alternative weight/utility vectors
qfx2_worder = [np.array([util[2] for util in utils]) for utils in qfx2_utilities]
nVoters = 0
for qfx in xrange(nUtilities):
# partial and complement order over alternatives
porder = qfx2_porder[qfx]
worder = qfx2_worder[qfx]
_, idx = np.unique(porder, return_inverse=True)
idx = np.sort(idx)
porder = porder[idx]
worder = worder[idx]
nReport = len(porder)
if nReport == 0:
continue
#sys.stdout.write('.')
corder = np.setdiff1d(altxs, porder)
nUnreport = len(corder)
# pairwise winners and losers
for r_win in xrange(0, nReport):
# for each preferred alternative
i = porder[r_win]
wi = worder[r_win]
# count the reported victories: i > j
for r_lose in xrange(r_win + 1, nReport):
j = porder[r_lose]
#wj = worder[r_lose]
#w = wi - wj
w = wi
pairwise_mat[i, j] += w
pairwise_mat[j, j] -= w
# count the un-reported victories: i > j
for r_lose in xrange(nUnreport):
j = corder[r_lose]
#wj = 0
#w = wi - wj
w = wi
pairwise_mat[i, j] += w
pairwise_mat[j, j] -= w
nVoters += wi
#print('')
PLmatrix = pairwise_mat / nVoters
# PLmatrix.sum(0) sums over rows, i.e. gives the column sums.
# For a properly built breaking matrix every column should sum to zero:
# PLmatrix.sum(0) = 0
#print('CheckMat = %r ' % all(np.abs(PLmatrix.sum(0)) < 1E-9))
return PLmatrix, altx2_tnx
# Positional Scoring Rules
def positional_scoring_rule(qfx2_utilities, rule, isWeighted):
tnxs, altx2_tnx, tnx2_altx, nUtilities, nAlts, altxs = _get_alts_from_utilities(qfx2_utilities)
# agent to alternative vote vectors
qfx2_porder = [np.array([tnx2_altx[util[1]] for util in utils]) for utils in qfx2_utilities]
# agent to alternative weight/utility vectors
if isWeighted:
qfx2_worder = [np.array([util[2] for util in utils]) for utils in qfx2_utilities]
else:
qfx2_worder = [np.array([ 1.0 for util in utils]) for utils in qfx2_utilities]
K = max(map(len, qfx2_utilities))
if rule == 'borda':
score_vec = np.arange(0, K)[::-1] + 1
if rule == 'plurality':
score_vec = np.zeros(K)
score_vec[0] = 1
if rule == 'topk':
score_vec = np.ones(K)
score_vec = np.array(score_vec, dtype=np.int)
#print('----')
#title = 'Rule=%s Weighted=%r ' % (rule, not qfx2_weight is None)
#print('[vote] ' + title)
#print('[vote] score_vec = %r' % (score_vec,))
altx2_score = _positional_score(altxs, score_vec, qfx2_porder, qfx2_worder)
#ranked_candiates = alt_score.argsort()[::-1]
#ranked_scores = alt_score[ranked_candiates]
#viz_votingrule_table(ranked_candiates, ranked_scores, correct_altx, title, fnum)
return altx2_score, altx2_tnx
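# Example score vectors for K = 3 (the longest reported ranking has 3 entries):
#   borda     -> [3, 2, 1]   (first choice worth K points, last worth 1)
#   plurality -> [1, 0, 0]   (only the first choice counts)
#   topk      -> [1, 1, 1]   (every reported choice counts equally)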
def _positional_score(altxs, score_vec, qfx2_porder, qfx2_worder):
nAlts = len(altxs)
altx2_score = np.zeros(nAlts)
# For each voter
for qfx in xrange(len(qfx2_porder)):
partial_order = qfx2_porder[qfx]
weights = qfx2_worder[qfx]
# Loop over the ranked alternatives applying positional/meta weight
for ix, altx in enumerate(partial_order):
#if altx == -1: continue
altx2_score[altx] += weights[ix] * score_vec[ix]
return altx2_score
|
|
# This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The Query type hierarchy for DBCore.
"""
import re
from beets import util
from datetime import datetime, timedelta
class Query(object):
"""An abstract class representing a query into the item database.
"""
def clause(self):
"""Generate an SQLite expression implementing the query.
Returns (clause, subvals), where clause is a valid SQLite
WHERE clause implementing the query and subvals is a list of
values to be substituted for the ?s in the clause.
"""
return None, ()
def match(self, item):
"""Check whether this query matches a given Item. Can be used to
perform queries on arbitrary sets of Items.
"""
raise NotImplementedError
class FieldQuery(Query):
"""An abstract query that searches in a specific field for a
pattern. Subclasses must provide a `value_match` class method, which
determines whether a certain pattern string matches a certain value
string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite.
"""
def __init__(self, field, pattern, fast=True):
self.field = field
self.pattern = pattern
self.fast = fast
def col_clause(self):
return None, ()
def clause(self):
if self.fast:
return self.col_clause()
else:
# Matching a flexattr. This is a slow query.
return None, ()
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings.
"""
raise NotImplementedError()
def match(self, item):
return self.value_match(self.pattern, item.get(self.field))
class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field."""
def col_clause(self):
return self.field + " = ?", [self.pattern]
@classmethod
def value_match(cls, pattern, value):
return pattern == value
class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching
them.
"""
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. The value
may have any type.
"""
return cls.string_match(pattern, util.as_string(value))
@classmethod
def string_match(cls, pattern, value):
"""Determine whether the value matches the pattern. Both
arguments are strings. Subclasses implement this method.
"""
raise NotImplementedError()
class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field."""
def col_clause(self):
pattern = (self.pattern
.replace('\\', '\\\\')
.replace('%', '\\%')
.replace('_', '\\_'))
search = '%' + pattern + '%'
clause = self.field + " like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern, value):
return pattern.lower() in value.lower()
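# Escaping sketch: a pattern such as '50%_off' is rewritten to '50\%\_off' so
# that the LIKE wildcards match literally; the final search value is then
# '%50\%\_off%' combined with the ESCAPE '\' clause built above.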
class RegexpQuery(StringFieldQuery):
"""A query that matches a regular expression in a specific item
field.
"""
@classmethod
def string_match(cls, pattern, value):
try:
res = re.search(pattern, value)
except re.error:
# Invalid regular expression.
return False
return res is not None
class BooleanQuery(MatchQuery):
"""Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean.
"""
def __init__(self, field, pattern, fast=True):
super(BooleanQuery, self).__init__(field, pattern, fast)
if isinstance(pattern, basestring):
self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern)
class BytesQuery(MatchQuery):
"""Match a raw bytes field (i.e., a path). This is a necessary hack
to work around the `sqlite3` module's desire to treat `str` and
`unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values.
"""
def __init__(self, field, pattern):
super(BytesQuery, self).__init__(field, pattern)
# Use a buffer representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode.
if isinstance(self.pattern, basestring):
# Implicitly coerce Unicode strings to their bytes
# equivalents.
if isinstance(self.pattern, unicode):
self.pattern = self.pattern.encode('utf8')
self.buf_pattern = buffer(self.pattern)
elif isinstance(self.pattern, buffer):
self.buf_pattern = self.pattern
self.pattern = bytes(self.pattern)
def col_clause(self):
return self.field + " = ?", [self.buf_pattern]
class NumericQuery(FieldQuery):
"""Matches numeric fields. A syntax using Ruby-style range ellipses
(``..``) lets users specify one- or two-sided ranges. For example,
``year:2001..`` finds music released since the turn of the century.
"""
def _convert(self, s):
"""Convert a string to a numeric type (float or int). If the
string cannot be converted, return None.
"""
# This is really just a bit of fun premature optimization.
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return None
def __init__(self, field, pattern, fast=True):
super(NumericQuery, self).__init__(field, pattern, fast)
parts = pattern.split('..', 1)
if len(parts) == 1:
# No range.
self.point = self._convert(parts[0])
self.rangemin = None
self.rangemax = None
else:
# One- or two-sided range.
self.point = None
self.rangemin = self._convert(parts[0])
self.rangemax = self._convert(parts[1])
def match(self, item):
value = getattr(item, self.field)
if isinstance(value, basestring):
value = self._convert(value)
if self.point is not None:
return value == self.point
else:
if self.rangemin is not None and value < self.rangemin:
return False
if self.rangemax is not None and value > self.rangemax:
return False
return True
def col_clause(self):
if self.point is not None:
return self.field + '=?', (self.point,)
else:
if self.rangemin is not None and self.rangemax is not None:
return (u'{0} >= ? AND {0} <= ?'.format(self.field),
(self.rangemin, self.rangemax))
elif self.rangemin is not None:
return u'{0} >= ?'.format(self.field), (self.rangemin,)
elif self.rangemax is not None:
return u'{0} <= ?'.format(self.field), (self.rangemax,)
else:
return '1', ()
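# Usage sketch for NumericQuery (field names and values are illustrative):
#
#   NumericQuery('year', '2001..').col_clause()    -> ('year >= ?', (2001,))
#   NumericQuery('year', '1990..1999').col_clause()
#       -> ('year >= ? AND year <= ?', (1990, 1999))
#   NumericQuery('track', '7').col_clause()        -> ('track=?', (7,))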
class CollectionQuery(Query):
"""An abstract query class that aggregates other queries. Can be
indexed like a list to access the sub-queries.
"""
def __init__(self, subqueries=()):
self.subqueries = subqueries
# Act like a sequence.
def __len__(self):
return len(self.subqueries)
def __getitem__(self, key):
return self.subqueries[key]
def __iter__(self):
return iter(self.subqueries)
def __contains__(self, item):
return item in self.subqueries
def clause_with_joiner(self, joiner):
"""Returns a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces).
"""
clause_parts = []
subvals = []
for subq in self.subqueries:
subq_clause, subq_subvals = subq.clause()
if not subq_clause:
# Fall back to slow query.
return None, ()
clause_parts.append('(' + subq_clause + ')')
subvals += subq_subvals
clause = (' ' + joiner + ' ').join(clause_parts)
return clause, subvals
class AnyFieldQuery(CollectionQuery):
"""A query that matches if a given FieldQuery subclass matches in
any field. The individual field query class is provided to the
constructor.
"""
def __init__(self, pattern, fields, cls):
self.pattern = pattern
self.fields = fields
self.query_class = cls
subqueries = []
for field in self.fields:
subqueries.append(cls(field, pattern, True))
super(AnyFieldQuery, self).__init__(subqueries)
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
for subq in self.subqueries:
if subq.match(item):
return True
return False
class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the
query is initialized.
"""
def __setitem__(self, key, value):
self.subqueries[key] = value
def __delitem__(self, key):
del self.subqueries[key]
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('and')
def match(self, item):
return all([q.match(item) for q in self.subqueries])
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('or')
def match(self, item):
return any([q.match(item) for q in self.subqueries])
class TrueQuery(Query):
"""A query that always matches."""
def clause(self):
return '1', ()
def match(self, item):
return True
class FalseQuery(Query):
"""A query that never matches."""
def clause(self):
return '0', ()
def match(self, item):
return False
# Time/date queries.
def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch.
"""
epoch = datetime.fromtimestamp(0)
delta = date - epoch
try:
return int(delta.total_seconds())
except AttributeError:
# datetime.timedelta.total_seconds() is not available on Python 2.6
return delta.seconds + delta.days * 24 * 3600
def _parse_periods(pattern):
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
"""
parts = pattern.split('..', 1)
if len(parts) == 1:
instant = Period.parse(parts[0])
return (instant, instant)
else:
start = Period.parse(parts[0])
end = Period.parse(parts[1])
return (start, end)
class Period(object):
"""A period of time given by a date, time and precision.
Example: 2014-01-01 10:50:30 with precision 'month' represents all
instants of time during January 2014.
"""
precisions = ('year', 'month', 'day')
date_formats = ('%Y', '%Y-%m', '%Y-%m-%d')
def __init__(self, date, precision):
"""Create a period with the given date (a `datetime` object) and
precision (a string, one of "year", "month", or "day").
"""
if precision not in Period.precisions:
raise ValueError('Invalid precision ' + str(precision))
self.date = date
self.precision = precision
@classmethod
def parse(cls, string):
"""Parse a date and return a `Period` object or `None` if the
string is empty.
"""
if not string:
return None
ordinal = string.count('-')
if ordinal >= len(cls.date_formats):
raise ValueError('date is not in one of the formats '
+ ', '.join(cls.date_formats))
date_format = cls.date_formats[ordinal]
date = datetime.strptime(string, date_format)
precision = cls.precisions[ordinal]
return cls(date, precision)
def open_right_endpoint(self):
"""Based on the precision, convert the period to a precise
`datetime` for use as a right endpoint in a right-open interval.
"""
precision = self.precision
date = self.date
if 'year' == self.precision:
return date.replace(year=date.year + 1, month=1)
elif 'month' == precision:
if (date.month < 12):
return date.replace(month=date.month + 1)
else:
return date.replace(year=date.year + 1, month=1)
elif 'day' == precision:
return date + timedelta(days=1)
else:
raise ValueError('unhandled precision ' + str(precision))
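# Sketch of how Period feeds into DateInterval below:
#
#   p = Period.parse('2014-02')     # precision 'month'
#   p.open_right_endpoint()         # -> datetime(2014, 3, 1, 0, 0)
#
# so a 'month' period covers the half-open interval [2014-02-01, 2014-03-01).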
class DateInterval(object):
"""A closed-open interval of dates.
A left endpoint of None means since the beginning of time.
A right endpoint of None means towards infinity.
"""
def __init__(self, start, end):
if start is not None and end is not None and not start < end:
raise ValueError("start date {0} is not before end date {1}"
.format(start, end))
self.start = start
self.end = end
@classmethod
def from_periods(cls, start, end):
"""Create an interval with two Periods as the endpoints.
"""
end_date = end.open_right_endpoint() if end is not None else None
start_date = start.date if start is not None else None
return cls(start_date, end_date)
def contains(self, date):
if self.start is not None and date < self.start:
return False
if self.end is not None and date >= self.end:
return False
return True
def __str__(self):
return '[{0}, {1})'.format(self.start, self.end)
class DateQuery(FieldQuery):
"""Matches date fields stored as seconds since Unix epoch time.
Dates can be specified as ``year-month-day`` strings where only year
is mandatory.
The value of a date field can be matched against a date interval by
using an ellipsis interval syntax similar to that of NumericQuery.
"""
def __init__(self, field, pattern, fast=True):
super(DateQuery, self).__init__(field, pattern, fast)
start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end)
def match(self, item):
timestamp = float(item[self.field])
date = datetime.utcfromtimestamp(timestamp)
return self.interval.contains(date)
_clause_tmpl = "{0} {1} ?"
def col_clause(self):
clause_parts = []
subvals = []
if self.interval.start:
clause_parts.append(self._clause_tmpl.format(self.field, ">="))
subvals.append(_to_epoch_time(self.interval.start))
if self.interval.end:
clause_parts.append(self._clause_tmpl.format(self.field, "<"))
subvals.append(_to_epoch_time(self.interval.end))
if clause_parts:
# One- or two-sided interval.
clause = ' AND '.join(clause_parts)
else:
# Match any date.
clause = '1'
return clause, subvals
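# Usage sketch for DateQuery (field name is illustrative):
#
#   DateQuery('added', '2013..2014').col_clause()
#       -> ('added >= ? AND added < ?',
#           [<epoch seconds of 2013-01-01>, <epoch seconds of 2015-01-01>])
#
# i.e. a two-sided year range becomes a half-open [start, end) interval.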
|
|
from django import forms
import django
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.template.context import RequestContext
from django.test.client import RequestFactory
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote
import copy
class WidgetTypeSelect(forms.Widget):
def __init__(self, widgets, attrs=None):
super(WidgetTypeSelect, self).__init__(attrs)
self._widgets = widgets
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
final_attrs['class'] = 'nav nav-pills nav-stacked'
output = [u'<ul%s>' % flatatt(final_attrs)]
options = self.render_options(force_unicode(value), final_attrs['id'])
if options:
output.append(options)
output.append(u'</ul>')
output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
(final_attrs['id'], name, force_unicode(value)))
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choice, widget, id):
if widget.widget_type == selected_choice:
selected_html = u' class="active"'
else:
selected_html = ''
return (u'<li%s><a onclick="' +
'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
'$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
'"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
selected_html,
widget.widget_icon,
widget.widget_title or widget.widget_type,
widget.description)
def render_options(self, selected_choice, id):
# Normalize to strings.
output = []
for widget in self._widgets:
output.append(self.render_option(selected_choice, widget, id))
return u'\n'.join(output)
class UserWidgetAdmin(object):
model_icon = 'fa fa-dashboard'
list_display = ('widget_type', 'page_id', 'user')
list_filter = ['user', 'widget_type', 'page_id']
list_display_links = ('widget_type',)
user_fields = ['user']
hidden_menu = True
wizard_form_list = (
(_(u"Widget Type"), ('page_id', 'widget_type')),
(_(u"Widget Params"), {'callback':
"get_widget_params_form", 'convert': "convert_widget_params"})
)
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'widget_type':
widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
form_widget = WidgetTypeSelect(widgets)
return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
widget=form_widget, label=_('Widget Type'))
if 'page_id' in self.request.GET and db_field.name == 'page_id':
kwargs['widget'] = forms.HiddenInput
field = super(
UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
return field
def get_widget_params_form(self, wizard):
data = wizard.get_cleaned_data_for_step(wizard.steps.first)
widget_type = data['widget_type']
widget = widget_manager.get(widget_type)
fields = copy.deepcopy(widget.base_fields)
if 'id' in fields:
del fields['id']
return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)
def convert_widget_params(self, wizard, cleaned_data, form):
widget = UserWidget()
value = dict([(f.name, f.value()) for f in form])
widget.set_value(value)
cleaned_data['value'] = widget.value
cleaned_data['user'] = self.user
def get_list_display(self):
list_display = super(UserWidgetAdmin, self).get_list_display()
if not self.user.is_superuser:
list_display.remove('user')
return list_display
def queryset(self):
if self.user.is_superuser:
return super(UserWidgetAdmin, self).queryset()
return UserWidget.objects.filter(user=self.user)
def update_dashboard(self, obj):
try:
portal_pos = UserSettings.objects.get(
user=obj.user, key="dashboard:%s:pos" % obj.page_id)
except UserSettings.DoesNotExist:
return
pos = [[w for w in col.split(',') if w != str(
obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
def delete_model(self):
self.update_dashboard(self.obj)
super(UserWidgetAdmin, self).delete_model()
def delete_models(self, queryset):
for obj in queryset:
self.update_dashboard(obj)
super(UserWidgetAdmin, self).delete_models(queryset)
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
_widgets = None
def __init__(self):
self._widgets = {}
def register(self, widget_class):
self._widgets[widget_class.widget_type] = widget_class
return widget_class
def get(self, name):
return self._widgets[name]
def get_widgets(self, page_id):
return self._widgets.values()
widget_manager = WidgetManager()
class WidgetDataError(Exception):
def __init__(self, widget, errors):
super(WidgetDataError, self).__init__(str(errors))
self.widget = widget
self.errors = errors
class BaseWidget(forms.Form):
template = 'xadmin/widgets/base.html'
description = 'Base Widget, don\'t use it.'
widget_title = None
widget_icon = 'fa fa-plus-square'
widget_type = 'base'
base_title = None
id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
self.admin_site = dashboard.admin_site
self.request = dashboard.request
self.user = dashboard.request.user
self.convert(data)
super(BaseWidget, self).__init__(data)
if not self.is_valid():
raise WidgetDataError(self, self.errors.as_text())
self.setup()
def setup(self):
helper = FormHelper()
helper.form_tag = False
self.helper = helper
self.id = self.cleaned_data['id']
self.title = self.cleaned_data['title'] or self.base_title
if not (self.user.is_superuser or self.has_perm()):
raise PermissionDenied
@property
def widget(self):
context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
'widget_type': self.widget_type, 'form': self, 'widget': self}
self.context(context)
return loader.render_to_string(self.template, context, context_instance=RequestContext(self.request))
def context(self, context):
pass
def convert(self, data):
pass
def has_perm(self):
return False
def save(self):
value = dict([(f.name, f.value()) for f in self])
user_widget = UserWidget.objects.get(id=self.id)
user_widget.set_value(value)
user_widget.save()
def static(self, path):
return self.dashboard.static(path)
def vendor(self, *tags):
return self.dashboard.vendor(*tags)
def media(self):
return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
widget_type = 'html'
widget_icon = 'fa fa-file-o'
description = _(
u'Html Content Widget, can write any html content in widget.')
content = forms.CharField(label=_(
'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
def has_perm(self):
return True
def context(self, context):
context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
def __iter__(self):
from xadmin import site as g_admin_site
for m, ma in g_admin_site._registry.items():
yield ('%s.%s' % (m._meta.app_label, m._meta.model_name),
m._meta.verbose_name)
class ModelChoiceField(forms.ChoiceField):
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, *args, **kwargs):
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
forms.Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.widget.choices = self.choices
def __deepcopy__(self, memo):
result = forms.Field.__deepcopy__(self, memo)
return result
def _get_choices(self):
return ModelChoiceIterator(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
def to_python(self, value):
if isinstance(value, ModelBase):
return value
app_label, model_name = value.lower().split('.')
return django.apps.apps.get_model(app_label, model_name)
def prepare_value(self, value):
if isinstance(value, ModelBase):
value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
return value
def valid_value(self, value):
value = self.prepare_value(value)
for k, v in self.choices:
if value == smart_unicode(k):
return True
return False
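# Minimal sketch of the conversion round-trip above ('blog.post' is a
# hypothetical "app_label.model_name" label):
#
#   field = ModelChoiceField()
#   field.to_python('blog.post')    # -> the Post model class, via apps.get_model()
#   field.prepare_value(Post)       # -> 'blog.post'
#
# valid_value() only accepts models registered with the xadmin site, since the
# choices are generated from site._registry by ModelChoiceIterator.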
class ModelBaseWidget(BaseWidget):
app_label = None
model_name = None
model_perm = 'change'
model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
super(ModelBaseWidget, self).__init__(dashboard, data)
def setup(self):
self.model = self.cleaned_data['model']
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
super(ModelBaseWidget, self).setup()
def has_perm(self):
return self.dashboard.has_model_perm(self.model, self.model_perm)
def filte_choices_model(self, model, modeladmin):
return self.dashboard.has_model_perm(model, self.model_perm)
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
def get_view_class(self, view_class, model=None, **opts):
admin_class = self.admin_site._registry.get(model) if model else None
return self.admin_site.get_view_class(view_class, admin_class, **opts)
def get_factory(self):
return RequestFactory()
def setup_request(self, request):
request.user = self.user
request.session = self.request.session
return request
def make_get_request(self, path, data={}, **extra):
req = self.get_factory().get(path, data, **extra)
return self.setup_request(req)
def make_post_request(self, path, data={}, **extra):
req = self.get_factory().post(path, data, **extra)
return self.setup_request(req)
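# The PartialBaseWidget helpers above build synthetic requests with Django's
# RequestFactory and copy the dashboard user and session onto them, so widgets
# such as ListWidget and AddFormWidget below can render full admin views
# inline without an extra HTTP round-trip.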
@widget_manager.register
class QuickBtnWidget(BaseWidget):
widget_type = 'qbutton'
description = _(u'Quick button Widget, quickly open any page.')
template = "xadmin/widgets/qbutton.html"
base_title = _(u"Quick Buttons")
widget_icon = 'fa fa-caret-square-o-right'
def convert(self, data):
self.q_btns = data.pop('btns', [])
def get_model(self, model_or_label):
if isinstance(model_or_label, ModelBase):
return model_or_label
else:
return django.apps.apps.get_model(*model_or_label.lower().split('.'))
def context(self, context):
btns = []
for b in self.q_btns:
btn = {}
if 'model' in b:
model = self.get_model(b['model'])
if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
continue
btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, b.get('view', 'changelist')))
btn['title'] = model._meta.verbose_name
btn['icon'] = self.dashboard.get_model_icon(model)
else:
try:
btn['url'] = reverse(b['url'])
except NoReverseMatch:
btn['url'] = b['url']
if 'title' in b:
btn['title'] = b['title']
if 'icon' in b:
btn['icon'] = b['icon']
btns.append(btn)
context.update({'btns': btns})
def has_perm(self):
return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'list'
description = _(u'Any Objects list Widget.')
template = "xadmin/widgets/list.html"
model_perm = 'view'
widget_icon = 'fa fa-align-justify'
def convert(self, data):
self.list_params = data.pop('params', {})
self.list_count = data.pop('count', 10)
def setup(self):
super(ListWidget, self).setup()
if not self.title:
self.title = self.model._meta.verbose_name_plural
req = self.make_get_request("", self.list_params)
self.list_view = self.get_view_class(ListAdminView, self.model)(req)
if self.list_count:
self.list_view.list_per_page = self.list_count
def context(self, context):
list_view = self.list_view
list_view.make_result_list()
base_fields = list_view.base_list_display
if len(base_fields) > 5:
base_fields = base_fields[0:5]
context['result_headers'] = [c for c in list_view.result_headers(
).cells if c.field_name in base_fields]
context['results'] = [[c for c in r.cells if c.field_name in base_fields]
for r in list_view.results()]
context['result_count'] = list_view.result_count
context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'addform'
description = _(u'Add any model object Widget.')
template = "xadmin/widgets/addform.html"
model_perm = 'add'
widget_icon = 'fa fa-plus'
def setup(self):
super(AddFormWidget, self).setup()
if self.title is None:
self.title = _('Add %s') % self.model._meta.verbose_name
req = self.make_get_request("")
self.add_view = self.get_view_class(
CreateAdminView, self.model, list_per_page=10)(req)
self.add_view.instance_forms()
def context(self, context):
helper = FormHelper()
helper.form_tag = False
context.update({
'addform': self.add_view.form_obj,
'addhelper': helper,
'addurl': self.add_view.model_admin_url('add'),
'model': self.model
})
def media(self):
return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
widget_customiz = True
widgets = []
title = _(u"Dashboard")
icon = None
def get_page_id(self):
return self.request.path
def get_portal_key(self):
return "dashboard:%s:pos" % self.get_page_id()
@filter_hook
def get_widget(self, widget_or_id, data=None):
try:
if isinstance(widget_or_id, UserWidget):
widget = widget_or_id
else:
widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
wid = widget_manager.get(widget.widget_type)
class widget_with_perm(wid):
def context(self, context):
super(widget_with_perm, self).context(context)
context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
wid_instance = widget_with_perm(self, data or widget.get_value())
return wid_instance
except UserWidget.DoesNotExist:
return None
@filter_hook
def get_init_widget(self):
portal = []
widgets = self.widgets
for col in widgets:
portal_col = []
for opts in col:
try:
widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
widget.set_value(opts)
widget.save()
portal_col.append(self.get_widget(widget))
except (PermissionDenied, WidgetDataError):
widget.delete()
continue
portal.append(portal_col)
UserSettings(
user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()
return portal
@filter_hook
def get_widgets(self):
if self.widget_customiz:
portal_pos = UserSettings.objects.filter(
user=self.user, key=self.get_portal_key())
if len(portal_pos):
portal_pos = portal_pos[0].value
widgets = []
if portal_pos:
user_widgets = dict([(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
for col in portal_pos.split('|'):
ws = []
for wid in col.split(','):
try:
widget = user_widgets.get(int(wid))
if widget:
ws.append(self.get_widget(widget))
except Exception as e:
import logging
logging.error(e, exc_info=True)
widgets.append(ws)
return widgets
return self.get_init_widget()
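# Layout format used by get_init_widget()/get_widgets() above: the
# UserSettings value stored under "dashboard:<page_id>:pos" is a string of
# widget ids where ',' separates widgets inside a column and '|' separates
# columns, e.g. "1,2|3|4,5" describes three columns holding widgets
# [1, 2], [3] and [4, 5].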
@filter_hook
def get_title(self):
return self.title
@filter_hook
def get_context(self):
new_context = {
'title': self.get_title(),
'icon': self.icon,
'portal_key': self.get_portal_key(),
'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
'add_widget_url': self.get_admin_url('%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
"?user=%s&page_id=%s&_redirect=%s" % (self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
}
context = super(Dashboard, self).get_context()
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response('xadmin/views/dashboard.html', self.get_context())
@csrf_protect_m
def post(self, request, *args, **kwargs):
if 'id' in request.POST:
widget_id = request.POST['id']
if request.POST.get('_delete', None) != 'on':
widget = self.get_widget(widget_id, request.POST.copy())
widget.save()
else:
try:
widget = UserWidget.objects.get(
user=self.user, page_id=self.get_page_id(), id=widget_id)
widget.delete()
try:
portal_pos = UserSettings.objects.get(user=self.user, key="dashboard:%s:pos" % self.get_page_id())
pos = [[w for w in col.split(',') if w != str(
widget_id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
except Exception:
pass
except UserWidget.DoesNotExist:
pass
return self.get(request)
@filter_hook
def get_media(self):
media = super(Dashboard, self).get_media() + \
self.vendor('xadmin.page.dashboard.js', 'xadmin.page.dashboard.css')
if self.widget_customiz:
media = media + self.vendor('xadmin.plugin.portal.js')
for ws in self.widgets:
for widget in ws:
media = media + widget.media()
return media
class ModelDashboard(Dashboard, ModelAdminView):
title = _(u"%s Dashboard")
def get_page_id(self):
return 'model:%s/%s' % self.model_info
@filter_hook
def get_title(self):
return self.title % force_unicode(self.obj)
def init_request(self, object_id, *args, **kwargs):
self.obj = self.get_object(unquote(object_id))
if not self.has_view_permission(self.obj):
raise PermissionDenied
if self.obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)})
@filter_hook
def get_context(self):
new_context = {
'has_change_permission': self.has_change_permission(self.obj),
'object': self.obj,
}
context = Dashboard.get_context(self)
context.update(ModelAdminView.get_context(self))
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An extremely simple WSGI web application framework.
This module exports three primary classes: Request, Response, and
RequestHandler. You implement a web application by subclassing RequestHandler.
As WSGI requests come in, they are passed to instances of your RequestHandlers.
The RequestHandler class provides access to the easy-to-use Request and
Response objects so you can interpret the request and write the response with
no knowledge of the esoteric WSGI semantics. Here is a simple example:
from google.appengine.ext import webapp
import wsgiref.simple_server
class MainPage(webapp.RequestHandler):
def get(self):
self.response.out.write(
'<html><body><form action="/hello" method="post">'
'Name: <input name="name" type="text" size="20"> '
'<input type="submit" value="Say Hello"></form></body></html>')
class HelloPage(webapp.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('Hello, %s' % self.request.get('name'))
application = webapp.WSGIApplication([
('/', MainPage),
('/hello', HelloPage)
], debug=True)
server = wsgiref.simple_server.make_server('', 8080, application)
print 'Serving on port 8080...'
server.serve_forever()
The WSGIApplication class maps URI regular expressions to your RequestHandler
classes. It is a WSGI-compatible application object, so you can use it in
conjunction with wsgiref to make your web application into, e.g., a CGI
script or a simple HTTP server, as in the example above.
The framework does not support streaming output. All output from a response
is stored in memory before it is written.
"""
import cgi
import StringIO
import logging
import re
import sys
import traceback
import urlparse
import webob
import wsgiref.handlers
import wsgiref.headers
import wsgiref.util
wsgiref.handlers.BaseHandler.os_environ = {}
RE_FIND_GROUPS = re.compile(r'\(.*?\)')
_CHARSET_RE = re.compile(r';\s*charset=([^;\s]*)', re.I)
class Error(Exception):
"""Base of all exceptions in the webapp module."""
pass
class NoUrlFoundError(Error):
"""Thrown when RequestHandler.get_url() fails."""
pass
class Request(webob.Request):
"""Abstraction for an HTTP request.
Properties:
uri: the complete URI requested by the user
scheme: 'http' or 'https'
host: the host, including the port
path: the path up to the ';' or '?' in the URL
parameters: the part of the URL between the ';' and the '?', if any
query: the part of the URL after the '?'
You can access parsed query and POST values with the get() method; do not
parse the query string yourself.
"""
request_body_tempfile_limit = 0
uri = property(lambda self: self.url)
query = property(lambda self: self.query_string)
def __init__(self, environ):
"""Constructs a Request object from a WSGI environment.
If the charset isn't specified in the Content-Type header, defaults
to UTF-8.
Args:
environ: A WSGI-compliant environment dictionary.
"""
match = _CHARSET_RE.search(environ.get('CONTENT_TYPE', ''))
if match:
charset = match.group(1).lower()
else:
charset = 'utf-8'
webob.Request.__init__(self, environ, charset=charset,
unicode_errors='ignore', decode_param_names=True)
def get(self, argument_name, default_value='', allow_multiple=False):
"""Returns the query or POST argument with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
Args:
argument_name: the name of the query or POST argument
default_value: the value to return if the given argument is not present
allow_multiple: return a list of values with the given name (deprecated)
Returns:
If allow_multiple is False (which it is by default), we return the first
value with the given name in the request. If it is True, we always
return a list.
"""
param_value = self.get_all(argument_name)
if allow_multiple:
return param_value
else:
if len(param_value) > 0:
return param_value[0]
else:
return default_value
def get_all(self, argument_name):
"""Returns a list of query or POST arguments with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
Args:
argument_name: the name of the query or POST argument
Returns:
A (possibly empty) list of values.
"""
if self.charset:
argument_name = argument_name.encode(self.charset)
param_value = self.params.getall(argument_name)
for i in xrange(len(param_value)):
if isinstance(param_value[i], cgi.FieldStorage):
param_value[i] = param_value[i].value
return param_value
def arguments(self):
"""Returns a list of the arguments provided in the query and/or POST.
The return value is a list of strings.
"""
return list(set(self.params.keys()))
def get_range(self, name, min_value=None, max_value=None, default=0):
"""Parses the given int argument, limiting it to the given range.
Args:
name: the name of the argument
min_value: the minimum int value of the argument (if any)
max_value: the maximum int value of the argument (if any)
default: the default value of the argument if it is not given
Returns:
An int within the given range for the argument
"""
try:
value = int(self.get(name, default))
except ValueError:
value = default
if max_value is not None:
value = min(value, max_value)
if min_value is not None:
value = max(value, min_value)
return value
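# Illustrative usage inside a handler (a sketch only; 'page', 'tag' and 'name'
# are hypothetical argument names):
#
#   page = self.request.get_range('page', min_value=1, max_value=100, default=1)
#   tags = self.request.get_all('tag')            # every value passed for ?tag=...
#   name = self.request.get('name', 'anonymous')  # first value, or the default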
class Response(object):
"""Abstraction for an HTTP response.
Properties:
out: file pointer for the output stream
headers: wsgiref.headers.Headers instance representing the output headers
"""
def __init__(self):
"""Constructs a response with the default settings."""
self.out = StringIO.StringIO()
self.__wsgi_headers = []
self.headers = wsgiref.headers.Headers(self.__wsgi_headers)
self.headers['Content-Type'] = 'text/html; charset=utf-8'
self.headers['Cache-Control'] = 'no-cache'
self.set_status(200)
def set_status(self, code, message=None):
"""Sets the HTTP status code of this response.
Args:
code: the HTTP status code to set.
message: the HTTP status string to use. If no status string is given,
we use the default from the HTTP/1.1 specification.
"""
if not message:
message = Response.http_status_message(code)
self.__status = (code, message)
def clear(self):
"""Clears all data written to the output stream so that it is empty."""
self.out.seek(0)
self.out.truncate(0)
def wsgi_write(self, start_response):
"""Writes this response using WSGI semantics with the given WSGI function.
Args:
start_response: the WSGI-compatible start_response function
"""
body = self.out.getvalue()
if isinstance(body, unicode):
body = body.encode('utf-8')
elif self.headers.get('Content-Type', '').endswith('; charset=utf-8'):
try:
body.decode('utf-8')
except UnicodeError as e:
logging.warning('Response written is not UTF-8: %s', e)
if (self.headers.get('Cache-Control') == 'no-cache' and
not self.headers.get('Expires')):
self.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
self.headers['Content-Length'] = str(len(body))
write = start_response('%d %s' % self.__status, self.__wsgi_headers)
write(body)
self.out.close()
def http_status_message(code):
"""Returns the default HTTP status message for the given code.
Args:
code: the HTTP code for which we want a message
"""
if code not in Response.__HTTP_STATUS_MESSAGES:
raise Error('Invalid HTTP status code: %d' % code)
return Response.__HTTP_STATUS_MESSAGES[code]
http_status_message = staticmethod(http_status_message)
__HTTP_STATUS_MESSAGES = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Moved Temporarily',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: 'Unused',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Time-out',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Large',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Time-out',
505: 'HTTP Version not supported'
}
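# Illustrative sketch of writing a response from a handler: output goes to the
# in-memory buffer and the headers mapping defined above, and is flushed by
# wsgi_write() at the end of the request.
#
#   self.response.set_status(201)
#   self.response.headers['Content-Type'] = 'text/plain'
#   self.response.out.write('created')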
class RequestHandler(object):
"""Our base HTTP request handler. Clients should subclass this class.
Subclasses should override get(), post(), head(), options(), etc to handle
different HTTP methods.
"""
def initialize(self, request, response):
"""Initializes this request handler with the given Request and Response."""
self.request = request
self.response = response
def get(self, *args):
"""Handler method for GET requests."""
self.error(405)
def post(self, *args):
"""Handler method for POST requests."""
self.error(405)
def head(self, *args):
"""Handler method for HEAD requests."""
self.error(405)
def options(self, *args):
"""Handler method for OPTIONS requests."""
self.error(405)
def put(self, *args):
"""Handler method for PUT requests."""
self.error(405)
def delete(self, *args):
"""Handler method for DELETE requests."""
self.error(405)
def trace(self, *args):
"""Handler method for TRACE requests."""
self.error(405)
def error(self, code):
"""Clears the response output stream and sets the given HTTP error code.
Args:
code: the HTTP status error code (e.g., 501)
"""
self.response.set_status(code)
self.response.clear()
def redirect(self, uri, permanent=False):
"""Issues an HTTP redirect to the given relative URL.
Args:
uri: a relative or absolute URI (e.g., '../flowers.html')
permanent: if true, we use a 301 redirect instead of a 302 redirect
"""
if permanent:
self.response.set_status(301)
else:
self.response.set_status(302)
absolute_url = urlparse.urljoin(self.request.uri, uri)
self.response.headers['Location'] = str(absolute_url)
self.response.clear()
def handle_exception(self, exception, debug_mode):
"""Called if this handler throws an exception during execution.
The default behavior is to call self.error(500) and print a stack trace
if debug_mode is True.
Args:
exception: the exception that was thrown
debug_mode: True if the web application is running in debug mode
"""
self.error(500)
logging.exception(exception)
if debug_mode:
lines = ''.join(traceback.format_exception(*sys.exc_info()))
self.response.clear()
self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))
@classmethod
def get_url(cls, *args, **kargs):
"""Returns the url for the given handler.
The default implementation uses the patterns passed to the active
WSGIApplication and the django urlresolvers module to create a url.
However, it is different from urlresolvers.reverse() in the following ways:
- It does not try to resolve handlers via module loading
- It does not support named arguments
- It performs some post-processing on the url to remove some regex
operators that urlresolvers.reverse_helper() seems to miss.
- It will try to fill in the left-most missing arguments with the args
used in the active request.
Args:
args: Parameters for the url pattern's groups.
kargs: Optionally contains 'implicit_args' that can either be a boolean
or a tuple. When it is True, it will use the arguments to the
active request as implicit arguments. When it is False (default),
it will not use any implicit arguments. When it is a tuple, it
will use the tuple as the implicit arguments, filling in the
left-most args if some are missing from args.
Returns:
The url for this handler/args combination.
Raises:
NoUrlFoundError: No url pattern for this handler has the same
number of args that were passed in.
"""
app = WSGIApplication.active_instance
pattern_map = app._pattern_map
implicit_args = kargs.get('implicit_args', ())
if implicit_args == True:
implicit_args = app.current_request_args
min_params = len(args)
urlresolvers = None
for pattern_tuple in pattern_map.get(cls, ()):
num_params_in_pattern = pattern_tuple[1]
if num_params_in_pattern < min_params:
continue
if urlresolvers is None:
from django.core import urlresolvers
try:
num_implicit_args = max(0, num_params_in_pattern - len(args))
merged_args = implicit_args[:num_implicit_args] + args
url = urlresolvers.reverse_helper(pattern_tuple[0], *merged_args)
url = url.replace('\\', '')
url = url.replace('?', '')
return url
except urlresolvers.NoReverseMatch:
continue
logging.warning('get_url failed for Handler name: %r, Args: %r',
cls.__name__, args)
raise NoUrlFoundError
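# Sketch of get_url() in use, assuming a hypothetical handler and pattern
# registered with the active WSGIApplication:
#
#   application = WSGIApplication([('/item/(\d+)', ItemHandler)])
#   ItemHandler.get_url('42')   # -> '/item/42'
#   ItemHandler.get_url()       # -> NoUrlFoundError (the pattern needs one
#                               #    arg, unless implicit_args supplies it)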
class WSGIApplication(object):
"""Wraps a set of webapp RequestHandlers in a WSGI-compatible application.
To use this class, pass a list of (URI regular expression, RequestHandler)
pairs to the constructor, and pass the class instance to a WSGI handler.
See the example in the module comments for details.
The URL mapping is first-match based on the list ordering.
"""
REQUEST_CLASS = Request
RESPONSE_CLASS = Response
def __init__(self, url_mapping, debug=False):
"""Initializes this application with the given URL mapping.
Args:
url_mapping: list of (URI, RequestHandler) pairs (e.g., [('/', ReqHan)])
debug: if true, we send Python stack traces to the browser on errors
"""
self._init_url_mappings(url_mapping)
self.__debug = debug
WSGIApplication.active_instance = self
self.current_request_args = ()
def __call__(self, environ, start_response):
"""Called by WSGI when a request comes in."""
request = self.REQUEST_CLASS(environ)
response = self.RESPONSE_CLASS()
WSGIApplication.active_instance = self
handler = None
groups = ()
for regexp, handler_class in self._url_mapping:
match = regexp.match(request.path)
if match:
handler = handler_class()
handler.initialize(request, response)
groups = match.groups()
break
self.current_request_args = groups
if handler:
try:
method = environ['REQUEST_METHOD']
if method == 'GET':
handler.get(*groups)
elif method == 'POST':
handler.post(*groups)
elif method == 'HEAD':
handler.head(*groups)
elif method == 'OPTIONS':
handler.options(*groups)
elif method == 'PUT':
handler.put(*groups)
elif method == 'DELETE':
handler.delete(*groups)
elif method == 'TRACE':
handler.trace(*groups)
else:
handler.error(501)
except Exception as e:
handler.handle_exception(e, self.__debug)
else:
response.set_status(404)
response.wsgi_write(start_response)
return ['']
def _init_url_mappings(self, handler_tuples):
"""Initializes the maps needed for mapping urls to handlers and handlers
to urls.
Args:
handler_tuples: list of (URI, RequestHandler) pairs.
"""
handler_map = {}
pattern_map = {}
url_mapping = []
for regexp, handler in handler_tuples:
handler_map[handler.__name__] = handler
if not regexp.startswith('^'):
regexp = '^' + regexp
if not regexp.endswith('$'):
regexp += '$'
compiled = re.compile(regexp)
url_mapping.append((compiled, handler))
num_groups = len(RE_FIND_GROUPS.findall(regexp))
handler_patterns = pattern_map.setdefault(handler, [])
handler_patterns.append((compiled, num_groups))
self._handler_map = handler_map
self._pattern_map = pattern_map
self._url_mapping = url_mapping
def get_registered_handler_by_name(self, handler_name):
"""Returns the handler given the handler's name.
This uses the application's url mapping.
Args:
handler_name: The __name__ of a handler to return.
Returns:
The handler with the given name.
Raises:
KeyError: If the handler name is not found in the parent application.
"""
try:
return self._handler_map[handler_name]
except KeyError:
logging.error('Handler does not map to any urls: %s', handler_name)
raise
|
|
import itertools
import sys
from peewee import ModelQueryResultWrapper
from peewee import NaiveQueryResultWrapper
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import skip_test_if
from playhouse.tests.base import test_db
from playhouse.tests.models import *
class TestQueryResultWrapper(ModelTestCase):
requires = [User, Blog, Comment]
def test_iteration(self):
User.create_users(10)
with self.assertQueryCount(1):
sq = User.select()
qr = sq.execute()
first_five = []
for i, u in enumerate(qr):
first_five.append(u.username)
if i == 4:
break
self.assertEqual(first_five, ['u1', 'u2', 'u3', 'u4', 'u5'])
names = lambda it: [obj.username for obj in it]
self.assertEqual(names(sq[5:]), ['u6', 'u7', 'u8', 'u9', 'u10'])
self.assertEqual(names(sq[2:5]), ['u3', 'u4', 'u5'])
another_iter = names(qr)
self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
another_iter = names(qr)
self.assertEqual(another_iter, ['u%d' % i for i in range(1, 11)])
def test_count(self):
User.create_users(5)
with self.assertQueryCount(1):
query = User.select()
qr = query.execute()
self.assertEqual(qr.count, 5)
# Calling again does not incur another query.
self.assertEqual(qr.count, 5)
with self.assertQueryCount(1):
query = query.where(User.username != 'u1')
qr = query.execute()
self.assertEqual(qr.count, 4)
# Calling again does not incur another query.
self.assertEqual(qr.count, 4)
# TODO: Fix this.
#@skip_test_if(lambda: True)
def test_nested_iteration(self):
User.create_users(4)
with self.assertQueryCount(1):
sq = User.select()
outer = []
inner = []
for i_user in sq:
outer.append(i_user.username)
for o_user in sq:
inner.append(o_user.username)
self.assertEqual(outer, ['u1', 'u2', 'u3', 'u4'])
self.assertEqual(inner, ['u1', 'u2', 'u3', 'u4'] * 4)
def test_iteration_protocol(self):
User.create_users(3)
with self.assertQueryCount(1):
query = User.select().order_by(User.id)
qr = query.execute()
for _ in range(2):
for user in qr:
pass
i = iter(qr)
for obj in i:
pass
self.assertRaises(StopIteration, next, i)
self.assertEqual([u.username for u in qr], ['u1', 'u2', 'u3'])
self.assertEqual(query[0].username, 'u1')
self.assertEqual(query[2].username, 'u3')
self.assertRaises(StopIteration, next, i)
def test_iterator(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id).execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
self.assertTrue(qr._populated)
self.assertEqual(qr._result_cache, [])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
with self.assertQueryCount(1):
qr = User.select().where(User.username == 'xxx').execute()
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, [])
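# As the assertions above show, iterator() streams rows without populating
# _result_cache, so iterating the same result wrapper a second time yields
# nothing and costs no additional query.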
def test_iterator_query_method(self):
User.create_users(10)
with self.assertQueryCount(1):
qr = User.select().order_by(User.id)
usernames = [u.username for u in qr.iterator()]
self.assertEqual(usernames, ['u%d' % i for i in range(1, 11)])
with self.assertQueryCount(0):
again = [u.username for u in qr]
self.assertEqual(again, [])
def test_iterator_extended(self):
User.create_users(10)
for i in range(1, 4):
for j in range(i):
Blog.create(
title='blog-%s-%s' % (i, j),
user=User.get(User.username == 'u%s' % i))
qr = (User
.select(
User.username,
fn.Count(Blog.pk).alias('ct'))
.join(Blog)
.where(User.username << ['u1', 'u2', 'u3'])
.group_by(User)
.order_by(User.id)
.naive())
accum = []
with self.assertQueryCount(1):
for user in qr.iterator():
accum.append((user.username, user.ct))
self.assertEqual(accum, [
('u1', 1),
('u2', 2),
('u3', 3)])
qr = (User
.select(fn.Count(User.id).alias('ct'))
.group_by(User.username << ['u1', 'u2', 'u3'])
.order_by(fn.Count(User.id).desc()))
accum = []
with self.assertQueryCount(1):
for ct, in qr.tuples().iterator():
accum.append(ct)
self.assertEqual(accum, [7, 3])
def test_fill_cache(self):
def assertUsernames(qr, n):
self.assertEqual([u.username for u in qr._result_cache], ['u%d' % i for i in range(1, n+1)])
User.create_users(20)
with self.assertQueryCount(1):
qr = User.select().execute()
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# a subsequent call will not "over-fill"
qr.fill_cache(5)
self.assertFalse(qr._populated)
assertUsernames(qr, 5)
# ask for one more and ye shall receive
qr.fill_cache(6)
self.assertFalse(qr._populated)
assertUsernames(qr, 6)
qr.fill_cache(21)
self.assertTrue(qr._populated)
assertUsernames(qr, 20)
self.assertRaises(StopIteration, next, qr)
def test_select_related(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
c11 = Comment.create(blog=b1, comment='c11')
c12 = Comment.create(blog=b1, comment='c12')
c21 = Comment.create(blog=b2, comment='c21')
c22 = Comment.create(blog=b2, comment='c22')
# missing comment.blog_id
comments = (Comment
.select(Comment.id, Comment.comment, Blog.pk, Blog.title)
.join(Blog)
.where(Blog.title == 'b1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
# missing blog.pk
comments = (Comment
.select(Comment.id, Comment.comment, Comment.blog, Blog.title)
.join(Blog)
.where(Blog.title == 'b2')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.blog.title for c in comments], ['b2', 'b2'])
# both but going up 2 levels
comments = (Comment
.select(Comment, Blog, User)
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.blog.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, ModelQueryResultWrapper))
comments = (Comment
.select()
.join(Blog)
.join(User)
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(5):
self.assertEqual([c.blog.user.username for c in comments], ['u1', 'u1'])
self.assertTrue(isinstance(comments._qr, NaiveQueryResultWrapper))
# Go up two levels and use aliases for the joined instances.
comments = (Comment
.select(Comment, Blog, User)
.join(Blog, on=(Comment.blog == Blog.pk).alias('bx'))
.join(User, on=(Blog.user == User.id).alias('ux'))
.where(User.username == 'u1')
.order_by(Comment.id))
with self.assertQueryCount(1):
self.assertEqual([c.comment for c in comments], ['c11', 'c12'])
self.assertEqual([c.bx.title for c in comments], ['b1', 'b1'])
self.assertEqual([c.bx.ux.username for c in comments], ['u1', 'u1'])
def test_naive(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertTrue(isinstance(users._qr, NaiveQueryResultWrapper))
users = User.select(User, Blog).join(Blog).naive()
self.assertEqual([u.username for u in users], ['u1', 'u2'])
self.assertEqual([u.title for u in users], ['b1', 'b2'])
query = Blog.select(Blog, User).join(User).order_by(Blog.title).naive()
self.assertEqual(query.get().user, User.get(User.username == 'u1'))
def test_tuples_dicts(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1 = Blog.create(user=u1, title='b1')
b2 = Blog.create(user=u2, title='b2')
users = User.select().tuples().order_by(User.id)
self.assertEqual([r for r in users], [
(u1.id, 'u1'),
(u2.id, 'u2'),
])
users = User.select().dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1'},
{'id': u2.id, 'username': 'u2'},
])
users = User.select(User, Blog).join(Blog).order_by(User.id).tuples()
self.assertEqual([r for r in users], [
(u1.id, 'u1', b1.pk, u1.id, 'b1', '', None),
(u2.id, 'u2', b2.pk, u2.id, 'b2', '', None),
])
users = User.select(User, Blog).join(Blog).order_by(User.id).dicts()
self.assertEqual([r for r in users], [
{'id': u1.id, 'username': 'u1', 'pk': b1.pk, 'user': u1.id, 'title': 'b1', 'content': '', 'pub_date': None},
{'id': u2.id, 'username': 'u2', 'pk': b2.pk, 'user': u2.id, 'title': 'b2', 'content': '', 'pub_date': None},
])
def test_slicing_dicing(self):
def assertUsernames(users, nums):
self.assertEqual([u.username for u in users], ['u%d' % i for i in nums])
User.create_users(10)
with self.assertQueryCount(1):
uq = User.select().order_by(User.id)
for i in range(2):
res = uq[0]
self.assertEqual(res.username, 'u1')
with self.assertQueryCount(0):
for i in range(2):
res = uq[1]
self.assertEqual(res.username, 'u2')
with self.assertQueryCount(0):
for i in range(2):
res = uq[:3]
assertUsernames(res, [1, 2, 3])
with self.assertQueryCount(0):
for i in range(2):
res = uq[2:5]
assertUsernames(res, [3, 4, 5])
with self.assertQueryCount(0):
for i in range(2):
res = uq[5:]
assertUsernames(res, [6, 7, 8, 9, 10])
self.assertRaises(IndexError, uq.__getitem__, 10)
self.assertRaises(ValueError, uq.__getitem__, -1)
with self.assertQueryCount(0):
res = uq[10:]
self.assertEqual(res, [])
def test_indexing_fill_cache(self):
def assertUser(query_or_qr, idx):
self.assertEqual(query_or_qr[idx].username, 'u%d' % (idx + 1))
User.create_users(10)
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
# Ensure we can grab the first 5 users in 1 query.
for i in range(5):
assertUser(uq, i)
# Iterate in reverse and ensure only costs 1 query.
uq = User.select().order_by(User.id)
with self.assertQueryCount(1):
for i in reversed(range(10)):
assertUser(uq, i)
# Execute the query and get reference to result wrapper.
query = User.select().order_by(User.id)
query.execute()
qr = query._qr
# Getting the first user will populate the result cache with 1 obj.
assertUser(query, 0)
self.assertEqual(len(qr._result_cache), 1)
# Getting the last user will fill the cache.
assertUser(query, 9)
self.assertEqual(len(qr._result_cache), 10)
def test_prepared(self):
for i in range(2):
u = User.create(username='u%d' % i)
for j in range(2):
Blog.create(title='b%d-%d' % (i, j), user=u, content='')
for u in User.select():
# check prepared was called
self.assertEqual(u.foo, u.username)
for b in Blog.select(Blog, User).join(User):
# prepared is called for select-related instances
self.assertEqual(b.foo, b.title)
self.assertEqual(b.user.foo, b.user.username)
def test_aliasing_values(self):
User.create_users(2)
q = User.select(User.username.alias('xx')).order_by(User.username)
results = [row for row in q.dicts()]
self.assertEqual(results, [
{'xx': 'u1'},
{'xx': 'u2'}])
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Force ModelQueryResultWrapper.
q = (User
.select(User.username.alias('xx'), Blog.pk)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username))
results = [user.xx for user in q]
self.assertEqual(results, ['u1', 'u2'])
# Use Model and Field aliases.
UA = User.alias()
q = (User
.select(
User.username.alias('x'),
UA.username.alias('y'))
.join(UA, on=(User.id == UA.id).alias('z'))
.order_by(User.username))
results = [(user.x, user.z.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
q = q.naive()
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
uq = User.select(User.id, User.username).alias('u2')
q = (User
.select(
User.username.alias('x'),
uq.c.username.alias('y'))
.join(uq, on=(User.id == uq.c.id))
.order_by(User.username))
results = [(user.x, user.y) for user in q]
self.assertEqual(results, [('u1', 'u1'), ('u2', 'u2')])
class TestJoinedInstanceConstruction(ModelTestCase):
requires = [Blog, User, Relationship]
def setUp(self):
super(TestJoinedInstanceConstruction, self).setUp()
u1 = User.create(username='u1')
u2 = User.create(username='u2')
Blog.create(user=u1, title='b1')
Blog.create(user=u2, title='b2')
def test_fk_missing_pk(self):
# Not enough information.
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username)
.join(User)
.order_by(Blog.title, User.username))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertIsNone(blog.user.id)
self.assertIsNone(blog.user_id)
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_fk_with_pk(self):
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username, User.id)
.join(User)
.order_by(Blog.title, User.username))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertIsNotNone(blog.user.id)
self.assertIsNotNone(blog.user_id)
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_backref_missing_pk(self):
with self.assertQueryCount(1):
q = (User
.select(User.username, Blog.title)
.join(Blog)
.order_by(User.username, Blog.title))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertIsNone(user.id)
self.assertIsNone(user.blog.pk)
self.assertIsNone(user.blog.user_id)
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
def test_fk_join_expr(self):
with self.assertQueryCount(1):
q = (User
.select(User.username, Blog.title)
.join(Blog, on=(User.id == Blog.user).alias('bx'))
.order_by(User.username))
results = []
for user in q:
results.append((user.username, user.bx.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
with self.assertQueryCount(1):
q = (Blog
.select(Blog.title, User.username)
.join(User, on=(Blog.user == User.id).alias('ux'))
.order_by(Blog.title))
results = []
for blog in q:
results.append((blog.title, blog.ux.username))
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
def test_aliases(self):
B = Blog.alias()
U = User.alias()
with self.assertQueryCount(1):
q = (U.select(U.username, B.title)
.join(B, on=(U.id == B.user))
.order_by(U.username))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
with self.assertQueryCount(1):
q = (B.select(B.title, U.username)
.join(U, on=(B.user == U.id))
.order_by(B.title))
results = []
for blog in q:
results.append((blog.title, blog.user.username))
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
# No explicit join condition.
with self.assertQueryCount(1):
q = (B.select(B.title, U.username)
.join(U, on=B.user)
.order_by(B.title))
results = [(blog.title, blog.user.username) for blog in q]
self.assertEqual(results, [('b1', 'u1'), ('b2', 'u2')])
# No explicit condition, backref.
Blog.create(user=User.get(User.username == 'u2'), title='b2-2')
with self.assertQueryCount(1):
q = (U.select(U.username, B.title)
.join(B, on=B.user)
.order_by(U.username, B.title))
results = [(user.username, user.blog.title) for user in q]
self.assertEqual(
results,
[('u1', 'b1'), ('u2', 'b2'), ('u2', 'b2-2')])
def test_subqueries(self):
uq = User.select()
bq = Blog.select(Blog.title, Blog.user).alias('bq')
with self.assertQueryCount(1):
q = (User
.select(User, bq.c.title.bind_to(Blog))
.join(bq, on=(User.id == bq.c.user_id).alias('blog'))
.order_by(User.username))
results = []
for user in q:
results.append((user.username, user.blog.title))
self.assertEqual(results, [('u1', 'b1'), ('u2', 'b2')])
def test_multiple_joins(self):
Blog.delete().execute()
User.delete().execute()
users = [User.create(username='u%s' % i) for i in range(4)]
for from_user, to_user in itertools.combinations(users, 2):
Relationship.create(from_user=from_user, to_user=to_user)
with self.assertQueryCount(1):
ToUser = User.alias()
q = (Relationship
.select(Relationship, User, ToUser)
.join(User, on=Relationship.from_user)
.switch(Relationship)
.join(ToUser, on=Relationship.to_user)
.order_by(User.username, ToUser.username))
results = [(r.from_user.username, r.to_user.username) for r in q]
self.assertEqual(results, [
('u0', 'u1'),
('u0', 'u2'),
('u0', 'u3'),
('u1', 'u2'),
('u1', 'u3'),
('u2', 'u3'),
])
with self.assertQueryCount(1):
ToUser = User.alias()
q = (Relationship
.select(Relationship, User, ToUser)
.join(User,
on=(Relationship.from_user == User.id))
.switch(Relationship)
.join(ToUser,
on=(Relationship.to_user == ToUser.id).alias('to_user'))
.order_by(User.username, ToUser.username))
results = [(r.from_user.username, r.to_user.username) for r in q]
self.assertEqual(results, [
('u0', 'u1'),
('u0', 'u2'),
('u0', 'u3'),
('u1', 'u2'),
('u1', 'u3'),
('u2', 'u3'),
])
class TestQueryResultTypeConversion(ModelTestCase):
requires = [User]
def setUp(self):
super(TestQueryResultTypeConversion, self).setUp()
for i in range(3):
User.create(username='u%d' % i)
def assertNames(self, query, expected, attr='username'):
id_field = query.model_class.id
self.assertEqual(
[getattr(item, attr) for item in query.order_by(id_field)],
expected)
def test_simple_select(self):
query = UpperUser.select()
self.assertNames(query, ['U0', 'U1', 'U2'])
query = User.select()
self.assertNames(query, ['u0', 'u1', 'u2'])
def test_with_alias(self):
# Even when aliased to a different attr, the column is coerced.
query = UpperUser.select(UpperUser.username.alias('foo'))
self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')
def test_scalar(self):
max_username = (UpperUser
.select(fn.Max(UpperUser.username))
.scalar(convert=True))
self.assertEqual(max_username, 'U2')
max_username = (UpperUser
.select(fn.Max(UpperUser.username))
.scalar())
self.assertEqual(max_username, 'u2')
def test_function(self):
substr = fn.SubStr(UpperUser.username, 1, 3)
# Because the username column is the first argument to the function, it
# meets the special-case coercion criteria.
query = UpperUser.select(substr.alias('foo'))
self.assertNames(query, ['U0', 'U1', 'U2'], 'foo')
query = UpperUser.select(substr.coerce(False).alias('foo'))
self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')
query = UpperUser.select(substr.coerce(False).alias('username'))
self.assertNames(query, ['u0', 'u1', 'u2'])
query = UpperUser.select(fn.Lower(UpperUser.username).alias('username'))
self.assertNames(query, ['U0', 'U1', 'U2'])
query = UpperUser.select(
fn.Lower(UpperUser.username).alias('username').coerce(False))
self.assertNames(query, ['u0', 'u1', 'u2'])
# Since it is aliased to an existing column, we will use that column's
# coerce.
query = UpperUser.select(
fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('username'))
self.assertNames(query, ['U0', 'U1', 'U2'])
query = UpperUser.select(
fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('foo'))
self.assertNames(query, ['u0', 'u1', 'u2'], 'foo')
class TestModelQueryResultWrapper(ModelTestCase):
requires = [TestModelA, TestModelB, TestModelC, User, Blog]
data = (
(TestModelA, (
('pk1', 'a1'),
('pk2', 'a2'),
('pk3', 'a3'))),
(TestModelB, (
('pk1', 'b1'),
('pk2', 'b2'),
('pk3', 'b3'))),
(TestModelC, (
('pk1', 'c1'),
('pk2', 'c2'))),
)
def setUp(self):
super(TestModelQueryResultWrapper, self).setUp()
for model_class, model_data in self.data:
for pk, data in model_data:
model_class.create(field=pk, data=data)
def test_join_expr(self):
def get_query(join_type=JOIN.INNER):
sq = (TestModelA
.select(TestModelA, TestModelB, TestModelC)
.join(
TestModelB,
on=(TestModelA.field == TestModelB.field).alias('rel_b'))
.join(
TestModelC,
join_type=join_type,
on=(TestModelB.field == TestModelC.field))
.order_by(TestModelA.field))
return sq
sq = get_query()
self.assertEqual(sq.count(), 2)
with self.assertQueryCount(1):
results = list(sq)
expected = (('b1', 'c1'), ('b2', 'c2'))
for i, (b_data, c_data) in enumerate(expected):
self.assertEqual(results[i].rel_b.data, b_data)
self.assertEqual(results[i].rel_b.field.data, c_data)
sq = get_query(JOIN.LEFT_OUTER)
self.assertEqual(sq.count(), 3)
with self.assertQueryCount(1):
results = list(sq)
expected = (('b1', 'c1'), ('b2', 'c2'), ('b3', None))
for i, (b_data, c_data) in enumerate(expected):
self.assertEqual(results[i].rel_b.data, b_data)
self.assertEqual(results[i].rel_b.field.data, c_data)
def test_backward_join(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
for user in (u1, u2):
Blog.create(title='b-%s' % user.username, user=user)
# Create an additional blog for user 2.
Blog.create(title='b-u2-2', user=u2)
res = (User
.select(User.username, Blog.title)
.join(Blog)
.order_by(User.username.asc(), Blog.title.asc()))
self.assertEqual([(u.username, u.blog.title) for u in res], [
('u1', 'b-u1'),
('u2', 'b-u2'),
('u2', 'b-u2-2')])
def test_joins_with_aliases(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
b1_1 = Blog.create(user=u1, title='b1-1')
b1_2 = Blog.create(user=u1, title='b1-2')
b2_1 = Blog.create(user=u2, title='b2-1')
UserAlias = User.alias()
BlogAlias = Blog.alias()
def assertExpectedQuery(query, is_user_query):
accum = []
with self.assertQueryCount(1):
if is_user_query:
for user in query:
accum.append((user.username, user.blog.title))
else:
for blog in query:
accum.append((blog.user.username, blog.title))
self.assertEqual(accum, [
('u1', 'b1-1'),
('u1', 'b1-2'),
('u2', 'b2-1'),
])
combinations = [
(User, BlogAlias, User.id == BlogAlias.user, True),
(User, BlogAlias, BlogAlias.user == User.id, True),
(User, Blog, User.id == Blog.user, True),
(User, Blog, Blog.user == User.id, True),
(User, Blog, None, True),
(Blog, UserAlias, UserAlias.id == Blog.user, False),
(Blog, UserAlias, Blog.user == UserAlias.id, False),
(Blog, User, User.id == Blog.user, False),
(Blog, User, Blog.user == User.id, False),
(Blog, User, None, False),
]
for Src, JoinModel, predicate, is_user_query in combinations:
query = (Src
.select(Src, JoinModel)
.join(JoinModel, on=predicate)
.order_by(SQL('1, 2')))
assertExpectedQuery(query, is_user_query)
class TestModelQueryResultForeignKeys(ModelTestCase):
requires = [Parent, Child]
def test_foreign_key_assignment(self):
parent = Parent.create(data='p1')
child = Child.create(parent=parent, data='c1')
ParentAlias = Parent.alias()
query = Child.select(Child, ParentAlias)
ljoin = (ParentAlias.id == Child.parent)
rjoin = (Child.parent == ParentAlias.id)
lhs_alias = query.join(ParentAlias, on=ljoin)
rhs_alias = query.join(ParentAlias, on=rjoin)
self.assertJoins(lhs_alias, [
'INNER JOIN "parent" AS parent '
'ON ("parent"."id" = "child"."parent_id")'])
self.assertJoins(rhs_alias, [
'INNER JOIN "parent" AS parent '
'ON ("child"."parent_id" = "parent"."id")'])
with self.assertQueryCount(1):
lchild = lhs_alias.get()
self.assertEqual(lchild.id, child.id)
self.assertEqual(lchild.parent.id, parent.id)
with self.assertQueryCount(1):
rchild = rhs_alias.get()
self.assertEqual(rchild.id, child.id)
self.assertEqual(rchild.parent.id, parent.id)
class TestSelectRelatedForeignKeyToNonPrimaryKey(ModelTestCase):
requires = [Package, PackageItem]
def test_select_related(self):
p1 = Package.create(barcode='101')
p2 = Package.create(barcode='102')
pi11 = PackageItem.create(title='p11', package='101')
pi12 = PackageItem.create(title='p12', package='101')
pi21 = PackageItem.create(title='p21', package='102')
pi22 = PackageItem.create(title='p22', package='102')
# missing PackageItem.package_id.
with self.assertQueryCount(1):
items = (PackageItem
.select(
PackageItem.id, PackageItem.title, Package.barcode)
.join(Package)
.where(Package.barcode == '101')
.order_by(PackageItem.id))
self.assertEqual(
[i.package.barcode for i in items],
['101', '101'])
with self.assertQueryCount(1):
items = (PackageItem
.select(
PackageItem.id, PackageItem.title, PackageItem.package, Package.id)
.join(Package)
.where(Package.barcode == '101')
.order_by(PackageItem.id))
self.assertEqual([i.package.id for i in items], [p1.id, p1.id])
class BaseTestPrefetch(ModelTestCase):
requires = [
User,
Blog,
Comment,
Parent,
Child,
Orphan,
ChildPet,
OrphanPet,
Category,
Post,
Tag,
TagPostThrough,
TagPostThroughAlt,
Category,
UserCategory,
Relationship,
]
user_data = [
('u1', (('b1', ('b1-c1', 'b1-c2')), ('b2', ('b2-c1',)))),
('u2', ()),
('u3', (('b3', ('b3-c1', 'b3-c2')), ('b4', ()))),
('u4', (('b5', ('b5-c1', 'b5-c2')), ('b6', ('b6-c1',)))),
]
parent_data = [
('p1', (
# children
(
('c1', ('c1-p1', 'c1-p2')),
('c2', ('c2-p1',)),
('c3', ('c3-p1',)),
('c4', ()),
),
# orphans
(
('o1', ('o1-p1', 'o1-p2')),
('o2', ('o2-p1',)),
('o3', ('o3-p1',)),
('o4', ()),
),
)),
('p2', ((), ())),
('p3', (
# children
(
('c6', ()),
('c7', ('c7-p1',)),
),
# orphans
(
('o6', ('o6-p1', 'o6-p2')),
('o7', ('o7-p1',)),
),
)),
]
category_tree = [
['root', ['p1', 'p2']],
['p1', ['p1-1', 'p1-2']],
['p2', ['p2-1', 'p2-2']],
['p1-1', []],
['p1-2', []],
['p2-1', []],
['p2-2', []],
]
def setUp(self):
super(BaseTestPrefetch, self).setUp()
for parent, (children, orphans) in self.parent_data:
p = Parent.create(data=parent)
for child_pets in children:
child, pets = child_pets
c = Child.create(parent=p, data=child)
for pet in pets:
ChildPet.create(child=c, data=pet)
for orphan_pets in orphans:
orphan, pets = orphan_pets
o = Orphan.create(parent=p, data=orphan)
for pet in pets:
OrphanPet.create(orphan=o, data=pet)
for user, blog_comments in self.user_data:
u = User.create(username=user)
for blog, comments in blog_comments:
b = Blog.create(user=u, title=blog, content='')
for c in comments:
Comment.create(blog=b, comment=c)
def _build_category_tree(self):
def cc(name, parent=None):
return Category.create(name=name, parent=parent)
root = cc('root')
p1 = cc('p1', root)
p2 = cc('p2', root)
for p in (p1, p2):
for i in range(2):
cc('%s-%s' % (p.name, i + 1), p)
class TestPrefetch(BaseTestPrefetch):
def test_prefetch_simple(self):
sq = User.select().where(User.username != 'u3')
sq2 = Blog.select().where(Blog.title != 'b2')
sq3 = Comment.select()
with self.assertQueryCount(3):
prefetch_sq = prefetch(sq, sq2, sq3)
results = []
for user in prefetch_sq:
results.append(user.username)
for blog in user.blog_set_prefetch:
results.append(blog.title)
for comment in blog.comments_prefetch:
results.append(comment.comment)
self.assertEqual(results, [
'u1', 'b1', 'b1-c1', 'b1-c2',
'u2',
'u4', 'b5', 'b5-c1', 'b5-c2', 'b6', 'b6-c1',
])
with self.assertQueryCount(0):
results = []
for user in prefetch_sq:
for blog in user.blog_set_prefetch:
results.append(blog.user.username)
for comment in blog.comments_prefetch:
results.append(comment.blog.title)
self.assertEqual(results, [
'u1', 'b1', 'b1', 'u4', 'b5', 'b5', 'u4', 'b6',
])
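# As exercised above, prefetch() issues one query per model passed in (hence
# assertQueryCount(3)) and attaches the related rows under
# "<related_name>_prefetch" attributes such as blog_set_prefetch and
# comments_prefetch.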
def test_prefetch_reverse(self):
sq = User.select()
sq2 = Blog.select().where(Blog.title != 'b2').order_by(Blog.pk)
with self.assertQueryCount(2):
prefetch_sq = prefetch(sq2, sq)
results = []
for blog in prefetch_sq:
results.append(blog.title)
results.append(blog.user.username)
self.assertEqual(results, [
'b1', 'u1',
'b3', 'u3',
'b4', 'u3',
'b5', 'u4',
'b6', 'u4'])
def test_prefetch_up_and_down(self):
blogs = Blog.select(Blog, User).join(User).order_by(Blog.title)
comments = Comment.select().order_by(Comment.comment.desc())
with self.assertQueryCount(2):
query = prefetch(blogs, comments)
results = []
for blog in query:
results.append((
blog.user.username,
blog.title,
[comment.comment for comment in blog.comments_prefetch]))
self.assertEqual(results, [
('u1', 'b1', ['b1-c2', 'b1-c1']),
('u1', 'b2', ['b2-c1']),
('u3', 'b3', ['b3-c2', 'b3-c1']),
('u3', 'b4', []),
('u4', 'b5', ['b5-c2', 'b5-c1']),
('u4', 'b6', ['b6-c1']),
])
def test_prefetch_multi_depth(self):
sq = Parent.select()
sq2 = Child.select()
sq3 = Orphan.select()
sq4 = ChildPet.select()
sq5 = OrphanPet.select()
with self.assertQueryCount(5):
prefetch_sq = prefetch(sq, sq2, sq3, sq4, sq5)
results = []
for parent in prefetch_sq:
results.append(parent.data)
for child in parent.child_set_prefetch:
results.append(child.data)
for pet in child.childpet_set_prefetch:
results.append(pet.data)
for orphan in parent.orphan_set_prefetch:
results.append(orphan.data)
for pet in orphan.orphanpet_set_prefetch:
results.append(pet.data)
self.assertEqual(results, [
'p1', 'c1', 'c1-p1', 'c1-p2', 'c2', 'c2-p1', 'c3', 'c3-p1', 'c4',
'o1', 'o1-p1', 'o1-p2', 'o2', 'o2-p1', 'o3', 'o3-p1', 'o4',
'p2',
'p3', 'c6', 'c7', 'c7-p1', 'o6', 'o6-p1', 'o6-p2', 'o7', 'o7-p1',
])
def test_prefetch_no_aggregate(self):
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username, Blog.title))
results = []
for user in query:
results.append((
user.username,
user.blog.title))
self.assertEqual(results, [
('u1', 'b1'),
('u1', 'b2'),
('u2', None),
('u3', 'b3'),
('u3', 'b4'),
('u4', 'b5'),
('u4', 'b6'),
])
def test_prefetch_self_join(self):
self._build_category_tree()
Child = Category.alias()
with self.assertQueryCount(2):
query = prefetch(Category.select().order_by(Category.id), Child)
names_and_children = [
[parent.name, [child.name for child in parent.children_prefetch]]
for parent in query]
self.assertEqual(names_and_children, self.category_tree)
class TestAggregateRows(BaseTestPrefetch):
def test_aggregate_users(self):
with self.assertQueryCount(1):
query = (User
.select(User, Blog, Comment)
.join(Blog, JOIN.LEFT_OUTER)
.join(Comment, JOIN.LEFT_OUTER)
.order_by(User.username, Blog.title, Comment.id)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[(blog.title,
[comment.comment for comment in blog.comments])
for blog in user.blog_set]))
self.assertEqual(results, [
('u1', [
('b1', ['b1-c1', 'b1-c2']),
('b2', ['b2-c1'])]),
('u2', []),
('u3', [
('b3', ['b3-c1', 'b3-c2']),
('b4', [])]),
('u4', [
('b5', ['b5-c1', 'b5-c2']),
('b6', ['b6-c1'])]),
])
def test_aggregate_blogs(self):
with self.assertQueryCount(1):
query = (Blog
.select(Blog, User, Comment)
.join(User)
.switch(Blog)
.join(Comment, JOIN.LEFT_OUTER)
.order_by(Blog.title, User.username, Comment.id)
.aggregate_rows())
results = []
for blog in query:
results.append((
blog.user.username,
blog.title,
[comment.comment for comment in blog.comments]))
self.assertEqual(results, [
('u1', 'b1', ['b1-c1', 'b1-c2']),
('u1', 'b2', ['b2-c1']),
('u3', 'b3', ['b3-c1', 'b3-c2']),
('u3', 'b4', []),
('u4', 'b5', ['b5-c1', 'b5-c2']),
('u4', 'b6', ['b6-c1']),
])
def test_aggregate_on_expression_join(self):
with self.assertQueryCount(1):
join_expr = (User.id == Blog.user)
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER, on=join_expr)
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u1', ['b1', 'b2']),
('u2', []),
('u3', ['b3', 'b4']),
('u4', ['b5', 'b6']),
])
def test_aggregate_with_join_model_aliases(self):
expected = [
('u1', ['b1', 'b2']),
('u2', []),
('u3', ['b3', 'b4']),
('u4', ['b5', 'b6']),
]
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(
Blog,
JOIN.LEFT_OUTER,
on=(User.id == Blog.user).alias('blogz'))
.order_by(User.id, Blog.title)
.aggregate_rows())
results = [
(user.username, [blog.title for blog in user.blogz])
for user in query]
self.assertEqual(results, expected)
BlogAlias = Blog.alias()
with self.assertQueryCount(1):
query = (User
.select(User, BlogAlias)
.join(
BlogAlias,
JOIN.LEFT_OUTER,
on=(User.id == BlogAlias.user).alias('blogz'))
.order_by(User.id, BlogAlias.title)
.aggregate_rows())
results = [
(user.username, [blog.title for blog in user.blogz])
for user in query]
self.assertEqual(results, expected)
def test_aggregate_unselected_join_backref(self):
cat_1 = Category.create(name='category 1')
cat_2 = Category.create(name='category 2')
with test_db.transaction():
for i, user in enumerate(User.select().order_by(User.username)):
if i % 2 == 0:
category = cat_2
else:
category = cat_1
UserCategory.create(user=user, category=category)
with self.assertQueryCount(1):
# The join on UserCategory is a backref join (since the FK is on
# UserCategory). Additionally, UserCategory/Category are not
# selected and are only used for filtering the result set.
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.switch(User)
.join(UserCategory)
.join(Category)
.where(Category.name == cat_1.name)
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u2', []),
('u4', ['b5', 'b6']),
])
def test_aggregate_manytomany(self):
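        # Many-to-many join through TagPostThroughAlt; the through rows are
        # grouped under post.tags_alt.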
p1 = Post.create(title='p1')
p2 = Post.create(title='p2')
Post.create(title='p3')
p4 = Post.create(title='p4')
t1 = Tag.create(tag='t1')
t2 = Tag.create(tag='t2')
t3 = Tag.create(tag='t3')
TagPostThroughAlt.create(tag=t1, post=p1)
TagPostThroughAlt.create(tag=t2, post=p1)
TagPostThroughAlt.create(tag=t2, post=p2)
TagPostThroughAlt.create(tag=t3, post=p2)
TagPostThroughAlt.create(tag=t1, post=p4)
TagPostThroughAlt.create(tag=t2, post=p4)
TagPostThroughAlt.create(tag=t3, post=p4)
with self.assertQueryCount(1):
query = (Post
.select(Post, TagPostThroughAlt, Tag)
.join(TagPostThroughAlt, JOIN.LEFT_OUTER)
.join(Tag, JOIN.LEFT_OUTER)
.order_by(Post.id, TagPostThroughAlt.post, Tag.id)
.aggregate_rows())
results = []
for post in query:
post_data = [post.title]
for tpt in post.tags_alt:
post_data.append(tpt.tag.tag)
results.append(post_data)
self.assertEqual(results, [
['p1', 't1', 't2'],
['p2', 't2', 't3'],
['p3'],
['p4', 't1', 't2', 't3'],
])
def test_aggregate_parent_child(self):
with self.assertQueryCount(1):
query = (Parent
.select(Parent, Child, Orphan, ChildPet, OrphanPet)
.join(Child, JOIN.LEFT_OUTER)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Parent)
.join(Orphan, JOIN.LEFT_OUTER)
.join(OrphanPet, JOIN.LEFT_OUTER)
.order_by(
Parent.data,
Child.data,
ChildPet.id,
Orphan.data,
OrphanPet.id)
.aggregate_rows())
results = []
for parent in query:
results.append((
parent.data,
[(child.data, [pet.data for pet in child.childpet_set])
for child in parent.child_set],
[(orphan.data, [pet.data for pet in orphan.orphanpet_set])
for orphan in parent.orphan_set]
))
# Without the `.aggregate_rows()` call, this would be 289!!
self.assertEqual(results, [
('p1',
[('c1', ['c1-p1', 'c1-p2']),
('c2', ['c2-p1']),
('c3', ['c3-p1']),
('c4', [])],
[('o1', ['o1-p1', 'o1-p2']),
('o2', ['o2-p1']),
('o3', ['o3-p1']),
('o4', [])],
),
('p2', [], []),
('p3',
[('c6', []),
('c7', ['c7-p1'])],
[('o6', ['o6-p1', 'o6-p2']),
('o7', ['o7-p1'])],)
])
def test_aggregate_with_unselected_joins(self):
with self.assertQueryCount(1):
query = (Child
.select(Child, ChildPet, Parent)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Child)
.join(Parent)
.join(Orphan)
.join(OrphanPet)
.where(OrphanPet.data == 'o6-p2')
.order_by(Child.data, ChildPet.data)
.aggregate_rows())
results = []
for child in query:
results.append((
child.data,
child.parent.data,
[child_pet.data for child_pet in child.childpet_set]))
self.assertEqual(results, [
('c6', 'p3', []),
('c7', 'p3', ['c7-p1']),
])
with self.assertQueryCount(1):
query = (Parent
.select(Parent, Child, ChildPet)
.join(Child, JOIN.LEFT_OUTER)
.join(ChildPet, JOIN.LEFT_OUTER)
.switch(Parent)
.join(Orphan)
.join(OrphanPet)
.where(OrphanPet.data == 'o6-p2')
.order_by(Parent.data, Child.data, ChildPet.data)
.aggregate_rows())
results = []
for parent in query:
results.append((
parent.data,
[(child.data, [pet.data for pet in child.childpet_set])
for child in parent.child_set]))
self.assertEqual(results, [('p3', [
('c6', []),
('c7', ['c7-p1']),
])])
def test_aggregate_rows_ordering(self):
# Refs github #519.
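        # Descending sort order must be preserved when rows are aggregated
        # into user.blog_set.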
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog, JOIN.LEFT_OUTER)
.order_by(User.username.desc(), Blog.title.desc())
.aggregate_rows())
accum = []
for user in query:
accum.append((
user.username,
[blog.title for blog in user.blog_set]))
if sys.version_info[:2] > (2, 6):
self.assertEqual(accum, [
('u4', ['b6', 'b5']),
('u3', ['b4', 'b3']),
('u2', []),
('u1', ['b2', 'b1']),
])
def test_aggregate_rows_self_join(self):
self._build_category_tree()
Child = Category.alias()
# Same query, but this time use an `alias` on the join expr.
with self.assertQueryCount(1):
query = (Category
.select(Category, Child)
.join(
Child,
JOIN.LEFT_OUTER,
on=(Category.id == Child.parent).alias('childrenx'))
.order_by(Category.id, Child.id)
.aggregate_rows())
names_and_children = [
[parent.name, [child.name for child in parent.childrenx]]
for parent in query]
self.assertEqual(names_and_children, self.category_tree)
def test_multiple_fks(self):
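        # Relationship has two foreign keys to User, so model aliases and
        # explicitly aliased join expressions are needed to disambiguate
        # the from_user and to_user sides.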
names = ['charlie', 'huey', 'zaizee']
charlie, huey, zaizee = [
User.create(username=username) for username in names]
Relationship.create(from_user=charlie, to_user=huey)
Relationship.create(from_user=charlie, to_user=zaizee)
Relationship.create(from_user=huey, to_user=charlie)
Relationship.create(from_user=zaizee, to_user=charlie)
UserAlias = User.alias()
with self.assertQueryCount(1):
query = (User
.select(User, Relationship, UserAlias)
.join(
Relationship,
JOIN.LEFT_OUTER,
on=Relationship.from_user)
.join(
UserAlias,
on=(
Relationship.to_user == UserAlias.id
).alias('to_user'))
.order_by(User.username, Relationship.id)
.where(User.username == 'charlie')
.aggregate_rows())
results = [row for row in query]
self.assertEqual(len(results), 1)
user = results[0]
self.assertEqual(user.username, 'charlie')
self.assertEqual(len(user.relationships), 2)
rh, rz = user.relationships
self.assertEqual(rh.to_user.username, 'huey')
self.assertEqual(rz.to_user.username, 'zaizee')
FromUser = User.alias()
ToUser = User.alias()
from_join = (Relationship.from_user == FromUser.id)
to_join = (Relationship.to_user == ToUser.id)
with self.assertQueryCount(1):
query = (Relationship
.select(Relationship, FromUser, ToUser)
.join(FromUser, on=from_join.alias('from_user'))
.switch(Relationship)
.join(ToUser, on=to_join.alias('to_user'))
.order_by(Relationship.id)
.aggregate_rows())
results = [
(relationship.from_user.username,
relationship.to_user.username)
for relationship in query]
self.assertEqual(results, [
('charlie', 'huey'),
('charlie', 'zaizee'),
('huey', 'charlie'),
('zaizee', 'charlie'),
])
def test_multiple_fks_multi_depth(self):
names = ['charlie', 'huey', 'zaizee']
charlie, huey, zaizee = [
User.create(username=username) for username in names]
Relationship.create(from_user=charlie, to_user=huey)
Relationship.create(from_user=charlie, to_user=zaizee)
Relationship.create(from_user=huey, to_user=charlie)
Relationship.create(from_user=zaizee, to_user=charlie)
human = Category.create(name='human')
kitty = Category.create(name='kitty')
UserCategory.create(user=charlie, category=human)
UserCategory.create(user=huey, category=kitty)
UserCategory.create(user=zaizee, category=kitty)
FromUser = User.alias()
ToUser = User.alias()
from_join = (Relationship.from_user == FromUser.id)
to_join = (Relationship.to_user == ToUser.id)
FromUserCategory = UserCategory.alias()
ToUserCategory = UserCategory.alias()
from_uc_join = (FromUser.id == FromUserCategory.user)
to_uc_join = (ToUser.id == ToUserCategory.user)
FromCategory = Category.alias()
ToCategory = Category.alias()
from_c_join = (FromUserCategory.category == FromCategory.id)
to_c_join = (ToUserCategory.category == ToCategory.id)
with self.assertQueryCount(1):
query = (Relationship
.select(
Relationship,
FromUser,
ToUser,
FromUserCategory,
ToUserCategory,
FromCategory,
ToCategory)
.join(FromUser, on=from_join.alias('from_user'))
.join(FromUserCategory, on=from_uc_join.alias('fuc'))
.join(FromCategory, on=from_c_join.alias('category'))
.switch(Relationship)
.join(ToUser, on=to_join.alias('to_user'))
.join(ToUserCategory, on=to_uc_join.alias('tuc'))
.join(ToCategory, on=to_c_join.alias('category'))
.order_by(Relationship.id)
.aggregate_rows())
results = []
for obj in query:
from_user = obj.from_user
to_user = obj.to_user
results.append((
from_user.username,
from_user.fuc[0].category.name,
to_user.username,
to_user.tuc[0].category.name))
self.assertEqual(results, [
('charlie', 'human', 'huey', 'kitty'),
('charlie', 'human', 'zaizee', 'kitty'),
('huey', 'kitty', 'charlie', 'human'),
('zaizee', 'kitty', 'charlie', 'human'),
])
class TestAggregateRowsRegression(ModelTestCase):
requires = [
User,
Blog,
Comment,
Category,
CommentCategory,
BlogData]
def setUp(self):
super(TestAggregateRowsRegression, self).setUp()
u = User.create(username='u1')
b = Blog.create(title='b1', user=u)
BlogData.create(blog=b)
c1 = Comment.create(blog=b, comment='c1')
c2 = Comment.create(blog=b, comment='c2')
cat1 = Category.create(name='cat1')
cat2 = Category.create(name='cat2')
CommentCategory.create(category=cat1, comment=c1, sort_order=1)
CommentCategory.create(category=cat2, comment=c1, sort_order=1)
CommentCategory.create(category=cat1, comment=c2, sort_order=2)
CommentCategory.create(category=cat2, comment=c2, sort_order=2)
def test_aggregate_rows_regression(self):
comments = (Comment
.select(
Comment,
CommentCategory,
Category,
Blog,
BlogData)
.join(CommentCategory, JOIN.LEFT_OUTER)
.join(Category, JOIN.LEFT_OUTER)
.switch(Comment)
.join(Blog)
.join(BlogData, JOIN.LEFT_OUTER)
.where(Category.id == 1)
.order_by(CommentCategory.sort_order))
with self.assertQueryCount(1):
c_list = list(comments.aggregate_rows())
def test_regression_506(self):
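        # Aggregate over a join against a paginated subquery alias.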
user = User.create(username='u2')
for i in range(2):
Blog.create(title='u2-%s' % i, user=user)
users = (User
.select()
.order_by(User.id.desc())
.paginate(1, 5)
.alias('users'))
with self.assertQueryCount(1):
query = (User
.select(User, Blog)
.join(Blog)
.join(users, on=(User.id == users.c.id))
.order_by(User.username, Blog.title)
.aggregate_rows())
results = []
for user in query:
results.append((
user.username,
[blog.title for blog in user.blog_set]))
self.assertEqual(results, [
('u1', ['b1']),
('u2', ['u2-0', 'u2-1']),
])
class TestPrefetchNonPKFK(ModelTestCase):
requires = [Package, PackageItem]
data = {
'101': ['a', 'b'],
'102': ['c'],
'103': [],
'104': ['a', 'b', 'c', 'd', 'e'],
}
def setUp(self):
super(TestPrefetchNonPKFK, self).setUp()
for barcode, titles in self.data.items():
Package.create(barcode=barcode)
for title in titles:
PackageItem.create(package=barcode, title=title)
def test_prefetch(self):
packages = Package.select().order_by(Package.barcode)
items = PackageItem.select().order_by(PackageItem.id)
query = prefetch(packages, items)
for package, (barcode, titles) in zip(query, sorted(self.data.items())):
self.assertEqual(package.barcode, barcode)
self.assertEqual(
[item.title for item in package.items_prefetch],
titles)
packages = (Package
.select()
.where(Package.barcode << ['101', '104'])
.order_by(Package.id))
items = items.where(PackageItem.title << ['a', 'c', 'e'])
query = prefetch(packages, items)
accum = {}
for package in query:
accum[package.barcode] = [
item.title for item in package.items_prefetch]
self.assertEqual(accum, {
'101': ['a'],
            '104': ['a', 'c', 'e'],
})
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import sys
import uuid
import eventlet
import mock
from oslo.config import cfg
import testtools
from quantum.agent.common import config
from quantum.agent import dhcp_agent
from quantum.agent.dhcp_agent import DhcpAgentWithStateReport
from quantum.agent.linux import dhcp
from quantum.agent.linux import interface
from quantum.common import constants
from quantum.common import exceptions
from quantum.openstack.common import jsonutils
from quantum.tests import base
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
HOSTNAME = 'hostname'
def etcdir(*p):
return os.path.join(ETCDIR, *p)
class FakeModel:
def __init__(self, id_, **kwargs):
self.id = id_
self.__dict__.update(kwargs)
def __str__(self):
return str(self.__dict__)
fake_subnet1 = FakeModel('bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.9.0/24', enable_dhcp=True)
fake_subnet2 = FakeModel('dddddddd-dddd-dddd-dddddddddddd',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.9.0/24', enable_dhcp=False)
fake_subnet3 = FakeModel('bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='192.168.1.1/24', enable_dhcp=True)
fake_meta_subnet = FakeModel('bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='169.254.169.252/30',
gateway_ip='169.254.169.253', enable_dhcp=True)
fake_fixed_ip = FakeModel('', subnet=fake_subnet1, ip_address='172.9.9.9')
fake_meta_fixed_ip = FakeModel('', subnet=fake_meta_subnet,
ip_address='169.254.169.254')
fake_port1 = FakeModel('12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip])
fake_port2 = FakeModel('12345678-1234-aaaa-123456789000',
mac_address='aa:bb:cc:dd:ee:99',
network_id='12345678-1234-5678-1234567890ab')
fake_meta_port = FakeModel('12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
device_id='forzanapoli',
fixed_ips=[fake_meta_fixed_ip])
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet2],
ports=[fake_port1])
fake_meta_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_meta_subnet],
ports=[fake_meta_port])
fake_down_network = FakeModel('12345678-dddd-dddd-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=False,
subnets=[],
ports=[])
class TestDhcpAgent(base.BaseTestCase):
def setUp(self):
super(TestDhcpAgent, self).setUp()
dhcp_agent.register_options()
cfg.CONF.set_override('interface_driver',
'quantum.agent.linux.interface.NullDriver')
self.driver_cls_p = mock.patch(
'quantum.agent.dhcp_agent.importutils.import_class')
self.driver = mock.Mock(name='driver')
self.driver.existing_dhcp_networks.return_value = []
self.driver_cls = self.driver_cls_p.start()
self.driver_cls.return_value = self.driver
def tearDown(self):
self.driver_cls_p.stop()
cfg.CONF.reset()
super(TestDhcpAgent, self).tearDown()
def test_dhcp_agent_manager(self):
state_rpc_str = 'quantum.agent.rpc.PluginReportStateAPI'
lease_relay_str = 'quantum.agent.dhcp_agent.DhcpLeaseRelay'
with mock.patch.object(DhcpAgentWithStateReport,
'sync_state',
autospec=True) as mock_sync_state:
with mock.patch.object(DhcpAgentWithStateReport,
'periodic_resync',
autospec=True) as mock_periodic_resync:
with mock.patch(state_rpc_str) as state_rpc:
with mock.patch(lease_relay_str) as mock_lease_relay:
with mock.patch.object(sys, 'argv') as sys_argv:
sys_argv.return_value = [
'dhcp', '--config-file',
etcdir('quantum.conf.test')]
cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(
dhcp_agent.DeviceManager.OPTS)
cfg.CONF.register_opts(
dhcp_agent.DhcpLeaseRelay.OPTS)
cfg.CONF.register_opts(dhcp.OPTS)
cfg.CONF.register_opts(interface.OPTS)
cfg.CONF(project='quantum')
agent_mgr = DhcpAgentWithStateReport('testhost')
eventlet.greenthread.sleep(1)
agent_mgr.after_start()
mock_sync_state.assert_called_once_with(agent_mgr)
mock_periodic_resync.assert_called_once_with(
agent_mgr)
state_rpc.assert_has_calls(
[mock.call(mock.ANY),
mock.call().report_state(mock.ANY, mock.ANY)])
mock_lease_relay.assert_has_calls(
[mock.call(mock.ANY),
mock.call().start()])
def test_dhcp_agent_main_agent_manager(self):
logging_str = 'quantum.agent.common.config.setup_logging'
launcher_str = 'quantum.openstack.common.service.ServiceLauncher'
with mock.patch(logging_str):
with mock.patch.object(sys, 'argv') as sys_argv:
with mock.patch(launcher_str) as launcher:
sys_argv.return_value = ['dhcp', '--config-file',
etcdir('quantum.conf.test')]
dhcp_agent.main()
launcher.assert_has_calls(
[mock.call(), mock.call().launch_service(mock.ANY),
mock.call().wait()])
def test_run_completes_single_pass(self):
with mock.patch('quantum.agent.dhcp_agent.DeviceManager'):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['sync_state', 'lease_relay', 'periodic_resync']])
with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
dhcp.run()
mocks['sync_state'].assert_called_once_with()
mocks['periodic_resync'].assert_called_once_with()
                mocks['lease_relay'].assert_has_calls(
                    [mock.call.start()])
def test_ns_name(self):
with mock.patch('quantum.agent.dhcp_agent.DeviceManager'):
mock_net = mock.Mock(id='foo')
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.assertEqual(dhcp._ns_name(mock_net), 'qdhcp-foo')
def test_ns_name_disabled_namespace(self):
with mock.patch('quantum.agent.dhcp_agent.DeviceManager'):
cfg.CONF.set_override('use_namespaces', False)
mock_net = mock.Mock(id='foo')
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.assertIsNone(dhcp._ns_name(mock_net))
def test_call_driver(self):
network = mock.Mock()
network.id = '1'
with mock.patch('quantum.agent.dhcp_agent.DeviceManager') as dev_mgr:
dhcp = dhcp_agent.DhcpAgent(cfg.CONF)
self.assertTrue(dhcp.call_driver('foo', network))
self.assertTrue(dev_mgr.called)
self.driver.assert_called_once_with(cfg.CONF,
mock.ANY,
'sudo',
mock.ANY,
'qdhcp-1',
mock.ANY)
def test_call_driver_failure(self):
network = mock.Mock()
network.id = '1'
self.driver.return_value.foo.side_effect = Exception
with mock.patch('quantum.agent.dhcp_agent.DeviceManager') as dev_mgr:
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.assertIsNone(dhcp.call_driver('foo', network))
self.assertTrue(dev_mgr.called)
self.driver.assert_called_once_with(cfg.CONF,
mock.ANY,
'sudo',
mock.ANY,
'qdhcp-1',
mock.ANY)
self.assertEqual(log.call_count, 1)
self.assertTrue(dhcp.needs_resync)
def test_update_lease(self):
with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.update_lease('net_id', '192.168.1.1', 120)
plug.assert_has_calls(
[mock.call().update_lease_expiration(
'net_id', '192.168.1.1', 120)])
def test_update_lease_failure(self):
with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
plug.return_value.update_lease_expiration.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.update_lease('net_id', '192.168.1.1', 120)
plug.assert_has_calls(
[mock.call().update_lease_expiration(
'net_id', '192.168.1.1', 120)])
self.assertTrue(log.called)
self.assertTrue(dhcp.needs_resync)
def _test_sync_state_helper(self, known_networks, active_networks):
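        # sync_state() should refresh every active network and disable any
        # cached network that is no longer active.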
with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks.return_value = active_networks
plug.return_value = mock_plugin
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['refresh_dhcp_helper', 'disable_dhcp_helper', 'cache']])
with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
mocks['cache'].get_network_ids.return_value = known_networks
dhcp.sync_state()
exp_refresh = [
mock.call(net_id) for net_id in active_networks]
diff = set(known_networks) - set(active_networks)
exp_disable = [mock.call(net_id) for net_id in diff]
mocks['cache'].assert_has_calls([mock.call.get_network_ids()])
                mocks['refresh_dhcp_helper'].assert_has_calls(exp_refresh)
                mocks['disable_dhcp_helper'].assert_has_calls(exp_disable)
def test_sync_state_initial(self):
self._test_sync_state_helper([], ['a'])
def test_sync_state_same(self):
self._test_sync_state_helper(['a'], ['a'])
def test_sync_state_disabled_net(self):
self._test_sync_state_helper(['b'], ['a'])
def test_sync_state_plugin_error(self):
with mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi') as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks.side_effect = Exception
plug.return_value = mock_plugin
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.sync_state()
self.assertTrue(log.called)
self.assertTrue(dhcp.needs_resync)
def test_periodic_resync(self):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn:
dhcp.periodic_resync()
spawn.assert_called_once_with(dhcp._periodic_resync_helper)
    def test_periodic_resync_helper(self):
with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
dhcp.needs_resync = True
with mock.patch.object(dhcp, 'sync_state') as sync_state:
sync_state.side_effect = RuntimeError
with testtools.ExpectedException(RuntimeError):
dhcp._periodic_resync_helper()
sync_state.assert_called_once_with()
sleep.assert_called_once_with(dhcp.conf.resync_interval)
self.assertFalse(dhcp.needs_resync)
def test_populate_cache_on_start_without_active_networks_support(self):
        # Emulate a dhcp driver that does not support retrieving
        # active networks.
self.driver.existing_dhcp_networks.side_effect = NotImplementedError
with mock.patch.object(dhcp_agent.LOG, 'debug') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
cfg.CONF.root_helper
)
self.assertFalse(dhcp.cache.get_network_ids())
self.assertTrue(log.called)
def test_populate_cache_on_start(self):
networks = ['aaa', 'bbb']
self.driver.existing_dhcp_networks.return_value = networks
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
cfg.CONF.root_helper
)
        self.assertEqual(set(networks), set(dhcp.cache.get_network_ids()))
class TestLogArgs(base.BaseTestCase):
def test_log_args_without_log_dir_and_file(self):
conf_dict = {'debug': True,
'verbose': False,
'log_dir': None,
'log_file': None}
conf = dhcp_agent.DictModel(conf_dict)
expected_args = ['--debug']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_without_log_file(self):
conf_dict = {'debug': True,
'verbose': True,
'log_dir': '/etc/tests',
'log_file': None}
conf = dhcp_agent.DictModel(conf_dict)
expected_args = ['--debug', '--verbose',
'--log-file=log_file_name',
'--log-dir=/etc/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_with_log_dir_and_file(self):
conf_dict = {'debug': True,
'verbose': False,
'log_dir': '/etc/tests',
'log_file': 'tests/filelog'}
conf = dhcp_agent.DictModel(conf_dict)
expected_args = ['--debug',
'--log-file=log_file_name',
'--log-dir=/etc/tests/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_without_log_dir(self):
conf_dict = {'debug': True,
'verbose': False,
'log_file': 'tests/filelog',
'log_dir': None}
conf = dhcp_agent.DictModel(conf_dict)
expected_args = ['--debug',
'--log-file=log_file_name',
'--log-dir=tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
class TestDhcpAgentEventHandler(base.BaseTestCase):
def setUp(self):
super(TestDhcpAgentEventHandler, self).setUp()
cfg.CONF.register_opts(dhcp_agent.DeviceManager.OPTS)
cfg.CONF.register_opts(dhcp_agent.DhcpLeaseRelay.OPTS)
cfg.CONF.register_opts(dhcp.OPTS)
cfg.CONF.set_override('interface_driver',
'quantum.agent.linux.interface.NullDriver')
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
self.plugin_p = mock.patch('quantum.agent.dhcp_agent.DhcpPluginApi')
plugin_cls = self.plugin_p.start()
self.plugin = mock.Mock()
plugin_cls.return_value = self.plugin
self.cache_p = mock.patch('quantum.agent.dhcp_agent.NetworkCache')
cache_cls = self.cache_p.start()
self.cache = mock.Mock()
cache_cls.return_value = self.cache
self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver')
self.call_driver = self.call_driver_p.start()
self.external_process_p = mock.patch(
'quantum.agent.linux.external_process.ProcessManager'
)
self.external_process = self.external_process_p.start()
def tearDown(self):
self.external_process_p.stop()
self.call_driver_p.stop()
self.cache_p.stop()
self.plugin_p.stop()
cfg.CONF.reset()
super(TestDhcpAgentEventHandler, self).tearDown()
def _enable_dhcp_helper(self, isolated_metadata=False):
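        # Enabling DHCP should fetch the network over RPC, call the driver's
        # 'enable' action, cache the network and, with isolated metadata
        # enabled, spawn a metadata proxy process.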
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.plugin.get_network_info.return_value = fake_network
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.call_driver.assert_called_once_with('enable', fake_network)
self.cache.assert_has_calls([mock.call.put(fake_network)])
if isolated_metadata:
self.external_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().enable(mock.ANY)
])
else:
self.assertFalse(self.external_process.call_count)
def test_enable_dhcp_helper_enable_isolated_metadata(self):
self._enable_dhcp_helper(isolated_metadata=True)
def test_enable_dhcp_helper(self):
self._enable_dhcp_helper()
def test_enable_dhcp_helper_down_network(self):
self.plugin.get_network_info.return_value = fake_down_network
self.dhcp.enable_dhcp_helper(fake_down_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_down_network.id)])
self.assertFalse(self.call_driver.called)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def test_enable_dhcp_helper_exception_during_rpc(self):
self.plugin.get_network_info.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.assertFalse(self.call_driver.called)
self.assertTrue(log.called)
self.assertTrue(self.dhcp.needs_resync)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def test_enable_dhcp_helper_driver_failure(self):
self.plugin.get_network_info.return_value = fake_network
self.call_driver.return_value = False
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.call_driver.assert_called_once_with('enable', fake_network)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def _disable_dhcp_helper_known_network(self, isolated_metadata=False):
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.cache.get_network_by_id.return_value = fake_network
self.dhcp.disable_dhcp_helper(fake_network.id)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
self.call_driver.assert_called_once_with('disable', fake_network)
if isolated_metadata:
self.external_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().disable()
])
else:
self.assertFalse(self.external_process.call_count)
def test_disable_dhcp_helper_known_network_isolated_metadata(self):
self._disable_dhcp_helper_known_network(isolated_metadata=True)
def test_disable_dhcp_helper_known_network(self):
self._disable_dhcp_helper_known_network()
def test_disable_dhcp_helper_unknown_network(self):
self.cache.get_network_by_id.return_value = None
self.dhcp.disable_dhcp_helper('abcdef')
self.cache.assert_has_calls(
[mock.call.get_network_by_id('abcdef')])
self.assertEqual(0, self.call_driver.call_count)
self.assertFalse(self.external_process.called)
def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False):
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.cache.get_network_by_id.return_value = fake_network
self.call_driver.return_value = False
self.dhcp.disable_dhcp_helper(fake_network.id)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
self.call_driver.assert_called_once_with('disable', fake_network)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
if isolated_metadata:
self.external_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().disable()
])
else:
self.assertFalse(self.external_process.call_count)
def test_disable_dhcp_helper_driver_failure_isolated_metadata(self):
self._disable_dhcp_helper_driver_failure(isolated_metadata=True)
def test_disable_dhcp_helper_driver_failure(self):
self._disable_dhcp_helper_driver_failure()
def test_enable_isolated_metadata_proxy(self):
class_path = 'quantum.agent.linux.external_process.ProcessManager'
with mock.patch(class_path) as ext_process:
self.dhcp.enable_isolated_metadata_proxy(fake_network)
ext_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().enable(mock.ANY)
])
def test_disable_isolated_metadata_proxy(self):
class_path = 'quantum.agent.linux.external_process.ProcessManager'
with mock.patch(class_path) as ext_process:
self.dhcp.disable_isolated_metadata_proxy(fake_network)
ext_process.assert_has_calls([
mock.call(
cfg.CONF,
'12345678-1234-5678-1234567890ab',
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().disable()
])
def test_enable_isolated_metadata_proxy_with_metadata_network(self):
cfg.CONF.set_override('enable_metadata_network', True)
cfg.CONF.set_override('debug', True)
cfg.CONF.set_override('log_file', 'test.log')
class_path = 'quantum.agent.linux.ip_lib.IPWrapper'
self.external_process_p.stop()
        # Ensure the mock is restored if this test fails.
try:
with mock.patch(class_path) as ip_wrapper:
self.dhcp.enable_isolated_metadata_proxy(fake_meta_network)
ip_wrapper.assert_has_calls([mock.call(
'sudo',
'qdhcp-12345678-1234-5678-1234567890ab'),
mock.call().netns.execute([
'quantum-ns-metadata-proxy',
mock.ANY,
'--router_id=forzanapoli',
mock.ANY,
mock.ANY,
'--debug',
('--log-file=quantum-ns-metadata-proxy-%s.log' %
fake_meta_network.id)])
])
finally:
self.external_process_p.start()
def test_network_create_end(self):
payload = dict(network=dict(id=fake_network.id))
with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
self.dhcp.network_create_end(None, payload)
            enable.assert_called_once_with(fake_network.id)
def test_network_update_end_admin_state_up(self):
payload = dict(network=dict(id=fake_network.id, admin_state_up=True))
with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
self.dhcp.network_update_end(None, payload)
            enable.assert_called_once_with(fake_network.id)
def test_network_update_end_admin_state_down(self):
payload = dict(network=dict(id=fake_network.id, admin_state_up=False))
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.network_update_end(None, payload)
            disable.assert_called_once_with(fake_network.id)
def test_network_delete_end(self):
payload = dict(network_id=fake_network.id)
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.network_delete_end(None, payload)
            disable.assert_called_once_with(fake_network.id)
def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self):
network = FakeModel('net-id',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[],
ports=[])
self.cache.get_network_by_id.return_value = network
self.plugin.get_network_info.return_value = network
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.refresh_dhcp_helper(network.id)
            disable.assert_called_once_with(network.id)
self.assertFalse(self.cache.called)
self.assertFalse(self.call_driver.called)
self.cache.assert_has_calls(
[mock.call.get_network_by_id('net-id')])
def test_refresh_dhcp_helper_exception_during_rpc(self):
network = FakeModel('net-id',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[],
ports=[])
self.cache.get_network_by_id.return_value = network
self.plugin.get_network_info.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
self.dhcp.refresh_dhcp_helper(network.id)
self.assertFalse(self.call_driver.called)
self.cache.assert_has_calls(
[mock.call.get_network_by_id('net-id')])
self.assertTrue(log.called)
self.assertTrue(self.dhcp.needs_resync)
def test_subnet_update_end(self):
payload = dict(subnet=dict(network_id=fake_network.id))
self.cache.get_network_by_id.return_value = fake_network
self.plugin.get_network_info.return_value = fake_network
self.dhcp.subnet_update_end(None, payload)
self.cache.assert_has_calls([mock.call.put(fake_network)])
self.call_driver.assert_called_once_with('reload_allocations',
fake_network)
def test_subnet_update_end_restart(self):
new_state = FakeModel(fake_network.id,
tenant_id=fake_network.tenant_id,
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet3],
ports=[fake_port1])
payload = dict(subnet=dict(network_id=fake_network.id))
self.cache.get_network_by_id.return_value = fake_network
self.plugin.get_network_info.return_value = new_state
self.dhcp.subnet_update_end(None, payload)
self.cache.assert_has_calls([mock.call.put(new_state)])
self.call_driver.assert_called_once_with('restart',
new_state)
def test_subnet_update_end_delete_payload(self):
prev_state = FakeModel(fake_network.id,
tenant_id=fake_network.tenant_id,
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet3],
ports=[fake_port1])
payload = dict(subnet_id=fake_subnet1.id)
self.cache.get_network_by_subnet_id.return_value = prev_state
self.cache.get_network_by_id.return_value = prev_state
self.plugin.get_network_info.return_value = fake_network
self.dhcp.subnet_delete_end(None, payload)
self.cache.assert_has_calls([
mock.call.get_network_by_subnet_id(
'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'),
mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'),
mock.call.put(fake_network)])
self.call_driver.assert_called_once_with('restart',
fake_network)
def test_port_update_end(self):
payload = dict(port=vars(fake_port2))
self.cache.get_network_by_id.return_value = fake_network
self.dhcp.port_update_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_port2.network_id),
mock.call.put_port(mock.ANY)])
self.call_driver.assert_called_once_with('reload_allocations',
fake_network)
def test_port_delete_end(self):
payload = dict(port_id=fake_port2.id)
self.cache.get_network_by_id.return_value = fake_network
self.cache.get_port_by_id.return_value = fake_port2
self.dhcp.port_delete_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_port_by_id(fake_port2.id),
mock.call.get_network_by_id(fake_network.id),
mock.call.remove_port(fake_port2)])
self.call_driver.assert_called_once_with('reload_allocations',
fake_network)
def test_port_delete_end_unknown_port(self):
payload = dict(port_id='unknown')
self.cache.get_port_by_id.return_value = None
self.dhcp.port_delete_end(None, payload)
self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')])
self.assertEqual(self.call_driver.call_count, 0)
class TestDhcpPluginApiProxy(base.BaseTestCase):
def setUp(self):
super(TestDhcpPluginApiProxy, self).setUp()
self.proxy = dhcp_agent.DhcpPluginApi('foo', {})
self.proxy.host = 'foo'
self.call_p = mock.patch.object(self.proxy, 'call')
self.call = self.call_p.start()
self.make_msg_p = mock.patch.object(self.proxy, 'make_msg')
self.make_msg = self.make_msg_p.start()
def tearDown(self):
self.make_msg_p.stop()
self.call_p.stop()
super(TestDhcpPluginApiProxy, self).tearDown()
def test_get_active_networks(self):
self.proxy.get_active_networks()
self.assertTrue(self.call.called)
self.make_msg.assert_called_once_with('get_active_networks',
host='foo')
def test_get_network_info(self):
self.call.return_value = dict(a=1)
retval = self.proxy.get_network_info('netid')
self.assertEqual(retval.a, 1)
self.assertTrue(self.call.called)
self.make_msg.assert_called_once_with('get_network_info',
network_id='netid',
host='foo')
def test_get_dhcp_port(self):
self.call.return_value = dict(a=1)
retval = self.proxy.get_dhcp_port('netid', 'devid')
self.assertEqual(retval.a, 1)
self.assertTrue(self.call.called)
self.make_msg.assert_called_once_with('get_dhcp_port',
network_id='netid',
device_id='devid',
host='foo')
def test_release_dhcp_port(self):
self.proxy.release_dhcp_port('netid', 'devid')
self.assertTrue(self.call.called)
self.make_msg.assert_called_once_with('release_dhcp_port',
network_id='netid',
device_id='devid',
host='foo')
def test_release_port_fixed_ip(self):
self.proxy.release_port_fixed_ip('netid', 'devid', 'subid')
self.assertTrue(self.call.called)
self.make_msg.assert_called_once_with('release_port_fixed_ip',
network_id='netid',
subnet_id='subid',
device_id='devid',
host='foo')
def test_update_lease_expiration(self):
with mock.patch.object(self.proxy, 'cast') as mock_cast:
self.proxy.update_lease_expiration('netid', 'ipaddr', 1)
self.assertTrue(mock_cast.called)
self.make_msg.assert_called_once_with('update_lease_expiration',
network_id='netid',
ip_address='ipaddr',
lease_remaining=1,
host='foo')
class TestNetworkCache(base.BaseTestCase):
def test_put_network(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.cache,
{fake_network.id: fake_network})
self.assertEqual(nc.subnet_lookup,
{fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id})
self.assertEqual(nc.port_lookup,
{fake_port1.id: fake_network.id})
def test_put_network_existing(self):
prev_network_info = mock.Mock()
nc = dhcp_agent.NetworkCache()
with mock.patch.object(nc, 'remove') as remove:
nc.cache[fake_network.id] = prev_network_info
nc.put(fake_network)
remove.assert_called_once_with(prev_network_info)
self.assertEqual(nc.cache,
{fake_network.id: fake_network})
self.assertEqual(nc.subnet_lookup,
{fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id})
self.assertEqual(nc.port_lookup,
{fake_port1.id: fake_network.id})
def test_remove_network(self):
nc = dhcp_agent.NetworkCache()
nc.cache = {fake_network.id: fake_network}
nc.subnet_lookup = {fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id}
nc.port_lookup = {fake_port1.id: fake_network.id}
nc.remove(fake_network)
self.assertEqual(len(nc.cache), 0)
self.assertEqual(len(nc.subnet_lookup), 0)
self.assertEqual(len(nc.port_lookup), 0)
def test_get_network_by_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network)
def test_get_network_ids(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_ids(), [fake_network.id])
def test_get_network_by_subnet_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id),
fake_network)
def test_get_network_by_port_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_port_id(fake_port1.id),
fake_network)
def test_put_port(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1])
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
nc.put_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 2)
self.assertIn(fake_port2, fake_network.ports)
def test_put_port_existing(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2])
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
nc.put_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 2)
self.assertIn(fake_port2, fake_network.ports)
def test_remove_port_existing(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2])
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
nc.remove_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 1)
self.assertNotIn(fake_port2, fake_network.ports)
def test_get_port_by_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1)
class TestDeviceManager(base.BaseTestCase):
def setUp(self):
super(TestDeviceManager, self).setUp()
cfg.CONF.register_opts(dhcp_agent.DeviceManager.OPTS)
cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS)
cfg.CONF.set_override('interface_driver',
'quantum.agent.linux.interface.NullDriver')
config.register_root_helper(cfg.CONF)
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_isolated_metadata', True)
self.device_exists_p = mock.patch(
'quantum.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.dvr_cls_p = mock.patch('quantum.agent.linux.interface.NullDriver')
self.iproute_cls_p = mock.patch('quantum.agent.linux.'
'ip_lib.IpRouteCommand')
driver_cls = self.dvr_cls_p.start()
iproute_cls = self.iproute_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
self.mock_iproute = mock.MagicMock()
driver_cls.return_value = self.mock_driver
iproute_cls.return_value = self.mock_iproute
def tearDown(self):
self.dvr_cls_p.stop()
self.device_exists_p.stop()
self.iproute_cls_p.stop()
cfg.CONF.reset()
super(TestDeviceManager, self).tearDown()
def _test_setup_helper(self, device_exists, reuse_existing=False,
net=None, port=None):
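        # setup() should plug the DHCP port into the network namespace and
        # configure its IP addresses; plug() is skipped when an existing
        # device is reused.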
net = net or fake_network
port = port or fake_port1
plugin = mock.Mock()
plugin.get_dhcp_port.return_value = port or fake_port1
self.device_exists.return_value = device_exists
self.mock_driver.get_device_name.return_value = 'tap12345678-12'
dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
interface_name = dh.setup(net, reuse_existing)
self.assertEqual(interface_name, 'tap12345678-12')
plugin.assert_has_calls([
mock.call.get_dhcp_port(net.id, mock.ANY)])
namespace = dhcp_agent.NS_PREFIX + net.id
expected_ips = ['172.9.9.9/24', '169.254.169.254/16']
expected = [mock.call.init_l3('tap12345678-12',
expected_ips,
namespace=namespace)]
if not reuse_existing:
expected.insert(0,
mock.call.plug(net.id,
port.id,
'tap12345678-12',
'aa:bb:cc:dd:ee:ff',
namespace=namespace))
self.mock_driver.assert_has_calls(expected)
def test_setup(self):
self._test_setup_helper(False)
def test_setup_device_exists(self):
with testtools.ExpectedException(exceptions.PreexistingDeviceFailure):
self._test_setup_helper(True)
def test_setup_device_exists_reuse(self):
self._test_setup_helper(True, True)
def test_destroy(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
fake_port = FakeModel('12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff')
with mock.patch('quantum.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
plugin.get_dhcp_port.return_value = fake_port
dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
dh.destroy(fake_network, 'tap12345678-12')
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.unplug('tap12345678-12',
namespace='qdhcp-' + fake_network.id)])
plugin.assert_has_calls(
[mock.call.release_dhcp_port(fake_network.id, mock.ANY)])
def test_get_interface_name(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
fake_port = FakeModel('12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff')
with mock.patch('quantum.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
plugin.get_dhcp_port.return_value = fake_port
dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
dh.get_interface_name(fake_network, fake_port)
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.get_device_name(fake_port)])
self.assertEqual(len(plugin.mock_calls), 0)
def test_get_interface_name_no_port_provided(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
fake_port = FakeModel('12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff')
with mock.patch('quantum.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
plugin.get_dhcp_port.return_value = fake_port
dh = dhcp_agent.DeviceManager(cfg.CONF, plugin)
dh.get_interface_name(fake_network)
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.get_device_name(fake_port)])
plugin.assert_has_calls(
[mock.call.get_dhcp_port(fake_network.id, mock.ANY)])
def test_get_device_id(self):
fake_network = FakeModel('12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')
expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-'
'5678-1234567890ab')
with mock.patch('socket.gethostbyname') as get_host:
with mock.patch('uuid.uuid5') as uuid5:
uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457'
get_host.return_value = 'localhost'
dh = dhcp_agent.DeviceManager(cfg.CONF, None)
uuid5.called_once_with(uuid.NAMESPACE_DNS, 'localhost')
self.assertEqual(dh.get_device_id(fake_network), expected)
class TestDhcpLeaseRelay(base.BaseTestCase):
def setUp(self):
super(TestDhcpLeaseRelay, self).setUp()
cfg.CONF.register_opts(dhcp_agent.DhcpLeaseRelay.OPTS)
self.unlink_p = mock.patch('os.unlink')
self.unlink = self.unlink_p.start()
def tearDown(self):
self.unlink_p.stop()
super(TestDhcpLeaseRelay, self).tearDown()
def test_init_relay_socket_path_no_prev_socket(self):
with mock.patch('os.path.exists') as exists:
exists.return_value = False
self.unlink.side_effect = OSError
dhcp_agent.DhcpLeaseRelay(None)
self.unlink.assert_called_once_with(
cfg.CONF.dhcp_lease_relay_socket)
exists.assert_called_once_with(cfg.CONF.dhcp_lease_relay_socket)
def test_init_relay_socket_path_prev_socket_exists(self):
with mock.patch('os.path.exists') as exists:
exists.return_value = False
dhcp_agent.DhcpLeaseRelay(None)
self.unlink.assert_called_once_with(
cfg.CONF.dhcp_lease_relay_socket)
self.assertFalse(exists.called)
def test_init_relay_socket_path_prev_socket_unlink_failure(self):
self.unlink.side_effect = OSError
with mock.patch('os.path.exists') as exists:
exists.return_value = True
with testtools.ExpectedException(OSError):
dhcp_agent.DhcpLeaseRelay(None)
self.unlink.assert_called_once_with(
cfg.CONF.dhcp_lease_relay_socket)
exists.assert_called_once_with(
cfg.CONF.dhcp_lease_relay_socket)
def test_handler_valid_data(self):
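        # A valid JSON lease update read from the socket is forwarded to the
        # handler and the socket is closed afterwards.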
network_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
ip_address = '192.168.1.9'
lease_remaining = 120
json_rep = jsonutils.dumps(dict(network_id=network_id,
lease_remaining=lease_remaining,
ip_address=ip_address))
handler = mock.Mock()
mock_sock = mock.Mock()
mock_sock.recv.return_value = json_rep
relay = dhcp_agent.DhcpLeaseRelay(handler)
relay._handler(mock_sock, mock.Mock())
mock_sock.assert_has_calls([mock.call.recv(1024), mock.call.close()])
        handler.assert_called_once_with(
            network_id, ip_address, lease_remaining)
def test_handler_invalid_data(self):
network_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
ip_address = '192.168.x.x'
lease_remaining = 120
json_rep = jsonutils.dumps(
dict(network_id=network_id,
lease_remaining=lease_remaining,
ip_address=ip_address))
handler = mock.Mock()
mock_sock = mock.Mock()
mock_sock.recv.return_value = json_rep
relay = dhcp_agent.DhcpLeaseRelay(handler)
with mock.patch('quantum.openstack.common.'
'uuidutils.is_uuid_like') as validate:
validate.return_value = False
with mock.patch.object(dhcp_agent.LOG, 'warn') as log:
relay._handler(mock_sock, mock.Mock())
mock_sock.assert_has_calls(
[mock.call.recv(1024), mock.call.close()])
self.assertFalse(handler.called)
self.assertTrue(log.called)
def test_handler_other_exception(self):
handler = mock.Mock()
mock_sock = mock.Mock()
mock_sock.recv.side_effect = Exception
relay = dhcp_agent.DhcpLeaseRelay(handler)
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
relay._handler(mock_sock, mock.Mock())
mock_sock.assert_has_calls([mock.call.recv(1024)])
self.assertFalse(handler.called)
self.assertTrue(log.called)
def test_start(self):
with mock.patch.object(dhcp_agent, 'eventlet') as mock_eventlet:
handler = mock.Mock()
relay = dhcp_agent.DhcpLeaseRelay(handler)
relay.start()
mock_eventlet.assert_has_calls(
[mock.call.listen(cfg.CONF.dhcp_lease_relay_socket,
family=socket.AF_UNIX),
mock.call.spawn(mock_eventlet.serve,
mock.call.listen.return_value,
relay._handler)])
class TestDictModel(base.BaseTestCase):
def test_basic_dict(self):
d = dict(a=1, b=2)
m = dhcp_agent.DictModel(d)
self.assertEqual(m.a, 1)
self.assertEqual(m.b, 2)
def test_dict_has_sub_dict(self):
d = dict(a=dict(b=2))
m = dhcp_agent.DictModel(d)
self.assertEqual(m.a.b, 2)
def test_dict_contains_list(self):
d = dict(a=[1, 2])
m = dhcp_agent.DictModel(d)
self.assertEqual(m.a, [1, 2])
def test_dict_contains_list_of_dicts(self):
d = dict(a=[dict(b=2), dict(c=3)])
m = dhcp_agent.DictModel(d)
self.assertEqual(m.a[0].b, 2)
self.assertEqual(m.a[1].c, 3)
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import datetime
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
from libcloud.loadbalancer.base import LoadBalancer, Member, Algorithm
from libcloud.loadbalancer.types import MemberCondition
from libcloud.loadbalancer.drivers.rackspace import RackspaceLBDriver, \
RackspaceHealthMonitor, RackspaceHTTPHealthMonitor, \
RackspaceConnectionThrottle, RackspaceAccessRule
from libcloud.loadbalancer.drivers.rackspace import RackspaceUKLBDriver
from libcloud.loadbalancer.drivers.rackspace import RackspaceAccessRuleType
from libcloud.common.types import LibcloudError
from libcloud.test import unittest
from libcloud.test import MockHttpTestCase
from libcloud.test.file_fixtures import LoadBalancerFileFixtures
from libcloud.test.file_fixtures import OpenStackFixtures
class RackspaceLBTests(unittest.TestCase):
def setUp(self):
RackspaceLBDriver.connectionCls.conn_classes = (None,
RackspaceLBMockHttp)
RackspaceLBMockHttp.type = None
self.driver = RackspaceLBDriver('user', 'key')
self.driver.connection.poll_interval = 0.0
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
def test_force_auth_token_kwargs(self):
base_url = 'https://ord.loadbalancer.api.rackspacecloud.com/v1.0/11111'
kwargs = {
'ex_force_auth_token': 'some-auth-token',
'ex_force_base_url': base_url
}
driver = RackspaceLBDriver('user', 'key', **kwargs)
driver.list_balancers()
self.assertEqual(kwargs['ex_force_auth_token'],
driver.connection.auth_token)
self.assertEqual('/v1.0/11111',
driver.connection.request_path)
def test_force_auth_url_kwargs(self):
kwargs = {
'ex_force_auth_version': '2.0',
'ex_force_auth_url': 'https://identity.api.rackspace.com'
}
driver = RackspaceLBDriver('user', 'key', **kwargs)
self.assertEqual(kwargs['ex_force_auth_url'],
driver.connection._ex_force_auth_url)
self.assertEqual(kwargs['ex_force_auth_version'],
driver.connection._auth_version)
def test_gets_auth_2_0_endpoint_defaults_to_ord_region(self):
driver = RackspaceLBDriver('user', 'key',
ex_force_auth_version='2.0_password'
)
driver.connection._populate_hosts_and_request_paths()
self.assertEqual(
'https://ord.loadbalancers.api.rackspacecloud.com/v1.0/11111',
driver.connection.get_endpoint())
def test_gets_auth_2_0_endpoint_for_dfw(self):
driver = RackspaceLBDriver('user', 'key',
ex_force_auth_version='2.0_password',
ex_force_region='dfw'
)
driver.connection._populate_hosts_and_request_paths()
self.assertEqual(
'https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/11111',
driver.connection.get_endpoint())
def test_list_protocols(self):
protocols = self.driver.list_protocols()
self.assertEqual(len(protocols), 10)
self.assertTrue('http' in protocols)
def test_ex_list_protocols_with_default_ports(self):
protocols = self.driver.ex_list_protocols_with_default_ports()
self.assertEqual(len(protocols), 10)
self.assertTrue(('http', 80) in protocols)
def test_list_supported_algorithms(self):
algorithms = self.driver.list_supported_algorithms()
self.assertTrue(Algorithm.RANDOM in algorithms)
self.assertTrue(Algorithm.ROUND_ROBIN in algorithms)
self.assertTrue(Algorithm.LEAST_CONNECTIONS in algorithms)
self.assertTrue(Algorithm.WEIGHTED_ROUND_ROBIN in algorithms)
self.assertTrue(Algorithm.WEIGHTED_LEAST_CONNECTIONS in algorithms)
def test_ex_list_algorithms(self):
algorithms = self.driver.ex_list_algorithm_names()
self.assertTrue("RANDOM" in algorithms)
self.assertTrue("ROUND_ROBIN" in algorithms)
self.assertTrue("LEAST_CONNECTIONS" in algorithms)
self.assertTrue("WEIGHTED_ROUND_ROBIN" in algorithms)
self.assertTrue("WEIGHTED_LEAST_CONNECTIONS" in algorithms)
def test_list_balancers(self):
balancers = self.driver.list_balancers()
self.assertEqual(len(balancers), 2)
self.assertEqual(balancers[0].name, "test0")
self.assertEqual(balancers[0].id, "8155")
self.assertEqual(balancers[0].port, 80)
self.assertEqual(balancers[0].ip, "1.1.1.25")
self.assertEqual(balancers[1].name, "test1")
self.assertEqual(balancers[1].id, "8156")
def test_list_balancers_ex_member_address(self):
RackspaceLBMockHttp.type = 'EX_MEMBER_ADDRESS'
balancers = self.driver.list_balancers(ex_member_address='127.0.0.1')
self.assertEqual(len(balancers), 3)
self.assertEqual(balancers[0].name, "First Loadbalancer")
self.assertEqual(balancers[0].id, "1")
self.assertEqual(balancers[1].name, "Second Loadbalancer")
self.assertEqual(balancers[1].id, "2")
self.assertEqual(balancers[2].name, "Third Loadbalancer")
self.assertEqual(balancers[2].id, "8")
def test_create_balancer(self):
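        # Members may carry extra per-member options such as condition and
        # weight.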
balancer = self.driver.create_balancer(name='test2',
port=80,
algorithm=Algorithm.ROUND_ROBIN,
members=(
Member(
None, '10.1.0.10', 80,
extra={'condition': MemberCondition.DISABLED,
'weight': 10}),
Member(None, '10.1.0.11', 80))
)
self.assertEqual(balancer.name, 'test2')
self.assertEqual(balancer.id, '8290')
def test_ex_create_balancer(self):
RackspaceLBDriver.connectionCls.conn_classes = (None,
RackspaceLBWithVIPMockHttp)
RackspaceLBMockHttp.type = None
driver = RackspaceLBDriver('user', 'key')
balancer = driver.ex_create_balancer(name='test2',
port=80,
algorithm=Algorithm.ROUND_ROBIN,
members=(
Member(
None, '10.1.0.11', 80),),
vip='12af'
)
self.assertEqual(balancer.name, 'test2')
self.assertEqual(balancer.id, '8290')
def test_destroy_balancer(self):
balancer = self.driver.list_balancers()[0]
ret = self.driver.destroy_balancer(balancer)
self.assertTrue(ret)
def test_ex_destroy_balancers(self):
balancers = self.driver.list_balancers()
ret = self.driver.ex_destroy_balancers(balancers)
self.assertTrue(ret)
def test_get_balancer(self):
balancer = self.driver.get_balancer(balancer_id='8290')
self.assertEqual(balancer.name, 'test2')
self.assertEqual(balancer.id, '8290')
def test_get_balancer_extra_vips(self):
balancer = self.driver.get_balancer(balancer_id='18940')
self.assertEqual(balancer.extra["virtualIps"],
[{"address": "50.56.49.149",
"id": 2359,
"type": "PUBLIC",
"ipVersion": "IPV4"}])
def test_get_balancer_extra_public_source_ipv4(self):
balancer = self.driver.get_balancer(balancer_id='18940')
self.assertEqual(balancer.extra["ipv4PublicSource"], '184.106.100.25')
def test_get_balancer_extra_public_source_ipv6(self):
balancer = self.driver.get_balancer(balancer_id='18940')
self.assertEqual(balancer.extra["ipv6PublicSource"],
'2001:4801:7901::6/64')
def test_get_balancer_extra_private_source_ipv4(self):
balancer = self.driver.get_balancer(balancer_id='18940')
self.assertEqual(balancer.extra["ipv4PrivateSource"], '10.183.252.25')
def test_get_balancer_extra_members(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = balancer.extra['members']
self.assertEqual(3, len(members))
self.assertEqual('10.1.0.11', members[0].ip)
self.assertEqual('10.1.0.10', members[1].ip)
self.assertEqual('10.1.0.9', members[2].ip)
def test_get_balancer_extra_created(self):
balancer = self.driver.get_balancer(balancer_id='8290')
created_8290 = datetime.datetime(2011, 4, 7, 16, 27, 50)
self.assertEqual(created_8290, balancer.extra['created'])
def test_get_balancer_extra_updated(self):
balancer = self.driver.get_balancer(balancer_id='8290')
updated_8290 = datetime.datetime(2011, 4, 7, 16, 28, 12)
self.assertEqual(updated_8290, balancer.extra['updated'])
def test_get_balancer_extra_access_list(self):
balancer = self.driver.get_balancer(balancer_id='94698')
access_list = balancer.extra['accessList']
self.assertEqual(3, len(access_list))
self.assertEqual(2883, access_list[0].id)
self.assertEqual("0.0.0.0/0", access_list[0].address)
self.assertEqual(RackspaceAccessRuleType.DENY,
access_list[0].rule_type)
self.assertEqual(2884, access_list[1].id)
self.assertEqual("2001:4801:7901::6/64",
access_list[1].address)
self.assertEqual(RackspaceAccessRuleType.ALLOW,
access_list[1].rule_type)
self.assertEqual(3006, access_list[2].id)
self.assertEqual("8.8.8.8/0", access_list[2].address)
self.assertEqual(RackspaceAccessRuleType.DENY,
access_list[2].rule_type)
def test_get_balancer_algorithm(self):
balancer = self.driver.get_balancer(balancer_id='8290')
self.assertEqual(balancer.extra["algorithm"], Algorithm.RANDOM)
def test_get_balancer_protocol(self):
balancer = self.driver.get_balancer(balancer_id='94695')
self.assertEqual(balancer.extra['protocol'], 'HTTP')
def test_get_balancer_weighted_round_robin_algorithm(self):
balancer = self.driver.get_balancer(balancer_id='94692')
self.assertEqual(balancer.extra["algorithm"],
Algorithm.WEIGHTED_ROUND_ROBIN)
def test_get_balancer_weighted_least_connections_algorithm(self):
balancer = self.driver.get_balancer(balancer_id='94693')
self.assertEqual(balancer.extra["algorithm"],
Algorithm.WEIGHTED_LEAST_CONNECTIONS)
def test_get_balancer_unknown_algorithm(self):
balancer = self.driver.get_balancer(balancer_id='94694')
self.assertFalse('algorithm' in balancer.extra)
def test_get_balancer_connect_health_monitor(self):
balancer = self.driver.get_balancer(balancer_id='94695')
balancer_health_monitor = balancer.extra["healthMonitor"]
self.assertEqual(balancer_health_monitor.type, "CONNECT")
self.assertEqual(balancer_health_monitor.delay, 10)
self.assertEqual(balancer_health_monitor.timeout, 5)
self.assertEqual(balancer_health_monitor.attempts_before_deactivation,
2)
def test_get_balancer_http_health_monitor(self):
balancer = self.driver.get_balancer(balancer_id='94696')
balancer_health_monitor = balancer.extra["healthMonitor"]
self.assertEqual(balancer_health_monitor.type, "HTTP")
self.assertEqual(balancer_health_monitor.delay, 10)
self.assertEqual(balancer_health_monitor.timeout, 5)
self.assertEqual(balancer_health_monitor.attempts_before_deactivation,
2)
self.assertEqual(balancer_health_monitor.path, "/")
self.assertEqual(balancer_health_monitor.status_regex,
"^[234][0-9][0-9]$")
self.assertEqual(balancer_health_monitor.body_regex,
"Hello World!")
def test_get_balancer_https_health_monitor(self):
balancer = self.driver.get_balancer(balancer_id='94697')
balancer_health_monitor = balancer.extra["healthMonitor"]
self.assertEqual(balancer_health_monitor.type, "HTTPS")
self.assertEqual(balancer_health_monitor.delay, 15)
self.assertEqual(balancer_health_monitor.timeout, 12)
self.assertEqual(balancer_health_monitor.attempts_before_deactivation,
5)
self.assertEqual(balancer_health_monitor.path, "/test")
self.assertEqual(balancer_health_monitor.status_regex,
"^[234][0-9][0-9]$")
self.assertEqual(balancer_health_monitor.body_regex, "abcdef")
def test_get_balancer_connection_throttle(self):
balancer = self.driver.get_balancer(balancer_id='94695')
balancer_connection_throttle = balancer.extra["connectionThrottle"]
self.assertEqual(balancer_connection_throttle.min_connections, 50)
self.assertEqual(balancer_connection_throttle.max_connections, 200)
self.assertEqual(balancer_connection_throttle.max_connection_rate, 50)
self.assertEqual(balancer_connection_throttle.rate_interval_seconds,
10)
def test_get_session_persistence(self):
balancer = self.driver.get_balancer(balancer_id='94695')
self.assertEqual(balancer.extra["sessionPersistenceType"],
"HTTP_COOKIE")
def test_get_connection_logging(self):
balancer = self.driver.get_balancer(balancer_id='94695')
self.assertEqual(balancer.extra["connectionLoggingEnabled"], True)
def test_get_error_page(self):
balancer = self.driver.get_balancer(balancer_id='18940')
error_page = self.driver.ex_get_balancer_error_page(balancer)
self.assertTrue("The service is temporarily unavailable" in error_page)
def test_get_access_list(self):
balancer = self.driver.get_balancer(balancer_id='18940')
deny_rule, allow_rule = self.driver.ex_balancer_access_list(balancer)
self.assertEqual(deny_rule.id, 2883)
self.assertEqual(deny_rule.rule_type, RackspaceAccessRuleType.DENY)
self.assertEqual(deny_rule.address, "0.0.0.0/0")
self.assertEqual(allow_rule.id, 2884)
self.assertEqual(allow_rule.address, "2001:4801:7901::6/64")
self.assertEqual(allow_rule.rule_type, RackspaceAccessRuleType.ALLOW)
def test_ex_create_balancer_access_rule(self):
balancer = self.driver.get_balancer(balancer_id='94698')
rule = RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY,
address='0.0.0.0/0')
rule = self.driver.ex_create_balancer_access_rule(balancer, rule)
self.assertEqual(2883, rule.id)
def test_ex_create_balancer_access_rule_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94698')
rule = RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY,
address='0.0.0.0/0')
resp = self.driver.ex_create_balancer_access_rule_no_poll(balancer,
rule)
self.assertTrue(resp)
def test_ex_create_balancer_access_rules(self):
balancer = self.driver.get_balancer(balancer_id='94699')
rules = [RackspaceAccessRule(rule_type=RackspaceAccessRuleType.ALLOW,
address='2001:4801:7901::6/64'),
RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY,
address='8.8.8.8/0')]
rules = self.driver.ex_create_balancer_access_rules(balancer, rules)
self.assertEqual(2, len(rules))
self.assertEqual(2884, rules[0].id)
self.assertEqual(3006, rules[1].id)
def test_ex_create_balancer_access_rules_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94699')
rules = [RackspaceAccessRule(rule_type=RackspaceAccessRuleType.ALLOW,
address='2001:4801:7901::6/64'),
RackspaceAccessRule(rule_type=RackspaceAccessRuleType.DENY,
address='8.8.8.8/0')]
resp = self.driver.ex_create_balancer_access_rules_no_poll(balancer,
rules)
self.assertTrue(resp)
def test_ex_destroy_balancer_access_rule(self):
balancer = self.driver.get_balancer(balancer_id='94698')
rule = RackspaceAccessRule(id='1007',
rule_type=RackspaceAccessRuleType.ALLOW,
address="10.45.13.5/12"
)
balancer = self.driver.ex_destroy_balancer_access_rule(balancer, rule)
rule_ids = [r.id for r in balancer.extra['accessList']]
self.assertTrue(1007 not in rule_ids)
def test_ex_destroy_balancer_access_rule_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94698')
rule = RackspaceAccessRule(id=1007,
rule_type=RackspaceAccessRuleType.ALLOW,
address="10.45.13.5/12"
)
resp = self.driver.ex_destroy_balancer_access_rule_no_poll(balancer,
rule)
self.assertTrue(resp)
def test_ex_destroy_balancer_access_rules(self):
balancer = self.driver.get_balancer(balancer_id='94699')
balancer = self.driver.ex_destroy_balancer_access_rules(balancer,
balancer.extra['accessList'])
self.assertEqual('94699', balancer.id)
def test_ex_destroy_balancer_access_rules_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94699')
resp = self.driver.ex_destroy_balancer_access_rules_no_poll(balancer,
balancer.extra['accessList'])
self.assertTrue(resp)
def test_ex_update_balancer_health_monitor(self):
balancer = self.driver.get_balancer(balancer_id='94695')
monitor = RackspaceHealthMonitor(type='CONNECT', delay=10, timeout=5,
attempts_before_deactivation=2)
balancer = self.driver.ex_update_balancer_health_monitor(
balancer, monitor)
updated_monitor = balancer.extra['healthMonitor']
self.assertEqual('CONNECT', updated_monitor.type)
self.assertEqual(10, updated_monitor.delay)
self.assertEqual(5, updated_monitor.timeout)
self.assertEqual(2, updated_monitor.attempts_before_deactivation)
def test_ex_update_balancer_http_health_monitor(self):
balancer = self.driver.get_balancer(balancer_id='94696')
monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5,
attempts_before_deactivation=2,
path='/',
status_regex='^[234][0-9][0-9]$',
body_regex='Hello World!')
balancer = self.driver.ex_update_balancer_health_monitor(
balancer, monitor)
updated_monitor = balancer.extra['healthMonitor']
self.assertEqual('HTTP', updated_monitor.type)
self.assertEqual(10, updated_monitor.delay)
self.assertEqual(5, updated_monitor.timeout)
self.assertEqual(2, updated_monitor.attempts_before_deactivation)
self.assertEqual('/', updated_monitor.path)
self.assertEqual('^[234][0-9][0-9]$', updated_monitor.status_regex)
self.assertEqual('Hello World!', updated_monitor.body_regex)
def test_ex_update_balancer_health_monitor_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94695')
monitor = RackspaceHealthMonitor(type='CONNECT', delay=10, timeout=5,
attempts_before_deactivation=2)
resp = self.driver.ex_update_balancer_health_monitor_no_poll(balancer,
monitor)
self.assertTrue(resp)
def test_ex_update_balancer_http_health_monitor_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94696')
monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5,
attempts_before_deactivation=2,
path='/',
status_regex='^[234][0-9][0-9]$',
body_regex='Hello World!')
resp = self.driver.ex_update_balancer_health_monitor_no_poll(balancer,
monitor)
self.assertTrue(resp)
def test_ex_update_balancer_http_health_monitor_with_no_option_body_regex(self):
balancer = self.driver.get_balancer(balancer_id='94700')
monitor = RackspaceHTTPHealthMonitor(type='HTTP', delay=10, timeout=5,
attempts_before_deactivation=2,
path='/',
status_regex='^[234][0-9][0-9]$',
body_regex='')
balancer = self.driver.ex_update_balancer_health_monitor(
balancer, monitor)
updated_monitor = balancer.extra['healthMonitor']
self.assertEqual('HTTP', updated_monitor.type)
self.assertEqual(10, updated_monitor.delay)
self.assertEqual(5, updated_monitor.timeout)
self.assertEqual(2, updated_monitor.attempts_before_deactivation)
self.assertEqual('/', updated_monitor.path)
self.assertEqual('^[234][0-9][0-9]$', updated_monitor.status_regex)
self.assertEqual('', updated_monitor.body_regex)
def test_ex_disable_balancer_health_monitor(self):
balancer = self.driver.get_balancer(balancer_id='8290')
balancer = self.driver.ex_disable_balancer_health_monitor(balancer)
self.assertTrue('healthMonitor' not in balancer.extra)
def test_ex_disable_balancer_health_monitor_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='8290')
resp = self.driver.ex_disable_balancer_health_monitor_no_poll(balancer)
self.assertTrue(resp)
def test_ex_update_balancer_connection_throttle(self):
balancer = self.driver.get_balancer(balancer_id='94695')
connection_throttle = RackspaceConnectionThrottle(max_connections=200,
min_connections=50,
max_connection_rate=50,
rate_interval_seconds=10)
balancer = self.driver.ex_update_balancer_connection_throttle(balancer,
connection_throttle)
updated_throttle = balancer.extra['connectionThrottle']
self.assertEqual(200, updated_throttle.max_connections)
self.assertEqual(50, updated_throttle.min_connections)
self.assertEqual(50, updated_throttle.max_connection_rate)
self.assertEqual(10, updated_throttle.rate_interval_seconds)
def test_ex_update_balancer_connection_throttle_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94695')
connection_throttle = RackspaceConnectionThrottle(max_connections=200,
min_connections=50,
max_connection_rate=50,
rate_interval_seconds=10)
resp = self.driver.ex_update_balancer_connection_throttle_no_poll(
balancer, connection_throttle)
self.assertTrue(resp)
def test_ex_disable_balancer_connection_throttle(self):
balancer = self.driver.get_balancer(balancer_id='8290')
balancer = self.driver.ex_disable_balancer_connection_throttle(
balancer)
self.assertTrue('connectionThrottle' not in balancer.extra)
def test_ex_disable_balancer_connection_throttle_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='8290')
resp = self.driver.ex_disable_balancer_connection_throttle_no_poll(
balancer)
self.assertTrue(resp)
def test_ex_enable_balancer_connection_logging(self):
balancer = self.driver.get_balancer(balancer_id='94695')
balancer = self.driver.ex_enable_balancer_connection_logging(
balancer)
self.assertTrue(balancer.extra["connectionLoggingEnabled"])
def test_ex_enable_balancer_connection_logging_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94695')
resp = self.driver.ex_enable_balancer_connection_logging_no_poll(
balancer)
self.assertTrue(resp)
def test_ex_disable_balancer_connection_logging(self):
balancer = self.driver.get_balancer(balancer_id='8290')
balancer = self.driver.ex_disable_balancer_connection_logging(
balancer
)
self.assertFalse(balancer.extra["connectionLoggingEnabled"])
def test_ex_disable_balancer_connection_logging_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='8290')
resp = self.driver.ex_disable_balancer_connection_logging_no_poll(
balancer
)
self.assertTrue(resp)
def test_ex_enable_balancer_session_persistence(self):
balancer = self.driver.get_balancer(balancer_id='94695')
balancer = self.driver.ex_enable_balancer_session_persistence(balancer)
persistence_type = balancer.extra['sessionPersistenceType']
self.assertEqual('HTTP_COOKIE', persistence_type)
def test_ex_enable_balancer_session_persistence_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94695')
resp = self.driver.ex_enable_balancer_session_persistence_no_poll(
balancer)
self.assertTrue(resp)
def test_disable_balancer_session_persistence(self):
balancer = self.driver.get_balancer(balancer_id='8290')
balancer = self.driver.ex_disable_balancer_session_persistence(
balancer)
self.assertTrue('sessionPersistenceType' not in balancer.extra)
def test_disable_balancer_session_persistence_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='8290')
resp = self.driver.ex_disable_balancer_session_persistence_no_poll(
balancer)
self.assertTrue(resp)
def test_ex_update_balancer_error_page(self):
balancer = self.driver.get_balancer(balancer_id='8290')
content = "<html>Generic Error Page</html>"
balancer = self.driver.ex_update_balancer_error_page(
balancer, content)
error_page_content = self.driver.ex_get_balancer_error_page(balancer)
self.assertEqual(content, error_page_content)
def test_ex_update_balancer_error_page_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='8290')
content = "<html>Generic Error Page</html>"
resp = self.driver.ex_update_balancer_error_page_no_poll(
balancer, content)
self.assertTrue(resp)
def test_ex_disable_balancer_custom_error_page_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='94695')
resp = self.driver.ex_disable_balancer_custom_error_page_no_poll(
balancer)
self.assertTrue(resp)
def test_ex_disable_balancer_custom_error_page(self):
fixtures = LoadBalancerFileFixtures('rackspace')
error_page_fixture = json.loads(
fixtures.load('error_page_default.json'))
default_error_page = error_page_fixture['errorpage']['content']
balancer = self.driver.get_balancer(balancer_id='94695')
balancer = self.driver.ex_disable_balancer_custom_error_page(balancer)
error_page_content = self.driver.ex_get_balancer_error_page(balancer)
self.assertEqual(default_error_page, error_page_content)
def test_balancer_list_members(self):
expected = set(['10.1.0.10:80', '10.1.0.11:80', '10.1.0.9:8080'])
balancer = self.driver.get_balancer(balancer_id='8290')
members = balancer.list_members()
self.assertEqual(len(members), 3)
self.assertEqual(members[0].balancer, balancer)
self.assertEqual(expected, set(["%s:%s" % (member.ip, member.port) for
member in members]))
def test_balancer_members_extra_weight(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = balancer.list_members()
self.assertEqual(12, members[0].extra['weight'])
self.assertEqual(8, members[1].extra['weight'])
def test_balancer_members_extra_condition(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = balancer.list_members()
self.assertEqual(MemberCondition.ENABLED,
members[0].extra['condition'])
self.assertEqual(MemberCondition.DISABLED,
members[1].extra['condition'])
self.assertEqual(MemberCondition.DRAINING,
members[2].extra['condition'])
def test_balancer_members_extra_status(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = balancer.list_members()
self.assertEqual('ONLINE', members[0].extra['status'])
self.assertEqual('OFFLINE', members[1].extra['status'])
self.assertEqual('DRAINING', members[2].extra['status'])
def test_balancer_attach_member(self):
balancer = self.driver.get_balancer(balancer_id='8290')
extra = {'condition': MemberCondition.DISABLED,
'weight': 10}
member = balancer.attach_member(Member(None, ip='10.1.0.12',
port='80', extra=extra))
self.assertEqual(member.ip, '10.1.0.12')
self.assertEqual(member.port, 80)
def test_balancer_attach_member_with_no_condition_specified(self):
balancer = self.driver.get_balancer(balancer_id='8291')
member = balancer.attach_member(Member(None, ip='10.1.0.12',
port='80'))
self.assertEqual(member.ip, '10.1.0.12')
self.assertEqual(member.port, 80)
def test_balancer_attach_members(self):
balancer = self.driver.get_balancer(balancer_id='8292')
members = [Member(None, ip='10.1.0.12', port='80'),
Member(None, ip='10.1.0.13', port='80')]
attached_members = self.driver.ex_balancer_attach_members(balancer,
members)
first_member = attached_members[0]
second_member = attached_members[1]
self.assertEqual(first_member.ip, '10.1.0.12')
self.assertEqual(first_member.port, 80)
self.assertEqual(second_member.ip, '10.1.0.13')
self.assertEqual(second_member.port, 80)
def test_balancer_detach_member(self):
balancer = self.driver.get_balancer(balancer_id='8290')
member = balancer.list_members()[0]
ret = balancer.detach_member(member)
self.assertTrue(ret)
def test_ex_detach_members(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = balancer.list_members()
balancer = self.driver.ex_balancer_detach_members(balancer, members)
self.assertEqual('8290', balancer.id)
def test_ex_detach_members_no_poll(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = balancer.list_members()
ret = self.driver.ex_balancer_detach_members_no_poll(balancer, members)
self.assertTrue(ret)
def test_update_balancer_protocol(self):
balancer = LoadBalancer(id='3130', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
updated_balancer = self.driver.update_balancer(
balancer, protocol='HTTPS')
self.assertEqual('HTTPS', updated_balancer.extra['protocol'])
def test_update_balancer_protocol_to_imapv2(self):
balancer = LoadBalancer(id='3135', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
updated_balancer = self.driver.update_balancer(
balancer, protocol='imapv2')
self.assertEqual('IMAPv2', updated_balancer.extra['protocol'])
def test_update_balancer_protocol_to_imapv3(self):
balancer = LoadBalancer(id='3136', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
updated_balancer = self.driver.update_balancer(
balancer, protocol='IMAPV3')
self.assertEqual('IMAPv3', updated_balancer.extra['protocol'])
def test_update_balancer_protocol_to_imapv4(self):
balancer = LoadBalancer(id='3137', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
updated_balancer = self.driver.update_balancer(
balancer, protocol='IMAPv4')
self.assertEqual('IMAPv4', updated_balancer.extra['protocol'])
def test_update_balancer_port(self):
balancer = LoadBalancer(id='3131', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
updated_balancer = self.driver.update_balancer(balancer, port=1337)
self.assertEqual(1337, updated_balancer.port)
def test_update_balancer_name(self):
balancer = LoadBalancer(id='3132', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
updated_balancer = self.driver.update_balancer(
balancer, name='new_lb_name')
self.assertEqual('new_lb_name', updated_balancer.name)
def test_update_balancer_algorithm(self):
balancer = LoadBalancer(id='3133', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
updated_balancer = self.driver.update_balancer(balancer,
algorithm=Algorithm.ROUND_ROBIN)
self.assertEqual(
Algorithm.ROUND_ROBIN, updated_balancer.extra['algorithm'])
def test_update_balancer_bad_algorithm_exception(self):
balancer = LoadBalancer(id='3134', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
try:
self.driver.update_balancer(balancer,
algorithm='HAVE_MERCY_ON_OUR_SERVERS')
except LibcloudError:
pass
else:
self.fail(
'Should have thrown an exception with bad algorithm value')
def test_ex_update_balancer_no_poll_protocol(self):
balancer = LoadBalancer(id='3130', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
action_succeeded = self.driver.ex_update_balancer_no_poll(
balancer,
protocol='HTTPS')
self.assertTrue(action_succeeded)
def test_ex_update_balancer_no_poll_port(self):
balancer = LoadBalancer(id='3131', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
action_succeeded = self.driver.ex_update_balancer_no_poll(
balancer,
port=1337)
self.assertTrue(action_succeeded)
def test_ex_update_balancer_no_poll_name(self):
balancer = LoadBalancer(id='3132', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
action_succeeded = self.driver.ex_update_balancer_no_poll(
balancer,
name='new_lb_name')
self.assertTrue(action_succeeded)
def test_ex_update_balancer_no_poll_algorithm(self):
balancer = LoadBalancer(id='3133', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
action_succeeded = self.driver.ex_update_balancer_no_poll(balancer,
algorithm=Algorithm.ROUND_ROBIN)
self.assertTrue(action_succeeded)
def test_ex_update_balancer_no_poll_bad_algorithm_exception(self):
balancer = LoadBalancer(id='3134', name='LB_update',
state='PENDING_UPDATE', ip='10.34.4.3',
port=80, driver=self.driver)
try:
self.driver.update_balancer(balancer,
algorithm='HAVE_MERCY_ON_OUR_SERVERS')
except LibcloudError:
pass
else:
self.fail('Should have thrown exception with bad algorithm value')
def test_ex_update_balancer_member_extra_attributes(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = self.driver.balancer_list_members(balancer)
first_member = members[0]
member = self.driver.ex_balancer_update_member(balancer, first_member,
condition=MemberCondition.ENABLED, weight=12)
self.assertEqual(MemberCondition.ENABLED, member.extra['condition'])
self.assertEqual(12, member.extra['weight'])
def test_ex_update_balancer_member_no_poll_extra_attributes(self):
balancer = self.driver.get_balancer(balancer_id='8290')
members = self.driver.balancer_list_members(balancer)
first_member = members[0]
resp = self.driver.ex_balancer_update_member_no_poll(
balancer, first_member,
condition=MemberCondition.ENABLED, weight=12)
self.assertTrue(resp)
def test_ex_list_current_usage(self):
balancer = self.driver.get_balancer(balancer_id='8290')
usage = self.driver.ex_list_current_usage(balancer=balancer)
self.assertEqual(
usage['loadBalancerUsageRecords'][0]['incomingTransferSsl'],
6182163)
class RackspaceUKLBTests(RackspaceLBTests):
def setUp(self):
RackspaceLBDriver.connectionCls.conn_classes = (None,
RackspaceLBMockHttp)
RackspaceLBMockHttp.type = None
self.driver = RackspaceUKLBDriver('user', 'key')
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
class RackspaceLBMockHttp(MockHttpTestCase):
fixtures = LoadBalancerFileFixtures('rackspace')
auth_fixtures = OpenStackFixtures()
def _v2_0_tokens(self, method, url, body, headers):
body = self.fixtures.load('_v2_0__auth.json')
return (httplib.OK, body, headers,
httplib.responses[httplib.OK])
def _v1_0_11111_loadbalancers_protocols(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_loadbalancers_protocols.json')
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
def _v1_0_11111_loadbalancers_algorithms(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('v1_slug_loadbalancers_algorithms.json')
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('v1_slug_loadbalancers.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == "POST":
json_body = json.loads(body)
loadbalancer_json = json_body['loadBalancer']
member_1_json, member_2_json = loadbalancer_json['nodes']
self.assertEqual(loadbalancer_json['protocol'], 'HTTP')
self.assertEqual(loadbalancer_json['algorithm'], 'ROUND_ROBIN')
self.assertEqual(loadbalancer_json['virtualIps'][0]['type'],
'PUBLIC')
self.assertEqual(member_1_json['condition'], 'DISABLED')
self.assertEqual(member_1_json['weight'], 10)
self.assertEqual(member_2_json['condition'], 'ENABLED')
body = self.fixtures.load('v1_slug_loadbalancers_post.json')
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
elif method == 'DELETE':
balancers = self.fixtures.load('v1_slug_loadbalancers.json')
balancers_json = json.loads(balancers)
for balancer in balancers_json['loadBalancers']:
id = balancer['id']
self.assertTrue(urlencode([('id', id)]) in url,
msg='Did not delete balancer with id %d' % id)
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_EX_MEMBER_ADDRESS(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_loadbalancers_nodeaddress.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_0_11111_loadbalancers_8155(self, method, url, body, headers):
if method == "DELETE":
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('v1_slug_loadbalancers_8290.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_nodes(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == "POST":
json_body = json.loads(body)
json_node = json_body['nodes'][0]
self.assertEqual('DISABLED', json_node['condition'])
self.assertEqual(10, json_node['weight'])
response_body = self.fixtures.load(
'v1_slug_loadbalancers_8290_nodes_post.json')
return (httplib.ACCEPTED, response_body, {},
httplib.responses[httplib.ACCEPTED])
elif method == "DELETE":
nodes = self.fixtures.load('v1_slug_loadbalancers_8290_nodes.json')
json_nodes = json.loads(nodes)
for node in json_nodes['nodes']:
id = node['id']
self.assertTrue(urlencode([('id', id)]) in url,
msg='Did not delete member with id %d' % id)
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8291(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('v1_slug_loadbalancers_8291.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8291_nodes(self, method, url, body, headers):
if method == "POST":
json_body = json.loads(body)
json_node = json_body['nodes'][0]
self.assertEqual('ENABLED', json_node['condition'])
response_body = self.fixtures.load(
'v1_slug_loadbalancers_8290_nodes_post.json')
return (httplib.ACCEPTED, response_body, {},
httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8292(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('v1_slug_loadbalancers_8292.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8292_nodes(self, method, url, body, headers):
if method == "POST":
json_body = json.loads(body)
json_node_1 = json_body['nodes'][0]
json_node_2 = json_body['nodes'][1]
self.assertEqual('10.1.0.12', json_node_1['address'])
self.assertEqual('10.1.0.13', json_node_2['address'])
response_body = self.fixtures.load(
'v1_slug_loadbalancers_8292_nodes_post.json')
return (httplib.ACCEPTED, response_body, {},
httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_nodes_30944(self, method, url, body, headers):
if method == "PUT":
json_body = json.loads(body)
self.assertEqual('ENABLED', json_body['condition'])
self.assertEqual(12, json_body['weight'])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
elif method == "DELETE":
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_healthmonitor(self, method, url, body, headers):
if method == "DELETE":
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_connectionthrottle(self, method, url, body, headers):
if method == 'DELETE':
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_connectionlogging(self, method, url, body, headers):
# Connection Logging uses a PUT to disable connection logging
if method == 'PUT':
json_body = json.loads(body)
self.assertFalse(json_body["connectionLogging"]["enabled"])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_sessionpersistence(self, method, url, body, headers):
if method == 'DELETE':
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_errorpage(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'v1_slug_loadbalancers_8290_errorpage.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == 'PUT':
json_body = json.loads(body)
self.assertEqual('<html>Generic Error Page</html>',
json_body['errorpage']['content'])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_18940(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_18940_ex_public_ips.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_18945(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_18945_ex_public_ips.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_18940_errorpage(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_18940_errorpage.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_18940_accesslist(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'v1_slug_loadbalancers_18940_accesslist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_18941(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_18941_ex_private_ips.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94692(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94692_weighted_round_robin.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94693(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94693_weighted_least_connections.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94694(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94694_unknown_algorithm.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94695(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94695_full_details.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94695_healthmonitor(self, method, url, body, headers):
if method == 'PUT':
json_body = json.loads(body)
self.assertEqual('CONNECT', json_body['type'])
self.assertEqual(10, json_body['delay'])
self.assertEqual(5, json_body['timeout'])
self.assertEqual(2, json_body['attemptsBeforeDeactivation'])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94695_connectionthrottle(self, method, url, body, headers):
if method == 'PUT':
json_body = json.loads(body)
self.assertEqual(50, json_body['minConnections'])
self.assertEqual(200, json_body['maxConnections'])
self.assertEqual(50, json_body['maxConnectionRate'])
self.assertEqual(10, json_body['rateInterval'])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94695_connectionlogging(self, method, url, body, headers):
if method == 'PUT':
json_body = json.loads(body)
self.assertTrue(json_body["connectionLogging"]["enabled"])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94695_sessionpersistence(self, method, url, body, headers):
if method == 'PUT':
json_body = json.loads(body)
persistence_type = json_body[
'sessionPersistence']['persistenceType']
self.assertEqual('HTTP_COOKIE', persistence_type)
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94695_errorpage(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load("error_page_default.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == 'DELETE':
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94696(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94696_http_health_monitor.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94696_healthmonitor(self, method, url, body, headers):
if method == 'PUT':
json_body = json.loads(body)
self.assertEqual('HTTP', json_body['type'])
self.assertEqual(10, json_body['delay'])
self.assertEqual(5, json_body['timeout'])
self.assertEqual(2, json_body['attemptsBeforeDeactivation'])
self.assertEqual('/', json_body['path'])
self.assertEqual('^[234][0-9][0-9]$', json_body['statusRegex'])
self.assertEqual('Hello World!', json_body['bodyRegex'])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94697(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94697_https_health_monitor.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94698(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94698_with_access_list.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94698_accesslist(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'v1_slug_loadbalancers_94698_accesslist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == 'POST':
json_body = json.loads(body)
self.assertEqual('0.0.0.0/0', json_body['networkItem']['address'])
self.assertEqual('DENY', json_body['networkItem']['type'])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94699(self, method, url, body, headers):
if method == 'GET':
# Use the same fixture for batch deletes as for single deletes
body = self.fixtures.load(
'v1_slug_loadbalancers_94698_with_access_list.json')
json_body = json.loads(body)
json_body['loadBalancer']['id'] = 94699
updated_body = json.dumps(json_body)
return (httplib.OK, updated_body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94699_accesslist(self, method, url, body, headers):
if method == 'DELETE':
fixture = 'v1_slug_loadbalancers_94698_with_access_list.json'
fixture_json = json.loads(self.fixtures.load(fixture))
access_list_json = fixture_json['loadBalancer']['accessList']
for access_rule in access_list_json:
id = access_rule['id']
self.assertTrue(urlencode([('id', id)]) in url,
msg='Did not delete access rule with id %d' % id)
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
elif method == 'POST':
json_body = json.loads(body)
access_list = json_body['accessList']
self.assertEqual('ALLOW', access_list[0]['type'])
self.assertEqual('2001:4801:7901::6/64', access_list[0]['address'])
self.assertEqual('DENY', access_list[1]['type'])
self.assertEqual('8.8.8.8/0', access_list[1]['address'])
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94698_accesslist_1007(self, method, url, body, headers):
if method == 'DELETE':
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94700(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
"v1_slug_loadbalancers_94700_http_health_monitor_no_body_regex.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_94700_healthmonitor(self, method, url, body, headers):
if method == 'PUT':
json_body = json.loads(body)
self.assertEqual('HTTP', json_body['type'])
self.assertEqual(10, json_body['delay'])
self.assertEqual(5, json_body['timeout'])
self.assertEqual(2, json_body['attemptsBeforeDeactivation'])
self.assertEqual('/', json_body['path'])
self.assertEqual('^[234][0-9][0-9]$', json_body['statusRegex'])
self.assertFalse('bodyRegex' in json_body)
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3130(self, method, url, body, headers):
""" update_balancer(b, protocol='HTTPS'), then get_balancer('3130') """
if method == "PUT":
json_body = json.loads(body)
self.assertDictEqual(json_body, {'protocol': 'HTTPS'})
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
elif method == "GET":
response_body = json.loads(
self.fixtures.load("v1_slug_loadbalancers_3xxx.json"))
response_body['loadBalancer']['id'] = 3130
response_body['loadBalancer']['protocol'] = 'HTTPS'
return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3131(self, method, url, body, headers):
""" update_balancer(b, port=443), then get_balancer('3131') """
if method == "PUT":
json_body = json.loads(body)
self.assertDictEqual(json_body, {'port': 1337})
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
elif method == "GET":
response_body = json.loads(
self.fixtures.load("v1_slug_loadbalancers_3xxx.json"))
response_body['loadBalancer']['id'] = 3131
response_body['loadBalancer']['port'] = 1337
return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3132(self, method, url, body, headers):
""" update_balancer(b, name='new_lb_name'), then get_balancer('3132') """
if method == "PUT":
json_body = json.loads(body)
self.assertDictEqual(json_body, {'name': 'new_lb_name'})
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
elif method == "GET":
response_body = json.loads(
self.fixtures.load("v1_slug_loadbalancers_3xxx.json"))
response_body['loadBalancer']['id'] = 3132
response_body['loadBalancer']['name'] = 'new_lb_name'
return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3133(self, method, url, body, headers):
""" update_balancer(b, algorithm='ROUND_ROBIN'), then get_balancer('3133') """
if method == "PUT":
json_body = json.loads(body)
self.assertDictEqual(json_body, {'algorithm': 'ROUND_ROBIN'})
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
elif method == "GET":
response_body = json.loads(
self.fixtures.load("v1_slug_loadbalancers_3xxx.json"))
response_body['loadBalancer']['id'] = 3133
response_body['loadBalancer']['algorithm'] = 'ROUND_ROBIN'
return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3134(self, method, url, body, headers):
""" update.balancer(b, algorithm='HAVE_MERCY_ON_OUR_SERVERS') """
if method == "PUT":
return (httplib.BAD_REQUEST, "", {}, httplib.responses[httplib.BAD_REQUEST])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3135(self, method, url, body, headers):
""" update_balancer(b, protocol='IMAPv3'), then get_balancer('3135') """
if method == "PUT":
json_body = json.loads(body)
self.assertDictEqual(json_body, {'protocol': 'IMAPv2'})
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
elif method == "GET":
response_body = json.loads(
self.fixtures.load("v1_slug_loadbalancers_3xxx.json"))
response_body['loadBalancer']['id'] = 3135
response_body['loadBalancer']['protocol'] = 'IMAPv2'
return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3136(self, method, url, body, headers):
""" update_balancer(b, protocol='IMAPv3'), then get_balancer('3136') """
if method == "PUT":
json_body = json.loads(body)
self.assertDictEqual(json_body, {'protocol': 'IMAPv3'})
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
elif method == "GET":
response_body = json.loads(
self.fixtures.load("v1_slug_loadbalancers_3xxx.json"))
response_body['loadBalancer']['id'] = 3136
response_body['loadBalancer']['protocol'] = 'IMAPv3'
return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_3137(self, method, url, body, headers):
""" update_balancer(b, protocol='IMAPv3'), then get_balancer('3137') """
if method == "PUT":
json_body = json.loads(body)
self.assertDictEqual(json_body, {'protocol': 'IMAPv4'})
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
elif method == "GET":
response_body = json.loads(
self.fixtures.load("v1_slug_loadbalancers_3xxx.json"))
response_body['loadBalancer']['id'] = 3137
response_body['loadBalancer']['protocol'] = 'IMAPv4'
return (httplib.OK, json.dumps(response_body), {}, httplib.responses[httplib.OK])
raise NotImplementedError
def _v1_0_11111_loadbalancers_8290_usage_current(self, method, url, body,
headers):
if method == 'GET':
body = self.fixtures.load(
'v1_0_slug_loadbalancers_8290_usage_current.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
raise NotImplementedError
class RackspaceLBWithVIPMockHttp(MockHttpTestCase):
fixtures = LoadBalancerFileFixtures('rackspace')
auth_fixtures = OpenStackFixtures()
def _v2_0_tokens(self, method, url, body, headers):
body = self.fixtures.load('_v2_0__auth.json')
return (httplib.OK, body, headers,
httplib.responses[httplib.OK])
def _v1_0_11111_loadbalancers(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('v1_slug_loadbalancers.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
elif method == "POST":
json_body = json.loads(body)
loadbalancer_json = json_body['loadBalancer']
self.assertEqual(loadbalancer_json['virtualIps'][0]['id'], '12af')
body = self.fixtures.load('v1_slug_loadbalancers_post.json')
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
raise NotImplementedError
if __name__ == "__main__":
sys.exit(unittest.main())
"""
My standard utilities. Intended to be included in all projects
Obviously everything included here needs to be in the standard library (or numpy)
"""
import contextlib
import fractions
import operator
import os
import re
import shutil
import string
import sys
import tempfile
import threading
from cStringIO import StringIO
from uuid import uuid4
import webcolors
def make_none(): return None
@contextlib.contextmanager
def recursionlimit_atleast(n=1000):
"""Context manager for temporarily raising the context manager's
the interpreter's maximum call stack size (misleading called the ``recursion limit``)
Notes:
This will explicitly reset the the recursion limit when we exit the context;
any intermediate recursion limit changes will be lost
This will not lower the limit ``n`` is less than the current recursion limit.
"""
    current_limit = sys.getrecursionlimit()
    if n >= current_limit:
        sys.setrecursionlimit(n)
    try:
        yield
    finally:
        # always restore the original limit, even if the block raised
        sys.setrecursionlimit(current_limit)
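# Illustrative sketch (not part of the original module): raising the recursion limit
# temporarily so a deeply recursive helper can finish without a RuntimeError.
def _example_recursionlimit_atleast(depth=5000):
    def countdown(n):
        return 0 if n == 0 else countdown(n - 1)
    with recursionlimit_atleast(depth + 100):  # headroom for frames already on the stack
        return countdown(depth)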
def if_not_none(item, default):
""" Equivalent to `item if item is not None else default` """
if item is None:
return default
else:
return item
def printflush(s, newline=True):
if newline:
print s
else:
print s,
sys.stdout.flush()
class methodcaller:
"""The pickleable implementation of the standard library operator.methodcaller.
This was copied without modification from:
https://github.com/python/cpython/blob/065990fa5bd30fb3ca61b90adebc7d8cb3f16b5a/Lib/operator.py
The c-extension version is not pickleable, so we keep a copy of the pure-python standard library
code here. See https://bugs.python.org/issue22955
Original documentation:
Return a callable object that calls the given method on its operand.
After f = methodcaller('name'), the call f(r) returns r.name().
After g = methodcaller('name', 'date', foo=1), the call g(r) returns
r.name('date', foo=1).
"""
__slots__ = ('_name', '_args', '_kwargs')
def __init__(*args, **kwargs):
if len(args) < 2:
msg = "methodcaller needs at least one argument, the method name"
raise TypeError(msg)
self = args[0]
self._name = args[1]
if not isinstance(self._name, str):
raise TypeError('method name must be a string')
self._args = args[2:]
self._kwargs = kwargs
def __call__(self, obj):
return getattr(obj, self._name)(*self._args, **self._kwargs)
def __repr__(self):
args = [repr(self._name)]
args.extend(map(repr, self._args))
args.extend('%s=%r' % (k, v) for k, v in self._kwargs.items())
return '%s.%s(%s)' % (self.__class__.__module__,
self.__class__.__name__,
', '.join(args))
def __reduce__(self):
if not self._kwargs:
return self.__class__, (self._name,) + self._args
else:
from functools import partial
return partial(self.__class__, self._name, **self._kwargs), self._args
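# Illustrative sketch (not part of the original module): unlike the C implementation
# of operator.methodcaller, this pure-python copy survives a pickle round trip.
def _example_methodcaller_pickle():
    import pickle
    upper = methodcaller('upper')
    restored = pickle.loads(pickle.dumps(upper))
    return restored('abc')  # 'ABC'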
class textnotify(object):
""" Print a single, immediately flushed line to log the execution of a block.
Prints 'done' at the end of the line (or 'ERROR' if an uncaught exception)
Examples:
>>> import time
>>> with textnotify('starting to sleep'):
>>> time.sleep(3)
starting to sleep...done
>>> with textnotify('raising an exception...'):
>>> raise ValueError()
raising an exception...error
ValueError [...]
"""
def __init__(self, startmsg):
if startmsg.strip()[-3:] != '...':
startmsg = startmsg.strip() + '...'
self.startmsg = startmsg
def __enter__(self):
printflush(self.startmsg, newline=False)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
printflush('done')
else:
printflush('ERROR')
class progressbar(object):
""" Create a progress bar for a calculation
The context manager provides a callback which needs to be called as
set_progress(percent), where percent is a number between 0 and 100
Examples:
>>> import time
>>> with progressbar('count to 100') as set_progress:
>>> for i in xrange(100):
>>> time.sleep(0.5)
>>> set_progress(i+1)
"""
def __init__(self, description):
import ipywidgets as ipy
import traitlets
try:
self.progress_bar = ipy.FloatProgress(0, min=0, max=100, description=description)
except traitlets.TraitError:
self.progress_bar = None
def __enter__(self):
from IPython.display import display
if self.progress_bar is not None:
display(self.progress_bar)
return self.set_progress
def set_progress(self, percent):
if self.progress_bar is not None:
self.progress_bar.value = percent
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.progress_bar is not None:
            self.progress_bar.value = 100.0
            if exc_type is not None:
                self.progress_bar.bar_style = 'danger'
            else:
                self.progress_bar.bar_style = 'success'
class PipedFile(object):
"""
Allows us to pass data by filesystem path without ever writing it to disk
To prevent deadlock, we spawn a thread to write to the pipe
Call it as a context manager:
>>> with PipedFile('file contents',filename='contents.txt') as pipepath:
>>> print open(pipepath,'r').read()
"""
def __init__(self, fileobj, filename='pipe'):
if type(fileobj) in (unicode,str):
self.fileobj = StringIO(fileobj)
else:
self.fileobj = fileobj
self.tempdir = None
assert '/' not in filename,"Filename must not include directory"
self.filename = filename
def __enter__(self):
self.tempdir = tempfile.mkdtemp()
self.pipe_path = os.path.join(self.tempdir, self.filename)
os.mkfifo(self.pipe_path)
self.pipe_thread = threading.Thread(target=self._write_to_pipe)
self.pipe_thread.start()
return self.pipe_path
def _write_to_pipe(self):
with open(self.pipe_path,'w') as pipe:
pipe.write(self.fileobj.read())
def __exit__(self, type, value, traceback):
if self.tempdir is not None:
shutil.rmtree(self.tempdir)
def remove_directories(list_of_paths):
"""
Removes non-leafs from a list of directory paths
"""
found_dirs = set('/')
for path in list_of_paths:
dirs = path.strip().split('/')
for i in xrange(2, len(dirs)):
found_dirs.add('/'.join(dirs[:i]))
paths = [path for path in list_of_paths if
(path.strip() not in found_dirs) and path.strip()[-1] != '/']
return paths
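# Illustrative sketch (not part of the original module): remove_directories() keeps
# only leaf entries, dropping any path that is a parent directory of another entry
# or that ends in a trailing slash.
def _example_remove_directories():
    paths = ['/a', '/a/b', '/a/b/c.txt', '/a/d/']
    return remove_directories(paths)  # ['/a/b/c.txt']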
def make_local_temp_dir():
tempdir = '/tmp/%s' % uuid4()
os.mkdir(tempdir)
return tempdir
class BaseTable(object):
def __init__(self, categories, fileobj=None):
self.categories = categories
self.lines = []
self.fileobj = fileobj
def add_line(self, obj):
if hasattr(obj, 'keys'):
newline = [obj.get(cat, '') for cat in self.categories]
else:
assert len(obj) == len(self.categories)
newline = obj
self.lines.append(newline)
self.writeline(newline)
def writeline(self, newline):
raise NotImplementedError()
def getstring(self):
raise NotImplementedError()
class PrintTable(BaseTable):
    def __init__(self, formatstr, fileobj=sys.stdout):
        self.format = formatstr
        categories = []
        self._wrote_header = False
        # string.Formatter().parse yields (literal_text, field_name, spec, conversion)
        # tuples; only the field names become table categories.
        for parsed in string.Formatter().parse(formatstr):
            field_name = parsed[1]
            if field_name:
                categories.append(field_name.split('.')[0])
        super(PrintTable, self).__init__(categories, fileobj=fileobj)
    def writeline(self, line):
        if self.fileobj is None:
            return
        if not self._wrote_header:
            header = dict(zip(self.categories, self.categories))
            print >> self.fileobj, self.format.format(**header)
            self._wrote_header = True
        print >> self.fileobj, self.format.format(**dict(zip(self.categories, line)))
    def getstring(self):
        s = StringIO()
        for line in self.lines:
            print >> s, self.format.format(**dict(zip(self.categories, line)))
        return s.getvalue()
class MarkdownTable(BaseTable):
def __init__(self, *categories):
super(MarkdownTable, self).__init__(categories)
def markdown(self, replace=None):
if replace is None: replace = {}
outlines = ['| ' + ' | '.join(self.categories) + ' |',
'|-' + ''.join('|-' for x in self.categories) + '|']
for line in self.lines:
nextline = [str(replace.get(val, val)) for val in line]
outlines.append('| ' + ' | '.join(nextline) + ' |')
return '\n'.join(outlines)
def writeline(self, newline):
pass
def getstring(self):
return self.markdown()
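# Illustrative sketch (not part of the original module): MarkdownTable collects rows
# through the BaseTable API and renders them as a GitHub-style markdown table;
# add_line() accepts either a dict keyed by category or a sequence in category order.
def _example_markdown_table():
    table = MarkdownTable('name', 'value')
    table.add_line({'name': 'alpha', 'value': 1})
    table.add_line(['beta', 2])
    return table.getstring()  # '| name | value |\n|-|-|-|\n| alpha | 1 |\n| beta | 2 |'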
def binomial_coefficient(n, k):
# credit to http://stackoverflow.com/users/226086/nas-banov
return int(reduce(operator.mul,
(fractions.Fraction(n - i, i + 1) for i in range(k)), 1))
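# Illustrative note (not part of the original module): the Fraction product above
# evaluates C(n, k) = prod_{i=0..k-1} (n - i) / (i + 1) exactly, e.g. C(5, 2) = (5/1) * (4/2).
def _example_binomial_coefficient():
    return binomial_coefficient(5, 2) == 10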
def pairwise_displacements(a):
"""
:type a: numpy.array
from http://stackoverflow.com/questions/22390418/pairwise-displacement-vectors-among-set-of-points
"""
import numpy as np
n = a.shape[0]
d = a.shape[1]
c = binomial_coefficient(n, 2)
out = np.zeros((c, d))
l = 0
r = l + n - 1
for sl in range(1, n): # no point1 - point1!
out[l:r] = a[:n - sl] - a[sl:]
l = r
r += n - (sl + 1)
return out
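# Illustrative sketch (not part of the original module): for n points in d dimensions
# the result has C(n, 2) rows, one displacement a[i] - a[j] per unordered pair i < j,
# grouped by index offset.
def _example_pairwise_displacements():
    import numpy as np
    a = np.array([[0., 0.], [1., 0.], [1., 1.]])
    disp = pairwise_displacements(a)  # rows: p0-p1, p1-p2, p0-p2
    return disp.shape == (3, 2)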
def is_printable(s):
import string
    for c in s:
        if c not in string.printable:
            return False
    return True
class _RedirectStream(object):
"""From python3.4 stdlib
"""
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
"""From python3.4 stdlib"""
_stream = "stdout"
class redirect_stderr(_RedirectStream):
"""From python3.4 stdlib"""
_stream = "stderr"
GETFLOAT = re.compile(r'-?\d+(\.\d+)?(e[-+]?\d+)?')  # matches numbers, e.g. 1, -2.0, 3.5e50, 0.001e-10
def is_color(s):
""" Do our best to determine if "s" is a color spec that can be converted to hex
    :param s: value to test (an int or a color-like string)
    :return: True if ``s`` looks like a color spec, otherwise False
"""
def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)
try:
if type(s) == int:
return in_range(s)
elif type(s) not in (str, unicode):
return False
elif s in webcolors.css3_names_to_hex:
return True
elif s[0] == '#':
return in_range(int('0x' + s[1:], 0))
elif s[0:2] == '0x':
return in_range(int(s, 0))
elif len(s) == 6:
return in_range(int('0x' + s, 0))
    except ValueError:
        return False
    # fall through: not recognized as an int, named color, or hex-style string
    return False
def from_filepath(func, filelike):
"""Run func on a temporary *path* assigned to filelike"""
if type(filelike) == str:
return func(filelike)
else:
with tempfile.NamedTemporaryFile() as outfile:
outfile.write(filelike.read())
outfile.flush()
result = func(outfile.name)
return result
def running_in_notebook():
""" Attempts to discover if this python interpreter is connected to a notebook or not
Returns:
bool: True if there is a connection to a notebook kernel
References:
Copied from
http://stackoverflow.com/a/34092072/1958900
"""
if 'IPython' not in sys.modules:
# IPython hasn't been imported, definitely not
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), 'kernel', None) is not None
from future.standard_library import hooks
from lxml import etree, html
import binascii
import collections
import hmac
import json
import random
import time
from hashlib import sha1
from builtins import str
from builtins import range
try:
from http.cookiejar import CookieJar
except ImportError:
from future.backports.http.cookiejar import CookieJar
with hooks():
import urllib.request, urllib.parse, urllib.error
from urllib.parse import quote, unquote, urlencode, quote_plus as _quote_plus
from urllib.error import HTTPError, URLError
ua_skybot = 'Skybot/1.0 https://github.com/rmmh/skybot'
ua_firefox = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) ' \
'Gecko/20070725 Firefox/2.0.0.6'
ua_internetexplorer = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
def get_cookie_jar():
if not hasattr(get_cookie_jar, 'memo'):
get_cookie_jar.memo = CookieJar()
return get_cookie_jar.memo
def clear_expired_cookies():
get_cookie_jar().clear_expired_cookies()
def get(*args, **kwargs):
return open(*args, **kwargs).read().decode('utf-8')
def get_html(*args, **kwargs):
return html.fromstring(open(*args, **kwargs).read())
def get_xml(*args, **kwargs):
return etree.fromstring(open(*args, **kwargs).read())
def get_json(*args, **kwargs):
return json.loads(open(*args, **kwargs).read())
def open(url, query_params=None, post_data=None,
get_method=None, cookies=False, oauth=False, oauth_keys=None, headers=None, **kwargs):
if query_params is None:
query_params = {}
query_params.update(kwargs)
url = prepare_url(url, query_params)
if post_data and isinstance(post_data, collections.Mapping):
post_data = urllib.parse.urlencode(post_data)
post_data = post_data.encode('UTF-8')
request = urllib.request.Request(url, post_data)
if get_method is not None:
request.get_method = lambda: get_method
if headers is not None:
for header_key, header_value in headers.items():
request.add_header(header_key, header_value)
if 'User-Agent' not in request.headers:
request.add_header('User-Agent', ua_skybot)
if oauth:
nonce = oauth_nonce()
timestamp = oauth_timestamp()
api_url, req_data = url.split("?")
unsigned_request = oauth_unsigned_request(
nonce, timestamp, req_data, oauth_keys['consumer'], oauth_keys['access'])
signature = oauth_sign_request("GET", api_url, req_data, unsigned_request, oauth_keys[
'consumer_secret'], oauth_keys['access_secret'])
header = oauth_build_header(
nonce, signature, timestamp, oauth_keys['consumer'], oauth_keys['access'])
request.add_header('Authorization', header)
if cookies:
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(get_cookie_jar()))
else:
opener = urllib.request.build_opener()
return opener.open(request)
def prepare_url(url, queries):
if queries:
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(str(url))
query = dict(urllib.parse.parse_qsl(query))
query.update(queries)
query = urllib.parse.urlencode(dict((to_utf8(key), to_utf8(value))
for key, value in query.items()))
url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
return url
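# Added example (not from the original module): prepare_url() merges extra
# query parameters into an existing URL.  The URL is a placeholder, and the
# parameter order in the output may vary because the values pass through a dict.
def _demo_prepare_url():
    return prepare_url('http://example.com/search?page=1', {'q': 'python'})
    # -> e.g. 'http://example.com/search?page=1&q=python'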
def to_utf8(s):
if isinstance(s, str):
return s.encode('utf8', 'ignore')
else:
return str(s)
def quote_plus(s):
return _quote_plus(to_utf8(s))
def oauth_nonce():
return ''.join([str(random.randint(0, 9)) for i in range(8)])
def oauth_timestamp():
return str(int(time.time()))
def oauth_unsigned_request(nonce, timestamp, req, consumer, token):
d = {
'oauth_consumer_key': consumer,
'oauth_nonce': nonce,
'oauth_signature_method': 'HMAC-SHA1',
'oauth_timestamp': timestamp,
'oauth_token': token,
'oauth_version': '1.0'
}
d.update(urllib.parse.parse_qsl(req))
request_items = d.items()
# TODO: Remove this when Python 2 is no longer supported.
# some of the fields are actual string and others are
# a wrapper of str for the python 3 migration.
# Convert them all so that they sort correctly.
request_items = [(str(k), str(v)) for k, v in request_items]
return quote(urllib.parse.urlencode(sorted(request_items, key=lambda key: key[0])))
def oauth_build_header(nonce, signature, timestamp, consumer, token):
d = {
'oauth_consumer_key': consumer,
'oauth_nonce': nonce,
'oauth_signature': signature,
'oauth_signature_method': 'HMAC-SHA1',
'oauth_timestamp': timestamp,
'oauth_token': token,
'oauth_version': '1.0'
}
header = 'OAuth '
for x in sorted(d, key=lambda key: key[0]):
header += x + '="' + d[x] + '", '
return header[:-1]
def oauth_sign_request(method, url, params, unsigned_request, consumer_secret, token_secret):
key = consumer_secret + "&" + token_secret
key = key.encode('utf-8', 'replace')
base = method + "&" + quote(url, '') + "&" + unsigned_request
base = base.encode('utf-8', 'replace')
hash = hmac.new(key, base, sha1)
signature = quote(binascii.b2a_base64(hash.digest())[:-1])
return signature
def unescape(s):
if not s.strip():
return s
return html.fromstring(s).text_content()
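if __name__ == '__main__':
    # Added example (not from the original module): how the OAuth 1.0a helpers
    # above combine into an Authorization header.  All keys, secrets and the
    # API URL below are hypothetical placeholders.
    consumer, access = 'consumer-key', 'access-token'
    consumer_secret, access_secret = 'consumer-secret', 'access-secret'
    nonce = oauth_nonce()
    timestamp = oauth_timestamp()
    api_url = 'https://api.example.com/1.1/statuses/update.json'
    req_data = 'status=hello%20world'
    unsigned = oauth_unsigned_request(nonce, timestamp, req_data, consumer, access)
    signature = oauth_sign_request('GET', api_url, req_data, unsigned,
                                   consumer_secret, access_secret)
    print(oauth_build_header(nonce, signature, timestamp, consumer, access))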
|
|
from pybench import Test
class SimpleListManipulation(Test):
version = 2.0
operations = 5* (6 + 6 + 6)
rounds = 130000
def test(self):
l = []
append = l.append
for i in xrange(self.rounds):
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
x = l[0]
x = l[1]
x = l[2]
x = l[3]
x = l[4]
x = l[5]
if len(l) > 10000:
# cut down the size
del l[:]
def calibrate(self):
l = []
append = l.append
for i in xrange(self.rounds):
pass
class ListSlicing(Test):
version = 2.0
operations = 25*(3+1+2+1)
rounds = 800
def test(self):
n = range(100)
r = range(25)
for i in xrange(self.rounds):
l = n[:]
for j in r:
m = l[50:]
m = l[:25]
m = l[50:55]
l[:3] = n
m = l[:-1]
m = l[1:]
l[-1:] = n
def calibrate(self):
n = range(100)
r = range(25)
for i in xrange(self.rounds):
for j in r:
pass
class SmallLists(Test):
version = 2.0
operations = 5*(1+ 6 + 6 + 3 + 1)
rounds = 80000
def test(self):
for i in xrange(self.rounds):
l = []
append = l.append
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
append = l.append
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
append = l.append
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
append = l.append
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
l = []
append = l.append
append(2)
append(3)
append(4)
append(2)
append(3)
append(4)
l[0] = 3
l[1] = 4
l[2] = 5
l[3] = 3
l[4] = 4
l[5] = 5
l[:3] = [1,2,3]
m = l[:-1]
m = l[1:]
l[-1:] = [4,5,6]
def calibrate(self):
for i in xrange(self.rounds):
pass
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import mimetypes
import os
import random
import time
from cStringIO import StringIO
from email import charset as Charset
from email import encoders as Encoders
from email import generator, message_from_string
from email.header import Header
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr, formatdate, getaddresses, parseaddr
from speaklater import is_lazy_string
from werkzeug.urls import url_parse
from indico.util.string import to_unicode
def force_text(val):
if is_lazy_string(val):
val = val.value
return to_unicode(val)
# The following code is taken almost verbatim from `django.core.mail`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/tree/stable/1.11.x/django/core/mail/
# Credits of the original code go to the Django Software Foundation
# and their contributors.
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
pass
# Copied from Python 3.2+ standard library, with the following modification:
# * Uses hostname from indico's BASE_URL as the default domain
def make_msgid(idstring=None, domain=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<142480216486.20800.16526388040877946887@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id. Optional domain if given provides the
portion of the message id after the '@'. It defaults to the locally
defined hostname.
"""
from indico.core.config import config
timeval = int(time.time() * 100)
pid = os.getpid()
randint = random.getrandbits(64)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
if domain is None:
domain = url_parse(config.BASE_URL).host
return '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, domain)
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return str(name), val
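# Added example (not part of the original Indico/Django code): illustrates the
# two behaviours of forbid_multi_line_headers() above -- non-ASCII values get
# RFC 2047 encoded, and embedded newlines raise BadHeaderError.  The header
# values are placeholders.
def _demo_forbid_multi_line_headers():
    name, val = forbid_multi_line_headers('Subject', u'H\xe9llo', 'utf-8')
    # val is now an encoded-word such as '=?utf-8?...?='
    try:
        forbid_multi_line_headers('To', 'a@example.com\nbcc: b@example.com', 'utf-8')
    except BadHeaderError:
        pass  # expected: header injection attempts are rejected
    return name, val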
def split_addr(addr, encoding):
"""
Split the address into local part and domain, properly encoded.
When non-ascii characters are present in the local part, it must be
MIME-word encoded. The domain name must be idna-encoded if it contains
non-ascii characters.
"""
if '@' in addr:
localpart, domain = addr.split('@', 1)
# Try to get the simplest encoding - ascii if possible so that
        # to@example.com doesn't become =?utf-8?q?to?=@example.com. This
# makes unit testing a bit easier and more readable.
try:
localpart.encode('ascii')
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = domain.encode('idna').decode('ascii')
else:
localpart = Header(addr, encoding).encode()
domain = ''
return localpart, domain
def sanitize_address(addr, encoding):
"""
Format a pair of (name, address) or an email address string.
"""
if not isinstance(addr, tuple):
addr = parseaddr(force_text(addr))
nm, addr = addr
localpart, domain = None, None
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN or non-ascii in the local part
localpart, domain = split_addr(addr, encoding)
# On Python 2, use the stdlib since `email.headerregistry` doesn't exist.
if localpart and domain:
addr = '@'.join([localpart, domain])
return formataddr((nm, addr))
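# Added example (not part of the original Indico/Django code): sanitize_address()
# leaves plain ASCII addresses untouched and IDNA/RFC 2047-encodes the rest.
# The addresses are placeholders; running this requires the Indico environment
# this module normally lives in.
def _demo_sanitize_address():
    ascii_addr = sanitize_address(('Maria', 'maria@example.com'), 'utf-8')
    # -> 'Maria <maria@example.com>'
    idn_addr = sanitize_address((u'J\xfcrgen', u'juergen@m\xfcnchen.example'), 'utf-8')
    # -> display name becomes an encoded-word, domain becomes xn--mnchen-3ya.example
    return ascii_addr, idn_addr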
class MIMEMixin:
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
as_bytes = as_string
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def set_payload(self, payload, charset=None):
if charset == 'utf-8':
has_long_lines = any(
len(l.encode('utf-8')) > RFC5322_EMAIL_LINE_LENGTH_LIMIT
for l in payload.splitlines()
)
# Quoted-Printable encoding has the side effect of shortening long
# lines, if any (#22561).
charset = utf8_charset_qp if has_long_lines else utf8_charset
MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = 'utf-8'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
if isinstance(to, basestring):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, basestring):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, basestring):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, basestring):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email
self.subject = subject
self.body = body
self.attachments = []
if attachments:
for attachment in attachments:
if isinstance(attachment, MIMEBase):
self.attach(attachment)
else:
self.attach(*attachment)
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from indico.util.emails.backend import EmailBackend
if not self.connection:
return EmailBackend(fail_silently=fail_silently)
return self.connection
def message(self):
msg = SafeMIMEText(self.body, self.content_subtype, self.encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
For a text/* mimetype (guessed or specified), when a bytes object is
specified as content, it will be decoded as UTF-8. If that fails,
the mimetype will be set to DEFAULT_ATTACHMENT_MIME_TYPE and the
content is not decoded.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
if not mimetype:
mimetype, _ = mimetypes.guess_type(filename)
if not mimetype:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
if isinstance(content, str):
try:
content = content.decode('utf-8')
except UnicodeDecodeError:
# If mimetype suggests the file is text but it's actually
# binary, read() will raise a UnicodeDecodeError on Python 3.
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""
Attaches a file from the filesystem.
The mimetype will be set to the DEFAULT_ATTACHMENT_MIME_TYPE if it is
not specified and cannot be guessed.
For a text/* mimetype (guessed or specified), the file's content
will be decoded as UTF-8. If that fails, the mimetype will be set to
DEFAULT_ATTACHMENT_MIME_TYPE and the content is not decoded.
"""
filename = os.path.basename(path)
with open(path, 'rb') as file:
content = file.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=self.encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
attachment = SafeMIMEText(content, subtype, self.encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
filename = filename.encode('utf-8')
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=self.encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
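if __name__ == '__main__':
    # Added usage sketch (not part of the original module): build, but do not
    # send, a multipart/alternative message with an attachment.  The addresses
    # are placeholders, and actually sending would require a configured Indico
    # EmailBackend via get_connection().
    message = EmailMultiAlternatives(
        subject='Test subject',
        body='Plain-text body',
        from_email='noreply@example.com',
        to=['user@example.com'],
        cc=['archive@example.com'],
    )
    message.attach_alternative('<p>HTML body</p>', 'text/html')
    message.attach('notes.txt', 'attached text content', 'text/plain')
    print(message.recipients())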
|
|
"""Script used to test all API views"""
import os
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse_lazy
import PIL
from rest_framework import status
from photo_editor.image_utils import ImageProcessor
from photo_editor.models import Image
from photo_editor.tests import factories
from .http_header import ApiHeaderAuthorization
class FolderViewTestSuite(ApiHeaderAuthorization):
"""Tests that folder can be created and deleted"""
def test_user_view_folder(self):
url = reverse_lazy('folder-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_can_create_folder(self):
url = reverse_lazy('folder-list')
data = {'name': 'Chilling'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_user_can_rename_folder(self):
url = reverse_lazy('folder-detail',
kwargs={'pk': self.image.folder.pk})
data = {'name': 'Unknown'}
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_can_delete_folder(self):
url = reverse_lazy('folder-detail',
kwargs={'pk': self.image.folder.pk})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class ImageProcessorToolViewTestSuite(ApiHeaderAuthorization):
"""Tests the ImageProcessor View"""
def setUp(self):
self.effect_tool = factories.ImageProcessorToolFactory()
self.filter_tool = factories.ImageProcessorToolFactory(
name='color',
processor_type='filter'
)
super(ImageProcessorToolViewTestSuite, self).setUp()
def test_view_image_processors(self):
url = reverse_lazy('image_processor_tools')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data['effectTools'][0].get('name'), 'gray scale')
self.assertEqual(response.data['filterTools'][0].get('name'), 'color')
class ImageCreateDeleteViewTestSuite(ApiHeaderAuthorization):
"""Tests the UploadImage view"""
def setUp(self):
self.image_to_upload = SimpleUploadedFile(
name='image_to_upload.png',
content=open(factories.IMAGE_PATH, 'rb').read(),
content_type='image/png'
)
super(ImageCreateDeleteViewTestSuite, self).setUp()
def test_user_can_upload_image(self):
url = reverse_lazy('create-image')
data = {
'image': self.image_to_upload,
'folder_id': self.image.folder.id,
'name': 'image_to_upload.png'
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIn("https://res.cloudinary.com/andela-troupon/image/upload",
response.content)
def test_a_folder_cannot_have_image_of_duplicate_names(self):
url = reverse_lazy('create-image')
data = {
'image': self.image_to_upload,
'folder_id': self.image.folder.id,
'name': 'test.png'
}
response = self.client.post(url, data)
self.assertEqual(
response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR
)
self.assertIn('msg', response.data.keys())
def test_user_can_rename_image(self):
url = reverse_lazy('image-detail', kwargs={'image_id': self.image.id})
data = {'name': 'retest'}
response = self.client.put(url, data)
image = Image.objects.get(name=data['name'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(image.name, data['name'])
def test_user_can_delete_image(self):
url = reverse_lazy('image-detail', kwargs={'image_id': self.image.id})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_non_existent_image_returns_error_with_msg(self):
url = reverse_lazy('image-detail', kwargs={'image_id': 1000})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class ProcessImageTestSuite(ApiHeaderAuthorization):
"""Tests that ProcessImage view works appropriately"""
def setUp(self):
self.temp_file_path = '/static/photo_editor/img/temp_image.png'
super(ProcessImageTestSuite, self).setUp()
def test_grayscale(self):
url = reverse_lazy(
'process_image',
kwargs={'image_id': self.image.id, 'action': 'gray_scale'}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_flip(self):
url = reverse_lazy(
'process_image',
kwargs={'image_id': self.image.id, 'action': 'flip'}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_invert(self):
url = reverse_lazy(
'process_image',
kwargs={'image_id': self.image.id, 'action': 'invert'}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_mirror(self):
url = reverse_lazy(
'process_image',
kwargs={'image_id': self.image.id, 'action': 'mirror'}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_posterize(self):
url = reverse_lazy(
'process_image',
kwargs={'image_id': self.image.id, 'action': 'posterize'}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_solarize(self):
url = reverse_lazy(
'process_image',
kwargs={'image_id': self.image.id, 'action': 'solarize'}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_add_watermark(self):
url = reverse_lazy(
'process_image',
kwargs={'image_id': self.image.id, 'action': 'add_watermark'}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_crop(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'crop',
'left': 0, 'upper': 0, 'right': 1280, 'lower': 1280
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_contrast(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'contrast',
'option': '8'
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_brightness(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'brightness',
'option': '3'
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_sharpness(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'sharpness',
'option': '2'
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_color(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'color',
'option': '6'
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_contour(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'contour',
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_edge_enhance_more(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'edge_enhance_more',
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_gaussian_blur(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'gaussian_blur',
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_max_filter(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'max_filter',
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_unsharp_mask(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'unsharp_mask',
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_mix_n_match(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'mix_n_match',
'option': 1
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_resize(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'resize',
'option': 'vsmall'
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_roll(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'roll',
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
def test_rotate(self):
url = reverse_lazy(
'process_image',
kwargs={
'image_id': self.image.id, 'action': 'rotate',
}
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['url'], self.temp_file_path)
# test additive operation
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def tearDown(self):
image = Image.objects.all()[0]
image.delete()
full_path = '{0}/photo_editor{1}'.format(
os.path.dirname(settings.BASE_DIR), self.temp_file_path)
os.remove(full_path)
class ProcessedImageTestSuite(ApiHeaderAuthorization):
"""Creates data that would be used for tests that inherit from it"""
def setUp(self):
super(ProcessedImageTestSuite, self).setUp()
# convert image to grayscale
image = PIL.Image.open(factories.IMAGE_PATH)
image_processor = ImageProcessor(image, 'gray_scale')
image = image_processor.apply_pil_process_ops()
# these views will be expecting a temp file and a session variable
temp_image_path = (
'{0}/photo_editor/static/photo_editor/img/temp_image.png'
).format(os.path.dirname(settings.BASE_DIR))
image.save(temp_image_path, 'PNG')
session = self.client.session
session['original_image_url'] = self.image.large_image_url()
session['processed_image_path'] = temp_image_path
session.save()
class ApplyImageProcessingViewTestSuite(ProcessedImageTestSuite):
"""Tests the UploadImage view"""
def test_user_can_apply_changes_to_image(self):
url = reverse_lazy('apply_changes', kwargs={'image_id': self.image.id})
response = self.client.put(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn("https://res.cloudinary.com/andela-troupon/image/upload",
response.content)
def tearDown(self):
image = Image.objects.all()[0]
image.delete()
class RevertToOriginalViewTestSuite(ProcessedImageTestSuite):
"""Tests the UploadImage view"""
    def test_user_can_revert_changes_to_image(self):
url = reverse_lazy('cancel_changes')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn("https://res.cloudinary.com/andela-troupon/image/upload",
response.content)
def tearDown(self):
image = Image.objects.all()[0]
image.delete()
|
|
# -*- coding: utf-8 -*-
"""
sphinx.theming
~~~~~~~~~~~~~~
Theming support for HTML builders.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import shutil
import zipfile
import tempfile
from os import path
from six import string_types, iteritems
from six.moves import configparser
try:
import pkg_resources
except ImportError:
pkg_resources = False
from sphinx import package_dir
from sphinx.errors import ThemeError
import alabaster
import sphinx_rtd_theme
NODEFAULT = object()
THEMECONF = 'theme.conf'
class Theme(object):
"""
Represents the theme chosen in the configuration.
"""
themes = {}
@classmethod
def init_themes(cls, confdir, theme_path, warn=None):
"""Search all theme paths for available themes."""
cls.themepath = list(theme_path)
cls.themepath.append(path.join(package_dir, 'themes'))
for themedir in cls.themepath[::-1]:
themedir = path.join(confdir, themedir)
if not path.isdir(themedir):
continue
for theme in os.listdir(themedir):
if theme.lower().endswith('.zip'):
try:
zfile = zipfile.ZipFile(path.join(themedir, theme))
if THEMECONF not in zfile.namelist():
continue
tname = theme[:-4]
tinfo = zfile
except Exception:
if warn:
warn('file %r on theme path is not a valid '
'zipfile or contains no theme' % theme)
continue
else:
if not path.isfile(path.join(themedir, theme, THEMECONF)):
continue
tname = theme
tinfo = None
cls.themes[tname] = (path.join(themedir, theme), tinfo)
@classmethod
def load_extra_theme(cls, name):
if name in ('alabaster', 'sphinx_rtd_theme'):
if name == 'alabaster':
themedir = alabaster.get_path()
# alabaster theme also requires 'alabaster' extension, it will be loaded
# at sphinx.application module.
elif name == 'sphinx_rtd_theme':
themedir = sphinx_rtd_theme.get_html_theme_path()
else:
raise NotImplementedError('Programming Error')
else:
for themedir in load_theme_plugins():
if path.isfile(path.join(themedir, name, THEMECONF)):
break
else:
# specified theme is not found
return
cls.themepath.append(themedir)
cls.themes[name] = (path.join(themedir, name), None)
return
def __init__(self, name, warn=None):
if name not in self.themes:
self.load_extra_theme(name)
if name not in self.themes:
raise ThemeError('no theme named %r found '
'(missing theme.conf?)' % name)
self.name = name
# Do not warn yet -- to be compatible with old Sphinxes, people *have*
# to use "default".
# if name == 'default' and warn:
# warn("'default' html theme has been renamed to 'classic'. "
# "Please change your html_theme setting either to "
# "the new 'alabaster' default theme, or to 'classic' "
# "to keep using the old default.")
tdir, tinfo = self.themes[name]
if tinfo is None:
# already a directory, do nothing
self.themedir = tdir
self.themedir_created = False
else:
# extract the theme to a temp directory
self.themedir = tempfile.mkdtemp('sxt')
self.themedir_created = True
for name in tinfo.namelist():
if name.endswith('/'):
continue
dirname = path.dirname(name)
if not path.isdir(path.join(self.themedir, dirname)):
os.makedirs(path.join(self.themedir, dirname))
fp = open(path.join(self.themedir, name), 'wb')
fp.write(tinfo.read(name))
fp.close()
self.themeconf = configparser.RawConfigParser()
self.themeconf.read(path.join(self.themedir, THEMECONF))
try:
inherit = self.themeconf.get('theme', 'inherit')
except configparser.NoOptionError:
raise ThemeError('theme %r doesn\'t have "inherit" setting' % name)
# load inherited theme automatically #1794, #1884, #1885
self.load_extra_theme(inherit)
if inherit == 'none':
self.base = None
elif inherit not in self.themes:
raise ThemeError('no theme named %r found, inherited by %r' %
(inherit, name))
else:
self.base = Theme(inherit, warn=warn)
def get_confstr(self, section, name, default=NODEFAULT):
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
return self.themeconf.get(section, name)
except (configparser.NoOptionError, configparser.NoSectionError):
if self.base is not None:
return self.base.get_confstr(section, name, default)
if default is NODEFAULT:
raise ThemeError('setting %s.%s occurs in none of the '
'searched theme configs' % (section, name))
else:
return default
def get_options(self, overrides):
"""Return a dictionary of theme options and their values."""
chain = [self.themeconf]
base = self.base
while base is not None:
chain.append(base.themeconf)
base = base.base
options = {}
for conf in reversed(chain):
try:
options.update(conf.items('options'))
except configparser.NoSectionError:
pass
for option, value in iteritems(overrides):
if option not in options:
raise ThemeError('unsupported theme option %r given' % option)
options[option] = value
return options
def get_dirchain(self):
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
chain = [self.themedir]
base = self.base
while base is not None:
chain.append(base.themedir)
base = base.base
return chain
def cleanup(self):
"""Remove temporary directories."""
if self.themedir_created:
try:
shutil.rmtree(self.themedir)
except Exception:
pass
if self.base:
self.base.cleanup()
def load_theme_plugins():
"""load plugins by using``sphinx_themes`` section in setuptools entry_points.
This API will return list of directory that contain some theme directory.
"""
if not pkg_resources:
return []
theme_paths = []
for plugin in pkg_resources.iter_entry_points('sphinx_themes'):
func_or_path = plugin.load()
try:
path = func_or_path()
except:
path = func_or_path
if isinstance(path, string_types):
theme_paths.append(path)
else:
            raise ThemeError('Plugin %r does not respond correctly.' %
plugin.module_name)
return theme_paths
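# Added example (not part of sphinx.theming): a hedged sketch of how a
# third-party package could expose a theme directory to load_theme_plugins()
# above through the ``sphinx_themes`` entry point group.  The package and
# function names are hypothetical, and the snippet belongs in the theme
# package, not in this module, so it is shown here as a comment.
#
#     # my_sphinx_theme/__init__.py
#     import os
#
#     def get_path():
#         # directory that contains the theme folder(s) with their theme.conf
#         return os.path.dirname(os.path.abspath(__file__))
#
#     # setup.py of the theme package
#     from setuptools import setup
#     setup(
#         name='my-sphinx-theme',
#         packages=['my_sphinx_theme'],
#         entry_points={'sphinx_themes': ['path = my_sphinx_theme:get_path']},
#     )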
|
|
import logging
from bson.codec_options import CodecOptions
from inspect import currentframe, getframeinfo
from pymongo import ASCENDING, DESCENDING, CursorType, MongoClient
from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure, ServerSelectionTimeoutError
from ssl import CERT_REQUIRED, CERT_NONE
from time import sleep
from mongodb_consistent_backup.Common import parse_config_bool
from mongodb_consistent_backup.Errors import DBAuthenticationError, DBConnectionError, DBOperationError, Error
def parse_read_pref_tags(tags_str):
tags = {}
for pair in tags_str.replace(" ", "").split(","):
if ":" in pair:
key, value = pair.split(":")
tags[key] = str(value)
return tags
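# Added example (not part of the original module): parse_read_pref_tags() turns
# a "key:value, key:value" string into a plain dict; whitespace is ignored.
def _demo_parse_read_pref_tags():
    tags = parse_read_pref_tags("dc:east, usage: backup")
    assert tags == {"dc": "east", "usage": "backup"}
    return tags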
class DB:
def __init__(self, uri, config, do_replset=False, read_pref='primaryPreferred', do_rp_tags=False,
do_connect=True, conn_timeout=5000, retries=5):
self.uri = uri
self.config = config
self.do_replset = do_replset
self.read_pref = read_pref
self.do_rp_tags = do_rp_tags
self.do_connect = do_connect
self.conn_timeout = conn_timeout
self.retries = retries
self.username = self.config.username
self.password = self.config.password
self.authdb = self.config.authdb
self.ssl_ca_file = self.config.ssl.ca_file
self.ssl_crl_file = self.config.ssl.crl_file
self.ssl_client_cert_file = self.config.ssl.client_cert_file
self.read_pref_tags = self.config.replication.read_pref_tags
self.replset = None
self._conn = None
self._is_master = None
self.connect()
self.auth_if_required()
def do_ssl(self):
return parse_config_bool(self.config.ssl.enabled)
def do_ssl_insecure(self):
return parse_config_bool(self.config.ssl.insecure)
def client_opts(self):
opts = {
"connect": self.do_connect,
"host": self.uri.hosts(),
"connectTimeoutMS": self.conn_timeout,
"serverSelectionTimeoutMS": self.conn_timeout,
"maxPoolSize": 1,
}
if self.do_replset:
self.replset = self.uri.replset
opts.update({
"replicaSet": self.replset,
"readPreference": self.read_pref,
"w": "majority"
})
if self.do_rp_tags and self.read_pref_tags:
logging.debug("Using read preference mode: %s, tags: %s" % (
self.read_pref,
parse_read_pref_tags(self.read_pref_tags)
))
self.read_pref_tags = self.read_pref_tags.replace(" ", "")
opts["readPreferenceTags"] = self.read_pref_tags
if self.do_ssl():
logging.debug("Using SSL-secured mongodb connection (ca_cert=%s, client_cert=%s, crl_file=%s, insecure=%s)" % (
self.ssl_ca_file,
self.ssl_client_cert_file,
self.ssl_crl_file,
self.do_ssl_insecure()
))
opts.update({
"ssl": True,
"ssl_ca_certs": self.ssl_ca_file,
"ssl_crlfile": self.ssl_crl_file,
"ssl_certfile": self.ssl_client_cert_file,
"ssl_cert_reqs": CERT_REQUIRED,
})
if self.do_ssl_insecure():
opts["ssl_cert_reqs"] = CERT_NONE
return opts
def connect(self):
try:
logging.debug("Getting MongoDB connection to %s (replicaSet=%s, readPreference=%s, readPreferenceTags=%s, ssl=%s)" % (
self.uri,
self.replset,
self.read_pref,
self.do_rp_tags,
self.do_ssl(),
))
conn = MongoClient(**self.client_opts())
if self.do_connect:
conn['admin'].command({"ping": 1})
except (ConfigurationError, ConnectionFailure, OperationFailure, ServerSelectionTimeoutError), e:
logging.error("Unable to connect to %s! Error: %s" % (self.uri, e))
raise DBConnectionError(e)
if conn is not None:
self._conn = conn
return self._conn
def auth_if_required(self):
if self.username is not None and self.password is not None:
try:
logging.debug("Authenticating connection with username: %s" % self.username)
self._conn[self.authdb].authenticate(self.username, self.password)
except OperationFailure, e:
logging.fatal("Unable to authenticate with host %s: %s" % (self.uri, e))
raise DBAuthenticationError(e)
else:
pass
def admin_command(self, admin_command, quiet=False):
tries = 0
status = None
while not status and tries < self.retries:
try:
status = self._conn['admin'].command(admin_command)
except OperationFailure, e:
if not quiet:
logging.error("Error running admin command '%s': %s" % (admin_command, e))
tries += 1
sleep(1)
if not status:
raise DBOperationError("Could not get output from command: '%s' after %i retries!" % (admin_command, self.retries))
return status
def server_version(self):
status = self.admin_command('serverStatus')
try:
if 'version' in status:
version = status['version'].split('-')[0]
return tuple(version.split('.'))
except Exception, e:
raise Error("Unable to determine version from serverStatus! Error: %s" % e)
def connection(self):
return self._conn
def is_mongos(self):
return self._conn.is_mongos
def is_master(self, force=False):
try:
if force or not self._is_master:
self._is_master = self.admin_command('isMaster', True)
except OperationFailure, e:
raise DBOperationError("Unable to run isMaster command! Error: %s" % e)
return self._is_master
def is_replset(self):
isMaster = self.is_master()
if 'setName' in isMaster and isMaster['setName'] != "":
return True
return False
def is_configsvr(self):
isMaster = self.is_master()
if 'configsvr' in isMaster and isMaster['configsvr']:
return True
return False
def replset(self):
isMaster = self.is_master()
if 'setName' in isMaster:
return isMaster['setName']
return None
def get_oplog_rs(self):
if not self._conn:
self.connect()
db = self._conn['local']
return db.oplog.rs.with_options(codec_options=CodecOptions(unicode_decode_error_handler="ignore"))
def get_oplog_tail_ts(self):
logging.debug("Gathering youngest 'ts' in %s oplog" % self.uri)
return self.get_oplog_rs().find_one(sort=[('$natural', DESCENDING)])['ts']
def get_oplog_head_ts(self):
logging.debug("Gathering oldest 'ts' in %s oplog" % self.uri)
return self.get_oplog_rs().find_one(sort=[('$natural', ASCENDING)])['ts']
def is_ts_covered_by_oplog(self, ts_to_check):
oldest_ts = self.get_oplog_head_ts()
covered = oldest_ts <= ts_to_check
logging.debug("Timestamp %s %s covered in %s oplog" % (ts_to_check, "is" if covered else "is NOT", self.uri))
return covered
def get_oplog_cursor_since(self, caller, ts=None):
frame = getframeinfo(currentframe().f_back)
comment = "%s:%s;%s:%i" % (caller.__name__, frame.function, frame.filename, frame.lineno)
if not ts:
ts = self.get_oplog_tail_ts()
query = {'ts': {'$gte': ts}}
logging.debug("Querying oplog on %s with query: %s" % (self.uri, query))
# http://api.mongodb.com/python/current/examples/tailable.html
return self.get_oplog_rs().find(query, cursor_type=CursorType.TAILABLE_AWAIT, oplog_replay=True).comment(comment)
def get_simple_oplog_cursor_from_to(self, caller, ts_from, ts_to=None):
frame = getframeinfo(currentframe().f_back)
comment = "%s:%s;%s:%i" % (caller.__name__, frame.function, frame.filename, frame.lineno)
if not ts_to:
ts_to = self.get_oplog_tail_ts()
query = {'ts': {'$gte': ts_from, '$lte': ts_to}}
logging.debug("Querying all oplog changes between %s and %s on %s with query: %s" % (ts_from, ts_to, self.uri, query))
return self.get_oplog_rs().find(query, cursor_type=CursorType.NON_TAILABLE, oplog_replay=True).comment(comment)
def close(self):
if self._conn:
logging.debug("Closing connection to: %s" % self.uri)
return self._conn.close()
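# Added usage sketch (not part of the original module): DB is normally
# constructed by mongodb_consistent_backup itself with a parsed MongoDB URI
# object and the global config.  The names below are illustrative only, and
# the constructor connects to a live MongoDB immediately, so the sketch is
# left as a comment rather than executable code:
#
#     db = DB(uri, config, do_replset=True, read_pref='secondaryPreferred')
#     print(db.server_version())
#     print(db.get_oplog_tail_ts())
#     db.close()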
|