from .ctx_base import StandardBaseContext
import math
import cmath
from . import math2
from . import function_docs
from .libmp import mpf_bernoulli, to_float, int_types
from . import libmp
class FPContext(StandardBaseContext):
"""
Context for fast low-precision arithmetic (53-bit precision, giving at most
about 15-digit accuracy), using Python's builtin float and complex.
"""
def __init__(ctx):
StandardBaseContext.__init__(ctx)
# Override SpecialFunctions implementation
ctx.loggamma = math2.loggamma
ctx._bernoulli_cache = {}
ctx.pretty = False
ctx._init_aliases()
_mpq = lambda cls, x: float(x[0])/x[1]
NoConvergence = libmp.NoConvergence
def _get_prec(ctx): return 53
def _set_prec(ctx, p): return
def _get_dps(ctx): return 15
def _set_dps(ctx, p): return
_fixed_precision = True
prec = property(_get_prec, _set_prec)
dps = property(_get_dps, _set_dps)
zero = 0.0
one = 1.0
eps = math2.EPS
inf = math2.INF
ninf = math2.NINF
nan = math2.NAN
j = 1j
# Called by SpecialFunctions.__init__()
@classmethod
def _wrap_specfun(cls, name, f, wrap):
if wrap:
def f_wrapped(ctx, *args, **kwargs):
convert = ctx.convert
args = [convert(a) for a in args]
return f(ctx, *args, **kwargs)
else:
f_wrapped = f
f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__)
setattr(cls, name, f_wrapped)
def bernoulli(ctx, n):
cache = ctx._bernoulli_cache
if n in cache:
return cache[n]
cache[n] = to_float(mpf_bernoulli(n, 53, 'n'), strict=True)
return cache[n]
pi = math2.pi
e = math2.e
euler = math2.euler
sqrt2 = 1.4142135623730950488
sqrt5 = 2.2360679774997896964
phi = 1.6180339887498948482
ln2 = 0.69314718055994530942
ln10 = 2.302585092994045684
euler = 0.57721566490153286061
catalan = 0.91596559417721901505
khinchin = 2.6854520010653064453
apery = 1.2020569031595942854
glaisher = 1.2824271291006226369
absmin = absmax = abs
    def is_special(ctx, x):
        # x - x is nan (and nan != 0.0) exactly when x is inf or nan
        return x - x != 0.0
    def isnan(ctx, x):
        # nan never compares equal to itself
        return x != x
    def isinf(ctx, x):
        return abs(x) == math2.INF
    def isnormal(ctx, x):
        if x:
            # finite nonzero values satisfy x - x == 0.0; inf and nan do not
            return x - x == 0.0
        # zero is not considered normal
        return False
def isnpint(ctx, x):
if type(x) is complex:
if x.imag:
return False
x = x.real
return x <= 0.0 and round(x) == x
mpf = float
mpc = complex
    def convert(ctx, x):
        try:
            return float(x)
        except (TypeError, ValueError):
            return complex(x)
power = staticmethod(math2.pow)
sqrt = staticmethod(math2.sqrt)
exp = staticmethod(math2.exp)
ln = log = staticmethod(math2.log)
cos = staticmethod(math2.cos)
sin = staticmethod(math2.sin)
tan = staticmethod(math2.tan)
cos_sin = staticmethod(math2.cos_sin)
acos = staticmethod(math2.acos)
asin = staticmethod(math2.asin)
atan = staticmethod(math2.atan)
cosh = staticmethod(math2.cosh)
sinh = staticmethod(math2.sinh)
tanh = staticmethod(math2.tanh)
gamma = staticmethod(math2.gamma)
rgamma = staticmethod(math2.rgamma)
fac = factorial = staticmethod(math2.factorial)
floor = staticmethod(math2.floor)
ceil = staticmethod(math2.ceil)
cospi = staticmethod(math2.cospi)
sinpi = staticmethod(math2.sinpi)
cbrt = staticmethod(math2.cbrt)
_nthroot = staticmethod(math2.nthroot)
_ei = staticmethod(math2.ei)
_e1 = staticmethod(math2.e1)
_zeta = _zeta_int = staticmethod(math2.zeta)
# XXX: math2
def arg(ctx, z):
z = complex(z)
return math.atan2(z.imag, z.real)
def expj(ctx, x):
return ctx.exp(ctx.j*x)
def expjpi(ctx, x):
return ctx.exp(ctx.j*ctx.pi*x)
ldexp = math.ldexp
frexp = math.frexp
def mag(ctx, z):
if z:
return ctx.frexp(abs(z))[1]
return ctx.ninf
def isint(ctx, z):
if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
if z.imag:
return False
z = z.real
        try:
            return z == int(z)
        except (TypeError, ValueError, OverflowError):
            return False
def nint_distance(ctx, z):
if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
n = round(z.real)
else:
n = round(z)
if n == z:
return n, ctx.ninf
return n, ctx.mag(abs(z-n))
def _convert_param(ctx, z):
if type(z) is tuple:
p, q = z
return ctx.mpf(p) / q, 'R'
if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
intz = int(z.real)
else:
intz = int(z)
if z == intz:
return intz, 'Z'
return z, 'R'
def _is_real_type(ctx, z):
return isinstance(z, float) or isinstance(z, int_types)
def _is_complex_type(ctx, z):
return isinstance(z, complex)
def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs):
coeffs = list(coeffs)
num = range(p)
den = range(p,p+q)
tol = ctx.eps
s = t = 1.0
k = 0
while 1:
for i in num: t *= (coeffs[i]+k)
for i in den: t /= (coeffs[i]+k)
k += 1; t /= k; t *= z; s += t
if abs(t) < tol:
return s
if k > maxterms:
raise ctx.NoConvergence
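    # A usage sketch (comment only): hypsum evaluates the generalized
    # hypergeometric series pFq(a_1..a_p; b_1..b_q; z) by direct term-by-term
    # recurrence, stopping once a term falls below eps and raising
    # NoConvergence after maxterms terms. For example,
    # ctx.hypsum(2, 1, None, [1.0, 1.0, 2.0], 0.5) computes
    # 2F1(1,1;2;1/2) = 2*ln(2) ~= 1.3862943611198906; the `types` argument is
    # ignored in this float-based implementation.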
def atan2(ctx, x, y):
return math.atan2(x, y)
def psi(ctx, m, z):
m = int(m)
if m == 0:
return ctx.digamma(z)
return (-1)**(m+1) * ctx.fac(m) * ctx.zeta(m+1, z)
digamma = staticmethod(math2.digamma)
def harmonic(ctx, x):
x = ctx.convert(x)
if x == 0 or x == 1:
return x
return ctx.digamma(x+1) + ctx.euler
nstr = str
def to_fixed(ctx, x, prec):
return int(math.ldexp(x, prec))
def rand(ctx):
import random
return random.random()
_erf = staticmethod(math2.erf)
_erfc = staticmethod(math2.erfc)
def sum_accurately(ctx, terms, check_step=1):
s = ctx.zero
k = 0
for term in terms():
s += term
if (not k % check_step) and term:
if abs(term) <= 1e-18*abs(s):
break
k += 1
return s
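# A minimal usage sketch for this context (comment only; it assumes the module
# is installed as part of mpmath, which exposes an FPContext instance as
# `mpmath.fp`):
#
#     >>> from mpmath import fp
#     >>> fp.prec, fp.dps      # precision is fixed; assigning to prec/dps is a no-op
#     (53, 15)
#     >>> fp.sqrt(2)           # results are plain Python floats
#     1.4142135623730951
#     >>> abs(fp.psi(0, 1.0) + fp.euler) < 1e-12   # digamma(1) == -euler
#     True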
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Model.hidden'
db.add_column('blogs_model', 'hidden',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'ModelFieldData.foreign'
db.add_column('blogs_modelfielddata', 'foreign',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blogs.ModelFieldData'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Model.hidden'
db.delete_column('blogs_model', 'hidden')
# Deleting field 'ModelFieldData.foreign'
db.delete_column('blogs_modelfielddata', 'foreign_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'analytics_account': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contributors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogcontributor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'draft_notice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exclusion_end': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'exclusion_start': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'facebook_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'fb_page_access_token': ('django.db.models.fields.CharField', [], {'max_length': '260', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'header_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'main_color': ('django.db.models.fields.CharField', [], {'default': "'#C4BDB2'", 'max_length': '10', 'blank': 'True'}),
'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'pinterest_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Template']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'}),
'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_category'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'cat_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_caret': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_close': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_email': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_fb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_left': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_pint': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_right': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_tw': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'default': "'#000000'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'parent_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_category'", 'null': 'True', 'to': "orm['blogs.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
},
'blogs.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menu': {
'Meta': {'object_name': 'Menu'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_menu'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Menu']", 'null': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Page']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'blogs.model': {
'Meta': {'object_name': 'Model'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Custom_post'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'blogs.modeldata': {
'Meta': {'object_name': 'ModelData'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unknown'", 'max_length': '140'})
},
'blogs.modelfield': {
'Meta': {'object_name': 'ModelField'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'post_type': ('django.db.models.fields.CharField', [], {'default': "'Text'", 'max_length': '40'}),
'rank': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '2'})
},
'blogs.modelfielddata': {
'Meta': {'object_name': 'ModelFieldData'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'blank': 'True'}),
'foreign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.ModelFieldData']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'longtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True'}),
'model_data': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.ModelData']", 'null': 'True'}),
'model_field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.ModelField']", 'null': 'True'}),
'nullboolean': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'onetofive': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'positiveinteger': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'relation': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'relation'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['blogs.ModelData']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_page'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Model']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_25': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_26': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_27': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_28': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_29': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_30': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_31': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_32': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_33': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'soundcloud_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'soundcloud_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Tag']", 'null': 'True', 'blank': 'True'}),
'temp_tag_field': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_ogg': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'vimeo_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'vimeo_thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'blogs.rss': {
'Meta': {'object_name': 'Rss'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_rss'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'blogs.subuser': {
'Meta': {'object_name': 'Subuser'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_user'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_tag'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.template': {
'Meta': {'object_name': 'Template'},
'archives': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'base': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'category': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'single': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'})
},
'blogs.translation': {
'Meta': {'object_name': 'Translation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs']
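# A minimal usage sketch (comment only; it assumes South is installed, that
# 'south' and 'blogs' are in INSTALLED_APPS, and that this file lives in the
# blogs/migrations/ package):
#
#     # apply the schema change (adds Model.hidden and ModelFieldData.foreign)
#     python manage.py migrate blogs
#
#     # roll it back by migrating to the preceding migration; list the series with
#     python manage.py migrate --list blogs
#
# The name of the preceding migration is not shown here, so it is left unspecified.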
# Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
from test import support
import sys
class TestIsInstanceExceptions(unittest.TestCase):
# Test to make sure that an AttributeError when accessing the instance's
# class's bases is masked. This was actually a bug in Python 2.2 and
# 2.2.1 where the exception wasn't caught but it also wasn't being cleared
# (leading to an "undetected error" in the debug build). Set up is,
# isinstance(inst, cls) where:
#
# - cls isn't a type, or a tuple
# - cls has a __bases__ attribute
# - inst has a __class__ attribute
# - inst.__class__ has no __bases__ attribute
#
# Sounds complicated, I know, but this mimics a situation where an
# extension type raises an AttributeError when its __bases__ attribute is
# gotten. In that case, isinstance() should return False.
def test_class_has_no_bases(self):
class I(object):
def getclass(self):
# This must return an object that has no __bases__ attribute
return None
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertEqual(False, isinstance(I(), C()))
# Like above except that inst.__class__.__bases__ raises an exception
# other than AttributeError
def test_bases_raises_other_than_attribute_error(self):
class E(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class I(object):
def getclass(self):
return E()
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Here's a situation where getattr(cls, '__bases__') raises an exception.
# If that exception is not AttributeError, it should not get masked
def test_dont_mask_non_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Like above, except that getattr(cls, '__bases__') raises an
# AttributeError, which /should/ get masked as a TypeError
def test_mask_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, isinstance, I(), C())
# check that we don't mask non AttributeErrors
# see: http://bugs.python.org/issue1574217
def test_isinstance_dont_mask_non_attribute_error(self):
class C(object):
def getclass(self):
raise RuntimeError
__class__ = property(getclass)
c = C()
self.assertRaises(RuntimeError, isinstance, c, bool)
# test another code path
class D: pass
self.assertRaises(RuntimeError, isinstance, c, D)
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
def test_dont_mask_non_attribute_error(self):
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(RuntimeError, issubclass, C(), S())
def test_mask_attribute_error(self):
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(TypeError, issubclass, C(), S())
# Like above, but test the second branch, where the __bases__ of the
# second arg (the cls arg) is tested. This means the first arg must
# return a valid __bases__, and it's okay for it to be a normal --
# unrelated by inheritance -- class.
def test_dont_mask_non_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, issubclass, B, C())
def test_mask_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
def __init__(self, bases):
self.bases = bases
def getbases(self):
return self.bases
__bases__ = property(getbases)
def __call__(self):
return AbstractInstance(self)
class AbstractInstance(object):
def __init__(self, klass):
self.klass = klass
def getclass(self):
return self.klass
__class__ = property(getclass)
# abstract classes
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))
# normal classes
class Super:
pass
class Child(Super):
pass
# new-style classes
class NewSuper(object):
pass
class NewChild(NewSuper):
pass
class TestIsInstanceIsSubclass(unittest.TestCase):
# Tests to ensure that isinstance and issubclass work on abstract
# classes and instances. Before the 2.2 release, TypeErrors were
# raised when boolean values should have been returned. The bug was
# triggered by mixing 'normal' classes and instances with
# 'abstract' classes and instances. This case tries to test all
# combinations.
def test_isinstance_normal(self):
# normal instances
self.assertEqual(True, isinstance(Super(), Super))
self.assertEqual(False, isinstance(Super(), Child))
self.assertEqual(False, isinstance(Super(), AbstractSuper))
self.assertEqual(False, isinstance(Super(), AbstractChild))
self.assertEqual(True, isinstance(Child(), Super))
self.assertEqual(False, isinstance(Child(), AbstractSuper))
def test_isinstance_abstract(self):
# abstract instances
self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
self.assertEqual(False, isinstance(AbstractSuper(), Super))
self.assertEqual(False, isinstance(AbstractSuper(), Child))
self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractChild(), Super))
self.assertEqual(False, isinstance(AbstractChild(), Child))
def test_subclass_normal(self):
# normal classes
self.assertEqual(True, issubclass(Super, Super))
self.assertEqual(False, issubclass(Super, AbstractSuper))
self.assertEqual(False, issubclass(Super, Child))
self.assertEqual(True, issubclass(Child, Child))
self.assertEqual(True, issubclass(Child, Super))
self.assertEqual(False, issubclass(Child, AbstractSuper))
def test_subclass_abstract(self):
# abstract classes
self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
self.assertEqual(False, issubclass(AbstractSuper, Child))
self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
self.assertEqual(False, issubclass(AbstractChild, Super))
self.assertEqual(False, issubclass(AbstractChild, Child))
def test_subclass_tuple(self):
# test with a tuple as the second argument classes
self.assertEqual(True, issubclass(Child, (Child,)))
self.assertEqual(True, issubclass(Child, (Super,)))
self.assertEqual(False, issubclass(Super, (Child,)))
self.assertEqual(True, issubclass(Super, (Child, Super)))
self.assertEqual(False, issubclass(Child, ()))
self.assertEqual(True, issubclass(Super, (Child, (Super,))))
self.assertEqual(True, issubclass(NewChild, (NewChild,)))
self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
self.assertEqual(False, issubclass(NewChild, ()))
self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))
self.assertEqual(True, issubclass(int, (int, (float, int))))
self.assertEqual(True, issubclass(str, (str, (Child, NewChild, str))))
def test_subclass_recursion_limit(self):
# make sure that issubclass raises RuntimeError before the C stack is
# blown
self.assertRaises(RuntimeError, blowstack, issubclass, str, str)
def test_isinstance_recursion_limit(self):
# make sure that isinstance raises RuntimeError before the C stack is
# blown
self.assertRaises(RuntimeError, blowstack, isinstance, '', str)
def blowstack(fxn, arg, compare_to):
# Make sure that calling isinstance with a deeply nested tuple for its
# argument will raise RuntimeError eventually.
tuple_arg = (compare_to,)
if support.check_impl_detail(cpython=True):
RECURSION_LIMIT = sys.getrecursionlimit()
else:
# on non-CPython implementations, the maximum actual recursion
# limit might be higher, but probably not higher than 99999
RECURSION_LIMIT = 99999
for cnt in range(RECURSION_LIMIT + 5):
tuple_arg = (tuple_arg,)
fxn(arg, tuple_arg)
def test_main():
support.run_unittest(
TestIsInstanceExceptions,
TestIsSubclassExceptions,
TestIsInstanceIsSubclass
)
if __name__ == '__main__':
test_main()
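# To run this test module directly (a usage note, assuming a Python version in
# which `test.support.run_unittest` is still available):
#
#     python test_isinstance.py
#
# or, for the copy shipped with the standard library test suite:
#
#     python -m test.test_isinstance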
''' A set of standard sources, filters, sorters and sinks
'''
import random
import datetime
from track_manager import tlib
import json
class Annotator(object):
''' Annotates the tracks in a stream with external information
:param source: the source of tracks
:param type: the type of annotation (spotify, echonest)
'''
    def __init__(self, source, type):
        self.annotator = tlib.get_annotator(type)
        self.name = source.name + ' annotated with ' + type + ' data'
        self.source = source
        self.type = type
        self.buffer = []
        self.fillbuf = []
    def next_track(self):
        while len(self.fillbuf) < self.annotator['batch_size']:
            track = self.source.next_track()
            if track:
                self.buffer.append(track)
                tinfo = tlib.get_track(track)
                # only queue tracks that are still missing this annotation type
                if self.type not in tinfo:
                    self.fillbuf.append(track)
else:
break
if len(self.fillbuf) > 0:
self._fetch_fillbuf()
if len(self.buffer) > 0:
return self.buffer.pop(0)
else:
return None
def _fetch_fillbuf(self):
self.annotator['annotator'](self.fillbuf)
self.fillbuf = []
class FakeTrackSource(object):
'''
Generates a series of fake tracks, suitable for testing
    :param count: the number of tracks to generate
'''
def __init__(self, count=10):
self.name = 'FakeTracks'
self.count = count
self.fake_id = 1000000
def next_track(self):
track = None
if self.count > 0:
track = tlib.make_track(self._fake_id(),
self._fake_name(), self._fake_name(), 180, 'FakeTrackSource')
self.count -= 1
return track
def _fake_id(self):
self.fake_id += 1
return str(self.fake_id)
def _fake_name(self):
nouns = 'vixen bear dog cat waters drums parade fire france'
adjectives = 'frumpy cold wet fast red jumpy strange weird nifty'
adj = random.choice(adjectives.split())
noun = random.choice(nouns.split())
return ' '.join([adj, noun])
class Split(object):
'''
Splits a stream into two streams
:param source: the source of the track stream
:param split_index: the index where the split occurs
'''
def __init__(self, source, split_index):
self.source = source
self.split_index = split_index
self.left_buffer = None
self.right_buffer = None
def _fill_buffer(self):
        if self.left_buffer is None:
self.left_buffer = []
self.right_buffer = []
which = 0
while True:
track = self.source.next_track()
if track:
if which < self.split_index:
self.left_buffer.append(track)
else:
self.right_buffer.append(track)
else:
break
which += 1
class left_side(object):
def __init__(self, outer):
self.outer = outer
self.name = 'first ' + str(outer.split_index) \
+ ' tracks of ' + outer.source.name
def next_track(self):
self.outer._fill_buffer()
if len(self.outer.left_buffer) > 0:
return self.outer.left_buffer.pop(0)
else:
return None
class right_side(object):
def __init__(self, outer):
self.outer = outer
self.name = 'After the first ' + str(outer.split_index) \
+ ' tracks of ' + outer.source.name
def next_track(self):
self.outer._fill_buffer()
if len(self.outer.right_buffer) > 0:
return self.outer.right_buffer.pop(0)
else:
return None
def outputs(self):
return [self.left_side(self), self.right_side(self)]
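    # A usage sketch for Split (comment only; `some_source` is a hypothetical
    # placeholder for any object with a next_track() method):
    #
    #     first_ten, rest = Split(some_source, 10).outputs()
    #     first_ten.next_track()   # yields tracks 1..10 of some_source
    #     rest.next_track()        # yields track 11 onwards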
class Looper(object):
'''
Given a source, generate a stream of a given size by circulating through
the tracks in the source
:param source: the stream source
:param max_size: the number of tracks returned
'''
def __init__(self, source, max_size=200):
self.name = 'looped ' + source.name
self.source = source
self.index = 0
self.buffer = []
self.looping = False
self.max_size = max_size
self.cur_size = 0
def next_track(self):
if self.cur_size >= self.max_size:
return None
if self.looping:
if len(self.buffer) == 0:
return None
else:
idx = self.index % len(self.buffer)
self.index += 1
track = self.buffer[idx]
else:
track = self.source.next_track()
            if track is None:
self.looping = True
return self.next_track()
else:
self.buffer.append(track)
self.cur_size += 1
return track
class Shuffler(object):
''' Shuffles the tracks in the stream
:param source: the source of tracks
:param max_size: the maximum number of tracks to return
'''
def __init__(self, source, max_size=0):
self.name = 'shuffled ' + source.name
self.source = source
self.buffer = []
self.filling = True
self.max_size = max_size
def next_track(self):
while self.filling:
track = self.source.next_track()
if track and (self.max_size == 0 or len(self.buffer) < self.max_size):
self.buffer.append(track)
else:
self.filling = False
random.shuffle(self.buffer)
if len(self.buffer) > 0:
return self.buffer.pop()
else:
return None
class DeDup(object):
'''
Remove any duplicate tracks in the stream
:param source: the stream source
    :param by_name: if True, also de-dup by (lowercased) track name in addition to track id
'''
def __init__(self, source, by_name = False):
self.name = 'dedupped ' + source.name
self.source = source
self.by_name = by_name
self.history = set()
def next_track(self):
track = None
while True:
track = self.source.next_track()
if track:
if self.by_name:
tname = tlib.get_tn(track).lower()
if tname in self.history:
continue
else:
self.history.add(tname)
if track in self.history:
continue
else:
self.history.add(track)
break
else:
break
return track
class Buffer(object):
'''
Buffer up the given number of tracks
:param source: the stream source
:param max_size: the size of the buffer
'''
def __init__(self, source, max_size=40):
self.name = 'buffered ' + source.name
self.source = source
self.buffer = []
self.filling = True
self.max_size = max_size
def next_track(self):
while self.filling:
track = self.source.next_track()
if track and (self.max_size == 0 or len(self.buffer) < self.max_size):
self.buffer.append(track)
else:
self.filling = False
if len(self.buffer) > 0:
return self.buffer.pop()
else:
return None
class LongerThan(object):
'''
    Limit the stream, if possible, to a total duration that is just longer
    than the given time
:param source: the source stream
:param time: the time in seconds
'''
def __init__(self, source, time=1200):
self.name = 'LongerThan ' + str(time) + ' secs'
self.source = source
self.time = time
self.cur_time = 0
def next_track(self):
if self.cur_time > self.time:
return None
else:
track = self.source.next_track()
if track:
duration = tlib.get_attr(track, 'duration')
self.cur_time += duration
return track
class ShorterThan(object):
'''
    Limit the stream, if possible, to a total duration that is just
    shorter than the given time
:param source: the source stream
:param time: the time in seconds
'''
def __init__(self, source, time=1200):
self.name = 'Shorter Than ' + str(time) + ' secs'
self.source = source
self.time = time
self.cur_time = 0
def next_track(self):
if self.cur_time >= self.time:
return None
else:
track = self.source.next_track()
if track:
duration = tlib.get_attr(track, 'duration')
self.cur_time += duration
if self.cur_time >= self.time:
return None
return track
class Sorter(object):
'''
Sorts the tracks in the given stream by the given attribute
:param source: the source of the tracks
:param attr: the attribute to be sorted
:param reverse: if True reverse the sort
:param max_size: maximum tracks to sort
'''
def __init__(self, source, attr, reverse=False, max_size=0):
        self.name = source.name + ' sorted by ' + attr + (' (reverse)' if reverse else '')
self.source = source
self.buffer = []
self.filling = True
self.max_size = max_size
self.attr = attr
self.reverse = reverse
self.annotator = get_annotator(source, attr)
def next_track(self):
while self.filling:
track = self.annotator.next_track()
if track and (self.max_size == 0 or len(self.buffer) < self.max_size):
self.buffer.append(track)
else:
self.filling = False
self.buffer.sort(reverse=self.reverse, key=lambda tid: tlib.get_attr(tid, self.attr))
if len(self.buffer) > 0:
return self.buffer.pop(0)
else:
return None
class CustomSorter(object):
'''
Sorts the tracks by a custom key
:param source: the source of the tracks
:param keyfunc: function that turns a track id into the sort key
:param reverse: if True reverse the sort
:param max_size: maximum tracks to sort
'''
def __init__(self, source, keyfunc, reverse=False, max_size=0):
self.name = source.name + ' custom sorted'
self.source = source
self.keyfunc = keyfunc
self.buffer = []
self.filling = True
self.max_size = max_size
self.reverse = reverse
def next_track(self):
while self.filling:
track = self.source.next_track()
if track and (self.max_size == 0 or len(self.buffer) < self.max_size):
self.buffer.append(track)
else:
self.filling = False
self.buffer.sort(reverse=self.reverse, key=self.keyfunc)
if len(self.buffer) > 0:
return self.buffer.pop(0)
else:
return None
class First(object):
'''
Returns the first tracks from a stream
:param source: the source of tracks
:param sample_size: the number of tracks to return
'''
def __init__(self, source, sample_size=10):
self.name = 'first ' + str(sample_size) + ' of ' + source.name
self.source = source
self.sample_size = sample_size
self.buffer = []
self.filling = True
def next_track(self):
while self.filling and len(self.buffer) < self.sample_size:
track = self.source.next_track()
if track:
self.buffer.append(track)
else:
self.filling = False
if len(self.buffer) >= self.sample_size:
self.filling = False
if len(self.buffer) > 0:
return self.buffer.pop(0)
else:
return None
class Last(object):
'''
Returns the last tracks from a stream
:param source: the source of tracks
:param sample_size: the number of tracks to return
'''
def __init__(self, source, sample_size=10):
self.name = 'last ' + str(sample_size) + ' of ' + source.name
self.source = source
self.sample_size = sample_size
self.buffer = []
self.filling = True
def next_track(self):
while self.filling:
track = self.source.next_track()
if track:
self.buffer.append(track)
else:
self.filling = False
self.buffer = self.buffer[-self.sample_size:]
if len(self.buffer) > 0:
return self.buffer.pop(0)
else:
return None
class Reverse(object):
'''
Reverses the order of the tracks in the stream
:param source: the source of tracks
'''
def __init__(self, source):
self.name = 'reverse of ' + source.name
self.source = source
self.buffer = []
self.filling = True
def next_track(self):
while self.filling:
track = self.source.next_track()
if track:
self.buffer.append(track)
else:
self.filling = False
if len(self.buffer) > 0:
return self.buffer.pop()
else:
return None
class Sample(object):
'''
Randomly sample tracks from the stream
:param source: the source of tracks
:param sample_size: the number of tracks to return
'''
def __init__(self, source, sample_size=10):
self.name = 'Sampling ' + str(sample_size) \
+ ' tracks from ' + source.name
self.source = source
self.sample_size = sample_size
self.buffer = []
self.filling = True
def next_track(self):
while self.filling:
track = self.source.next_track()
if track:
self.buffer.append(track)
else:
self.filling = False
random.shuffle(self.buffer)
self.buffer = self.buffer[:self.sample_size]
if len(self.buffer) > 0:
return self.buffer.pop()
else:
return None
class Concatenate(object):
'''
Concatenate multiple streams
:param source_list: a list of sources
'''
def __init__(self, source_list):
self.name = 'concatenating ' + ' '.join([s.name for s in source_list])
self.source_list = source_list
self.index = 0
def next_track(self):
track = None
while self.index < len(self.source_list):
track = self.source_list[self.index].next_track()
if track:
break
else:
self.index += 1
return track
class Alternate(object):
'''
Alternate tracks from multiple streams
:param source_list: a list of sources
'''
def __init__(self, source_list, fail_fast=False):
self.name = 'alternating between ' + ', '.join([s.name for s in source_list])
self.source_list = source_list
self.index = 0
self.fail_fast = fail_fast
def next_track(self):
tries = len(self.source_list)
while tries > 0:
idx = self.index % len(self.source_list)
self.index += 1
track = self.source_list[idx].next_track()
if track:
return track
else:
if self.fail_fast:
break
else:
tries -= 1
return None
class Conditional(object):
'''
Alternate tracks from two streams based on a conditional
    :param cond_func: a function that returns a boolean
    :param trueSource: the source of tracks used when cond_func returns True
    :param falseSource: the source of tracks used when cond_func returns False
'''
def __init__(self, cond_func, trueSource, falseSource):
self.name = 'Conditional of ' + ' '.join([trueSource.name, falseSource.name])
self.trueSource = trueSource
self.falseSource = falseSource
self.cond_func = cond_func
def next_track(self):
if self.cond_func():
return self.trueSource.next_track()
else:
return self.falseSource.next_track()
class Case(object):
'''
Selects tracks from streams based upon a mapping function
    :param func: a function that returns the source_map key
    :param source_map: a map of keys to source streams
'''
    def __init__(self, func, source_map):
        class EmptyDefault(object):
            # fallback source used when no 'default' entry is supplied;
            # it simply yields no tracks
            name = 'empty default'
            def next_track(self):
                return None
        self.name = 'Case of ' + ', '.join([n + ':' + s.name for n, s in source_map.items()])
        self.source_map = source_map
        self.func = func
        if 'default' not in self.source_map:
            self.source_map['default'] = EmptyDefault()
def next_track(self):
key = self.func()
        if key not in self.source_map:
key = 'default'
source = self.source_map[key]
return source.next_track()
'''
Some handy dandy conditional funcs
'''
def is_day_of_week(day_of_week):
''' checks if cur day is given day of the week
:param day_of_week: Monday is 0 and Sunday is 6.
'''
def cond_func():
return datetime.datetime.today().weekday() == day_of_week
return cond_func
def get_simple_day_part():
'''
returns the daypart
'''
hour = datetime.datetime.today().hour
if hour < 12:
return 'morning'
elif hour < 18:
return 'afternoon'
elif hour < 22:
return 'evening'
else:
return 'night'
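# A usage sketch combining the conditional helpers with Conditional and Case
# (comment only; the mix names are hypothetical placeholders for sources defined
# elsewhere):
#
#     # play from weekend_mix on Saturdays, otherwise from weekday_mix
#     stream = Conditional(is_day_of_week(5), weekend_mix, weekday_mix)
#
#     # pick a source by daypart, with an explicit fallback
#     stream = Case(get_simple_day_part, {
#         'morning': chill_mix,
#         'evening': party_mix,
#         'default': weekday_mix,
#     })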
class AttributeRangeFilter(object):
'''
Filters tracks based upon range check of an attribute
:param source: the source of tracks
:param attr: the attribute of interest
:param match: if not None, attribute value must match this exactly
:param min_val: if not None, attribute value must be at least this
:param max_val: if not None, attribute value must be no more than this
'''
    def __init__(self, source, attr, match=None, min_val=None, max_val=None):
        self.name = source.name + ' filtered by ' + attr
        self.source = source
        self.attr = attr
        self.match = match
        self.min_val = min_val
        self.max_val = max_val
        self.annotator = get_annotator(source, attr)
def next_track(self):
while True:
good = True
track = self.annotator.next_track()
if track:
attr_val = tlib.get_attr(track, self.attr)
                if attr_val is None:
                    good = False
                elif self.match is not None and attr_val != self.match:
good = False
else:
if self.min_val and attr_val < self.min_val:
good = False
if self.max_val and attr_val > self.max_val:
good = False
if good:
break
return track
class TrackFilter(object):
'''
Removes tracks from the stream based on a second stream
:param source: the source of tracks
:param filter: the stream of bad tracks to be removed
'''
def __init__(self, source, filter, invert=False):
        self.name = source.name + (' inverse' if invert else '') + ' filtered by ' + filter.name
self.source = source
self.filter = filter
self.bad_tracks = None
self.invert = invert
self.debug = False
def next_track(self):
        if self.bad_tracks is None:
self.bad_tracks = set()
while True:
track = self.filter.next_track()
if track:
self.bad_tracks.add(track)
else:
break
while True:
track = self.source.next_track()
if track:
if self.invert and (track in self.bad_tracks):
return track
elif (not self.invert) and (track not in self.bad_tracks):
return track
else:
if self.debug:
print 'filtered out', tlib.get_tn(track)
else:
break
return track
class ArtistFilter(object):
'''
Removes tracks from the stream that have the given artists
:param source: the source of tracks
:param artistNames: the names of the artists to be removed
'''
def __init__(self, source, artistNames):
self.name = source.name + ' with songs by ' + ', '.join(artistNames) + ' removed'
self.source = source
self.bad_artists = set([a.lower() for a in artistNames])
self.debug = False
def next_track(self):
while True:
track = self.source.next_track()
if track:
tinfo = tlib.get_track(track)
if tinfo['artist'].lower() not in self.bad_artists:
return track
else:
if self.debug:
print 'filtered out', tlib.get_tn(track)
else:
break
return track
class Dumper(object):
'''
Dumps tracks to the terminal
:param source: the source of tracks
:param props: list of property names to be included in the dump
'''
def __init__(self, source, props):
self.name = 'dumper'
self.source = source
self.which = 1
self.props = props
def next_track(self):
track = self.source.next_track()
if track:
print self.which, tlib.get_tn(track)
if len(self.props) > 0:
for prop in self.props:
val = tlib.get_attr(track, prop)
if val != None:
print ' ', prop, '->', val
self.which += 1
return track
class Debugger(object):
'''
Shows details on each track in the stream
:param source: the source of tracks
'''
def __init__(self, source):
        self.name = 'debugger'
self.source = source
def next_track(self):
track = self.source.next_track()
if track:
tinfo = tlib.get_track(track)
print json.dumps(tinfo, indent=4)
print
return track
class SaveToJson(object):
'''
Saves the stream to json
:param source: the source of tracks
:param name: the name of the json file
:param max_size: the max tracks to save
'''
def __init__(self, source, name='playlist.json', max_size=100):
self.name = 'SaveToJson ' + name
self.source = source
self.playlist_name = name
self.max_size = max_size
self.saved = False
self.buffer = []
def next_track(self):
track = self.source.next_track()
if track and len(self.buffer) < self.max_size:
self.buffer.append(track)
elif not self.saved:
self._save_playlist()
return track
def _save_playlist(self):
self.saved = True
f = open(self.playlist_name, 'w')
out = []
for tid in self.buffer:
t = tlib.get_track(tid)
if t:
out.append(t)
print >> f, json.dumps(out, indent=4)
f.close()
def get_annotator(source, attr):
fields = attr.split('.')
if len(fields) == 2:
type, name = fields
return Annotator(source, type)
return source
class PushableSource(object):
''' A source that allows you to push tracks
back for later retrieval
'''
def __init__(self, source):
self.source = source
self.name = 'pushable ' + source.name
self.buffer = []
def next_track(self):
if len(self.buffer) > 0:
return self.buffer.pop()
else:
return self.source.next_track()
def push(self, track):
self.buffer.append(track)
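# Hedged usage sketch (illustration only): peek at the next track from a
# PushableSource and push it back for later. _SingleListSource is hypothetical
# and exists only for this example.
def _example_pushable_source():
    class _SingleListSource(object):
        name = 'example list'
        def __init__(self, tracks):
            self._tracks = list(tracks)
        def next_track(self):
            return self._tracks.pop(0) if self._tracks else None
    src = PushableSource(_SingleListSource(['t1', 't2']))
    first = src.next_track()
    src.push(first)                    # put it back for later retrieval
    assert src.next_track() == first   # the pushed track comes out first
    return src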
|
|
from interfaces import Interface
from logicgates import And, Xor, Or
from components import Split, Power, MultiPower
from primitives import Cathode
from flipflops import JKFlipFlop
from mixins import InputAMixin, InputBMixin, InputCMixin, InputOperatorMixin, OutputSumMixin, OutputCarryMixin, InputAEightBitMixin, InputBEightBitMixin, OutputSumEightBitMixin, InputClockMixin, OutputEightBitMixin
class HalfAdder(Interface, InputAMixin, InputBMixin, OutputSumMixin, OutputCarryMixin):
def __init__(self):
x = Xor()
a = And()
#split input a and b to go to the xor and and gate
inputs = {}
inputs["input_a"] = Split(x.input_a, a.input_a).input
inputs["input_b"] = Split(x.input_b, a.input_b).input
#get the output from the xor and and gates
outputs = {}
outputs["sum"] = x.output
outputs["carry"] = a.output
super(HalfAdder, self).__init__(inputs, outputs)
def __str__(self):
return "HalfAdder: " + super(HalfAdder, self).__str__()
class FullAdder(Interface, InputAMixin, InputBMixin, InputCMixin, OutputSumMixin, OutputCarryMixin):
def __init__(self):
ha1 = HalfAdder()
ha2 = HalfAdder()
o = Or()
#create inputs
inputs = {}
inputs["input_a"] = ha1.input_a
inputs["input_b"] = ha1.input_b
inputs["input_c"] = ha2.input_b
        #connect ha1 sum to ha2 input_a
        ha1.sum.connect(ha2.input_a)
        #connect the carries to the or
ha1.carry.connect(o.input_a)
ha2.carry.connect(o.input_b)
#connect the outputs
outputs = {}
outputs["sum"] = ha2.sum
outputs["carry"] = o.output
super(FullAdder, self).__init__(inputs, outputs)
def __str__(self):
return "FullAdder: " + super(FullAdder, self).__str__()
class EightBitRippleCarryAdder(Interface, InputAEightBitMixin, InputBEightBitMixin, OutputSumEightBitMixin, OutputCarryMixin):
def __init__(self):
ha = HalfAdder()
fa1 = FullAdder()
fa2 = FullAdder()
fa3 = FullAdder()
fa4 = FullAdder()
fa5 = FullAdder()
fa6 = FullAdder()
fa7 = FullAdder()
#wire up outputs and inputs
inputs = {}
inputs["input_a"] = [ha.input_a, fa1.input_b, fa2.input_b, fa3.input_b, fa4.input_b, fa5.input_b, fa6.input_b, fa7.input_b]
inputs["input_b"] = [ha.input_b, fa1.input_c, fa2.input_c, fa3.input_c, fa4.input_c, fa5.input_c, fa6.input_c, fa7.input_c]
outputs = {}
outputs["sum"] = [ha.sum, fa1.sum, fa2.sum, fa3.sum, fa4.sum, fa5.sum, fa6.sum, fa7.sum]
outputs["carry"] = fa7.carry
#connect up adders
ha.carry.connect(fa1.input_a)
fa1.carry.connect(fa2.input_a)
fa2.carry.connect(fa3.input_a)
fa3.carry.connect(fa4.input_a)
fa4.carry.connect(fa5.input_a)
fa5.carry.connect(fa6.input_a)
fa6.carry.connect(fa7.input_a)
super(EightBitRippleCarryAdder, self).__init__(inputs, outputs)
def __str__(self):
return "EightBitRippleCarryAdder: inputs = {{input_a = {}, input_b = {}}}, outputs = {{sum = {}, carry = {}}}".format(self.input_a, self.input_b, self.sum, self.carry)
class EightBitRippleCarryAdderSubtractor(Interface, InputAEightBitMixin, InputBEightBitMixin, InputOperatorMixin, OutputSumEightBitMixin, OutputCarryMixin):
def __init__(self):
fa0 = FullAdder()
fa1 = FullAdder()
fa2 = FullAdder()
fa3 = FullAdder()
fa4 = FullAdder()
fa5 = FullAdder()
fa6 = FullAdder()
fa7 = FullAdder()
xo0 = Xor()
xo1 = Xor()
xo2 = Xor()
xo3 = Xor()
xo4 = Xor()
xo5 = Xor()
xo6 = Xor()
xo7 = Xor()
#send the op to the first full adder and the xors
op_split = Split(fa0.input_a, xo0.input_a, xo1.input_a, xo2.input_a, xo3.input_a, xo4.input_a, xo5.input_a, xo6.input_a, xo7.input_a)
#wire up outputs and inputs
inputs = {}
inputs["operator"] = op_split.input
inputs["input_a"] = [fa0.input_b, fa1.input_b, fa2.input_b, fa3.input_b, fa4.input_b, fa5.input_b, fa6.input_b, fa7.input_b]
inputs["input_b"] = [xo0.input_b, xo1.input_b, xo2.input_b, xo3.input_b, xo4.input_b, xo5.input_b, xo6.input_b, xo7.input_b]
outputs = {}
outputs["sum"] = [fa0.sum, fa1.sum, fa2.sum, fa3.sum, fa4.sum, fa5.sum, fa6.sum, fa7.sum]
outputs["carry"] = fa7.carry
#connect xors to adders
xo0.output.connect(fa0.input_c)
xo1.output.connect(fa1.input_c)
xo2.output.connect(fa2.input_c)
xo3.output.connect(fa3.input_c)
xo4.output.connect(fa4.input_c)
xo5.output.connect(fa5.input_c)
xo6.output.connect(fa6.input_c)
xo7.output.connect(fa7.input_c)
#connect up adders
fa0.carry.connect(fa1.input_a)
fa1.carry.connect(fa2.input_a)
fa2.carry.connect(fa3.input_a)
fa3.carry.connect(fa4.input_a)
fa4.carry.connect(fa5.input_a)
fa5.carry.connect(fa6.input_a)
fa6.carry.connect(fa7.input_a)
super(EightBitRippleCarryAdderSubtractor, self).__init__(inputs, outputs)
def __str__(self):
return "EightBitRippleCarryAdderSubtractor: inputs = {{input_a = {}, input_b = {}, operator = {}}}, outputs = {{sum = {}, carry = {}}}".format(self.input_a, self.input_b, self.operator, self.sum, self.carry)
class ALU(Interface, InputAEightBitMixin, InputBEightBitMixin, InputOperatorMixin, OutputSumEightBitMixin, OutputCarryMixin):
def __init__(self):
"""
Truth table for overflow
op carry overflow
1 1 0
1 0 1
0 0 0
0 1 1
Truth table for negative
op overflow negative
1 1 1
1 0 0
0 1 0
0 0 0
operator:
'0' - addition
'1' - subtraction
"""
rcas = EightBitRippleCarryAdderSubtractor()
overflow_xor = Xor()
neg_and = And()
#op input
#send the op code to the rcas, the overflow xor and the negative and
op_split = Split(rcas.operator, overflow_xor.input_a, neg_and.input_a)
op = op_split.input
inputs = {}
inputs["input_a"] = rcas.input_a.bits
inputs["input_b"] = rcas.input_b.bits
inputs["operator"] = op
# inputs = [rcas.input_a.bits, rcas.input_b.bits, op]
#carry output
#send the rcas carry to the carry output and the overflow xor
carry = Cathode()
carry_split = Split(carry, overflow_xor.input_b)
rcas.carry.connect(carry_split.input)
#overflow output
#send the overflow to the overflow output and the negative And
overflow = Cathode()
overflow_split = Split(overflow, neg_and.input_b)
overflow_xor.output.connect(overflow_split.input)
#negative output
negative = neg_and.output
outputs = {}
outputs["sum"] = rcas.sum.bits
outputs["carry"] = rcas.carry
outputs["overflow"] = overflow
outputs["negative"] = negative
#outputs = [rcas.sum.bits, rcas.carry, overflow, negative]
super(ALU, self).__init__(inputs, outputs)
#output flags
@property
def overflow(self):
return self.outputs["overflow"]
@property
def negative(self):
"""
'0' - positive
'1' - negative
"""
return self.outputs["negative"]
def __str__(self):
return "ALU: inputs = {{input_a = {}, input_b = {}, operator = {}}}, outputs = {{sum = {}, carry = {}, overflow = {}, negative = {}}}".format(self.input_a, self.input_b, self.operator, self.sum, self.carry, self.overflow, self.negative)
class EightBitRippleCounter(Interface, InputClockMixin, OutputEightBitMixin):
def __init__(self):
inputs = {}
outputs = {}
power = MultiPower()
power.on()
# create jks and output cathodes
jks = []
output_cathodes = []
for i in range(8):
# create jk
jk = JKFlipFlop()
jks.append(jk)
# connect up flip flops J K to power
power.connect(jk.input_j)
#power.connect(jk.input_k)
#create output cathode
output_cathodes.append(Cathode())
# connect up q's to output and next jk's clock
for i in range(0,7):
q_split = Split(jks[i + 1].clock, output_cathodes[i])
jks[i].output_q.connect(q_split.input)
# connect up final jk
jks[7].output_q.connect(output_cathodes[7])
inputs["clock"] = jks[0].clock
outputs["output"] = output_cathodes
super(EightBitRippleCounter, self).__init__(inputs, outputs)
def __str__(self):
return "EightBitRippleCounter: inputs = {{clock = {}}}, outputs = {{output = {}}}".format(self.clock, self.output)
|
|
from datetime import date, datetime
import numpy as np
import pandas
import pytest
import astropy.time
from astropy.time import Time
import sunpy.time as time
from sunpy.time import is_time_equal, parse_time
LANDING = Time('1966-02-03', format='isot')
def test_parse_time_24():
dt = parse_time("2010-10-10T24:00:00")
assert dt == Time('2010-10-11')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_24_2():
dt = parse_time("2010-10-10T24:00:00.000000")
assert dt == Time('2010-10-11')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_trailing_zeros():
# see issue #289 at https://github.com/sunpy/sunpy/issues/289
dt = parse_time('2010-10-10T00:00:00.00000000')
assert dt == Time('2010-10-10')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_tuple():
dt = parse_time((1966, 2, 3))
assert dt == LANDING
assert dt.format == 'isot'
assert dt.scale == 'utc'
dt = parse_time((1966, 2, 3, 12, 2, 3))
assert dt == Time('1966-2-3T12:2:3')
assert dt.format == 'isot'
assert dt.scale == 'utc'
dt = parse_time((1966, 2, 3, 12, 2, 3, 8266))
assert dt == Time('1966-2-3T12:2:3.008266')
assert dt.format == 'isot'
assert dt.scale == 'utc'
def test_parse_time_int():
# Once https://github.com/astropy/astropy/issues/6970 is fixed,
# remove .jd from equality check
dt1 = parse_time(765548612.0, format='utime')
assert dt1.jd == Time('2003-4-5T12:23:32').jd
assert dt1.format == 'utime'
dt2 = parse_time(1009685652.0, format='utime')
assert dt2.jd == Time('2010-12-30T4:14:12').jd
assert dt2.format == 'utime'
def test_parse_time_pandas_timestamp():
ts = pandas.Timestamp(LANDING.datetime)
dt = parse_time(ts)
assert isinstance(dt, astropy.time.Time)
assert dt == LANDING
def test_parse_time_pandas_series():
inputs = [datetime(2012, 1, i) for i in range(1, 13)]
ind = pandas.Series(inputs)
as_inps = Time(inputs)
dts = parse_time(ind)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == as_inps)
def test_parse_time_pandas_series_2():
inputs = [[datetime(2012, 1, 1, 0, 0), datetime(2012, 1, 2, 0, 0)],
[datetime(2012, 1, 3, 0, 0), datetime(2012, 1, 4, 0, 0)]]
ind = pandas.Series(inputs)
as_inps = Time(inputs)
apts = parse_time(ind)
assert isinstance(apts, astropy.time.Time)
assert np.all(apts == as_inps)
assert apts.shape == as_inps.shape
def test_parse_time_pandas_index():
inputs = [datetime(2012, 1, i) for i in range(1, 13)]
ind = pandas.DatetimeIndex(inputs)
as_inps = Time(inputs)
dts = parse_time(ind)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == as_inps)
def test_parse_time_numpy_date():
inputs = np.arange('2005-02', '2005-03', dtype='datetime64[D]')
dts = parse_time(inputs)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == Time([str(dt.astype('M8[ns]')) for dt in inputs]))
def test_parse_time_numpy_datetime():
inputs = np.arange('2005-02-01T00', '2005-02-01T10', dtype='datetime64')
dts = parse_time(inputs)
assert isinstance(dts, astropy.time.Time)
assert np.all(dts == Time([str(dt.astype('M8[ns]')) for dt in inputs]))
def test_parse_time_individual_numpy_datetime():
dt64 = np.datetime64('2005-02-01T00')
dt = parse_time(dt64)
assert isinstance(dt, astropy.time.Time)
assert dt == Time('2005-02-01', format='isot')
def test_parse_time_numpy_datetime_timezone():
dt64 = np.datetime64('2014-02-07T16:47:51-0500')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T21:47:51', format='isot')
def test_parse_time_numpy_datetime_ns():
dt64 = np.datetime64('2014-02-07T16:47:51.008288000')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T16:47:51.008288000', format='isot')
dt64 = np.datetime64('2014-02-07T16:47:51.008288123')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T16:47:51.008288123', format='isot')
dt64 = np.datetime64('2014-02-07T16:47:51.234565999')
dt = parse_time(dt64)
assert dt == Time('2014-02-07T16:47:51.234565999')
def test_parse_time_astropy():
ip = astropy.time.Time(['2016-01-02T23:00:01'])
astropy_time = parse_time(ip)
assert astropy_time == ip
assert astropy_time.format == 'isot'
def test_parse_time_datetime():
dt = datetime(2014, 2, 7, 16, 47, 51, 8288)
assert parse_time(dt) == Time('2014-02-07 16:47:51.008288')
assert parse_time(dt).format == 'datetime'
def test_parse_time_date():
dt = parse_time(date(1966, 2, 3))
assert dt == Time('1966-2-3')
assert dt.format == 'iso'
def test_parse_time_now():
now = parse_time('now')
assert isinstance(now, astropy.time.Time)
assert now.format == 'datetime'
assert now.scale == 'utc'
def test_parse_time_ISO():
dt1 = Time('1966-02-03T20:17:40')
assert parse_time('1966-02-03').jd == LANDING.jd
assert (
parse_time('1966-02-03T20:17:40') == dt1
)
assert (
parse_time('19660203T201740') == dt1
)
dt2 = Time('2007-05-04T21:08:12.999999')
dt3 = Time('2007-05-04T21:08:12')
dt4 = Time('2007-05-04T21:08:00')
dt5 = Time('2007-05-04')
lst = [
('2007-05-04T21:08:12.999999', dt2),
('20070504T210812.999999', dt2),
('2007/05/04 21:08:12.999999', dt2),
('2007-05-04 21:08:12.999999', dt2),
('2007/05/04 21:08:12', dt3),
('2007-05-04 21:08:12', dt3),
('2007-05-04 21:08', dt4),
('2007-05-04T21:08:12', dt3),
('20070504T210812', dt3),
('2007-May-04 21:08:12', dt3),
('2007-May-04 21:08', dt4),
('2007-May-04', dt5),
('2007-05-04', dt5),
('2007/05/04', dt5),
('04-May-2007', dt5),
('04-May-2007 21:08:12.999999', dt2),
('20070504_210812', dt3),
]
for k, v in lst:
dt = parse_time(k)
assert is_time_equal(dt, v)
assert dt.format == 'isot'
def test_parse_time_tai():
dt = Time('2007-05-04T21:08:12', scale='tai')
dt2 = parse_time('2007.05.04_21:08:12_TAI')
assert dt == dt2
assert dt.scale == dt2.scale
def test_parse_time_leap_second():
dt1 = parse_time('1995-12-31 23:59:60')
dt2 = Time('1995-12-31T23:59:60')
assert dt1.jd == dt2.jd
dt3 = parse_time('1995-Dec-31 23:59:60')
assert dt2.jd == dt3.jd
@pytest.mark.parametrize("ts,fmt", [
(1950.0, 'byear'),
('B1950.0', 'byear_str'),
(63072064.184, 'cxcsec'),
(datetime(2000, 1, 2, 12, 0, 0), 'datetime'),
(2000.45, 'decimalyear'),
('2000-01-01T00:00:00.000(TAI)', 'fits'),
(630720013.0, 'gps'),
('2000-01-01 00:00:00.000', 'iso'),
('2000-01-01T00:00:00.000', 'isot'),
(2451544.5, 'jd'),
(2000.0, 'jyear'),
('J2000.0', 'jyear_str'),
(51544.0, 'mjd'),
(730120.0003703703, 'plot_date'),
(946684800.0, 'unix'),
('2000:001:00:00:00.000', 'yday')
])
def test_parse_time_astropy_formats(ts, fmt):
dt = parse_time(ts, format=fmt)
assert dt.format == fmt
def test_parse_time_int_float():
# int and float values are not unique
# The format has to be mentioned
with pytest.raises(ValueError):
parse_time(100)
with pytest.raises(ValueError):
parse_time(100.0)
@pytest.mark.parametrize("scale", [
'tai',
'tcb',
'tcg',
'tdb',
'tt',
'ut1',
'utc'
])
def test_parse_time_scale(scale):
dt = parse_time('2007-05-04T21:08:12', scale=scale)
dt2 = Time('2007-05-04T21:08:12', scale=scale)
assert is_time_equal(dt, dt2)
assert dt.scale == scale
dt = parse_time(np.datetime64('2007-05-04T21:08:12'), scale=scale)
dt2 = Time('2007-05-04T21:08:12', scale=scale)
assert dt == dt2
assert dt.scale == scale
dt = datetime(2014, 2, 7, 16, 47, 51)
dt = parse_time(dt, scale=scale)
dt2 = Time('2014-02-07T16:47:51', scale=scale)
assert dt == dt2
assert dt.scale == scale
dt = date(2014, 2, 7)
dt = parse_time(dt, scale=scale)
dt2 = Time('2014-02-07', scale=scale)
assert dt == dt2
assert dt.scale == scale
def test_parse_time_list():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
assert np.all(parse_time(tstrings) == Time(tstrings))
def test_parse_time_list_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
assert np.all(parse_time(tstrings) == Time(tstrings))
def test_is_time():
assert time.is_time(datetime.utcnow()) is True
assert time.is_time('2017-02-14 08:08:12.999') is True
assert time.is_time(Time.now()) is True
assert time.is_time(None) is False
assert time.is_time('2016-14-14 19:08') is False
def test_is_time_in_given_format():
assert time.is_time_in_given_format('2017-02-14 08:08:12.999', "%Y-%m-%d %H:%M:%S.%f") is True
assert time.is_time_in_given_format('2017-02-14 08:08:12.999', "%Y-%m-%dT%H:%M:%S.%f") is False
|
|
"""Test-runner for wxPython's unit test suite.
TODO: document all functionality"""
import os, sys
import unittest
import time
import wx
from optparse import OptionParser
# ----------------- Helper Functions / Classes ---------------------
# TODO: maybe change some variable names?
# TODO: maybe put this function as a method somewhere else?
def _make_clean_opt_string():
# which options was this run called with?
# replace short opts with long opts (explicit is better than implicit)
opt_string = ""
args = sys.argv[1:]
for arg in args:
if arg.startswith("-") and not arg.startswith("--"):
# handle the case where opt and arg are conjoined
arg2 = None
if len(arg) > 2:
arg2 = arg[2:]
arg = arg[:2]
# it's a short opt, now find it
for opt in parser.option_list:
if arg in opt._short_opts:
opt_string += opt._long_opts[0]
if opt.action == "store":
opt_string += "="
if arg2 != None:
opt_string += arg2
else:
opt_string += " "
else:
opt_string += arg
opt_string += " "
if opt_string == "":
opt_string = "NONE"
return opt_string
def wiki(string, level=3, reverse=False):
    if (options.wiki and not reverse) or (not options.wiki and reverse):
output(level, string)
def wiki_title(number, string):
if options.wiki:
title = "=" * number
return title + " " + string + " " + title
else:
return string
def wiki_bullet():
if options.wiki:
return " * "
else:
return ""
def wiki_bold(string):
if options.wiki:
return "'''" + string + "'''"
else:
return string
def wiki_summary_item(title, data):
    # TODO: possibly a more elegant way with regexes?
if options.wiki:
# escape the CamelCase for wiki only
# ASSUME: max one CamelCase, right after a period
i = title.find(".")
if i != -1 and title[i+1].isupper():
title = title.replace(".", ".!")
return wiki_bullet() + wiki_bold(title) + ": %s" % (data)
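# Hedged illustration (not used by the runner): what the wiki helpers above
# produce when the --wiki flag is set, e.g. wiki_title(4, "Summary") gives
# "==== Summary ====" and wiki_bullet() + wiki_bold("Passed") gives
# " * '''Passed'''"; without --wiki the plain strings come back unchanged.
def _example_wiki_markup():
    return wiki_title(4, "Summary"), wiki_bullet() + wiki_bold("Passed")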
def output(level, string):
if options.verbosity >= level:
print string
class UnitTestSuite:
def __init__(self, include="", exclude="", tests=""):
# error checking
if include != "" and exclude != "":
raise ValueError("include and exclude arguments are mutually exclusive")
# TODO: could this become a simple os.listdir(".")?
_rootdir = os.path.abspath(sys.path[0])
if not os.path.isdir(_rootdir):
_rootdir = os.path.dirname(_rootdir)
self.rootdir = _rootdir # to come in handy later
# a dict of all possible test modules that could be run
        # ASSUME: module names are unique even when compared case-insensitively
_module_names = {}
for _name in [ n[:-3] for n in os.listdir(self.rootdir)
if n.startswith("test") and n.endswith(".py") ]:
_module_names[ _name.lower() ] = _name
# make the include/exclude/tests lists
_module_specs = None
_spec_type = None
_test_specs = None
if include != "":
_module_specs = self._clean_listify(include)
_spec_type = "include"
elif exclude != "":
_module_specs = self._clean_listify(exclude)
_spec_type = "exclude"
if tests != "":
_test_specs = self._clean_listify(tests, False)
# make sure they all exist
if _module_specs != None: # TODO: got to be a better place to put this
for _mod in _module_specs:
if not _module_names.has_key(_mod.lower()):
parser.error("Module %s not found under test" % (_mod))
# now import the modules
if _module_specs == None:
self.modules = [ __import__(name) for name in _module_names.values() ]
elif _spec_type == "include":
self.modules = [ __import__(name) for name in _module_specs ]
elif _spec_type == "exclude":
self.modules = [ __import__(name) for name in _module_names.values()
if name not in _module_specs ]
# convert modules into suites
self.suites = []
for module in self.modules:
_classname = module.__name__[4:] + "Test"
_class = module.__getattribute__(_classname)
# build test suite (whether or not --tests are specified)
if _test_specs == None:
_suite = unittest.makeSuite(_class)
else:
_suite = unittest.TestSuite()
for _test_name in unittest.getTestCaseNames(_class,"test"):
for _test in _test_specs:
_docstr = getattr(_class, _test_name).__doc__
if _test_name.lower().find(_test.lower()) != -1 or \
_docstr != None and _docstr.lower().find(_test.lower()) != -1:
_suite.addTest(_class(_test_name))
break
# filter out tests that shouldn't be run in subclasses
_tests = _suite._tests
            for _t in list(_tests):  # iterate over a copy; items may be removed below
# TODO: pull logic into wxtest
# or use the version of unittest instead
if sys.version_info[0:2] >= (2,5):
_mname = _t._testMethodName
else:
_mname = _t._TestCase__testMethodName
if _mname.find('_wx') != -1:
# grab the class: everything between '_wx' and 'Only' at the end
restriction = _mname[_mname.find('_wx')+3:-4]
if not _class.__name__.startswith(restriction):
#print "filtered: %s (class=%s)" % (mname,_class.__name__)
_tests.remove(_t)
# if suite is non-empty...
if _suite.countTestCases() > 0:
# add it to the list of suites :-)
self.suites.append(_suite)
def _clean_listify(self, string, include_or_exclude=True):
_clean_list = []
_list = string.split(",")
for s in _list:
if include_or_exclude:
if s.endswith(".py"):
s = s[:-3]
if s.startswith("wx."):
s = "test" + s[3:]
if not s.startswith("test"):
s = "test" + s
_clean_list.append(s)
# maintains capitalization
return _clean_list
def _start_figleaf(self):
if options.figleaf != "":
globals()["figleaf"] = __import__("figleaf")
# TODO: perhaps make this class-specific rather than global?
globals()["figfile"] = os.path.join(self.rootdir, options.figleaf_filename)
if os.path.exists(figfile):
os.remove(figfile)
figleaf.start(ignore_python_lib=False)
def _stop_figleaf(self):
if options.figleaf != "":
figleaf.stop()
figleaf.write_coverage(figfile)
def run(self):
test_run_data = UnitTestRunData()
self._start_figleaf()
self.start_time = time.time()
# run tests
for _suite in self.suites:
_result = unittest.TestResult()
_suite.run(_result)
_module_name = _suite._tests[0].__module__
test_run_data.addResult(_module_name, _result)
self.stop_time = time.time()
self._stop_figleaf()
# process results
test_run_data.setTime(self.start_time, self.stop_time)
test_run_data.process()
# return results
return test_run_data
class UnitTestRunData:
def __init__(self):
self.results = {}
def addResult(self, module_name, result):
self.results[module_name] = result
def setTime(self, start, stop):
self.startTime = start
self.stopTime = stop
def process(self):
# process data
self.elapsedTime = self.stopTime - self.startTime
self.countSuites = len(self.results)
self.countSuccesses = 0
self.countFailures = 0
self.countErrors = 0
self.rawData = {}
for _module_name, _result in self.results.iteritems():
# TODO: revisit all this processing, is everything necessary?
tmp = {}
# parse results individually
tmp["failures"] = len(_result.failures)
tmp["errors"] = len(_result.errors)
tmp["successes"] = _result.testsRun - tmp["failures"] - tmp["errors"]
# total results
self.countSuccesses += tmp["successes"]
self.countFailures += tmp["failures"]
self.countErrors += tmp["errors"]
# TODO: add processing here
tmp["failure_data"] = _result.failures
tmp["error_data"] = _result.errors
self.rawData[_module_name] = tmp
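# Hedged illustration of the normalisation that UnitTestSuite._clean_listify
# applies to --include-modules/--exclude-modules values. This is a standalone
# re-implementation for documentation purposes only; the runner itself uses the
# method above.
def _example_module_spec_cleaning(spec):
    if spec.endswith(".py"):
        spec = spec[:-3]
    if spec.startswith("wx."):
        spec = "test" + spec[3:]
    if not spec.startswith("test"):
        spec = "test" + spec
    return spec
# e.g. "wx.Button" -> "testButton", "testFrame.py" -> "testFrame", "Dialog" -> "testDialog"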
# -----------------------------------------------------------
# -------------------- Option Logic -------------------------
# Options
usage = "usage: python %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-v","--verbosity", default=3,
action="store", type="int", dest="verbosity",
help="An integer [from 0 to 5, default=3] determining " +
"how much test result data will be output.")
parser.add_option("-o", "--output-filename", default="",
action="store", dest="outfilename",
metavar="FILE",
help="redirect output from console to FILE")
parser.add_option("-f", "--figleaf", default="",
action="store", dest="figleaf", metavar="FILE",
help="use the figleaf code-coverage tool, and write figleaf output to " +
"FILE. you must have figleaf installed to use this option. " +
"using this option will result in a slower test run")
parser.add_option("-w", "--wiki", default=False,
action="store_true", dest="wiki",
help="write data in wiki-markup format (MoinMoin / wxPyWiki)")
parser.add_option("-i", "--include-modules", default="",
action="store", dest="module_list",
help="run only the comma-separated list of modules given. use either " +
"wx class names or the name of the desired test module. " +
"don't use spaces in the list")
parser.add_option("-e", "--exclude-modules", default="",
action="store", dest="module_ex_list",
help="run all modules excluding those given in the comma-separated " +
"list given. use either wx class names or the name of the desired " +
"test module.")
parser.add_option("-t", "--tests", default="",
action="store", dest="test_list",
help="run only a targeted list of tests. give a comma-separated list " +
"of strings, and each test whose name or docstring contains " +
"one of those given will be run.")
# TODO: add "--include-methods" and "--exclude-methods" functionality
(options, args) = parser.parse_args()
# Options error-checking
if options.module_list != "" and options.module_ex_list != "":
parser.error("options --exclude-modules and --include-modules are mutually exclusive")
# doesn't really matter, but the help screen says it, so enforce it
if options.verbosity < 0 or options.verbosity > 5:
parser.error("verbosity must be between 0 and 5")
# -----------------------------------------------------------
# ------------------- Test Running --------------------------
# File redirect
if options.outfilename != "":
origstdout = sys.stdout
try:
sys.stdout = open(options.outfilename,'w')
except IOError:
print "Error opening output file, defaulting to original stdout"
sys.stdout = origstdout
unit_test_suite = UnitTestSuite(include=options.module_list,
exclude=options.module_ex_list,
tests=options.test_list)
result_data = unit_test_suite.run()
# see refactored method above
opt_string = _make_clean_opt_string()
# -----------------------------------------------------------
# ------------------- Output Reporting ----------------------
output(1, "") # make things easier to read
wiki(wiki_title(3, "%s - %s" % (time.asctime(),wx.GetOsDescription())), level=2)
output(2, wiki_title(4, "Platform Information"))
output(2, wiki_summary_item("Platform [sys.platform]",sys.platform))
output(2, wiki_summary_item("Python Version [sys.version]",sys.version))
#output(2, wiki_summary_item("wx Version [wx.version()]",wx.version()))
output(2, wiki_summary_item("OS [wx.GetOsDescription()]",wx.GetOsDescription()))
output(2, wiki_summary_item("wx Info [wx.PlatformInfo]",str(wx.PlatformInfo)))
output(2, wiki_summary_item("runUnitTests.py options",opt_string))
wiki("\n----------------------\n", level=3, reverse=True)
output(1, wiki_title(4, "Summary"))
output(2, wiki_bullet() + "Run completed in %.2f seconds" % (result_data.elapsedTime))
output(2, wiki_bullet() + "%d classes tested" % (result_data.countSuites))
output(1, wiki_bullet() + "%d tests passed in total!" % (result_data.countSuccesses))
if result_data.countFailures > 0:
output(1, wiki_bullet() + "%d tests failed in total!" % (result_data.countFailures))
if result_data.countErrors > 0:
output(1, wiki_bullet() + "%d tests erred in total!" % (result_data.countErrors))
wiki("\n----------------------\n", level=3, reverse=True)
data_items = result_data.rawData.items()
data_items.sort()
output(3, wiki_title(4, "Module Data"))
for mod_name, results in data_items:
messages = ["%d passed" % (results["successes"])]
if results["failures"] > 0:
messages.append("%d failed" % (results["failures"]))
if results["errors"] > 0:
messages.append("%d erred" % (results["errors"]))
output(3, wiki_bullet() + "%s: %s" % (mod_name, ", ".join(messages)))
wiki("\n----------------------\n", level=4, reverse=True)
if result_data.countFailures + result_data.countErrors > 0:
output(4, wiki_title(4,"Failure Data"))
for mod_name, results in data_items:
# report on it
for failure in results["failure_data"] + results["error_data"]:
type = None
if failure in results["failure_data"]:
type = "Fail: "
elif failure in results["error_data"]:
type = "Error: "
if options.wiki:
output(4, wiki_bullet() + type + str(failure[0]).replace('.','.!'))
output(5," {{{" + str(failure[1]) + "}}}")
else:
output(4, " " + type + str(failure[0]))
output(5, " " + str(failure[1]).replace("\n","\n "))
|
|
# Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2010, 2014 dequis
# Copyright (c) 2012 Randall Ma
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 horsik
# Copyright (c) 2013 Tao Sauvage
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile.config import Key, Screen, Group, Drag, Click
from libqtile.command import lazy
from libqtile import hook, layout, bar, widget
import os
import subprocess
import socket
configs = {}
configs['default'] = {'battery': False, 'num_screens': 1}
configs['simulcra'] = {'battery': False, 'num_screens': 2}
configs['ubik'] = {'battery': True, 'num_screens': 1}
configs['valis'] = {'battery': False, 'num_screens': 1}
hostname = socket.gethostname()
config = configs[hostname] if hostname in configs else configs['default']
mod = 'mod1'
control = 'control'
# xmonad style keybindings
keys = [
# layout management
Key([mod], 'space', lazy.next_layout()),
Key([mod], 'h', lazy.layout.grow()),
Key([mod], 'l', lazy.layout.shrink()),
Key([mod], 'k', lazy.layout.down()),
Key([mod], 'Tab', lazy.layout.down()),
Key([mod], 'j', lazy.layout.up()),
Key([mod, 'shift'], 'Tab', lazy.layout.up()),
Key([mod], 't', lazy.window.toggle_floating()),
# Move windows up or down in current stack
Key([mod, 'shift'], 'k', lazy.layout.shuffle_down()),
Key([mod, 'shift'], 'j', lazy.layout.shuffle_up()),
Key([mod], 'Return', lazy.layout.swap_left()),
# Switch window focus to other pane(s) of stack
# Key([mod], 'space', lazy.layout.next()),
# Swap panes of split stack
Key([mod, 'shift'], 'space', lazy.layout.rotate()),
# switch monitors
Key([mod], 'comma', lazy.to_screen(0)),
Key([mod], 'period', lazy.to_screen(1)),
# restarting qtile
Key([mod, 'shift'], 'q', lazy.shutdown()),
Key([mod], 'q', lazy.restart()),
# app launches
Key([mod, control], 't', lazy.spawn('gnome-terminal --hide-menubar')),
Key([mod, control], 'f', lazy.spawn('firefox')),
Key([mod], 'p', lazy.spawn('j4-dmenu-desktop')),
Key([mod, 'shift'], 'p', lazy.spawn('gmrun')),
Key([mod, control], 'l', lazy.spawn('gnome-screensaver-command -l')),
# kill
Key([mod, 'shift'], 'c', lazy.window.kill()),
# toggle touchpad
Key([mod, control], 'm', lazy.spawn('toggle-touchpad-scrolling.sh')),
# Key([mod, control], 'm', lazy.spawn('xterm -e "echo \"%s\" && bash"' % lazy.mouse)),
# media control
# Key(['shift'], 'F3', lazy.spawn('dbus-send --print-reply --dest=org.mpris.MediaPlayer2.pithos'
# ' /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Prev')),
# Key(['shift'], 'F4', lazy.spawn('dbus-send --print-reply --dest=org.mpris.MediaPlayer2.pithos'
# ' /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Next')),
# Key(['shift'], 'F5', lazy.spawn('amixer set Master 4-')),
# Key(['shift'], 'F6', lazy.spawn('amixer set Master 4+')),
# Key(['shift'], 'F7', lazy.spawn('amixer -D pulse set Master 1+ toggle')),
# Key(['shift'], 'F8', lazy.spawn('dbus-send --print-reply --dest=org.mpris.MediaPlayer2.pithos'
# ' /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.PlayPause')),
Key(['shift'], 'F3', lazy.spawn('to_all_media_players org.mpris.MediaPlayer2.Player.Prev')),
Key(['shift'], 'F4', lazy.spawn('to_all_media_players org.mpris.MediaPlayer2.Player.Next')),
Key(['shift'], 'F5', lazy.spawn('amixer set Master 4-')),
Key(['shift'], 'F6', lazy.spawn('amixer set Master 4+')),
Key(['shift'], 'F7', lazy.spawn('amixer -D pulse set Master 1+ toggle')),
Key(['shift'], 'F8', lazy.spawn('to_all_media_players org.mpris.MediaPlayer2.Player.PlayPause')),
]
groups = [Group(i) for i in '123456789']
for i in groups:
# mod1 + letter of group = switch to group
keys.append(Key([mod], i.name, lazy.group[i.name].toscreen()))
# mod1 + shift + letter of group = switch to & move focused window to group
keys.append(Key([mod, 'shift'], i.name, lazy.window.togroup(i.name)))
layouts = [
layout.MonadTall(border_normal='#333333'),
layout.Stack(border_focus='#ff0000', border_normal='#333333'),
layout.Max(),
]
widget_defaults = dict(
# font='Arial',
fontsize=16,
padding=3,
)
def get_status_bar_elements(conf, is_primary):
status_bar_elements = []
status_bar_elements += [
widget.TextBox(text='CPU:', fontsize=12),
# cpu uses family of dark orange colors
widget.CPUGraph(border_color='8b4500', fill_color='cd6600', graph_color='ee7600'),
widget.TextBox(text='Mem:', fontsize=12),
widget.MemoryGraph(),
]
if conf['battery']:
status_bar_elements += [
widget.TextBox(text='Bat:', fontsize=12),
widget.Battery(format='{char} {percent:2.0%}'),
]
status_bar_elements += [
widget.sep.Sep(),
widget.TextBox(text='Vol:', fontsize=12),
widget.Volume(),
widget.sep.Sep(),
widget.GroupBox(fontsize=12, this_current_screen_border='#FF0000', urgent_border='#00FF00', disable_drag=True),
widget.WindowName(fontsize=12),
widget.sep.Sep(),
]
if is_primary:
status_bar_elements.append(widget.Systray())
status_bar_elements += [
widget.Clock(format='%b %d %I:%M', fontsize=12),
]
return status_bar_elements
screens = [Screen(bottom=bar.Bar(get_status_bar_elements(config, i == 0), 26),) for i in range(config['num_screens'])]
def noop(qtile):
pass
# Drag floating layouts.
mouse = [
Drag([mod], 'Button1', lazy.window.set_position_floating(), start=lazy.window.get_position()),
Drag([mod], 'Button3', lazy.window.set_size_floating(), start=lazy.window.get_size()),
Click([mod], 'Button2', lazy.window.bring_to_front()),
# Drag([], 'Button4', lazy.function(noop), focus=None),
# Drag([], 'Button5', lazy.function(noop), focus=None)
]
# Windows that float by default
float_rules = [
dict(wmclass='gmrun'),
dict(wmclass='unity-control-center'),
dict(wmclass='gazeb'),
dict(wmclass='rqt_gui'),
dict(wmclass='Matplotlib'),
dict(wmclass='mountain_car_human_controlled.py'),
]
group_assignments = {}
group_assignments['Slack'] = '8'
group_assignments['Pithos'] = '9'
@hook.subscribe.client_new
def handle_new_window(window):
# with open('/home/sam/temp/qtile.log', 'a') as f:
# print(window.window.get_wm_type(), window.window.get_wm_class(), file=f)
    if window.window.get_wm_type() == 'dialog' or window.window.get_wm_transient_for():
window.floating = True
else:
type = window.window.get_wm_class()[1]
if type in group_assignments:
window.togroup(group_assignments[type])
dgroups_key_binder = None
dgroups_app_rules = []
main = None
follow_mouse_focus = False
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=float_rules, border_normal='#97ffff', border_focus='#FF0000')
auto_fullscreen = True
focus_on_window_activation = 'smart'
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, github issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = 'LG3D'
@hook.subscribe.startup_once
def autostart():
home = os.path.expanduser('~/.config/qtile/autostart.sh')
subprocess.call([home])
|
|
# Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""This module implements the stackless API on top of py.magic greenlet API
This way it is possible to run concurrence applications on top of normal python
using the greenlet module.
Because the greenlet module uses only 'hard' switching as opposed to stackless 'soft' switching
it is a bit slower (about 35%), but very useful because you don't need to install stackless.
Note that this does not aim to be a complete implementation of stackless on top of greenlets,
just enough of the stackless API to make concurrence run.
This code was inspired by:
http://aigamedev.com/programming-tips/round-robin-multi-tasking and
also by the pypy implementation of the same thing (buggy, not being maintained?) at
https://codespeak.net/viewvc/pypy/dist/pypy/lib/stackless.py?view=markup
"""
try:
from py.magic import greenlet #as of version 1.0 of py, it does not supply greenlets anymore
except ImportError:
from greenlet import greenlet #there is an older package containing just the greenlet lib
from collections import deque
class TaskletExit(SystemExit):pass
import __builtin__
__builtin__.TaskletExit = TaskletExit
class bomb(object):
"""used as a result value for sending exceptions trough a channel"""
def __init__(self, exc_type = None, exc_value = None, exc_traceback = None):
self.type = exc_type
self.value = exc_value
self.traceback = exc_traceback
def raise_(self):
raise self.type, self.value, self.traceback
class channel(object):
"""implementation of stackless's channel object"""
def __init__(self):
self.balance = 0
self.preference = -1
self.queue = deque()
def receive(self):
return _scheduler._receive(self, self.preference)
def send(self, data):
return _scheduler._send(self, data, self.preference)
def send_exception(self, exp_type, *args):
self.send(bomb(exp_type, exp_type(*args)))
def send_sequence(self, iterable):
for item in iterable:
self.send(item)
class tasklet(object):
"""implementation of stackless's tasklet object"""
def __init__(self, f = None, greenlet = None, alive = False):
self.greenlet = greenlet
self.func = f
self.alive = alive
self.blocked = False
self.data = None
def bind(self, func):
if not callable(func):
raise TypeError('tasklet function must be a callable')
self.func = func
def __call__(self, *args, **kwargs):
"""this is where the new task starts to run, e.g. it is where the greenlet is created
and the 'task' is first scheduled to run"""
if self.func is None:
raise TypeError('tasklet function must be a callable')
def _func(*_args, **_kwargs):
try:
self.func(*args, **kwargs)
except TaskletExit:
pass #let it pass silently
except:
import logging
logging.exception('unhandled exception in greenlet')
#don't propagate to parent
finally:
assert _scheduler.current == self
_scheduler.remove(self)
if _scheduler._runnable: #there are more tasklets scheduled to run next
                    #this makes sure that flow will continue in the correct greenlet, e.g. the next in the schedule
self.greenlet.parent = _scheduler._runnable[0].greenlet
self.alive = False
del self.greenlet
del self.func
del self.data
self.greenlet = greenlet(_func)
self.alive = True
_scheduler.append(self)
return self
def kill(self):
_scheduler.throw(self, TaskletExit)
def raise_exception(self, *args):
_scheduler.throw(self, *args)
def __str__(self):
return repr(self)
def __repr__(self):
if hasattr(self, 'name'):
_id = self.name
else:
_id = str(self.func)
return '<tasklet %s at %0x>' % (_id, id(self))
class scheduler(object):
def __init__(self):
self._main_task = tasklet(greenlet = greenlet.getcurrent(), alive = True)
        #all non-blocked tasks are in this queue
        #each task appears only once in this queue
#the current task is the first item in the queue
self._runnable = deque([self._main_task])
def schedule(self):
"""schedules the next tasks and puts the current task back at the queue of runnables"""
self._runnable.rotate(-1)
next_task = self._runnable[0]
next_task.greenlet.switch()
def schedule_block(self):
"""blocks the current task and schedules next"""
self._runnable.popleft()
next_task = self._runnable[0]
next_task.greenlet.switch()
def throw(self, task, *args):
if not task.alive: return #this is what stackless does
assert task.blocked or task in self._runnable
task.greenlet.parent = self._runnable[0].greenlet
if task.blocked:
self._runnable.appendleft(task)
else:
self._runnable.remove(task)
self._runnable.appendleft(task)
task.greenlet.throw(*args)
def _receive(self, channel, preference):
#Receiving 1):
#A tasklet wants to receive and there is
#a queued sending tasklet. The receiver takes
#its data from the sender, unblocks it,
#and inserts it at the end of the runnables.
#The receiver continues with no switch.
#Receiving 2):
#A tasklet wants to receive and there is
#no queued sending tasklet.
#The receiver will become blocked and inserted
#into the queue. The next sender will
#handle the rest through "Sending 1)".
if channel.balance > 0: #some sender
channel.balance -= 1
sender = channel.queue.popleft()
sender.blocked = False
data, sender.data = sender.data, None
if preference == 1:
#sender preference
self._runnable.rotate(-1)
self._runnable.appendleft(sender)
self._runnable.rotate(1)
self.schedule()
else:
#receiver preference
self._runnable.append(sender)
else: #no sender
current = self._runnable[0]
channel.queue.append(current)
channel.balance -= 1
current.blocked = True
try:
self.schedule_block()
except:
channel.queue.remove(current)
channel.balance += 1
current.blocked = False
raise
data, current.data = current.data, None
if isinstance(data, bomb):
data.raise_()
else:
return data
def _send(self, channel, data, preference):
# Sending 1):
# A tasklet wants to send and there is
# a queued receiving tasklet. The sender puts
# its data into the receiver, unblocks it,
# and inserts it at the top of the runnables.
# The receiver is scheduled.
# Sending 2):
# A tasklet wants to send and there is
# no queued receiving tasklet.
# The sender will become blocked and inserted
# into the queue. The next receiver will
# handle the rest through "Receiving 1)".
#print 'send q', channel.queue
if channel.balance < 0: #some receiver
channel.balance += 1
receiver = channel.queue.popleft()
receiver.data = data
receiver.blocked = False
#put receiver just after current task in runnable and schedule (which will pick it up)
if preference == -1: #receiver pref
self._runnable.rotate(-1)
self._runnable.appendleft(receiver)
self._runnable.rotate(1)
self.schedule()
else: #sender pref
self._runnable.append(receiver)
else: #no receiver
current = self.current
channel.queue.append(current)
channel.balance += 1
current.data = data
current.blocked = True
try:
self.schedule_block()
except:
channel.queue.remove(current)
channel.balance -= 1
current.data = None
current.blocked = False
raise
def remove(self, task):
assert task.blocked or task in self._runnable
if task in self._runnable:
self._runnable.remove(task)
def append(self, task):
assert task not in self._runnable
self._runnable.append(task)
@property
def runcount(self):
return len(self._runnable)
@property
def current(self):
return self._runnable[0]
#there is only 1 scheduler, this is it:
_scheduler = scheduler()
def getruncount():
return _scheduler.runcount
def getcurrent():
return _scheduler.current
def schedule():
return _scheduler.schedule()
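# Hedged usage sketch of the API above (illustration only, not part of the
# stackless emulation): a producer tasklet sends one value over a channel, the
# main tasklet receives it, then lets the producer run to completion.
def _example_channel_roundtrip():
    ch = channel()
    def producer():
        ch.send('ping')
    tasklet(producer)()            # bind the function and schedule the tasklet
    value = ch.receive()           # blocks the main tasklet until producer sends
    while getruncount() > 1:       # let the producer tasklet finish
        schedule()
    return value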
|
|
#!/usr/bin/env python
r"""Compute SSP/PCA projections for ECG artifacts.
You can do for example:
$ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" \
--l-freq 1 --h-freq 100 \
--rej-grad 3000 --rej-mag 4000 --rej-eeg 100
"""
from __future__ import print_function
# Authors : Alexandre Gramfort, Ph.D.
# Martin Luessi, Ph.D.
from mne.externals.six import string_types
import os
import sys
import mne
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-i", "--in", dest="raw_in",
help="Input raw FIF file", metavar="FILE")
parser.add_option("--tmin", dest="tmin", type="float",
help="Time before event in seconds",
default=-0.2)
parser.add_option("--tmax", dest="tmax", type="float",
help="Time after event in seconds",
default=0.4)
parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
help="Number of SSP vectors for gradiometers",
default=2)
parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
help="Number of SSP vectors for magnetometers",
default=2)
parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
help="Number of SSP vectors for EEG",
default=2)
parser.add_option("--l-freq", dest="l_freq", type="float",
help="Filter low cut-off frequency in Hz",
default=1)
parser.add_option("--h-freq", dest="h_freq", type="float",
help="Filter high cut-off frequency in Hz",
default=100)
parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float",
help="Filter low cut-off frequency in Hz used "
"for ECG event detection",
default=5)
parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float",
help="Filter high cut-off frequency in Hz used "
"for ECG event detection",
default=35)
parser.add_option("-p", "--preload", dest="preload",
help="Temporary file used during computation "
"(to save memory)",
default=True)
parser.add_option("-a", "--average", dest="average", action="store_true",
help="Compute SSP after averaging",
default=False)
parser.add_option("--proj", dest="proj",
help="Use SSP projections from a fif file.",
default=None)
parser.add_option("--filtersize", dest="filter_length", type="int",
help="Number of taps to use for filtering",
default=2048)
parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
help="Number of jobs to run in parallel",
default=1)
parser.add_option("-c", "--channel", dest="ch_name",
help="Channel to use for ECG detection "
"(Required if no ECG found)",
default=None)
parser.add_option("--rej-grad", dest="rej_grad", type="float",
help="Gradiometers rejection parameter "
"in fT/cm (peak to peak amplitude)",
default=2000)
parser.add_option("--rej-mag", dest="rej_mag", type="float",
help="Magnetometers rejection parameter "
"in fT (peak to peak amplitude)",
default=3000)
parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
help="EEG rejection parameter in uV "
"(peak to peak amplitude)",
default=50)
parser.add_option("--rej-eog", dest="rej_eog", type="float",
help="EOG rejection parameter in uV "
"(peak to peak amplitude)",
default=250)
parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
help="Add EEG average reference proj",
default=False)
parser.add_option("--no-proj", dest="no_proj", action="store_true",
help="Exclude the SSP projectors currently "
"in the fiff file",
default=False)
parser.add_option("--bad", dest="bad_fname",
help="Text file containing bad channels list "
"(one per line)",
default=None)
parser.add_option("--event-id", dest="event_id", type="int",
help="ID to use for events",
default=999)
parser.add_option("--event-raw", dest="raw_event_fname",
help="raw file to use for event detection",
default=None)
parser.add_option("--tstart", dest="tstart", type="float",
help="Start artifact detection after tstart seconds",
default=0.)
parser.add_option("--qrsthr", dest="qrs_threshold", type="string",
help="QRS detection threshold. Between 0 and 1. Can "
"also be 'auto' for automatic selection",
default='auto')
options, args = parser.parse_args()
raw_in = options.raw_in
if raw_in is None:
parser.print_help()
sys.exit(1)
tmin = options.tmin
tmax = options.tmax
n_grad = options.n_grad
n_mag = options.n_mag
n_eeg = options.n_eeg
l_freq = options.l_freq
h_freq = options.h_freq
ecg_l_freq = options.ecg_l_freq
ecg_h_freq = options.ecg_h_freq
average = options.average
preload = options.preload
filter_length = options.filter_length
n_jobs = options.n_jobs
ch_name = options.ch_name
reject = dict(grad=1e-13 * float(options.rej_grad),
mag=1e-15 * float(options.rej_mag),
eeg=1e-6 * float(options.rej_eeg),
eog=1e-6 * float(options.rej_eog))
avg_ref = options.avg_ref
no_proj = options.no_proj
bad_fname = options.bad_fname
event_id = options.event_id
proj_fname = options.proj
raw_event_fname = options.raw_event_fname
tstart = options.tstart
qrs_threshold = options.qrs_threshold
if qrs_threshold != 'auto':
try:
qrs_threshold = float(qrs_threshold)
except ValueError:
raise ValueError('qrsthr must be "auto" or a float')
if bad_fname is not None:
with open(bad_fname, 'r') as fid:
bads = [w.rstrip() for w in fid.readlines()]
print('Bad channels read : %s' % bads)
else:
bads = []
if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
prefix = raw_in[:-8]
else:
prefix = raw_in[:-4]
ecg_event_fname = prefix + '_ecg-eve.fif'
if average:
ecg_proj_fname = prefix + '_ecg_avg-proj.fif'
else:
ecg_proj_fname = prefix + '_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_in, preload=preload)
if raw_event_fname is not None:
raw_event = mne.io.read_raw_fif(raw_event_fname)
else:
raw_event = raw
flat = None # XXX : not exposed to the user
projs, events = mne.preprocessing.compute_proj_ecg(
raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs, ch_name, reject, flat, bads, avg_ref,
no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart, qrs_threshold,
copy=False)
raw.close()
if raw_event_fname is not None:
raw_event.close()
if proj_fname is not None:
print('Including SSP projections from : %s' % proj_fname)
# append the ecg projs, so they are last in the list
projs = mne.read_proj(proj_fname) + projs
if isinstance(preload, string_types) and os.path.exists(preload):
os.remove(preload)
print("Writing ECG projections in %s" % ecg_proj_fname)
mne.write_proj(ecg_proj_fname, projs)
print("Writing ECG events in %s" % ecg_event_fname)
mne.write_events(ecg_event_fname, events)
is_main = (__name__ == '__main__')
if is_main:
run()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.manila import share as mshare
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
manila_template = """
heat_template_version: 2015-04-30
resources:
test_share:
type: OS::Manila::Share
properties:
share_protocol: NFS
size: 1
access_rules:
- access_to: 127.0.0.1
access_type: ip
access_level: ro
name: basic_test_share
description: basic test share
is_public: True
metadata: {"key": "value"}
"""
class DummyShare(object):
def __init__(self):
self.availability_zone = 'az'
self.host = 'host'
self.export_locations = 'el'
self.share_server_id = 'id'
self.created_at = 'ca'
self.status = 's'
self.project_id = 'p_id'
self.to_dict = lambda: {'attr': 'val'}
class ManilaShareTest(common.HeatTestCase):
def setUp(self):
super(ManilaShareTest, self).setUp()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
self.fake_share = mock.MagicMock(id="test_share_id")
self.available_share = mock.MagicMock(
id="test_share_id",
status=mshare.ManilaShare.STATUS_AVAILABLE)
self.failed_share = mock.MagicMock(
id="test_share_id",
status=mshare.ManilaShare.STATUS_ERROR)
self.deleting_share = mock.MagicMock(
id="test_share_id",
status=mshare.ManilaShare.STATUS_DELETING)
def _init_share(self, stack_name):
tmp = template_format.parse(manila_template)
self.stack = utils.parse_stack(tmp, stack_name=stack_name)
res_def = self.stack.t.resource_definitions(self.stack)["test_share"]
share = mshare.ManilaShare("test_share", res_def, self.stack)
# replace clients and plugins with mocks
mock_client = mock.MagicMock()
client = mock.MagicMock(return_value=mock_client)
share.client = client
mock_plugin = mock.MagicMock()
client_plugin = mock.MagicMock(return_value=mock_plugin)
share.client_plugin = client_plugin
return share
def _create_share(self, stack_name):
share = self._init_share(stack_name)
share.client().shares.create.return_value = self.fake_share
share.client().shares.get.return_value = self.available_share
scheduler.TaskRunner(share.create)()
return share
def test_share_create(self):
share = self._create_share("stack_share_create")
expected_state = (share.CREATE, share.COMPLETE)
self.assertEqual(expected_state, share.state,
"Share is not in expected state")
self.assertEqual(self.fake_share.id, share.resource_id,
"Expected share ID was not propagated to share")
share.client().shares.allow.assert_called_once_with(
access="127.0.0.1", access_level="ro",
share=share.resource_id, access_type="ip")
args, kwargs = share.client().shares.create.call_args
message_end = " parameter was not passed to manila client"
self.assertEqual(u"NFS", kwargs["share_proto"],
"Share protocol" + message_end)
self.assertEqual(1, kwargs["size"], "Share size" + message_end)
self.assertEqual("basic_test_share", kwargs["name"],
"Share name" + message_end)
self.assertEqual("basic test share", kwargs["description"],
"Share description" + message_end)
self.assertEqual({u"key": u"value"}, kwargs["metadata"],
"Metadata" + message_end)
self.assertTrue(kwargs["is_public"])
share.client().shares.get.assert_called_once_with(self.fake_share.id)
def test_share_create_fail(self):
share = self._init_share("stack_share_create_fail")
share.client().shares.get.return_value = self.failed_share
exc = self.assertRaises(exception.ResourceInError,
share.check_create_complete,
self.failed_share)
self.assertIn("Error during creation", six.text_type(exc))
def test_share_create_unknown_status(self):
share = self._init_share("stack_share_create_unknown")
share.client().shares.get.return_value = self.deleting_share
exc = self.assertRaises(exception.ResourceUnknownStatus,
share.check_create_complete,
self.deleting_share)
self.assertIn("Unknown status", six.text_type(exc))
def test_share_delete(self):
share = self._create_share("stack_share_delete")
share.client().shares.get.side_effect = exception.NotFound()
share.client_plugin().ignore_not_found.return_value = None
scheduler.TaskRunner(share.delete)()
share.client().shares.delete.assert_called_once_with(
self.fake_share.id)
def test_share_delete_fail(self):
share = self._create_share("stack_share_delete_fail")
share.client().shares.delete.return_value = None
share.client().shares.get.return_value = self.failed_share
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(share.delete))
self.assertIn("Error during deleting share", six.text_type(exc))
def test_share_check(self):
share = self._create_share("stack_share_check")
scheduler.TaskRunner(share.check)()
expected_state = (share.CHECK, share.COMPLETE)
self.assertEqual(expected_state, share.state,
"Share is not in expected state")
def test_share_check_fail(self):
share = self._create_share("stack_share_check_fail")
share.client().shares.get.return_value = self.failed_share
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(share.check))
self.assertIn("Error: resources.test_share: 'status': expected "
"'['available']'", six.text_type(exc))
def test_share_update(self):
share = self._create_share("stack_share_update")
updated_share_props = copy.deepcopy(share.properties.data)
updated_share_props[mshare.ManilaShare.DESCRIPTION] = "desc"
updated_share_props[mshare.ManilaShare.NAME] = "name"
updated_share_props[mshare.ManilaShare.IS_PUBLIC] = True
share.client().shares.update.return_value = None
after = rsrc_defn.ResourceDefinition(share.name, share.type(),
updated_share_props)
scheduler.TaskRunner(share.update, after)()
kwargs = {
"display_name": "name",
"display_description": "desc",
}
share.client().shares.update.assert_called_once_with(
share.resource_id, **kwargs)
def test_share_update_access_rules(self):
share = self._create_share("stack_share_update_access_rules")
updated_share_props = copy.deepcopy(share.properties.data)
updated_share_props[mshare.ManilaShare.ACCESS_RULES] = [
{mshare.ManilaShare.ACCESS_TO: "127.0.0.2",
mshare.ManilaShare.ACCESS_TYPE: "ip",
mshare.ManilaShare.ACCESS_LEVEL: "ro"}]
share.client().shares.deny.return_value = None
current_rule = {
mshare.ManilaShare.ACCESS_TO: "127.0.0.1",
mshare.ManilaShare.ACCESS_TYPE: "ip",
mshare.ManilaShare.ACCESS_LEVEL: "ro",
"id": "test_access_rule"
}
rule_tuple = collections.namedtuple("DummyRule",
list(current_rule.keys()))
share.client().shares.access_list.return_value = [
rule_tuple(**current_rule)]
after = rsrc_defn.ResourceDefinition(share.name, share.type(),
updated_share_props)
scheduler.TaskRunner(share.update, after)()
share.client().shares.access_list.assert_called_once_with(
share.resource_id)
share.client().shares.allow.assert_called_with(
share=share.resource_id, access_type="ip",
access="127.0.0.2", access_level="ro")
share.client().shares.deny.assert_called_once_with(
share=share.resource_id, id="test_access_rule")
def test_share_update_metadata(self):
share = self._create_share("stack_share_update_metadata")
updated_share_props = copy.deepcopy(share.properties.data)
updated_share_props[mshare.ManilaShare.METADATA] = {
"fake_key": "fake_value"}
share.client().shares.update_all_metadata.return_value = None
after = rsrc_defn.ResourceDefinition(share.name, share.type(),
updated_share_props)
scheduler.TaskRunner(share.update, after)()
share.client().shares.update_all_metadata.assert_called_once_with(
share.resource_id,
updated_share_props[mshare.ManilaShare.METADATA])
def test_attributes(self):
share = self._create_share("share")
share.client().shares.get.return_value = DummyShare()
self.assertEqual('az', share.FnGetAtt('availability_zone'))
self.assertEqual('host', share.FnGetAtt('host'))
self.assertEqual('el', share.FnGetAtt('export_locations'))
self.assertEqual('id', share.FnGetAtt('share_server_id'))
self.assertEqual('ca', share.FnGetAtt('created_at'))
self.assertEqual('s', share.FnGetAtt('status'))
self.assertEqual('p_id', share.FnGetAtt('project_id'))
self.assertEqual({'attr': 'val'}, share.FnGetAtt('show'))
|
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import sys, numpy
from io_funcs.binary_io import BinaryIOCollection
import logging
from scipy.stats.stats import pearsonr
class DistortionComputation(object):
def __init__(self, cmp_dim, mgc_dim, bap_dim, lf0_dim):
self.total_frame_number = 0
self.distortion = 0.0
self.bap_distortion = 0.0
self.f0_distortion = 0.0
self.vuv_error = 0.0
self.cmp_dim = cmp_dim
self.mgc_dim = mgc_dim
self.bap_dim = bap_dim
self.lf0_dim = lf0_dim
def compute_distortion(self, file_id_list, reference_dir, generation_dir, cmp_ext, mgc_ext, bap_ext, lf0_ext):
total_voiced_frame_number = 0
for file_id in file_id_list:
reference_file_name = reference_dir + '/' + file_id + cmp_ext
mgc_file_name = generation_dir + '/' + file_id + mgc_ext
bap_file_name = generation_dir + '/' + file_id + bap_ext
lf0_file_name = generation_dir + '/' + file_id + lf0_ext
reference_cmp, ref_frame_number = self.load_binary_file(reference_file_name, self.cmp_dim)
generation_mgc, mgc_frame_number = self.load_binary_file(mgc_file_name, self.mgc_dim)
generation_bap, bap_frame_number = self.load_binary_file(bap_file_name, self.bap_dim)
generation_lf0, lf0_frame_number = self.load_binary_file(lf0_file_name, self.lf0_dim)
if ref_frame_number != mgc_frame_number:
print "The number of frames is not the same: %d vs %d. Error in compute_distortion.py\n." %(ref_frame_number, mgc_frame_number)
sys.exit(1)
reference_mgc = reference_cmp[:, 0:self.mgc_dim]
reference_lf0 = reference_cmp[:, self.mgc_dim*3:self.mgc_dim*3+self.lf0_dim]
reference_vuv = reference_cmp[:, self.mgc_dim*3+self.lf0_dim*3:self.mgc_dim*3+self.lf0_dim*3+1]
reference_bap = reference_cmp[:, self.mgc_dim*3+self.lf0_dim*3+1:self.mgc_dim*3+self.lf0_dim*3+1+self.bap_dim]
reference_lf0[reference_vuv<0.5] = 0.0
# print reference_vuv
temp_distortion = self.compute_mse(reference_mgc[:, 1:self.mgc_dim], generation_mgc[:, 1:self.mgc_dim])
self.distortion += temp_distortion * (10 /numpy.log(10)) * numpy.sqrt(2.0)
temp_bap_distortion = self.compute_mse(reference_bap, generation_bap)
self.bap_distortion += temp_bap_distortion * (10 /numpy.log(10)) * numpy.sqrt(2.0)
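            # The (10 / ln 10) * sqrt(2) factor converts the summed Euclidean
            # cepstral distance into dB, i.e. the conventional mel-cepstral
            # distortion (MCD) scaling; the same scaling is applied to the
            # band aperiodicity (BAP) distortion above.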
temp_f0_distortion, temp_vuv_error, voiced_frame_number = self.compute_f0_mse(reference_lf0, generation_lf0)
self.f0_distortion += temp_f0_distortion
self.vuv_error += temp_vuv_error
self.total_frame_number += ref_frame_number
total_voiced_frame_number += voiced_frame_number
self.distortion /= float(self.total_frame_number)
self.bap_distortion /= float(self.total_frame_number)
self.f0_distortion /= total_voiced_frame_number
self.f0_distortion = numpy.sqrt(self.f0_distortion)
self.vuv_error /= float(self.total_frame_number)
return self.distortion, self.bap_distortion, self.f0_distortion, self.vuv_error
def compute_f0_mse(self, ref_data, gen_data):
ref_vuv_vector = numpy.zeros((ref_data.size, 1))
gen_vuv_vector = numpy.zeros((ref_data.size, 1))
ref_vuv_vector[ref_data > 0.0] = 1.0
gen_vuv_vector[gen_data > 0.0] = 1.0
sum_ref_gen_vector = ref_vuv_vector + gen_vuv_vector
voiced_ref_data = ref_data[sum_ref_gen_vector == 2.0]
voiced_gen_data = gen_data[sum_ref_gen_vector == 2.0]
voiced_frame_number = voiced_gen_data.size
f0_mse = numpy.sum(((numpy.exp(voiced_ref_data) - numpy.exp(voiced_gen_data)) ** 2))
# f0_mse = numpy.sum((((voiced_ref_data) - (voiced_gen_data)) ** 2))
vuv_error_vector = sum_ref_gen_vector[sum_ref_gen_vector == 0.0]
vuv_error = numpy.sum(sum_ref_gen_vector[sum_ref_gen_vector == 1.0])
return f0_mse, vuv_error, voiced_frame_number
def compute_mse(self, ref_data, gen_data):
diff = (ref_data - gen_data) ** 2
sum_diff = numpy.sum(diff, axis=1)
sum_diff = numpy.sqrt(sum_diff) # ** 0.5
sum_diff = numpy.sum(sum_diff, axis=0)
return sum_diff
def load_binary_file(self, file_name, dimension):
fid_lab = open(file_name, 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
frame_number = features.size / dimension
features = features[:(dimension * frame_number)]
features = features.reshape((-1, dimension))
return features, frame_number
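# Example usage (a minimal sketch; the file ids, directories and feature
# dimensions below are hypothetical and must match the experiment's feature
# configuration):
#
#   computer = DistortionComputation(cmp_dim=187, mgc_dim=60, bap_dim=1, lf0_dim=1)
#   mcd, bap_dist, f0_rmse, vuv_error = computer.compute_distortion(
#       ['utt_0001', 'utt_0002'], '/path/to/ref', '/path/to/gen',
#       '.cmp', '.mgc', '.bap', '.lf0')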
'''
To be refined: a generic class for various features.
'''
class IndividualDistortionComp(object):
def __init__(self):
self.logger = logging.getLogger('computer_distortion')
def compute_distortion(self, file_id_list, reference_dir, generation_dir, file_ext, feature_dim):
total_voiced_frame_number = 0
distortion = 0.0
vuv_error = 0
total_frame_number = 0
io_funcs = BinaryIOCollection()
ref_all_files_data = numpy.reshape(numpy.array([]), (-1,1))
gen_all_files_data = numpy.reshape(numpy.array([]), (-1,1))
for file_id in file_id_list:
ref_file_name = reference_dir + '/' + file_id + file_ext
gen_file_name = generation_dir + '/' + file_id + file_ext
# print ref_file_name
# print gen_file_name
ref_data, ref_frame_number = io_funcs.load_binary_file_frame(ref_file_name, feature_dim)
gen_data, gen_frame_number = io_funcs.load_binary_file_frame(gen_file_name, feature_dim)
if ref_frame_number != gen_frame_number:
self.logger.critical("The number of frames is not the same: %d vs %d. Error in compute_distortion.py\n." %(ref_frame_number, gen_frame_number))
raise
if file_ext == '.lf0':
ref_all_files_data = numpy.concatenate((ref_all_files_data, ref_data), axis=0)
gen_all_files_data = numpy.concatenate((gen_all_files_data, gen_data), axis=0)
temp_distortion, temp_vuv_error, voiced_frame_number = self.compute_f0_mse(ref_data, gen_data)
vuv_error += temp_vuv_error
total_voiced_frame_number += voiced_frame_number
elif file_ext == '.dur':
ref_data = numpy.reshape(numpy.sum(ref_data, axis=1), (-1, 1))
gen_data = numpy.reshape(numpy.sum(gen_data, axis=1), (-1, 1))
ref_all_files_data = numpy.concatenate((ref_all_files_data, ref_data), axis=0)
gen_all_files_data = numpy.concatenate((gen_all_files_data, gen_data), axis=0)
                continue
elif file_ext == '.mgc':
temp_distortion = self.compute_mse(ref_data[:, 1:feature_dim], gen_data[:, 1:feature_dim])
else:
temp_distortion = self.compute_mse(ref_data, gen_data)
distortion += temp_distortion
total_frame_number += ref_frame_number
if file_ext == '.dur':
dur_rmse = self.compute_rmse(ref_all_files_data, gen_all_files_data)
dur_corr = self.compute_corr(ref_all_files_data, gen_all_files_data)
return dur_rmse, dur_corr
elif file_ext == '.lf0':
distortion /= float(total_voiced_frame_number)
vuv_error /= float(total_frame_number)
distortion = numpy.sqrt(distortion)
f0_corr = self.compute_f0_corr(ref_all_files_data, gen_all_files_data)
return distortion, f0_corr, vuv_error
else:
distortion /= float(total_frame_number)
return distortion
def compute_f0_mse(self, ref_data, gen_data):
ref_vuv_vector = numpy.zeros((ref_data.size, 1))
gen_vuv_vector = numpy.zeros((ref_data.size, 1))
ref_vuv_vector[ref_data > 0.0] = 1.0
gen_vuv_vector[gen_data > 0.0] = 1.0
sum_ref_gen_vector = ref_vuv_vector + gen_vuv_vector
voiced_ref_data = ref_data[sum_ref_gen_vector == 2.0]
voiced_gen_data = gen_data[sum_ref_gen_vector == 2.0]
voiced_frame_number = voiced_gen_data.size
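        # F0 error is computed in Hz: the (log-scale) values are exponentiated
        # before differencing, and only frames that are voiced in both the
        # reference and the generated data contribute. vuv_error below counts
        # the frames whose voiced/unvoiced decisions disagree.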
f0_mse = (numpy.exp(voiced_ref_data) - numpy.exp(voiced_gen_data)) ** 2
f0_mse = numpy.sum((f0_mse))
vuv_error_vector = sum_ref_gen_vector[sum_ref_gen_vector == 0.0]
vuv_error = numpy.sum(sum_ref_gen_vector[sum_ref_gen_vector == 1.0])
return f0_mse, vuv_error, voiced_frame_number
def compute_f0_corr(self, ref_data, gen_data):
ref_vuv_vector = numpy.zeros((ref_data.size, 1))
gen_vuv_vector = numpy.zeros((ref_data.size, 1))
ref_vuv_vector[ref_data > 0.0] = 1.0
gen_vuv_vector[gen_data > 0.0] = 1.0
sum_ref_gen_vector = ref_vuv_vector + gen_vuv_vector
voiced_ref_data = ref_data[sum_ref_gen_vector == 2.0]
voiced_gen_data = gen_data[sum_ref_gen_vector == 2.0]
f0_corr = self.compute_corr(numpy.exp(voiced_ref_data), numpy.exp(voiced_gen_data))
return f0_corr
def compute_corr(self, ref_data, gen_data):
corr_coef = pearsonr(ref_data, gen_data)
return corr_coef[0]
def compute_rmse(self, ref_data, gen_data):
diff = (ref_data - gen_data) ** 2
total_frame_number = ref_data.size
sum_diff = numpy.sum(diff)
rmse = numpy.sqrt(sum_diff/total_frame_number)
return rmse
def compute_mse(self, ref_data, gen_data):
diff = (ref_data - gen_data) ** 2
sum_diff = numpy.sum(diff, axis=1)
sum_diff = numpy.sqrt(sum_diff) # ** 0.5
sum_diff = numpy.sum(sum_diff, axis=0)
return sum_diff
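# Example usage (a minimal sketch; paths, file ids and dimensions are
# hypothetical):
#
#   comp = IndividualDistortionComp()
#   mgc_distortion = comp.compute_distortion(
#       ['utt_0001'], '/path/to/ref', '/path/to/gen', '.mgc', 60)
#   f0_rmse, f0_corr, vuv_error = comp.compute_distortion(
#       ['utt_0001'], '/path/to/ref', '/path/to/gen', '.lf0', 1)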
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BERT classification finetuning runner in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import os
import REDACTED
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
from REDACTED.tf2_common.modeling import model_training_utils
from REDACTED.tf2_bert import optimization
from REDACTED.tf2_bert.bert import bert_models
from REDACTED.tf2_bert.bert import common_flags
from REDACTED.tf2_bert.bert import configs as bert_configs
from REDACTED.tf2_bert.bert import input_pipeline
from REDACTED.tf2_bert.bert import model_saving_utils
from REDACTED.tf2_common.utils.misc import distribution_utils
from REDACTED.tf2_common.utils.misc import keras_utils
flags.DEFINE_enum(
'mode', 'train_and_eval', ['train_and_eval', 'export_only'],
'One of {"train_and_eval", "export_only"}. `train_and_eval`: '
'trains the model and evaluates in the meantime. '
'`export_only`: will take the latest checkpoint inside '
'model_dir and export a `SavedModel`.')
flags.DEFINE_string('train_data_path', None,
'Path to training data for BERT classifier.')
flags.DEFINE_string('eval_data_path', None,
'Path to evaluation data for BERT classifier.')
# Model training specific flags.
flags.DEFINE_string(
'input_meta_data_path', None,
'Path to file that contains meta data about input '
'to be used for training and evaluation.')
flags.DEFINE_integer('train_batch_size', 32, 'Batch size for training.')
flags.DEFINE_integer('eval_batch_size', 32, 'Batch size for evaluation.')
common_flags.define_common_bert_flags()
FLAGS = flags.FLAGS
def get_loss_fn(num_classes, loss_factor=1.0):
"""Gets the classification loss function."""
def classification_loss_fn(labels, logits):
"""Classification loss."""
labels = tf.squeeze(labels)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(
tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(
tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
loss *= loss_factor
return loss
return classification_loss_fn
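# Shape note for the loss above: `labels` is a [batch] (or [batch, 1]) tensor of
# integer class ids and `logits` is [batch, num_classes]; the returned closure
# yields a scalar mean cross-entropy, multiplied by `loss_factor`.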
def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size,
is_training):
"""Gets a closure to create a dataset."""
def _dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
batch_size = ctx.get_per_replica_batch_size(
global_batch_size) if ctx else global_batch_size
dataset = input_pipeline.create_classifier_dataset(
input_file_pattern,
max_seq_length,
batch_size,
is_training=is_training,
input_pipeline_context=ctx)
return dataset
return _dataset_fn
def run_bert_classifier(strategy,
bert_config,
input_meta_data,
model_dir,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
warmup_steps,
initial_lr,
init_checkpoint,
train_input_fn,
eval_input_fn,
custom_callbacks=None,
run_eagerly=False,
use_keras_compile_fit=False):
"""Run BERT classifier training using low-level API."""
max_seq_length = input_meta_data['max_seq_length']
num_classes = input_meta_data['num_labels']
def _get_classifier_model():
"""Gets a classifier model."""
classifier_model, core_model = (
bert_models.classifier_model(
bert_config,
num_classes,
max_seq_length,
hub_module_url=FLAGS.hub_module_url,
hub_module_trainable=FLAGS.hub_module_trainable))
classifier_model.optimizer = optimization.create_optimizer(
initial_lr, steps_per_epoch * epochs, warmup_steps)
if FLAGS.fp16_implementation == 'graph_rewrite':
# Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as
# determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
# which will ensure tf.compat.v2.keras.mixed_precision and
# tf.train.experimental.enable_mixed_precision_graph_rewrite do not double
# up.
classifier_model.optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
classifier_model.optimizer)
return classifier_model, core_model
# During distributed training, loss used for gradient computation is
# summed over from all replicas. When Keras compile/fit() API is used,
# the fit() API internally normalizes the loss by dividing the loss by
# the number of replicas used for computation. However, when custom
# training loop is used this is not done automatically and should be
# done manually by the end user.
loss_multiplier = 1.0
if FLAGS.scale_loss and not use_keras_compile_fit:
loss_multiplier = 1.0 / strategy.num_replicas_in_sync
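  # For example (hypothetical numbers): with 8 replicas in sync and
  # FLAGS.scale_loss enabled, each replica computes loss * 1/8, so summing the
  # per-replica gradients reproduces the gradient of the global mean loss.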
loss_fn = get_loss_fn(num_classes, loss_factor=loss_multiplier)
# Defines evaluation metrics function, which will create metrics in the
# correct device and strategy scope.
def metric_fn():
return tf.keras.metrics.SparseCategoricalAccuracy(
'test_accuracy', dtype=tf.float32)
if use_keras_compile_fit:
# Start training using Keras compile/fit API.
logging.info('Training using TF 2.0 Keras compile/fit API with '
'distribution strategy.')
return run_keras_compile_fit(
model_dir,
strategy,
_get_classifier_model,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
eval_steps,
custom_callbacks=None)
# Use user-defined loop to start training.
logging.info('Training using customized training loop TF 2.0 with '
'distribution strategy.')
return model_training_utils.run_customized_training_loop(
strategy=strategy,
model_fn=_get_classifier_model,
loss_fn=loss_fn,
model_dir=model_dir,
steps_per_epoch=steps_per_epoch,
steps_per_loop=steps_per_loop,
epochs=epochs,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
eval_steps=eval_steps,
init_checkpoint=init_checkpoint,
metric_fn=metric_fn,
custom_callbacks=custom_callbacks,
run_eagerly=run_eagerly)
def run_keras_compile_fit(model_dir,
strategy,
model_fn,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
eval_steps,
custom_callbacks=None):
"""Runs BERT classifier model using Keras compile/fit API."""
with strategy.scope():
training_dataset = train_input_fn()
evaluation_dataset = eval_input_fn()
bert_model, sub_model = model_fn()
optimizer = bert_model.optimizer
if init_checkpoint:
checkpoint = tf.train.Checkpoint(model=sub_model)
checkpoint.restore(init_checkpoint).assert_existing_objects_matched()
bert_model.compile(optimizer=optimizer, loss=loss_fn, metrics=[metric_fn()])
summary_dir = os.path.join(model_dir, 'summaries')
summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
checkpoint_path = os.path.join(model_dir, 'checkpoint')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
checkpoint_path, save_weights_only=True)
if custom_callbacks is not None:
custom_callbacks += [summary_callback, checkpoint_callback]
else:
custom_callbacks = [summary_callback, checkpoint_callback]
bert_model.fit(
x=training_dataset,
validation_data=evaluation_dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_steps=eval_steps,
callbacks=custom_callbacks)
return bert_model
def export_classifier(model_export_path, input_meta_data,
restore_model_using_load_weights,
bert_config, model_dir):
"""Exports a trained model as a `SavedModel` for inference.
Args:
model_export_path: a string specifying the path to the SavedModel directory.
input_meta_data: dictionary containing meta data about input and model.
restore_model_using_load_weights: Whether to use checkpoint.restore() API
for custom checkpoint or to use model.load_weights() API.
There are 2 different ways to save checkpoints. One is using
tf.train.Checkpoint and another is using Keras model.save_weights().
Custom training loop implementation uses tf.train.Checkpoint API
and Keras ModelCheckpoint callback internally uses model.save_weights()
      API. Since these two APIs cannot be used together, the model loading
      logic must take into account how the model checkpoint was saved.
bert_config: Bert configuration file to define core bert layers.
model_dir: The directory where the model weights and training/evaluation
summaries are stored.
Raises:
    ValueError: Export path is not specified, got an empty string or None.
"""
if not model_export_path:
raise ValueError('Export path is not specified: %s' % model_export_path)
if not model_dir:
    raise ValueError('Model directory is not specified: %s' % model_dir)
# Export uses float32 for now, even if training uses mixed precision.
tf.keras.mixed_precision.experimental.set_policy('float32')
classifier_model = bert_models.classifier_model(
bert_config, input_meta_data['num_labels'],
input_meta_data['max_seq_length'])[0]
model_saving_utils.export_bert_model(
model_export_path,
model=classifier_model,
checkpoint_dir=model_dir,
restore_model_using_load_weights=restore_model_using_load_weights)
def run_bert(strategy,
input_meta_data,
model_config,
train_input_fn=None,
eval_input_fn=None):
"""Run BERT training."""
if FLAGS.mode == 'export_only':
# As Keras ModelCheckpoint callback used with Keras compile/fit() API
# internally uses model.save_weights() to save checkpoints, we must
# use model.load_weights() when Keras compile/fit() is used.
export_classifier(FLAGS.model_export_path, input_meta_data,
FLAGS.use_keras_compile_fit,
model_config, FLAGS.model_dir)
return
if FLAGS.mode != 'train_and_eval':
raise ValueError('Unsupported mode is specified: %s' % FLAGS.mode)
# Enables XLA in Session Config. Should not be set for TPU.
keras_utils.set_config_v2(FLAGS.enable_xla)
epochs = FLAGS.num_train_epochs
train_data_size = input_meta_data['train_data_size']
steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)
warmup_steps = int(epochs * train_data_size * 0.1 / FLAGS.train_batch_size)
eval_steps = int(
math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))
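  # For example (hypothetical numbers): train_data_size=100000 and
  # train_batch_size=32 give steps_per_epoch=3125; with num_train_epochs=3 the
  # warmup covers int(3 * 100000 * 0.1 / 32) = 937 steps, i.e. roughly 10% of
  # the total training steps.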
if not strategy:
raise ValueError('Distribution strategy has not been specified.')
trained_model = run_bert_classifier(
strategy,
model_config,
input_meta_data,
FLAGS.model_dir,
epochs,
steps_per_epoch,
FLAGS.steps_per_loop,
eval_steps,
warmup_steps,
FLAGS.learning_rate,
FLAGS.init_checkpoint,
train_input_fn,
eval_input_fn,
run_eagerly=FLAGS.run_eagerly,
use_keras_compile_fit=FLAGS.use_keras_compile_fit)
if FLAGS.model_export_path:
# As Keras ModelCheckpoint callback used with Keras compile/fit() API
# internally uses model.save_weights() to save checkpoints, we must
# use model.load_weights() when Keras compile/fit() is used.
model_saving_utils.export_bert_model(
FLAGS.model_export_path,
model=trained_model,
restore_model_using_load_weights=FLAGS.use_keras_compile_fit)
return trained_model
def main(_):
# Users should always run this script under TF 2.x
tf.enable_v2_behavior()
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
if not FLAGS.model_dir:
FLAGS.model_dir = '/tmp/bert20/'
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
max_seq_length = input_meta_data['max_seq_length']
train_input_fn = get_dataset_fn(
FLAGS.train_data_path,
max_seq_length,
FLAGS.train_batch_size,
is_training=True)
eval_input_fn = get_dataset_fn(
FLAGS.eval_data_path,
max_seq_length,
FLAGS.eval_batch_size,
is_training=False)
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
run_bert(strategy, input_meta_data, bert_config, train_input_fn,
eval_input_fn)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('input_meta_data_path')
flags.mark_flag_as_required('model_dir')
app.run(main)
|
|
#!/usr/bin/env python
"""Multithreaded interactive interpreter with GTK and Matplotlib support.
WARNING:
As of 2010/06/25, this is not working, at least on Linux.
I have disabled it as a runnable script. - EF
Usage:
pyint-gtk.py -> starts shell with gtk thread running separately
pyint-gtk.py -pylab [filename] -> initializes matplotlib, optionally running
the named file. The shell starts after the file is executed.
Threading code taken from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65109, by Brian
McErlean and John Finlay.
Matplotlib support taken from interactive.py in the matplotlib distribution.
Also borrows liberally from code.py in the Python standard library."""
from __future__ import print_function
__author__ = "Fernando Perez <[email protected]>"
import sys
import code
import threading
import gobject
import gtk
try:
import readline
except ImportError:
has_readline = False
else:
has_readline = True
class MTConsole(code.InteractiveConsole):
"""Simple multi-threaded shell"""
def __init__(self, on_kill=None, *args, **kw):
code.InteractiveConsole.__init__(self, *args, **kw)
self.code_to_run = None
self.ready = threading.Condition()
self._kill = False
if on_kill is None:
on_kill = []
# Check that all things to kill are callable:
for _ in on_kill:
if not callable(_):
raise TypeError('on_kill must be a list of callables')
self.on_kill = on_kill
# Set up tab-completer
if has_readline:
import rlcompleter
try: # this form only works with python 2.3
self.completer = rlcompleter.Completer(self.locals)
except: # simpler for py2.2
self.completer = rlcompleter.Completer()
readline.set_completer(self.completer.complete)
# Use tab for completions
readline.parse_and_bind('tab: complete')
# This forces readline to automatically print the above list when tab
# completion is set to 'complete'.
readline.parse_and_bind('set show-all-if-ambiguous on')
# Bindings for incremental searches in the history. These searches
# use the string typed so far on the command line and search
# anything in the previous input history containing them.
readline.parse_and_bind('"\C-r": reverse-search-history')
readline.parse_and_bind('"\C-s": forward-search-history')
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
        One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is True in case 2, False in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# Case 3
# Store code in self, so the execution thread can handle it
self.ready.acquire()
self.code_to_run = code
self.ready.wait() # Wait until processed in timeout interval
self.ready.release()
return False
def runcode(self):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback."""
self.ready.acquire()
if self._kill:
print('Closing threads...')
sys.stdout.flush()
for tokill in self.on_kill:
tokill()
print('Done.')
if self.code_to_run is not None:
self.ready.notify()
code.InteractiveConsole.runcode(self, self.code_to_run)
self.code_to_run = None
self.ready.release()
return True
def kill(self):
"""Kill the thread, returning when it has been shut down."""
self.ready.acquire()
self._kill = True
self.ready.release()
class GTKInterpreter(threading.Thread):
"""Run gtk.main in the main thread and a python interpreter in a
separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
GTK timeout callback.
"""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self, banner=None):
threading.Thread.__init__(self)
self.banner = banner
self.shell = MTConsole(on_kill=[gtk.main_quit])
def run(self):
self.pre_interact()
self.shell.interact(self.banner)
self.shell.kill()
def mainloop(self):
self.start()
gobject.timeout_add(self.TIMEOUT, self.shell.runcode)
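        # runcode() is polled from the GTK main loop every TIMEOUT ms; it picks
        # up any code object stored by runsource() (which runs in the
        # interpreter thread) and executes it, using self.ready as the
        # handshake between the two threads.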
try:
if gtk.gtk_version[0] >= 2:
gtk.gdk.threads_init()
except AttributeError:
pass
gtk.main()
self.join()
def pre_interact(self):
"""This method should be overridden by subclasses.
It gets called right before interact(), but after the thread starts.
Typically used to push initialization code into the interpreter"""
pass
class MatplotLibInterpreter(GTKInterpreter):
"""Threaded interpreter with matplotlib support.
Note that this explicitly sets GTKAgg as the backend, since it has
specific GTK hooks in it."""
def __init__(self, banner=None):
banner = """\nWelcome to matplotlib, a MATLAB-like python environment.
help(matlab) -> help on matlab compatible commands from matplotlib.
help(plotting) -> help on plotting commands.
"""
GTKInterpreter.__init__(self, banner)
def pre_interact(self):
"""Initialize matplotlib before user interaction begins"""
push = self.shell.push
# Code to execute in user's namespace
lines = ["import matplotlib",
"matplotlib.use('GTKAgg')",
"matplotlib.interactive(1)",
"import matplotlib.pylab as pylab",
"from matplotlib.pylab import *\n"]
        for line in lines:
            push(line)
# Execute file if given.
if len(sys.argv) > 1:
import matplotlib
matplotlib.interactive(0) # turn off interaction
fname = sys.argv[1]
try:
                inFile = open(fname, 'r')
except IOError:
print('*** ERROR *** Could not read file <%s>' % fname)
else:
print('*** Executing file <%s>:' % fname)
for line in inFile:
if line.lstrip().find('show()') == 0:
continue
print('>>', line)
push(line)
inFile.close()
matplotlib.interactive(1) # turn on interaction
if __name__ == '__main__':
print("This demo is not presently functional, so running")
print("it as a script has been disabled.")
sys.exit()
# Quick sys.argv hack to extract the option and leave filenames in sys.argv.
# For real option handling, use optparse or getopt.
if len(sys.argv) > 1 and sys.argv[1] == '-pylab':
sys.argv = [sys.argv[0]] + sys.argv[2:]
MatplotLibInterpreter().mainloop()
else:
GTKInterpreter().mainloop()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from ducktape.mark import matrix, ignore
from ducktape.mark.resource import cluster
from ducktape.tests.test import Test
from ducktape.utils.util import wait_until
from kafkatest.services.kafka import KafkaService
from kafkatest.services.streams import StreamsSmokeTestDriverService, StreamsSmokeTestJobRunnerService, \
StreamsUpgradeTestJobRunnerService
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.version import LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0, LATEST_1_1, \
LATEST_2_0, LATEST_2_1, DEV_BRANCH, DEV_VERSION, KafkaVersion
# broker 0.10.0 is not compatible with newer Kafka Streams versions
broker_upgrade_versions = [str(LATEST_0_10_1), str(LATEST_0_10_2), str(LATEST_0_11_0), str(LATEST_1_0), str(LATEST_1_1), str(LATEST_2_0), str(LATEST_2_1), str(DEV_BRANCH)]
metadata_1_versions = [str(LATEST_0_10_0)]
metadata_2_versions = [str(LATEST_0_10_1), str(LATEST_0_10_2), str(LATEST_0_11_0), str(LATEST_1_0), str(LATEST_1_1)]
# once 0.10.1.2 is available backward_compatible_metadata_2_versions
# can be replaced with metadata_2_versions
backward_compatible_metadata_2_versions = [str(LATEST_0_10_2), str(LATEST_0_11_0), str(LATEST_1_0), str(LATEST_1_1)]
metadata_3_or_higher_versions = [str(LATEST_2_0), str(LATEST_2_1), str(DEV_VERSION)]
"""
After each release one should first check that the released version has been uploaded to
https://s3-us-west-2.amazonaws.com/kafka-packages/ which is the url used by system test to download jars;
anyone can verify that by calling
curl https://s3-us-west-2.amazonaws.com/kafka-packages/kafka_$scala_version-$version.tgz to download the jar.
If it is not uploaded yet, ping the dev@kafka mailing list to request that it be uploaded.
This test needs to be updated after each release; this requires several steps,
which are outlined here:
1. Update all relevant versions in tests/kafkatest/version.py. This includes adding a new version for the new
   release and bumping all relevant already released versions.
2. Add the new version to the "kafkatest.version" import above and include the version in the
broker_upgrade_versions list above. You'll also need to add the new version to the
"StreamsUpgradeTestJobRunnerService" on line 484 to make sure the correct arguments are passed
during the system test run.
3. Update the vagrant/bash.sh file to include all new versions, including the newly released version
and all point releases for existing releases. You only need to list the latest version in
this file.
4. Then update all relevant versions in the tests/docker/Dockerfile
5. Add a new "upgrade-system-tests-XXXX module under streams. You can probably just copy the
latest system test module from the last release. Just make sure to update the systout print
statement in StreamsUpgradeTest to the version for the release. After you add the new module
you'll need to update settings.gradle file to include the name of the module you just created
for gradle to recognize the newly added module
6. Then you'll need to update any version changes in gradle/dependencies.gradle
"""
class StreamsUpgradeTest(Test):
"""
    Test upgrading Kafka Streams (all version combinations).
    If metadata was changed, the upgrade is more difficult.
    The metadata version was bumped in 0.10.1.0 and
    subsequently bumped again in 2.0.0.
"""
def __init__(self, test_context):
super(StreamsUpgradeTest, self).__init__(test_context)
self.topics = {
'echo' : { 'partitions': 5 },
'data' : { 'partitions': 5 },
}
self.leader = None
self.leader_counter = {}
processed_msg = "processed [0-9]* records"
def perform_broker_upgrade(self, to_version):
self.logger.info("First pass bounce - rolling broker upgrade")
for node in self.kafka.nodes:
self.kafka.stop_node(node)
node.version = KafkaVersion(to_version)
self.kafka.start_node(node)
@ignore
@cluster(num_nodes=6)
@matrix(from_version=broker_upgrade_versions, to_version=broker_upgrade_versions)
def test_upgrade_downgrade_brokers(self, from_version, to_version):
"""
Start a smoke test client then perform rolling upgrades on the broker.
"""
if from_version == to_version:
return
self.replication = 3
self.num_kafka_nodes = 3
self.partitions = 1
self.isr = 2
self.topics = {
'echo' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr}},
'data' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'min' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'max' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'sum' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'dif' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'cnt' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'avg' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'wcnt' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} },
'tagg' : { 'partitions': self.partitions, 'replication-factor': self.replication,
'configs': {"min.insync.replicas": self.isr} }
}
# Setup phase
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
# number of nodes needs to be >= 3 for the smoke test
self.kafka = KafkaService(self.test_context, num_nodes=self.num_kafka_nodes,
zk=self.zk, version=KafkaVersion(from_version), topics=self.topics)
self.kafka.start()
# allow some time for topics to be created
wait_until(lambda: self.confirm_topics_on_all_brokers(set(self.topics.keys())),
timeout_sec=60,
err_msg="Broker did not create all topics in 60 seconds ")
self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
processor = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka)
with self.driver.node.account.monitor_log(self.driver.STDOUT_FILE) as driver_monitor:
self.driver.start()
with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor:
processor.start()
monitor.wait_until(self.processed_msg,
timeout_sec=60,
err_msg="Never saw output '%s' on" % self.processed_msg + str(processor.node))
connected_message = "Discovered group coordinator"
with processor.node.account.monitor_log(processor.LOG_FILE) as log_monitor:
with processor.node.account.monitor_log(processor.STDOUT_FILE) as stdout_monitor:
self.perform_broker_upgrade(to_version)
log_monitor.wait_until(connected_message,
timeout_sec=120,
err_msg=("Never saw output '%s' on " % connected_message) + str(processor.node.account))
stdout_monitor.wait_until(self.processed_msg,
timeout_sec=60,
err_msg="Never saw output '%s' on" % self.processed_msg + str(processor.node.account))
            # SmokeTestDriver allows up to 6 minutes to consume all records for
            # the verification step, so this timeout is set to 6 minutes
            # (360 seconds) for consuming the verification records plus a very
            # conservative additional 2 minutes (120 seconds) to process the
            # records in the verification step.
driver_monitor.wait_until('ALL-RECORDS-DELIVERED\|PROCESSED-MORE-THAN-GENERATED',
timeout_sec=480,
err_msg="Never saw output '%s' on" % 'ALL-RECORDS-DELIVERED|PROCESSED-MORE-THAN-GENERATED' + str(self.driver.node.account))
self.driver.stop()
processor.stop()
processor.node.account.ssh_capture("grep SMOKE-TEST-CLIENT-CLOSED %s" % processor.STDOUT_FILE, allow_fail=False)
@matrix(from_version=metadata_2_versions, to_version=metadata_2_versions)
def test_simple_upgrade_downgrade(self, from_version, to_version):
"""
Starts 3 KafkaStreams instances with <old_version>, and upgrades one-by-one to <new_version>
"""
if from_version == to_version:
return
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, topics=self.topics)
self.kafka.start()
self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
self.driver.disable_auto_terminate()
self.processor1 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.processor2 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.processor3 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.driver.start()
self.start_all_nodes_with(from_version)
self.processors = [self.processor1, self.processor2, self.processor3]
counter = 1
random.seed()
# upgrade one-by-one via rolling bounce
random.shuffle(self.processors)
for p in self.processors:
p.CLEAN_NODE_ENABLED = False
self.do_stop_start_bounce(p, None, to_version, counter)
counter = counter + 1
# shutdown
self.driver.stop()
random.shuffle(self.processors)
for p in self.processors:
node = p.node
with node.account.monitor_log(p.STDOUT_FILE) as monitor:
p.stop()
monitor.wait_until("UPGRADE-TEST-CLIENT-CLOSED",
timeout_sec=60,
err_msg="Never saw output 'UPGRADE-TEST-CLIENT-CLOSED' on" + str(node.account))
@matrix(from_version=metadata_1_versions, to_version=backward_compatible_metadata_2_versions)
@matrix(from_version=metadata_1_versions, to_version=metadata_3_or_higher_versions)
@matrix(from_version=metadata_2_versions, to_version=metadata_3_or_higher_versions)
def test_metadata_upgrade(self, from_version, to_version):
"""
Starts 3 KafkaStreams instances with version <from_version> and upgrades one-by-one to <to_version>
"""
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, topics=self.topics)
self.kafka.start()
self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
self.driver.disable_auto_terminate()
self.processor1 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.processor2 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.processor3 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.driver.start()
self.start_all_nodes_with(from_version)
self.processors = [self.processor1, self.processor2, self.processor3]
counter = 1
random.seed()
# first rolling bounce
random.shuffle(self.processors)
for p in self.processors:
p.CLEAN_NODE_ENABLED = False
self.do_stop_start_bounce(p, from_version[:-2], to_version, counter)
counter = counter + 1
# second rolling bounce
random.shuffle(self.processors)
for p in self.processors:
self.do_stop_start_bounce(p, None, to_version, counter)
counter = counter + 1
# shutdown
self.driver.stop()
random.shuffle(self.processors)
for p in self.processors:
node = p.node
with node.account.monitor_log(p.STDOUT_FILE) as monitor:
p.stop()
monitor.wait_until("UPGRADE-TEST-CLIENT-CLOSED",
timeout_sec=60,
err_msg="Never saw output 'UPGRADE-TEST-CLIENT-CLOSED' on" + str(node.account))
def test_version_probing_upgrade(self):
"""
Starts 3 KafkaStreams instances, and upgrades one-by-one to "future version"
"""
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, topics=self.topics)
self.kafka.start()
self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
self.driver.disable_auto_terminate()
self.processor1 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.processor2 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.processor3 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
self.driver.start()
self.start_all_nodes_with("") # run with TRUNK
self.processors = [self.processor1, self.processor2, self.processor3]
self.old_processors = [self.processor1, self.processor2, self.processor3]
self.upgraded_processors = []
for p in self.processors:
self.leader_counter[p] = 2
self.update_leader()
for p in self.processors:
self.leader_counter[p] = 0
self.leader_counter[self.leader] = 3
counter = 1
current_generation = 3
random.seed()
random.shuffle(self.processors)
for p in self.processors:
p.CLEAN_NODE_ENABLED = False
current_generation = self.do_rolling_bounce(p, counter, current_generation)
counter = counter + 1
# shutdown
self.driver.stop()
random.shuffle(self.processors)
for p in self.processors:
node = p.node
with node.account.monitor_log(p.STDOUT_FILE) as monitor:
p.stop()
monitor.wait_until("UPGRADE-TEST-CLIENT-CLOSED",
timeout_sec=60,
err_msg="Never saw output 'UPGRADE-TEST-CLIENT-CLOSED' on" + str(node.account))
def update_leader(self):
self.leader = None
retries = 10
while retries > 0:
for p in self.processors:
found = list(p.node.account.ssh_capture("grep \"Finished assignment for group\" %s" % p.LOG_FILE, allow_fail=True))
if len(found) == self.leader_counter[p] + 1:
if self.leader is not None:
raise Exception("Could not uniquely identify leader")
self.leader = p
self.leader_counter[p] = self.leader_counter[p] + 1
if self.leader is None:
retries = retries - 1
time.sleep(5)
else:
break
if self.leader is None:
raise Exception("Could not identify leader")
def get_version_string(self, version):
if version.startswith("0") or version.startswith("1") \
or version.startswith("2.0") or version.startswith("2.1"):
return "Kafka version : " + version
else:
return "Kafka version: " + version
def start_all_nodes_with(self, version):
kafka_version_str = self.get_version_string(version)
# start first with <version>
self.prepare_for(self.processor1, version)
node1 = self.processor1.node
with node1.account.monitor_log(self.processor1.STDOUT_FILE) as monitor:
with node1.account.monitor_log(self.processor1.LOG_FILE) as log_monitor:
self.processor1.start()
log_monitor.wait_until(kafka_version_str,
timeout_sec=60,
err_msg="Could not detect Kafka Streams version " + version + " " + str(node1.account))
monitor.wait_until("processed [0-9]* records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(node1.account))
# start second with <version>
self.prepare_for(self.processor2, version)
node2 = self.processor2.node
with node1.account.monitor_log(self.processor1.STDOUT_FILE) as first_monitor:
with node2.account.monitor_log(self.processor2.STDOUT_FILE) as second_monitor:
with node2.account.monitor_log(self.processor2.LOG_FILE) as log_monitor:
self.processor2.start()
log_monitor.wait_until(kafka_version_str,
timeout_sec=60,
err_msg="Could not detect Kafka Streams version " + version + " " + str(node2.account))
first_monitor.wait_until("processed [0-9]* records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(node1.account))
second_monitor.wait_until("processed [0-9]* records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(node2.account))
# start third with <version>
self.prepare_for(self.processor3, version)
node3 = self.processor3.node
with node1.account.monitor_log(self.processor1.STDOUT_FILE) as first_monitor:
with node2.account.monitor_log(self.processor2.STDOUT_FILE) as second_monitor:
with node3.account.monitor_log(self.processor3.STDOUT_FILE) as third_monitor:
with node3.account.monitor_log(self.processor3.LOG_FILE) as log_monitor:
self.processor3.start()
log_monitor.wait_until(kafka_version_str,
timeout_sec=60,
err_msg="Could not detect Kafka Streams version " + version + " " + str(node3.account))
first_monitor.wait_until("processed [0-9]* records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(node1.account))
second_monitor.wait_until("processed [0-9]* records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(node2.account))
third_monitor.wait_until("processed [0-9]* records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(node3.account))
@staticmethod
def prepare_for(processor, version):
processor.node.account.ssh("rm -rf " + processor.PERSISTENT_ROOT, allow_fail=False)
if version == str(DEV_VERSION):
processor.set_version("") # set to TRUNK
else:
processor.set_version(version)
def do_stop_start_bounce(self, processor, upgrade_from, new_version, counter):
kafka_version_str = self.get_version_string(new_version)
first_other_processor = None
second_other_processor = None
for p in self.processors:
if p != processor:
if first_other_processor is None:
first_other_processor = p
else:
second_other_processor = p
node = processor.node
first_other_node = first_other_processor.node
second_other_node = second_other_processor.node
# stop processor and wait for rebalance of others
with first_other_node.account.monitor_log(first_other_processor.STDOUT_FILE) as first_other_monitor:
with second_other_node.account.monitor_log(second_other_processor.STDOUT_FILE) as second_other_monitor:
processor.stop()
first_other_monitor.wait_until("processed 100 records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(first_other_node.account))
second_other_monitor.wait_until("processed 100 records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(second_other_node.account))
node.account.ssh_capture("grep UPGRADE-TEST-CLIENT-CLOSED %s" % processor.STDOUT_FILE, allow_fail=False)
if upgrade_from is None: # upgrade disabled -- second round of rolling bounces
roll_counter = ".1-" # second round of rolling bounces
else:
roll_counter = ".0-" # first round of rolling boundes
node.account.ssh("mv " + processor.STDOUT_FILE + " " + processor.STDOUT_FILE + roll_counter + str(counter), allow_fail=False)
node.account.ssh("mv " + processor.STDERR_FILE + " " + processor.STDERR_FILE + roll_counter + str(counter), allow_fail=False)
node.account.ssh("mv " + processor.LOG_FILE + " " + processor.LOG_FILE + roll_counter + str(counter), allow_fail=False)
if new_version == str(DEV_VERSION):
processor.set_version("") # set to TRUNK
else:
processor.set_version(new_version)
processor.set_upgrade_from(upgrade_from)
grep_metadata_error = "grep \"org.apache.kafka.streams.errors.TaskAssignmentException: unable to decode subscription data: version=2\" "
with node.account.monitor_log(processor.STDOUT_FILE) as monitor:
with node.account.monitor_log(processor.LOG_FILE) as log_monitor:
with first_other_node.account.monitor_log(first_other_processor.STDOUT_FILE) as first_other_monitor:
with second_other_node.account.monitor_log(second_other_processor.STDOUT_FILE) as second_other_monitor:
processor.start()
log_monitor.wait_until(kafka_version_str,
timeout_sec=60,
err_msg="Could not detect Kafka Streams version " + new_version + " " + str(node.account))
first_other_monitor.wait_until("processed 100 records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(first_other_node.account))
found = list(first_other_node.account.ssh_capture(grep_metadata_error + first_other_processor.STDERR_FILE, allow_fail=True))
if len(found) > 0:
raise Exception("Kafka Streams failed with 'unable to decode subscription data: version=2'")
second_other_monitor.wait_until("processed 100 records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(second_other_node.account))
found = list(second_other_node.account.ssh_capture(grep_metadata_error + second_other_processor.STDERR_FILE, allow_fail=True))
if len(found) > 0:
raise Exception("Kafka Streams failed with 'unable to decode subscription data: version=2'")
monitor.wait_until("processed 100 records from topic",
timeout_sec=60,
err_msg="Never saw output 'processed 100 records from topic' on" + str(node.account))
def do_rolling_bounce(self, processor, counter, current_generation):
first_other_processor = None
second_other_processor = None
for p in self.processors:
if p != processor:
if first_other_processor is None:
first_other_processor = p
else:
second_other_processor = p
node = processor.node
first_other_node = first_other_processor.node
second_other_node = second_other_processor.node
with first_other_node.account.monitor_log(first_other_processor.LOG_FILE) as first_other_monitor:
with second_other_node.account.monitor_log(second_other_processor.LOG_FILE) as second_other_monitor:
# stop processor
processor.stop()
node.account.ssh_capture("grep UPGRADE-TEST-CLIENT-CLOSED %s" % processor.STDOUT_FILE, allow_fail=False)
node.account.ssh("mv " + processor.STDOUT_FILE + " " + processor.STDOUT_FILE + "." + str(counter), allow_fail=False)
node.account.ssh("mv " + processor.STDERR_FILE + " " + processor.STDERR_FILE + "." + str(counter), allow_fail=False)
node.account.ssh("mv " + processor.LOG_FILE + " " + processor.LOG_FILE + "." + str(counter), allow_fail=False)
self.leader_counter[processor] = 0
with node.account.monitor_log(processor.LOG_FILE) as log_monitor:
processor.set_upgrade_to("future_version")
processor.start()
self.old_processors.remove(processor)
self.upgraded_processors.append(processor)
log_monitor.wait_until("Kafka version: " + str(DEV_VERSION),
timeout_sec=60,
err_msg="Could not detect Kafka Streams version " + str(DEV_VERSION) + " in " + str(node.account))
log_monitor.offset = 5
log_monitor.wait_until("partition\.assignment\.strategy = \[org\.apache\.kafka\.streams\.tests\.StreamsUpgradeTest$FutureStreamsPartitionAssignor\]",
timeout_sec=60,
err_msg="Could not detect FutureStreamsPartitionAssignor in " + str(node.account))
if processor == self.leader:
self.update_leader()
else:
self.leader_counter[self.leader] = self.leader_counter[self.leader] + 1
if processor == self.leader:
leader_monitor = log_monitor
elif first_other_processor == self.leader:
leader_monitor = first_other_monitor
elif second_other_processor == self.leader:
leader_monitor = second_other_monitor
else:
raise Exception("Could not identify leader.")
monitors = {}
monitors[processor] = log_monitor
monitors[first_other_processor] = first_other_monitor
monitors[second_other_processor] = second_other_monitor
leader_monitor.wait_until("Received a future (version probing) subscription (version: 5). Sending empty assignment back (with supported version 4).",
timeout_sec=60,
err_msg="Could not detect 'version probing' attempt at leader " + str(self.leader.node.account))
if len(self.old_processors) > 0:
log_monitor.wait_until("Sent a version 5 subscription and got version 4 assignment back (successful version probing). Downgrading subscription metadata to received version and trigger new rebalance.",
timeout_sec=60,
err_msg="Could not detect 'successful version probing' at upgrading node " + str(node.account))
else:
log_monitor.wait_until("Sent a version 5 subscription and got version 4 assignment back (successful version probing). Setting subscription metadata to leaders supported version 5 and trigger new rebalance.",
timeout_sec=60,
err_msg="Could not detect 'successful version probing with upgraded leader' at upgrading node " + str(node.account))
first_other_monitor.wait_until("Sent a version 4 subscription and group leader.s latest supported version is 5. Upgrading subscription metadata version to 5 for next rebalance.",
timeout_sec=60,
err_msg="Never saw output 'Upgrade metadata to version 4' on" + str(first_other_node.account))
second_other_monitor.wait_until("Sent a version 4 subscription and group leader.s latest supported version is 5. Upgrading subscription metadata version to 5 for next rebalance.",
timeout_sec=60,
err_msg="Never saw output 'Upgrade metadata to version 4' on" + str(second_other_node.account))
log_monitor.wait_until("Version probing detected. Triggering new rebalance.",
timeout_sec=60,
err_msg="Could not detect 'Triggering new rebalance' at upgrading node " + str(node.account))
# version probing should trigger second rebalance
# now we check that after consecutive rebalances we have synchronized generation
generation_synchronized = False
retries = 0
while retries < 10:
processor_found = self.extract_generation_from_logs(processor)
first_other_processor_found = self.extract_generation_from_logs(first_other_processor)
second_other_processor_found = self.extract_generation_from_logs(second_other_processor)
if len(processor_found) > 0 and len(first_other_processor_found) > 0 and len(second_other_processor_found) > 0:
self.logger.info("processor: " + str(processor_found))
self.logger.info("first other processor: " + str(first_other_processor_found))
self.logger.info("second other processor: " + str(second_other_processor_found))
processor_generation = self.extract_highest_generation(processor_found)
first_other_processor_generation = self.extract_highest_generation(first_other_processor_found)
second_other_processor_generation = self.extract_highest_generation(second_other_processor_found)
if processor_generation == first_other_processor_generation and processor_generation == second_other_processor_generation:
current_generation = processor_generation
generation_synchronized = True
break
time.sleep(5)
retries = retries + 1
                    if not generation_synchronized:
raise Exception("Never saw all three processors have the synchronized generation number")
if processor == self.leader:
self.update_leader()
else:
self.leader_counter[self.leader] = self.leader_counter[self.leader] + 1
if self.leader in self.old_processors or len(self.old_processors) > 0:
self.verify_metadata_no_upgraded_yet()
return current_generation
def extract_generation_from_logs(self, processor):
return list(processor.node.account.ssh_capture("grep \"Successfully joined group with generation\" %s| awk \'{for(i=1;i<=NF;i++) {if ($i == \"generation\") beginning=i+1; if($i== \"(org.apache.kafka.clients.consumer.internals.AbstractCoordinator)\") ending=i }; for (j=beginning;j<ending;j++) printf $j; printf \"\\n\"}\'" % processor.LOG_FILE, allow_fail=True))
def extract_highest_generation(self, found_generations):
return int(found_generations[-1])
def verify_metadata_no_upgraded_yet(self):
for p in self.processors:
found = list(p.node.account.ssh_capture("grep \"Sent a version 4 subscription and group leader.s latest supported version is 5. Upgrading subscription metadata version to 5 for next rebalance.\" " + p.LOG_FILE, allow_fail=True))
if len(found) > 0:
raise Exception("Kafka Streams failed with 'group member upgraded to metadata 4 too early'")
def confirm_topics_on_all_brokers(self, expected_topic_set):
for node in self.kafka.nodes:
match_count = 0
            # need to iterate over topic_list_generator as kafka.list_topics()
            # returns a python generator, so values are fetched lazily;
            # we can't compare directly and must iterate over what's returned
topic_list_generator = self.kafka.list_topics(node=node)
for topic in topic_list_generator:
if topic in expected_topic_set:
match_count += 1
if len(expected_topic_set) != match_count:
return False
return True
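# Illustrative sketch (not used by the test above): the grep/awk pipeline in
# extract_generation_from_logs pulls generation numbers out of log lines such
# as "... Successfully joined group with generation 7 (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)".
# Assuming that log line format, the same extraction can be written in pure Python:
def _extract_generations_from_text(log_text):
    """Return the generation numbers found in a block of Streams log text."""
    import re
    pattern = re.compile(r"Successfully joined group with generation (\d+)")
    return [int(m.group(1)) for m in pattern.finditer(log_text)]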
|
|
# various utility functions for manipulating vector GIS data
#
# David J. Lampert ([email protected])
#
# last updated 08/07/2015
#
# contains the merge_shapes function that takes the name of an input polygon
# shapefile and merges the shapes together using the combine_shapes function,
# which "traces" the outside of the shapes.
import os, shutil, time, collections, numpy
from shapefile import Reader, Writer
def array_to_list(a,
tol = 6,
):
"""
Converts a 2-d array with two columns to a list of lists.
"""
return [[row[0].round(decimals = tol), row[1].round(decimals = tol)]
for row in a]
def format_shape(points,
omit = False,
tol = 6,
):
"""
Formats a list of points into list of polygons arranged into arrays.
"""
    # check if there are multiple shapes; this is a major problem with NHDPlus
parts = []
i = 0
while i < len(points) - 1:
current = i
while points[i + 1] != points[current] and i < len(points) - 2: i+=1
parts.append([current, i + 1])
i+=2
formatted = []
for start, end in parts:
# create an array to store the points efficiently
a = numpy.empty((len(points[start:end + 1]), 2), dtype = 'float')
# format the points and add them to the array
for p, i in zip(points[start:end + 1], range(end - start + 1)):
a[i] = round(p[0], tol), round(p[1], tol)
if len(a) > 5: formatted.append(a)
elif not omit: formatted.append(a)
return formatted
def next_old(l,
i,
):
"""
Returns the index "i" of the next point in the array "a". Work around
for reaching the end to go to the start.
"""
if i == len(l) - 1: i = 1
else: i += 1
if l[i-1] == l[i]: i += 1
return i
def find_neighbor_indices(shape,
bboxes,
):
"""
Returns the indices of the list "bboxes" that overlap with the bounding
box of "shape."
"""
xmin = min([x for x, y in shape])
ymin = min([y for x, y in shape])
xmax = max([x for x, y in shape])
ymax = max([y for x, y in shape])
neighbors = []
for b, i in zip(bboxes, range(len(bboxes))):
if xmin < b[2] and xmax < b[0]: x = False
else: x = True
if ymin < b[3] and ymax < b[1]: y = False
else: y = True
if x and y: neighbors.append(i)
return neighbors
def get_all(shape,
shapes,
):
"""
    Makes a list of all the points in the shapes of "shapes" that also appear in "shape."
"""
neighbor_points = []
for s in shapes:
for p in s:
if p not in neighbor_points and p in shape:
neighbor_points.append(p)
return neighbor_points
def combine_shapes2(shapes,
bboxes,
skip = False,
verbose = True,
):
"""
Combines a list of shapes into a single shape. Assumes the shapes are
simply connected with one common boundary. Also assumes the points are in
clockwise order. Starts at the smallest "x" value (longitude) and traces
the first shape until reaching a junction, then figures out which direction
to go next until reaching the first shape again.
"""
start = time.time()
# in the event of a single shape just return it
if len(shapes) == 1: return [(x, y) for x, y in shapes[0]]
# find all the edge points (only appear once)
all_points = [(x, y) for shape in shapes for x, y in shape]
# find all points that aren't duplicated (interior points are duplicated)
edges = [p for p, count in collections.Counter(all_points).items()
if count == 1]
xs, ys = zip(*edges)
# find the smallest value of x (which is an outside point) and start there
# also use the shape with the max x as a check the trace made it around
xmin = min(xs)
xmax = max(xs)
ymin = min(ys)
ymax = max(ys)
#xmin = min([shape[:, 0].min() for shape in shapes])
#xmax = max([shape[:, 0].max() for shape in shapes])
opposite_index = [shape[:, 0].max() for shape in shapes].index(xmax)
shape_index = [shape[:, 0].min() for shape in shapes].index(xmin)
start_index = shape_index
# get the total number of points in the shape list to use to prevent crash
total_points = len(all_points)
#total_points = sum([len(s) for s in shapes])
# get the index of the starting point in the first shape
i = numpy.where(shapes[shape_index] == xmin)[0][0]
# "current" is current shape being traced (as list); "points" are the trace
if verbose: print('tracing shape {}'.format(shape_index))
shapes = [array_to_list(s) for s in shapes]
current = shapes[shape_index]
points = [current[i]]
# go to the second point. note if the index reaches the end of the list
# a mechanism is needed to go back to the start of the list.
i = next_old(current, i)
# find the indices of all shapes with an overlapping bounding box
neighbor_indices = find_neighbor_indices(current, bboxes)
neighbor_indices.remove(shape_index)
# make a list of the neighbors and their points
neighbors = [shapes[j] for j in neighbor_indices]
if len(neighbors) == 0:
        raise RuntimeError('no neighboring shapes detected')
neighboring_points = get_all(current, neighbors)
if len(neighboring_points) == 0:
        raise RuntimeError('no neighboring points detected')
# trace the current shape until reaching a point common to the neighbors
#print(edges[:5])
#print(current[i] in edges)
#while current[i] in edges:
while current[i] not in neighboring_points:
points.append(current[i])
i = next_old(current, i)
points.append(current[i])
for neighbor, j in zip(neighbors, neighbor_indices):
if points[-1] in neighbor:
#if current[i] in neighbor:
shape_index = j
#i = shapes[shape_index].index(current[i])
current = shapes[shape_index]
i = current.index(points[-1])
i = next_old(current, i)
# repeat the process until reaching the first shape again, also check it
# made it to the opposite side
opposite = start_index == opposite_index
while shape_index != start_index or current[i] == points[0]:
if shape_index == opposite_index: opposite = True
if current[i] in points and skip:
i = next_old(current, i)
print('warning: repeated point, ignoring')
elif current[i] in points:
            raise RuntimeError('trace error occurred: repeated point')
if verbose: print('tracing shape', shape_index)
# find the neighbors
neighbor_indices = find_neighbor_indices(current, bboxes)
neighbor_indices.remove(shape_index)
neighbors = [shapes[j] for j in neighbor_indices]
neighboring_points = get_all(current, neighbors)
# trace the current shape until reaching a point common to the neighbors
while current[i] not in neighboring_points:
points.append(current[i])
i = next_old(current, i)
points.append(current[i])
for neighbor, j in zip(neighbors, neighbor_indices):
if points[-1] in neighbor:
shape_index = j
current = shapes[shape_index]
i = current.index(points[-1])
i = next_old(current, i)
# add the last points from the first shape
if verbose: print('tracing shape {}'.format(shape_index))
while current[i] != points[0]:
points.append(current[i])
i = next_old(current, i)
points.append(points[0])
if verbose:
v = time.time() - start
print('\nfinished tracing catchments in {:.1f} seconds\n'.format(v))
if opposite: return points
else:
if verbose:
print('\ntrace failed to reach opposite side, ' +
'trying alternate method')
        raise RuntimeError('trace failed to reach opposite side')
def get_distance(p1, p2):
"""
    Returns the squared distance between "p1" and "p2" (sufficient for comparisons).
"""
x1, y1 = p1
x2, y2 = p2
return (x2 - x1)**2 + (y2 - y1)**2
def find_closest(p, points):
"""
Returns the closest point in the list "points" to the point "p."
"""
distances = [get_distance(p, point) for point in points]
return points[distances.index(min(distances))]
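# Minimal illustration (assumed inputs, not called anywhere above): because
# get_distance returns the squared distance, the ordering of candidates is
# preserved and find_closest still picks the nearest point.
def _find_closest_example():
    """Return the candidate point closest to the origin, here (0.5, 0.0)."""
    candidates = [(1.0, 1.0), (0.5, 0.0), (3.0, 4.0)]
    return find_closest((0.0, 0.0), candidates)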
def get_centroid(points):
"""
Calculates the centroid of a polygon with paired x-y values.
"""
xs = numpy.array([x for x, y in points] + [points[0][0]])
ys = numpy.array([y for x, y in points] + [points[0][1]])
a = xs[:-1] * ys[1:]
b = ys[:-1] * xs[1:]
A = numpy.sum(a - b) / 2.
cx = xs[:-1] + xs[1:]
cy = ys[:-1] + ys[1:]
Cx = numpy.sum(cx * (a - b)) / (6. * A)
Cy = numpy.sum(cy * (a - b)) / (6. * A)
return Cx, Cy
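# Worked example (illustrative only): get_centroid uses the shoelace formula,
# Cx = sum((x_i + x_{i+1}) * (x_i * y_{i+1} - x_{i+1} * y_i)) / (6 * A) with A
# the signed area, and likewise for Cy. For the unit square below the result
# is (0.5, 0.5); the sign of A cancels, so ring orientation does not matter.
def _get_centroid_example():
    """Return the centroid of the unit square, which is (0.5, 0.5)."""
    unit_square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    return get_centroid(unit_square)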
def next_index(l,
i,
):
"""
Returns the index "i" of the next point in the list "l." Work around
for reaching the end to go to the start.
"""
if i == len(l) - 1: i = 0
else: i += 1
return i
def find_neighbors(j,
shapes,
bboxes,
):
"""
    Returns the shapes from the list "shapes" whose bounding boxes in
    "bboxes" overlap the bounding box of shape "j".
"""
xmin, ymin, xmax, ymax = bboxes[j]
indices = []
for i, b in enumerate(bboxes):
if xmin < b[2] and xmax < b[0]: x = False
else: x = True
if ymin < b[3] and ymax < b[1]: y = False
else: y = True
if x and y and b != bboxes[j]: indices.append(i)
return [shapes[i] for i in indices]
def combine_shapes(shapes,
verbose = True,
):
"""
Combines a list of shapes into a single shape. Assumes the shapes are
simply connected with one common boundary. Also assumes the points are in
clockwise order. Starts at the smallest "x" value (longitude) and traces
the first shape until reaching a junction, then figures out which direction
to go next until reaching the first shape again.
"""
st = time.time()
# in the event of a single shape just return it
if len(shapes) == 1:
return [(round(x, 6), round(y, 6)) for x, y in shapes[0].points]
# find all the edge points (only appear once)
all_points = [(round(x, 6), round(y, 6))
for shape in shapes for x, y in shape.points]
if verbose: print('found', len(all_points), 'points')
# find all points that aren't duplicated (interior points are duplicated)
edges = [p for p, count in collections.Counter(all_points).items()
if count == 1]
# round everything to enable checking and keep track of the bounding boxes
# get rid of first and last points
bboxes = []
formatted = []
for shape in shapes:
# deal with shapes with multiple parts
points = [(round(x, 6), round(y, 6)) for x,y in shape.points]
duplicates = [p for p, count in collections.Counter(points).items()
if count > 1]
points = [p for p in points if p not in duplicates]
formatted.append(points)
bboxes.append(shape.bbox)
# keep the formatted shapes and bounding boxes
bboxes = [b for s, b in zip(formatted, bboxes)]
shapes = formatted
# get rid of shapes that have no edge points
if verbose: print('removing shapes with no edge points')
keepers = [i for i,s in enumerate(shapes) if any([p in edges for p in s])]
if verbose:
its = len(shapes), len(keepers)
        print('initially {} shapes; finally {} shapes'.format(*its))
bboxes = [bboxes[i] for i in keepers]
shapes = [shapes[i] for i in keepers]
if verbose: print('found', len(edges), 'points')
# find the smallest value of x (which is an outside point) and start there
# also use the shape with the max x as a check the trace made it around
xs, ys = zip(*edges)
xmin = min(xs)
xmax = max(xs)
ymin = min(ys)
ymax = max(ys)
start = edges[xs.index(xmin)]
opposite = edges[xs.index(xmax)]
# "current" is current shape being traced (as list), "points" are the trace
current = shapes[[min(shape)[0] for shape in shapes].index(xmin)]
i = current.index(start)
# keep the results in a list
points = []
# iterate through using the existence of the opposite side point as
# the criteria that the trace went around
while opposite not in points or current[i] != start:
# index of the current shape
j = shapes.index(current)
if verbose:
its = j, current[i][0], current[i][1]
print('started tracing shape ' +
'{:3d} starting at {:8.4f}, {:7.4f}'.format(*its))
# find the neighbors
neighbors = find_neighbors(j, shapes, bboxes)
# trace the current shape until reaching a point not on the edge
while current[i] in edges:
points.append(current[i])
edges.remove(current[i])
i = next_index(current, i)
if verbose:
its = j, current[i][0], current[i][1]
print('finished tracing shape ' +
'{:3d} starting at {:8.4f}, {:7.4f}'.format(*its))
# make a list of the candidates for the next shape to trace
candidates = [neighbor for neighbor in neighbors
if current[i] in neighbor]
if current[i] == start: pass
elif len(candidates) == 0:
# couldn't find anything
if verbose:
print('unable to find neighboring shape at point ' +
'{:8.4f}, {:7.4f} '.format(*current[i]) +
'in shape {};\n'.format(j) +
                      'searching for closest point')
# find the closest edge point
closest = find_closest(current[i], edges)
if verbose:
print('found close point {:8.4f}, {:7.4f}'.format(*closest))
# find the shape
n = 0
while closest not in shapes[n] and n < len(shapes) - 1: n += 1
i = shapes[n].index(closest)
current = shapes[n]
# remove the last point, since it's in a weird place
l = points.pop(-1)
elif len(candidates) == 1:
# only one choice, find the index of the starting point
points.append(current[i])
if current[i] in edges: edges.remove(current[i])
i = candidates[0].index(current[i])
current = candidates[0]
else:
if verbose: print('multiple candidates detected')
# find the candidate with an edge point
candidate_edges = [[p for p in candidate if p in edges]
for candidate in candidates]
winner = candidates[candidate_edges.index(max(candidate_edges))]
i = winner.index(current[i])
# remove the last point, since it's in a weird place
l = points.pop(-1)
current = winner
# work around for straight intersections
if current[i] != start and current[next_index(current, i)] not in edges:
if verbose: print('went the wrong way, finding the closest edge')
l = points.pop(-1)
# find the closest edge point
closest = find_closest(points[-1], edges)
if verbose:
print('found close point {:8.4f}, {:7.4f}'.format(*closest))
# find the shape
n = 0
while closest not in shapes[n] and n < len(shapes) - 1: n += 1
i = shapes[n].index(closest)
current = shapes[n]
# add the point
if current[i] not in points: points.append(current[i])
if current[i] in edges: edges.remove(current[i])
if current[i] != start: i = next_index(current, i)
if verbose:
v = time.time() - st
print('\nfinished tracing catchments in {:.1f} seconds\n'.format(v))
if opposite in points: return points
else:
print('\ntrace failed to reach opposite side, ' +
'trying alternate method')
        raise RuntimeError('trace failed to reach opposite side')
def merge_shapes(inputfile,
outputfile = None,
overwrite = False,
verbose = True,
vverbose = False,
):
"""
Merges all the shapes in a shapefile into a single shape.
"""
    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())
if os.path.isfile(outputfile + '.shp') and not overwrite:
if verbose:
print('combined watershed shapefile {} exists'.format(outputfile))
return
if verbose: print('combining shapes from {}\n'.format(inputfile) +
'this may take a while...\n')
# start by copying the projection files
shutil.copy(inputfile + '.prj', outputfile + '.prj')
# load the catchment and flowline shapefiles
r = Reader(inputfile, shapeType = 5)
try:
combined = combine_shapes(r.shapes(), verbose = vverbose)
except:
print('error: unable to combine shapes')
raise
# create the new file with the merged shapes
w = Writer(shapeType = 5)
w.poly(shapeType = 5, parts = [combined])
# copy the fields from the original and then the first record; note this
# can be adapted as needed
for field in r.fields: w.field(*field)
w.record(*r.record(0))
w.save(outputfile)
if verbose:
its = inputfile, outputfile
print('successfully combined shapes from {} to {}\n'.format(*its))
# pretty simple to use--point the inputfile to the shapefile with the shapes
# you want to merge (do NOT include the extension .shp, .prj, etc.) and the
# name you want for the output file (if unspecified it will be called "merged"
# and placed in the current directory).
#inputfile = r'C:\HSPF_data\07100008\22248777\catchments'
#outputfile = r'C:\HSPF_data\07100008\22248777\merged'
#merge_shapes(inputfile, outputfile = outputfile)
|
|
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
import re
import six
from xml import etree
__author__ = 'Jay Xu'
log = logging.getLogger(__name__)
class XMLAPIParser(object):
def __init__(self):
        # self.tag holds the (namespace-stripped) tag of the element being
        # parsed, self.elt the object currently being populated, and
        # self.stack the element ancestry, which is needed to attribute
        # common sub-elements such as the following to the right parent:
        # <CifsServers>
        #     <li> server_1 </li>
        # </CifsServers>
        # <Aliases>
        #     <li> interface_1 </li>
        # </Aliases>
self.tag = None
self.elt = {}
self.stack = []
@staticmethod
def _delete_ns(tag):
i = tag.find('}')
if i >= 0:
tag = tag[i + 1:]
return tag
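    # Example (illustrative): ElementTree reports namespaced tags in Clark
    # notation, so _delete_ns('{urn:example}Mover') returns 'Mover', while a
    # tag without a namespace is returned unchanged.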
def parse(self, xml):
result = {
'type': None,
'taskId': None,
'maxSeverity': None,
'objects': [],
'problems': [],
}
events = ("start", "end")
context = etree.ElementTree.iterparse(six.BytesIO(xml.encode('utf-8')),
events=events)
for action, elem in context:
self.tag = self._delete_ns(elem.tag)
func = self._get_func(action, self.tag)
self.track_stack(action, elem)
            if func in vars(XMLAPIParser):
                # start and end handlers share the same signature, so we can
                # dispatch directly instead of going through eval()
                getattr(self, func)(elem, result)
return result
def track_stack(self, action, elem):
if action == 'start':
self.stack.append(elem)
elif action == 'end':
self.stack.pop()
@staticmethod
def _get_func(action, tag):
if tag == 'W2KServerData':
return action + '_' + 'w2k_server_data'
temp_list = re.sub(r"([A-Z])", r" \1", tag).split()
if temp_list:
func_name = action + '_' + '_'.join(temp_list)
else:
func_name = action + '_' + tag
return func_name.lower()
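    # Examples (illustrative): _get_func('start', 'MoverStatus') returns
    # 'start_mover_status' and _get_func('end', 'NfsExport') returns
    # 'end_nfs_export'; 'W2KServerData' is special-cased above because the
    # digit would otherwise break the CamelCase split.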
@staticmethod
def _copy_property(source, target):
for key in source:
target[key] = source[key]
@classmethod
def _append_elm_property(cls, elm, result, identifier):
for obj in result['objects']:
if cls.has_identifier(obj, elm, identifier):
for key, value in elm.attrib.items():
obj[key] = value
@staticmethod
def has_identifier(obj, elm, identifier):
return (identifier in obj and
identifier in elm.attrib and
elm.attrib[identifier] == obj[identifier])
def _append_element(self, elm, result, identifier):
sub_elm = {}
self._copy_property(elm.attrib, sub_elm)
for obj in result['objects']:
if self.has_identifier(obj, elm, identifier):
if self.tag in obj:
obj[self.tag].append(sub_elm)
else:
obj[self.tag] = [sub_elm]
def start_task_response(self, elm, result):
result['type'] = 'TaskResponse'
self._copy_property(elm.attrib, result)
@staticmethod
def start_fault(_, result):
result['type'] = 'Fault'
def _parent_tag(self):
if len(self.stack) >= 2:
parent = self.stack[-2]
ret = self._delete_ns(parent.tag)
else:
ret = None
return ret
def start_status(self, elm, result):
parent_tag = self._parent_tag()
if parent_tag == 'TaskResponse':
result['maxSeverity'] = elm.attrib['maxSeverity']
elif parent_tag in ['MoverStatus', 'Vdm', 'MoverHost']:
self.elt['maxSeverity'] = elm.attrib['maxSeverity']
def start_query_status(self, elm, result):
result['type'] = 'QueryStatus'
self._copy_property(elm.attrib, result)
def start_problem(self, elm, result):
self.elt = {}
self._copy_property(elm.attrib, self.elt)
result['problems'].append(self.elt)
def start_description(self, elm, _):
self.elt['Description'] = elm.text
def start_action(self, elm, _):
self.elt['Action'] = elm.text
def start_diagnostics(self, elm, _):
self.elt['Diagnostics'] = elm.text
def start_file_system(self, elm, result):
self._as_object(elm, result)
def start_file_system_capacity_info(self, elm, result):
identifier = 'fileSystem'
self._append_elm_property(elm, result, identifier)
def start_storage_pool(self, elm, result):
self._as_object(elm, result)
def start_system_storage_pool_data(self, elm, _):
self._copy_property(elm.attrib, self.elt)
def start_mover(self, elm, result):
self._as_object(elm, result)
def start_mover_host(self, elm, result):
self._as_object(elm, result)
def start_nfs_export(self, elm, result):
self._as_object(elm, result)
def _as_object(self, elm, result):
self.elt = {}
self._copy_property(elm.attrib, self.elt)
result['objects'].append(self.elt)
def start_mover_status(self, elm, result):
identifier = 'mover'
self._append_elm_property(elm, result, identifier)
def start_mover_route(self, elm, result):
self._append_element(elm, result, 'mover')
def start_mover_deduplication_settings(self, elm, result):
self._append_element(elm, result, 'mover')
def start_mover_dns_domain(self, elm, result):
self._append_element(elm, result, 'mover')
def start_mover_interface(self, elm, result):
self._append_element(elm, result, 'mover')
def start_logical_network_device(self, elm, result):
self._append_element(elm, result, 'mover')
def start_vdm(self, elm, result):
self._as_object(elm, result)
def _add_element(self, name, item):
if name not in self.elt:
self.elt[name] = []
self.elt[name].append(item)
def start_li(self, elm, _):
parent_tag = self._parent_tag()
host_nodes = ('AccessHosts', 'RwHosts', 'RoHosts', 'RootHosts')
if parent_tag == 'CifsServers':
self._add_element('CifsServers', elm.text)
elif parent_tag == 'Aliases':
self._add_element('Aliases', elm.text)
elif parent_tag == 'Interfaces':
self._add_element('Interfaces', elm.text)
elif parent_tag in host_nodes:
if parent_tag not in self.elt:
self.elt[parent_tag] = []
self.elt[parent_tag].append(elm.text)
def start_cifs_server(self, elm, result):
self._as_object(elm, result)
def start_w2k_server_data(self, elm, _):
self._copy_property(elm.attrib, self.elt)
def start_cifs_share(self, elm, result):
self._as_object(elm, result)
def start_checkpoint(self, elm, result):
self._as_object(elm, result)
def start_ro_file_system_hosts(self, elm, _):
self._copy_property(elm.attrib, self.elt)
def start_standalone_server_data(self, elm, _):
self._copy_property(elm.attrib, self.elt)
def start_fibre_channel_device_data(self, elm, _):
self._copy_attrib_to_parent(elm)
def start_network_device_data(self, elm, _):
self._copy_attrib_to_parent(elm)
def _copy_attrib_to_parent(self, elm):
if len(self.stack) >= 2:
parent = self.stack[-2]
for k, v in elm.attrib.items():
parent.attrib[k] = v
def start_mover_motherboard(self, elm, result):
self._append_element(elm, result, 'moverHost')
def end_physical_device(self, elm, result):
self._append_element(elm, result, 'moverHost')
def start_fc_descriptor(self, elm, result):
self._append_element(elm, result, 'moverHost')
def start_mount(self, elm, result):
self._as_object(elm, result)
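# Illustrative sketch (not part of the parser API): parsing a minimal,
# hypothetical TaskResponse document shows how element attributes end up on
# the result dict returned by parse().
def _example_parse():
    """Parse a tiny XML snippet and return the resulting dict."""
    xml = ('<TaskResponse taskId="123">'
           '<Status maxSeverity="ok"/>'
           '</TaskResponse>')
    # Expected result, roughly: {'type': 'TaskResponse', 'taskId': '123',
    # 'maxSeverity': 'ok', 'objects': [], 'problems': []}
    return XMLAPIParser().parse(xml)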
|
|
"""
Data API for Assembly entities. This API provides methods for retrieving
summary information such as GC content, total length, and external source
information, as well as methods for retrieving individual contig sequences
and gathering contig lengths and contig GC content.
"""
# Stdlib
import abc
import requests
import re
import string
import hashlib
try:
import cStringIO as StringIO
except ImportError:
import StringIO as StringIO
# Local
from doekbase.data_api.core import ObjectAPI
CHUNK_SIZE = 2**30
_CONTIGSET_TYPES = ['KBaseGenomes.ContigSet']
_ASSEMBLY_TYPES = ['KBaseGenomesCondensedPrototypeV2.Assembly']
TYPES = _CONTIGSET_TYPES + _ASSEMBLY_TYPES
class AssemblyInterface(object):
"""API for the assembled sequences associated with a Genome Annotation.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_assembly_id(self):
"""Retrieve the id for an Assembly.
Returns:
str: string identifier for the Assembly
"""
pass
@abc.abstractmethod
def get_genome_annotations(self):
"""Retrieve the GenomeAnnotations that refer to this Assembly.
Returns:
list<GenomeAnnotationAPI>: List of GenomeAnnotationAPI objects
"""
pass
@abc.abstractmethod
def get_external_source_info(self):
"""Retrieve the external source information for this Assembly.
Returns:
            dict: external source information for this Assembly
"""
pass
@abc.abstractmethod
def get_stats(self):
"""Retrieve the derived statistical information about this Assembly.
Returns:
dict: Statistics with the following keys and values:
gc_content : float
proportion of guanine (G) and cytosine (C) content
dna_size : int
total length of all dna sequences
num_contigs : int
total number of contiguous sequences
"""
pass
@abc.abstractmethod
def get_number_contigs(self):
"""Retrieve the number of contiguous sequences in this Assembly.
Returns:
int
"""
pass
@abc.abstractmethod
def get_gc_content(self):
"""Retrieve the total GC content for this Assembly.
Returns:
float: Proportion of GC content, 0 <= x <= 1.
"""
pass
@abc.abstractmethod
def get_dna_size(self):
"""Retrieve the total DNA size for this Assembly.
Returns:
int: Total DNA size
"""
pass
@abc.abstractmethod
def get_contig_lengths(self, contig_id_list=None):
"""Retrieve the length for every contiguous sequence.
Returns:
dict<str,int>: Mapping of sequence identifiers to lengths.
"""
pass
@abc.abstractmethod
def get_contig_gc_content(self, contig_id_list=None):
"""Retrieve the total GC content for each contiguous sequence
of this Assembly.
Returns:
dict<str,float>: Mapping of sequence identifiers to GC content.
"""
pass
@abc.abstractmethod
def get_contig_ids(self):
"""Retrieve the ids for every contiguous sequence in this Assembly.
Returns:
list<str>: Sequence identifiers
"""
pass
@abc.abstractmethod
def get_contigs(self, contig_id_list=None):
"""Retrieve contiguous sequences from this Assembly by id.
Args:
contig_id_list: list<str>
Returns:
dict<str,dict>: dictionary of contigs, with contig id as key
and each value itself a dict with the following key/value pairs:
contig_id : str
the contig identifier
length : integer
length of the contig
md5 : string
hex-digest of MD5 hash of the contig's contents,
name : string
name of the contig
description : string
description of the contig
is_complete : int
                1 if this contig is complete, 0 otherwise
is_circular : int
0 or 1
sequence : string
actual contents of the sequence for this contig
"""
pass
class AssemblyAPI(ObjectAPI, AssemblyInterface):
def __init__(self, services, token, ref):
"""Defines which types and type versions that are legal.
"""
super(AssemblyAPI, self).__init__(services, token, ref)
is_assembly_type = self._typestring.split('-')[0] in _ASSEMBLY_TYPES
is_contigset_type = self._typestring.split('-')[0] in _CONTIGSET_TYPES
if not (is_assembly_type or is_contigset_type):
raise TypeError("Invalid type! Expected one of {0}, received {1}".format(TYPES, self._typestring))
if is_assembly_type:
self.proxy = _Prototype(services, token, ref)
else:
self.proxy = _KBaseGenomes_ContigSet(services, token, ref)
def get_assembly_id(self):
return self.proxy.get_assembly_id()
def get_genome_annotations(self):
return self.proxy.get_genome_annotations()
def get_external_source_info(self):
return self.proxy.get_external_source_info()
def get_stats(self):
return self.proxy.get_stats()
def get_number_contigs(self):
return self.proxy.get_number_contigs()
def get_gc_content(self):
return self.proxy.get_gc_content()
def get_dna_size(self):
return self.proxy.get_dna_size()
def get_contig_lengths(self, contig_id_list=None):
return self.proxy.get_contig_lengths(contig_id_list)
def get_contig_gc_content(self, contig_id_list=None):
return self.proxy.get_contig_gc_content(contig_id_list)
def get_contig_ids(self):
return self.proxy.get_contig_ids()
def get_contigs(self, contig_id_list=None):
return self.proxy.get_contigs(contig_id_list)
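# Illustrative usage (hypothetical service URLs, token, and object reference;
# the exact keys required in `services` depend on the deployment):
#
#     asm = AssemblyAPI(services={"workspace_service_url": "https://.../services/ws/",
#                                 "shock_service_url": "https://.../services/shock-api/"},
#                       token="<auth token>",
#                       ref="MyWorkspace/MyAssembly")
#     asm.get_stats()  # {'gc_content': ..., 'dna_size': ..., 'num_contigs': ...}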
class _KBaseGenomes_ContigSet(ObjectAPI, AssemblyInterface):
def __init__(self, services, token, ref):
super(_KBaseGenomes_ContigSet, self).__init__(services, token, ref)
def get_assembly_id(self):
return self.get_data_subset(path_list=["id"])["id"]
def get_genome_annotations(self):
from doekbase.data_api.annotation.genome_annotation import TYPES as genome_annotation_types
from doekbase.data_api.annotation.genome_annotation import GenomeAnnotationAPI
annotations = list()
referrers = self.get_referrers()
for x in referrers:
if x.split("-")[0] in genome_annotation_types:
for ref in referrers[x]:
annotations.append(
GenomeAnnotationAPI(self.services, self._token, ref)
)
return annotations
def get_external_source_info(self):
data = self.get_data_subset(path_list=["source","source_id"])
output = dict()
output["external_source"] = data["source"]
output["external_source_id"] = data["source_id"]
output["external_source_origination_date"] = "Unknown"
return output
def get_stats(self):
contigs = self.get_data()["contigs"]
pattern = re.compile(r'g|G|c|C')
total_gc = 0
for c in contigs:
total_gc += len([s for s in re.finditer(pattern, c["sequence"])])
total_length = 0
for x in contigs:
if "length" in x:
total_length += x["length"]
else:
total_length += len(x["sequence"])
data = dict()
data["gc_content"] = total_gc/(total_length*1.0)
data["dna_size"] = total_length
data["num_contigs"] = len(contigs)
return data
def get_number_contigs(self):
return len(self.get_data()["contigs"])
def get_gc_content(self):
contigs = self.get_data()["contigs"]
pattern = re.compile(r'g|G|c|C')
total_gc = 0
total_length = 0
for c in contigs:
if "length" in c:
total_length += c["length"]
else:
total_length += len(c["sequence"])
total_gc += len([s for s in re.finditer(pattern, c["sequence"])])
return total_gc/(total_length*1.0)
def get_dna_size(self):
contigs = self.get_data()["contigs"]
return sum([c["length"] for c in contigs])
def get_contig_lengths(self, contig_id_list=None):
contigs = self.get_data()["contigs"]
if contig_id_list is None:
contig_id_list = [c["id"] for c in contigs]
contig_lengths = dict()
for c in contigs:
if c["id"] in contig_id_list:
if "length" in c:
contig_lengths[c["id"]] = c["length"]
else:
contig_lengths[c["id"]] = len(c["sequence"])
return contig_lengths
def get_contig_gc_content(self, contig_id_list=None):
contigs = self.get_data()["contigs"]
pattern = re.compile(r'g|G|c|C')
contigs_gc = dict()
if contig_id_list is None:
contig_id_list = [c["id"] for c in contigs]
for c in contigs:
if "length" in c:
length = c["length"] * 1.0
else:
length = len(c["sequence"]) * 1.0
contigs_gc[c["id"]] = len([s for s in re.finditer(pattern, c["sequence"])])/length
return contigs_gc
def get_contig_ids(self):
contigs = self.get_data()["contigs"]
return [c["id"] for c in contigs]
def get_contigs(self, contig_id_list=None):
pattern = re.compile(r'g|G|c|C')
contigs = dict()
raw_contigs = self.get_data()["contigs"]
if contig_id_list is None or len(contig_id_list) == 0:
matches = raw_contigs
else:
matches = [c for c in raw_contigs if c["id"] in contig_id_list]
for c in matches:
contigs[c["id"]] = dict()
contigs[c["id"]]["contig_id"] = c["id"]
contigs[c["id"]]["sequence"] = c["sequence"]
if "length" in c:
contigs[c["id"]]["length"] = c["length"]
else:
contigs[c["id"]]["length"] = len(c["sequence"])
if "md5" in c:
contigs[c["id"]]["md5"] = c["md5"]
else:
contigs[c["id"]]["md5"] = hashlib.md5(c["sequence"].upper()).hexdigest()
if "name" in c:
contigs[c["id"]]["name"] = c["name"]
else:
contigs[c["id"]]["name"] = None
if "description" in c:
contigs[c["id"]]["description"] = c["description"]
else:
contigs[c["id"]]["description"] = None
if "complete" in c:
contigs[c["id"]]["is_complete"] = c["complete"]
else:
contigs[c["id"]]["is_complete"] = 0
if "replicon_geometry" in c:
contigs[c["id"]]["is_circular"] = c["replicon_geometry"]
else:
contigs[c["id"]]["is_circular"] = "Unknown"
contigs[c["id"]]["gc_content"] = len([s for s in re.finditer(pattern, c["sequence"])])/(contigs[c["id"]]["length"] * 1.0)
return contigs
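# Equivalent sketch (illustrative, not used by the classes here): the GC values
# computed above with the r'g|G|c|C' pattern are simply the share of G/C
# characters in a sequence, which can be written without a regex as:
def _gc_fraction(sequence):
    """Return the fraction of G/C characters in a nucleotide sequence."""
    if not sequence:
        return 0.0
    return sum(1 for base in sequence if base in "gGcC") / (len(sequence) * 1.0)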
class _Prototype(ObjectAPI, AssemblyInterface):
def __init__(self, services, token, ref):
super(_Prototype, self).__init__(services, token, ref)
def get_assembly_id(self):
return self.get_data_subset(path_list=["assembly_id"])["assembly_id"]
def get_genome_annotations(self):
import doekbase.data_api.annotation.genome_annotation
referrers = self.get_referrers()
annotations = list()
for object_type in referrers:
if object_type.split('-')[0] in doekbase.data_api.annotation.genome_annotation.TYPES:
for x in referrers[object_type]:
annotations.append(doekbase.data_api.annotation.genome_annotation.GenomeAnnotationAPI(
self.services, self._token, ref=x))
return annotations
def get_external_source_info(self):
return self.get_data_subset(path_list=["external_source",
"external_source_id",
"external_source_origination_date"])
def get_stats(self):
return self.get_data_subset(path_list=["gc_content","dna_size","num_contigs"])
def get_number_contigs(self):
return self.get_data_subset(path_list=["num_contigs"])["num_contigs"]
def get_gc_content(self):
return self.get_data_subset(path_list=["gc_content"])["gc_content"]
def get_dna_size(self):
return self.get_data_subset(path_list=["dna_size"])["dna_size"]
def get_contig_lengths(self, contig_id_list=None):
if contig_id_list is None:
contigs = self.get_data()["contigs"]
return {c: contigs[c]["length"] for c in contigs}
else:
contigs = self.get_data_subset(["contigs/" + x for x in contig_id_list])["contigs"]
return {c: contigs[c]["length"] for c in contig_id_list}
def get_contig_gc_content(self, contig_id_list=None):
if contig_id_list is None:
contigs = self.get_data()["contigs"]
return {c: contigs[c]["gc_content"] for c in contigs}
else:
contigs = self.get_data_subset(["contigs/" + x for x in contig_id_list])["contigs"]
return {c: contigs[c]["gc_content"] for c in contig_id_list}
def get_contig_ids(self):
contigs = self.get_data()["contigs"]
return [contigs[c]["contig_id"] for c in contigs]
def get_contigs(self, contig_id_list=None):
data = self.get_data()
if contig_id_list is None:
contig_id_list = data["contigs"].keys()
num_contigs = len(contig_id_list)
total_contigs = data["num_contigs"]
fasta_ref = data["fasta_handle_ref"]
contigs = data["contigs"]
copy_keys = ["contig_id", "length", "md5", "name", "description", "is_complete", "is_circular"]
header = dict()
header["Authorization"] = "Oauth {0}".format(self._token)
if num_contigs > total_contigs/3 or num_contigs == 0:
Retrieve_url = self.services["shock_service_url"] + "node/" + fasta_ref + "?download_raw"
#Retrieve all sequence
data = requests.get(Retrieve_url, headers=header, stream=True)
buffer = StringIO.StringIO()
for chunk in data.iter_content(CHUNK_SIZE):
if chunk:
buffer.write(chunk)
sequence_data = buffer.getvalue()
buffer.close()
if num_contigs == 0:
contig_id_list = contigs.keys()
num_contigs = total_contigs
assert num_contigs == len(contig_id_list)
outContigs = dict()
for i in xrange(num_contigs):
c = contig_id_list[i]
outContigs[c] = dict()
for k in copy_keys:
if k in contigs[c]:
outContigs[c][k] = contigs[c][k]
outContigs[c]["sequence"] = sequence_data[contigs[c]["start_position"]:contigs[c]["start_position"] + \
contigs[c]["num_bytes"]].translate(None, string.whitespace)
else:
def fetch_contig(start, length):
fetch_url = self.services["shock_service_url"] + "node/" + fasta_ref + \
"?download&seek=" + str(start) + \
"&length=" + str(length)
#Retrieve individual sequences
data = requests.get(fetch_url, headers=header, stream=True)
buffer = StringIO.StringIO()
try:
for chunk in data.iter_content(CHUNK_SIZE):
if chunk:
buffer.write(chunk)
sequence = buffer.getvalue().translate(None, string.whitespace)
except:
raise
finally:
buffer.close()
return sequence
outContigs = dict()
sorted_contigs = sorted(contig_id_list,
cmp=lambda a,b: cmp(contigs[a]["start_position"], contigs[b]["start_position"]))
for c in sorted_contigs:
outContigs[c] = dict()
for k in copy_keys:
if k in contigs[c]:
outContigs[c][k] = contigs[c][k]
outContigs[c]["sequence"] = fetch_contig(contigs[c]["start_position"],contigs[c]["num_bytes"])
return outContigs
|
|
import logging
from ray.rllib.agents import with_common_config
from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.optimizers import SyncSamplesOptimizer, \
LocalMultiGPUOptimizer, TorchDistributedDataParallelOptimizer
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# Should use a critic as a baseline (otherwise don't use value baseline;
# required for using GAE).
"use_critic": True,
# If true, use the Generalized Advantage Estimator (GAE)
# with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
"use_gae": True,
# The GAE(lambda) parameter.
"lambda": 1.0,
# Initial coefficient for KL divergence.
"kl_coeff": 0.2,
# Size of batches collected from each worker.
"sample_batch_size": 200,
# Number of timesteps collected for each SGD round. This defines the size
# of each SGD epoch.
"train_batch_size": 4000,
# Total SGD batch size across all devices for SGD. This defines the
# minibatch size within each epoch.
"sgd_minibatch_size": 128,
# Whether to shuffle sequences in the batch when training (recommended).
"shuffle_sequences": True,
# Number of SGD iterations in each outer loop (i.e., number of epochs to
# execute per train batch).
"num_sgd_iter": 30,
# Stepsize of SGD.
"lr": 5e-5,
# Learning rate schedule.
"lr_schedule": None,
# Share layers for value function. If you set this to True, it's important
# to tune vf_loss_coeff.
"vf_share_layers": False,
# Coefficient of the value function loss. IMPORTANT: you must tune this if
# you set vf_share_layers: True.
"vf_loss_coeff": 1.0,
# Coefficient of the entropy regularizer.
"entropy_coeff": 0.0,
# Decay schedule for the entropy regularizer.
"entropy_coeff_schedule": None,
# PPO clip parameter.
"clip_param": 0.3,
# Clip param for the value function. Note that this is sensitive to the
# scale of the rewards. If your expected V is large, increase this.
"vf_clip_param": 10.0,
# If specified, clip the global norm of gradients by this amount.
"grad_clip": None,
# Target value for KL divergence.
"kl_target": 0.01,
# Whether to rollout "complete_episodes" or "truncate_episodes".
"batch_mode": "truncate_episodes",
# Which observation filter to apply to the observation.
"observation_filter": "NoFilter",
# Uses the sync samples optimizer instead of the multi-gpu one. This is
# usually slower, but you might want to try it if you run into issues with
# the default optimizer.
"simple_optimizer": False,
# Use the experimental torch multi-node SGD optimizer.
"distributed_data_parallel_optimizer": False,
# Use PyTorch as framework?
"use_pytorch": False
})
# __sphinx_doc_end__
# yapf: enable
def choose_policy_optimizer(workers, config):
if config["distributed_data_parallel_optimizer"]:
if not config["use_pytorch"]:
raise ValueError(
"Distributed data parallel is only supported for PyTorch")
if config["num_gpus"]:
raise ValueError(
"When using distributed data parallel, you should set "
"num_gpus=0 since all optimization "
"is happening on workers. Enable GPUs for workers by setting "
"num_gpus_per_worker=1.")
if config["batch_mode"] != "truncate_episodes":
raise ValueError(
"Distributed data parallel requires truncate_episodes "
"batch mode.")
if config["sample_batch_size"] != config["train_batch_size"]:
raise ValueError(
"Distributed data parallel requires sample_batch_size to be "
"equal to train_batch_size. Each worker will sample and learn "
"on train_batch_size samples per iteration.")
return TorchDistributedDataParallelOptimizer(
workers,
num_sgd_iter=config["num_sgd_iter"],
train_batch_size=config["train_batch_size"],
sgd_minibatch_size=config["sgd_minibatch_size"],
standardize_fields=["advantages"])
if config["simple_optimizer"]:
return SyncSamplesOptimizer(
workers,
num_sgd_iter=config["num_sgd_iter"],
train_batch_size=config["train_batch_size"],
sgd_minibatch_size=config["sgd_minibatch_size"],
standardize_fields=["advantages"])
return LocalMultiGPUOptimizer(
workers,
sgd_batch_size=config["sgd_minibatch_size"],
num_sgd_iter=config["num_sgd_iter"],
num_gpus=config["num_gpus"],
sample_batch_size=config["sample_batch_size"],
num_envs_per_worker=config["num_envs_per_worker"],
train_batch_size=config["train_batch_size"],
standardize_fields=["advantages"],
shuffle_sequences=config["shuffle_sequences"])
def update_kl(trainer, fetches):
if "kl" in fetches:
# single-agent
trainer.workers.local_worker().for_policy(
lambda pi: pi.update_kl(fetches["kl"]))
else:
def update(pi, pi_id):
if pi_id in fetches:
pi.update_kl(fetches[pi_id]["kl"])
else:
logger.debug("No data for {}, not updating kl".format(pi_id))
# multi-agent
trainer.workers.local_worker().foreach_trainable_policy(update)
def warn_about_bad_reward_scales(trainer, result):
if result["policy_reward_mean"]:
return # Punt on handling multiagent case.
# Warn about excessively high VF loss.
learner_stats = result["info"]["learner"]
if "default_policy" in learner_stats:
scaled_vf_loss = (trainer.config["vf_loss_coeff"] *
learner_stats["default_policy"]["vf_loss"])
policy_loss = learner_stats["default_policy"]["policy_loss"]
if trainer.config["vf_share_layers"] and scaled_vf_loss > 100:
logger.warning(
"The magnitude of your value function loss is extremely large "
"({}) compared to the policy loss ({}). This can prevent the "
"policy from learning. Consider scaling down the VF loss by "
"reducing vf_loss_coeff, or disabling vf_share_layers.".format(
scaled_vf_loss, policy_loss))
# Warn about bad clipping configs
if trainer.config["vf_clip_param"] <= 0:
rew_scale = float("inf")
else:
rew_scale = round(
abs(result["episode_reward_mean"]) /
trainer.config["vf_clip_param"], 0)
if rew_scale > 200:
logger.warning(
"The magnitude of your environment rewards are more than "
"{}x the scale of `vf_clip_param`. ".format(rew_scale) +
"This means that it will take more than "
"{} iterations for your value ".format(rew_scale) +
"function to converge. If this is not intended, consider "
"increasing `vf_clip_param`.")
def validate_config(config):
if config["entropy_coeff"] < 0:
raise DeprecationWarning("entropy_coeff must be >= 0")
if isinstance(config["entropy_coeff"], int):
config["entropy_coeff"] = float(config["entropy_coeff"])
if config["sgd_minibatch_size"] > config["train_batch_size"]:
raise ValueError(
"Minibatch size {} must be <= train batch size {}.".format(
config["sgd_minibatch_size"], config["train_batch_size"]))
if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]:
raise ValueError(
"Episode truncation is not supported without a value "
"function. Consider setting batch_mode=complete_episodes.")
if config["multiagent"]["policies"] and not config["simple_optimizer"]:
logger.info(
"In multi-agent mode, policies will be optimized sequentially "
"by the multi-GPU optimizer. Consider setting "
"simple_optimizer=True if this doesn't work for you.")
if config["simple_optimizer"]:
logger.warning(
"Using the simple minibatch optimizer. This will significantly "
"reduce performance, consider simple_optimizer=False.")
elif config["use_pytorch"] or (tf and tf.executing_eagerly()):
config["simple_optimizer"] = True # multi-gpu not supported
def get_policy_class(config):
if config.get("use_pytorch") is True:
from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy
return PPOTorchPolicy
else:
return PPOTFPolicy
PPOTrainer = build_trainer(
name="PPO",
default_config=DEFAULT_CONFIG,
default_policy=PPOTFPolicy,
get_policy_class=get_policy_class,
make_policy_optimizer=choose_policy_optimizer,
validate_config=validate_config,
after_optimizer_step=update_kl,
after_train_result=warn_about_bad_reward_scales)
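# Illustrative usage (assumes Ray and a Gym environment are available; not part
# of this module):
#
#     import ray
#     from ray import tune
#     ray.init()
#     tune.run(PPOTrainer,
#              stop={"training_iteration": 10},
#              config={"env": "CartPole-v0", "num_workers": 1})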
|
|
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._core import (
VectorPlotter,
)
from .utils import (
locator_to_legend_entries,
adjust_legend_subtitles,
_default_color,
_deprecate_ci,
)
from ._statistics import EstimateAggregator
from .axisgrid import FacetGrid, _facet_docs
from ._decorators import _deprecate_positional_args
from ._docstrings import (
DocstringComponents,
_core_docs,
)
__all__ = ["relplot", "scatterplot", "lineplot"]
_relational_narrative = DocstringComponents(dict(
# --- Introductory prose
main_api="""
The relationship between ``x`` and ``y`` can be shown for different subsets
of the data using the ``hue``, ``size``, and ``style`` parameters. These
parameters control what visual semantics are used to identify the different
subsets. It is possible to show up to three dimensions independently by
using all three semantic types, but this style of plot can be hard to
interpret and is often ineffective. Using redundant semantics (i.e. both
``hue`` and ``style`` for the same variable) can be helpful for making
graphics more accessible.
See the :ref:`tutorial <relational_tutorial>` for more information.
""",
relational_semantic="""
The default treatment of the ``hue`` (and to a lesser extent, ``size``)
semantic, if present, depends on whether the variable is inferred to
represent "numeric" or "categorical" data. In particular, numeric variables
are represented with a sequential colormap by default, and the legend
entries show regular "ticks" with values that may or may not exist in the
data. This behavior can be controlled through various parameters, as
described and illustrated below.
""",
))
_relational_docs = dict(
# --- Shared function parameters
data_vars="""
x, y : names of variables in ``data`` or vector data
Input data variables; must be numeric. Can pass data directly or
reference columns in ``data``.
""",
data="""
data : DataFrame, array, or list of arrays
Input data structure. If ``x`` and ``y`` are specified as names, this
should be a "long-form" DataFrame containing those columns. Otherwise
it is treated as "wide-form" data and grouping variables are ignored.
See the examples for the various ways this parameter can be specified
and the different effects of each.
""",
palette="""
palette : string, list, dict, or matplotlib colormap
An object that determines how colors are chosen when ``hue`` is used.
It can be the name of a seaborn palette or matplotlib colormap, a list
of colors (anything matplotlib understands), a dict mapping levels
of the ``hue`` variable to colors, or a matplotlib colormap object.
""",
hue_order="""
hue_order : list
Specified order for the appearance of the ``hue`` variable levels,
otherwise they are determined from the data. Not relevant when the
``hue`` variable is numeric.
""",
hue_norm="""
hue_norm : tuple or :class:`matplotlib.colors.Normalize` object
Normalization in data units for colormap applied to the ``hue``
variable when it is numeric. Not relevant if it is categorical.
""",
sizes="""
sizes : list, dict, or tuple
An object that determines how sizes are chosen when ``size`` is used.
It can always be a list of size values or a dict mapping levels of the
``size`` variable to sizes. When ``size`` is numeric, it can also be
a tuple specifying the minimum and maximum size to use such that other
values are normalized within this range.
""",
size_order="""
size_order : list
Specified order for appearance of the ``size`` variable levels,
otherwise they are determined from the data. Not relevant when the
``size`` variable is numeric.
""",
size_norm="""
size_norm : tuple or Normalize object
Normalization in data units for scaling plot objects when the
``size`` variable is numeric.
""",
dashes="""
dashes : boolean, list, or dictionary
Object determining how to draw the lines for different levels of the
``style`` variable. Setting to ``True`` will use default dash codes, or
you can pass a list of dash codes or a dictionary mapping levels of the
``style`` variable to dash codes. Setting to ``False`` will use solid
lines for all subsets. Dashes are specified as in matplotlib: a tuple
of ``(segment, gap)`` lengths, or an empty string to draw a solid line.
""",
markers="""
markers : boolean, list, or dictionary
Object determining how to draw the markers for different levels of the
``style`` variable. Setting to ``True`` will use default markers, or
you can pass a list of markers or a dictionary mapping levels of the
``style`` variable to markers. Setting to ``False`` will draw
marker-less lines. Markers are specified as in matplotlib.
""",
style_order="""
style_order : list
Specified order for appearance of the ``style`` variable levels
otherwise they are determined from the data. Not relevant when the
``style`` variable is numeric.
""",
units="""
units : vector or key in ``data``
Grouping variable identifying sampling units. When used, a separate
line will be drawn for each unit with appropriate semantics, but no
legend entry will be added. Useful for showing distribution of
experimental replicates when exact identities are not needed.
""",
estimator="""
estimator : name of pandas method or callable or None
Method for aggregating across multiple observations of the ``y``
variable at the same ``x`` level. If ``None``, all observations will
be drawn.
""",
ci="""
ci : int or "sd" or None
Size of the confidence interval to draw when aggregating.
.. deprecated:: 0.12.0
Use the new `errorbar` parameter for more flexibility.
""",
n_boot="""
n_boot : int
Number of bootstraps to use for computing the confidence interval.
""",
seed="""
seed : int, numpy.random.Generator, or numpy.random.RandomState
Seed or random number generator for reproducible bootstrapping.
""",
legend="""
legend : "auto", "brief", "full", or False
How to draw the legend. If "brief", numeric ``hue`` and ``size``
variables will be represented with a sample of evenly spaced values.
If "full", every group will get an entry in the legend. If "auto",
choose between brief or full representation based on number of levels.
If ``False``, no legend data is added and no legend is drawn.
""",
ax_in="""
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses the current Axes.
""",
ax_out="""
ax : matplotlib Axes
Returns the Axes object with the plot drawn onto it.
""",
)
_param_docs = DocstringComponents.from_nested_components(
core=_core_docs["params"],
facets=DocstringComponents(_facet_docs),
rel=DocstringComponents(_relational_docs),
stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),
)
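# Note (illustrative): the components above are interpolated into the public
# plotting functions' docstrings later in this module via str.format, using
# placeholders such as "{params.core.data}" or "{narrative.main_api}".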
class _RelationalPlotter(VectorPlotter):
wide_structure = {
"x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
}
# TODO where best to define default parameters?
sort = True
def add_legend_data(self, ax):
"""Add labeled artists to represent the different plot semantics."""
verbosity = self.legend
if isinstance(verbosity, str) and verbosity not in ["auto", "brief", "full"]:
err = "`legend` must be 'auto', 'brief', 'full', or a boolean."
raise ValueError(err)
elif verbosity is True:
verbosity = "auto"
legend_kwargs = {}
keys = []
# Assign a legend title if there is only going to be one sub-legend,
# otherwise, subtitles will be inserted into the texts list with an
# invisible handle (which is a hack)
titles = {
title for title in
(self.variables.get(v, None) for v in ["hue", "size", "style"])
if title is not None
}
if len(titles) == 1:
legend_title = titles.pop()
else:
legend_title = ""
title_kws = dict(
visible=False, color="w", s=0, linewidth=0, marker="", dashes=""
)
def update(var_name, val_name, **kws):
key = var_name, val_name
if key in legend_kwargs:
legend_kwargs[key].update(**kws)
else:
keys.append(key)
legend_kwargs[key] = dict(**kws)
# Define the maximum number of ticks to use for "brief" legends
brief_ticks = 6
# -- Add a legend for hue semantics
brief_hue = self._hue_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._hue_map.levels) > brief_ticks)
)
if brief_hue:
if isinstance(self._hue_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
limits = min(self._hue_map.levels), max(self._hue_map.levels)
hue_levels, hue_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["hue"].infer_objects().dtype
)
elif self._hue_map.levels is None:
hue_levels = hue_formatted_levels = []
else:
hue_levels = hue_formatted_levels = self._hue_map.levels
# Add the hue semantic subtitle
if not legend_title and self.variables.get("hue", None) is not None:
update((self.variables["hue"], "title"),
self.variables["hue"], **title_kws)
# Add the hue semantic labels
for level, formatted_level in zip(hue_levels, hue_formatted_levels):
if level is not None:
color = self._hue_map(level)
update(self.variables["hue"], formatted_level, color=color)
# -- Add a legend for size semantics
brief_size = self._size_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._size_map.levels) > brief_ticks)
)
if brief_size:
# Define how ticks will interpolate between the min/max data values
if isinstance(self._size_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
# Define the min/max data values
limits = min(self._size_map.levels), max(self._size_map.levels)
size_levels, size_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["size"].infer_objects().dtype
)
elif self._size_map.levels is None:
size_levels = size_formatted_levels = []
else:
size_levels = size_formatted_levels = self._size_map.levels
# Add the size semantic subtitle
if not legend_title and self.variables.get("size", None) is not None:
update((self.variables["size"], "title"),
self.variables["size"], **title_kws)
# Add the size semantic labels
for level, formatted_level in zip(size_levels, size_formatted_levels):
if level is not None:
size = self._size_map(level)
update(
self.variables["size"],
formatted_level,
linewidth=size,
s=size,
)
# -- Add a legend for style semantics
# Add the style semantic title
if not legend_title and self.variables.get("style", None) is not None:
update((self.variables["style"], "title"),
self.variables["style"], **title_kws)
# Add the style semantic labels
if self._style_map.levels is not None:
for level in self._style_map.levels:
if level is not None:
attrs = self._style_map(level)
update(
self.variables["style"],
level,
marker=attrs.get("marker", ""),
dashes=attrs.get("dashes", ""),
)
func = getattr(ax, self._legend_func)
legend_data = {}
legend_order = []
for key in keys:
_, label = key
kws = legend_kwargs[key]
kws.setdefault("color", ".2")
use_kws = {}
for attr in self._legend_attributes + ["visible"]:
if attr in kws:
use_kws[attr] = kws[attr]
artist = func([], [], label=label, **use_kws)
if self._legend_func == "plot":
artist = artist[0]
legend_data[key] = artist
legend_order.append(key)
self.legend_title = legend_title
self.legend_data = legend_data
self.legend_order = legend_order
class _LinePlotter(_RelationalPlotter):
_legend_attributes = ["color", "linewidth", "marker", "dashes"]
_legend_func = "plot"
def __init__(
self, *,
data=None, variables={},
estimator=None, ci=None, n_boot=None, seed=None,
sort=True, err_style=None, err_kws=None, legend=None,
errorbar=None,
):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
)
super().__init__(data=data, variables=variables)
self.estimator = estimator
self.errorbar = errorbar
self.ci = ci
self.n_boot = n_boot
self.seed = seed
self.sort = sort
self.err_style = err_style
self.err_kws = {} if err_kws is None else err_kws
self.legend = legend
def plot(self, ax, kws):
"""Draw the plot onto an axes, passing matplotlib kwargs."""
# Draw a test plot, using the passed in kwargs. The goal here is to
# honor both (a) the current state of the plot cycler and (b) the
# specified kwargs on all the lines we will draw, overriding when
# relevant with the data semantics. Note that we won't cycle
# internally; in other words, if ``hue`` is not used, all elements will
# have the same color, but they will have the color that you would have
# gotten from the corresponding matplotlib function, and calling the
# function will advance the axes property cycle.
kws.setdefault("markeredgewidth", kws.pop("mew", .75))
kws.setdefault("markeredgecolor", kws.pop("mec", "w"))
# Set default error kwargs
err_kws = self.err_kws.copy()
if self.err_style == "band":
err_kws.setdefault("alpha", .2)
elif self.err_style == "bars":
pass
elif self.err_style is not None:
err = "`err_style` must be 'band' or 'bars', not {}"
raise ValueError(err.format(self.err_style))
# Initialize the aggregation object
agg = EstimateAggregator(
self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed,
)
# TODO abstract variable to aggregate over here-ish. Better name?
agg_var = "y"
grouper = ["x"]
# TODO How to handle NA? We don't want NA to propagate through to the
# estimate/CI when some values are present, but we would also like
# matplotlib to show "gaps" in the line when all values are missing.
# This is straightforward absent aggregation, but complicated with it.
# If we want to use nas, we need to conditionalize dropna in iter_data.
# Loop over the semantic subsets and add to the plot
grouping_vars = "hue", "size", "style"
for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):
if self.sort:
sort_vars = ["units", "x", "y"]
sort_cols = [var for var in sort_vars if var in self.variables]
sub_data = sub_data.sort_values(sort_cols)
if self.estimator is not None:
if "units" in self.variables:
# TODO eventually relax this constraint
err = "estimator must be None when specifying units"
raise ValueError(err)
grouped = sub_data.groupby(grouper, sort=self.sort)
# Could pass as_index=False instead of reset_index,
# but that fails on a corner case with older pandas.
sub_data = grouped.apply(agg, agg_var).reset_index()
# TODO this is pretty ad hoc; see GH2409
for var in "xy":
if self._log_scaled(var):
for col in sub_data.filter(regex=f"^{var}"):
sub_data[col] = np.power(10, sub_data[col])
# --- Draw the main line(s)
if "units" in self.variables: # XXX why not add to grouping variables?
lines = []
for _, unit_data in sub_data.groupby("units"):
lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws))
else:
lines = ax.plot(sub_data["x"], sub_data["y"], **kws)
for line in lines:
if "hue" in sub_vars:
line.set_color(self._hue_map(sub_vars["hue"]))
if "size" in sub_vars:
line.set_linewidth(self._size_map(sub_vars["size"]))
if "style" in sub_vars:
attributes = self._style_map(sub_vars["style"])
if "dashes" in attributes:
line.set_dashes(attributes["dashes"])
if "marker" in attributes:
line.set_marker(attributes["marker"])
line_color = line.get_color()
line_alpha = line.get_alpha()
line_capstyle = line.get_solid_capstyle()
# --- Draw the confidence intervals
if self.estimator is not None and self.errorbar is not None:
# TODO handling of orientation will need to happen here
if self.err_style == "band":
ax.fill_between(
sub_data["x"], sub_data["ymin"], sub_data["ymax"],
color=line_color, **err_kws
)
elif self.err_style == "bars":
error_deltas = (
sub_data["y"] - sub_data["ymin"],
sub_data["ymax"] - sub_data["y"],
)
ebars = ax.errorbar(
sub_data["x"], sub_data["y"], error_deltas,
linestyle="", color=line_color, alpha=line_alpha,
**err_kws
)
# Set the capstyle properly on the error bars
for obj in ebars.get_children():
if isinstance(obj, mpl.collections.LineCollection):
obj.set_capstyle(line_capstyle)
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
class _ScatterPlotter(_RelationalPlotter):
_legend_attributes = ["color", "s", "marker"]
_legend_func = "scatter"
def __init__(
self, *,
data=None, variables={},
x_bins=None, y_bins=None,
estimator=None, ci=None, n_boot=None,
alpha=None, x_jitter=None, y_jitter=None,
legend=None
):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
)
super().__init__(data=data, variables=variables)
self.alpha = alpha
self.legend = legend
def plot(self, ax, kws):
# --- Determine the visual attributes of the plot
data = self.plot_data.dropna()
if data.empty:
return
# Define the vectors of x and y positions
empty = np.full(len(data), np.nan)
x = data.get("x", empty)
y = data.get("y", empty)
# Set defaults for other visual attributes
kws.setdefault("edgecolor", "w")
if "style" in self.variables:
# Use a representative marker so scatter sets the edgecolor
# properly for line art markers. We currently enforce either
# all or none line art so this works.
example_level = self._style_map.levels[0]
example_marker = self._style_map(example_level, "marker")
kws.setdefault("marker", example_marker)
# TODO this makes it impossible to vary alpha with hue which might
# otherwise be useful? Should we just pass None?
kws["alpha"] = 1 if self.alpha == "auto" else self.alpha
# Draw the scatter plot
points = ax.scatter(x=x, y=y, **kws)
# Apply the mapping from semantic variables to artist attributes
if "hue" in self.variables:
points.set_facecolors(self._hue_map(data["hue"]))
if "size" in self.variables:
points.set_sizes(self._size_map(data["size"]))
if "style" in self.variables:
p = [self._style_map(val, "path") for val in data["style"]]
points.set_paths(p)
# Apply dependent default attributes
if "linewidth" not in kws:
sizes = points.get_sizes()
points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
@_deprecate_positional_args
def lineplot(
*,
x=None, y=None,
hue=None, size=None, style=None,
data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=True, markers=None, style_order=None,
units=None, estimator="mean", ci="deprecated", n_boot=1000, seed=None,
sort=True, err_style="band", err_kws=None,
legend="auto",
errorbar=("ci", 95),
ax=None, **kwargs
):
# Handle deprecation of ci parameter
errorbar = _deprecate_ci(errorbar, ci)
variables = _LinePlotter.get_semantics(locals())
p = _LinePlotter(
data=data, variables=variables,
estimator=estimator, ci=ci, n_boot=n_boot, seed=seed,
sort=sort, err_style=err_style, err_kws=err_kws, legend=legend,
errorbar=errorbar,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
if ax is None:
ax = plt.gca()
if style is None and not {"ls", "linestyle"} & set(kwargs): # XXX
kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", kwargs.pop("c", None))
kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
lineplot.__doc__ = """\
Draw a line plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
By default, the plot aggregates over multiple ``y`` values at each value of
``x`` and shows an estimate of the central tendency and a confidence
interval for that estimate.
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce lines with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : vector or key in ``data``
Grouping variable that will produce lines with different widths.
Can be either categorical or numeric, although size mapping will
behave differently in the latter case.
style : vector or key in ``data``
Grouping variable that will produce lines with different dashes
and/or markers. Can have a numeric dtype but will always be treated
as categorical.
{params.core.data}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.dashes}
{params.rel.markers}
{params.rel.style_order}
{params.rel.units}
{params.rel.estimator}
{params.rel.ci}
{params.rel.n_boot}
{params.rel.seed}
sort : boolean
If True, the data will be sorted by the x and y variables, otherwise
lines will connect points in the order they appear in the dataset.
err_style : "band" or "bars"
Whether to draw the confidence intervals with translucent error bands
or discrete error bars.
err_kws : dict of keyword arguments
Additional parameters to control the aesthetics of the error bars. The
kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between`
or :meth:`matplotlib.axes.Axes.errorbar`, depending on ``err_style``.
{params.rel.legend}
{params.stat.errorbar}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.scatterplot}
{seealso.pointplot}
Examples
--------
.. include:: ../docstrings/lineplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
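# --- Editorial usage sketch (not part of the module) -----------------------
# A minimal, hedged example of calling `lineplot` as defined above; it assumes
# pandas is importable and relies on the module-level `np` and `plt` imports.
# Guarded so it never runs on import.
if __name__ == "__main__":
    import pandas as pd
    rng = np.random.RandomState(0)
    demo = pd.DataFrame({
        "x": np.tile(np.arange(10), 5),   # five observations at each x value
        "y": rng.normal(size=50),
    })
    # Repeated y values at each x are aggregated into a mean line with a 95% CI band.
    ax = lineplot(data=demo, x="x", y="y", errorbar=("ci", 95))
    plt.show()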
@_deprecate_positional_args
def scatterplot(
*,
x=None, y=None,
hue=None, style=None, size=None, data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None,
x_bins=None, y_bins=None,
units=None, estimator=None, ci=95, n_boot=1000,
alpha=None, x_jitter=None, y_jitter=None,
legend="auto", ax=None,
**kwargs
):
variables = _ScatterPlotter.get_semantics(locals())
p = _ScatterPlotter(
data=data, variables=variables,
x_bins=x_bins, y_bins=y_bins,
estimator=estimator, ci=ci, n_boot=n_boot,
alpha=alpha, x_jitter=x_jitter, y_jitter=y_jitter, legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, order=style_order)
if ax is None:
ax = plt.gca()
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", None)
kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
scatterplot.__doc__ = """\
Draw a scatter plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : vector or key in ``data``
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in the latter case.
style : vector or key in ``data``
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{params.core.data}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.markers}
{params.rel.style_order}
{{x,y}}_bins : lists or arrays or functions
*Currently non-functional.*
{params.rel.units}
*Currently non-functional.*
{params.rel.estimator}
*Currently non-functional.*
{params.rel.ci}
*Currently non-functional.*
{params.rel.n_boot}
*Currently non-functional.*
alpha : float
Proportional opacity of the points.
{{x,y}}_jitter : booleans or floats
*Currently non-functional.*
{params.rel.legend}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.lineplot}
{seealso.stripplot}
{seealso.swarmplot}
Examples
--------
.. include:: ../docstrings/scatterplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
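# --- Editorial usage sketch (not part of the module) -----------------------
# A minimal, hedged example of calling `scatterplot` as defined above, mapping
# three independent semantics; pandas is assumed importable and the
# module-level `np` and `plt` imports are relied upon.
if __name__ == "__main__":
    import pandas as pd
    rng = np.random.RandomState(1)
    demo = pd.DataFrame({
        "x": rng.uniform(0, 10, 60),
        "y": rng.normal(size=60),
        "kind": np.repeat(["a", "b", "c"], 20),
        "weight": rng.uniform(1, 5, 60),
    })
    # Color by "kind" and scale marker area by "weight" within the (20, 200) range.
    ax = scatterplot(data=demo, x="x", y="y", hue="kind", size="weight",
                     sizes=(20, 200))
    plt.show()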
@_deprecate_positional_args
def relplot(
*,
x=None, y=None,
hue=None, size=None, style=None, data=None,
row=None, col=None,
col_wrap=None, row_order=None, col_order=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=None, dashes=None, style_order=None,
legend="auto", kind="scatter",
height=5, aspect=1, facet_kws=None,
units=None,
**kwargs
):
if kind == "scatter":
plotter = _ScatterPlotter
func = scatterplot
markers = True if markers is None else markers
elif kind == "line":
plotter = _LinePlotter
func = lineplot
dashes = True if dashes is None else dashes
else:
err = "Plot kind {} not recognized".format(kind)
raise ValueError(err)
# Check for attempt to plot onto specific axes and warn
if "ax" in kwargs:
msg = (
"relplot is a figure-level function and does not accept "
"the `ax` parameter. You may wish to try {}".format(kind + "plot")
)
warnings.warn(msg, UserWarning)
kwargs.pop("ax")
# Use the full dataset to map the semantics
p = plotter(
data=data,
variables=plotter.get_semantics(locals()),
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
# Extract the semantic mappings
if "hue" in p.variables:
palette = p._hue_map.lookup_table
hue_order = p._hue_map.levels
hue_norm = p._hue_map.norm
else:
palette = hue_order = hue_norm = None
if "size" in p.variables:
sizes = p._size_map.lookup_table
size_order = p._size_map.levels
size_norm = p._size_map.norm
if "style" in p.variables:
style_order = p._style_map.levels
if markers:
markers = {k: p._style_map(k, "marker") for k in style_order}
else:
markers = None
if dashes:
dashes = {k: p._style_map(k, "dashes") for k in style_order}
else:
dashes = None
else:
markers = dashes = style_order = None
# Now extract the data that would be used to draw a single plot
variables = p.variables
plot_data = p.plot_data
plot_semantics = p.semantics
# Define the common plotting parameters
plot_kws = dict(
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
sizes=sizes, size_order=size_order, size_norm=size_norm,
markers=markers, dashes=dashes, style_order=style_order,
legend=False,
)
plot_kws.update(kwargs)
if kind == "scatter":
plot_kws.pop("dashes")
# Add the grid semantics onto the plotter
grid_semantics = "row", "col"
p.semantics = plot_semantics + grid_semantics
p.assign_variables(
data=data,
variables=dict(
x=x, y=y,
hue=hue, size=size, style=style, units=units,
row=row, col=col,
),
)
# Define the named variables for plotting on each facet
# Rename the variables with a leading underscore to avoid
# collisions with faceting variable names
plot_variables = {v: f"_{v}" for v in variables}
plot_kws.update(plot_variables)
# Pass the row/col variables to FacetGrid with their original
# names so that the axes titles render correctly
grid_kws = {v: p.variables.get(v, None) for v in grid_semantics}
# Rename the columns of the plot_data structure appropriately
new_cols = plot_variables.copy()
new_cols.update(grid_kws)
full_data = p.plot_data.rename(columns=new_cols)
# Set up the FacetGrid object
facet_kws = {} if facet_kws is None else facet_kws.copy()
g = FacetGrid(
data=full_data.dropna(axis=1, how="all"),
**grid_kws,
col_wrap=col_wrap, row_order=row_order, col_order=col_order,
height=height, aspect=aspect, dropna=False,
**facet_kws
)
# Draw the plot
g.map_dataframe(func, **plot_kws)
# Label the axes
g.set_axis_labels(
variables.get("x", None), variables.get("y", None)
)
# Show the legend
if legend:
# Replace the original plot data so the legend uses
# numeric data with the correct type
p.plot_data = plot_data
p.add_legend_data(g.axes.flat[0])
if p.legend_data:
g.add_legend(legend_data=p.legend_data,
label_order=p.legend_order,
title=p.legend_title,
adjust_subtitles=True)
# Rename the columns of the FacetGrid's `data` attribute
# to match the original column names
orig_cols = {
f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
}
g.data = g.data.rename(columns=orig_cols)
return g
relplot.__doc__ = """\
Figure-level interface for drawing relational plots onto a FacetGrid.
This function provides access to several different axes-level functions
that show the relationship between two variables with semantic mappings
of subsets. The ``kind`` parameter selects the underlying axes-level
function to use:
- :func:`scatterplot` (with ``kind="scatter"``; the default)
- :func:`lineplot` (with ``kind="line"``)
Extra keyword arguments are passed to the underlying function, so you
should refer to the documentation for each to see kind-specific options.
{narrative.main_api}
{narrative.relational_semantic}
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, and other parameters.
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce elements with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : vector or key in ``data``
Grouping variable that will produce elements with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in the latter case.
style : vector or key in ``data``
Grouping variable that will produce elements with different styles.
Can have a numeric dtype but will always be treated as categorical.
{params.core.data}
{params.facets.rowcol}
{params.facets.col_wrap}
row_order, col_order : lists of strings
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.style_order}
{params.rel.dashes}
{params.rel.markers}
{params.rel.legend}
kind : string
Kind of plot to draw, corresponding to a seaborn relational plot.
Options are {{``scatter`` and ``line``}}.
{params.facets.height}
{params.facets.aspect}
facet_kws : dict
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
{params.rel.units}
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
{returns.facetgrid}
Examples
--------
.. include:: ../docstrings/relplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
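# --- Editorial usage sketch (not part of the module) -----------------------
# A minimal, hedged example of the figure-level `relplot` defined above: the
# faceting variable goes to `col` and a FacetGrid is returned; pandas is
# assumed importable.
if __name__ == "__main__":
    import pandas as pd
    rng = np.random.RandomState(2)
    demo = pd.DataFrame({
        "x": np.tile(np.arange(20), 4),
        "y": rng.normal(size=80).round(2),
        "cond": np.tile(np.repeat(["ctl", "trt"], 20), 2),
        "session": np.repeat(["day1", "day2"], 40),
    })
    g = relplot(data=demo, x="x", y="y", hue="cond", col="session",
                kind="line", height=3.5, aspect=1.2)
    g.set_axis_labels("Trial", "Response")
    plt.show()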
|
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "d01"
__email__ = "[email protected]"
__copyright__ = "Copyright (C) 2015-16, Florian JUNG"
__license__ = "MIT"
__version__ = "0.1.0"
__date__ = "2016-03-29"
# Created: 2015-03-21 24:00
import time
import threading
from .sensor import Sensor
from ..sensorInterface import SensorClientInterface, \
SensorUpdateException, SensorJoinException, SensorStartException
from .message import MsgType, Id, \
APPJoinMessage, APPUnjoinMessage, APPUpdateMessage, \
format_data
class SensorClient(Sensor, SensorClientInterface):
""" APP sensor client """
def __init__(self, settings=None):
if settings is None:
settings = {}
super(SensorClient, self).__init__(settings)
self._packet_timeout = settings.get('packet_wait_timeout', 1.5)
""" Time to wait for new packet event """
self._start_block_timeout = max(
self._packet_timeout, self._select_timeout
)
""" Time to block in start """
self._join_retry_timeout = settings.get('join_retry_timeout', 5.0)
""" Time to wait before resending join packet """
self._join_retry_count = settings.get('join_retry_number', 3)
""" Number of times to try to join before failing """
self._server_ip = None
""" Ip of server
:type : None | str """
self._server_port = None
""" Port of server
:type : None | int """
self._joined = threading.Event()
""" If set, currently joined the audience """
def join(self, people):
"""
Join the local audience
(a config message should be received on success)
Validates that there are people to join and that each of them
has a valid unique id
:param people: Which people does this sensor have
:type people: list[paps.person.Person]
:rtype: None
:raises SensorJoinException: Failed to join
"""
tries = 0
if not people:
raise SensorJoinException("No people given")
ids = set()
for person in people:
if not person.id and person.id != 0:
raise SensorJoinException("Invalid id for one or more people")
if person.id in ids:
raise SensorJoinException(
u"Id {} not unique".format(person.id)
)
ids.add(person.id)
while self._is_running and tries < self._join_retry_count:
packet = APPJoinMessage(
payload={'people': [person.to_dict() for person in people]}
)
self._send_packet(self._multicast_group, self._multicast_port,
packet)
if self._joined.wait(self._join_retry_timeout):
break
with self._seq_ack_lock:
# Got ack for packet?
packet_ackd = packet.header.sequence_number \
not in self._seq_ack
if packet_ackd and self._joined.wait(1.0):
# Packet already got acked
# -> wait another second for ._joined to be set
break
tries += 1
self.warning(
u"Unsuccessful attempt joining audience # {}".format(tries)
)
if not self._joined.is_set() or tries >= self._join_retry_count:
# Failed to join (no config packet received)
raise SensorJoinException("No config packet received")
self.info("Joined the audience")
def unjoin(self):
"""
Leave the local audience
:rtype: None
:raises SensorJoinException: Failed to leave
"""
self.debug("()")
if self._joined.is_set():
packet = APPUnjoinMessage(device_id=Id.NOT_SET)
self._send_packet(self._server_ip, self._server_port, packet)
self._joined.clear()
self.info("Left the audience")
def config(self, settings):
"""
Configuration has changed - config this module and lower layers
(calls on_config - if set)
:param settings: New configuration
:type settings: dict
:rtype: None
:raises SensorUpdateException: Failed to update
"""
self.debug("()")
# TODO synchronize access to vars
try:
self._device_id = settings['device_id']
self._packet_timeout = settings.get(
'packet_wait_timeout',
self._packet_timeout
)
self._server_ip = settings.get('server_ip', self._server_ip)
self._server_port = settings.get('server_port', self._server_port)
except KeyError:
raise SensorUpdateException("Key not in settings")
if callable(self.on_config):
try:
self.on_config(settings)
except:
self.exception("Failed to update remote config")
raise SensorUpdateException("Remote config failed")
def person_update(self, people):
"""
Update the status of people
:param people: All people of this sensor
:type people: list[paps.person.Person]
:rtype: None
:raises SensorUpdateException: Failed to update
"""
packet = APPUpdateMessage(device_id=Id.NOT_SET, people=people)
self._send_packet(
self._server_ip, self._server_port, packet,
acknowledge_packet=False
)
def _packet_loop(self):
"""
Packet processing loop
:rtype: None
"""
while self._is_running:
# Only wait if there are no more packets in the inbox
if self.inbox.empty() \
and not self.new_packet.wait(self._packet_timeout):
continue
ip, port, packet = self.inbox.get()
if self.inbox.empty():
self.new_packet.clear()
self.debug(u"{}".format(packet))
if packet.header.message_type == MsgType.CONFIG:
self._do_config_packet(packet, ip, port)
def _do_config_packet(self, packet, ip, port):
"""
Apply config to this instance
:param packet: Packet with config
:type packet: paps.si.app.message.APPMessage
:param ip: Ip of server
:type ip: str
:param port: Port of server
:type port: int
:rtype: None
"""
self.debug("()")
if packet.header.device_id != Id.SERVER:
# Only allow config packets from server
self.warning("Config packets only allowed from server")
return
try:
config = packet.payload
self.debug(u"{}".format(config))
if not isinstance(config, dict):
self.error("Wrong payload type")
raise RuntimeError("Wrong type")
config.setdefault("server_ip", ip)
config.setdefault("server_port", port)
self.config(config)
self._joined.set()
except:
self.exception("Failed to configure")
self.error(u"Faulty packet {}".format(format_data(packet.payload)))
return
def start(self, blocking=False):
"""
Start the interface
:param blocking: Should the call block until stop() is called
(default: False)
:type blocking: bool
:rtype: None
:raises SensorStartException: Failed to start
"""
self.debug("()")
super(SensorClient, self).start(blocking=False)
try:
a_thread = threading.Thread(
target=self._thread_wrapper,
args=(self._packet_loop,)
)
a_thread.daemon = True
a_thread.start()
except:
self.exception("Failed to run packet loop")
raise SensorStartException("Packet loop failed")
self.info("Started")
# Blocking - call StartStopable.start
super(Sensor, self).start(blocking)
def stop(self):
"""
Stop the interface
:rtype: None
"""
self.debug("()")
try:
self.unjoin()
time.sleep(2)
except:
self.exception("Failed to leave audience")
super(SensorClient, self).stop()
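# --- Editorial usage sketch (not part of the module) -----------------------
# A hedged example of the client lifecycle based only on the methods above.
# The Person import path and constructor are assumptions; join() only needs
# objects exposing a unique `.id` and a `.to_dict()` method, and the default
# settings are assumed sufficient for the Sensor base class.
if __name__ == "__main__":
    from paps.person import Person  # assumed location of the Person class

    client = SensorClient(settings={'join_retry_timeout': 5.0})
    client.start(blocking=False)      # spawns the packet-processing thread
    try:
        people = [Person(id=0), Person(id=1)]   # ids must be unique; 0 is valid
        client.join(people)                     # blocks until a config packet arrives
        client.person_update(people)            # push a status update to the server
    finally:
        client.stop()                           # unjoins and stops the loop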
|
|
import tempfile
from datetime import date
from nose.tools import eq_
from inferno.bin.run import _get_options
from inferno.bin.run import _get_settings
def assert_dicts_equal(d1, d2):
for item in d1.items():
eq_(item[1], d2[item[0]])
for item in d2.items():
eq_(item[1], d1[item[0]])
class TestOptions(object):
def test_defaults(self):
options, _ = _get_options([])
expected = {
'data_file': None,
'parameters': [],
'parameter_file': None,
'force': False,
'profile': False,
'debug': False,
'disco_debug': False,
'server': None,
'settings_file': None,
'source_tags': None,
'source_urls': None,
'just_query': False,
'result_tag': None,
'day_start': None,
'day_range': None,
'day_offset': None,
'immediate_rule': None,
'rules_directory': None,
'start_paused': False,
'example_rules': None,
'process_map': None,
'process_results': None,
'run_daemon': False,
}
assert_dicts_equal(options, expected)
def test_force(self):
self._assert_force('-f')
self._assert_force('--force')
def test_profile(self):
self._assert_profile('-p')
self._assert_profile('--profile')
def test_debug(self):
self._assert_debug('-d')
self._assert_debug('--debug')
def test_disco_debug(self):
self._assert_disco_debug('-D')
self._assert_disco_debug('--disco-debug')
def test_server(self):
self._assert_server('-s')
self._assert_server('--server')
def test_settings(self):
self._assert_settings('-e')
self._assert_settings('--settings')
def test_rules_directory(self):
self._assert_rules_directory('-y')
self._assert_rules_directory('--rules-directory')
def test_day_range(self):
self._assert_day_range('-R')
self._assert_day_range('--day-range')
def test_day_offset(self):
self._assert_day_offset('-O')
self._assert_day_offset('--day-offset')
def test_day_start(self):
self._assert_day_start('-S')
self._assert_day_start('--day-start')
def test_source_tags(self):
self._assert_source_tags('-t')
self._assert_source_tags('--source-tags')
def test_result_tag(self):
self._assert_result_tag('-r')
self._assert_result_tag('--result-tag')
def test_immediate_rule(self):
self._assert_immediate_rule('-i')
self._assert_immediate_rule('--immediate-rule')
def test_parameters(self):
self._assert_parameters('-P')
self._assert_parameters('--parameters')
def _assert_force(self, flag):
options, _ = _get_options([flag])
eq_(options['force'], True)
def _assert_profile(self, flag):
options, _ = _get_options([flag])
eq_(options['profile'], True)
def _assert_debug(self, flag):
options, _ = _get_options([flag])
eq_(options['debug'], True)
def _assert_disco_debug(self, flag):
options, _ = _get_options([flag])
eq_(options['disco_debug'], True)
def _assert_settings(self, flag):
options, _ = _get_options([flag, 'path_to_settings_file'])
eq_(options['settings_file'], 'path_to_settings_file')
def _assert_rules_directory(self, flag):
options, _ = _get_options([flag, 'path_to_rules_directory'])
eq_(options['rules_directory'], 'path_to_rules_directory')
def _assert_day_range(self, flag):
options, _ = _get_options([flag, '10'])
eq_(options['day_range'], 10)
def _assert_day_offset(self, flag):
options, _ = _get_options([flag, '5'])
eq_(options['day_offset'], 5)
def _assert_day_start(self, flag):
options, _ = _get_options([flag, '2012-12-01'])
eq_(options['day_start'], date(2012, 12, 1))
def _assert_immediate_rule(self, flag):
options, _ = _get_options([flag, 'some_module.some_rule'])
eq_(options['immediate_rule'], 'some_module.some_rule')
def _assert_result_tag(self, flag):
options, _ = _get_options([flag, 'some_result_tag'])
eq_(options['result_tag'], 'some_result_tag')
def _assert_parameters(self, flag):
# one param
options, _ = _get_options([flag, 'some_param: some_value'])
eq_(options['some_param'], 'some_value')
# many params
options, _ = _get_options([
flag, 'some_param_1: some_value_1',
flag, 'some_param_2: some_value_2'])
eq_(options['some_param_1'], 'some_value_1')
eq_(options['some_param_2'], 'some_value_2')
# integer
options, _ = _get_options([flag, 'some_int: 100'])
eq_(options['some_int'], 100)
# list of integers
options, _ = _get_options([flag, 'some_int_list: [100, 200, 300,]'])
eq_(options['some_int_list'], [100, 200, 300])
def _assert_source_tags(self, flag):
# one tag
options, _ = _get_options([flag, 'tag1'])
eq_(options['source_tags'], ['tag1'])
# many tags
options, _ = _get_options([flag, 'tag1,tag2'])
eq_(options['source_tags'], ['tag1', 'tag2'])
def _assert_server(self, flag):
options, _ = _get_options([flag, 'some_server'])
eq_(options['server'], 'some_server')
class TestSettings(object):
def test_get_settings(self):
# settings default
options, _ = _get_options(['-e', 'some_unknown_settings_file'])
settings = _get_settings(options)
eq_(settings['server'], 'localhost')
# options override
options, _ = _get_options(['-s', 'some_server'])
settings = _get_settings(options)
eq_(settings['server'], 'some_server')
# file override
settings_file = self._create_settings_file()
options, _ = _get_options(['-e', settings_file])
settings = _get_settings(options)
eq_(settings['server'], 'another_server')
# server from -s trumps -e
settings_file = self._create_settings_file()
options, _ = _get_options(['-e', settings_file, '-s', 'some_server'])
settings = _get_settings(options)
eq_(settings['server'], 'some_server')
def _create_settings_file(self):
(_, settings_file) = tempfile.mkstemp()
with open(settings_file, 'w') as f:
f.write("server: another_server\n")
return settings_file
|
|
import os
import scipy.io
import numpy as np
class ConnectomeInjury(object):
def __init__(self,
base_filename=os.path.join('data', 'base.mat'), # Base connectome filename.
n_injuries=2, # How many injuries to have (currently only works for 2).
signature_seed=333, # Random seed for the injury signature.
):
"""Use to create synthetic injury data."""
# Set the mean base connectome.
self.X_mn = self.load_base_connectome(base_filename)
# Generate the injury signatures (set the random seed so we get the same signatures).
r_state = np.random.RandomState(signature_seed)
self.sigs = self.generate_injury_signatures(self.X_mn, n_injuries, r_state)
def generate_injury(self, n_samples=1000, # How many samples to create.
noise_weight=0.125,  # How much to weight the noise.
):
"""Return n_samples of synthetic injury data and corresponding injury strength."""
# Generate phantoms with injuries of different strengths (and add noise)
# TODO: allow for N injury patterns
X, Y = self.sample_injury_strengths(n_samples, self.X_mn, self.sigs[0],
self.sigs[1], noise_weight)
# Make sure the number of samples matches what was specified.
assert X.shape[0] == n_samples
assert Y.shape[0] == n_samples
return X, Y
@staticmethod
def load_base_connectome(file_name, verbose=False):
"""Loads the connectome that serves as the base of the synthetic data"""
# Load the data.
X_mn = scipy.io.loadmat(file_name)
X_mn = X_mn['X_mn']
if verbose:
print('Data shape: ', X_mn.shape, ' Min value: ', X_mn.min(), ' Max value: ', X_mn.max())
return X_mn
@staticmethod
def generate_injury_signatures(X_mn, n_injuries, r_state):
"""Generates the signatures that represent the underlying signal in our synthetic experiments.
d : (integer) the size of the input matrix (assumed to be of size d x d)
"""
# Get the strongest regions, to which we will apply simulated injuries
sig_indexes = get_k_strongest_regions(X_mn, n_injuries, verbose=False)
d = X_mn.shape[0]
S = []
# Create a signature matrix for each of the selected regions
for idx, sig_idx in enumerate(sig_indexes):
# Okay, let's make some signature noise vectors.
A_vec = r_state.rand((d))
# B_vec = np.random.random((n))
# Create the signature matrix.
A = np.zeros((d, d))
A[:, sig_idx] = A_vec
A[sig_idx, :] = A_vec
S.append(A)
assert (A.T == A).all() # Check if matrix is symmetric.
return np.asarray(S)
@staticmethod
def sample_injury_strengths(n_samples, X_mn, A, B, noise_weight):
"""Returns n_samples connectomes with simulated injury from two sources."""
mult_factor = 10
n_classes = 2
# Range of values to predict.
n_start = 0.5
n_end = 1.4
# amt_increase = 0.1
# These will be our Y.
A_weights = np.random.uniform(n_start, n_end, [n_samples])
B_weights = np.random.uniform(n_start, n_end, [n_samples])
X_h5 = np.zeros((n_samples, 1, X_mn.shape[0], X_mn.shape[1]), dtype=np.float32)
Y_h5 = np.zeros((n_samples, n_classes), dtype=np.float32)
for idx in range(n_samples):
w_A = A_weights[idx]
w_B = B_weights[idx]
# Get the matrix.
X_sig = apply_injury_and_noise(X_mn, A, w_A * mult_factor, B, w_B * mult_factor, noise_weight)
# Normalize.
X_sig = (X_sig - X_sig.min()) / (X_sig.max() - X_sig.min())
# Put in h5 format.
X_h5[idx, 0, :, :] = X_sig
Y_h5[idx, :] = [w_A, w_B]
return X_h5, Y_h5
def get_symmetric_noise(m, n):
"""Return a random noise image of size m x n with values between 0 and 1."""
# Generate random noise image.
noise_img = np.random.rand(m, n)
# Make the noise image symmetric.
noise_img = noise_img + noise_img.T
# Normalize between 0 and 1.
noise_img = (noise_img - noise_img.min()) / (noise_img.max() - noise_img.min())
assert noise_img.max() == 1 # Make sure is between 0 and 1.
assert noise_img.min() == 0
assert (noise_img.T == noise_img).all() # Make sure symmetric.
return noise_img
def simulate_injury(X, weight_A, sig_A, weight_B, sig_B):
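    """Return X attenuated element-wise by the two weighted injury signatures."""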
denom = (np.ones(X.shape) + (weight_A * sig_A)) * (np.ones(X.shape) + (weight_B * sig_B))
X_sig_AB = np.divide(X, denom)
return X_sig_AB
def apply_injury_and_noise(X, Sig_A, weight_A, Sig_B, weight_B, noise_weight):
"""Returns a symmetric, signed, noisy, adjacency matrix with simulated injury from two sources."""
X_sig_AB = simulate_injury(X, weight_A, Sig_A, weight_B, Sig_B)
# Get the noise image.
noise_img = get_symmetric_noise(X.shape[0], X.shape[1])
# Weight the noise image.
weighted_noise_img = noise_img * noise_weight
# Add the noise to the original image.
X_sig_AB_noise = X_sig_AB + weighted_noise_img
assert (X_sig_AB_noise.T == X_sig_AB_noise).all() # Make sure still is symmetric.
return X_sig_AB_noise
def get_k_strongest_regions(X, k, verbose=False):
"""Return the k regions (matrix columns) with the highest median values."""
# Make a copy of this array, since we will modify it and do not want to change the original X.
X = np.copy(X)
highest_col_indexes = []
# We rank the columns by their median value.
for idx in range(k):
# sum_cols = np.sum(X_mn, axis=0)
sum_cols = np.median(X, axis=0)
max_idx = np.argmax(sum_cols)
highest_col_indexes.append(max_idx)
# Zero out the largest column so we can find the next largest one.
X[:, max_idx] = 0
if verbose:
print "%i => column index of largest averaged value: %i" % (idx, max_idx)
return highest_col_indexes
"""
def generate_injury_data(test_name, X_mn, S, n_samples=100, noise_weight=0.125,
dir_syn_data='./generated_synthetic_data'):
#Generate and write to disk synthetic injury data. Return the file path.
# Generate phantoms with injuries of different strengths (and add noise)
X, Y = sample_injury_strengths(n_samples, X_mn, S[0], S[1], noise_weight) # TODO: allow for N injury patterns
# Make sure the number of samples matches what was specified.
assert X.shape[0] == n_samples
assert Y.shape[0] == n_samples
# Vectorize connectome.
X_vec = vectorize_symmetric_list(np.squeeze(X))
# Write to disk in h5 format for caffe.
f_name = test_name + '-' + str(n_samples) + '_noise-' + str(noise_weight) + '.h5'
file_name = os.path.abspath(os.path.join(dir_syn_data, f_name))
caffe_write_h5(file_name, X, X_vec, Y)
return file_name
"""
|
|
"""
@file
@brief A function to download the content of a url.
"""
import os
from datetime import datetime
import socket
import gzip
import warnings
import hashlib
import urllib.error as urllib_error
import urllib.request as urllib_request
import http.client as http_client
try:
from http.client import InvalidURL
except ImportError:
InvalidURL = ValueError
class InternetException(Exception):
"""
Exception for the function @see fn get_url_content_timeout
"""
pass
def get_url_content_timeout(url, timeout=10, output=None, encoding="utf8",
raise_exception=True, chunk=None, fLOG=None):
"""
Downloads a file from the internet (by default, it assumes the content
is text; set *encoding* to None to retrieve binary data).
@param url (str) url
@param      timeout             (int) in seconds; after this time, the function gives up and returns None, -1 means wait forever
@param      output              (str) if not None, the content is stored in that file
@param      encoding            (str) utf8 by default, but if it is None, the returned information is binary
@param      raise_exception     (bool) True to raise an exception, False to issue a warning
@param chunk (int|None) save data every chunk (only if output is not None)
@param fLOG logging function (only applies when chunk is not None)
@return content of the url
If the function automatically detects that the downloaded data is in gzip
format, it will decompress it.
The function raises the exception @see cl InternetException.
"""
def save_content(content, append=False):
"local function"
app = "a" if append else "w"
if encoding is not None:
with open(output, app, encoding=encoding) as f:
f.write(content)
else:
with open(output, app + "b") as f:
f.write(content)
try:
if chunk is not None:
if output is None:
raise ValueError(
"output cannot be None if chunk is not None")
app = [False]
size = [0]
def _local_loop(ur):
    while True:
        res = ur.read(chunk)
        size[0] += len(res)  # pylint: disable=E1137
        if fLOG is not None:
            fLOG("[get_url_content_timeout] downloaded",
                 size, "bytes")
        if len(res) > 0:
            if encoding is not None:
                res = res.decode(encoding=encoding)
            # Truncate the file on the first chunk, append afterwards.
            save_content(res, app[0])
            app[0] = True  # pylint: disable=E1137
        else:
            break
if timeout != -1:
with urllib_request.urlopen(url, timeout=timeout) as ur:
_local_loop(ur)
else:
with urllib_request.urlopen(url) as ur:
_local_loop(ur)
app = app[0]
size = size[0]
else:
if timeout != -1:
with urllib_request.urlopen(url, timeout=timeout) as ur:
res = ur.read()
else:
with urllib_request.urlopen(url) as ur:
res = ur.read()
except (urllib_error.HTTPError, urllib_error.URLError,
ConnectionRefusedError) as e:
if raise_exception:
raise InternetException(
"Unable to retrieve content, url='{0}'".format(url)) from e
warnings.warn(
"Unable to retrieve content from '{0}' exc: {1}".format(url, e), ResourceWarning)
return None
except socket.timeout as e:
if raise_exception:
raise InternetException(
"Unable to retrieve content, url='{0}'".format(url)) from e
warnings.warn("unable to retrieve content from {0} because of timeout {1}: {2}".format(
url, timeout, e), ResourceWarning)
return None
except ConnectionResetError as e:
if raise_exception:
raise InternetException(
"Unable to retrieve content, url='{0}'".format(url)) from e
warnings.warn(
"unable to retrieve content from {0} because of ConnectionResetError: {1}".format(url, e), ResourceWarning)
return None
except http_client.BadStatusLine as e:
if raise_exception:
raise InternetException(
"Unable to retrieve content, url='{0}'".format(url)) from e
warnings.warn(
"Unable to retrieve content from '{0}' because of http.client.BadStatusLine: {1}".format(url, e), ResourceWarning)
return None
except http_client.IncompleteRead as e:
if raise_exception:
raise InternetException(
"Unable to retrieve content url='{0}'".format(url)) from e
warnings.warn(
"Unable to retrieve content from '{0}' because of http.client.IncompleteRead: {1}".format(url, e), ResourceWarning)
return None
except (ValueError, InvalidURL) as e:
if raise_exception:
raise InternetException(
"Unable to retrieve content url='{0}'".format(url)) from e
warnings.warn(
"Unable to retrieve content from '{0}' because of {1}".format(url, e), ResourceWarning)
return None
except Exception as e:
if raise_exception:
raise InternetException(
"Unable to retrieve content, url='{0}', exc={1}".format(url, e)) from e
warnings.warn(
"Unable to retrieve content from '{0}' because of unknown exception: {1}".format(url, e), ResourceWarning)
raise e
if chunk is None:
if len(res) >= 2 and res[:2] == b"\x1f\x8B":
# gzip format
res = gzip.decompress(res)
if encoding is not None:
try:
content = res.decode(encoding)
except UnicodeDecodeError as e:
# try other encodings before giving up
laste = [e]
othenc = ["iso-8859-1", "latin-1"]
for encode in othenc:
try:
content = res.decode(encode)
break
except UnicodeDecodeError as e:
laste.append(e)
content = None
if content is None:
mes = ["Unable to parse text from '{0}'.".format(url)]
mes.append("tried:" + str([encoding] + othenc))
mes.append("beginning:\n" + str([res])[:50])
for e in laste:
mes.append("Exception: " + str(e))
raise ValueError("\n".join(mes))
else:
content = res
else:
content = None
if output is not None and chunk is None:
save_content(content)
return content
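# --- Editorial usage sketch (not part of the module) -----------------------
# Hedged examples of the two download modes above; the URLs and file names are
# illustrative only.
if __name__ == "__main__":
    # Small text page: keep the whole content in memory.
    page = get_url_content_timeout("https://example.com", timeout=10,
                                   encoding="utf8")
    print(page[:80] if page else page)
    # Large binary file: stream to disk in 1 MiB chunks; the function then
    # returns None and the data ends up in 'dump.bin'.
    get_url_content_timeout("https://example.com/big.bin", timeout=-1,
                            output="dump.bin", encoding=None,
                            chunk=2 ** 20, fLOG=print)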
def _hash_url(url):
m = hashlib.sha256()
m.update(url.encode('utf-8'))
return m.hexdigest()[:25]
def get_urls_content_timeout(urls, timeout=10, folder=None, encoding=None,
raise_exception=True, chunk=None, fLOG=None):
"""
Downloads data from a list of urls (*encoding* is None by default, so the
downloaded content is treated as binary; pass an encoding to decode text).
:param urls: urls
:param timeout: in seconds; after this time, the function gives up and returns None, -1 means wait forever
:param folder: folder where the downloaded files are stored (must not be None)
:param encoding: None by default; if None, the returned information is binary
:param raise_exception: True to raise an exception, False to issue a warning
:param chunk: save data every *chunk* bytes while downloading each file
:param fLOG: logging function (only applies when chunk is not None)
:return: list of downloaded content
If the function automatically detects that the downloaded data is in gzip
format, it will decompress it.
The function raises the exception @see cl InternetException.
"""
import pandas
import pandas.errors
if not isinstance(urls, list):
raise TypeError("urls must be a list")
if folder is None:
raise ValueError("folder should not be None")
summary = os.path.join(folder, "summary.csv")
if os.path.exists(summary):
try:
df = pandas.read_csv(summary)
except pandas.errors.EmptyDataError:
df = None
else:
df = None
if df is not None:
all_obs = [dict(url=df.loc[i, 'url'], # pylint: disable=E1101
size=df.loc[i, 'size'], # pylint: disable=E1101
date=df.loc[i, 'date'], # pylint: disable=E1101
dest=df.loc[i, 'dest']) # pylint: disable=E1101
for i in range(df.shape[0])] # pylint: disable=E1101
done = set(d['dest'] for d in all_obs)
else:
all_obs = []
done = set()
for i, url in enumerate(urls):
dest = _hash_url(url)
if dest in done:
continue
full_dest = os.path.join(folder, dest + '.bin')
content = get_url_content_timeout(url, timeout=timeout, output=full_dest,
encoding=encoding, chunk=chunk,
raise_exception=raise_exception)
if content is None:
continue
if fLOG is not None:
fLOG("{}/{} downloaded {} bytes from '{}' to '{}'.".format(
i + 1, len(urls), len(content), url, dest + '.bin'))
obs = dict(url=url, size=len(content), date=datetime.now(),
dest=dest)
all_obs.append(obs)
done.add(dest)
new_df = pandas.DataFrame(all_obs)
new_df.to_csv(summary, index=False)
return all_obs
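# --- Editorial usage sketch (not part of the module) -----------------------
# Hedged example: download a small batch of URLs into a cache folder, which
# produces one '<hash>.bin' file per URL plus a 'summary.csv' index. The URLs
# and the folder name are illustrative only.
if __name__ == "__main__":
    os.makedirs("cache", exist_ok=True)
    downloads = get_urls_content_timeout(
        ["https://example.com/a", "https://example.com/b"],
        timeout=10, folder="cache", encoding=None, fLOG=print,
    )
    for obs in downloads:
        print(obs["url"], "->", obs["dest"] + ".bin", obs["size"], "bytes")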
def local_url(url, folder=None, envvar='REPO_LOCAL_URLS'):
"""
Replaces the url by a local file stored in *folder*, falling back to the
folder named by the environment variable *envvar* when *folder* is None.
:param url: url to replace
:param folder: local folder
:param envvar: environment variable
:return: local file or url
"""
if folder is None:
folder = os.environ.get(envvar, None) # pragma: no cover
if folder is None:
raise FileNotFoundError(
"Unable to find local folder '{}' or environment variable '{}'.".format(
folder, envvar))
loc = _hash_url(url)
name = os.path.join(folder, loc + '.bin')
if os.path.exists(name):
return name
return url
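# --- Editorial usage sketch (not part of the module) -----------------------
# Hedged example: if 'cache/<hash>.bin' already exists (e.g. written by
# get_urls_content_timeout above), the local path is returned; otherwise the
# original url is passed through unchanged. Names are illustrative only.
if __name__ == "__main__":
    source = local_url("https://example.com/data.csv", folder="cache")
    print(source)  # either 'cache/<25-char-hash>.bin' or the url itself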
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._availability_group_listeners_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_group_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailabilityGroupListenersOperations:
"""AvailabilityGroupListenersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sqlvirtualmachine.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
sql_virtual_machine_group_name: str,
availability_group_listener_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.AvailabilityGroupListener":
"""Gets an availability group listener.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param sql_virtual_machine_group_name: Name of the SQL virtual machine group.
:type sql_virtual_machine_group_name: str
:param availability_group_listener_name: Name of the availability group listener.
:type availability_group_listener_name: str
:param expand: The child resources to include in the response.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilityGroupListener, or the result of cls(response)
:rtype: ~azure.mgmt.sqlvirtualmachine.models.AvailabilityGroupListener
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityGroupListener"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
sql_virtual_machine_group_name=sql_virtual_machine_group_name,
availability_group_listener_name=availability_group_listener_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AvailabilityGroupListener', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
sql_virtual_machine_group_name: str,
availability_group_listener_name: str,
parameters: "_models.AvailabilityGroupListener",
**kwargs: Any
) -> "_models.AvailabilityGroupListener":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityGroupListener"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AvailabilityGroupListener')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
sql_virtual_machine_group_name=sql_virtual_machine_group_name,
availability_group_listener_name=availability_group_listener_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AvailabilityGroupListener', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AvailabilityGroupListener', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
sql_virtual_machine_group_name: str,
availability_group_listener_name: str,
parameters: "_models.AvailabilityGroupListener",
**kwargs: Any
) -> AsyncLROPoller["_models.AvailabilityGroupListener"]:
"""Creates or updates an availability group listener.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param sql_virtual_machine_group_name: Name of the SQL virtual machine group.
:type sql_virtual_machine_group_name: str
:param availability_group_listener_name: Name of the availability group listener.
:type availability_group_listener_name: str
:param parameters: The availability group listener.
:type parameters: ~azure.mgmt.sqlvirtualmachine.models.AvailabilityGroupListener
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AvailabilityGroupListener or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.sqlvirtualmachine.models.AvailabilityGroupListener]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityGroupListener"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
sql_virtual_machine_group_name=sql_virtual_machine_group_name,
availability_group_listener_name=availability_group_listener_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AvailabilityGroupListener', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
sql_virtual_machine_group_name: str,
availability_group_listener_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
sql_virtual_machine_group_name=sql_virtual_machine_group_name,
availability_group_listener_name=availability_group_listener_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
sql_virtual_machine_group_name: str,
availability_group_listener_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an availability group listener.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param sql_virtual_machine_group_name: Name of the SQL virtual machine group.
:type sql_virtual_machine_group_name: str
:param availability_group_listener_name: Name of the availability group listener.
:type availability_group_listener_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
sql_virtual_machine_group_name=sql_virtual_machine_group_name,
availability_group_listener_name=availability_group_listener_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}'} # type: ignore
@distributed_trace
def list_by_group(
self,
resource_group_name: str,
sql_virtual_machine_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AvailabilityGroupListenerListResult"]:
"""Lists all availability group listeners in a SQL virtual machine group.
:param resource_group_name: Name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param sql_virtual_machine_group_name: Name of the SQL virtual machine group.
:type sql_virtual_machine_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilityGroupListenerListResult or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sqlvirtualmachine.models.AvailabilityGroupListenerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityGroupListenerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_group_request(
resource_group_name=resource_group_name,
sql_virtual_machine_group_name=sql_virtual_machine_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_group_request(
resource_group_name=resource_group_name,
sql_virtual_machine_group_name=sql_virtual_machine_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilityGroupListenerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners'} # type: ignore
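# --- Hedged usage sketch (illustrative only, not part of the generated client) ---
# The operations above follow the standard azure-core async patterns:
# ``begin_create_or_update``/``begin_delete`` return an AsyncLROPoller and
# ``list_by_group`` returns an AsyncItemPaged. The helper below assumes a
# ``SqlVirtualMachineManagementClient`` from ``azure.mgmt.sqlvirtualmachine.aio``
# that exposes this operations group as ``availability_group_listeners``; both
# of those names are assumptions based on the package layout, not taken from this file.
async def _example_manage_listeners(client, resource_group, group_name, listener_name, parameters):
    # Start the long-running create/update and wait for the final resource.
    poller = await client.availability_group_listeners.begin_create_or_update(
        resource_group, group_name, listener_name, parameters
    )
    listener = await poller.result()
    # Enumerate all listeners in the SQL virtual machine group.
    async for item in client.availability_group_listeners.list_by_group(resource_group, group_name):
        print(item.name)
    return listener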
|
|
# Dynamic Time Warping
# J Eisenmann
# ACCAD
# December 2012
from math import sqrt
from Vector import *
class DTW:
def __init__( self, X, Y, subsequence=False, penalty=[0,5], maxPathLength=99999.0 ):
self.X = list(X)
self.Y = list(Y)
self.subsequence = subsequence
self.penalty = penalty
self.maxPathLen = maxPathLength
self.P = None
self.minCost = None
def DTW( self ): # x and y are lists with members of dimension N
""" Dynamic Time Warping distance:
returns the cost of warping time
series X to time series Y """
self.C = self.ComputeCostMatrix( self.X, self.Y )
self.ComputeAccumulatedCostMatrix()
if self.subsequence:
self.P, self.minCost = self.OptimalSubsequenceWarpingPath()
else:
self.P, self.minCost = self.OptimalWarpingPath()
return self.P, self.minCost, self.D
def UpdateX( self, newX ):
""" Given a newX list (which is assumed to be
identical to the original X with extra entries
at the end, add corresponding entries to the
cost matrices and recalculate the best path """
if not( len(newX) == len(self.X) and all( [ nx == x for nx,x in zip(newX, self.X) ] ) ):
# if newX is different than self.X, re-compute with the extra parts of newX
extraParts = newX[ len(self.X): ]
self.UpdateCostMatrix( extraParts )
self.X.extend(extraParts)
self.ComputeAccumulatedCostMatrix( iterative=True )
if self.subsequence:
self.P, self.minCost = self.OptimalSubsequenceWarpingPath()
else:
self.P, self.minCost = self.OptimalWarpingPath()
return self.P, self.minCost, self.D
def ComputeCostMatrix( self, A, B ):
""" Computes the two dimensional cost matrix between A and B """
#return [[EuclideanDistance(a,b) for b in B] for a in A]
return [[self.EuclideanDistanceSq(a,b) for b in B] for a in A] # skip the sqrt operation until later
def UpdateCostMatrix( self, extraXs ):
""" Given the additions to the X list (extraXs), update
the cost matrix """
for x in extraXs:
newRow = [ self.EuclideanDistanceSq(x,y) for y in self.Y ]
self.C.append(newRow)
def ComputeAccumulatedCostMatrix( self, iterative=False ):
""" Given the cost matrix C, calculate
the accumulated cost matrix """
start = 0
if not iterative: # if first time, make a matrix of zeros
self.D = [[0 for x in range(len(self.C[0]))] for y in range(len(self.C))]
else: # else add rows of zeros until D has the same dimensions as C
start = len(self.D)-1
for row in range( len(self.C) - len(self.D) ):
self.D.append( [ 0 for x in range(len(self.C[0])) ] )
for n in range( start, len(self.C) ):
for m in range( len(self.C[0]) ):
                if n == 0:
                    # First row: in subsequence mode a path may start at any column of Y,
                    # so the accumulated cost is just the local cost; otherwise it is the
                    # running cost along the first row. (The original code fell through to
                    # the general case here and overwrote these values; use elif instead.)
                    if self.subsequence:
                        self.D[n][m] = self.C[n][m]
                    else:
                        self.D[n][m] = sum( self.C[n][:m] )
                elif m == 0:
##                    print "col 0, row %d, sum=%f"%(n,sum( [row[m] for row in C[:n]] ))
##                    print [row[m] for row in C[:n]]
                    self.D[n][m] = sum( [row[m]+self.penalty[1] for row in self.C[:n]] )
                else:
                    self.D[n][m] = self.C[n][m] + min( self.D[n-1][m-1], self.D[n-1][m]+self.penalty[0], self.D[n][m-1]+self.penalty[1] )
if n == len(self.C)-1: # last row
self.D[n][m] += self.penalty[1]*(len(self.C[0])-m) # bias the cost of the last row, so that the algorithm prefers to start near the bottom right corner
def OptimalWarpingPath( self, colStart=None ):
""" Given the cost matrix D, find the
lowest cost warping path from
D[0][0] to D[rows-1][cols-1] """
rows = len(self.D)
cols = len(self.D[0])
n = rows-1
m = cols-1
        if colStart is not None:  # colStart == 0 is a valid end column for subsequence paths
m=colStart
path = [(n,m)]
while n > 0 or m > 0:
if n == 0 :
path.insert(0,(0,m-1))
m -= 1
elif m == 0 :
path.insert(0,(n-1,0))
n -= 1
else:
minStep = min( self.D[n-1][m-1], self.D[n-1][m], self.D[n][m-1] )
if self.D[n-1][m-1] == minStep:
path.insert(0,(n-1,m-1))
n -= 1
m -= 1
elif self.D[n-1][m] == minStep:
path.insert(0,(n-1,m))
n -= 1
else: # self.D[n][m-1] == min:
path.insert(0,(n,m-1))
m -= 1
return path, self.CostOfPath( path, self.D )
def OptimalSubsequenceWarpingPath( self ):
""" Given the accumulated cost matrix D, find the
lowest cost subsequence warping path from
D[0][a*] to D[rows-1][b*] (Note: Y is assumed
to be longer than X) """
subseqCandidates = []
subseqCosts = []
lastRow = list(self.D[-1])
bStar = lastRow.index( min(lastRow) )
while lastRow[bStar] < self.maxPathLen or len(subseqCosts) == 0:
# find aStar with minimum distance for subsequences ending at bStar
P, cost = self.OptimalWarpingPath( bStar )
subseqCandidates.append( P )
subseqCosts.append( cost )
lastRow[bStar] = float("inf")
bStar = lastRow.index( min(lastRow) )
minCost = min(subseqCosts)
return subseqCandidates[ subseqCosts.index( minCost ) ], minCost
def CostOfPath( self, P, D ):
""" Given a path P and a cost matrix D,
return the path cost """
cost = 0
for tup in P:
cost += D[tup[0]][tup[1]]
return cost
def EuclideanDistanceSq( self, a, b ):
""" Computes the squared Euclidean distance
between two points in N-dimensions """
if not (type(a) == list or type(a) == Vector):
a = [a]
        if not (type(b) == list or type(b) == Vector):
b = [b]
assert len(a) == len(b)
sqDist = 0
for x,y in zip(a,b):
sqDist += (x-y)**2
return sqDist
def EuclideanDistance( self, a, b ):
""" Computes the Euclidean distance
between two points in N-dimensions """
return sqrt( self.EuclideanDistanceSq(a,b) )
def DrawCostMatrixAndPath( self, fname ):
""" spits out a p5py file that will draw the matrix and path when run """
f = open(fname, 'w')
w = len(self.D[0])
h = len(self.D)
f.write("img=None\n\ndef setup():\n\tglobal img\n")
f.write("\tsize(%d,%d)\n"%( 5*w,5*h ))
f.write("\timg = createImage( %d, %d, RGB )\n"%(w,h))
f.write("\timg.loadPixels()\n")
mx = max([ max([ x for x in row]) for row in self.D])
pixels = []
i=0
for r,row in enumerate(self.D):
for c,cell in enumerate(row):
pixels.append( int( 255.0*cell/mx ) )
if( (r,c) in self.P ):
pixels[-1] = 255
i += 1
for i,p in enumerate(pixels):
f.write("\timg.pixels[%d] = color(%d)\n"%(i,p))
f.write("\timg.updatePixels()\n\ndef draw():\n\tglobal img\n\timage(img,0,0,%d,%d)\n"%(5*w,5*h))
f.close()
##### Module testing code:
##s1 = [0,0,0,0,0,1,2,3,4,3,2,1,0,0,1,2,3,2,1]
##sub = [4,3,2,1]
##print s1
##print sub
##print ""
##dtw = DTW( sub, s1, True )
##dtw.DTW()
##
##
##print "before"
##for c in dtw.D:
## print c
##
##P,C,M = dtw.UpdateX( [0,1] )
##
##print "after"
##for c in dtw.D:
## print c
# print P
# print C
# for m in M:
# print m
##C = ComputeCostMatrix( sub, s1 )
##D = ComputeAccumulatedCostMatrix( C, True )
##for c in C:
## print c
##print ""
##for d in D:
## print d
##path,cost = SubsequenceDTW( sub, s1, threshold = 10)
##print "final answer"
##print path
##print cost
##seq1 = [sin(x/5.0) for x in range(int(2*pi))]
##seq2 = [cos(x/5.0+pi/2) for x in range(int(3*pi))]
##
##print seq1
##print seq2
##print ""
##DTW(seq1,seq2)
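# Hedged usage sketch (added for illustration): subsequence-warp a short query
# against a longer series using the class above. It assumes the local ``Vector``
# module imported at the top of this file is importable; the numbers are arbitrary.
if __name__ == "__main__":
    query = [4, 3, 2, 1]
    series = [0, 0, 0, 1, 2, 3, 4, 3, 2, 1, 0, 0, 1, 2, 3, 2, 1]
    dtw = DTW(query, series, subsequence=True)
    path, cost, accumulated = dtw.DTW()
    # The warping path always starts in row 0 and ends in the last row of X,
    # and the accumulated path cost is non-negative by construction.
    assert path[0][0] == 0 and path[-1][0] == len(query) - 1
    assert cost >= 0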
|
|
#! /usr/bin/env python
__version__ = '$Revision: 1.1 $'
import os.path
import re
import string
import sys
from xml.sax.saxutils import quoteattr
bang_join = "!".join
null_join = "".join
REPLACEMENTS = [
# Hackish way to deal with macros replaced with simple text
(re.compile(r"\\ABC\b"), "ABC"),
(re.compile(r"\\ASCII\b"), "ASCII"),
(re.compile(r"\\Cpp\b"), "C++"),
(re.compile(r"\\EOF\b"), "EOF"),
(re.compile(r"\\NULL\b"), "NULL"),
(re.compile(r"\\POSIX\b"), "POSIX"),
(re.compile(r"\\UNIX\b"), "Unix"),
# deal with turds left over from LaTeX2HTML
(re.compile(r"<#\d+#>"), ""),
]
class Node:
continuation = 0
def __init__(self, link, str, seqno):
self.links = [link]
self.seqno = seqno
for pattern, replacement in REPLACEMENTS:
str = pattern.sub(replacement, str)
# build up the text
self.text = split_entry_text(str)
self.key = split_entry_key(str)
def __cmp__(self, other):
"""Comparison operator includes sequence number, for use with
list.sort()."""
return self.cmp_entry(other) or cmp(self.seqno, other.seqno)
def cmp_entry(self, other):
"""Comparison 'operator' that ignores sequence number."""
c = 0
for i in range(min(len(self.key), len(other.key))):
c = (cmp_part(self.key[i], other.key[i])
or cmp_part(self.text[i], other.text[i]))
if c:
break
return c or cmp(self.key, other.key) or cmp(self.text, other.text)
def __repr__(self):
return "<Node for %s (%s)>" % (bang_join(self.text), self.seqno)
def __str__(self):
return bang_join(self.key)
def dump(self):
return "%s\1%s###%s\n" \
% ("\1".join(self.links),
bang_join(self.text),
self.seqno)
def cmp_part(s1, s2):
result = cmp(s1, s2)
if result == 0:
return 0
l1 = s1.lower()
l2 = s2.lower()
minlen = min(len(s1), len(s2))
if len(s1) < len(s2) and l1 == l2[:len(s1)]:
result = -1
elif len(s2) < len(s1) and l2 == l1[:len(s2)]:
result = 1
else:
result = cmp(l1, l2) or cmp(s1, s2)
return result
def split_entry(str, which):
stuff = []
parts = str.split('!')
parts = [part.split('@') for part in parts]
for entry in parts:
if len(entry) != 1:
key = entry[which]
else:
key = entry[0]
stuff.append(key)
return stuff
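# Worked example (added for clarity, not in the original script): entries use
# '!' to separate nesting levels and '@' to give a level two forms; split_entry
# picks component ``which`` when both forms are present and falls back to the
# single component otherwise, e.g.
#     split_entry("os@<tt>os</tt>!path", 0) -> ['os', 'path']
#     split_entry("os@<tt>os</tt>!path", 1) -> ['<tt>os</tt>', 'path']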
_rmtt = re.compile(r"""(.*)<tt(?: class=['"][a-z0-9]+["'])?>(.*)</tt>(.*)$""",
re.IGNORECASE)
_rmparens = re.compile(r"\(\)")
def split_entry_key(str):
parts = split_entry(str, 1)
for i in range(len(parts)):
m = _rmtt.match(parts[i])
if m:
parts[i] = null_join(m.group(1, 2, 3))
else:
parts[i] = parts[i].lower()
# remove '()' from the key:
parts[i] = _rmparens.sub('', parts[i])
return map(trim_ignored_letters, parts)
def split_entry_text(str):
if '<' in str:
m = _rmtt.match(str)
if m:
str = null_join(m.group(1, 2, 3))
return split_entry(str, 1)
def load(fp):
nodes = []
rx = re.compile("(.*)\1(.*)###(.*)$")
while 1:
line = fp.readline()
if not line:
break
m = rx.match(line)
if m:
link, str, seqno = m.group(1, 2, 3)
nodes.append(Node(link, str, seqno))
return nodes
def trim_ignored_letters(s):
# ignore $ to keep environment variables with the
# leading letter from the name
if s.startswith("$"):
return s[1:].lower()
else:
return s.lower()
def get_first_letter(s):
if s.startswith("<tex2html_percent_mark>"):
return "%"
else:
return trim_ignored_letters(s)[0]
def split_letters(nodes):
letter_groups = []
if nodes:
group = []
append = group.append
letter = get_first_letter(nodes[0].text[0])
letter_groups.append((letter, group))
for node in nodes:
nletter = get_first_letter(node.text[0])
if letter != nletter:
letter = nletter
group = []
letter_groups.append((letter, group))
append = group.append
append(node)
return letter_groups
def group_symbols(groups):
entries = []
ident_letters = string.ascii_letters + "_"
while groups[0][0] not in ident_letters:
entries += groups[0][1]
del groups[0]
if entries:
groups.insert(0, ("Symbols", entries))
# need a function to separate the nodes into columns...
def split_columns(nodes, columns=1):
if columns <= 1:
return [nodes]
# This is a rough height; we may have to increase to avoid breaks before
# a subitem.
colheight = int(len(nodes) / columns)
numlong = int(len(nodes) % columns)
if numlong:
colheight = colheight + 1
else:
numlong = columns
cols = []
for i in range(numlong):
start = i * colheight
end = start + colheight
cols.append(nodes[start:end])
del nodes[:end]
colheight = colheight - 1
try:
numshort = int(len(nodes) / colheight)
except ZeroDivisionError:
cols = cols + (columns - len(cols)) * [[]]
else:
for i in range(numshort):
start = i * colheight
end = start + colheight
cols.append(nodes[start:end])
#
# If items continue across columns, make sure they are marked
# as continuations so the user knows to look at the previous column.
#
for i in range(len(cols) - 1):
try:
prev = cols[i][-1]
next = cols[i + 1][0]
except IndexError:
return cols
else:
n = min(len(prev.key), len(next.key))
for j in range(n):
if prev.key[j] != next.key[j]:
break
next.continuation = j + 1
return cols
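# Worked example (added): with 10 nodes and columns=3 the code above yields one
# "long" column of 4 entries followed by two columns of 3 (colheight = 10 // 3 = 3,
# numlong = 10 % 3 = 1, so the first column gets colheight + 1 entries), and the
# continuation pass then marks any entry whose key prefix carries over from the
# bottom of the previous column.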
DL_LEVEL_INDENT = " "
def format_column(nodes):
strings = ["<dl compact='compact'>"]
append = strings.append
level = 0
previous = []
for node in nodes:
current = node.text
count = 0
for i in range(min(len(current), len(previous))):
if previous[i] != current[i]:
break
count = i + 1
if count > level:
append("<dl compact='compact'>" * (count - level) + "\n")
level = count
elif level > count:
append("\n")
append(level * DL_LEVEL_INDENT)
append("</dl>" * (level - count))
level = count
# else: level == count
for i in range(count, len(current) - 1):
term = node.text[i]
level = level + 1
if node.continuation > i:
extra = " (continued)"
else:
extra = ""
append("\n<dt>%s%s\n<dd>\n%s<dl compact='compact'>"
% (term, extra, level * DL_LEVEL_INDENT))
append("\n%s<dt>%s%s</a>"
% (level * DL_LEVEL_INDENT, node.links[0], node.text[-1]))
for link in node.links[1:]:
append(",\n%s %s[Link]</a>" % (level * DL_LEVEL_INDENT, link))
previous = current
append("\n")
append("</dl>" * (level + 1))
return null_join(strings)
def format_nodes(nodes, columns=1):
strings = []
append = strings.append
if columns > 1:
colnos = range(columns)
colheight = int(len(nodes) / columns)
if len(nodes) % columns:
colheight = colheight + 1
colwidth = int(100 / columns)
append('<table width="100%"><tr valign="top">')
for col in split_columns(nodes, columns):
append('<td width="%d%%">\n' % colwidth)
append(format_column(col))
append("\n</td>")
append("\n</tr></table>")
else:
append(format_column(nodes))
return null_join(strings)
def format_letter(letter):
if letter == '.':
lettername = ". (dot)"
elif letter == '_':
lettername = "_ (underscore)"
else:
lettername = letter.capitalize()
return "\n<hr />\n<h2 id=%s>%s</h2>\n\n" \
% (quoteattr("letter-" + letter), lettername)
def format_html_letters(nodes, columns, group_symbol_nodes):
letter_groups = split_letters(nodes)
if group_symbol_nodes:
group_symbols(letter_groups)
items = []
for letter, nodes in letter_groups:
s = "<b><a href=\"#letter-%s\">%s</a></b>" % (letter, letter)
items.append(s)
s = ["<hr /><center>\n%s</center>\n" % " |\n".join(items)]
for letter, nodes in letter_groups:
s.append(format_letter(letter))
s.append(format_nodes(nodes, columns))
return null_join(s)
def format_html(nodes, columns):
return format_nodes(nodes, columns)
def collapse(nodes):
"""Collapse sequences of nodes with matching keys into a single node.
Destructive."""
if len(nodes) < 2:
return
prev = nodes[0]
i = 1
while i < len(nodes):
node = nodes[i]
if not node.cmp_entry(prev):
prev.links.append(node.links[0])
del nodes[i]
else:
i = i + 1
prev = node
def dump(nodes, fp):
for node in nodes:
fp.write(node.dump())
def process_nodes(nodes, columns, letters=0, group_symbol_nodes=0):
nodes.sort()
collapse(nodes)
if letters:
return format_html_letters(nodes, columns, group_symbol_nodes)
else:
return format_html(nodes, columns)
def main():
import getopt
ifn = "-"
ofn = "-"
columns = 1
letters = 0
group_symbol_nodes = 1
opts, args = getopt.getopt(sys.argv[1:], "c:lo:",
["columns=", "dont-group-symbols",
"group-symbols", "letters", "output="])
for opt, val in opts:
if opt in ("-o", "--output"):
ofn = val
elif opt in ("-c", "--columns"):
columns = int(val, 10)
elif opt in ("-l", "--letters"):
letters = 1
elif opt == "--group-symbols":
group_symbol_nodes = 1
elif opt == "--dont-group-symbols":
group_symbol_nodes = 0
if not args:
args = [ifn]
nodes = []
for fn in args:
nodes = nodes + load(open(fn))
num_nodes = len(nodes)
html = process_nodes(nodes, columns, letters, group_symbol_nodes)
program = os.path.basename(sys.argv[0])
if ofn == "-":
sys.stdout.write(html)
sys.stderr.write("\n%s: %d index nodes" % (program, num_nodes))
else:
open(ofn, "w").write(html)
print
print "%s: %d index nodes" % (program, num_nodes)
if __name__ == "__main__":
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from csvn.core import *
from csvn.ext.listmixin import ListMixin
from UserDict import DictMixin
from shutil import copyfileobj
from tempfile import TemporaryFile
# This class contains Pythonic wrappers for generic Subversion and
# APR datatypes (e.g. dates, streams, hashes, arrays, etc).
#
# These wrappers are used by higher-level APIs in csvn to wrap
# raw C datatypes in a way that is easy for Python developers
# to use.
class SvnDate(str):
def as_apr_time_t(self):
"""Return this date to an apr_time_t object"""
pool = Pool()
when = apr_time_t()
svn_time_from_cstring(byref(when), self, pool)
return when
def as_human_string(self):
"""Return this date to a human-readable date"""
pool = Pool()
return str(svn_time_to_human_cstring(self.as_apr_time_t(), pool))
class Hash(DictMixin):
"""A dictionary wrapper for apr_hash_t"""
_keys = DictMixin.iterkeys
def __init__(self, type, items={}, wrapper=None, dup=None):
self.type = type
self.pool = Pool()
self.wrapper = wrapper
self.dup = dup
if dup:
self.hash = apr_hash_make(self.pool)
if items is None or isinstance(items, POINTER(apr_hash_t)):
items = Hash(type, items)
self.update(items)
elif isinstance(items, POINTER(apr_hash_t)):
self.hash = items
elif items is None:
self.hash = POINTER(apr_hash_t)()
elif isinstance(items, Hash):
self.hash = items.hash
else:
self.hash = apr_hash_make(self.pool)
self.update(items)
def __getitem__(self, key):
value = apr_hash_get(self, cast(key, c_void_p), len(key))
if not value:
raise KeyError(key)
value = cast(value, self.type)
if self.wrapper:
value = self.wrapper.from_param(value)
return value
def __setitem__(self, key, value):
if self.wrapper:
value = self.wrapper.to_param(value, self.pool)
if self.dup:
value = self.dup(value, self.pool)
apr_hash_set(self, apr_pstrdup(self.pool, key), len(key), value)
def __delitem__(self, key):
apr_hash_set(self, key, len(key), NULL)
def keys(self):
return list(self._keys())
def __iter__(self):
for (key, _) in self.items():
yield key
def items(self):
pool = Pool()
hi = apr_hash_first(pool, self)
while hi:
key_vp = c_void_p()
val_vp = c_void_p()
apr_hash_this(hi, byref(key_vp), None, byref(val_vp))
val = cast(val_vp, self.type)
if self.wrapper:
val = self.wrapper.from_param(val)
yield (string_at(key_vp), val)
hi = apr_hash_next(hi)
def __len__(self):
return int(apr_hash_count(self))
def byref(self):
return byref(self._as_parameter_)
def pointer(self):
return pointer(self._as_parameter_)
_as_parameter_ = property(fget=lambda self: self.hash)
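# Hedged illustration (added, not from the original module): a Hash is typically
# constructed either around an existing apr_hash_t pointer returned by a
# Subversion API, or from a Python dict, optionally with a wrapper such as the
# SvnStringPtr class defined later in this file, e.g. something like
#     props = Hash(POINTER(svn_string_t), {}, wrapper=SvnStringPtr)
# so that values read back through __getitem__ arrive as plain Python strings.
# The exact value type depends on which Subversion call the hash is passed to.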
class Array(ListMixin):
"""An array wrapper for apr_array_header_t"""
def __init__(self, type, items=None, size=0):
self.type = type
self.pool = Pool()
if not items:
self.header = apr_array_make(self.pool, size, sizeof(type))
elif isinstance(items, POINTER(apr_array_header_t)):
self.header = items
elif isinstance(items, Array):
self.header = apr_array_copy(self.pool, items)
else:
self.header = apr_array_make(self.pool, len(items),
sizeof(type))
self.extend(items)
_as_parameter_ = property(fget=lambda self: self.header)
elts = property(fget=lambda self: cast(self.header[0].elts.raw,
POINTER(self.type)))
def _get_element(self, i):
return self.elts[i]
def _set_element(self, i, value):
self.elts[i] = value
def __len__(self):
return self.header[0].nelts
def _resize_region(self, start, end, new_size):
diff = start-end+new_size
# Growing
if diff > 0:
l = len(self)
# Make space for the new items
for i in range(diff):
apr_array_push(self)
# Move the old items out of the way, if necessary
if end < l:
src_idx = max(end-diff,0)
                memmove(byref(self.elts[end]),
byref(self.elts[src_idx]),
(l-src_idx)*self.header[0].elt_size)
# Shrinking
elif diff < 0:
# Overwrite the deleted items with items we still need
if end < len(self):
memmove(byref(self.elts[end+diff]),
byref(self.elts[end]),
(len(self)-end)*self.header[0].elt_size)
# Shrink the array
for i in range(-diff):
apr_array_pop(self)
try:
# On Windows we need to do some magic to get the os-level file handle
from msvcrt import get_osfhandle
except ImportError:
get_osfhandle = lambda fileno: fileno
class APRFile(object):
"""Wrap a Python file-like object as an APR File"""
def __init__(self, pyfile):
self.pyfile = pyfile
self.pool = Pool()
self._as_parameter_ = POINTER(apr_file_t)()
self.tempfile = None
if hasattr(pyfile, "fileno"):
# Looks like this is a real file. We can just write
# directly to said file
osfile = apr_os_file_t(get_osfhandle(pyfile.fileno()))
else:
# Looks like this is a StringIO buffer or a fake file.
# Write to a temporary file and copy the output to the
# buffer when we are closed or flushed
self.tempfile = TemporaryFile()
osfile = apr_os_file_t(get_osfhandle(self.tempfile.fileno()))
apr_os_file_put(byref(self._as_parameter_), byref(osfile),
APR_CREATE | APR_WRITE | APR_BINARY, self.pool)
def flush(self):
"""Flush output to the underlying Python object"""
if self.tempfile:
self.tempfile.seek(0)
copyfileobj(self.tempfile, self.pyfile)
self.tempfile.truncate(0)
def close(self):
"""Close the APR file wrapper, leaving the underlying Python object
untouched"""
self.flush()
if self.tempfile:
self.tempfile.close()
self.tempfile = None
self.pool.destroy()
self.pool = None
def __del__(self):
if self.pool:
self.close()
class Stream(object):
def __init__(self, buffer, disown=False):
"""Create a stream which wraps a Python file or file-like object"""
self.pool = Pool()
self.buffer = buffer
self.stream = svn_stream_create(c_void_p(), self.pool)
svn_stream_set_read(self.stream, svn_read_fn_t(self._read))
svn_stream_set_write(self.stream, svn_write_fn_t(self._write))
if not disown:
svn_stream_set_close(self.stream, svn_close_fn_t(self._close))
_as_parameter_ = property(fget=lambda self: self.stream)
def _read(self, baton, buffer, l):
s = self.buffer.read(l[0])
        memmove(buffer, s, len(s))
l[0] = len(s)
return SVN_NO_ERROR
def _write(self, baton, data, l):
s = string_at(data.raw, l[0])
self.buffer.write(s)
return SVN_NO_ERROR
def _close(self, baton):
self.buffer.close()
return SVN_NO_ERROR
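# Hedged illustration (added): Stream lets a Python file-like object stand in
# for an svn_stream_t, e.g. wrapping an in-memory buffer so that a Subversion
# API which writes to a stream ends up filling the buffer, along the lines of
#     from cStringIO import StringIO
#     buf = StringIO()
#     stream = Stream(buf)   # pass ``stream`` where an svn_stream_t is expected
# The _read/_write callbacks above shuttle the bytes between the two worlds.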
class SvnStringPtr(object):
def to_param(obj, pool):
return svn_string_ncreate(obj, len(obj), pool)
to_param = staticmethod(to_param)
def from_param(obj):
assert isinstance(obj[0], svn_string_t)
# Convert from a raw svn_string_t object. Pass in the length, so that
# we handle binary property values with embedded NULLs correctly.
return string_at(obj[0].data.raw, obj[0].len)
from_param = staticmethod(from_param)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
def testHandleDtypeShapeMatch(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([0], dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0, dtype=dtypes.int32)).run()
def testDtypeSurvivesIdentity(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)).run()
def testCreateRead(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
value = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32).eval()
self.assertAllEqual(1, value)
def testManyAssigns(self):
with self.test_session() as session:
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = session.run([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
def testAssignAdd(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(read.eval(), 2)
def testScatterAdd(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)).run()
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)).run()
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(read.eval(), [[3]])
def testGPU(self):
with self.test_session(use_gpu=True) as sess:
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
sess.run(variables.global_variables_initializer())
self.assertEqual(
resource_variable_ops.var_is_initialized_op(abc.handle).eval(),
True)
print(sess.run(abc))
def testInitFn(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testInitFnDtype(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, v.value().dtype)
def testInitFnNoDtype(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1)
self.assertEqual(dtypes.int32, v.value().dtype)
def testInitializeAllVariables(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32)
with self.assertRaises(errors.NotFoundError):
v.value().eval()
variables.global_variables_initializer().run()
self.assertEqual(1.0, v.value().eval())
def testOperatorOverload(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
self.assertEqual(2.0, (v+v).eval())
def testAssignMethod(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
v.assign(2.0).eval()
self.assertEqual(2.0, v.value().eval())
def testLoad(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
v.load(2.0)
self.assertEqual(2.0, v.value().eval())
def testSparseRead(self):
with self.test_session():
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32))
variables.global_variables_initializer().run()
value = v.sparse_read([0, 3, 1, 2]).eval()
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
def testToFromProto(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEquals(2, math_ops.add(w, 1).eval())
def testAssignAddMethod(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
v.assign_add(1.0).eval()
self.assertEqual(2.0, v.value().eval())
def testAssignSubMethod(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(3.0)
variables.global_variables_initializer().run()
v.assign_sub(1.0).eval()
self.assertEqual(2.0, v.value().eval())
def testDestroyResource(self):
with self.test_session() as sess:
v = resource_variable_ops.ResourceVariable(3.0)
variables.global_variables_initializer().run()
self.assertEqual(3.0, v.value().eval())
sess.run(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.NotFoundError):
v.value().eval()
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
sess.run(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
def testAssignDifferentShapes(self):
with self.test_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run([assign],
feed_dict={placeholder: np.zeros(shape=[2, 2],
dtype=np.float32)})
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
_ = w.value().op.get_attr("_class")
def testSharedName(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var1")
v.initializer.run()
w = resource_variable_ops.var_handle_op(dtype=v.dtype.base_dtype,
shape=v.get_shape(),
shared_name="var1")
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, w_read.eval())
x = resource_variable_ops.var_handle_op(dtype=v.dtype.base_dtype,
shape=v.get_shape(),
shared_name="var1/")
x_read = resource_variable_ops.read_variable_op(x, v.dtype.base_dtype)
with self.assertRaisesOpError("Resource .*/var1//.* does not exist"):
_ = x_read.eval()
def testShape(self):
with self.test_session():
v = resource_variable_ops.ResourceVariable(
name="var1", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
def testSetInitialValue(self):
with self.test_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
# Python
import datetime
import urllib2
import re
from urllib2 import URLError, HTTPError
from xml.dom import minidom
from lxml import etree
from pymongo.errors import OperationFailure
from pymongo.code import Code
# Django
from mongodb import *
from sgrstats.static import *
# Celery
from celery.task import Task
from celery.registry import tasks
from pymongo.errors import PyMongoError
class FetchPlayerStatsTask(Task):
ignore_result = True
max_retries = 30
default_retry_delay = 3 * 60 # Retry in 3 minutes
def run(self, user_data = None, **kwargs):
""" Fetches and saves player's stats into the MongoDB collection. """
objectives_to_fetch = ACCOUNT_OBJECTIVES_ALL + OBJECTIVES_MAPS_ALL + OBJECTIVES_CLASSES_ALL\
+ OBJECTIVES_WEAPONS_ALL
# When updating player data, user_data is a triple (account_id, user_id, username)
# account_id - firesky account id, user_id - website account user id
# username - website account username
date = datetime.datetime.now()
today = date.date()
date_value = "%s-%s-%s" % (today.year, today.month, today.day)
account_id, user_id, username = user_data
try:
player_objectives = self.fetch_player_objective_list(account_id)
except (IOError, URLError), exc:
# try to retry the task later on
self.retry((user_data,), kwargs, exc = exc)
return
dom = etree.fromstring(player_objectives)
# Loop over all the available objectives
empty_count = 0
player_data = {'account_id': account_id, 'user_id': user_id,
'username': username, 'date_short': date_value,
'date_full': date, 'data': {}}
player_objectives = player_data['data']
for node in dom.iter('{%s}Objective' % (OBJECTIVE_LIST_NS)):
objective_id = node.attrib.get('ObjectiveID', '')
objective_value = node.attrib.get('ProgressValue', '')
if objective_id in objectives_to_fetch:
try:
value = int(objective_value)
except ValueError:
value = 0
player_objectives['%s' % (objective_id)] = value
if objective_value == '':
empty_count += 1
if empty_count == len(player_objectives) - 2:
# All the values are empty, this player does not exist
return
# Calculate K/D and hit ratio
types = ['Account']
for type in types:
try:
player_objectives['%s_%s_%s' % ('SGR', type, 'KillDeathRatio')] = float(player_objectives['%s_%s_%s' % ('SGR', type, 'KillsTotal')]) / float(player_objectives['%s_%s_%s' % ('SGR', type, 'KilledTotal')])
player_objectives['%s_%s_%s' % ('SGR', type, 'HitRatio')] = (float(player_objectives['%s_%s_%s' % ('SGR', type, 'ShotsHit')]) / float(player_objectives['%s_%s_%s' % ('SGR', type, 'ShotsFired')])) * 100
except (ZeroDivisionError, KeyError):
player_objectives['%s_%s_%s' % ('SGR', type, 'KillDeathRatio')] = 0
player_objectives['%s_%s_%s' % ('SGR', type, 'HitRatio')] = 0
# Total account values for Leonops map (arena game type) are not recorded, so we need to
# calculate them manually
try:
player_objectives['SGR_Account_WinsOnArena'] = sum([int(player_objectives['SGR_%s_WinsOnArena' % (type)]) for type in types[3:]])
except:
pass
try:
player_objectives['SGR_Account_LossesOnArena'] = sum([int(player_objectives['SGR_%s_LossesOnArena' % (type)]) for type in types[3:]])
except:
pass
try:
database.rankings_data.save(player_data, safe = True)
except (OperationFailure), exc:
# Failed inserting document in the collection, retry later
self.retry((user_data,), kwargs, exc = exc)
def fetch_player_objective_list(self, account_id):
""" Fetch player objectives. """
post_data = '''<ns2:Registration xmlns:ns1="http://www.cheyenneme.com/xml/cmebase" xmlns:ns2="http://www.cheyenneme.com/xml/registration"><ns2:Service><ns2:AccountObjectiveGet><ns2:Request AccountID="''' + str(account_id) + '''" /></ns2:AccountObjectiveGet></ns2:Service></ns2:Registration>'''
request = urllib2.Request(OBJECTIVE_LIST_URL, data = post_data, headers = {
'User-Agent': USER_AGENT,
'Content-Type': 'text/plain',
})
response = urllib2.urlopen(request, timeout = 6)
return response.read()
class CalculateTopClassesTask(Task):
ignore_result = True
max_retries = 5
default_retry_delay = 2 * 60
def run(self, **kwargs):
# Map function
emit_string = ''
for objective in OBJECTIVES_CLASSES_ALL:
emit_string += 'emit("%s", this.data.%s);' % (objective, objective)
map = Code("function() { %s }" % (emit_string))
# Reduce function
reduce = Code("function(key, values) {"
" var total = 0;"
" for (var i = 0; i < values.length; i++) {"
" total += values[i];"
" }"
" return total;"
"}")
today = datetime.datetime.now().date()
date = "%s-%s-%s" % (today.year, today.month, today.day)
# Calculate the result (map & reduce)
query = {'date_short': date}
result = database.rankings_data.map_reduce(map, reduce, query = query)
results = {}
for doc in result.find():
objective = doc['_id']
value = doc['value']
results[objective] = value
# Save the cumulatives in the top_classes collection
# Entry _id contains the current date (YYYY-mm-dd)
try:
database.top_classes.save({'_id': date, \
'data': results}, safe = True)
except (OperationFailure), exc:
self.retry((), kwargs, exc = exc)
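# Illustrative note (added): for a hypothetical OBJECTIVES_CLASSES_ALL of
# ['SGR_Class_Soldier_KillsTotal', 'SGR_Class_Scientist_KillsTotal'] the map
# function built above expands to
#     function() { emit("SGR_Class_Soldier_KillsTotal", this.data.SGR_Class_Soldier_KillsTotal);
#                  emit("SGR_Class_Scientist_KillsTotal", this.data.SGR_Class_Scientist_KillsTotal); }
# and the reduce function sums the emitted values per objective across every
# rankings_data document matching today's date_short, giving one total per class.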
class CalculateTopMapsTask(Task):
ignore_result = True
max_retries = 5
default_retry_delay = 2 * 60
def run(self, **kwargs):
# Map function
emit_string = ''
for objective in OBJECTIVES_MAPS_ALL:
emit_string += 'emit("%s", this.data.%s);' % (objective, objective)
map = Code("function() { %s }" % (emit_string))
# Reduce function
reduce = Code("function(key, values) {"
" var total = 0;"
" for (var i = 0; i < values.length; i++) {"
" total += values[i];"
" }"
" return total;"
"}")
today = datetime.datetime.now().date()
date = "%s-%s-%s" % (today.year, today.month, today.day)
# Calculate the result (map & reduce)
query = {'date_short': date}
result = database.rankings_data.map_reduce(map, reduce, query = query)
results = {}
for doc in result.find():
objective = doc['_id']
value = doc['value']
results[objective] = value
try:
database.top_maps.save({'_id': date, \
'data': results}, safe = True)
except (OperationFailure), exc:
self.retry((), kwargs, exc = exc)
class CalculateTopWeaponsTask(Task):
ignore_result = True
max_retries = 5
default_retry_delay = 2 * 60
def run(self, **kwargs):
# Map function
emit_string = ''
for objective in OBJECTIVES_WEAPONS_ALL:
emit_string += 'emit("%s", this.data.%s);' % (objective, objective)
map = Code("function() { %s }" % (emit_string))
# Reduce function
reduce = Code("function(key, values) {"
" var total = 0;"
" for (var i = 0; i < values.length; i++) {"
" total += values[i];"
" }"
" return total;"
"}")
today = datetime.datetime.now().date()
date = "%s-%s-%s" % (today.year, today.month, today.day)
# Calculate the result (map & reduce)
query = {'date_short': date}
result = database.rankings_data.map_reduce(map, reduce, query = query)
results = {}
for doc in result.find():
objective = doc['_id']
value = doc['value']
results[objective] = value
try:
database.top_weapons.save({'_id': date, \
'data': results}, safe = True)
except (OperationFailure), exc:
self.retry((), kwargs, exc = exc)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.resources.types import offline_user_data_job
from google.ads.googleads.v10.services.types import (
offline_user_data_job_service,
)
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
OfflineUserDataJobServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import OfflineUserDataJobServiceGrpcTransport
class OfflineUserDataJobServiceClientMeta(type):
"""Metaclass for the OfflineUserDataJobService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[OfflineUserDataJobServiceTransport]]
_transport_registry["grpc"] = OfflineUserDataJobServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[OfflineUserDataJobServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class OfflineUserDataJobServiceClient(
metaclass=OfflineUserDataJobServiceClientMeta
):
"""Service to manage offline user data jobs."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
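    # Worked examples (added for clarity): _get_default_mtls_endpoint maps
    #     "googleads.googleapis.com"         -> "googleads.mtls.googleapis.com"
    #     "googleads.sandbox.googleapis.com" -> "googleads.mtls.sandbox.googleapis.com"
    # and returns endpoints that are already mTLS, or that are not *.googleapis.com,
    # unchanged.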
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
OfflineUserDataJobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
OfflineUserDataJobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> OfflineUserDataJobServiceTransport:
"""Returns the transport used by the client instance.
Returns:
OfflineUserDataJobServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def offline_user_data_job_path(
customer_id: str, offline_user_data_update_id: str,
) -> str:
"""Returns a fully-qualified offline_user_data_job string."""
return "customers/{customer_id}/offlineUserDataJobs/{offline_user_data_update_id}".format(
customer_id=customer_id,
offline_user_data_update_id=offline_user_data_update_id,
)
@staticmethod
def parse_offline_user_data_job_path(path: str) -> Dict[str, str]:
"""Parses a offline_user_data_job path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/offlineUserDataJobs/(?P<offline_user_data_update_id>.+?)$",
path,
)
return m.groupdict() if m else {}
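    # Worked example (added): the two helpers above are inverses, e.g.
    #     offline_user_data_job_path("123", "456")
    #         -> "customers/123/offlineUserDataJobs/456"
    #     parse_offline_user_data_job_path("customers/123/offlineUserDataJobs/456")
    #         -> {"customer_id": "123", "offline_user_data_update_id": "456"}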
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, OfflineUserDataJobServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the offline user data job service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, OfflineUserDataJobServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, OfflineUserDataJobServiceTransport):
# transport is a OfflineUserDataJobServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_offline_user_data_job(
self,
request: Union[
offline_user_data_job_service.CreateOfflineUserDataJobRequest, dict
] = None,
*,
customer_id: str = None,
job: offline_user_data_job.OfflineUserDataJob = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> offline_user_data_job_service.CreateOfflineUserDataJobResponse:
r"""Creates an offline user data job.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`HeaderError <>`__ `InternalError <>`__
`NotAllowlistedError <>`__ `OfflineUserDataJobError <>`__
`QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.CreateOfflineUserDataJobRequest, dict]):
The request object. Request message for
[OfflineUserDataJobService.CreateOfflineUserDataJob][google.ads.googleads.v10.services.OfflineUserDataJobService.CreateOfflineUserDataJob].
customer_id (str):
Required. The ID of the customer for
which to create an offline user data
job.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job (google.ads.googleads.v10.resources.types.OfflineUserDataJob):
Required. The offline user data job
to be created.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.CreateOfflineUserDataJobResponse:
Response message for
[OfflineUserDataJobService.CreateOfflineUserDataJob][google.ads.googleads.v10.services.OfflineUserDataJobService.CreateOfflineUserDataJob].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a offline_user_data_job_service.CreateOfflineUserDataJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
offline_user_data_job_service.CreateOfflineUserDataJobRequest,
):
request = offline_user_data_job_service.CreateOfflineUserDataJobRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if job is not None:
request.job = job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_offline_user_data_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def add_offline_user_data_job_operations(
self,
request: Union[
offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest,
dict,
] = None,
*,
resource_name: str = None,
operations: Sequence[
offline_user_data_job_service.OfflineUserDataJobOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> offline_user_data_job_service.AddOfflineUserDataJobOperationsResponse:
r"""Adds operations to the offline user data job.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`HeaderError <>`__ `InternalError <>`__ `MutateError <>`__
`OfflineUserDataJobError <>`__ `QuotaError <>`__
`RequestError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.AddOfflineUserDataJobOperationsRequest, dict]):
The request object. Request message for
[OfflineUserDataJobService.AddOfflineUserDataJobOperations][google.ads.googleads.v10.services.OfflineUserDataJobService.AddOfflineUserDataJobOperations].
resource_name (str):
Required. The resource name of the
OfflineUserDataJob.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.OfflineUserDataJobOperation]):
Required. The list of operations to
be done.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.AddOfflineUserDataJobOperationsResponse:
Response message for
[OfflineUserDataJobService.AddOfflineUserDataJobOperations][google.ads.googleads.v10.services.OfflineUserDataJobService.AddOfflineUserDataJobOperations].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource_name, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest,
):
request = offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.add_offline_user_data_job_operations
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def run_offline_user_data_job(
self,
request: Union[
offline_user_data_job_service.RunOfflineUserDataJobRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Runs the offline user data job.
When finished, the long running operation will contain the
processing result or failure information, if any.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__
`HeaderError <>`__ `InternalError <>`__
`OfflineUserDataJobError <>`__ `QuotaError <>`__
`RequestError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.RunOfflineUserDataJobRequest, dict]):
The request object. Request message for
[OfflineUserDataJobService.RunOfflineUserDataJob][google.ads.googleads.v10.services.OfflineUserDataJobService.RunOfflineUserDataJob].
resource_name (str):
Required. The resource name of the
OfflineUserDataJob to run.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource_name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a offline_user_data_job_service.RunOfflineUserDataJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, offline_user_data_job_service.RunOfflineUserDataJobRequest
):
request = offline_user_data_job_service.RunOfflineUserDataJobRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.run_offline_user_data_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=offline_user_data_job.OfflineUserDataJobMetadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("OfflineUserDataJobServiceClient",)
|
|
import numpy as np
import piece_valid_moves
from datetime import datetime
startTime = datetime.now()
# converts a square given in algebraic notation (e.g. "e4") into a 0-63 board index (a8 = 0, h1 = 63)
error_count=0
def position_converter(position):
x=ord(position[0])-97
y=56-ord(position[1])
return((8*y)+x)
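# Quick sanity checks of the square indexing used throughout this script
# (illustrative asserts: a8 maps to 0, h1 to 63, files a-h left to right,
# ranks 8 down to 1):
assert position_converter("a8") == 0
assert position_converter("h1") == 63
assert position_converter("e4") == 36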
space_count=0
target_pos=0
num_index=1
# Initialising the 12x64 board encoding
# pairing of the 12 one-dimensional planes --> (0,1,2,3,4,5,6,7,8,9,10,11) ~ (k,q,n,r,b,p,K,Q,N,R,B,P)
features=['k','q','n','r','b','p','K','Q','N','R','B','P']
def Initial_State():
a=np.zeros((12,64),dtype=np.int)
a[0][4]=1
a[1][3]=1
a[2][1]=1
a[2][6]=1
a[3][0]=1
a[3][7]=1
a[4][2]=1
a[4][5]=1
for i in range(8,16):
a[5][i]=1
a[6][60]=1
a[7][59]=1
a[8][62]=1
a[8][57]=1
a[9][63]=1
a[9][56]=1
a[10][58]=1
a[10][61]=1
for i in range(48,56):
a[11][i]=1
return(a)
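# Sanity check (illustrative): the starting position encodes exactly 32 pieces
# across the 12 planes listed in `features`.
assert Initial_State().shape == (12, 64)
assert Initial_State().sum() == 32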
def king_killing(state,i,target_pos,turn):
State=state
State[target_pos]=State[i]
State[i]='null'
if(turn==0):
l=State.index('K')
for j in range(64):
if(State[j][0] in {'q','r','b'}):
if(piece_valid_moves.valid_moves(State,j,l)=='success'):
return 1
return 0
else:
l=State.index('k')
for j in range(64):
if(State[j][0] in {'Q','R','B'}):
if(piece_valid_moves.valid_moves(State,j,l)=='success'):
return 1
return 0
def matrix_manipulation(move):
# global validity
global error_count
global game
# print move
global output_vector
global input_vector
global state
global space_count
global promoted_piece_count
turn=space_count%3-1
    # turn=0 -> white to move, turn=1 -> black to move (uppercase pieces in 'state' are white)
check=0
kill=0
targeting_piece="null"
promoted_piece='none'
killer_piece="P"
active_file="none"
active_rank="none"
killer_pos=-1
# move can be "e4","de4","Ne4","Nd5e4","N5e4s","Nde4","dxe4","Nxe4","Ndxe4","O-O","O-O-O" and x can be added to any of them.
if(move[len(move)-1]=="+"):
check=1
move=move.translate(None, '+')
elif(move[len(move)-1]=="#"):
# checkmate, game over
move=move.translate(None, '#')
if(move=="O-O-O"):
        # castling on the queenside (O-O-O)
if(turn==0):
state[58]='K'
state[60]='null'
state[59]='R1'
state[56]='null'
input_vector[6][58]=1
input_vector[6][60]=0
input_vector[9][59]=1
input_vector[9][56]=0
else:
state[2]='k'
state[4]='null'
state[3]='r1'
state[0]='null'
input_vector[0][2]=1
input_vector[0][4]=0
input_vector[3][3]=1
input_vector[3][0]=0
elif(move=="O-O"):
        # castling on the kingside (O-O)
if(turn==0):
state[62]='K'
state[60]='null'
state[61]='R2'
state[63]='null'
input_vector[6][62]=1
input_vector[6][60]=0
input_vector[9][61]=1
input_vector[9][63]=0
else:
state[6]='k'
state[4]='null'
state[5]='r2'
state[7]='null'
input_vector[0][6]=1
input_vector[0][4]=0
input_vector[3][5]=1
input_vector[3][7]=0
else:
for i in move:
if(i in {'1','2','3','4','5','6','7','8'}):
num_index=move.index(i)
elif(i=="x"):
kill=1
move=move.translate(None,"x")
pos_str=move[num_index-1]+move[num_index]
target_pos=position_converter(pos_str)
if(len(move)!=num_index+1):
if(move[num_index+1]=="="):
# pawn promotion
promoted_piece=move[num_index+2]
promoted_piece_count+=1
move=move.translate(None,"=")
move=move.translate(None,promoted_piece)
if(num_index==2):
# de4,Ne4
if(move[0].islower()):
active_file=move[0]
else:
killer_piece=move[0]
elif(num_index==3):
# Nde4, N5e4
killer_piece=move[0]
if(move[1] in {'1','2','3','4','5','6','7','8'}):
active_rank=move[1]
else:
active_file=move[1]
elif(num_index==4):
# Nd5e4
killer_piece=move[0]
pos_str=move[1]+move[2]
killer_pos=position_converter(pos_str)
if(turn==0):
if(active_file=='none' and active_rank=="none"):
if(killer_piece=='P'):
if(state[target_pos]!='null'):
if(state[target_pos+7][0]=='P'):
killer_pos=target_pos+7
else:
killer_pos=target_pos+9
elif(state[target_pos]=='null' and kill==1):
# en_passant
if(state[target_pos+9][0]=='P'):
killer_pos=target_pos+9
else:
killer_pos=target_pos+7
input_vector[features.index('p')][target_pos+8]=0
state[target_pos+8]='null'
kill=0
else:
if(state[target_pos+8][0]=='P'):
killer_pos=target_pos+8
else:
killer_pos=target_pos+16
else:
for i in range(64):
if(state[i][0]==killer_piece and i!=target_pos):
if(piece_valid_moves.valid_moves(state,i,target_pos)=='success'):
killer_pos=i
break
elif(active_file!='none' and active_rank=='none'):
column=ord(active_file)-97
for i in range(8):
if(state[column][0]==killer_piece):
if(piece_valid_moves.valid_moves(state,column,target_pos)=='success'):
killer_pos=column
break
column=column+8
if(state[target_pos]=='null' and kill==1):
# en_passant
if((target_pos+9)%8==ord(active_file)-97):
killer_pos=target_pos+9
else:
killer_pos=target_pos+7
input_vector[features.index('p')][target_pos+8]=0
state[target_pos+8]='null'
kill=0
elif(active_file=='none' and active_rank!='none'):
row=(56-ord(active_rank))*8
for i in range(8):
if(state[row][0]==killer_piece):
if(piece_valid_moves.valid_moves(state,row,target_pos)=='success'):
killer_pos=row
break
row=row+1
else:
promoted_piece=promoted_piece.lower()
killer_piece=killer_piece.lower()
if(active_file=='none' and active_rank=="none"):
if(killer_piece=='p'):
if(state[target_pos]!='null'):
if(state[target_pos-7][0]=='p'):
killer_pos=target_pos-7
else:
killer_pos=target_pos-9
elif(state[target_pos]=='null' and kill==1):
# en_passant
if(state[target_pos-9][0]=='p'):
killer_pos=target_pos-9
else:
killer_pos=target_pos-7
input_vector[features.index(state[target_pos-8][0])][target_pos-8]=0
state[target_pos-8]='null'
kill=0
else:
if(state[target_pos-8][0]=='p'):
killer_pos=target_pos-8
else:
killer_pos=target_pos-16
else:
for i in range(64):
if(state[i][0]==killer_piece and i!=target_pos and state[i]!='null'):
if(piece_valid_moves.valid_moves(state,i,target_pos)=='success'):
killer_pos=i
break
elif(active_file!='none' and active_rank=='none'):
column=ord(active_file)-97
for i in range(8):
if(state[column][0]==killer_piece and state[column]!='null'):
if(piece_valid_moves.valid_moves(state,column,target_pos)=='success'):
killer_pos=column
break
column=column+8
if(state[target_pos]=='null' and kill==1):
# en_passant
if((target_pos-9)%8==ord(active_file)-97):
killer_pos=target_pos-9
else:
killer_pos=target_pos-7
input_vector[features.index('P')][target_pos-8]=0
state[target_pos-8]='null'
kill=0
elif(active_file=='none' and active_rank!='none'):
row=(56-ord(active_rank))*8
for i in range(8):
if(state[row][0]==killer_piece and state[row]!='null'):
if(piece_valid_moves.valid_moves(state,row,target_pos)=='success'):
killer_pos=row
break
row=row+1
input_vector[features.index(killer_piece)][killer_pos]=0
if(kill==1):
input_vector[features.index(state[target_pos][0])][target_pos]=0
input_vector[features.index(killer_piece)][target_pos]=1
state[target_pos]=state[killer_pos]
state[killer_pos]='null'
if(promoted_piece!='none'):
state[target_pos]=promoted_piece+chr(96+promoted_piece_count)
input_vector[features.index(killer_piece)][target_pos]=0
input_vector[features.index(promoted_piece)][target_pos]=1
# print state
# print killer_pos
# print '~'
# if(game==766):
# print state
# try:
# king_pos=state.index('k')
# king_pos=state.index('K')
# except:
# validity=0
# print input_vector
move_no=0
total_move=0
f = open("good_new.txt","r")
lines = f.readlines()
f.close()
game=0
with open("input_vector1.csv","w") as a, open("output_vector1.csv","w") as b:
for line in lines:
total_move+=move_no
game+=1
if(game==1002):
break
print game
if(game not in {766,2227,3031,3056,4352,5006,5737,7835,10411,11383,11437,13190,14745,15130,16213,18593,21087,26964,27165,28125,28563,29905,30332,32980,33737,34463,34844,40566,40995,42497,43505,46945,48195,48899}):
        # games listed above are excluded; en passant validation is still left to do
output_vector=np.zeros((1,3),dtype=np.int)
end_index=line.index("}")
if(line[end_index+2]=='1'):
if(line[end_index+3]=='/'):
# draw
output_vector[0][1]=1
else:
# white wins
output_vector[0][0]=1
else:
# black wins
output_vector[0][2]=1
# print output_vector
# print game
move_no=0
space_count=0
# validity=1
promoted_piece_count=0
j=-1
            # Initialise the input vector as the initial chess state.
state=['r1', 'n1', 'b1', 'q', 'k', 'b2', 'n2', 'r2', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'R1', 'N1', 'B1', 'Q', 'K', 'B2', 'N2', 'R2']
input_vector=Initial_State()
for i in line:
j=j+1
if(i=='{'):
break
elif(i=='.'):
move_no=move_no+1
elif(i==' '):
space_count=space_count+1
if(space_count%3!=0):
k=j+1
move=""
if(line[k]!='{'):
while(line[k]!=' '):
move=move+line[k]
k=k+1
# print game
# print move
matrix_manipulation(move)
b.write(" ".join("".join(map(str,output_vector.flatten()))))
b.write("\n")
a.write(" ".join("".join(map(str,input_vector.flatten()))))
a.write("\n")
# function call for training
a.close()
b.close()
print "end"
print datetime.now() - startTime
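# Note on the files written above (for reference): every processed position adds
# one line of 768 space-separated 0/1 values (the flattened 12x64 encoding) to
# input_vector1.csv and one line of 3 space-separated values to output_vector1.csv,
# a one-hot game result ordered [white win, draw, black win].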
|
|
from django.core.mail.backends.base import BaseEmailBackend
from django_ses import settings
from boto.regioninfo import RegionInfo
from boto.ses import SESConnection
from datetime import datetime, timedelta
from time import sleep
# When changing this, remember to change it in setup.py
VERSION = (0, "6", 0)
__version__ = '.'.join([str(x) for x in VERSION])
__author__ = 'Harry Marr'
__all__ = ('SESBackend',)
# These would be nice to make class-level variables, but the backend is
# re-created for each outgoing email/batch.
# recent_send_times also is not going to work quite right if there are multiple
# email backends with different rate limits returned by SES, but that seems
# like it would be rare.
cached_rate_limits = {}
recent_send_times = []
def dkim_sign(message, dkim_domain=None, dkim_key=None, dkim_selector=None, dkim_headers=None):
"""Return signed email message if dkim package and settings are available."""
try:
import dkim
except ImportError:
pass
else:
if dkim_domain and dkim_key:
sig = dkim.sign(message,
dkim_selector,
dkim_domain,
dkim_key,
include_headers=dkim_headers)
message = sig + message
return message
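# Example call (illustrative; the domain, key and selector values are
# placeholders, and signing is silently skipped unless the optional `dkim`
# package is installed):
#
#   signed = dkim_sign(message.message().as_string(),
#                      dkim_domain='example.com',
#                      dkim_key=private_key_pem,
#                      dkim_selector='ses',
#                      dkim_headers=('From', 'To', 'Cc', 'Subject'))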
class SESBackend(BaseEmailBackend):
"""A Django Email backend that uses Amazon's Simple Email Service.
"""
def __init__(self, fail_silently=False, aws_access_key=None,
aws_secret_key=None, aws_region_name=None,
aws_region_endpoint=None, aws_auto_throttle=None,
dkim_domain=None, dkim_key=None, dkim_selector=None,
dkim_headers=None, **kwargs):
super(SESBackend, self).__init__(fail_silently=fail_silently, **kwargs)
self._access_key_id = aws_access_key or settings.ACCESS_KEY
self._access_key = aws_secret_key or settings.SECRET_KEY
self._region = RegionInfo(
name=aws_region_name or settings.AWS_SES_REGION_NAME,
endpoint=aws_region_endpoint or settings.AWS_SES_REGION_ENDPOINT)
self._throttle = aws_auto_throttle or settings.AWS_SES_AUTO_THROTTLE
self.dkim_domain = dkim_domain or settings.DKIM_DOMAIN
self.dkim_key = dkim_key or settings.DKIM_PRIVATE_KEY
self.dkim_selector = dkim_selector or settings.DKIM_SELECTOR
self.dkim_headers = dkim_headers or settings.DKIM_HEADERS
self.connection = None
def open(self):
"""Create a connection to the AWS API server. This can be reused for
sending multiple emails.
"""
if self.connection:
return False
try:
self.connection = SESConnection(
aws_access_key_id=self._access_key_id,
aws_secret_access_key=self._access_key,
region=self._region,
)
except:
if not self.fail_silently:
raise
def close(self):
"""Close any open HTTP connections to the API server.
"""
try:
self.connection.close()
self.connection = None
except:
if not self.fail_silently:
raise
def send_messages(self, email_messages):
"""Sends one or more EmailMessage objects and returns the number of
email messages sent.
"""
if not email_messages:
return
new_conn_created = self.open()
if not self.connection:
# Failed silently
return
num_sent = 0
source = settings.AWS_SES_RETURN_PATH
for message in email_messages:
# Automatic throttling. Assumes that this is the only SES client
# currently operating. The AWS_SES_AUTO_THROTTLE setting is a
# factor to apply to the rate limit, with a default of 0.5 to stay
# well below the actual SES throttle.
# Set the setting to 0 or None to disable throttling.
if self._throttle:
global recent_send_times
now = datetime.now()
# Get and cache the current SES max-per-second rate limit
# returned by the SES API.
rate_limit = self.get_rate_limit()
# Prune from recent_send_times anything more than a few seconds
# ago. Even though SES reports a maximum per-second, the way
# they enforce the limit may not be on a one-second window.
# To be safe, we use a two-second window (but allow 2 times the
# rate limit) and then also have a default rate limit factor of
# 0.5 so that we really limit the one-second amount in two
# seconds.
window = 2.0 # seconds
window_start = now - timedelta(seconds=window)
new_send_times = []
for time in recent_send_times:
if time > window_start:
new_send_times.append(time)
recent_send_times = new_send_times
# If the number of recent send times in the last 1/_throttle
# seconds exceeds the rate limit, add a delay.
# Since I'm not sure how Amazon determines at exactly what
# point to throttle, better be safe than sorry and let in, say,
# half of the allowed rate.
if len(new_send_times) > rate_limit * window * self._throttle:
# Sleep the remainder of the window period.
delta = now - new_send_times[0]
total_seconds = (delta.microseconds + (delta.seconds +
delta.days * 24 * 3600) * 10**6) / 10**6
delay = window - total_seconds
if delay > 0:
sleep(delay)
recent_send_times.append(now)
# end of throttling
try:
response = self.connection.send_raw_email(
source=source or message.from_email,
destinations=message.recipients(),
raw_message=dkim_sign(message.message().as_string(),
dkim_key=self.dkim_key,
dkim_domain=self.dkim_domain,
dkim_selector=self.dkim_selector,
dkim_headers=self.dkim_headers)
)
message.extra_headers['status'] = 200
message.extra_headers['message_id'] = response[
'SendRawEmailResponse']['SendRawEmailResult']['MessageId']
message.extra_headers['request_id'] = response[
'SendRawEmailResponse']['ResponseMetadata']['RequestId']
num_sent += 1
except SESConnection.ResponseError as err:
                # Store failure information so as to post-process it later if required
error_keys = ['status', 'reason', 'body', 'request_id',
'error_code', 'error_message']
for key in error_keys:
message.extra_headers[key] = getattr(err, key, None)
if not self.fail_silently:
raise
if new_conn_created:
self.close()
return num_sent
def get_rate_limit(self):
if self._access_key_id in cached_rate_limits:
return cached_rate_limits[self._access_key_id]
new_conn_created = self.open()
if not self.connection:
raise Exception(
"No connection is available to check current SES rate limit.")
try:
quota_dict = self.connection.get_send_quota()
max_per_second = quota_dict['GetSendQuotaResponse'][
'GetSendQuotaResult']['MaxSendRate']
ret = float(max_per_second)
cached_rate_limits[self._access_key_id] = ret
return ret
finally:
if new_conn_created:
self.close()
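# Typical wiring in a Django project's settings module (illustrative; the AWS
# credentials and region are read through `django_ses.settings` from the project
# settings or the environment):
#
#   EMAIL_BACKEND = 'django_ses.SESBackend'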
|
|
# :coding: utf-8
import collections
from sphinx import addnodes
import docutils.parsers.rst.directives
from .base import BaseDirective
from .rst_generator import (
get_rst_attribute_elements,
get_rst_method_elements
)
def _parse_members(argument):
"""Convert the :members: options to class directive."""
if argument is None:
return True
return [arg.strip() for arg in argument.split(",")]
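# Illustrative behaviour of the converter above: no argument means "all members",
# otherwise a comma-separated white list is returned.
assert _parse_members(None) is True
assert _parse_members("method_a, method_b") == ["method_a", "method_b"]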
class AutoClassDirective(BaseDirective):
"""Directive to render :term:`Javascript` class documentation.
The unique argument should be the identifier of the class element.
.. sourcecode:: rest
.. js:autoclass:: module.AwesomeClass
The available options are:
* members:
This option can be boolean if no arguments are given to indicate that
all members should be documented, or a white list of member names to
display.
* skip-constructor:
Indicate whether the constructor method should be displayed if
available.
* skip-attribute-value:
Indicate whether attribute values within the class should be skipped.
* undoc-members:
Indicate whether members with no docstrings should be displayed.
* private-members:
Indicate whether private members (with a name starting with an
underscore) should be displayed.
* alias:
String element to replace the class name.
* module-alias:
String element to replace the module name.
* module-path-alias:
String element to replace the module path.
* force-partial-import:
Indicate whether the class import statement display should be indicated
with partial import if the class element is exported.
.. seealso::
:ref:`directive/autoclass`
.. seealso::
:ref:`configuration/js_class_options`
"""
#: Javascript class is callable
has_arguments = True
#: Define the Object type
objtype = "class"
#: classes options
option_spec = {
"members": _parse_members,
"skip-constructor": lambda x: True,
"skip-attribute-value": lambda x: True,
"undoc-members": lambda x: True,
"private-members": lambda x: True,
"alias": docutils.parsers.rst.directives.unchanged_required,
"module-alias": docutils.parsers.rst.directives.unchanged_required,
"module-path-alias": docutils.parsers.rst.directives.unchanged_required,
"force-partial-import": lambda x: True,
}
def handle_signature(self, signature, node):
"""Update the signature *node*."""
env = self.state.document.settings.env.element_environment
module_env = self.state.document.settings.env.module_environment
name = self.options.get("alias", env["name"])
module_id = env["module_id"]
module_name = self.options.get(
"module-alias", module_env[module_id]["name"]
)
node["type"] = "class"
node["id"] = env["id"]
node["module"] = module_name
node["fullname"] = name
node += addnodes.desc_type("class ", "class ")
node += addnodes.desc_addname(module_name + ".", module_name + ".")
node += addnodes.desc_name(name, name)
param_list = addnodes.desc_parameterlist()
for method_environment in env["method"].values():
if method_environment["name"] == "constructor":
for argument in method_environment["arguments"]:
param_list += addnodes.desc_parameter(argument, argument)
node += param_list
return name, module_name
def before_content(self):
"""Update the content.
Compute the description and import statement if available, and generate
the class nested directive elements to integrate to the content.
"""
env = self.state.document.settings.env.element_environment
module_env = self.state.document.settings.env.module_environment
self.content = self.generate_import_statement(
env, module_env, self.options.get("force-partial-import")
)
self.content += self.generate_description(env)
# Automatic boolean options
options = self.env.config.js_class_options
# Options manually set
skip_constructor = self.options.get(
"skip-constructor", "skip-constructor" in options
)
undoc_members = self.options.get(
"undoc-members", "undoc-members" in options
)
private_members = self.options.get(
"private-members", "private-members" in options
)
members = self.options.get("members", "members" in options)
if members:
rst_elements = {}
whitelist = (
members if isinstance(members, collections.Iterable) else None
)
# Gather class attributes
rst_elements = get_rst_attribute_elements(
env,
whitelist_names=whitelist,
blacklist_ids=env["method"].keys(),
undocumented_members=undoc_members,
private_members=private_members,
skip_value=self.options.get(
"skip-attribute-value", "skip-attribute-value" in options
),
rst_elements=rst_elements
)
# Gather class methods
rst_elements = get_rst_method_elements(
env,
whitelist_names=whitelist,
skip_constructor=skip_constructor,
undocumented_members=undoc_members,
private_members=private_members,
rst_elements=rst_elements,
)
# Add content while respecting the line order
for line_number in sorted(rst_elements.keys()):
for rst_element in rst_elements[line_number]:
self.content += rst_element
class AutoMethodDirective(BaseDirective):
"""Directive to render :term:`Javascript` class method documentation.
The unique argument should be the identifier of the class method element.
.. sourcecode:: rest
.. js:automethod:: module.AwesomeClass.awesomeMethod
.. seealso::
:ref:`directive/automethod`
"""
#: Javascript method is callable
has_arguments = True
#: Define the Object type
objtype = "method"
def handle_signature(self, signature, node):
"""Update the signature *node*."""
env = self.state.document.settings.env.element_environment
name = env["name"]
prefix = env["prefix"]
node["type"] = "method"
node["id"] = env["id"]
node["fullname"] = name
if prefix is not None:
node += addnodes.desc_type(prefix + " ", prefix + " ")
node += addnodes.desc_name(name, name)
param_list = addnodes.desc_parameterlist()
for argument in env["arguments"]:
param_list += addnodes.desc_parameter(argument, argument)
node += param_list
class_name = env["class_id"].rsplit(".", 1)[-1]
return class_name + "." + name, None
def before_content(self):
"""Update the content.
Compute the description if available.
"""
env = self.state.document.settings.env.element_environment
self.content = self.generate_description(env)
class AutoAttributeDirective(BaseDirective):
"""Directive to render :term:`Javascript` class attribute documentation.
The unique argument should be the identifier of the class attribute element.
.. sourcecode:: rest
.. js:autoattribute:: module.AwesomeClass.DATA
The available options are:
* skip-value:
Indicate whether attribute value should be skipped.
.. seealso::
:ref:`directive/autoattribute`
"""
#: Javascript data are not callable
has_arguments = False
#: Define the Object type
objtype = "attribute"
#: data options
option_spec = {
"skip-value": lambda x: True,
}
def handle_signature(self, signature, node):
"""Update the signature *node*."""
env = self.state.document.settings.env.element_environment
name = env["name"]
value = env["value"]
prefix = env["prefix"]
node["type"] = "attribute"
node["id"] = env["id"]
node["fullname"] = name
skip_value = self.options.get("skip-value", False)
if prefix is not None:
node += addnodes.desc_type(prefix + " ", prefix + " ")
node += addnodes.desc_name(name, name)
if not skip_value:
node += addnodes.desc_annotation(" = " + value, " = " + value)
class_name = env["class_id"].rsplit(".", 1)[-1]
return class_name + "." + name, None
def before_content(self):
"""Update the content.
Compute the description if available.
"""
env = self.state.document.settings.env.element_environment
self.content = self.generate_description(env)
|
|
# overall python packages
import glob
import h5py
import os
import time
import numpy as n
import sys
# specific functions
from scipy.stats import norm
from scipy.integrate import quad
from scipy.interpolate import interp1d
# dedicated packages
from astropy.io import fits  # fits.open / fits.Column / fits.BinTableHDU below assume this (pyfits-compatible) API
import ClusterScalingRelations
cl = ClusterScalingRelations.ClusterScalingRelations_Mantz2016()  # `cl` is required for the cluster columns written further down
#import StellarMass
import XrayLuminosity
xr = XrayLuminosity.XrayLuminosity()
dV = -9999
from numpy import exp, random
logmbh = lambda logm200c : 8.18 + 1.55 * ( logm200c - 13. )
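# Worked example (illustrative): the black-hole/halo-mass scaling above gives
# log10(Mbh) = 8.18 at log10(M200c) = 13 and 9.73 one dex higher.
assert abs(logmbh(13.) - 8.18) < 1e-9
assert abs(logmbh(14.) - 9.73) < 1e-9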
LX_Z_lo, FRACTION_Z_lo = n.loadtxt(os.path.join(os.environ['GIT_EMERGE'],'data','agn_obscuration','fraction_zgt0_zlt10.txt' ), unpack=True)
LX_Z_me, FRACTION_Z_me = n.loadtxt(os.path.join(os.environ['GIT_EMERGE'],'data','agn_obscuration','fraction_zgt10_zlt25.txt'), unpack=True)
LX_Z_hi, FRACTION_Z_hi = n.loadtxt(os.path.join(os.environ['GIT_EMERGE'],'data','agn_obscuration','fraction_zgt25_zlt50.txt'), unpack=True)
zmin_obscur = n.array([-0.01,1.0,2.5])
zmax_obscur = n.array([1.0,2.5,10.0])
# read the Xray AGN luminosity function and add a condition to reproduce it
def create_catalogs_out(fileList, z, snap_name):
"""
	Adds X-ray AGN emission, based on stellar mass and the Bongiorno et al. 2016 model, to the Rockstar outputs.
"""
#
out_duty_cycle = os.path.join(os.environ['MD10'],"duty_cycle", snap_name + "_duty_cycle.txt")
log_stellar_mass, duty_cycle = n.loadtxt(out_duty_cycle, unpack="True")
percentage_active = interp1d(n.hstack((-200., 0,n.min(log_stellar_mass)-0.01,log_stellar_mass,n.max(log_stellar_mass)+0.01,15)), n.hstack(( 0., 0., 0., duty_cycle, 0., 0.)))
# set up the x ray lambda SAR
logMs = n.arange(4.5,14.5,0.01)
cdfs_interpolations = []
XXS = n.arange(32,36.1,0.1)
for mass in logMs:
norming = xr.Phi_stellar_mass(mass, z)
cdfs_interpolations.append( interp1d(n.hstack((n.array([xr.Phi_stellar_mass_to_X(X, mass, z) for X in XXS ])/norming, 1.)), n.hstack((XXS, XXS[-1]+0.1))) )
cdfs_interpolations = n.array(cdfs_interpolations)
if z > zmin_obscur[0] and z<=zmax_obscur[0]:
obscured_fraction_interpolated = interp1d(n.hstack(( 0, LX_Z_lo, 60 )), n.hstack(( FRACTION_Z_lo[0],FRACTION_Z_lo,FRACTION_Z_lo[-1] )) )
elif z > zmin_obscur[1] and z<=zmax_obscur[1]:
obscured_fraction_interpolated = interp1d(n.hstack(( 0, LX_Z_me, 60 )), n.hstack(( FRACTION_Z_me[0],FRACTION_Z_me,FRACTION_Z_me[-1] )) )
else:
obscured_fraction_interpolated = interp1d(n.hstack(( 0, LX_Z_hi, 60 )), n.hstack(( FRACTION_Z_hi[0],FRACTION_Z_hi,FRACTION_Z_hi[-1] )) )
	# loops over the files of this snapshot (note: `fileName` below is undefined as
	# written; it is meant to come from `fileList`, e.g. via `for fileName in fileList:`)
t0=time.time()
outFile = fileName[:-5]+"_Xray.fits"
# opens all relevant files
msFile = fileName[:-5]+"_Ms.fits"
hd = fits.open(fileName)
hm = fits.open(msFile)
stellar_mass = hm[1].data['stellar_mass_Mo13_mvir']
selection = (hm[1].data['stellar_mass_Mo13_mvir']>0) # hm[1].data['stellar_mass_reliable']
Nhalo=len(stellar_mass)
randomX = n.random.rand(Nhalo)
active_gn = ( percentage_active(stellar_mass) > randomX )
# lambda SAR addition
indexes = n.searchsorted(logMs,stellar_mass)
indexes[selection] = n.zeros_like(indexes[selection])
lambda_sar_Bo16 = n.array([ cdfs_interpolations[indexes[ii]](randomX[ii]) for ii in range(Nhalo) ])
# obscuration Merloni et al. 2015 as a function of luminosity
# randomObscuration = n.random.rand(Nhalo)
# obscured = (xr.obscured_fraction_optical_Merloni2015(lambda_sar_Bo16 + stellar_mass) < randomObscuration )
# obscuration Buchner et al. 2015 + 2017
# add the log NH of the logNH_host
# 35 % have a thick obscuration 24 - 26
# 65 % have a thin obscuration that depends on stellar mass and Xray luminosity
logNH = random.uniform(20, 22,Nhalo)
obs_type = n.zeros(Nhalo)
# 35% of thick, 24-26
randomNH = n.random.rand(Nhalo)
thick_obscuration = (randomNH < 0.35)
thin_obscuration = (randomNH >= 0.35)
logNH[thick_obscuration] = random.uniform(24, 26, len(logNH[thick_obscuration]))
obs_type[thick_obscuration] = n.ones_like(logNH[thick_obscuration])*2
# the thin : about 40 % are thin whatever happens: 22-24
logNH_host_mean = 21.7 + (stellar_mass - 9.5)*0.38
logNH_host = random.normal(logNH_host_mean, 0.5)
logNH[(thin_obscuration)&(logNH_host>=22)] = random.uniform(22, 24, len(logNH[(thin_obscuration)&(logNH_host>=22)]))
obs_type[(thin_obscuration)&(logNH_host>=22)] = n.ones_like(logNH[(thin_obscuration)&(logNH_host>=22)])
# a few more are thin depending on their Xray luminosity: 22-24
rest = (thin_obscuration)&(logNH_host<22)
randomNH2 = n.random.rand(Nhalo)
rest_obscured = (rest)&(randomNH2 < obscured_fraction_interpolated(lambda_sar_Bo16 + stellar_mass))
logNH[(rest_obscured)] = random.uniform(22, 24, len(logNH[(rest_obscured)]))
obs_type[(rest_obscured)] = n.ones_like(logNH[(rest_obscured)])
# the rest has: 20-22 by default
print logNH, obs_type
# columns related to Xray AGN
col1 = fits.Column(name='lambda_sar_Bo16',format='D', array = lambda_sar_Bo16 )
col2 = fits.Column(name='activity',format='L', array = active_gn )
col3 = fits.Column(name='obscuration_type_Buchner2017',format='K', array = obs_type.astype('int') )
#col4 = fits.Column(name='M_BH' ,format='D', array =logmbh(hd[1].data['M200c'])))
col4 = fits.Column(name='log_NH_Buchner2017' ,format='D', array = logNH)
# columns related to clusters
col5 = fits.Column(name='Mgas_cluster' ,format='D', array =n.log10(cl.logM500_to_logMgas(hd[1].data['M500c'], z)))
col6 = fits.Column(name='kT_cluster' ,format='D', unit='keV', array =cl.logM500_to_kT(hd[1].data['M500c'], z))
col7 = fits.Column(name='Lx_bol_cluster',format='D', array =n.log10(cl.logM500_to_L(hd[1].data['M500c'], z)))
col8 = fits.Column(name='Lx_ce_cluster' ,format='D', array =n.log10(cl.logM500_to_Lce(hd[1].data['M500c'], z)))
#define the table hdu
colArray = [col1]
#for col in hd[1].columns :
#colArray.append(col)
# AGN Mvir cols
colArray.append(col2)
colArray.append(col3)
# Clusters columns
colArray.append(col4)
colArray.append(col5)
colArray.append(col6)
colArray.append(col7)
colArray.append(col8)
hdu_cols = fits.ColDefs(colArray)
tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
#define the header
prihdr = fits.Header()
prihdr['author'] = 'JC'
prihdu = fits.PrimaryHDU(header=prihdr)
#writes the file
thdulist = fits.HDUList([prihdu, tb_hdu])
if os.path.isfile(outFile):
os.system("rm "+outFile)
thdulist.writeto(outFile)
print time.time()-t0
# open the output file_type
summ = fits.open(os.path.join(os.environ["MD10"], 'output_MD_1.0Gpc.fits'))[1].data
for el in summ[:9]:
print el
fileList_snap = n.array(glob.glob(os.path.join(os.environ["MD10"], 'work_agn', 'out_'+el['snap_name']+'_SAM_Nb_?.fits')))
fileList_snap.sort()
print fileList_snap
create_catalogs_out(fileList_snap, el['redshift'], el['snap_name'])
h5_files = n.array(glob.glob(os.path.join(os.environ['MD10'], "h5", "hlist_?.?????_emerge.hdf5")))
h5_files.sort()
bins = n.arange(6,13,0.1)
xb = (bins[1:] + bins[:-1]) / 2.
def measureSMF(h5_file, volume=1000.**3., update=True):
f1 = h5py.File(h5_file, "r+")
mass = f1['/emerge_data/stellar_mass'].value
print( h5_file, len(mass) )
if len(mass)>0:
counts, bb = n.histogram(n.log10(mass[mass>0]), bins=bins)
dN_dVdlogM = counts*0.6777**3./(bins[1:]-bins[:-1])/volume/n.log(10)
if update:
print('updates')
#print(f1['/stellar_mass_function/stellar_mass_low'].value)
f1['/stellar_mass_function/stellar_mass_low'][:] = bins[:-1]
f1['/stellar_mass_function/stellar_mass_up'][:] = bins[1:]
f1['/stellar_mass_function/counts'][:] = counts
f1['/stellar_mass_function/dN_dVdlogM'][:] = dN_dVdlogM
else:
print('creates')
stellar_mass_function_data = f1.create_group('stellar_mass_function')
ds = stellar_mass_function_data.create_dataset('stellar_mass_low', data = bins[:-1] )
ds.attrs['units'] = r'$M_\odot$'
ds.attrs['long_name'] = r'$M_\odot$'
ds = stellar_mass_function_data.create_dataset('stellar_mass_up', data = bins[1:] )
ds.attrs['units'] = r'$M_\odot$'
ds.attrs['long_name'] = r'$M_\odot$'
ds = stellar_mass_function_data.create_dataset('dN_dVdlogM', data = dN_dVdlogM )
ds.attrs['units'] = r'$ Mpc^{-3} dex^{-1}$'
ds.attrs['long_name'] = r'$dN / (dV/, dlogM) $'
ds = stellar_mass_function_data.create_dataset('counts', data = counts )
ds.attrs['units'] = r'count'
ds.attrs['long_name'] = r'galaxy counts'
f1.close()
for h5_file in h5_files:
try:
measureSMF(h5_file)
except( ValueError ):
pass
|
|
# -*- coding: utf-8 -*-
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
from __future__ import unicode_literals
import os
import sys
from argparse import ArgumentParser
import django
from django.core import checks
from django.core.management.color import color_style, no_style
from django.db import connections
from django.utils.encoding import force_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
    SystemExit on several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super(CommandParser, self).__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super(CommandParser, self).parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super(CommandParser, self).error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(force_str(style_func(msg)))
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_system_checks``
A boolean; if ``True``, entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
translations (like it happens e.g. with django.contrib.auth
permissions) as activating any locale might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to deactivate translations needs access
to settings. This condition will generate a CommandError.
"""
# Metadata about this command.
help = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
can_import_settings = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default='1',
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_argument('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_argument('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
connections.close_all()
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options.get('no_color'):
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options.get('stderr'), self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
# (The final saying about whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if self.requires_system_checks and not options.get('skip_checks'):
self.check()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False, fail_level=checks.ERROR):
"""
        Use the system check framework to validate the entire Django project.
        Raise CommandError for any serious message (error or critical). If
        there are only light messages (like warnings), print them to stderr
        without raising an exception.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(force_str(e))
if e.is_serious()
else self.style.WARNING(force_str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
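# --- Illustrative sketch (not part of Django): a minimal concrete command. ---
# It shows how the add_arguments()/handle() hooks of BaseCommand above fit
# together; the command's options ('names', '--shout') are hypothetical.
class _ExampleGreetCommand(BaseCommand):
    help = 'Print a greeting for each name given on the command line.'
    def add_arguments(self, parser):
        # Positional arguments land in options under their dest name ('names').
        parser.add_argument('names', nargs='+', help='Names to greet.')
        parser.add_argument('--shout', action='store_true',
            help='Uppercase the output.')
    def handle(self, *args, **options):
        lines = []
        for name in options['names']:
            line = 'Hello, %s!' % name
            lines.append(line.upper() if options['shout'] else line)
        # Returned text is written to self.stdout by execute().
        return '\n'.join(lines)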
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
            help='One or more application labels.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
raise NotImplementedError(
"Subclasses of AppCommand must provide"
"a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
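# --- Illustrative sketch (not part of Django): a minimal LabelCommand. ---
# Subclasses only implement handle_label(); the "touch a file per label"
# behaviour below is hypothetical.
class _ExampleTouchCommand(LabelCommand):
    help = 'Create an empty file for each filename given on the command line.'
    label = 'filename'
    def handle_label(self, label, **options):
        open(label, 'a').close()
        return 'touched %s' % label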
|
|
"""HTML5 Push Messaging notification service."""
import datetime
from functools import partial
import json
import logging
import time
import uuid
from aiohttp.hdrs import AUTHORIZATION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
HTTP_BAD_REQUEST, HTTP_INTERNAL_SERVER_ERROR, HTTP_UNAUTHORIZED, URL_ROOT)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
from homeassistant.util.json import load_json, save_json
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, DOMAIN,
PLATFORM_SCHEMA, BaseNotificationService)
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = 'html5_push_registrations.conf'
SERVICE_DISMISS = 'html5_dismiss'
ATTR_GCM_SENDER_ID = 'gcm_sender_id'
ATTR_GCM_API_KEY = 'gcm_api_key'
ATTR_VAPID_PUB_KEY = 'vapid_pub_key'
ATTR_VAPID_PRV_KEY = 'vapid_prv_key'
ATTR_VAPID_EMAIL = 'vapid_email'
def gcm_api_deprecated(value):
"""Warn user that GCM API config is deprecated."""
if not value:
return value
_LOGGER.warning(
"Configuring html5_push_notifications via the GCM api"
" has been deprecated and will stop working after April 11,"
" 2019. Use the VAPID configuration instead. For instructions,"
" see https://www.home-assistant.io/components/notify.html5/")
return value
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(ATTR_GCM_SENDER_ID):
vol.All(cv.string, gcm_api_deprecated),
vol.Optional(ATTR_GCM_API_KEY): cv.string,
vol.Optional(ATTR_VAPID_PUB_KEY): cv.string,
vol.Optional(ATTR_VAPID_PRV_KEY): cv.string,
vol.Optional(ATTR_VAPID_EMAIL): cv.string,
})
ATTR_SUBSCRIPTION = 'subscription'
ATTR_BROWSER = 'browser'
ATTR_NAME = 'name'
ATTR_ENDPOINT = 'endpoint'
ATTR_KEYS = 'keys'
ATTR_AUTH = 'auth'
ATTR_P256DH = 'p256dh'
ATTR_EXPIRATIONTIME = 'expirationTime'
ATTR_TAG = 'tag'
ATTR_ACTION = 'action'
ATTR_ACTIONS = 'actions'
ATTR_TYPE = 'type'
ATTR_URL = 'url'
ATTR_DISMISS = 'dismiss'
ATTR_JWT = 'jwt'
WS_TYPE_APPKEY = 'notify/html5/appkey'
SCHEMA_WS_APPKEY = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_APPKEY
})
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict, vol.Schema({
vol.Required(ATTR_AUTH): cv.string,
vol.Required(ATTR_P256DH): cv.string,
})
)
SUBSCRIPTION_SCHEMA = vol.All(
dict, vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
})
)
DISMISS_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
})
REGISTER_SCHEMA = vol.Schema({
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(['chrome', 'firefox']),
vol.Optional(ATTR_NAME): cv.string
})
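# Illustrative only: the shape of a single registration entry that
# REGISTER_SCHEMA above accepts (and that ends up in
# html5_push_registrations.conf); every value below is a made-up placeholder.
_EXAMPLE_REGISTRATION = {
    ATTR_SUBSCRIPTION: {
        ATTR_ENDPOINT: 'https://updates.push.example.com/wpush/v2/abc123',
        ATTR_EXPIRATIONTIME: None,
        ATTR_KEYS: {
            ATTR_AUTH: 'base64-auth-secret',
            ATTR_P256DH: 'base64-p256dh-public-key',
        },
    },
    ATTR_BROWSER: 'chrome',
    ATTR_NAME: 'my laptop',
}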
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema({
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(['received', 'clicked', 'closed']),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
})
NOTIFY_CALLBACK_EVENT = 'html5_notification'
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
'actions', 'badge', 'body', 'dir', 'icon', 'image', 'lang',
'renotify', 'requireInteraction', 'tag', 'timestamp', 'vibrate')
def get_service(hass, config, discovery_info=None):
"""Get the HTML5 push notification service."""
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
vapid_pub_key = config.get(ATTR_VAPID_PUB_KEY)
vapid_prv_key = config.get(ATTR_VAPID_PRV_KEY)
vapid_email = config.get(ATTR_VAPID_EMAIL)
def websocket_appkey(hass, connection, msg):
connection.send_message(
websocket_api.result_message(msg['id'], vapid_pub_key))
hass.components.websocket_api.async_register_command(
WS_TYPE_APPKEY, websocket_appkey, SCHEMA_WS_APPKEY
)
hass.http.register_view(
HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(
ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(
hass, gcm_api_key, vapid_prv_key, vapid_email, registrations,
json_path)
def _load_config(filename):
"""Load configuration."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
"""Accepts push registrations from a browser."""
url = '/api/notify.html5'
name = 'api:notify.html5'
def __init__(self, registrations, json_path):
"""Init HTML5PushRegistrationView."""
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
"""Accept the POST request for push registrations from a browser."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(
humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
return self.json_message(
'Push notification subscriber registered.')
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
def find_registration_name(self, data, suggested=None):
"""Find a registration name matching data or generate a unique one."""
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or 'unnamed device',
self.registrations)
async def delete(self, request):
"""Delete a registration."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
# If not found, unregistering was already done. Return 200
return self.json_message('Registration not found.')
reg = self.registrations.pop(found)
try:
hass = request.app['hass']
await hass.async_add_job(save_json, self.json_path,
self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
'Error saving registration.', HTTP_INTERNAL_SERVER_ERROR)
return self.json_message('Push notification subscriber unregistered.')
class HTML5PushCallbackView(HomeAssistantView):
"""Accepts push registrations from a browser."""
requires_auth = False
url = '/api/notify.html5/callback'
name = 'api:notify.html5/callback'
def __init__(self, registrations):
"""Init HTML5PushCallbackView."""
self.registrations = registrations
def decode_jwt(self, token):
"""Find the registration that signed this JWT and return it."""
import jwt
# 1. Check claims w/o verifying to see if a target is in there.
# 2. If target in claims, attempt to verify against the given name.
# 2a. If decode is successful, return the payload.
# 2b. If decode is unsuccessful, return a 401.
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message('No target found in JWT',
status_code=HTTP_UNAUTHORIZED)
# The following is based on code from Auth0
# https://auth0.com/docs/quickstart/backend/python
def check_authorization_header(self, request):
"""Check the authorization header."""
import jwt
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message('Authorization header is expected',
status_code=HTTP_UNAUTHORIZED)
parts = auth.split()
if parts[0].lower() != 'bearer':
return self.json_message('Authorization header must '
'start with Bearer',
status_code=HTTP_UNAUTHORIZED)
if len(parts) != 2:
return self.json_message('Authorization header must '
'be Bearer token',
status_code=HTTP_UNAUTHORIZED)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message('token is invalid',
status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
"""Accept the POST request for push registrations event callback."""
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning("Callback event payload is not valid: %s",
humanize_error(event_payload, ex))
event_name = '{}.{}'.format(NOTIFY_CALLBACK_EVENT,
event_payload[ATTR_TYPE])
request.app['hass'].bus.fire(event_name, event_payload)
return self.json({'status': 'ok', 'event': event_payload[ATTR_TYPE]})
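# Hypothetical sketch (illustrative only) of the token flow that
# HTML5PushCallbackView.decode_jwt() relies on: the notification service signs
# the JWT with the registration's 'auth' key (see _push_message() below), so
# the callback view can locate the registration via the unverified 'target'
# claim and then verify the signature with that same key. The secret and
# target name here are made up.
def _example_jwt_roundtrip(auth_secret='made-up-secret', target='my laptop'):
    import jwt
    token = jwt.encode({ATTR_TARGET: target, ATTR_TAG: 'example-tag'},
                       auth_secret)
    claims = jwt.decode(token, auth_secret, algorithms=['HS256'])
    return claims[ATTR_TARGET] == target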
class HTML5NotificationService(BaseNotificationService):
"""Implement the notification service for HTML5."""
def __init__(self, hass, gcm_key, vapid_prv, vapid_email, registrations,
json_path):
"""Initialize the service."""
self._gcm_key = gcm_key
self._vapid_prv = vapid_prv
self._vapid_claims = {"sub": "mailto:{}".format(vapid_email)}
self.registrations = registrations
self.registrations_json_path = json_path
async def async_dismiss_message(service):
"""Handle dismissing notification message service calls."""
kwargs = {}
if self.targets is not None:
kwargs[ATTR_TARGET] = self.targets
elif service.data.get(ATTR_TARGET) is not None:
kwargs[ATTR_TARGET] = service.data.get(ATTR_TARGET)
kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
await self.async_dismiss(**kwargs)
hass.services.async_register(
DOMAIN, SERVICE_DISMISS, async_dismiss_message,
schema=DISMISS_SERVICE_SCHEMA)
@property
def targets(self):
"""Return a dictionary of registered targets."""
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def dismiss(self, **kwargs):
"""Dismisses a notification."""
data = kwargs.get(ATTR_DATA)
tag = data.get(ATTR_TAG) if data else ""
payload = {
ATTR_TAG: tag,
ATTR_DISMISS: True,
ATTR_DATA: {}
}
self._push_message(payload, **kwargs)
async def async_dismiss(self, **kwargs):
"""Dismisses a notification.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(
partial(self.dismiss, **kwargs))
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
tag = str(uuid.uuid4())
payload = {
'badge': '/static/images/notification-badge.png',
'body': message,
ATTR_DATA: {},
'icon': '/static/icons/favicon-192x192.png',
ATTR_TAG: tag,
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
}
data = kwargs.get(ATTR_DATA)
if data:
# Pick out fields that should go into the notification directly vs
# into the notification data dictionary.
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (payload[ATTR_DATA].get(ATTR_URL) is None and
payload.get(ATTR_ACTIONS) is None):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
self._push_message(payload, **kwargs)
def _push_message(self, payload, **kwargs):
"""Send the message."""
import jwt
from pywebpush import WebPusher, webpush
timestamp = int(time.time())
payload['timestamp'] = (timestamp*1000) # Javascript ms since epoch
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
if info is None:
_LOGGER.error("%s is not a valid HTML5 push notification"
" target", target)
continue
jwt_exp = (datetime.datetime.fromtimestamp(timestamp) +
datetime.timedelta(days=JWT_VALID_DAYS))
jwt_secret = info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
jwt_claims = {'exp': jwt_exp, 'nbf': timestamp,
'iat': timestamp, ATTR_TARGET: target,
ATTR_TAG: payload[ATTR_TAG]}
jwt_token = jwt.encode(jwt_claims, jwt_secret).decode('utf-8')
payload[ATTR_DATA][ATTR_JWT] = jwt_token
if self._vapid_prv and self._vapid_claims:
response = webpush(
info[ATTR_SUBSCRIPTION],
json.dumps(payload),
vapid_private_key=self._vapid_prv,
vapid_claims=self._vapid_claims
)
else:
                # Only pass the GCM key if we're actually using GCM;
                # otherwise notifications break on Firefox.
gcm_key = self._gcm_key \
if 'googleapis.com' \
in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT] \
else None
response = WebPusher(info[ATTR_SUBSCRIPTION]).send(
json.dumps(payload), gcm_key=gcm_key, ttl='86400'
)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path,
self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
|
|
"""JSON Tree Library
"""
import collections
import datetime
import json
import json.scanner
import re
import sys
__version__ = (0,5,1)
__version_string__ = '.'.join(str(x) for x in __version__)
__author__ = 'Doug Napoleone'
__email__ = '[email protected]'
if sys.version_info.major > 2 :
basestring = str
# ISO/UTC date examples:
# 2013-04-29T22:45:35.294303Z
# 2013-04-29T22:45:35.294303
# 2013-04-29 22:45:35
# 2013-04-29T22:45:35.4361-0400
# 2013-04-29T22:45:35.4361-04:00
_datetime_iso_re = re.compile(
r'^(?P<parsable>\d{4}-\d{2}-\d{2}(?P<T>[ T])\d{2}:\d{2}:\d{2}'
r'(?P<f>\.\d{1,7})?)(?P<z>[-+]\d{2}\:?\d{2})?(?P<Z>Z)?')
_date = "%Y-%m-%d"
_time = "%H:%M:%S"
_f = '.%f'
_z = '%z'
class _FixedTzOffset(datetime.tzinfo):
def __init__(self, offset_str):
hours = int(offset_str[1:3], 10)
mins = int(offset_str[-2:], 10)
if offset_str[0] == '-':
hours = -hours
mins = -mins
self.__offset = datetime.timedelta(hours=hours,
minutes=mins)
self.__dst = datetime.timedelta(hours=hours-1,
minutes=mins)
self.__name = ''
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return self.__dst
def _datetimedecoder(dtstr):
match = _datetime_iso_re.match(dtstr)
if match:
gd = match.groupdict()
T = gd['T']
strptime = _date + T + _time
if gd['f']:
strptime += '.%f'
if gd['Z']:
strptime += 'Z'
try:
result = datetime.datetime.strptime(gd['parsable'], strptime)
if gd['z']:
result = result.replace(tzinfo=_FixedTzOffset(gd['z']))
return result
except ValueError:
return dtstr
return dtstr
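# Quick illustration (not part of the library API): _datetimedecoder() returns
# a datetime for ISO-formatted strings and falls back to the original string
# when parsing fails.
def _example_datetimedecoder():
    parsed = _datetimedecoder('2013-04-29T22:45:35.294303')
    assert isinstance(parsed, datetime.datetime)
    assert _datetimedecoder('not a timestamp') == 'not a timestamp'
    return parsed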
def _datetimeencoder(dtobj):
return dtobj.isoformat()
class jsontree(collections.defaultdict):
"""Default dictionary where keys can be accessed as attributes and
new entries recursively default to be this class. This means the following
code is valid:
>>> mytree = jsontree()
>>> mytree.something.there = 3
>>> mytree['something']['there'] == 3
True
"""
def __init__(self, *args, **kwdargs):
super(jsontree, self).__init__(jsontree, *args, **kwdargs)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return self[name]
def __setattr__(self, name, value):
self[name] = value
return value
def mapped_jsontree_class(mapping):
"""Return a class which is a jsontree, but with a supplied attribute name
mapping. The mapping argument can be a mapping object
(dict, jsontree, etc.) or it can be a callable which takes a single
argument (the attribute name), and returns a new name.
This is useful in situations where you have a jsontree with keys that are
not valid python attribute names, to simplify communication with a client
library, or allow for configurable names.
For example:
>>> numjt = mapped_jsontree_class(dict(one='1', two='2', three='3'))
>>> number = numjt()
>>> number.one = 'something'
>>> dict(number)
{'1': 'something'}
This is very useful for abstracting field names that may change between
a development sandbox and production environment. Both FogBugz and Jira
bug trackers have custom fields with dynamically generated values. These
    field names can be abstracted out into a configuration mapping, and the
jsontree code can be standardized.
    This can also be useful for JavaScript APIs (PHPCake) which insist on
having spaces in some key names. A function can be supplied which maps
all '_'s in the attribute name to spaces:
>>> spacify = lambda name: name.replace('_', ' ')
>>> spacemapped = mapped_jsontree_class(spacify)
>>> sm = spacemapped()
>>> sm.hello_there = 5
>>> sm.hello_there
5
>>> list(sm.keys())
['hello there']
This will also work with non-string keys for translating from libraries
that use object keys in python over to string versions of the keys in JSON
>>> numjt = mapped_jsontree_class(dict(one=1, two=2))
>>> number = numjt()
>>> number.one = 'something'
>>> dict(number)
{1: 'something'}
>>> numjt_as_text = mapped_jsontree_class(dict(one='1', two='2'))
>>> dumped_number = dumps(number)
>>> loaded_number = loads(dumped_number, jsontreecls=numjt_as_text)
>>> str(loaded_number.one)
'something'
>>> repr(dict(loaded_number)).replace('u', '') # cheat the python2 tests
"{'1': 'something'}"
"""
mapper = mapping
if not callable(mapping):
if not isinstance(mapping, collections.Mapping):
raise TypeError("Argument mapping is not collable or an instance "
"of collections.Mapping")
mapper = lambda name: mapping.get(name, name)
class mapped_jsontree(collections.defaultdict):
def __init__(self, *args, **kwdargs):
super(mapped_jsontree, self).__init__(mapped_jsontree, *args, **kwdargs)
def __getattribute__(self, name):
mapped_name = mapper(name)
if not isinstance(mapped_name, basestring):
return self[mapped_name]
try:
return object.__getattribute__(self, mapped_name)
except AttributeError:
return self[mapped_name]
def __setattr__(self, name, value):
mapped_name = mapper(name)
self[mapped_name] = value
return value
return mapped_jsontree
def mapped_jsontree(mapping, *args, **kwdargs):
"""Helper function that calls mapped_jsontree_class, and passing the
rest of the arguments to the constructor of the new class.
>>> number = mapped_jsontree(dict(one='1', two='2', three='3', four='4'),
... {'1': 'something', '2': 'hello'})
>>> number.two
'hello'
>>> list(number.items())
[('1', 'something'), ('2', 'hello')]
"""
return mapped_jsontree_class(mapping)(*args, **kwdargs)
class JSONTreeEncoder(json.JSONEncoder):
"""JSON encoder class that serializes out jsontree object structures and
datetime objects into ISO strings.
"""
def __init__(self, *args, **kwdargs):
datetimeencoder = _datetimeencoder
if 'datetimeencoder' in kwdargs:
datetimeencoder = kwdargs.pop('datetimeencoder')
super(JSONTreeEncoder, self).__init__(*args, **kwdargs)
self.__datetimeencoder = datetimeencoder
def default(self, obj):
if isinstance(obj, datetime.datetime):
return self.__datetimeencoder(obj)
else:
return super(JSONTreeEncoder, self).default(obj)
class JSONTreeDecoder(json.JSONDecoder):
"""JSON decoder class for deserializing to a jsontree object structure
and building datetime objects from strings with the ISO datetime format.
"""
def __init__(self, *args, **kwdargs):
jsontreecls = jsontree
datetimedecoder = _datetimedecoder
if 'jsontreecls' in kwdargs:
jsontreecls = kwdargs.pop('jsontreecls')
if 'datetimedecoder' in kwdargs:
datetimedecoder = kwdargs.pop('datetimedecoder')
super(JSONTreeDecoder, self).__init__(*args, **kwdargs)
self.__parse_object = self.parse_object
self.__parse_string = self.parse_string
self.parse_object = self._parse_object
self.parse_string = self._parse_string
self.scan_once = json.scanner.py_make_scanner(self)
self.__jsontreecls = jsontreecls
self.__datetimedecoder = datetimedecoder
def _parse_object(self, *args, **kwdargs):
result = self.__parse_object(*args, **kwdargs)
return self.__jsontreecls(result[0]), result[1]
def _parse_string(self, *args, **kwdargs):
value, idx = self.__parse_string(*args, **kwdargs)
value = self.__datetimedecoder(value)
return value, idx
def clone(root, jsontreecls=jsontree, datetimeencoder=_datetimeencoder,
datetimedecoder=_datetimedecoder):
"""Clone an object by first searializing out and then loading it back in.
"""
return json.loads(json.dumps(root, cls=JSONTreeEncoder,
datetimeencoder=datetimeencoder),
cls=JSONTreeDecoder, jsontreecls=jsontreecls,
datetimedecoder=datetimedecoder)
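# Quick illustration (not part of the library API): clone() round-trips a
# jsontree through the encoder/decoder classes above, preserving datetime
# values.
def _example_clone_roundtrip():
    tree = jsontree()
    tree.event.when = datetime.datetime(2013, 4, 29, 22, 45, 35)
    copy = clone(tree)
    assert copy.event.when == tree.event.when
    return copy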
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=JSONTreeEncoder, indent=None, separators=None,
encoding="utf-8", default=None, sort_keys=False, **kargs):
"""JSON serialize to file function that defaults the encoding class to be
JSONTreeEncoder
"""
if sys.version_info.major == 2:
kargs['encoding'] = encoding
if sys.version_info.major > 2 :
kargs['sort_keys'] = sort_keys
kargs['default'] = default
return json.dump(obj, fp, skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan,
cls=cls, indent=indent, separators=separators, **kargs)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=JSONTreeEncoder, indent=None, separators=None,
encoding='utf-8', default=None, sort_keys=False, **kargs):
"""JSON serialize to string function that defaults the encoding class to be
JSONTreeEncoder
"""
if sys.version_info.major == 2:
kargs['encoding'] = encoding
if sys.version_info.major > 2 :
kargs['sort_keys'] = sort_keys
kargs['default'] = default
return json.dumps(obj, skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan,
cls=cls, indent=indent, separators=separators, **kargs)
def load(fp, encoding=None, cls=JSONTreeDecoder, object_hook=None,
parse_float=None, parse_int=None, parse_constant=None,
object_pairs_hook=None, **kargs):
"""JSON load from file function that defaults the loading class to be
JSONTreeDecoder
"""
if sys.version_info.major == 2:
kargs['encoding'] = encoding
return json.load(fp, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant,
object_pairs_hook=object_pairs_hook, **kargs)
def loads(s, encoding=None, cls=JSONTreeDecoder, object_hook=None,
parse_float=None, parse_int=None, parse_constant=None,
object_pairs_hook=None, **kargs):
"""JSON load from string function that defaults the loading class to be
JSONTreeDecoder
"""
return json.loads(s, encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant,
object_pairs_hook=object_pairs_hook, **kargs)
|
|
"""
Abstract base class for 1-D plots which only use one axis
"""
from __future__ import absolute_import
# Standard library imports
from numpy import argsort, asarray
# Enthought library imports
from traits.api import (Any, Bool, Enum, Instance, Property, cached_property,
on_trait_change)
# local imports
from .abstract_plot_renderer import AbstractPlotRenderer
from .abstract_mapper import AbstractMapper
from .array_data_source import ArrayDataSource
from .base import reverse_map_1d
class Base1DPlot(AbstractPlotRenderer):
""" Base class for one-dimensional plots
This class provides a base for plots such as jitter plots, color bars,
single-axis scatter plots, and geophysical horizon and tops plots.
"""
#: The data source of values
index = Instance(ArrayDataSource)
#: Screen mapper for index data.
index_mapper = Instance(AbstractMapper)
#: Corresponds to either **index_mapper** or None, depending on
#: the orientation of the plot.
x_mapper = Property(depends_on=['orientation', 'index_mapper'])
#: Corresponds to either **index_mapper** or None, depending on
#: the orientation of the plot.
y_mapper = Property(depends_on=['orientation', 'index_mapper'])
#: The orientation of the index axis.
orientation = Enum('v', 'h')
#: Should the plot go left-to-right or bottom-to-top (normal) or the reverse?
direction = Enum('normal', 'flipped')
#: Faux origin for the axes and other objects to look at
origin = Property(
Enum('bottom left', 'top left', 'bottom right', 'top right'),
depends_on=['orientation', 'direction']
)
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
#: flag whether the data cache is valid
_cache_valid = Bool(False)
#: cache of the index values in data space
_cached_data = Any()
#: cache of the sorted index values in data space
_cached_data_pts_sorted = Any()
#: cache of the sorted indices of the index values
_cached_data_argsort = Any()
#: flag whether the screen coordinates are valid
_screen_cache_valid = Bool(False)
#: cache holding the screen coordinates of the index values
_cached_screen_pts = Any()
#------------------------------------------------------------------------
# AbstractPlotRenderer interface
#------------------------------------------------------------------------
def map_screen(self, data_array):
""" Maps a 1D array of data points into screen space and returns it as
a 1D array.
Parameters
----------
data_array : 1D array
An array of data-space values to be mapped to screen coordinates.
Returns
-------
screen_array : 1D array
An array of points in screen space, either x-values (if
orientation is 'h') or y-values (if orientation is 'v').
Notes
-----
Returning a 1D array is experimental, and may break some tools and
        overlays. If need be, we can refactor so that it returns a 2D array.
"""
# data_array is 1D array of length N
if len(data_array) == 0:
return []
return asarray(self.index_mapper.map_screen(data_array))
def map_data(self, screen_pts):
""" Maps 2D screen space points into the 1D index space of the plot.
Parameters
----------
screen_pts : tuple of x-array, y-array
            2 arrays (or values) of screen-space coordinates.
Returns
-------
data_array : 1D array
An array of points in data space corresponding to the screen-space
points.
"""
x, y = screen_pts
if self.orientation == "v":
return asarray(self.index_mapper.map_data(y))
else:
return asarray(self.index_mapper.map_data(x))
def map_index(self, screen_pt, threshold=2.0, outside_returns_none=True,
index_only=True):
""" Maps a screen space point to an index into the plot's index array.
Parameters
----------
        screen_pt : tuple of x-array, y-array
            2 arrays (or values) of screen-space coordinates.
threshold : float
Optional screen-space distance allowed between *screen_pt* and the
plot; if non-zero, then a *screen_pt* within this distance is
            mapped to the nearest plot index. (This feature is useful for sparse
data.)
outside_returns_none : Boolean
If True, then if *screen_pt* is outside the range of the data, the
method returns None. If False, it returns the nearest end index in
such a case.
index_only : Boolean
            This is included for compatibility with the base class, but is
ignored, as it is always true for 1D plots.
Returns
-------
index : int
An index into the index array. If the input point cannot be mapped
to an index, then None is returned.
If *screen_pt* corresponds to multiple indices, then only the first
index is returned.
"""
data_pt = self.map_data(screen_pt)
if ((data_pt < self.index_mapper.range.low) or \
(data_pt > self.index_mapper.range.high)) and \
outside_returns_none:
return None
if self._cached_data_pts_sorted is None:
self._cached_data_argsort = argsort(self._cached_data)
self._cached_data_pts_sorted = self._cached_data[self._cached_data_argsort]
# XXX better to just use argmin(abs(data - data_pt))?
data = self._cached_data_pts_sorted
try:
ndx = reverse_map_1d(data, data_pt, "ascending")
except IndexError:
if outside_returns_none:
return None
else:
if data_pt < data[0]:
return 0
else:
return len(data) - 1
orig_ndx = self._cached_data_argsort[ndx]
if threshold == 0.0:
return orig_ndx
screen_points = self._cached_screen_pts
x = screen_points[orig_ndx]
if self.orientation == 'v':
x0 = screen_pt[1]
else:
x0 = screen_pt[0]
if abs(x - x0) <= threshold:
return orig_ndx
else:
return None
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _compute_screen_coord(self):
""" Compute the screen coordinates of the index values """
if not self._screen_cache_valid:
self._gather_points()
pts = self.map_screen(self._cached_data)
self._cached_screen_pts = pts
self._screen_cache_valid = True
self._cached_data_pts_sorted = None
self._cached_data_argsort = None
return self._cached_screen_pts
def _gather_points(self):
""" Ensure that data cache is valid """
if self._cache_valid:
return
if not self.index:
return
index, index_mask = self.index.get_data_mask()
if len(index) == 0:
self._cached_data = []
self._cache_valid = True
return
self._cached_data = index
self._cache_valid = True
        # Invalidate the cached screen coordinates as well; the names must
        # match the private traits declared above.
        self._cached_screen_pts = None
        self._screen_cache_valid = False
def _update_mappers(self):
""" Update the mapper when the bounds, orientation or direction change
"""
mapper = self.index_mapper
if mapper is None:
return
x = self.x
x2 = self.x2
y = self.y
y2 = self.y2
if self.orientation == 'h':
if self.direction == 'normal':
mapper.screen_bounds = (x, x2)
elif self.direction == 'flipped':
mapper.screen_bounds = (x2, x)
elif self.orientation == 'v':
if self.direction == 'normal':
mapper.screen_bounds = (y, y2)
elif self.direction == 'flipped':
mapper.screen_bounds = (y2, y)
self.invalidate_draw()
self._cache_valid = False
self._screen_cache_valid = False
#------------------------------------------------------------------------
# Property setters and getters
#------------------------------------------------------------------------
@cached_property
def _get_x_mapper(self):
if self.orientation == "h":
return self.index_mapper
else:
return None
@cached_property
def _get_y_mapper(self):
if self.orientation == "h":
return None
else:
return self.index_mapper
@cached_property
def _get_origin(self):
if self.orientation == 'h':
if self.direction == 'normal':
return 'bottom left'
else:
return 'bottom right'
else:
if self.direction == 'normal':
return 'bottom left'
else:
return 'top left'
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
@on_trait_change("index.data_changed")
def _invalidate(self):
self._cache_valid = False
self._screen_cache_valid = False
@on_trait_change("index_mapper.updated")
def _invalidate_screen(self):
self._screen_cache_valid = False
def _bounds_changed(self, old, new):
super(Base1DPlot, self)._bounds_changed(old, new)
self._update_mappers()
def _bounds_items_changed(self, event):
super(Base1DPlot, self)._bounds_items_changed(event)
self._update_mappers()
def _position_changed(self, old, new):
super(Base1DPlot, self)._position_changed(old, new)
self._update_mappers()
def _position_items_changed(self, event):
super(Base1DPlot, self)._position_items_changed(event)
self._update_mappers()
def _updated_changed_for_index_mapper(self):
self._update_mappers()
def _orientation_changed(self):
self._update_mappers()
def _direction_changed(self):
self._update_mappers()
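# Hypothetical sketch (illustrative only) of how a Base1DPlot wires an index
# data source to a mapper; LinearMapper/DataRange1D are assumed to come from
# chaco.api, and the 0..100 screen bounds below are made up.
def _example_map_screen():
    from chaco.api import DataRange1D, LinearMapper
    index = ArrayDataSource(asarray([0.0, 0.5, 1.0]))
    mapper = LinearMapper(range=DataRange1D(index),
                          low_pos=0.0, high_pos=100.0)
    plot = Base1DPlot(index=index, index_mapper=mapper, orientation='h')
    # map_screen() returns a 1D array of x positions for 'h' orientation.
    return plot.map_screen(asarray([0.0, 0.5, 1.0]))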
|
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv6,
IP_DSCP_CS5,
IP_PROTO_TCP)
def of_demo_19():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 19 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
# --- Flow Match: Ethernet Type
# IPv6 Source Address
# IPv6 Destination Address
# IP DSCP
# TCP Source Port
# TCP Destination Port
eth_type = ETH_TYPE_IPv6
ipv6_src = "4231::3210:3210:3210:3210/80"
ipv6_dst = "1234:1234:1234:1234::5678:5678/64"
ipv6_flabel = 33
ip_dscp = IP_DSCP_CS5 # 'Class Selector' = 'Critical'
ip_proto = IP_PROTO_TCP
tcp_src_port = 11111
tcp_dst_port = 22222
# --- Flow Actions: Output (CONTROLLER)
output_port = "CONTROLLER"
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Ethernet Type (%s)\n"
" IPv6 Source Address (%s)\n"
" IPv6 Destination Address (%s)\n"
" IPv6 Flow Label (%s)\n"
" IP DSCP (%s)\n"
" TCP Source Port (%s)\n"
" TCP Destination Port (%s)" %
(hex(eth_type), ipv6_src, ipv6_dst, ipv6_flabel,
ip_dscp, tcp_src_port, tcp_dst_port))
print (" Action: Output (to %s)" % (output_port))
time.sleep(rundelay)
flow_entry = FlowEntry()
flow_entry.set_flow_name(flow_name="demo19.py")
table_id = 0
flow_id = 25
flow_entry.set_flow_table_id(table_id)
flow_entry.set_flow_id(flow_id)
flow_entry.set_flow_priority(flow_priority=1018)
flow_entry.set_flow_cookie(cookie=23)
flow_entry.set_flow_hard_timeout(hard_timeout=1200)
flow_entry.set_flow_idle_timeout(idle_timeout=3400)
# --- Instruction: 'Apply-actions'
# Actions: 'Output'
instruction = Instruction(instruction_order=0)
action = OutputAction(order=0, port=output_port)
instruction.add_apply_action(action)
    flow_entry.add_instruction(instruction)
# --- Match Fields: Ethernet Type
# IPv6 Source Address
# IPv6 Destination Address
# IPv6 Flow Label
# IP protocol number (TCP)
# IP DSCP
# TCP Source Port
# TCP Destination Port
match = Match()
match.set_eth_type(eth_type)
match.set_ipv6_src(ipv6_src)
match.set_ipv6_dst(ipv6_dst)
match.set_ipv6_flabel(ipv6_flabel)
match.set_ip_proto(ip_proto)
match.set_ip_dscp(ip_dscp)
match.set_tcp_src(tcp_src_port)
match.set_tcp_dst(tcp_dst_port)
flow_entry.add_match(match)
print ("\n")
print ("<<< Flow to send:")
    print (flow_entry.get_payload())
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Get configured flow from the Controller")
time.sleep(rundelay)
result = ofswitch.get_configured_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully read from the Controller")
print ("Flow info:")
flow = result.get_data()
        print (json.dumps(flow, indent=4))
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Delete flow with id of '%s' from the Controller's cache "
"and from the table '%s' on the '%s' node" %
(flow_id, table_id, nodeName))
time.sleep(rundelay)
result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
flow_entry.get_flow_id())
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully removed from the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_19()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""A visitor class that generates protobufs for each python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import enum
from google.protobuf import message
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.tools.api.lib import api_objects_pb2
# The following objects need to be handled individually.
_CORNER_CASES = {
'': {
'tools': {}
},
'test.TestCase': {},
'test.TestCase.failureException': {},
'train.NanLossDuringTrainingError': {
'message': {}
},
}
# Python 2 vs. 3 differences
if sys.version_info.major == 3:
_NORMALIZE_TYPE = {}
for t in ('property', 'object', 'getset_descriptor', 'int', 'str', 'type',
'tuple', 'module', 'collections.defaultdict', 'set', 'dict',
'NoneType', 'frozenset'):
_NORMALIZE_TYPE["<class '%s'>" % t] = "<type '%s'>" % t
for e in 'Exception', 'RuntimeError':
_NORMALIZE_TYPE["<class '%s'>" % e] = "<type 'exceptions.%s'>" % e
_NORMALIZE_TYPE["<class 'abc.ABCMeta'>"] = "<type 'type'>"
_NORMALIZE_ISINSTANCE = {
"<class "
"'tensorflow.lite.python.op_hint.OpHint.OpHintArgumentTracker'>": # pylint: disable=line-too-long
"<class "
"'tensorflow.lite.python.op_hint.OpHintArgumentTracker'>",
"<class "
"'tensorflow.python.training.monitored_session._MonitoredSession.StepContext'>": # pylint: disable=line-too-long
"<class "
"'tensorflow.python.training.monitored_session.StepContext'>",
"<class "
"'tensorflow.python.ops.variables.Variable.SaveSliceInfo'>":
"<class "
"'tensorflow.python.ops.variables.SaveSliceInfo'>"
}
def _SkipMember(cls, member):
return (member == 'with_traceback' or member in ('name', 'value') and
isinstance(cls, type) and issubclass(cls, enum.Enum))
else:
_NORMALIZE_TYPE = {"<class 'abc.ABCMeta'>": "<type 'type'>"}
_NORMALIZE_ISINSTANCE = {}
def _SkipMember(cls, member): # pylint: disable=unused-argument
return False
def _NormalizeType(ty):
return _NORMALIZE_TYPE.get(ty, ty)
def _NormalizeIsInstance(ty):
return _NORMALIZE_ISINSTANCE.get(ty, ty)
def _SanitizedArgSpec(obj):
"""Get an ArgSpec string that is free of addresses.
We have callables as function arg defaults. This results in addresses in
  getargspec output. This function returns a sanitized string representation
  of the argspec, with the memory addresses removed.
  Args:
    obj: A python routine to create the sanitized argspec of.
Returns:
string, a string representation of the argspec.
"""
output_string = ''
unsanitized_arg_spec = tf_inspect.getargspec(obj)
for clean_attr in ('args', 'varargs', 'keywords'):
output_string += '%s=%s, ' % (clean_attr,
getattr(unsanitized_arg_spec, clean_attr))
if unsanitized_arg_spec.defaults:
sanitized_defaults = []
for val in unsanitized_arg_spec.defaults:
str_val = str(val)
# Sanitize argspecs that have hex code in them.
if ' at 0x' in str_val:
sanitized_defaults.append('%s instance>' % str_val.split(' at ')[0])
else:
sanitized_defaults.append(str_val)
output_string += 'defaults=%s, ' % sanitized_defaults
else:
output_string += 'defaults=None'
return output_string
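# Illustrative only (hypothetical, not part of the API goldens tooling): a
# default value whose repr contains a memory address (' at 0x...') is replaced
# with a stable '<... instance>' marker, so the argspec string does not change
# between runs.
def _ExampleSanitizedArgSpec():
  class _Helper(object):
    pass
  def fn(a, helper=_Helper()):  # repr(helper) contains ' at 0x...'
    return a, helper
  return _SanitizedArgSpec(fn)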
def _SanitizedMRO(obj):
"""Get a list of superclasses with minimal amount of non-TF classes.
Based on many parameters like python version, OS, protobuf implementation
or changes in google core libraries the list of superclasses of a class
  can change. We stop the list at the first non-TF class, to be robust to
  changes that do not affect the API. The Method Resolution Order returned
  by `tf_inspect.getmro` is still maintained in the return value.
Args:
    obj: A python class to create the sanitized MRO of.
Returns:
list of strings, string representation of the class names.
"""
return_list = []
for cls in tf_inspect.getmro(obj):
if cls.__name__ == '_NewClass':
# Ignore class created by @deprecated_alias decorator.
continue
str_repr = _NormalizeType(str(cls))
return_list.append(str_repr)
if 'tensorflow' not in str_repr:
break
# Hack - tensorflow.test.StubOutForTesting may or may not be type <object>
# depending on the environment. To avoid inconsistency, break after we add
# StubOutForTesting to the return_list.
if 'StubOutForTesting' in str_repr:
break
return return_list
def _IsProtoClass(obj):
"""Returns whether the passed obj is a Protocol Buffer class."""
return isinstance(obj, type) and issubclass(obj, message.Message)
class PythonObjectToProtoVisitor(object):
"""A visitor that summarizes given python objects as protobufs."""
def __init__(self):
# A dict to store all protocol buffers.
# Keyed by "path" to the object.
self._protos = {}
def GetProtos(self):
"""Return the list of protos stored."""
return self._protos
def __call__(self, path, parent, children):
# The path to the object.
lib_path = 'tensorflow.%s' % path if path else 'tensorflow'
# A small helper method to construct members(children) protos.
def _AddMember(member_name, member_obj, proto):
"""Add the child object to the object being constructed."""
_, member_obj = tf_decorator.unwrap(member_obj)
if _SkipMember(parent, member_name):
return
if member_name == '__init__' or not member_name.startswith('_'):
if tf_inspect.isroutine(member_obj):
new_method = proto.member_method.add()
new_method.name = member_name
# If member_obj is a python builtin, there is no way to get its
# argspec, because it is implemented on the C side. It also has no
# func_code.
if hasattr(member_obj, '__code__'):
new_method.argspec = _SanitizedArgSpec(member_obj)
else:
new_member = proto.member.add()
new_member.name = member_name
new_member.mtype = _NormalizeType(str(type(member_obj)))
parent_corner_cases = _CORNER_CASES.get(path, {})
if path not in _CORNER_CASES or parent_corner_cases:
# Decide if we have a module or a class.
if tf_inspect.ismodule(parent):
# Create a module object.
module_obj = api_objects_pb2.TFAPIModule()
for name, child in children:
if name in parent_corner_cases:
# If we have an empty entry, skip this object.
if parent_corner_cases[name]:
module_obj.member.add(**(parent_corner_cases[name]))
else:
_AddMember(name, child, module_obj)
# Store the constructed module object.
self._protos[lib_path] = api_objects_pb2.TFAPIObject(
path=lib_path, tf_module=module_obj)
elif _IsProtoClass(parent):
proto_obj = api_objects_pb2.TFAPIProto()
parent.DESCRIPTOR.CopyToProto(proto_obj.descriptor)
# Store the constructed proto object.
self._protos[lib_path] = api_objects_pb2.TFAPIObject(
path=lib_path, tf_proto=proto_obj)
elif tf_inspect.isclass(parent):
# Construct a class.
class_obj = api_objects_pb2.TFAPIClass()
class_obj.is_instance.extend(
_NormalizeIsInstance(i) for i in _SanitizedMRO(parent))
for name, child in children:
if name in parent_corner_cases:
# If we have an empty entry, skip this object.
if parent_corner_cases[name]:
class_obj.member.add(**(parent_corner_cases[name]))
else:
_AddMember(name, child, class_obj)
# Store the constructed class object.
self._protos[lib_path] = api_objects_pb2.TFAPIObject(
path=lib_path, tf_class=class_obj)
else:
      logging.error('Illegal call to ApiProtoDump::_py_obj_to_proto. '
'Object is neither a module nor a class: %s', path)
|
|
from sympy import Symbol, symbols, hypersimp, factorial, binomial, \
collect, Function, powsimp, separate, sin, exp, Rational, fraction, \
simplify, trigsimp, cos, tan, cot, log, ratsimp, Matrix, pi, integrate, \
solve, nsimplify, GoldenRatio, sqrt, E, I, sympify, atan, Derivative, \
S, diff, oo, Eq, Integer, gamma, acos, Integral, logcombine, Wild, \
separatevars, erf, rcollect, count_ops
from sympy.utilities import all
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y, z, t, a, b, c, d, e
def test_ratsimp():
f, g = 1/x + 1/y, (x + y)/(x*y)
assert f != g and ratsimp(f) == g
f, g = 1/(1 + 1/x), 1 - 1/(x + 1)
assert f != g and ratsimp(f) == g
f, g = x/(x + y) + y/(x + y), 1
assert f != g and ratsimp(f) == g
f, g = -x - y - y**2/(x + y) + x**2/(x + y), -2*y
assert f != g and ratsimp(f) == g
f = (a*c*x*y + a*c*z - b*d*x*y - b*d*z - b*t*x*y - b*t*x - b*t*z + e*x)/(x*y + z)
G = [a*c - b*d - b*t + (-b*t*x + e*x)/(x*y + z),
a*c - b*d - b*t - ( b*t*x - e*x)/(x*y + z)]
assert f != g and ratsimp(f) in G
A = sqrt(pi)
B = log(erf(x) - 1)
C = log(erf(x) + 1)
D = 8 - 8*erf(x)
f = A*B/D - A*C/D + A*C*erf(x)/D - A*B*erf(x)/D + 2*A/D
assert ratsimp(f) == A*B/8 - A*C/8 + A/(4 - 4*erf(x))
def test_trigsimp1():
x, y = symbols('x,y')
assert trigsimp(1 - sin(x)**2) == cos(x)**2
assert trigsimp(1 - cos(x)**2) == sin(x)**2
assert trigsimp(sin(x)**2 + cos(x)**2) == 1
assert trigsimp(1 + tan(x)**2) == 1/cos(x)**2
assert trigsimp(1/cos(x)**2 - 1) == tan(x)**2
assert trigsimp(1/cos(x)**2 - tan(x)**2) == 1
assert trigsimp(1 + cot(x)**2) == 1/sin(x)**2
assert trigsimp(1/sin(x)**2 - 1) == cot(x)**2
assert trigsimp(1/sin(x)**2 - cot(x)**2) == 1
assert trigsimp(5*cos(x)**2 + 5*sin(x)**2) == 5
assert trigsimp(5*cos(x/2)**2 + 2*sin(x/2)**2) in \
[2 + 3*cos(x/2)**2, 5 - 3*sin(x/2)**2]
assert trigsimp(sin(x)/cos(x)) == tan(x)
assert trigsimp(2*tan(x)*cos(x)) == 2*sin(x)
assert trigsimp(cot(x)**3*sin(x)**3) == cos(x)**3
assert trigsimp(y*tan(x)**2/sin(x)**2) == y/cos(x)**2
assert trigsimp(cot(x)/cos(x)) == 1/sin(x)
assert trigsimp(cos(0.12345)**2 + sin(0.12345)**2) == 1
e = 2*sin(x)**2 + 2*cos(x)**2
assert trigsimp(log(e), deep=True) == log(2)
def test_trigsimp2():
x, y = symbols('x,y')
assert trigsimp(cos(x)**2*sin(y)**2 + cos(x)**2*cos(y)**2 + sin(x)**2,
recursive=True) == 1
assert trigsimp(sin(x)**2*sin(y)**2 + sin(x)**2*cos(y)**2 + cos(x)**2,
recursive=True) == 1
def test_issue1274():
x = Symbol("x")
assert abs(trigsimp(2.0*sin(x)**2+2.0*cos(x)**2)-2.0) < 1e-10
def test_trigsimp3():
x, y = symbols('x,y')
assert trigsimp(sin(x)/cos(x)) == tan(x)
assert trigsimp(sin(x)**2/cos(x)**2) == tan(x)**2
assert trigsimp(sin(x)**3/cos(x)**3) == tan(x)**3
assert trigsimp(sin(x)**10/cos(x)**10) == tan(x)**10
assert trigsimp(cos(x)/sin(x)) == 1/tan(x)
assert trigsimp(cos(x)**2/sin(x)**2) == 1/tan(x)**2
assert trigsimp(cos(x)**10/sin(x)**10) == 1/tan(x)**10
assert trigsimp(tan(x)) == trigsimp(sin(x)/cos(x))
@XFAIL
def test_factorial_simplify():
# There are more tests in test_factorials.py. These are just to
# ensure that simplify() calls factorial_simplify correctly
from sympy.specfun.factorials import factorial
x = Symbol('x')
assert simplify(factorial(x)/x) == factorial(x-1)
assert simplify(factorial(factorial(x))) == factorial(factorial(x))
def test_simplify():
x, y, z, k, n, m, w, f, s, A = symbols('x,y,z,k,n,m,w,f,s,A')
assert all(simplify(tmp) == tmp for tmp in [I, E, oo, x, -x, -oo, -E, -I])
e = 1/x + 1/y
assert e != (x+y)/(x*y)
assert simplify(e) == (x+y)/(x*y)
e = A**2*s**4/(4*pi*k*m**3)
assert simplify(e) == e
e = (4+4*x-2*(2+2*x))/(2+2*x)
assert simplify(e) == 0
e = (-4*x*y**2-2*y**3-2*x**2*y)/(x+y)**2
assert simplify(e) == -2*y
e = -x-y-(x+y)**(-1)*y**2+(x+y)**(-1)*x**2
assert simplify(e) == -2*y
e = (x+x*y)/x
assert simplify(e) == 1 + y
e = (f(x)+y*f(x))/f(x)
assert simplify(e) == 1 + y
e = (2 * (1/n - cos(n * pi)/n))/pi
assert simplify(e) == 2*((1 - 1*cos(pi*n))/(pi*n))
e = integrate(1/(x**3+1), x).diff(x)
assert simplify(e) == 1/(x**3+1)
e = integrate(x/(x**2+3*x+1), x).diff(x)
assert simplify(e) == x/(x**2+3*x+1)
A = Matrix([[2*k-m*w**2, -k], [-k, k-m*w**2]]).inv()
assert simplify((A*Matrix([0,f]))[1]) == \
(f*(2*k - m*w**2))/(k**2 - 3*k*m*w**2 + m**2*w**4)
a, b, c, d, e, f, g, h, i = symbols('a,b,c,d,e,f,g,h,i')
f_1 = x*a + y*b + z*c - 1
f_2 = x*d + y*e + z*f - 1
f_3 = x*g + y*h + z*i - 1
solutions = solve([f_1, f_2, f_3], x, y, z, simplified=False)
assert simplify(solutions[y]) == \
(a*i+c*d+f*g-a*f-c*g-d*i)/(a*e*i+b*f*g+c*d*h-a*f*h-b*d*i-c*e*g)
f = -x + y/(z + t) + z*x/(z + t) + z*a/(z + t) + t*x/(z + t)
assert simplify(f) == (y + a*z)/(z + t)
A, B = symbols('A,B', commutative=False)
assert simplify(A*B - B*A) == A*B - B*A
def test_simplify_ratio():
# roots of x**3-3*x+5
roots = ['(5/2 + 21**(1/2)/2)**(1/3)*(1/2 - I*3**(1/2)/2)'
' + 1/((1/2 - I*3**(1/2)/2)*(5/2 + 21**(1/2)/2)**(1/3))',
'(5/2 + 21**(1/2)/2)**(1/3)*(1/2 + I*3**(1/2)/2)'
' + 1/((1/2 + I*3**(1/2)/2)*(5/2 + 21**(1/2)/2)**(1/3))',
'-1/(5/2 + 21**(1/2)/2)**(1/3) - (5/2 + 21**(1/2)/2)**(1/3)']
for r in roots:
r = S(r)
assert count_ops(simplify(r, ratio=1)) <= count_ops(r)
# If ratio=oo, simplify() is always applied:
assert simplify(r, ratio=oo) is not r
def test_simplify_issue_1308():
assert simplify(exp(-Rational(1, 2)) + exp(-Rational(3, 2))) == \
(1 + E)*exp(-Rational(3, 2))
assert simplify(exp(1)+exp(-exp(1))) == (1 + exp(1 + E))*exp(-E)
def test_simplify_fail1():
x = Symbol('x')
y = Symbol('y')
e = (x+y)**2/(-4*x*y**2-2*y**3-2*x**2*y)
assert simplify(e) == 1 / (-2*y)
def test_fraction():
x, y, z = map(Symbol, 'xyz')
assert fraction(Rational(1, 2)) == (1, 2)
assert fraction(x) == (x, 1)
assert fraction(1/x) == (1, x)
assert fraction(x/y) == (x, y)
assert fraction(x/2) == (x, 2)
assert fraction(x*y/z) == (x*y, z)
assert fraction(x/(y*z)) == (x, y*z)
assert fraction(1/y**2) == (1, y**2)
assert fraction(x/y**2) == (x, y**2)
assert fraction((x**2+1)/y) == (x**2+1, y)
assert fraction(x*(y+1)/y**7) == (x*(y+1), y**7)
assert fraction(exp(-x), exact=True) == (exp(-x), 1)
def test_separate():
x, y, z = symbols('x,y,z')
assert separate((x*y*z)**4) == x**4*y**4*z**4
assert separate((x*y*z)**x) == x**x*y**x*z**x
assert separate((x*(y*z)**2)**3) == x**3*y**6*z**6
assert separate((sin((x*y)**2)*y)**z) == sin((x*y)**2)**z*y**z
assert separate((sin((x*y)**2)*y)**z, deep=True) == sin(x**2*y**2)**z*y**z
assert separate(exp(x)**2) == exp(2*x)
assert separate((exp(x)*exp(y))**2) == exp(2*x)*exp(2*y)
assert separate((exp((x*y)**z)*exp(y))**2) == exp(2*(x*y)**z)*exp(2*y)
assert separate((exp((x*y)**z)*exp(y))**2, deep=True) == exp(2*x**z*y**z)*exp(2*y)
def test_separate_X1():
x, y, z = map(Symbol, 'xyz')
assert separate((exp(x)*exp(y))**z) == exp(x*z)*exp(y*z)
def test_powsimp():
x, y, z, n = symbols('x,y,z,n')
f = Function('f')
assert powsimp( 4**x * 2**(-x) * 2**(-x) ) == 1
assert powsimp( (-4)**x * (-2)**(-x) * 2**(-x) ) == 1
assert powsimp( f(4**x * 2**(-x) * 2**(-x)) ) == f(4**x * 2**(-x) * 2**(-x))
assert powsimp( f(4**x * 2**(-x) * 2**(-x)), deep = True ) == f(1)
assert exp(x)*exp(y) == exp(x)*exp(y)
assert powsimp(exp(x)*exp(y)) == exp(x+y)
assert powsimp(exp(x)*exp(y)*2**x*2**y) == (2*E)**(x + y)
assert powsimp(exp(x)*exp(y)*2**x*2**y, combine='exp') == exp(x+y)*2**(x+y)
assert powsimp(exp(x)*exp(y)*exp(2)*sin(x)+sin(y)+2**x*2**y) == exp(2+x+y)*sin(x)+sin(y)+2**(x+y)
assert powsimp(sin(exp(x)*exp(y))) == sin(exp(x)*exp(y))
assert powsimp(sin(exp(x)*exp(y)), deep=True) == sin(exp(x+y))
assert powsimp(x**2*x**y) == x**(2+y)
# This should remain factored, because 'exp' with deep=True is supposed
# to act like old automatic exponent combining.
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp', deep=True) == (1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), deep=True) == exp(1) + exp(-E)
# This should not change without deep. Otherwise, simplify() will fail.
assert powsimp((1 + E*exp(E))*exp(-E)) == (1 + E*exp(E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp') == (1 + E*exp(E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='base') == (1 + E*exp(E))*exp(-E)
x,y = symbols('x,y', nonnegative=True)
n = Symbol('n', real=True)
assert powsimp( y**n * (y/x)**(-n) ) == x**n
assert powsimp(x**(x**(x*y)*y**(x*y))*y**(x**(x*y)*y**(x*y)),deep=True) == (x*y)**(x*y)**(x*y)
assert powsimp(2**(2**(2*x)*x), deep=False) == 2**(2**(2*x)*x)
assert powsimp(2**(2**(2*x)*x), deep=True) == 2**(x*4**x)
assert powsimp(exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp(exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp((x+y)/(3*z), deep=False, combine='exp') == (x+y)/(3*z)
assert powsimp((x/3+y/3)/z, deep=True, combine='exp') == (x/3+y/3)/z
assert powsimp(exp(x)/(1 + exp(x)*exp(y)), deep=True) == exp(x)/(1 + exp(x + y))
assert powsimp(x*y**(z**x*z**y), deep=True) == x*y**(z**(x + y))
assert powsimp((z**x*z**y)**x, deep=True) == (z**(x + y))**x
assert powsimp(x*(z**x*z**y)**x, deep=True) == x*(z**(x + y))**x
p = symbols('p', positive=True)
assert powsimp((1/x)**log(2)/x) == (1/x)**(1 + log(2))
assert powsimp((1/p)**log(2)/p) == p**(-1 - log(2))
def test_powsimp_nc():
x, y, z = symbols('x,y,z')
A, B, C = symbols('A B C', commutative=False)
assert powsimp(A**x*A**y, combine='all') == A**(x+y)
assert powsimp(A**x*A**y, combine='base') == A**x*A**y
assert powsimp(A**x*A**y, combine='exp') == A**(x+y)
assert powsimp(A**x*B**x, combine='all') == (A*B)**x
assert powsimp(A**x*B**x, combine='base') == (A*B)**x
assert powsimp(A**x*B**x, combine='exp') == A**x*B**x
assert powsimp(B**x*A**x, combine='all') == (B*A)**x
assert powsimp(B**x*A**x, combine='base') == (B*A)**x
assert powsimp(B**x*A**x, combine='exp') == B**x*A**x
assert powsimp(A**x*A**y*A**z, combine='all') == A**(x+y+z)
assert powsimp(A**x*A**y*A**z, combine='base') == A**x*A**y*A**z
assert powsimp(A**x*A**y*A**z, combine='exp') == A**(x+y+z)
assert powsimp(A**x*B**x*C**x, combine='all') == (A*B*C)**x
assert powsimp(A**x*B**x*C**x, combine='base') == (A*B*C)**x
assert powsimp(A**x*B**x*C**x, combine='exp') == A**x*B**x*C**x
assert powsimp(B**x*A**x*C**x, combine='all') == (B*A*C)**x
assert powsimp(B**x*A**x*C**x, combine='base') == (B*A*C)**x
assert powsimp(B**x*A**x*C**x, combine='exp') == B**x*A**x*C**x
def test_collect_1():
"""Collect with respect to a Symbol"""
x, y, z, n = symbols('x,y,z,n')
assert collect( x + y*x, x ) == x * (1 + y)
assert collect( x + x**2, x ) == x + x**2
assert collect( x**2 + y*x**2, x ) == (x**2)*(1+y)
assert collect( x**2 + y*x, x ) == x*y + x**2
assert collect( 2*x**2 + y*x**2 + 3*x*y, [x] ) == x**2*(2+y) + 3*x*y
assert collect( 2*x**2 + y*x**2 + 3*x*y, [y] ) == 2*x**2 + y*(x**2+3*x)
assert collect( ((1 + y + x)**4).expand(), x) == ((1 + y)**4).expand() + \
x*(4*(1 + y)**3).expand() + x**2*(6*(1 + y)**2).expand() + \
x**3*(4*(1 + y)).expand() + x**4
def test_collect_2():
"""Collect with respect to a sum"""
a, b, x = symbols('a,b,x')
assert collect(a*(cos(x)+sin(x)) + b*(cos(x)+sin(x)), sin(x)+cos(x)) == (a + b)*(cos(x) + sin(x))
def test_collect_3():
"""Collect with respect to a product"""
a, b, c = symbols('a,b,c')
f = Function('f')
x, y, z, n = symbols('x,y,z,n')
assert collect(-x/8 + x*y, -x) == -x*(S.One/8 - y)
assert collect( 1 + x*(y**2), x*y ) == 1 + x*(y**2)
assert collect( x*y + a*x*y, x*y) == x*y*(1 + a)
assert collect( 1 + x*y + a*x*y, x*y) == 1 + x*y*(1 + a)
assert collect(a*x*f(x) + b*(x*f(x)), x*f(x)) == x*(a + b)*f(x)
assert collect(a*x*log(x) + b*(x*log(x)), x*log(x)) == x*(a + b)*log(x)
assert collect(a*x**2*log(x)**2 + b*(x*log(x))**2, x*log(x)) == x**2*log(x)**2*(a + b)
# with respect to a product of three symbols
assert collect(y*x*z+a*x*y*z, x*y*z) == (1 + a)*x*y*z
def test_collect_4():
"""Collect with respect to a power"""
a, b, c, x = symbols('a,b,c,x')
assert collect(a*x**c + b*x**c, x**c) == x**c*(a + b)
assert collect(a*x**(2*c) + b*x**(2*c), x**c) == (x**2)**c*(a + b)
def test_collect_5():
"""Collect with respect to a tuple"""
a, x, y, z, n = symbols('a,x,y,z,n')
assert collect(x**2*y**4 + z*(x*y**2)**2 + z + a*z, [x*y**2, z]) in [
z*(1 + a + x**2*y**4) + x**2*y**4,
z*(1 + a) + x**2*y**4*(1 + z) ]
assert collect((1+ (x+y) + (x+y)**2).expand(),
[x, y]) == 1 + y + x*(1 + 2*y) + x**2 + y**2
def test_collect_D():
D = Derivative
f = Function('f')
x, a, b = symbols('x,a,b')
fx = D(f(x), x)
fxx = D(f(x), x, x)
assert collect(a*fx + b*fx, fx) == (a + b)*fx
assert collect(a*D(fx, x) + b*D(fx, x), fx) == (a + b)*D(fx, x)
assert collect(a*fxx + b*fxx , fx) == (a + b)*D(fx, x)
# 1685
assert collect(5*f(x)+3*fx, fx) == 5*f(x) + 3*fx
assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x)) ==\
(x*f(x) + f(x))*D(f(x), x) + f(x)
assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x), exact=True) ==\
(x*f(x) + f(x))*D(f(x), x) + f(x)
assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x), exact=True) ==\
(1/f(x) + x/f(x))*D(f(x), x) + 1/f(x)
@XFAIL
def collect_issues():
assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x)) !=\
(1 + x*D(f(x), x) + D(f(x), x))/f(x)
def test_collect_D_0():
D = Derivative
f = Function('f')
x, a, b = symbols('x,a,b')
fxx = D(f(x), x, x)
# collect does not distinguish nested derivatives, so it returns
# -- (a + b)*D(D(f, x), x)
assert collect(a*fxx + b*fxx , fxx) == (a + b)*fxx
def test_collect_Wild():
"""Collect with respect to functions with Wild argument"""
a, b, x, y = symbols('a b x y')
f = Function('f')
w1 = Wild('.1')
w2 = Wild('.2')
assert collect(f(x) + a*f(x), f(w1)) == (1 + a)*f(x)
assert collect(f(x, y) + a*f(x, y), f(w1)) == f(x, y) + a*f(x, y)
assert collect(f(x, y) + a*f(x, y), f(w1, w2)) == (1 + a)*f(x, y)
assert collect(f(x, y) + a*f(x, y), f(w1, w1)) == f(x, y) + a*f(x, y)
assert collect(f(x, x) + a*f(x, x), f(w1, w1)) == (1 + a)*f(x, x)
assert collect(a*(x + 1)**y + (x + 1)**y, w1**y) == (1 + a)*(x + 1)**y
assert collect(a*(x + 1)**y + (x + 1)**y, w1**b) == a*(x + 1)**y + (x + 1)**y
assert collect(a*(x + 1)**y + (x + 1)**y, (x + 1)**w2) == (1 + a)*(x + 1)**y
assert collect(a*(x + 1)**y + (x + 1)**y, w1**w2) == (1 + a)*(x + 1)**y
def test_rcollect():
assert rcollect((x**2*y + x*y + x + y)/(x + y), y) == (x + y*(1 + x + x**2))/(x + y)
assert rcollect(sqrt(-((x + 1)*(y + 1))), z) == sqrt(-((x + 1)*(y + 1)))
def test_separatevars():
x,y,z,n = symbols('x,y,z,n')
assert separatevars(2*n*x*z+2*x*y*z) == 2*x*z*(n+y)
assert separatevars(x*z+x*y*z) == x*z*(1+y)
assert separatevars(pi*x*z+pi*x*y*z) == pi*x*z*(1+y)
assert separatevars(x*y**2*sin(x) + x*sin(x)*sin(y)) == x*(sin(y) + y**2)*sin(x)
assert separatevars(x*exp(x+y)+x*exp(x)) == x*(1 + exp(y))*exp(x)
assert separatevars((x*(y+1))**z) == x**z*(1 + y)**z
assert separatevars(1+x+y+x*y) == (x+1)*(y+1)
assert separatevars(y / pi * exp(-(z - x) / cos(n))) == y * exp((x - z) / cos(n)) / pi
# 1759
p=Symbol('p',positive=True)
assert separatevars(sqrt(p**2 + x*p**2)) == p*sqrt(1 + x)
assert separatevars(sqrt(y*(p**2 + x*p**2))) == p*sqrt(y)*sqrt(1 + x)
def test_separatevars_advanced_factor():
x,y,z = symbols('x,y,z')
assert separatevars(1 + log(x)*log(y) + log(x) + log(y)) == (log(x) + 1)*(log(y) + 1)
assert separatevars(1 + x - log(z) - x*log(z) - exp(y)*log(z) - \
x*exp(y)*log(z) + x*exp(y) + exp(y)) == \
(1 + x)*(1 - log(z))*(1 + exp(y))
x, y = symbols('x,y', positive=True)
assert separatevars(1 + log(x**log(y)) + log(x*y)) == (log(x) + 1)*(log(y) + 1)
def test_hypersimp():
n, k = symbols('n,k', integer=True)
assert hypersimp(factorial(k), k) == k + 1
assert hypersimp(factorial(k**2), k) is None
assert hypersimp(1/factorial(k), k) == 1/(k + 1)
assert hypersimp(2**k/factorial(k)**2, k) == 2/(k**2+2*k+1)
assert hypersimp(binomial(n, k), k) == (n-k)/(k+1)
assert hypersimp(binomial(n+1, k), k) == (n-k+1)/(k+1)
term = (4*k+1)*factorial(k)/factorial(2*k+1)
assert hypersimp(term, k) == (S(1)/2)*((4*k + 5)/(3 + 14*k + 8*k**2))
term = 1/((2*k-1)*factorial(2*k+1))
assert hypersimp(term, k) == (2*k-1)/(6 + 22*k + 24*k**2 + 8*k**3)
term = binomial(n, k)*(-1)**k/factorial(k)
assert hypersimp(term, k) == (k - n)/(k**2+2*k+1)
def test_nsimplify():
x = Symbol("x")
assert nsimplify(0) == 0
assert nsimplify(-1) == -1
assert nsimplify(1) == 1
assert nsimplify(1+x) == 1+x
assert nsimplify(2.7) == Rational(27, 10)
assert nsimplify(1-GoldenRatio) == (1-sqrt(5))/2
assert nsimplify((1+sqrt(5))/4, [GoldenRatio]) == GoldenRatio/2
assert nsimplify(2/GoldenRatio, [GoldenRatio]) == 2*GoldenRatio - 2
assert nsimplify(exp(5*pi*I/3, evaluate=False)) == sympify('1/2 - I*3**(1/2)/2')
assert nsimplify(sin(3*pi/5, evaluate=False)) == sympify('(5/8 + 1/8*5**(1/2))**(1/2)')
assert nsimplify(sqrt(atan('1', evaluate=False))*(2+I), [pi]) == sqrt(pi) + sqrt(pi)/2*I
assert nsimplify(2 + exp(2*atan('1/4')*I)) == sympify('49/17 + 8*I/17')
assert nsimplify(pi, tolerance=0.01) == Rational(22, 7)
assert nsimplify(pi, tolerance=0.001) == Rational(355, 113)
assert nsimplify(0.33333, tolerance=1e-4) == Rational(1, 3)
assert nsimplify(2.0**(1/3.), tolerance=0.001) == Rational(635, 504)
assert nsimplify(2.0**(1/3.), tolerance=0.001, full=True) == 2**Rational(1, 3)
assert nsimplify(x + .5, rational=True) == Rational(1, 2) + x
assert nsimplify(1/.3 + x, rational=True) == Rational(10, 3) + x
assert nsimplify(log(3).n(), rational=True) == \
sympify('109861228866811/100000000000000')
def test_extract_minus_sign():
x = Symbol("x")
y = Symbol("y")
a = Symbol("a")
b = Symbol("b")
assert simplify(-x/-y) == x/y
assert simplify(-x/y) == -x/y
assert simplify(x/y) == x/y
assert simplify(x/-y) == -x/y
assert simplify(-x/0) == -oo*x
assert simplify(S(-5)/0) == -oo
assert simplify(-a*x/(-y-b)) == a*x/(b + y)
def test_diff():
x = Symbol("x")
y = Symbol("y")
f = Function("f")
g = Function("g")
assert simplify(g(x).diff(x)*f(x).diff(x)-f(x).diff(x)*g(x).diff(x)) == 0
assert simplify(2*f(x)*f(x).diff(x)-diff(f(x)**2, x)) == 0
assert simplify(diff(1/f(x), x)+f(x).diff(x)/f(x)**2) == 0
assert simplify(f(x).diff(x, y)-f(x).diff(y, x)) == 0
def test_logcombine_1():
x, y = symbols("x,y")
a = Symbol("a")
z, w = symbols("z,w", positive=True)
b = Symbol("b", real=True)
assert logcombine(log(x)+2*log(y)) == log(x) + 2*log(y)
assert logcombine(log(x)+2*log(y), force=True) == log(x*y**2)
assert logcombine(a*log(w)+log(z)) == a*log(w) + log(z)
assert logcombine(b*log(z)+b*log(x)) == log(z**b) + b*log(x)
assert logcombine(b*log(z)-log(w)) == log(z**b/w)
assert logcombine(log(x)*log(z)) == log(x)*log(z)
assert logcombine(log(w)*log(x)) == log(w)*log(x)
assert logcombine(cos(-2*log(z)+b*log(w))) == cos(log(w**b/z**2))
assert logcombine(log(log(x)-log(y))-log(z), force=True) == \
log(log((x/y)**(1/z)))
assert logcombine((2+I)*log(x), force=True) == I*log(x)+log(x**2)
assert logcombine((x**2+log(x)-log(y))/(x*y), force=True) == \
log(x**(1/(x*y))*y**(-1/(x*y)))+x/y
assert logcombine(log(x)*2*log(y)+log(z), force=True) == \
log(z*y**log(x**2))
assert logcombine((x*y+sqrt(x**4+y**4)+log(x)-log(y))/(pi*x**Rational(2, 3)*\
y**Rational(3, 2)), force=True) == \
log(x**(1/(pi*x**Rational(2, 3)*y**Rational(3, 2)))*y**(-1/(pi*\
x**Rational(2, 3)*y**Rational(3, 2)))) + (x**4 + y**4)**Rational(1, 2)/(pi*\
x**Rational(2, 3)*y**Rational(3, 2)) + x**Rational(1, 3)/(pi*y**Rational(1, 2))
assert logcombine(Eq(log(x), -2*log(y)), force=True) == \
Eq(log(x*y**2), Integer(0))
assert logcombine(Eq(y, x*acos(-log(x/y))), force=True) == \
Eq(y, x*acos(log(y/x)))
assert logcombine(gamma(-log(x/y))*acos(-log(x/y)), force=True) == \
acos(log(y/x))*gamma(log(y/x))
assert logcombine((2+3*I)*log(x), force=True) == \
log(x**2)+3*I*log(x)
assert logcombine(Eq(y, -log(x)), force=True) == Eq(y, log(1/x))
assert logcombine(Integral((sin(x**2)+cos(x**3))/x, x), force=True) == \
Integral((sin(x**2)+cos(x**3))/x, x)
assert logcombine(Integral((sin(x**2)+cos(x**3))/x, x)+ (2+3*I)*log(x), \
force=True) == log(x**2)+3*I*log(x) + \
Integral((sin(x**2)+cos(x**3))/x, x)
@XFAIL
def test_logcombine_2():
# The same as one of the tests above, but with Rational(a, b) replaced with a/b.
# This fails because of a bug in matches. See issue 1274.
x, y = symbols("x,y")
assert logcombine((x*y+sqrt(x**4+y**4)+log(x)-log(y))/(pi*x**(2/3)*y**(3/2)), \
force=True) == log(x**(1/(pi*x**(2/3)*y**(3/2)))*y**(-1/\
(pi*x**(2/3)*y**(3/2)))) + (x**4 + y**4)**(1/2)/(pi*x**(2/3)*y**(3/2)) + \
x**(1/3)/(pi*y**(1/2))
def test_posify():
from sympy import posify, Symbol, log
from sympy.abc import x
assert str(posify(
x +
Symbol('p', positive=True) +
Symbol('n', negative=True))) == '(n + p + _x, {_x: x})'
    # log(1/x).expand() should be log(1/x), but it comes back as -log(x);
    # when that is corrected, posify will allow the change to be made:
eq, rep = posify(1/x)
assert log(eq).expand().subs(rep) == -log(x)
assert str(posify([x, 1 + x])) == '([_x, 1 + _x], {_x: x})'
def test_powdenest():
from sympy import powdenest
from sympy.abc import x, y, z, a, b
p = symbols('p', positive=True)
i, j = symbols('i,j', integer=1)
assert powdenest(x) == x
assert powdenest(x + 2*(x**(2*a/3))**(3*x)) == x + 2*(x**(a/3))**(6*x)
assert powdenest((exp(2*a/3))**(3*x)) == (exp(a/3))**(6*x)
assert powdenest((x**(2*a/3))**(3*x)) == (x**(a/3))**(6*x)
assert powdenest(exp(3*x*log(2))) == 2**(3*x)
assert powdenest(sqrt(p**2)) == p
i, j = symbols('i,j', integer=1)
assert powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j == x**(x*(i + j))
assert powdenest(exp(3*y*log(x))) == x**(3*y)
assert powdenest(exp(y*(log(a) + log(b)))) == (a*b)**y
assert powdenest(exp(3*(log(a) + log(b)))) == a**3*b**3
assert powdenest(((x**(2*i))**(3*y))**x) == ((x**(2*i))**(3*y))**x
assert powdenest(((x**(2*i))**(3*y))**x, force=True) == x**(6*i*x*y)
assert powdenest(((x**(2*a/3))**(3*y/i))**x) == ((x**(a/3))**(y/i))**(6*x)
assert powdenest((x**(2*i)*y**(4*i))**z, force=True) == (x*y**2)**(2*i*z)
e = ((x**2*y**4)**a)**(x*y)
assert powdenest(e) == e
e = (((x**2*y**4)**a)**(x*y))**3
assert powdenest(e) == ((x**2*y**4)**a)**(3*x*y)
@XFAIL
def test_powdenest_fail_in_polys():
from sympy import powdenest
from sympy.abc import x, y, z, a, b
assert powdenest((((x**2*y**4)**a)**(x*y)), force=True) == (x**2*y**4)**(a*x*y)
assert powdenest((((x**2*y**4)**a)**(x*y))**3, force=True) == (x**2*y**4)**(3*a*x*y)
# from __future__ import unicode_literals
# Character ranges of letters
letters = 'a-zA-Z' # \u00c0-\u00d6\u00d8-\u00f6\u00f8-\u0103\u0106\u0107\
# \u010c-\u010f\u0112-\u0115\u011a-\u012d\u0131\u0141\u0142\u0147\u0148\
# \u0150-\u0153\u0158-\u0161\u0164\u0165\u016e-\u0171\u017d\u017e\
# \u0391-\u03a1\u03a3-\u03a9\u03b1-\u03c9\u03d1\u03d2\u03d5\u03d6\
# \u03da-\u03e1\u03f0\u03f1\u03f5\u210a-\u210c\u2110-\u2113\u211b\u211c\
# \u2128\u212c\u212d\u212f-\u2131\u2133-\u2138\uf6b2-\uf6b5\uf6b7\uf6b9\
# \uf6ba-\uf6bc\uf6be\uf6bf\uf6c1-\uf700\uf730\uf731\uf770\uf772\uf773\
# \uf776\uf779\uf77a\uf77d-\uf780\uf782-\uf78b\uf78d-\uf78f\uf790\
# \uf793-\uf79a\uf79c-\uf7a2\uf7a4-\uf7bd\uf800-\uf833\ufb01\ufb02'
# Character ranges of letterlikes
letterlikes = '' # \u0024\u00A1\u00A2\u00A3\u00A5\u00A7\u00A9\u00AB\u00AE\
# \u00B0\u00B5\u00B6\u00B8\u00BB\u00BF\u02C7\u02D8\u2013\u2014\u2020\u2021\
# \u2022\u2026\u2032\u2033\u2035\u2036\u2060\u20AC\u210F\u2122\u2127\u212B\
# \u21B5\u2205\u221E\u221F\u2220\u2221\u2222\u22EE\u22EF\u22F0\u22F1\u2300\
# \u2318\u231A\u23B4\u23B5\u2500\u2502\u25A0\u25A1\u25AA\u25AE\u25AF\u25B2\
# \u25B3\u25BC\u25BD\u25C0\u25C6\u25C7\u25CB\u25CF\u25E6\u25FB\u25FC\u2605\
# \u2639\u263A\u2660\u2661\u2662\u2663\u266D\u266E\u266F\u2736\uF3A0\uF3B8\
# \uF3B9\uF527\uF528\uF720\uF721\uF722\uF723\uF725\uF749\uF74A\uF74D\uF74E\
# \uF74F\uF750\uF751\uF752\uF753\uF754\uF755\uF756\uF757\uF760\uF763\uF766\
# \uF768\uF769\uF76A\uF76B\uF76C\uF7D4\uF800\uF801\uF802\uF803\uF804\uF805\
# \uF806\uF807\uF808\uF809\uF80A\uF80B\uF80C\uF80D\uF80E\uF80F\uF810\uF811\
# \uF812\uF813\uF814\uF815\uF816\uF817\uF818\uF819\uF81A\uF81B\uF81C\uF81D\
# \uF81E\uF81F\uF820\uF821\uF822\uF823\uF824\uF825\uF826\uF827\uF828\uF829\
# \uF82A\uF82B\uF82C\uF82D\uF82E\uF82F\uF830\uF831\uF832\uF833\uFE35\uFE36\
# \uFE37\uFE38'
# All supported longname characters
named_characters = {
# 'AAcute': '\u00E1',
# 'ABar': '\u0101',
# 'ACup': '\u0103',
# 'ADoubleDot': '\u00E4',
# 'AE': '\u00E6',
# 'AGrave': '\u00E0',
# 'AHat': '\u00E2',
# 'Aleph': '\u2135',
# 'AliasDelimiter': '\uF764',
# 'AliasIndicator': '\uF768',
# 'AlignmentMarker': '\uF760',
# 'Alpha': '\u03B1',
# 'AltKey': '\uF7D1',
# 'And': '\u2227',
# 'Angle': '\u2220',
# 'Angstrom': '\u212B',
# 'ARing': '\u00E5',
# 'AscendingEllipsis': '\u22F0',
# 'ATilde': '\u00E3',
# 'AutoLeftMatch': '\uF3A8',
# 'AutoOperand': '\uF3AE',
# 'AutoPlaceholder': '\uF3A4',
# 'AutoRightMatch': '\uF3A9',
# 'AutoSpace': '\uF3AD',
# 'Backslash': '\u2216',
# 'BeamedEighthNote': '\u266B',
# 'BeamedSixteenthNote': '\u266C',
# 'Because': '\u2235',
# 'Bet': '\u2136',
# 'Beta': '\u03B2',
# 'BlackBishop': '\u265D',
# 'BlackKing': '\u265A',
# 'BlackKnight': '\u265E',
# 'BlackPawn': '\u265F',
# 'BlackQueen': '\u265B',
# 'BlackRook': '\u265C',
# 'Breve': '\u02D8',
# 'Bullet': '\u2022',
# 'CAcute': '\u0107',
# 'CapitalAAcute': '\u00C1',
# 'CapitalABar': '\u0100',
# 'CapitalACup': '\u0102',
# 'CapitalADoubleDot': '\u00C4',
# 'CapitalAE': '\u00C6',
# 'CapitalAGrave': '\u00C0',
# 'CapitalAHat': '\u00C2',
# 'CapitalAlpha': '\u0391',
# 'CapitalARing': '\u00C5',
# 'CapitalATilde': '\u00C3',
# 'CapitalBeta': '\u0392',
# 'CapitalCAcute': '\u0106',
# 'CapitalCCedilla': '\u00C7',
# 'CapitalCHacek': '\u010C',
# 'CapitalChi': '\u03A7',
# 'CapitalDelta': '\u0394',
# 'CapitalDHacek': '\u010E',
# 'CapitalDifferentialD': '\uF74B',
# 'CapitalDigamma': '\u03DC',
# 'CapitalEAcute': '\u00C9',
# 'CapitalEBar': '\u0112',
# 'CapitalECup': '\u0114',
# 'CapitalEDoubleDot': '\u00CB',
# 'CapitalEGrave': '\u00C8',
# 'CapitalEHacek': '\u011A',
# 'CapitalEHat': '\u00CA',
# 'CapitalEpsilon': '\u0395',
# 'CapitalEta': '\u0397',
# 'CapitalEth': '\u00D0',
# 'CapitalGamma': '\u0393',
# 'CapitalIAcute': '\u00CD',
# 'CapitalICup': '\u012C',
# 'CapitalIDoubleDot': '\u00CF',
# 'CapitalIGrave': '\u00CC',
# 'CapitalIHat': '\u00CE',
# 'CapitalIota': '\u0399',
# 'CapitalKappa': '\u039A',
# 'CapitalKoppa': '\u03DE',
# 'CapitalLambda': '\u039B',
# 'CapitalLSlash': '\u0141',
    # 'CapitalMu': '\u039C',
# 'CapitalNHacek': '\u0147',
# 'CapitalNTilde': '\u00D1',
    # 'CapitalNu': '\u039D',
# 'CapitalOAcute': '\u00D3',
# 'CapitalODoubleAcute': '\u0150',
# 'CapitalODoubleDot': '\u00D6',
# 'CapitalOE': '\u0152',
# 'CapitalOGrave': '\u00D2',
# 'CapitalOHat': '\u00D4',
# 'CapitalOmega': '\u03A9',
# 'CapitalOmicron': '\u039F',
# 'CapitalOSlash': '\u00D8',
# 'CapitalOTilde': '\u00D5',
# 'CapitalPhi': '\u03A6',
# 'CapitalPi': '\u03A0',
# 'CapitalPsi': '\u03A8',
# 'CapitalRHacek': '\u0158',
# 'CapitalRho': '\u03A1',
# 'CapitalSampi': '\u03E0',
# 'CapitalSHacek': '\u0160',
# 'CapitalSigma': '\u03A3',
# 'CapitalStigma': '\u03DA',
    # 'CapitalTau': '\u03A4',
# 'CapitalTHacek': '\u0164',
# 'CapitalTheta': '\u0398',
# 'CapitalThorn': '\u00DE',
# 'CapitalUAcute': '\u00DA',
# 'CapitalUDoubleAcute': '\u0170',
# 'CapitalUDoubleDot': '\u00DC',
# 'CapitalUGrave': '\u00D9',
# 'CapitalUHat': '\u00DB',
# 'CapitalUpsilon': '\u03A5',
# 'CapitalURing': '\u016E',
# 'CapitalXi': '\u039E',
# 'CapitalYAcute': '\u00DD',
# 'CapitalZeta': '\u0396',
# 'CapitalZHacek': '\u017D',
# 'Cap': '\u2322',
# 'CCedilla': '\u00E7',
# 'Cedilla': '\u00B8',
# 'CenterDot': '\u00B7',
# 'CenterEllipsis': '\u22EF',
# 'Cent': '\u00A2',
# 'CHacek': '\u010D',
# 'Checkmark': '\u2713',
# 'Chi': '\u03C7',
# 'CircleDot': '\u2299',
# 'CircleMinus': '\u2296',
# 'CirclePlus': '\u2295',
# 'CircleTimes': '\u2297',
# 'ClockwiseContourIntegral': '\u2232',
# 'CloseCurlyDoubleQuote': '\u201D',
# 'CloseCurlyQuote': '\u2019',
# 'CloverLeaf': '\u2318',
# 'ClubSuit': '\u2663',
# 'Colon': '\u2236',
# 'CommandKey': '\uF76A',
# 'Congruent': '\u2261',
# 'Conjugate': '\uF3C8',
# 'ConjugateTranspose': '\uF3C9',
# 'ConstantC': '\uF7DA',
# 'Continuation': '\uF3B1',
# 'ContourIntegral': '\u222E',
# 'ControlKey': '\uF763',
# 'Coproduct': '\u2210',
# 'Copyright': '\u00A9',
# 'CounterClockwiseContourIntegral': '\u2233',
# 'Cross': '\uF4A0',
# 'CupCap': '\u224D',
# 'Cup': '\u2323',
# 'CurlyCapitalUpsilon': '\u03D2',
# 'CurlyEpsilon': '\u03B5',
# 'CurlyKappa': '\u03F0',
# 'CurlyPhi': '\u03C6',
# 'CurlyPi': '\u03D6',
# 'CurlyRho': '\u03F1',
# 'CurlyTheta': '\u03D1',
# 'Currency': '\u00A4',
# 'Dagger': '\u2020',
# 'Dalet': '\u2138',
# 'Dash': '\u2013',
# 'Degree': '\u00B0',
# 'DeleteKey': '\uF7D0',
# 'Del': '\u2207',
# 'Delta': '\u03B4',
# 'DescendingEllipsis': '\u22F1',
# 'DHacek': '\u010F',
# 'Diameter': '\u2300',
# 'Diamond': '\u22C4',
# 'DiamondSuit': '\u2662',
# 'DifferenceDelta': '\u2206',
# 'DifferentialD': '\uF74C',
# 'Digamma': '\u03DD',
# 'DiscreteRatio': '\uF4A4',
# 'DiscreteShift': '\uF4A3',
# 'DiscretionaryHyphen': '\u00AD',
# 'DiscretionaryLineSeparator': '\uF76E',
# 'DiscretionaryParagraphSeparator': '\uF76F',
# 'Divide': '\u00F7',
# 'DotEqual': '\u2250',
# 'DotlessI': '\u0131',
# 'DotlessJ': '\uF700',
# 'DottedSquare': '\uF751',
# 'DoubleContourIntegral': '\u222F',
# 'DoubleDagger': '\u2021',
# 'DoubledGamma': '\uF74A',
# 'DoubleDownArrow': '\u21D3',
# 'DoubledPi': '\uF749',
# 'DoubleLeftArrow': '\u21D0',
# 'DoubleLeftRightArrow': '\u21D4',
# 'DoubleLeftTee': '\u2AE4',
# 'DoubleLongLeftArrow': '\u27F8',
# 'DoubleLongLeftRightArrow': '\u27FA',
# 'DoubleLongRightArrow': '\u27F9',
# 'DoublePrime': '\u2033',
# 'DoubleRightArrow': '\u21D2',
# 'DoubleRightTee': '\u22A8',
# 'DoubleStruckA': '\uF6E6',
# 'DoubleStruckB': '\uF6E7',
# 'DoubleStruckC': '\uF6E8',
# 'DoubleStruckCapitalA': '\uF7A4',
# 'DoubleStruckCapitalB': '\uF7A5',
# 'DoubleStruckCapitalC': '\uF7A6',
# 'DoubleStruckCapitalD': '\uF7A7',
# 'DoubleStruckCapitalE': '\uF7A8',
# 'DoubleStruckCapitalF': '\uF7A9',
# 'DoubleStruckCapitalG': '\uF7AA',
# 'DoubleStruckCapitalH': '\uF7AB',
# 'DoubleStruckCapitalI': '\uF7AC',
# 'DoubleStruckCapitalJ': '\uF7AD',
# 'DoubleStruckCapitalK': '\uF7AE',
# 'DoubleStruckCapitalL': '\uF7AF',
# 'DoubleStruckCapitalM': '\uF7B0',
# 'DoubleStruckCapitalN': '\uF7B1',
# 'DoubleStruckCapitalO': '\uF7B2',
# 'DoubleStruckCapitalP': '\uF7B3',
# 'DoubleStruckCapitalQ': '\uF7B4',
# 'DoubleStruckCapitalR': '\uF7B5',
# 'DoubleStruckCapitalS': '\uF7B6',
# 'DoubleStruckCapitalT': '\uF7B7',
# 'DoubleStruckCapitalU': '\uF7B8',
# 'DoubleStruckCapitalV': '\uF7B9',
# 'DoubleStruckCapitalW': '\uF7BA',
# 'DoubleStruckCapitalX': '\uF7BB',
# 'DoubleStruckCapitalY': '\uF7BC',
# 'DoubleStruckCapitalZ': '\uF7BD',
# 'DoubleStruckD': '\uF6E9',
# 'DoubleStruckE': '\uF6EA',
# 'DoubleStruckEight': '\uF7E3',
# 'DoubleStruckF': '\uF6EB',
# 'DoubleStruckFive': '\uF7E0',
# 'DoubleStruckFour': '\uF7DF',
# 'DoubleStruckG': '\uF6EC',
# 'DoubleStruckH': '\uF6ED',
# 'DoubleStruckI': '\uF6EE',
# 'DoubleStruckJ': '\uF6EF',
# 'DoubleStruckK': '\uF6F0',
# 'DoubleStruckL': '\uF6F1',
# 'DoubleStruckM': '\uF6F2',
# 'DoubleStruckN': '\uF6F3',
# 'DoubleStruckNine': '\uF7E4',
# 'DoubleStruckO': '\uF6F4',
# 'DoubleStruckOne': '\uF7DC',
# 'DoubleStruckP': '\uF6F5',
# 'DoubleStruckQ': '\uF6F6',
# 'DoubleStruckR': '\uF6F7',
# 'DoubleStruckS': '\uF6F8',
# 'DoubleStruckSeven': '\uF7E2',
# 'DoubleStruckSix': '\uF7E1',
# 'DoubleStruckT': '\uF6F9',
# 'DoubleStruckThree': '\uF7DE',
# 'DoubleStruckTwo': '\uF7DD',
# 'DoubleStruckU': '\uF6FA',
# 'DoubleStruckV': '\uF6FB',
# 'DoubleStruckW': '\uF6FC',
# 'DoubleStruckX': '\uF6FD',
# 'DoubleStruckY': '\uF6FE',
# 'DoubleStruckZ': '\uF6FF',
# 'DoubleStruckZero': '\uF7DB',
# 'DoubleUpArrow': '\u21D1',
# 'DoubleUpDownArrow': '\u21D5',
# 'DoubleVerticalBar': '\u2225',
# 'DownArrowBar': '\u2913',
# 'DownArrow': '\u2193',
# 'DownArrowUpArrow': '\u21F5',
# 'DownBreve': '\uF755',
# 'DownExclamation': '\u00A1',
# 'DownLeftRightVector': '\u2950',
# 'DownLeftTeeVector': '\u295E',
# 'DownLeftVector': '\u21BD',
# 'DownLeftVectorBar': '\u2956',
# 'DownPointer': '\u25BE',
# 'DownQuestion': '\u00BF',
# 'DownRightTeeVector': '\u295F',
# 'DownRightVector': '\u21C1',
# 'DownRightVectorBar': '\u2957',
# 'DownTeeArrow': '\u21A7',
# 'DownTee': '\u22A4',
# 'EAcute': '\u00E9',
# 'Earth': '\u2641',
# 'EBar': '\u0113',
# 'ECup': '\u0115',
# 'EDoubleDot': '\u00EB',
# 'EGrave': '\u00E8',
# 'EHacek': '\u011B',
# 'EHat': '\u00EA',
# 'EighthNote': '\u266A',
# 'Element': '\u2208',
# 'Ellipsis': '\u2026',
# 'EmptyCircle': '\u25CB',
# 'EmptyDiamond': '\u25C7',
# 'EmptyDownTriangle': '\u25BD',
# 'EmptyRectangle': '\u25AF',
# 'EmptySet': '\u2205',
# 'EmptySmallCircle': '\u25E6',
# 'EmptySmallSquare': '\u25FB',
# 'EmptySquare': '\u25A1',
# 'EmptyUpTriangle': '\u25B3',
# 'EmptyVerySmallSquare': '\u25AB',
# 'EnterKey': '\uF7D4',
# 'EntityEnd': '\uF3B9',
# 'EntityStart': '\uF3B8',
# 'Epsilon': '\u03F5',
# 'Equal': '\uF431',
# 'EqualTilde': '\u2242',
# 'Equilibrium': '\u21CC',
# 'Equivalent': '\u29E6',
# 'ErrorIndicator': '\uF767',
# 'EscapeKey': '\uF769',
# 'Eta': '\u03B7',
# 'Eth': '\u00F0',
# 'Euro': '\u20AC',
# 'Exists': '\u2203',
# 'ExponentialE': '\uF74D',
# 'FiLigature': '\uFB01',
# 'FilledCircle': '\u25CF',
# 'FilledDiamond': '\u25C6',
# 'FilledDownTriangle': '\u25BC',
# 'FilledLeftTriangle': '\u25C0',
# 'FilledRectangle': '\u25AE',
# 'FilledRightTriangle': '\u25B6',
# 'FilledSmallCircle': '\uF750',
# 'FilledSmallSquare': '\u25FC',
# 'FilledSquare': '\u25A0',
# 'FilledUpTriangle': '\u25B2',
# 'FilledVerySmallSquare': '\u25AA',
# 'FinalSigma': '\u03C2',
# 'FirstPage': '\uF7FA',
# 'FivePointedStar': '\u2605',
# 'Flat': '\u266D',
# 'FlLigature': '\uFB02',
# 'Florin': '\u0192',
# 'ForAll': '\u2200',
# 'FormalA': '\uF800',
# 'FormalB': '\uF801',
# 'FormalC': '\uF802',
# 'FormalCapitalA': '\uF81A',
# 'FormalCapitalB': '\uF81B',
# 'FormalCapitalC': '\uF81C',
# 'FormalCapitalD': '\uF81D',
# 'FormalCapitalE': '\uF81E',
# 'FormalCapitalF': '\uF81F',
# 'FormalCapitalG': '\uF820',
# 'FormalCapitalH': '\uF821',
# 'FormalCapitalI': '\uF822',
# 'FormalCapitalJ': '\uF823',
# 'FormalCapitalK': '\uF824',
# 'FormalCapitalL': '\uF825',
# 'FormalCapitalM': '\uF826',
# 'FormalCapitalN': '\uF827',
# 'FormalCapitalO': '\uF828',
# 'FormalCapitalP': '\uF829',
# 'FormalCapitalQ': '\uF82A',
# 'FormalCapitalR': '\uF82B',
# 'FormalCapitalS': '\uF82C',
# 'FormalCapitalT': '\uF82D',
# 'FormalCapitalU': '\uF82E',
# 'FormalCapitalV': '\uF82F',
# 'FormalCapitalW': '\uF830',
# 'FormalCapitalX': '\uF831',
# 'FormalCapitalY': '\uF832',
# 'FormalCapitalZ': '\uF833',
# 'FormalD': '\uF803',
# 'FormalE': '\uF804',
# 'FormalF': '\uF805',
# 'FormalG': '\uF806',
# 'FormalH': '\uF807',
# 'FormalI': '\uF808',
# 'FormalJ': '\uF809',
# 'FormalK': '\uF80A',
# 'FormalL': '\uF80B',
# 'FormalM': '\uF80C',
# 'FormalN': '\uF80D',
# 'FormalO': '\uF80E',
# 'FormalP': '\uF80F',
# 'FormalQ': '\uF810',
# 'FormalR': '\uF811',
# 'FormalS': '\uF812',
# 'FormalT': '\uF813',
# 'FormalU': '\uF814',
# 'FormalV': '\uF815',
# 'FormalW': '\uF816',
# 'FormalX': '\uF817',
# 'FormalY': '\uF818',
# 'FormalZ': '\uF819',
# 'FreakedSmiley': '\uF721',
# 'Function': '\uF4A1',
# 'Gamma': '\u03B3',
# 'Gimel': '\u2137',
# 'GothicA': '\uF6CC',
# 'GothicB': '\uF6CD',
# 'GothicC': '\uF6CE',
# 'GothicCapitalA': '\uF78A',
# 'GothicCapitalB': '\uF78B',
# 'GothicCapitalC': '\u212D',
# 'GothicCapitalD': '\uF78D',
# 'GothicCapitalE': '\uF78E',
# 'GothicCapitalF': '\uF78F',
# 'GothicCapitalG': '\uF790',
# 'GothicCapitalH': '\u210C',
# 'GothicCapitalI': '\u2111',
# 'GothicCapitalJ': '\uF793',
# 'GothicCapitalK': '\uF794',
# 'GothicCapitalL': '\uF795',
# 'GothicCapitalM': '\uF796',
# 'GothicCapitalN': '\uF797',
# 'GothicCapitalO': '\uF798',
# 'GothicCapitalP': '\uF799',
# 'GothicCapitalQ': '\uF79A',
# 'GothicCapitalR': '\u211C',
# 'GothicCapitalS': '\uF79C',
# 'GothicCapitalT': '\uF79D',
# 'GothicCapitalU': '\uF79E',
# 'GothicCapitalV': '\uF79F',
# 'GothicCapitalW': '\uF7A0',
# 'GothicCapitalX': '\uF7A1',
# 'GothicCapitalY': '\uF7A2',
# 'GothicCapitalZ': '\u2128',
# 'GothicD': '\uF6CF',
# 'GothicE': '\uF6D0',
# 'GothicEight': '\uF7ED',
# 'GothicF': '\uF6D1',
# 'GothicFive': '\uF7EA',
# 'GothicFour': '\uF7E9',
# 'GothicG': '\uF6D2',
# 'GothicH': '\uF6D3',
# 'GothicI': '\uF6D4',
# 'GothicJ': '\uF6D5',
# 'GothicK': '\uF6D6',
# 'GothicL': '\uF6D7',
# 'GothicM': '\uF6D8',
# 'GothicN': '\uF6D9',
# 'GothicNine': '\uF7EF',
# 'GothicO': '\uF6DA',
# 'GothicOne': '\uF7E6',
# 'GothicP': '\uF6DB',
# 'GothicQ': '\uF6DC',
# 'GothicR': '\uF6DD',
# 'GothicS': '\uF6DE',
# 'GothicSeven': '\uF7EC',
# 'GothicSix': '\uF7EB',
# 'GothicT': '\uF6DF',
# 'GothicThree': '\uF7E8',
# 'GothicTwo': '\uF7E7',
# 'GothicU': '\uF6E0',
# 'GothicV': '\uF6E1',
# 'GothicW': '\uF6E2',
# 'GothicX': '\uF6E3',
# 'GothicY': '\uF6E4',
# 'GothicZ': '\uF6E5',
# 'GothicZero': '\uF7E5',
# 'GrayCircle': '\uF753',
# 'GraySquare': '\uF752',
# 'GreaterEqualLess': '\u22DB',
# 'GreaterEqual': '\u2265',
# 'GreaterFullEqual': '\u2267',
# 'GreaterGreater': '\u226B',
# 'GreaterLess': '\u2277',
# 'GreaterSlantEqual': '\u2A7E',
# 'GreaterTilde': '\u2273',
# 'Hacek': '\u02C7',
# 'HappySmiley': '\u263A',
# 'HBar': '\u210F',
# 'HeartSuit': '\u2661',
# 'HermitianConjugate': '\uF3CE',
# 'HorizontalLine': '\u2500',
# 'HumpDownHump': '\u224E',
# 'HumpEqual': '\u224F',
# 'Hyphen': '\u2010',
# 'IAcute': '\u00ED',
# 'ICup': '\u012D',
# 'IDoubleDot': '\u00EF',
# 'IGrave': '\u00EC',
# 'IHat': '\u00EE',
# 'ImaginaryI': '\uF74E',
# 'ImaginaryJ': '\uF74F',
# 'ImplicitPlus': '\uF39E',
# 'Implies': '\uF523',
# 'Infinity': '\u221E',
# 'Integral': '\u222B',
# 'Intersection': '\u22C2',
# 'InvisibleApplication': '\uF76D',
# 'InvisibleComma': '\uF765',
# 'InvisiblePostfixScriptBase': '\uF3B4',
# 'InvisiblePrefixScriptBase': '\uF3B3',
# 'InvisibleSpace': '\uF360',
# 'InvisibleTimes': '\u2062',
# 'Iota': '\u03B9',
# 'Jupiter': '\u2643',
# 'Kappa': '\u03BA',
# 'KernelIcon': '\uF756',
# 'Koppa': '\u03DF',
# 'Lambda': '\u03BB',
# 'LastPage': '\uF7FB',
# 'LeftAngleBracket': '\u2329',
# 'LeftArrowBar': '\u21E4',
# 'LeftArrow': '\u2190',
# 'LeftArrowRightArrow': '\u21C6',
# 'LeftBracketingBar': '\uF603',
# 'LeftCeiling': '\u2308',
# 'LeftDoubleBracket': '\u301A',
# 'LeftDoubleBracketingBar': '\uF605',
# 'LeftDownTeeVector': '\u2961',
# 'LeftDownVectorBar': '\u2959',
# 'LeftDownVector': '\u21C3',
# 'LeftFloor': '\u230A',
# 'LeftGuillemet': '\u00AB',
# 'LeftModified': '\uF76B',
# 'LeftPointer': '\u25C2',
# 'LeftRightArrow': '\u2194',
# 'LeftRightVector': '\u294E',
# 'LeftSkeleton': '\uF761',
# 'LeftTee': '\u22A3',
# 'LeftTeeArrow': '\u21A4',
# 'LeftTeeVector': '\u295A',
# 'LeftTriangle': '\u22B2',
# 'LeftTriangleBar': '\u29CF',
# 'LeftTriangleEqual': '\u22B4',
# 'LeftUpDownVector': '\u2951',
# 'LeftUpTeeVector': '\u2960',
# 'LeftUpVector': '\u21BF',
# 'LeftUpVectorBar': '\u2958',
# 'LeftVector': '\u21BC',
# 'LeftVectorBar': '\u2952',
# 'LessEqual': '\u2264',
# 'LessEqualGreater': '\u22DA',
# 'LessFullEqual': '\u2266',
# 'LessGreater': '\u2276',
# 'LessLess': '\u226A',
# 'LessSlantEqual': '\u2A7D',
# 'LessTilde': '\u2272',
# 'LetterSpace': '\uF754',
# 'LightBulb': '\uF723',
# 'LongDash': '\u2014',
# 'LongEqual': '\uF7D9',
# 'LongLeftArrow': '\u27F5',
# 'LongLeftRightArrow': '\u27F7',
# 'LongRightArrow': '\u27F6',
# 'LowerLeftArrow': '\u2199',
# 'LowerRightArrow': '\u2198',
# 'LSlash': '\u0142',
# 'Mars': '\u2642',
# 'MathematicaIcon': '\uF757',
# 'MeasuredAngle': '\u2221',
# 'MediumSpace': '\u205F',
# 'Mercury': '\u263F',
# 'Mho': '\u2127',
# 'Micro': '\u00B5',
# 'MinusPlus': '\u2213',
    # 'Mu': '\u03BC',
# 'Nand': '\u22BC',
# 'Natural': '\u266E',
# 'NegativeMediumSpace': '\uF383',
# 'NegativeThickSpace': '\uF384',
# 'NegativeThinSpace': '\uF382',
# 'NegativeVeryThinSpace': '\uF380',
# 'Neptune': '\u2646',
# 'NestedGreaterGreater': '\u2AA2',
# 'NestedLessLess': '\u2AA1',
# 'NeutralSmiley': '\uF722',
# 'NHacek': '\u0148',
# 'NoBreak': '\u2060',
# 'NonBreakingSpace': '\u00A0',
# 'Nor': '\u22BD',
# 'NotCongruent': '\u2262',
# 'NotCupCap': '\u226D',
# 'NotDoubleVerticalBar': '\u2226',
# 'NotElement': '\u2209',
# 'NotEqual': '\u2260',
# 'NotEqualTilde': '\uF400',
# 'NotExists': '\u2204',
# 'NotGreater': '\u226F',
# 'NotGreaterEqual': '\u2271',
# 'NotGreaterFullEqual': '\u2269',
# 'NotGreaterGreater': '\uF427',
# 'NotGreaterLess': '\u2279',
# 'NotGreaterSlantEqual': '\uF429',
# 'NotGreaterTilde': '\u2275',
# 'NotHumpDownHump': '\uF402',
# 'NotHumpEqual': '\uF401',
# 'NotLeftTriangle': '\u22EA',
# 'NotLeftTriangleBar': '\uF412',
# 'NotLeftTriangleEqual': '\u22EC',
# 'NotLessEqual': '\u2270',
# 'NotLessFullEqual': '\u2268',
# 'NotLessGreater': '\u2278',
# 'NotLess': '\u226E',
# 'NotLessLess': '\uF422',
# 'NotLessSlantEqual': '\uF424',
# 'NotLessTilde': '\u2274',
# 'Not': '\u00AC',
# 'NotNestedGreaterGreater': '\uF428',
# 'NotNestedLessLess': '\uF423',
# 'NotPrecedes': '\u2280',
# 'NotPrecedesEqual': '\uF42B',
# 'NotPrecedesSlantEqual': '\u22E0',
# 'NotPrecedesTilde': '\u22E8',
# 'NotReverseElement': '\u220C',
# 'NotRightTriangle': '\u22EB',
# 'NotRightTriangleBar': '\uF413',
# 'NotRightTriangleEqual': '\u22ED',
# 'NotSquareSubset': '\uF42E',
# 'NotSquareSubsetEqual': '\u22E2',
# 'NotSquareSuperset': '\uF42F',
# 'NotSquareSupersetEqual': '\u22E3',
# 'NotSubset': '\u2284',
# 'NotSubsetEqual': '\u2288',
# 'NotSucceeds': '\u2281',
# 'NotSucceedsEqual': '\uF42D',
# 'NotSucceedsSlantEqual': '\u22E1',
# 'NotSucceedsTilde': '\u22E9',
# 'NotSuperset': '\u2285',
# 'NotSupersetEqual': '\u2289',
# 'NotTilde': '\u2241',
# 'NotTildeEqual': '\u2244',
# 'NotTildeFullEqual': '\u2247',
# 'NotTildeTilde': '\u2249',
# 'NotVerticalBar': '\u2224',
# 'NTilde': '\u00F1',
    # 'Nu': '\u03BD',
# 'Null': '\uF3A0',
# 'NumberSign': '\uF724',
# 'OAcute': '\u00F3',
# 'ODoubleAcute': '\u0151',
# 'ODoubleDot': '\u00F6',
# 'OE': '\u0153',
# 'OGrave': '\u00F2',
# 'OHat': '\u00F4',
# 'Omega': '\u03C9',
# 'Omicron': '\u03BF',
# 'OpenCurlyDoubleQuote': '\u201C',
# 'OpenCurlyQuote': '\u2018',
# 'OptionKey': '\uF7D2',
# 'Or': '\u2228',
# 'OSlash': '\u00F8',
# 'OTilde': '\u00F5',
# 'OverBrace': '\uFE37',
# 'OverBracket': '\u23B4',
# 'OverParenthesis': '\uFE35',
# 'Paragraph': '\u00B6',
# 'PartialD': '\u2202',
# 'Phi': '\u03D5',
# 'Pi': '\u03C0',
# 'Piecewise': '\uF361',
# 'Placeholder': '\uF528',
# 'PlusMinus': '\u00B1',
# 'Pluto': '\u2647',
# 'Precedes': '\u227A',
# 'PrecedesEqual': '\u2AAF',
# 'PrecedesSlantEqual': '\u227C',
# 'PrecedesTilde': '\u227E',
# 'Prime': '\u2032',
# 'Product': '\u220F',
# 'Proportion': '\u2237',
# 'Proportional': '\u221D',
# 'Psi': '\u03C8',
# 'QuarterNote': '\u2669',
'RawAmpersand': '\u0026',
'RawAt': '\u0040',
'RawBackquote': '\u0060',
'RawBackslash': '\u005C',
'RawColon': '\u003A',
'RawComma': '\u002C',
'RawDash': '\u002D',
'RawDollar': '\u0024',
'RawDot': '\u002E',
'RawDoubleQuote': '\u0022',
'RawEqual': '\u003D',
'RawEscape': '\u001B',
'RawExclamation': '\u0021',
'RawGreater': '\u003E',
'RawLeftBrace': '\u007B',
'RawLeftBracket': '\u005B',
'RawLeftParenthesis': '\u0028',
'RawLess': '\u003C',
'RawNumberSign': '\u0023',
'RawPercent': '\u0025',
'RawPlus': '\u002B',
'RawQuestion': '\u003F',
'RawQuote': '\u0027',
'RawRightBrace': '\u007D',
'RawRightBracket': '\u005D',
'RawRightParenthesis': '\u0029',
'RawSemicolon': '\u003B',
'RawSlash': '\u002F',
'RawSpace': '\u0020',
'RawStar': '\u002A',
'RawTab': '\u0009',
'RawTilde': '\u007E',
'RawUnderscore': '\u005F',
'RawVerticalBar': '\u007C',
'RawWedge': '\u005E',
# 'RegisteredTrademark': '\u00AE',
# 'ReturnIndicator': '\u21B5',
# 'ReturnKey': '\uF766',
# 'ReverseDoublePrime': '\u2036',
# 'ReverseElement': '\u220B',
# 'ReverseEquilibrium': '\u21CB',
# 'ReversePrime': '\u2035',
# 'ReverseUpEquilibrium': '\u296F',
# 'RHacek': '\u0159',
# 'Rho': '\u03C1',
# 'RightAngle': '\u221F',
# 'RightAngleBracket': '\u232A',
# 'RightArrow': '\u2192',
# 'RightArrowBar': '\u21E5',
# 'RightArrowLeftArrow': '\u21C4',
# 'RightBracketingBar': '\uF604',
# 'RightCeiling': '\u2309',
# 'RightDoubleBracket': '\u301B',
# 'RightDoubleBracketingBar': '\uF606',
# 'RightDownTeeVector': '\u295D',
# 'RightDownVector': '\u21C2',
# 'RightDownVectorBar': '\u2955',
# 'RightFloor': '\u230B',
# 'RightGuillemet': '\u00BB',
# 'RightModified': '\uF76C',
# 'RightPointer': '\u25B8',
# 'RightSkeleton': '\uF762',
# 'RightTee': '\u22A2',
# 'RightTeeArrow': '\u21A6',
# 'RightTeeVector': '\u295B',
# 'RightTriangle': '\u22B3',
# 'RightTriangleBar': '\u29D0',
# 'RightTriangleEqual': '\u22B5',
# 'RightUpDownVector': '\u294F',
# 'RightUpTeeVector': '\u295C',
# 'RightUpVector': '\u21BE',
# 'RightUpVectorBar': '\u2954',
# 'RightVector': '\u21C0',
# 'RightVectorBar': '\u2953',
# 'RoundImplies': '\u2970',
# 'RoundSpaceIndicator': '\uF3B2',
# 'Rule': '\uF522',
# 'RuleDelayed': '\uF51F',
# 'SadSmiley': '\u2639',
# 'Sampi': '\u03E0',
# 'Saturn': '\u2644',
# 'ScriptA': '\uF6B2',
# 'ScriptB': '\uF6B3',
# 'ScriptC': '\uF6B4',
# 'ScriptCapitalA': '\uF770',
# 'ScriptCapitalB': '\u212C',
# 'ScriptCapitalC': '\uF772',
# 'ScriptCapitalD': '\uF773',
# 'ScriptCapitalE': '\u2130',
# 'ScriptCapitalF': '\u2131',
# 'ScriptCapitalG': '\uF776',
# 'ScriptCapitalH': '\u210B',
# 'ScriptCapitalI': '\u2110',
# 'ScriptCapitalJ': '\uF779',
# 'ScriptCapitalK': '\uF77A',
# 'ScriptCapitalL': '\u2112',
# 'ScriptCapitalM': '\u2133',
# 'ScriptCapitalN': '\uF77D',
# 'ScriptCapitalO': '\uF77E',
# 'ScriptCapitalP': '\u2118',
# 'ScriptCapitalQ': '\uF780',
# 'ScriptCapitalR': '\u211B',
# 'ScriptCapitalS': '\uF782',
# 'ScriptCapitalT': '\uF783',
# 'ScriptCapitalU': '\uF784',
# 'ScriptCapitalV': '\uF785',
# 'ScriptCapitalW': '\uF786',
# 'ScriptCapitalX': '\uF787',
# 'ScriptCapitalY': '\uF788',
# 'ScriptCapitalZ': '\uF789',
# 'ScriptD': '\uF6B5',
# 'ScriptDotlessI': '\uF730',
# 'ScriptDotlessJ': '\uF731',
# 'ScriptE': '\u212F',
# 'ScriptEight': '\uF7F8',
# 'ScriptF': '\uF6B7',
# 'ScriptFive': '\uF7F5',
# 'ScriptFour': '\uF7F4',
# 'ScriptG': '\u210A',
# 'ScriptH': '\uF6B9',
# 'ScriptI': '\uF6BA',
# 'ScriptJ': '\uF6BB',
# 'ScriptK': '\uF6BC',
# 'ScriptL': '\u2113',
# 'ScriptM': '\uF6BE',
# 'ScriptN': '\uF6BF',
# 'ScriptNine': '\uF7F9',
# 'ScriptO': '\u2134',
# 'ScriptOne': '\uF7F1',
# 'ScriptP': '\uF6C1',
# 'ScriptQ': '\uF6C2',
# 'ScriptR': '\uF6C3',
# 'ScriptS': '\uF6C4',
# 'ScriptSeven': '\uF7F7',
# 'ScriptSix': '\uF7F6',
# 'ScriptT': '\uF6C5',
# 'ScriptThree': '\uF7F3',
# 'ScriptTwo': '\uF7F2',
# 'ScriptU': '\uF6C6',
# 'ScriptV': '\uF6C7',
# 'ScriptW': '\uF6C8',
# 'ScriptX': '\uF6C9',
# 'ScriptY': '\uF6CA',
# 'ScriptZ': '\uF6CB',
# 'ScriptZero': '\uF7F0',
# 'Section': '\u00A7',
# 'SelectionPlaceholder': '\uF527',
# 'SHacek': '\u0161',
# 'Sharp': '\u266F',
# 'ShortLeftArrow': '\uF526',
# 'ShortRightArrow': '\uF525',
# 'Sigma': '\u03C3',
# 'SixPointedStar': '\u2736',
# 'SkeletonIndicator': '\u2043',
# 'SmallCircle': '\u2218',
# 'SpaceIndicator': '\u2423',
# 'SpaceKey': '\uF7BF',
# 'SpadeSuit': '\u2660',
# 'SpanFromAbove': '\uF3BB',
# 'SpanFromBoth': '\uF3BC',
# 'SpanFromLeft': '\uF3BA',
# 'SphericalAngle': '\u2222',
# 'Sqrt': '\u221A',
# 'Square': '\uF520',
# 'SquareIntersection': '\u2293',
# 'SquareSubset': '\u228F',
# 'SquareSubsetEqual': '\u2291',
# 'SquareSuperset': '\u2290',
# 'SquareSupersetEqual': '\u2292',
# 'SquareUnion': '\u2294',
# 'Star': '\u22C6',
# 'Sterling': '\u00A3',
# 'Stigma': '\u03DB',
# 'Subset': '\u2282',
# 'SubsetEqual': '\u2286',
# 'Succeeds': '\u227B',
# 'SucceedsEqual': '\u2AB0',
# 'SucceedsSlantEqual': '\u227D',
# 'SucceedsTilde': '\u227F',
# 'SuchThat': '\u220D',
# 'Sum': '\u2211',
# 'Superset': '\u2283',
# 'SupersetEqual': '\u2287',
# 'SystemEnterKey': '\uF75F',
# 'SZ': '\u00DF',
# 'TabKey': '\uF7BE',
    # 'Tau': '\u03C4',
# 'THacek': '\u0165',
# 'Therefore': '\u2234',
# 'Theta': '\u03B8',
# 'ThickSpace': '\u2005',
# 'ThinSpace': '\u2009',
# 'Thorn': '\u00FE',
# 'Tilde': '\u223C',
# 'TildeEqual': '\u2243',
# 'TildeFullEqual': '\u2245',
# 'TildeTilde': '\u2248',
# 'Times': '\u00D7',
# 'Trademark': '\u2122',
# 'Transpose': '\uF3C7',
# 'UAcute': '\u00FA',
# 'UDoubleAcute': '\u0171',
# 'UDoubleDot': '\u00FC',
# 'UGrave': '\u00F9',
# 'UHat': '\u00FB',
# 'UnderBrace': '\uFE38',
# 'UnderBracket': '\u23B5',
# 'UnderParenthesis': '\uFE36',
# 'Union': '\u22C3',
# 'UnionPlus': '\u228E',
# 'UpArrow': '\u2191',
# 'UpArrowBar': '\u2912',
# 'UpArrowDownArrow': '\u21C5',
# 'UpDownArrow': '\u2195',
# 'UpEquilibrium': '\u296E',
# 'UpperLeftArrow': '\u2196',
# 'UpperRightArrow': '\u2197',
# 'UpPointer': '\u25B4',
# 'Upsilon': '\u03C5',
# 'UpTee': '\u22A5',
# 'UpTeeArrow': '\u21A5',
# 'Uranus': '\u2645',
# 'URing': '\u016F',
# 'Vee': '\u22C1',
# 'Venus': '\u2640',
# 'VerticalBar': '\u2223',
# 'VerticalEllipsis': '\u22EE',
# 'VerticalLine': '\u2502',
# 'VerticalSeparator': '\uF432',
# 'VerticalTilde': '\u2240',
# 'VeryThinSpace': '\u200A',
# 'WarningSign': '\uF725',
# 'WatchIcon': '\u231A',
# 'Wedge': '\u22C0',
# 'WeierstrassP': '\u2118',
# 'WhiteBishop': '\u2657',
# 'WhiteKing': '\u2654',
# 'WhiteKnight': '\u2658',
# 'WhitePawn': '\u2659',
# 'WhiteQueen': '\u2655',
# 'WhiteRook': '\u2656',
# 'Wolf': '\uF720',
# 'Xi': '\u03BE',
# 'Xnor': '\uF4A2',
# 'Xor': '\u22BB',
# 'YAcute': '\u00FD',
# 'YDoubleDot': '\u00FF',
# 'Yen': '\u00A5',
# 'Zeta': '\u03B6',
# 'ZHacek': '\u017E',
}
aliased_characters = {
# "a'": '\u00E1',
# 'a-': '\u0101',
# 'a': '\u0103',
# 'a"': '\u00E4',
# 'ae': '\u00E6',
# 'a`': '\u00E0',
# 'a^': '\u00E2',
# 'al': '\u2135',
# 'esc': '\uF768',
# 'am': '\uF760',
# 'a': '\u03B1',
# 'alpha': '\u03B1',
# 'alt': '\uF7D1',
# '&&': '\u2227',
# 'and': '\u2227',
# 'Ang': '\u212B',
# 'ao': '\u00E5',
# 'a~': '\u00E3',
# '\\': '\u2216',
# 'be': '\u2136',
# 'b': '\u03B2',
# 'beta': '\u03B2',
# 'bv': '\u02D8',
    # 'bu': '\u2022',
# "c'": '\u0107',
# "A'": '\u00C1',
# 'A-': '\u0100',
# 'A': '\u0102',
# 'A"': '\u00C4',
# 'AE': '\u00C6',
# 'A`': '\u00C0',
# 'A^': '\u00C2',
# 'A': '\u0391',
# 'Alpha': '\u0391',
# 'Ao': '\u00C5',
# 'A~': '\u00C3',
# 'B': '\u0392',
# 'Beta': '\u0392',
# "C'": '\u0106',
# 'C,': '\u00C7',
# 'Cv': '\u010C',
# 'Ch': '\u03A7',
# 'Chi': '\u03A7',
# 'C': '\u03A7',
# 'D': '\u0394',
# 'Delta': '\u0394',
# 'Dv': '\u010E',
# 'DD': '\uF74B',
# 'Di': '\u03DC',
# 'Digamma': '\u03DC',
# "E'": '\u00C9',
# 'E-': '\u0112',
# 'E': '\u0114',
# 'E"': '\u00CB',
# 'E`': '\u00C8',
# 'Ev': '\u011A',
# 'E^': '\u00CA',
# 'E': '\u0395',
# 'Epsilon': '\u0395',
# 'Et': '\u0397',
# 'Eta': '\u0397',
# 'H': '\u0397',
# 'D-': '\u00D0',
# 'G': '\u0393',
# 'Gamma': '\u0393',
# "I'": '\u00CD',
# 'I': '\u012C',
# 'I"': '\u00CF',
# 'I`': '\u00CC',
# 'I^': '\u00CE',
# 'I': '\u0399',
# 'Iota': '\u0399',
# 'K': '\u039A',
# 'Kappa': '\u039A',
# 'Ko': '\u03DE',
# 'Koppa': '\u03DE',
# 'L': '\u039B',
# 'Lambda': '\u039B',
# 'L/': '\u0141',
# 'M': '\u039C',
    # 'Mu': '\u039C',
# 'Nv': '\u0147',
# 'N~': '\u00D1',
# 'N': '\u039D',
    # 'Nu': '\u039D',
# "O'": '\u00D3',
# "O''": '\u0150',
# 'O"': '\u00D6',
# 'OE': '\u0152',
# 'O`': '\u00D2',
# 'O^': '\u00D4',
# 'O': '\u03A9',
# 'Omega': '\u03A9',
# 'W': '\u03A9',
# 'Om': '\u039F',
# 'Omicron': '\u039F',
# 'O/': '\u00D8',
# 'O~': '\u00D5',
# 'Ph': '\u03A6',
# 'Phi': '\u03A6',
# 'F': '\u03A6',
# 'P': '\u03A0',
# 'Pi': '\u03A0',
# 'Ps': '\u03A8',
# 'Psi': '\u03A8',
# 'Y': '\u03A8',
# 'Rv': '\u0158',
# 'R': '\u03A1',
# 'Rho': '\u03A1',
# 'Sa': '\u03E0',
# 'Sampi': '\u03E0',
# 'Sv': '\u0160',
# 'S': '\u03A3',
# 'Sigma': '\u03A3',
# 'T': '\u03A4',
    # 'Tau': '\u03A4',
# 'Tv': '\u0164',
# 'Th': '\u0398',
# 'Theta': '\u0398',
# 'Q': '\u0398',
# 'Thn': '\u00DE',
# "U'": '\u00DA',
# "U''": '\u0170',
# 'U"': '\u00DC',
# 'U`': '\u00D9',
# 'U^': '\u00DB',
# 'U': '\u03A5',
# 'Upsilon': '\u03A5',
# 'Uo': '\u016E',
# 'X': '\u039E',
# 'Xi': '\u039E',
# "Y'": '\u00DD',
# 'Z': '\u0396',
# 'Zeta': '\u0396',
# 'Zv': '\u017D',
# 'c,': '\u00E7',
# 'cd': '\u00B8',
# '.': '\u00B7',
# 'cent': '\u00A2',
# 'cv': '\u010D',
# 'ch': '\u03C7',
# 'chi': '\u03C7',
# 'c': '\u03C7',
# 'c.': '\u2299',
# 'c-': '\u2296',
# 'c+': '\u2295',
# 'c*': '\u2297',
# 'ccint': '\u2232',
# 'cl': '\u2318',
# ':': '\u2236',
# 'cmd': '\uF76A',
# '===': '\u2261',
# 'co': '\uF3C8',
# 'conj': '\uF3C8',
# 'ct': '\uF3C9',
# 'cont': '\uF3B1',
# 'cint': '\u222E',
# 'ctrl': '\uF763',
# 'coprod': '\u2210',
# 'cccint': '\u2233',
# 'cross': '\uF4A0',
# 'cU': '\u03D2',
# 'cUpsilon': '\u03D2',
# 'ce': '\u03B5',
# 'cepsilon': '\u03B5',
# 'ck': '\u03F0',
# 'ckappa': '\u03F0',
# 'j': '\u03C6',
# 'cph': '\u03C6',
# 'cphi': '\u03C6',
# 'cp': '\u03D6',
# 'cpi': '\u03D6',
# 'cr': '\u03F1',
# 'crho': '\u03F1',
# 'cq': '\u03D1',
# 'cth': '\u03D1',
# 'ctheta': '\u03D1',
# 'dg': '\u2020',
# 'da': '\u2138',
# '-': '\u2013',
# 'deg': '\u00B0',
# ' del': '\uF7D0',
# 'del': '\u2207',
# 'd': '\u03B4',
# 'delta': '\u03B4',
# 'dv': '\u010F',
# 'dia': '\u22C4',
# 'diffd': '\u2206',
# 'dd': '\uF74C',
# 'di': '\u03DD',
# 'digamma': '\u03DD',
# 'dratio': '\uF4A4',
# 'shift': '\uF4A3',
# 'dhy': '\u00AD',
# 'dlsep': '\uF76E',
# 'dpsep': '\uF76F',
# 'div': '\u00F7',
# '.=': '\u2250',
# 'ddg': '\u2021',
# 'gg': '\uF74A',
# 'pp': '\uF749',
# ' <=': '\u21D0',
# '<=>': '\u21D4',
# '<==': '\u27F8',
# '<==>': '\u27FA',
# '==>': '\u27F9',
# "''": '\u2033',
# ' =>': '\u21D2',
# 'dsa': '\uF6E6',
# 'dsb': '\uF6E7',
# 'dsc': '\uF6E8',
# 'dsA': '\uF7A4',
# 'dsB': '\uF7A5',
# 'dsC': '\uF7A6',
# 'dsD': '\uF7A7',
# 'dsE': '\uF7A8',
# 'dsF': '\uF7A9',
# 'dsG': '\uF7AA',
# 'dsH': '\uF7AB',
# 'dsI': '\uF7AC',
# 'dsJ': '\uF7AD',
# 'dsK': '\uF7AE',
# 'dsL': '\uF7AF',
# 'dsM': '\uF7B0',
# 'dsN': '\uF7B1',
# 'dsO': '\uF7B2',
# 'dsP': '\uF7B3',
# 'dsQ': '\uF7B4',
# 'dsR': '\uF7B5',
# 'dsS': '\uF7B6',
# 'dsT': '\uF7B7',
# 'dsU': '\uF7B8',
# 'dsV': '\uF7B9',
# 'dsW': '\uF7BA',
# 'dsX': '\uF7BB',
# 'dsY': '\uF7BC',
# 'dsZ': '\uF7BD',
# 'dsd': '\uF6E9',
# 'dse': '\uF6EA',
# 'ds8': '\uF7E3',
# 'dsf': '\uF6EB',
# 'ds5': '\uF7E0',
# 'ds4': '\uF7DF',
# 'dsg': '\uF6EC',
# 'dsh': '\uF6ED',
# 'dsi': '\uF6EE',
# 'dsj': '\uF6EF',
# 'dsk': '\uF6F0',
# 'dsl': '\uF6F1',
# 'dsm': '\uF6F2',
# 'dsn': '\uF6F3',
# 'ds9': '\uF7E4',
# 'dso': '\uF6F4',
# 'ds1': '\uF7DC',
# 'dsp': '\uF6F5',
# 'dsq': '\uF6F6',
# 'dsr': '\uF6F7',
# 'dss': '\uF6F8',
# 'ds7': '\uF7E2',
# 'ds6': '\uF7E1',
# 'dst': '\uF6F9',
# 'ds3': '\uF7DE',
# 'ds2': '\uF7DD',
    # 'dsu': '\uF6FA',
# 'dsv': '\uF6FB',
# 'dsw': '\uF6FC',
# 'dsx': '\uF6FD',
# 'dsy': '\uF6FE',
# 'dsz': '\uF6FF',
# 'ds0': '\uF7DB',
# ' ||': '\u2225',
# 'dbv': '\uF755',
# 'd!': '\u00A1',
# 'd?': '\u00BF',
# 'dT': '\u22A4',
# "e'": '\u00E9',
# 'e-': '\u0113',
# 'e': '\u0115',
# 'e"': '\u00EB',
# 'e`': '\u00E8',
# 'ev': '\u011B',
# 'e^': '\u00EA',
# 'el': '\u2208',
# 'elem': '\u2208',
# '...': '\u2026',
# 'eci': '\u25CB',
# 'es': '\u2205',
# 'esci': '\u25E6',
# 'essq': '\u25FB',
# 'esq': '\u25A1',
# 'ent': '\uF7D4',
# 'e': '\u03F5',
# 'epsilon': '\u03F5',
# '==': '\uF431',
# '=~': '\u2242',
# 'equi': '\u21CC',
# 'equiv': '\u29E6',
# ' esc': '\uF769',
# 'et': '\u03B7',
# 'eta': '\u03B7',
# 'h': '\u03B7',
# 'd-': '\u00F0',
# 'ex': '\u2203',
# 'ee': '\uF74D',
# 'fci': '\u25CF',
# 'fsci': '\uF750',
# 'fssq': '\u25FC',
# 'fsq': '\u25A0',
# 'fvssq': '\u25AA',
# 'fs': '\u03C2',
# '*5': '\u2605',
# 'fa': '\u2200',
# '$a': '\uF800',
# '$b': '\uF801',
# '$c': '\uF802',
# '$A': '\uF81A',
# '$B': '\uF81B',
# '$C': '\uF81C',
# '$D': '\uF81D',
# '$E': '\uF81E',
# '$F': '\uF81F',
# '$G': '\uF820',
# '$H': '\uF821',
# '$I': '\uF822',
# '$J': '\uF823',
# '$K': '\uF824',
# '$L': '\uF825',
# '$M': '\uF826',
# '$N': '\uF827',
# '$O': '\uF828',
# '$P': '\uF829',
# '$Q': '\uF82A',
# '$R': '\uF82B',
# '$S': '\uF82C',
# '$T': '\uF82D',
# '$U': '\uF82E',
# '$V': '\uF82F',
# '$W': '\uF830',
# '$X': '\uF831',
# '$Y': '\uF832',
# '$Z': '\uF833',
# '$d': '\uF803',
# '$e': '\uF804',
# '$f': '\uF805',
# '$g': '\uF806',
# '$h': '\uF807',
# '$i': '\uF808',
# '$j': '\uF809',
# '$k': '\uF80A',
# '$l': '\uF80B',
# '$m': '\uF80C',
# '$n': '\uF80D',
# '$o': '\uF80E',
# '$p': '\uF80F',
# '$q': '\uF810',
# '$r': '\uF811',
# '$s': '\uF812',
# '$t': '\uF813',
    # '$u': '\uF814',
# '$v': '\uF815',
# '$w': '\uF816',
# '$x': '\uF817',
# '$y': '\uF818',
# '$z': '\uF819',
# ':-@': '\uF721',
# 'fn': '\uF4A1',
# 'g': '\u03B3',
# 'gamma': '\u03B3',
# 'gi': '\u2137',
# 'goa': '\uF6CC',
# 'gob': '\uF6CD',
# 'goc': '\uF6CE',
# 'goA': '\uF78A',
# 'goB': '\uF78B',
# 'goC': '\u212D',
# 'goD': '\uF78D',
# 'goE': '\uF78E',
# 'goF': '\uF78F',
# 'goG': '\uF790',
# 'goH': '\u210C',
# 'goI': '\u2111',
# 'goJ': '\uF793',
# 'goK': '\uF794',
# 'goL': '\uF795',
# 'goM': '\uF796',
# 'goN': '\uF797',
# 'goO': '\uF798',
# 'goP': '\uF799',
# 'goQ': '\uF79A',
# 'goR': '\u211C',
# 'goS': '\uF79C',
# 'goT': '\uF79D',
# 'goU': '\uF79E',
# 'goV': '\uF79F',
# 'goW': '\uF7A0',
# 'goX': '\uF7A1',
# 'goY': '\uF7A2',
# 'goZ': '\u2128',
# 'god': '\uF6CF',
# 'goe': '\uF6D0',
# 'go8': '\uF7ED',
# 'gof': '\uF6D1',
# 'go5': '\uF7EA',
# 'go4': '\uF7E9',
# 'gog': '\uF6D2',
# 'goh': '\uF6D3',
# 'goi': '\uF6D4',
# 'goj': '\uF6D5',
# 'gok': '\uF6D6',
# 'gol': '\uF6D7',
# 'gom': '\uF6D8',
# 'gon': '\uF6D9',
# 'go9': '\uF7EF',
# 'goo': '\uF6DA',
# 'go1': '\uF7E6',
# 'gop': '\uF6DB',
# 'goq': '\uF6DC',
# 'gor': '\uF6DD',
# 'gos': '\uF6DE',
# 'go7': '\uF7EC',
# 'go6': '\uF7EB',
# 'got': '\uF6DF',
# 'go3': '\uF7E8',
# 'go2': '\uF7E7',
    # 'gou': '\uF6E0',
# 'gov': '\uF6E1',
# 'gow': '\uF6E2',
# 'gox': '\uF6E3',
# 'goy': '\uF6E4',
# 'goz': '\uF6E5',
# 'go0': '\uF7E5',
# 'gci': '\uF753',
# 'gsq': '\uF752',
# '>=': '\u2265',
# '>/': '\u2A7E',
# '>~': '\u2273',
# 'hck': '\u02C7',
# ':)': '\u263A',
# ':-)': '\u263A',
# 'hb': '\u210F',
# 'hc': '\uF3CE',
# 'hline': '\u2500',
# 'h=': '\u224F',
# "i'": '\u00ED',
# 'i': '\u012D',
# 'i"': '\u00EF',
# 'i`': '\u00EC',
# 'i^': '\u00EE',
# 'ii': '\uF74E',
# 'jj': '\uF74F',
# '+': '\uF39E',
# '=>': '\uF523',
# 'inf': '\u221E',
# 'int': '\u222B',
# 'inter': '\u22C2',
# '@': '\uF76D',
# ',': '\uF765',
# 'is': '\uF360',
# 'i': '\u03B9',
# 'iota': '\u03B9',
# 'k': '\u03BA',
# 'kappa': '\u03BA',
# 'ko': '\u03DF',
# 'koppa': '\u03DF',
# 'l': '\u03BB',
# 'lambda': '\u03BB',
# '<': '\u2329',
# '<-': '\u2190',
# 'l|': '\uF603',
# 'lc': '\u2308',
# '[[': '\u301A',
# 'l||': '\uF605',
# 'lf': '\u230A',
# 'g<<': '\u00AB',
# '[': '\uF76B',
# '<->': '\u2194',
# 'lT': '\u22A3',
# '<=': '\u2264',
# '</': '\u2A7D',
# '<~': '\u2272',
# '_': '\uF754',
# 'ls': '\uF754',
# '--': '\u2014',
# '<--': '\u27F5',
# '<-->': '\u27F7',
# '-->': '\u27F6',
# 'l/': '\u0142',
# 'math': '\uF757',
# ' ': '\u205F',
# 'mho': '\u2127',
# 'mi': '\u00B5',
# '-+': '\u2213',
# 'm': '\u03BC',
    # 'mu': '\u03BC',
# 'nand': '\u22BC',
# '- ': '\uF383',
# '- ': '\uF384',
# '- ': '\uF382',
# '- ': '\uF380',
# ':-|': '\uF722',
# 'nv': '\u0148',
# 'nb': '\u2060',
# 'nbs': '\u00A0',
# 'nor': '\u22BD',
# '!===': '\u2262',
# '!||': '\u2226',
# '!el': '\u2209',
# '!elem': '\u2209',
# '!=': '\u2260',
# '!=~': '\uF400',
# '!ex': '\u2204',
# '!>': '\u226F',
# '!>=': '\u2271',
# '!>/': '\uF429',
# '!>~': '\u2275',
# '!h=': '\uF401',
# '!<=': '\u2270',
# '!<': '\u226E',
# '!</': '\uF424',
# '!<~': '\u2274',
# '!': '\u00AC',
# 'not': '\u00AC',
# '!mem': '\u220C',
# '!sub': '\u2284',
# '!sub=': '\u2288',
# '!sup': '\u2285',
# '!sup=': '\u2289',
# '!~': '\u2241',
# '!~=': '\u2244',
# '!~==': '\u2247',
# '!~~': '\u2249',
# '!|': '\u2224',
# 'n~': '\u00F1',
# 'n': '\u03BD',
    # 'nu': '\u03BD',
# 'null': '\uF3A0',
# "o'": '\u00F3',
# "o''": '\u0151',
# 'o"': '\u00F6',
# 'oe': '\u0153',
# 'o`': '\u00F2',
# 'o^': '\u00F4',
# 'o': '\u03C9',
# 'omega': '\u03C9',
# 'w': '\u03C9',
# 'om': '\u03BF',
# 'omicron': '\u03BF',
# 'opt': '\uF7D2',
# '||': '\u2228',
# 'or': '\u2228',
# 'o/': '\u00F8',
# 'o~': '\u00F5',
# 'o{': '\uFE37',
# 'o[': '\u23B4',
# 'o(': '\uFE35',
# 'pd': '\u2202',
# 'ph': '\u03D5',
# 'phi': '\u03D5',
# 'f': '\u03D5',
# 'p': '\u03C0',
# 'pi': '\u03C0',
# 'pw': '\uF361',
# 'pl': '\uF528',
# '+-': '\u00B1',
# "'": '\u2032',
# 'prod': '\u220F',
# 'prop': '\u221D',
# 'ps': '\u03C8',
# 'psi': '\u03C8',
# 'y': '\u03C8',
# 'rtm': '\u00AE',
# 'ret': '\u21B5',
# ' ret': '\uF766',
# '``': '\u2036',
# 'mem': '\u220B',
# '`': '\u2035',
# 'rv': '\u0159',
# 'r': '\u03C1',
# 'rho': '\u03C1',
# '>': '\u232A',
# ' ->': '\u2192',
# 'r|': '\uF604',
# 'rc': '\u2309',
# ']]': '\u301B',
# 'r||': '\uF606',
# 'rf': '\u230B',
# 'g>>': '\u00BB',
# ']': '\uF76C',
# 'rT': '\u22A2',
# 'vec': '\u21C0',
# '->': '\uF522',
# ':>': '\uF51F',
# ':-(': '\u2639',
# 'sa': '\u03E0',
# 'sampi': '\u03E0',
# 'sca': '\uF6B2',
# 'scb': '\uF6B3',
# 'scc': '\uF6B4',
# 'scA': '\uF770',
# 'scB': '\u212C',
# 'scC': '\uF772',
# 'scD': '\uF773',
# 'scE': '\u2130',
# 'scF': '\u2131',
# 'scG': '\uF776',
# 'scH': '\u210B',
# 'scI': '\u2110',
# 'scJ': '\uF779',
# 'scK': '\uF77A',
# 'scL': '\u2112',
# 'scM': '\u2133',
# 'scN': '\uF77D',
# 'scO': '\uF77E',
# 'scP': '\u2118',
# 'scQ': '\uF780',
# 'scR': '\u211B',
# 'scS': '\uF782',
# 'scT': '\uF783',
# 'scU': '\uF784',
# 'scV': '\uF785',
# 'scW': '\uF786',
# 'scX': '\uF787',
# 'scY': '\uF788',
# 'scZ': '\uF789',
# 'scd': '\uF6B5',
# 'sce': '\u212F',
# 'sc8': '\uF7F8',
# 'scf': '\uF6B7',
# 'sc5': '\uF7F5',
# 'sc4': '\uF7F4',
# 'scg': '\u210A',
# 'sch': '\uF6B9',
# 'sci': '\uF6BA',
# 'scj': '\uF6BB',
# 'sck': '\uF6BC',
# 'scl': '\u2113',
# 'scm': '\uF6BE',
# 'scn': '\uF6BF',
# 'sc9': '\uF7F9',
# 'sco': '\u2134',
# 'sc1': '\uF7F1',
# 'scp': '\uF6C1',
# 'scq': '\uF6C2',
# 'scr': '\uF6C3',
# 'scs': '\uF6C4',
# 'sc7': '\uF7F7',
# 'sc6': '\uF7F6',
# 'sct': '\uF6C5',
# 'sc3': '\uF7F3',
# 'sc2': '\uF7F2',
    # 'scu': '\uF6C6',
# 'scv': '\uF6C7',
# 'scw': '\uF6C8',
# 'scx': '\uF6C9',
# 'scy': '\uF6CA',
# 'scz': '\uF6CB',
# 'sc0': '\uF7F0',
# 'spl': '\uF527',
# 'sv': '\u0161',
# 's': '\u03C3',
# 'sigma': '\u03C3',
# '*6': '\u2736',
# 'sc': '\u2218',
# 'space': '\u2423',
# 'spc': '\uF7BF',
# 'sqrt': '\u221A',
# 'sq': '\uF520',
# 'star': '\u22C6',
# 'sti': '\u03DB',
# 'stigma': '\u03DB',
# 'sub': '\u2282',
# 'sub=': '\u2286',
# 'st': '\u220D',
# 'sum': '\u2211',
# 'sup': '\u2283',
# 'sup=': '\u2287',
# 'sz': '\u00DF',
# 'ss': '\u00DF',
# 'tab': '\uF7BE',
# 't': '\u03C4',
    # 'tau': '\u03C4',
# 'tv': '\u0165',
# 'tf': '\u2234',
# 'th': '\u03B8',
# 'theta': '\u03B8',
# 'q': '\u03B8',
# ' ': '\u2005',
# ' ': '\u2009',
# 'thn': '\u00FE',
# '~': '\u223C',
# '~=': '\u2243',
# '~==': '\u2245',
# '~~': '\u2248',
# '*': '\u00D7',
# 'tm': '\u2122',
# 'tr': '\uF3C7',
# "'": '\u00FA',
# "''": '\u0171',
# '"': '\u00FC',
# 'u`': '\u00F9',
# 'u^': '\u00FB',
# 'u{': '\uFE38',
# 'u[': '\u23B5',
# 'u(': '\uFE36',
# 'un': '\u22C3',
    # 'u': '\u03C5',
# 'upsilon': '\u03C5',
# 'uT': '\u22A5',
# 'uo': '\u016F',
# 'v': '\u22C1',
# ' |': '\u2223',
# 'vline': '\u2502',
# '|': '\uF432',
# ' ': '\u200A',
# '^': '\u22C0',
# 'wp': '\u2118',
# 'wf': '\uF720',
# 'wolf': '\uF720',
# 'x': '\u03BE',
# 'xi': '\u03BE',
# 'xnor': '\uF4A2',
# 'xor': '\u22BB',
# "y'": '\u00FD',
# 'z': '\u03B6',
# 'zeta': '\u03B6',
# 'zv': '\u017E',
}
"""
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <[email protected]>
This package is distributed under New BSD license.
"""
import numpy as np
import matplotlib.gridspec as gridspec
from smt.utils.neural_net.data import random_mini_batches
from smt.utils.neural_net.optimizer import Adam
from smt.utils.neural_net.activation import Tanh, Linear
from smt.utils.neural_net.bwd_prop import L_model_backward
from smt.utils.neural_net.fwd_prop import L_model_forward, L_grads_forward
from smt.utils.neural_net.loss import lse
from smt.utils.neural_net.metrics import rsquare
from smt.utils.neural_net.data import normalize_data, load_csv
# TODO: implement batch-norm (deeper networks might suffer from exploding/vanishing gradients during training)
# ------------------------------------ S U P P O R T F U N C T I O N S -----------------------------------------------
def initialize_parameters(layer_dims=None):
"""
Initialize neural network given topology using "He" initialization
    :param layer_dims: neural architecture [n_0, n_1, n_2, ..., n_L] where n = number of nodes, L = number of layers
    :return: parameters: dictionary containing the initialized neural net parameters:
                    parameters["Wl"]: matrix of weights associated with layer l,
                                      drawn from N(0, 1) and scaled by sqrt(1 / n_(l-1))
                    parameters["bl"]: vector of biases associated with layer l, initialized to zeros
    """
if not layer_dims:
raise Exception("Neural net does have any layers")
# Network topology
number_layers = len(layer_dims) - 1 # input layer doesn't count
# Parameters
parameters = {}
for l in range(1, number_layers + 1):
parameters["W" + str(l)] = np.random.randn(
layer_dims[l], layer_dims[l - 1]
) * np.sqrt(1.0 / layer_dims[l - 1])
parameters["b" + str(l)] = np.zeros((layer_dims[l], 1))
return parameters
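# Minimal usage sketch (illustrative only, not part of the original package): build
# parameters for a hypothetical network with 2 inputs, two hidden layers of 4 nodes
# each and 1 output, then report the shapes produced by the scaling above.
def _example_initialize_parameters():  # pragma: no cover
    params = initialize_parameters(layer_dims=[2, 4, 4, 1])
    # Expected shapes: W1 (4, 2), W2 (4, 4), W3 (1, 4); b1, b2 (4, 1); b3 (1, 1)
    return {name: value.shape for name, value in params.items()}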
# ------------------------------------ C L A S S -----------------------------------------------------------------------
class Model(object):
@property
def number_of_inputs(self):
return self._n_x
@property
def number_of_outputs(self):
return self._n_y
@property
def number_training_examples(self):
return self._m
@property
def layer_dims(self):
return self._layer_dims
@property
def activations(self):
return self._activations
@property
def parameters(self):
return self._parameters
@property
def training_history(self):
return self._training_history
@property
def scale_factors(self):
return self._scale_factors
@property
def training_data(self):
X = self._X_norm * self._scale_factors["x"][1] + self._scale_factors["x"][0]
Y = self._Y_norm * self._scale_factors["y"][1] + self._scale_factors["y"][0]
J = self._J_norm * self._scale_factors["y"][1] / self._scale_factors["x"][1]
return X, Y, J
def __init__(self, **kwargs):
self._parameters = dict()
self._layer_dims = list()
self._activations = list()
self._training_history = dict()
self._scale_factors = {"x": (1, 1), "y": (1, 1)}
self._X_norm = None
self._Y_norm = None
self._J_norm = None
self._n_x = None
self._n_y = None
self._m = None
self._caches = list()
self._J_caches = list()
for name, value in kwargs.items():
setattr(self, name, value)
@classmethod
def initialize(cls, n_x=None, n_y=None, deep=2, wide=12):
layer_dims = [n_x] + [wide] * deep + [n_y]
parameters = initialize_parameters(layer_dims)
activations = [Tanh()] * deep + [Linear()]
attributes = {
"_parameters": parameters,
"_activations": activations,
"_layer_dims": layer_dims,
"_n_x": n_x,
"_n_y": n_y,
}
return cls(**attributes)
def load_parameters(self, parameters):
L = len(parameters) // 2
deep = L - 1
wide = parameters["W1"].shape[0]
self._n_x = parameters["W1"].shape[1]
self._n_y = parameters["W" + str(L)].shape[0]
self._layer_dims = [self._n_x] + [wide] * deep + [self._n_y]
self._activations = [Tanh()] * deep + [Linear()]
self._parameters = parameters
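    # Note (illustrative): load_parameters infers the topology purely from the shapes
    # of the weight matrices. For example, a dict with keys W1, b1, W2, b2, W3, b3
    # gives L = 3, hence deep = 2 hidden Tanh layers, wide = W1.shape[0], and a final
    # Linear output layer -- the same structure Model.initialize() would have built.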
def train(
self,
X,
Y,
J=None,
num_iterations=100,
mini_batch_size=None,
num_epochs=1,
alpha=0.01,
beta1=0.9,
beta2=0.99,
lambd=0.0,
gamma=0.0,
seed=None,
silent=False,
):
"""
Train the neural network
:param X: matrix of shape (n_x, m) where n_x = no. of inputs, m = no. of training examples
:param Y: matrix of shape (n_y, m) where n_y = no. of outputs
:param J: tensor of size (n_y, n_x, m) representing the Jacobian: dY1/dX1 = J[0][0]
dY1/dX2 = J[0][1]
...
dY2/dX1 = J[1][0]
dY2/dX2 = J[1][1]
...
Note: to retrieve the i^th example for dY2/dX1: J[1][0][i] for all i = 1,...,m
        :param mini_batch_size: size of each mini-batch; None (default) uses the full training set as a single batch
:param num_epochs: number of random passes through the entire data set (usually only used with mini-batch)
:param alpha: learning rate
:param beta1: parameter for ADAM optimizer
:param beta2: parameter for ADAM optimizer
:param lambd: regularization parameter
:param gamma: gradient-enhancement parameter
:param num_iterations: maximum number of optimizer iterations (per mini batch)
:param seed: random seed in case user wants to ensure repeatability
:param silent: don't print anything
"""
self._load_training_data(X, Y, J)
if not mini_batch_size:
mini_batch_size = self.number_training_examples
if silent:
is_print = False
elif mini_batch_size != 1:
is_print = False
else:
is_print = True
for e in range(num_epochs):
self._training_history["epoch_" + str(e)] = dict()
mini_batches = random_mini_batches(
self._X_norm, self._Y_norm, self._J_norm, mini_batch_size, seed
)
for b, mini_batch in enumerate(mini_batches):
# Get training data from this mini-batch
X, Y, J = mini_batch
# Optimization (learn parameters by minimizing prediction error)
optimizer = Adam.initialize(
initial_guess=self._parameters,
cost_function=lambda p: self.cost(
p, self.activations, X, Y, J, lambd, gamma
),
grad_function=lambda p: self.grad(
p, self.activations, X, Y, J, lambd, gamma
),
learning_rate=alpha,
beta1=beta1,
beta2=beta2,
)
self._parameters = optimizer.optimize(
max_iter=num_iterations, is_print=is_print
)
# Compute average cost and print output
avg_cost = np.mean(optimizer.cost_history).squeeze()
self._training_history["epoch_" + str(e)][
"batch_" + str(b)
] = optimizer.cost_history
if not silent:
print(
"epoch = {:d}, mini-batch = {:d}, avg cost = {:6.3f}".format(
e, b, avg_cost
)
)
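    # Usage sketch (shapes as described in the docstring above): with X of shape
    # (n_x, m), Y of shape (n_y, m) and J of shape (n_y, n_x, m),
    #     model.train(X, Y, J, alpha=0.05, lambd=0.1, gamma=1.0, num_epochs=50)
    # runs full-batch Adam because mini_batch_size defaults to the whole data set;
    # per-iteration optimizer output is printed only when mini_batch_size == 1
    # and silent is False.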
def evaluate(self, X):
"""
Predict output(s) given inputs X.
:param X: inputs to neural network, np array of shape (n_x, m) where n_x = no. inputs, m = no. training examples
:return: Y: prediction, Y = np array of shape (n_y, m) where n_y = no. of outputs and m = no. of examples
"""
assert X.shape[0] == self.number_of_inputs
number_of_examples = X.shape[1]
mu_x, sigma_x = self._scale_factors["x"]
mu_y, sigma_y = self._scale_factors["y"]
X_norm = (X - mu_x) / sigma_x
Y_norm, _ = L_model_forward(X_norm, self.parameters, self.activations)
Y = (Y_norm * sigma_y + mu_y).reshape(
self.number_of_outputs, number_of_examples
)
return Y
def print_parameters(self):
"""
Print model parameters to screen for the user
"""
for key, value in self._parameters.items():
try:
print("{}: {}".format(key, str(value.tolist())))
except:
print("{}: {}".format(key, value))
def print_training_history(self):
"""
        Print the stored training history to screen for the user
"""
if self._training_history:
for epoch, batches in self._training_history.items():
for batch, history in batches.items():
for iteration, cost in enumerate(history):
print(
"{}, {}, iteration_{}, cost = {}".format(
epoch, batch, iteration, cost
)
)
def plot_training_history(self, title="Training History", is_show_plot=True):
"""
Plot the convergence history of the neural network learning algorithm
"""
import matplotlib.pyplot as plt
if self.training_history:
if len(self.training_history.keys()) > 1:
x_label = "epoch"
y_label = "avg cost"
y = []
for epoch, batches in self.training_history.items():
avg_costs = []
for batch, values in batches.items():
avg_cost = np.mean(np.array(values))
avg_costs.append(avg_cost)
y.append(np.mean(np.array(avg_costs)))
y = np.array(y)
x = np.arange(len(y))
elif len(self.training_history["epoch_0"]) > 1:
x_label = "mini-batch"
y_label = "avg cost"
y = []
for batch, values in self.training_history["epoch_0"].items():
avg_cost = np.mean(np.array(values))
y.append(avg_cost)
y = np.array(y)
x = np.arange(y.size)
else:
x_label = "optimizer iteration"
y_label = "cost"
y = np.array(self.training_history["epoch_0"]["batch_0"])
x = np.arange(y.size)
plt.plot(x, y)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
if is_show_plot:
plt.show()
def _load_training_data(self, X, Y, J=None):
"""
Load and normalize training data
:param X: matrix of shape (n_x, m) where n_x = no. of inputs, m = no. of training examples
:param Y: matrix of shape (n_y, m) where n_y = no. of outputs
:param J: tensor of size (n_y, n_x, m) representing the Jacobian: dY1/dX1 = J[0][0]
dY1/dX2 = J[0][1]
...
dY2/dX1 = J[1][0]
dY2/dX2 = J[1][1]
...
Note: to retrieve the i^th example for dY2/dX1: J[1][0][i] for all i = 1,...,m
"""
assert X.shape[1] == Y.shape[1]
assert Y.shape[0] == Y.shape[0]
assert X.shape[0] == self._n_x
assert Y.shape[0] == self._n_y
if J is not None:
assert X.shape[1] == J.shape[2]
assert X.shape[0] == J.shape[1]
X_norm, Y_norm, J_norm, mu_x, sigma_x, mu_y, sigma_y = normalize_data(X, Y, J)
self._X_norm = X_norm
self._Y_norm = Y_norm
self._J_norm = J_norm
self._scale_factors["x"] = (mu_x, sigma_x)
self._scale_factors["y"] = (mu_y, sigma_y)
self._n_x, self._m = X.shape
self._n_y = Y.shape[0]
def cost(
self,
parameters,
activations,
x,
y_true=None,
dy_true=None,
lambd=0.0,
gamma=0.0,
):
"""
Cost function for training
:param x:
:param parameters:
:param activations:
:param y_true:
:param dy_true:
:param lambd:
:param gamma:
:return:
"""
y_pred, caches = L_model_forward(x, parameters, activations)
dy_pred, dy_caches = L_grads_forward(x, parameters, activations)
w = [value for name, value in parameters.items() if "W" in name]
cost = lse(y_true, y_pred, lambd, w, dy_true, dy_pred, gamma)
return cost
def grad(
self,
parameters,
activations,
x,
y_true=None,
dy_true=None,
lambd=0.0,
gamma=0.0,
):
"""
Gradient of cost function for training
:param x:
:param parameters:
:param activations:
:param y_true:
:param dy_true:
:param lambd:
:param gamma:
:return:
"""
y_pred, caches = L_model_forward(x, parameters, activations)
dy_pred, dy_caches = L_grads_forward(x, parameters, activations)
grad = L_model_backward(
y_pred, y_true, dy_pred, dy_true, caches, dy_caches, lambd, gamma
)
return grad
def gradient(self, X):
"""
Predict output(s) given inputs X.
:param X: inputs to neural network, np array of shape (n_x, m) where n_x = no. inputs, m = no. training examples
:return: J: prediction, J = np array of shape (n_y, n_x, m) = Jacobian
"""
assert X.shape[0] == self.number_of_inputs
number_of_examples = X.shape[1]
mu_x, sigma_x = self._scale_factors["x"]
mu_y, sigma_y = self._scale_factors["y"]
X_norm = (X - mu_x) / sigma_x
Y_norm, _ = L_model_forward(X_norm, self.parameters, self.activations)
J_norm, _ = L_grads_forward(X_norm, self.parameters, self.activations)
J = (J_norm * sigma_y / sigma_x).reshape(
self.number_of_outputs, self.number_of_inputs, number_of_examples
)
return J
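    # Note (illustrative): the un-scaling above follows from the chain rule. With
    # X_norm = (X - mu_x) / sigma_x and Y = Y_norm * sigma_y + mu_y, the Jacobian in
    # original units is dY/dX = (sigma_y / sigma_x) * dY_norm/dX_norm, which is
    # exactly J = J_norm * sigma_y / sigma_x.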
def goodness_of_fit(self, X_test, Y_test, J_test=None, response=0, partial=0):
import matplotlib.pyplot as plt
assert X_test.shape[1] == Y_test.shape[1]
assert Y_test.shape[0] == Y_test.shape[0]
assert X_test.shape[0] == self.number_of_inputs
assert Y_test.shape[0] == self.number_of_outputs
if type(J_test) == np.ndarray:
assert X_test.shape[1] == J_test.shape[2]
assert X_test.shape[0] == J_test.shape[1]
number_test_examples = Y_test.shape[1]
Y_pred_test = self.evaluate(X_test)
J_pred_test = self.gradient(X_test)
X_train, Y_train, J_train = self.training_data
Y_pred_train = self.evaluate(X_train)
J_pred_train = self.gradient(X_train)
if type(J_test) == np.ndarray:
test = J_test[response, partial, :].reshape((1, number_test_examples))
test_pred = J_pred_test[response, partial, :].reshape(
(1, number_test_examples)
)
train = J_train[response, partial, :].reshape(
(1, self.number_training_examples)
)
train_pred = J_pred_train[response, partial, :].reshape(
(1, self.number_training_examples)
)
title = "Goodness of fit for dY" + str(response) + "/dX" + str(partial)
else:
test = Y_test[response, :].reshape((1, number_test_examples))
test_pred = Y_pred_test[response, :].reshape((1, number_test_examples))
train = Y_train[response, :].reshape((1, self.number_training_examples))
train_pred = Y_pred_train[response, :].reshape(
(1, self.number_training_examples)
)
title = "Goodness of fit for Y" + str(response)
metrics = dict()
metrics["R_squared"] = np.round(rsquare(test_pred, test), 2).squeeze()
metrics["std_error"] = np.round(
np.std(test_pred - test).reshape(1, 1), 2
).squeeze()
metrics["avg_error"] = np.round(
np.mean(test_pred - test).reshape(1, 1), 2
).squeeze()
# Reference line
y = np.linspace(
min(np.min(test), np.min(train)), max(np.max(test), np.max(train)), 100
)
# Prepare to plot
fig = plt.figure(figsize=(12, 6))
fig.suptitle(title, fontsize=16)
spec = gridspec.GridSpec(ncols=2, nrows=1, wspace=0.25)
# Plot
ax1 = fig.add_subplot(spec[0, 0])
ax1.plot(y, y)
ax1.scatter(test, test_pred, s=20, c="r")
ax1.scatter(train, train_pred, s=100, c="k", marker="+")
plt.legend(["perfect", "test", "train"])
plt.xlabel("actual")
plt.ylabel("predicted")
plt.title("RSquare = " + str(metrics["R_squared"]))
ax2 = fig.add_subplot(spec[0, 1])
error = (test_pred - test).T
weights = np.ones(error.shape) / test_pred.shape[1]
ax2.hist(error, weights=weights, facecolor="g", alpha=0.75)
plt.xlabel("Absolute Prediction Error")
plt.ylabel("Probability")
        plt.title(
            r"$\mu$="
            + str(metrics["avg_error"])
            + r", $\sigma=$"
            + str(metrics["std_error"])
        )
plt.grid(True)
plt.show()
return metrics
def run_example(
train_csv, test_csv, inputs, outputs, partials=None
): # pragma: no cover
"""
Example using 2D Rastrigin function (egg-crate-looking function)
    usage: run_example(train_csv='train_data.csv',
test_csv='train_data.csv',
inputs=["X[0]", "X[1]"],
outputs=["Y[0]"],
partials=[["J[0][0]", "J[0][1]"]])
:param train_csv: str, csv file name containing training data
:param test_csv: str, csv file name containing test data
:param inputs: list(str), csv column labels corresponding to inputs
:param outputs: list(str), csv column labels corresponding to outputs
    :param partials: list(list(str)), csv column labels corresponding to partials (one list per output)
"""
# Sample data
X_train, Y_train, J_train = load_csv(
file=train_csv, inputs=inputs, outputs=outputs, partials=partials
)
X_test, Y_test, J_test = load_csv(
file=test_csv, inputs=inputs, outputs=outputs, partials=partials
)
# Hyper-parameters
alpha = 0.05
beta1 = 0.90
beta2 = 0.99
lambd = 0.1
gamma = 1.0
deep = 2
wide = 12
mini_batch_size = None # None = use all data as one batch
num_iterations = 25
num_epochs = 50
# Training
model = Model.initialize(
n_x=X_train.shape[0], n_y=Y_train.shape[0], deep=deep, wide=wide
)
model.train(
X=X_train,
Y=Y_train,
J=J_train,
alpha=alpha,
lambd=lambd,
gamma=gamma,
beta1=beta1,
beta2=beta2,
mini_batch_size=mini_batch_size,
num_iterations=num_iterations,
num_epochs=num_epochs,
silent=False,
)
model.plot_training_history()
model.goodness_of_fit(
X_test, Y_test
) # model.goodness_of_fit(X_test, Y_test, J_test, partial=1)
if __name__ == "__main__": # pragma: no cover
run_example(
train_csv="train_data.csv",
test_csv="train_data.csv",
inputs=["X[0]", "X[1]"],
outputs=["Y[0]"],
partials=[["J[0][0]", "J[0][1]"]],
)
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internet Group Management Protocol(IGMP) packet parser/serializer
RFC 1112
IGMP v1 format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Version| Type | Unused | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
RFC 2236
IGMP v2 format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Max Resp Time | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
RFC 3376
IGMP v3 Membership Query format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type = 0x11 | Max Resp Code | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Group Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Resv |S| QRV | QQIC | Number of Sources (N) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address [1] |
+- -+
| Source Address [2] |
+- . -+
. . .
. . .
+- -+
| Source Address [N] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
IGMP v3 Membership Report format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type = 0x22 | Reserved | Checksum |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved | Number of Group Records (M) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [1] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [2] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| . |
. . .
| . |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Group Record [M] .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
where each Group Record has the following internal format:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Record Type | Aux Data Len | Number of Sources (N) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Multicast Address |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Address [1] |
+- -+
| Source Address [2] |
+- -+
. . .
. . .
. . .
+- -+
| Source Address [N] |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
. .
. Auxiliary Data .
. .
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
import struct
from ryu.lib import addrconv
from ryu.lib import stringify
from ryu.lib.packet import packet_base
from ryu.lib.packet import packet_utils
IGMP_TYPE_QUERY = 0x11
IGMP_TYPE_REPORT_V1 = 0x12
IGMP_TYPE_REPORT_V2 = 0x16
IGMP_TYPE_LEAVE = 0x17
IGMP_TYPE_REPORT_V3 = 0x22
QUERY_RESPONSE_INTERVAL = 10.0
LAST_MEMBER_QUERY_INTERVAL = 1.0
MULTICAST_IP_ALL_HOST = '224.0.0.1'
MULTICAST_MAC_ALL_HOST = '01:00:5e:00:00:01'
# for types of IGMPv3 Report Group Records
MODE_IS_INCLUDE = 1
MODE_IS_EXCLUDE = 2
CHANGE_TO_INCLUDE_MODE = 3
CHANGE_TO_EXCLUDE_MODE = 4
ALLOW_NEW_SOURCES = 5
BLOCK_OLD_SOURCES = 6
class igmp(packet_base.PacketBase):
"""
Internet Group Management Protocol(IGMP, RFC 1112, RFC 2236)
header encoder/decoder class.
http://www.ietf.org/rfc/rfc1112.txt
http://www.ietf.org/rfc/rfc2236.txt
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte
    order.
__init__ takes the corresponding args in this order.
=============== ====================================================
Attribute Description
=============== ====================================================
msgtype a message type for v2, or a combination of
version and a message type for v1.
    maxresp         max response time in unit of 1/10 second. It is
                    meaningful only in a Query Message.
csum a check sum value. 0 means automatically-calculate
when encoding.
address a group address value.
=============== ====================================================
"""
_PACK_STR = '!BBH4s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'address'
]
}
def __init__(self, msgtype=IGMP_TYPE_QUERY, maxresp=0, csum=0,
address='0.0.0.0'):
super(igmp, self).__init__()
self.msgtype = msgtype
self.maxresp = maxresp
self.csum = csum
self.address = address
@classmethod
def parser(cls, buf):
assert cls._MIN_LEN <= len(buf)
(msgtype, ) = struct.unpack_from('!B', buf)
if (IGMP_TYPE_QUERY == msgtype and
igmpv3_query.MIN_LEN <= len(buf)):
(instance, subclass, rest,) = igmpv3_query.parser(buf)
elif IGMP_TYPE_REPORT_V3 == msgtype:
(instance, subclass, rest,) = igmpv3_report.parser(buf)
else:
(msgtype, maxresp, csum, address
) = struct.unpack_from(cls._PACK_STR, buf)
instance = cls(msgtype, maxresp, csum,
addrconv.ipv4.bin_to_text(address))
subclass = None
rest = buf[cls._MIN_LEN:]
return instance, subclass, rest
def serialize(self, payload, prev):
hdr = bytearray(struct.pack(self._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address)))
if self.csum == 0:
self.csum = packet_utils.checksum(hdr)
struct.pack_into('!H', hdr, 2, self.csum)
return hdr
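# Illustrative round-trip sketch (not part of the Ryu API; values are made up). An
# 8-byte v1/v2 message built by the class above is shorter than igmpv3_query.MIN_LEN,
# so igmp.parser() falls through to the plain-IGMP branch and recovers the fields.
def _example_igmp_roundtrip():  # pragma: no cover
    query = igmp(msgtype=IGMP_TYPE_QUERY, maxresp=100, address='224.0.0.1')
    buf = query.serialize(payload=None, prev=None)
    parsed, _subclass, _rest = igmp.parser(bytes(buf))
    return parsed.address  # expected '224.0.0.1'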
class igmpv3_query(igmp):
"""
Internet Group Management Protocol(IGMP, RFC 3376)
Membership Query message encoder/decoder class.
http://www.ietf.org/rfc/rfc3376.txt
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte
    order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ====================================================
Attribute Description
=============== ====================================================
msgtype a message type for v3.
maxresp max response time in unit of 1/10 second.
csum a check sum value. 0 means automatically-calculate
when encoding.
address a group address value.
s_flg when set to 1, routers suppress the timer process.
qrv robustness variable for a querier.
qqic an interval time for a querier in unit of seconds.
num a number of the multicast servers.
srcs a list of IPv4 addresses of the multicast servers.
=============== ====================================================
"""
_PACK_STR = '!BBH4sBBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
MIN_LEN = _MIN_LEN
def __init__(self, msgtype=IGMP_TYPE_QUERY, maxresp=100, csum=0,
address='0.0.0.0', s_flg=0, qrv=2, qqic=0, num=0,
srcs=None):
super(igmpv3_query, self).__init__(
msgtype, maxresp, csum, address)
self.s_flg = s_flg
self.qrv = qrv
self.qqic = qqic
self.num = num
srcs = srcs or []
assert isinstance(srcs, list)
for src in srcs:
assert isinstance(src, str)
self.srcs = srcs
@classmethod
def parser(cls, buf):
(msgtype, maxresp, csum, address, s_qrv, qqic, num
) = struct.unpack_from(cls._PACK_STR, buf)
s_flg = (s_qrv >> 3) & 0b1
qrv = s_qrv & 0b111
offset = cls._MIN_LEN
srcs = []
while 0 < len(buf[offset:]) and num > len(srcs):
assert 4 <= len(buf[offset:])
(src, ) = struct.unpack_from('4s', buf, offset)
srcs.append(addrconv.ipv4.bin_to_text(src))
offset += 4
assert num == len(srcs)
return (cls(msgtype, maxresp, csum,
addrconv.ipv4.bin_to_text(address), s_flg, qrv,
qqic, num, srcs),
None,
buf[offset:])
def serialize(self, payload, prev):
s_qrv = self.s_flg << 3 | self.qrv
buf = bytearray(struct.pack(self._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
s_qrv, self.qqic, self.num))
for src in self.srcs:
buf.extend(struct.pack('4s', addrconv.ipv4.text_to_bin(src)))
if 0 == self.num:
self.num = len(self.srcs)
struct.pack_into('!H', buf, 10, self.num)
if 0 == self.csum:
self.csum = packet_utils.checksum(buf)
struct.pack_into('!H', buf, 2, self.csum)
return str(buf)
def __len__(self):
return self._MIN_LEN + len(self.srcs) * 4
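# Worked example (illustrative): in igmpv3_query.serialize() above, s_flg and qrv
# share one byte. With s_flg=1 and qrv=2, s_qrv = (1 << 3) | 2 = 0b1010 = 10; the
# parser reverses this with s_flg = (s_qrv >> 3) & 0b1 and qrv = s_qrv & 0b111.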
class igmpv3_report(igmp):
"""
Internet Group Management Protocol(IGMP, RFC 3376)
Membership Report message encoder/decoder class.
http://www.ietf.org/rfc/rfc3376.txt
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte
    order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ====================================================
Attribute Description
=============== ====================================================
msgtype a message type for v3.
csum a check sum value. 0 means automatically-calculate
when encoding.
record_num a number of the group records.
records a list of ryu.lib.packet.igmp.igmpv3_report_group.
None if no records.
=============== ====================================================
"""
_PACK_STR = '!BxH2xH'
_MIN_LEN = struct.calcsize(_PACK_STR)
_class_prefixes = ['igmpv3_report_group']
def __init__(self, msgtype=IGMP_TYPE_REPORT_V3, csum=0, record_num=0,
records=None):
self.msgtype = msgtype
self.csum = csum
self.record_num = record_num
records = records or []
assert isinstance(records, list)
for record in records:
assert isinstance(record, igmpv3_report_group)
self.records = records
@classmethod
def parser(cls, buf):
(msgtype, csum, record_num
) = struct.unpack_from(cls._PACK_STR, buf)
offset = cls._MIN_LEN
records = []
while 0 < len(buf[offset:]) and record_num > len(records):
record = igmpv3_report_group.parser(buf[offset:])
records.append(record)
offset += len(record)
assert record_num == len(records)
return (cls(msgtype, csum, record_num, records),
None,
buf[offset:])
def serialize(self, payload, prev):
buf = bytearray(struct.pack(self._PACK_STR, self.msgtype,
self.csum, self.record_num))
for record in self.records:
buf.extend(record.serialize())
if 0 == self.record_num:
self.record_num = len(self.records)
struct.pack_into('!H', buf, 6, self.record_num)
if 0 == self.csum:
self.csum = packet_utils.checksum(buf)
struct.pack_into('!H', buf, 2, self.csum)
return str(buf)
def __len__(self):
records_len = 0
for record in self.records:
records_len += len(record)
return self._MIN_LEN + records_len
class igmpv3_report_group(stringify.StringifyMixin):
"""
Internet Group Management Protocol(IGMP, RFC 3376)
Membership Report Group Record message encoder/decoder class.
http://www.ietf.org/rfc/rfc3376.txt
This is used with ryu.lib.packet.igmp.igmpv3_report.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte
    order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
=============== ====================================================
Attribute Description
=============== ====================================================
type\_ a group record type for v3.
aux_len the length of the auxiliary data.
num a number of the multicast servers.
address a group address value.
srcs a list of IPv4 addresses of the multicast servers.
aux the auxiliary data.
=============== ====================================================
"""
_PACK_STR = '!BBH4s'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, type_=0, aux_len=0, num=0, address='0.0.0.0',
srcs=None, aux=None):
self.type_ = type_
self.aux_len = aux_len
self.num = num
self.address = address
srcs = srcs or []
assert isinstance(srcs, list)
for src in srcs:
assert isinstance(src, str)
self.srcs = srcs
self.aux = aux
@classmethod
def parser(cls, buf):
(type_, aux_len, num, address
) = struct.unpack_from(cls._PACK_STR, buf)
offset = cls._MIN_LEN
srcs = []
while 0 < len(buf[offset:]) and num > len(srcs):
assert 4 <= len(buf[offset:])
(src, ) = struct.unpack_from('4s', buf, offset)
srcs.append(addrconv.ipv4.bin_to_text(src))
offset += 4
assert num == len(srcs)
aux = None
if aux_len:
(aux, ) = struct.unpack_from('%ds' % (aux_len * 4), buf, offset)
return cls(type_, aux_len, num,
addrconv.ipv4.bin_to_text(address), srcs, aux)
def serialize(self):
buf = bytearray(struct.pack(self._PACK_STR, self.type_,
self.aux_len, self.num,
addrconv.ipv4.text_to_bin(self.address)))
for src in self.srcs:
buf.extend(struct.pack('4s', addrconv.ipv4.text_to_bin(src)))
if 0 == self.num:
self.num = len(self.srcs)
struct.pack_into('!H', buf, 2, self.num)
if self.aux is not None:
mod = len(self.aux) % 4
if mod:
self.aux += bytearray(4 - mod)
self.aux = str(self.aux)
buf.extend(self.aux)
if 0 == self.aux_len:
                self.aux_len = len(self.aux) // 4
struct.pack_into('!B', buf, 1, self.aux_len)
return str(buf)
def __len__(self):
return self._MIN_LEN + len(self.srcs) * 4 + self.aux_len * 4
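# Worked example (illustrative): igmpv3_report_group.serialize() pads the auxiliary
# data to a multiple of 4 octets and stores its length in 32-bit words. A 6-byte aux
# value is padded to 8 bytes, so aux_len becomes 2 and __len__() grows by 2 * 4 = 8.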
|
|
'''
GPIB adapter
PROLOGIX GPIB-USB CONTROLLER
REV 6.4.1
http://prologix.biz/getfile?attachment_id=2
'''
from uvscada.aserial import ASerial
import serial
class Timeout(Exception):
pass
class ShortRead(Exception):
pass
'''
*********************************
GPIB
*********************************
In Controller and Device modes, characters received over USB port are aggregated in an
internal buffer and interpreted when a USB termination character - CR (ASCII 13) or
LF (ASCII 10) - is received. If CR, LF, ESC (ASCII 27), or '+' (ASCII 43) characters are
part of USB data they must be escaped by preceding them with an ESC character. All
un-escaped LF, CR and ESC and '+' characters in USB data are discarded.
Serial port parameters such as baud rate, data bits,
stop bits and flow control do not matter and may be set to any value
'''
class PUGpib:
def __init__(self, port="/dev/ttyUSB0", ser_timeout=1.0, gpib_timeout=0.9, addr=5, clr=True, eos=0):
self.port = port
self.addr = addr
self.ser = ASerial(port,
# They claim this parameter is ignored
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
rtscts=False,
dsrdtr=False,
xonxoff=False,
timeout=ser_timeout,
# Blocking writes
writeTimeout=None)
self.bin = False
# Clear any previous partial command
#self.send_str('')
# Clear any data laying around
self.ser.flushInput()
self.ser.flushOutput()
self.set_addr(addr)
if clr:
self.send_str('++clr')
# Will generate a bunch of interrupted errors if you don't set this (default 1)
self.send_str('++auto 0')
'''
++eos 0 Append CR+LF to instrument commands (appears to be default)
++eos 1 Append CR to instrument commands
++eos 2 Append LF to instrument commands
++eos 3 Do not append anything to instrument commands
++eos Query current EOS state
'''
self.send_str('++eos %d' % eos)
self.send_str('++read_tmo_ms %d' % (gpib_timeout * 1000,))
# Make sure simple queries work
self.version()
def set_addr(self, addr):
self.addr = addr
self.send_str("++addr %d" % (self.addr,))
def interface(self):
return "GPIB @ %s" % (self.port,)
def bin_mode(self):
self.bin = True
# disable cr/lf
self.send_str("++eos 3")
#self.send_str("++eot_enable 1")
# default: 0
#self.send_str("++eot_char 0")
def snd(self, *args, **kwargs):
self.send_str(*args, **kwargs)
def send_str(self, s):
#dbg('Sending "%s"' % (s))
'''
With EOT on should not be needed
for c in '\r\n+\x1b':
s = s.replace(c, '\x1b' + c)
'''
'''
Special care must be taken when sending binary data to instruments. If any of the
following characters occur in the binary data -- CR (ASCII 13 0x0D), LF (ASCII 10 0x0A), ESC
(ASCII 27 0x1B), '+' (ASCII 43 0x2B) - they must be escaped by preceding them with an ESC
character
'''
if self.bin:
for c in '\x1b\x2b\x0d\x0a':
s = s.replace(c, '\x1b' + c)
self.ser.writea(s + "\n")
# FIXME: flow control deadlock can get us stuck here
# need some sort of timeout mechanism
self.ser.flush()
def rcv(self, *args, **kwargs):
return self.recv_str(*args, **kwargs)
def recv_str(self, l=1024, empty=False, short=True):
self.ser.writea('++read eoi\n')
self.ser.flush()
if self.bin:
print("read() begin")
s = self.ser.reada(l)
else:
print("readline() begin")
s = self.ser.readlinea()
assert type(s) is str, type(s)
if not s and not empty:
raise Timeout('Failed recv any bytes')
if self.bin and short and len(s) != l:
raise ShortRead()
if not self.bin:
s = s.rstrip()
#print 'DBG: received "%s"' % (s)
return s
'''
You can set the GPIB address from the front panel only.
++read_tmo_ms 3000
++addr 5
*RST
SYSTEM:VERSION?
SYSTEM:ERROR?
++read eoi
++read 10
-410: Query INTERRUPTED
A command was received which sends data to the output buffer, but the output buffer contained data
from a previous command (the previous data is not overwritten). The output buffer is cleared when
power has been turned off, or after a *RST (reset) command has been executed.
'''
def snd_rcv(self, *args, **kwargs):
return self.sendrecv_str(*args, **kwargs)
def sendrecv_str(self, s, l=1024, empty=False, short=True):
self.send_str(s)
return self.recv_str(l=l, empty=empty, short=short)
def sendrecv_astr(self, s, empty=False):
'''Send receive adapter string. No ++read is required'''
self.send_str(s)
# wait for response line
s = self.ser.readlinea()
if not s and not empty:
raise Timeout('Failed recv')
s = s.rstrip()
#print 'received "%s"' % (s)
return s
def version(self):
return self.sendrecv_astr('++ver')
def dump_config(self):
'''
        Having problems with a few GPIB adapters, reviewing all NVM to see what is different
If enabled, the following configuration parameters are saved whenever they are
updated - mode, addr, auto, eoi, eos, eot_enable, eot_char and read_tmo_ms.
'''
print('versions: %s' % self.version())
print('versions: %s' % self.sendrecv_astr("++ver"))
for cmd in ('mode', 'addr', 'auto', 'eoi', 'eos', 'eot_enable', 'eot_char', 'read_tmo_ms'):
print('%s: %s' % (cmd, self.sendrecv_astr("++%s" % cmd)))
def local(self):
self.send_str('++loc')
'''
only works as device
really want below
def status(self):
return self.snd_rcv('++status')
'''
def spoll(self):
return int(self.snd_rcv('++spoll'))
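# Hypothetical usage sketch (port, GPIB address and the SCPI query are examples):
#   gpib = PUGpib(port='/dev/ttyUSB0', addr=5)
#   print(gpib.sendrecv_str('*IDN?'))   # query the instrument at GPIB address 5
#   gpib.local()                        # return the instrument to local control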
|
|
### BEGIN LICENSE ###
### Use of the triage tools and related source code is subject to the terms
### of the license below.
###
### ------------------------------------------------------------------------
### Copyright (C) 2011 Carnegie Mellon University. All Rights Reserved.
### Portions Copyright 2013 BlackBerry Ltd. All Rights Reserved.
### ------------------------------------------------------------------------
### Redistribution and use in source and binary forms, with or without
### modification, are permitted provided that the following conditions are
### met:
###
### 1. Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following acknowledgments
### and disclaimers.
###
### 2. Redistributions in binary form must reproduce the above copyright
### notice, this list of conditions and the following disclaimer in the
### documentation and/or other materials provided with the distribution.
###
### 3. The names "Department of Homeland Security," "Carnegie Mellon
### University," "CERT" and/or "Software Engineering Institute" shall
### not be used to endorse or promote products derived from this software
### without prior written permission. For written permission, please
### contact [email protected].
###
### 4. Products derived from this software may not be called "CERT" nor
### may "CERT" appear in their names without prior written permission of
### [email protected].
###
### 5. Redistributions of any form whatsoever must retain the following
### acknowledgment:
###
### "This product includes software developed by CERT with funding
### and support from the Department of Homeland Security under
### Contract No. FA 8721-05-C-0003."
###
### THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
### CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
### EXPRESS OR IMPLIED, AS TO ANY MATTER, AND ALL SUCH WARRANTIES, INCLUDING
### WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
### EXPRESSLY DISCLAIMED. WITHOUT LIMITING THE GENERALITY OF THE FOREGOING,
### CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND
### RELATING TO EXCLUSIVITY, INFORMATIONAL CONTENT, ERROR-FREE OPERATION,
### RESULTS TO BE OBTAINED FROM USE, FREEDOM FROM PATENT, TRADEMARK AND
### COPYRIGHT INFRINGEMENT AND/OR FREEDOM FROM THEFT OF TRADE SECRETS.
### END LICENSE ###
'''
A collection of objects used to classify GDB Inferiors (Targets).
'''
import copy
import warnings, traceback
from functools import partial
import sys
import lib.rules as rules
from lib.tools import AttrDict
class Tag(object):
'''
A Tag used for classification. A Tag is a partial description of the
state of a GDB Inferior. Tags can be ordered by ranking.
'''
def __init__(self, tag_dict):
self.__dict__ = tag_dict
# for python3
def __lt__(self, other):
if type(other) != type(self):
raise TypeError("cannot compare type %s to type %s" % \
(type(other),type(self)))
return self.ranking[0] < other.ranking[0]
def __cmp__(self, other):
if not issubclass(type(other), type(self)):
raise TypeError("cannot compare type {} to type {}".format(type(other), type(self)))
return self.ranking[0] - other.ranking[0]
def __str__(self):
return "{0} ({1[0]:d}/{1[1]:d})".format(self.short_desc, self.ranking)
class Classification(AttrDict):
'''
A classification of a state of a GDB Inferior. Describes how exploitable
the current state of the GDB Inferior is.
An instance of this object is returned by a Classifier.
'''
def __init__(self, target):
AttrDict.__init__(self)
        self.target = target
        self.tags = []
def __add__(self, tag):
if not issubclass(type(tag), Tag):
raise TypeError("cannot add type {} to type {}".format(type(tag), type(self)))
self.tags.append(tag)
self.tags.sort()
for k, v in self.tags[0].__dict__.items():
self[k] = v
return self
# for python3
def __lt__(self, other):
if not issubclass(type(other), type(self)):
raise TypeError("cannot compare type {} to type {}".format(type(other), type(self)))
if len(self.tags) == 0 or len(other.tags) == 0:
return len(self.tags) < len(other.tags)
i = 0
while i < len(self.tags) and i < len(other.tags):
if self.tags[i] < other.tags[i]:
return True
i += 1
return False
def __cmp__(self, other):
if not issubclass(type(other), type(self)):
raise TypeError("cannot compare type {} to type {}".format(type(other), type(self)))
if len(self.tags) == 0 or len(other.tags) == 0:
return len(self.tags) - len(other.tags)
i = 0
while i < len(self.tags) and i < len(other.tags):
result = cmp(self.tags[i], other.tags[i])
if result:
return result
i += 1
return result
def __str__(self):
if not self.tags:
return "No matches"
result = ["Description: {}".format(self.desc),
"Short description: {}".format(self.tags[0]),
"Hash: {}.{}".format(self.hash.major, self.hash.minor),
"Exploitability Classification: {}".format(self.category),
"Explanation: {}".format(self.explanation)]
if len(self.tags) > 1:
result.append("Other tags: {}".format(
", ".join(str(r) for r in self.tags[1:])))
result.append("")
return "\n".join(result)
    def getMachineString(self):
'''
Returns a machine-parsable string representation of this
Classification.
NOTE: This code was designed by a specific user and hasn't
been fully tested since it was contributed.
'''
if not self.tags:
return "No matches"
result = []
#result.append("IDENTITY:{:s}")
result.append("PROCESSOR:{}".format(self.target.arch.upper())) # X86/X64/ARM/UNKNOWN
#result.append("CLASS:{:s}" # KERNEL/USER)
#result.append("QUALIFIER:{:s}") # KERNEL_PROCESS/KERNEL_PROCESS_REMOTE/
# KERNEL_SMALL_DUMP/KERNEL_DUMP/KERNEL_FULL_DUMP/
# USER_PROCESS/USER_PROCESS_REMOTE/
# USER_SMALL_DUMP/USER_DUMP
#result.append("CLASS:UNINITIALIZED") # debugger is uninit
#result.append("EVENT:{:s}") # DEBUG_EVENT_*
# BREAKPOINT, EXCEPTION, CREATE_THREAD,
# EXIT_THREAD, CREATE_PROCESS, EXIT_PROCESS,
# LOAD_MODULE, UNLOAD_MODULE, SYSTEM_ERROR
#result.append("EXCEPTION_FAULTING_ADDRESS:{:#16.16X}")
#result.append("EXCEPTION_CODE:{:#X}")
#result.append("EXCEPTION_LEVEL:{:s}") # FIRST_CHANCE/SECOND_CHANCE
#result.append("EXCEPTION_TYPE:{:s}")
#result.append("EXCEPTION_SUBTYPE:{:s}") # READ/WRITE/DEP
try:
result.append("FAULTING_INSTRUCTION:{}".format(str(self.target.current_instruction()).split(":\t")[1].strip()))
except IndexError:
result.append("FAULTING_INSTRUCTION:?")
result.append("MAJOR_HASH:{}".format(self.hash.major))
result.append("MINOR_HASH:{}".format(self.hash.minor))
bt_result = ["STACK_FRAME:{}".format(i.terse()) for i in self.target.backtrace() if i.type() != 2]
result.append("STACK_DEPTH:{}".format(len(bt_result)))
result.extend(bt_result)
result.append("INSTRUCTION_ADDRESS:{:#016x}".format(self.target.pc()))
try:
result.append("INVOKING_STACK_FRAME:{}".format(self.target.faulting_frame().position))
except AttributeError:
result.append("INVOKING_STACK_FRAME:?")
result.append("DESCRIPTION:{}".format(self.desc))
result.append("SHORT_DESCRIPTION:{}".format(self.tags[0]))
if len(self.tags) > 1:
result.append("OTHER_RULES:{}".format(", ".join(str(t) for t in self.tags[1:])))
result.append("CLASSIFICATION:{}".format(self.category))
#result.append("BUG_TITLE:{} - {} starting at {} (Hash={:#08x}.{:#08x})".format(
# " ".join(w.capitalize() for w in self.category.split("_")),
# self.desc, addr, self.hash.major, self.hash.minor))
result.append("EXPLANATION:{}".format(self.explanation))
#result.append("URL:{}")
result.append("")
return "\n".join(result)
class Classifier(object):
'''
A Classifier used for classifying the state of a Target (a Linux GDB
Inferior).
'''
_major_hash_depth = 5
def getRules(self, target):
'''
Organizes the nested list of rules (dicts) for classification
The rules specified in rules.py are organized into AttrDicts ("rules").
Each rule is composed of a tag and a match_function.
'''
processed_rules = []
num_rules = sum(len(rl) for (_, rl) in rules.rules)
ranking = 1
for cat, user_rule_list in rules.rules:
for user_rule in user_rule_list:
match_function = partial(getattr(target.analyzer, user_rule["match_function"]))
tag_data = copy.deepcopy(user_rule)
del tag_data["match_function"]
tag_data["ranking"] = (ranking, num_rules)
tag_data["category"] = cat
rule = AttrDict(matches=match_function, tag=Tag(tag_data))
processed_rules.append(rule)
ranking += 1
return processed_rules
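    # Example of a processed rule (field values invented for illustration): each
    # entry behaves like
    #     AttrDict(matches=<bound analyzer match function>,
    #              tag=Tag({'short_desc': 'ReturnAv', 'ranking': (3, 25),
    #                       'category': 'EXPLOITABLE', ...}))
    # so getClassification() only needs to call rule.matches() and add rule.tag.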
def getClassification(self, target):
'''
Returns the Classification of target, which is a Classification of the
exploitability of a Linux GDB Inferior.
'''
c = Classification(target)
for rule in self.getRules(target):
try:
match = rule.matches()
if match:
c += rule.tag
except Exception as e:
warnings.warn("Error while analyzing rule {}: {}\n".format(
rule.tag, e))
c.hash = target.hash()
c['tags'] = [str(t) for t in c.tags]
return c
|
|
#!/usr/bin/env python
import csv
import gzip
import json
import logging
import os
import re
import shutil
import socket
import sys
import tempfile
from collections import defaultdict
from datetime import datetime
from operator import attrgetter
import boto3
REGION_NAMES = {
"US East (N. Virginia)": "us-east-1",
"US West (N. California)": "us-west-1",
"US West (Oregon)": "us-west-2",
"EU (Ireland)": "eu-west-1",
"EU (Frankfurt)": "eu-central-1",
"Asia Pacific (Tokyo)": "ap-northeast-1",
"Asia Pacific (Seoul)": "ap-northeast-2",
"Asia Pacific (Singapore)": "ap-southeast-1",
"Asia Pacific (Sydney)": "ap-southeast-2",
"South America (Sao Paulo)": "sa-east-1",
}
EBS_TYPES = {
"Magnetic": "standard",
"General Purpose": "gp2",
"Provisioned IOPS": "io1",
"Unknown Storage": "unknown"
}
# As of 2016-09-01, the hourly billing report doesn't have data in the
# 'product/volumeType' column for RDS storage anymore. We have to check
# for a substring of 'lineItem/LineItemDescription' instead.
RDS_STORAGE_TYPES = {
"Provisioned IOPS Storage": "io1",
"provisioned GP2 storage": "gp2",
}
def parse_datetime(timestamp):
"""Parses a timestamp in the format 2006-01-02T15:04:05Z."""
# This way is about 31x faster than arrow.get()
# and 6.5x faster than datetime.strptime()
year = int(timestamp[0:4])
month = int(timestamp[5:7])
day = int(timestamp[8:10])
hour = int(timestamp[11:13])
minute = int(timestamp[14:16])
second = int(timestamp[17:19])
return datetime(year, month, day, hour, minute, second)
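# Hedged self-check sketch for parse_datetime() (the sample timestamp is made up);
# wrapped in a function so nothing runs at import time.
def _example_parse_datetime():  # pragma: no cover
    parsed = parse_datetime("2016-09-01T15:04:05Z")
    assert parsed == datetime(2016, 9, 1, 15, 4, 5)
    return parsed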
def open_csv(tempdir, region_name):
"""Opens the latest hourly billing CSV file. Returns an open file object.
Depending on the AWSBILL_REPORT_PATH environment variable,
this may involve
downloading from S3, or it may just open a local file."""
report_path = os.getenv("AWSBILL_REPORT_PATH")
if report_path.startswith("file://"):
csv_path = report_path[len("file://"):]
elif report_path.startswith("s3://"):
csv_path = download_latest_from_s3(report_path, tempdir, region_name)
else:
raise ValueError("AWSBILL_REPORT_PATH environment variable must start with 'file://' or 's3://'") # noqa
return open(csv_path)
def open_output():
"""Opens the file-like object that will be used for output, and returns it.
Depending on the AWSBILL_GRAPHITE_HOST environment variable,
writes to this object may be sent to a Graphite
server or they may be written to stdout."""
output_host = os.getenv("AWSBILL_GRAPHITE_HOST")
if output_host is None:
raise ValueError("AWSBILL_GRAPHITE_HOST environment variable must specify the output destination; you may use 'stdout' to print metrics to stdout") # noqa
elif output_host == "stdout":
output_file = sys.stdout
else:
output_port = 2003
if ":" in output_host:
output_port = int(output_host.split(":", 1)[1])
output_host = output_host.split(":", 1)[0]
output_file = SocketWriter(output_host, output_port)
return output_file
def s3_primary_manifests(objects):
"""Returns the S3 object(s) corresponding to the relevant primary manifests
The relevant ones are considered to be the second-most- and most recent
ones, and they are returned in that order. If there are no billing
cycles older than the most recent, we return a single-element list with
only the most recent manifest.
`objects` should be an iterable of S3 objects."""
# The path to the billing report manifest is like this:
#
# <bucket>/<configured prefix>/hourly_billing/<YYYYmmdd>-<YYYYmmdd>/hourly_billing-Manifest.json # noqa
#
# We look for the most recent timestamp directory and use the manifest
# therein to find the most recent billing CSV.
manifests = [o for o in objects if o.key.endswith("Manifest.json")]
# Filter to those from the second-most- and most recent billing cycle
manifests.sort(key=attrgetter("key"), reverse=True)
cycles = set([])
for m in manifests:
        rslt = re.search(r"/(\d{8}-\d{8})/", m.key)
if rslt is not None:
cycles.add(rslt.group(1))
if len(cycles) == 0:
raise Exception("Failed to find any appropriately-named billing CSVs")
last_two_cycles = sorted(list(cycles))[-2:]
if len(last_two_cycles) < 2:
last_two_cycles = 2 * last_two_cycles
manifests = [m for m in manifests if
last_two_cycles[0] in m.key or last_two_cycles[1] in m.key]
# The primary manifest(s) will be the one(s) with the shortest path length
manifests.sort(key=lambda a: len(a.key))
if last_two_cycles[0] == last_two_cycles[1]:
# There was only one billing cycle present among the manifests
return [manifests[0]]
return [manifests[1], manifests[0]]
def download_latest_from_s3(s3_path, tempdir, region_name):
"""Puts the latest hourly billing report from the given S3 path in a local
file.
Returns the path to that file."""
s3 = boto3.resource("s3", region_name=region_name)
bucket = s3.Bucket(s3_path.split("/")[2])
primaries = s3_primary_manifests(bucket.objects.all())
logging.info("Using primary manifest(s) {0}".format(
[p.key for p in primaries]
)
)
# Now we parse the manifest to get the path to the latest billing CSV
s3_csvs = []
for pri in primaries:
manifest = json.loads(pri.get()['Body'].read())
s3_csvs.extend(manifest["reportKeys"])
# Download each billing CSV to a temp directory and decompress
try:
cat_csv_path = os.path.join(tempdir, "billing_full.csv")
cat_csv = open(cat_csv_path, "w")
header_written = False
for s3_csv in s3_csvs:
logging.info("Downloading CSV from S3: {0}".format(s3_csv))
local_path = os.path.join(tempdir, s3_csv.split("/")[-1])
local_file = open(local_path, "w")
obj = [o for o in bucket.objects.filter(Prefix=s3_csv)][0]
local_file.write(obj.get()['Body'].read())
local_file.close()
logging.info("Decompressing CSV: {0}".format(s3_csv))
with gzip.open(local_path, "r") as f:
for line in f:
if line.startswith(
"identity/LineItemId,"
) and header_written:
continue
cat_csv.write(line)
header_written = True
# Remove these files as we finish with them to save on disk space
os.unlink(local_path)
except Exception, e:
logging.error(
"Exception: cleaning up by removing temp directory '{0}'".format(
tempdir
)
)
shutil.rmtree(tempdir)
raise e
cat_csv.close()
return cat_csv_path
class SocketWriter(object):
"""Wraps a socket object with a file-like write() method."""
def __init__(self, host, port):
self.host = host
self.port = port
self._sock = None
def write(self, data):
if self._sock is None:
logging.info("Connecting to Graphite server at {0}:{1}".format(
self.host,
self.port
)
)
self._sock = socket.create_connection((self.host, self.port))
return self._sock.send(data)
class MetricLedger(object):
"""Processes Row instances and generates timeseries data from them."""
def __init__(self, timeseries_patterns):
"""Initializes the MetricLedger with alist of TimeseriesPattern
objects."""
self._patterns = timeseries_patterns
self._timeseries = defaultdict(lambda: defaultdict(float))
def process(self, row):
"""Adds the data from the given Row object to any appropriate
timeseries."""
# Skip entries of the wrong type
if row.content["lineItem/LineItemType"] != "Usage":
return
# Skip non-hourly entries
if row.interval() != 3600:
return
for pat in self._patterns:
if pat.match(row):
for metric in pat.metric_names(row):
self._timeseries[metric][row.end_time()] += row.amount()
def output(self, output_file):
formatter = MetricFormatter()
logging.info("Writing metrics to timeseries database")
for ts_id, ts in self._timeseries.iteritems():
for timestamp, value in ts.iteritems():
output_file.write(formatter.format(ts_id, timestamp, value))
logging.info("Finished writing %d metrics to timeseries database", len(self._timeseries))
def get_timeseries(self):
"""Returns self._timeseries (for tests)."""
return self._timeseries
class MetricFormatter(object):
"""Converts CSV data to Graphite format."""
def __init__(self):
self._initial_pieces = []
if os.getenv("AWSBILL_METRIC_PREFIX") != "":
self._initial_pieces = [os.getenv("AWSBILL_METRIC_PREFIX")]
else:
self._initial_pieces = ["awsbill"]
def format(self, ts_id, timestamp, value):
"""Returns the Graphite line that corresponds to the given timeseries
ID, timestamp, and value."""
pieces = [p for p in self._initial_pieces]
pieces.append(ts_id)
metric_name = ".".join(pieces)
return "{0} {1:04f} {2}\n".format(
metric_name,
value,
timestamp.strftime('%s')
)
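# Illustrative sketch (metric name and value are invented): what one Graphite line
# looks like. The leading prefix depends on AWSBILL_METRIC_PREFIX and the epoch
# value on the local timezone, since format() uses strftime('%s').
def _example_metric_line():  # pragma: no cover
    formatter = MetricFormatter()
    # e.g. "awsbill.us-east-1.ebs.piops 1.250000 1472688000\n"
    return formatter.format("us-east-1.ebs.piops", datetime(2016, 9, 1), 1.25)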
class TimeseriesPattern(object):
"""Describes a set of time series to be generated from the billing data.
This is an abstract class. Provide an implementation of the match() and
metric_name() methods."""
def match(self, row):
"""Determines whether the given Row instance matches the timeseries
pattern.
Returns True if so."""
raise NotImplementedError("This is an abstract class")
def metric_names(self, row):
"""Returns the names of the metrics to which the given row's amount()
value should be added.
We assume that match() has been called on the row already, and
returned True."""
raise NotImplementedError("This is an abstract class")
class TsInstanceType(TimeseriesPattern):
"""Describes per-EC2-instance-type Graphite metrics."""
def match(self, row):
        if row.usage_type():
            return row.usage_type().startswith("ec2-instance.")
        return False
def metric_names(self, row):
return [".".join((row.region(), row.usage_type()))]
class TsEbsStorage(TimeseriesPattern):
"""Describes per-volume-type EBS storage metric."""
def match(self, row):
return row.usage_type().startswith("ebs.storage.")
def metric_names(self, row):
return [".".join((row.region(), row.usage_type()))]
class TsEbsPiops(TimeseriesPattern):
"""Describes the metric for PIOPS-month costs."""
def match(self, row):
return row.usage_type() == "ebs.piops"
def metric_names(self, row):
return [".".join((row.region(), "ebs.piops"))]
class TsEbsIops(TimeseriesPattern):
"""Describes the metric for IOPS costs."""
def match(self, row):
return row.usage_type() == "ebs.iops"
def metric_names(self, row):
return [".".join((row.region(), "ebs.iops"))]
class TsEbsSnapshot(TimeseriesPattern):
"""Describes the metric for EBS snapshot costs."""
def match(self, row):
return row.usage_type() == "ebs.snapshot"
def metric_names(self, row):
return [".".join((row.region(), "ebs.snapshot"))]
class TsRdsInstanceType(TimeseriesPattern):
"""Describes per-RDS-instance-type Graphite metrics."""
def match(self, row):
return (row.usage_type().startswith("rds-instance."))
def metric_names(self, row):
return [".".join((row.region(), row.usage_type()))]
class TsRdsStorage(TimeseriesPattern):
"""Describes per-volume-type RDS storage metric."""
def match(self, row):
return row.usage_type().startswith("rds.storage.")
def metric_names(self, row):
return [".".join((row.region(), row.usage_type()))]
class TsRdsPiops(TimeseriesPattern):
"""Describes the metric for RDS PIOPS-month costs."""
def match(self, row):
return row.usage_type() == "rds.piops"
def metric_names(self, row):
return [".".join((row.region(), "rds.piops"))]
class TsElasticacheInstanceType(TimeseriesPattern):
"""Describes per-ElastiCache-instance-type Graphite metrics."""
def match(self, row):
return (row.usage_type().startswith("elasticache-instance."))
def metric_names(self, row):
return [".".join((row.region(), row.usage_type()))]
class TsRegionTotal(TimeseriesPattern):
"""Describes a Graphite metric containing the sum of all hourly costs per
region.
This includes costs that we don't explicitly recognize and break out
into individual metrics. Any cost that shows up in the billing report
will go into this metric."""
def match(self, row):
return True
def metric_names(self, row):
return ["total-cost.{0}".format(row.region())]
class Row(object):
__slots__ = ["content", "_usage_type"]
def __init__(self, col_names, row_list):
"""Initializes a Row object, given the names of the CSV columns and
their values."""
self.content = dict(zip(col_names, row_list))
self._usage_type = None
def region(self):
"""Returns the normalized AWS region for the row, or 'noregion'.
Normalized region names are like 'us-east-2', 'ap-northeast-1'."""
if self.content["product/location"] in REGION_NAMES:
# Most services have product/location set
return REGION_NAMES[self.content["product/location"]]
elif self.content["lineItem/AvailabilityZone"] and \
self.content["lineItem/AvailabilityZone"][-1] in "1234567890":
# Some services, e.g. ElastiCache, use lineItem/AvailabilityZone
# instead
return self.content["lineItem/AvailabilityZone"]
return "noregion"
def interval(self):
"""Returns the length of the time interval to which this row
correpsonds, in seconds."""
start, end = [parse_datetime(x) for x in
self.content["identity/TimeInterval"].split("/", 1)]
return int((end - start).total_seconds())
def usage_type(self):
"""Parses the "lineItem/UsageType" field to get at the "subtype"
(my term).
Usage types can be of many forms. Here are some examples:
USE1-USW2-AWS-In-Bytes
Requests-RBP
Request
APN1-DataProcessing-Bytes
APN1-BoxUsage:c3.2xlarge
It's a goddamn nightmare. We try our best. Then we return the name
of the subtype, in the format in which it'll appear in the Graphite
metric.
Examples of usage types are:
ec2-instance.c3-2xlarge
ebs.storage.io1
ebs.piops
rds-instance.db-r3-large
This method returns the empty string if the usage type isn't
known."""
if self._usage_type is not None:
return self._usage_type
splut = self.content["lineItem/UsageType"].split("-", 1)
if len(splut[0]) == 4 and splut[0][0:2] in (
"US",
"EU",
"AP",
"SA"
) and splut[0].isupper() and splut[0][3].isdigit():
# Stuff before dash was probably a region code like "APN1"
csv_usage_type = splut[1]
else:
csv_usage_type = splut[0]
self._usage_type = ""
# EC2
if csv_usage_type.startswith("BoxUsage:"):
self._usage_type = self._usage_type_ec2_instance()
if csv_usage_type == "EBS:VolumeP-IOPS.piops":
self._usage_type = "ebs.piops"
if csv_usage_type.startswith("EBS:VolumeUsage"):
self._usage_type = self._usage_type_ebs_storage()
if csv_usage_type == "EBS:VolumeIOUsage":
self._usage_type = "ebs.iops"
if csv_usage_type == "EBS:SnapshotUsage":
self._usage_type = "ebs.snapshot"
# RDS
if csv_usage_type.startswith("InstanceUsage:") or \
csv_usage_type.startswith("Multi-AZUsage:"):
self._usage_type = self._usage_type_rds_instance()
if csv_usage_type == "RDS:PIOPS" or \
csv_usage_type == "RDS:Multi-AZ-PIOPS":
self._usage_type = "rds.piops"
if csv_usage_type.startswith("RDS:") and \
csv_usage_type.endswith("Storage"):
self._usage_type = self._usage_type_rds_storage()
# ElastiCache
if csv_usage_type.startswith("NodeUsage:"):
self._usage_type = self._usage_type_elasticache_instance()
return self._usage_type
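    # Worked example (illustrative): for lineItem/UsageType "APN1-BoxUsage:c3.2xlarge",
    # the "APN1" region code is stripped, "BoxUsage:" marks an EC2 instance, and
    # _usage_type_ec2_instance() returns "ec2-instance.c3-2xlarge".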
def _usage_type_ec2_instance(self):
splut = self.content["lineItem/UsageType"].split(":", 1)
if len(splut) < 2:
return None
instance_type = splut[1].replace(".", "-")
return "ec2-instance.{0}".format(instance_type)
def _usage_type_ebs_storage(self):
if "product/volumeType" in self.content:
return "ebs.storage.{0}".format(
EBS_TYPES[self.content["product/volumeType"]]
)
else:
return "ebs.storage.unknown"
def _usage_type_rds_instance(self):
splut = self.content["lineItem/UsageType"].split(":", 1)
if len(splut) < 2:
            return ""
instance_type = splut[1].replace(".", "-")
return "rds-instance.{0}".format(instance_type)
def _usage_type_rds_storage(self):
line_item_description = self.content['lineItem/LineItemDescription']
volume_type = ""
for substring in RDS_STORAGE_TYPES.keys():
if substring in line_item_description:
volume_type = RDS_STORAGE_TYPES[substring]
if volume_type == "":
raise ValueError("Can't determine RDS storage type from line item description: '{0}'".format(line_item_description)) #noqa
return "rds.storage.{0}".format(volume_type)
def _usage_type_elasticache_instance(self):
splut = self.content["lineItem/UsageType"].split(":", 1)
if len(splut) < 2:
            return ""
instance_type = splut[1].replace(".", "-")
return "elasticache-instance.{0}".format(instance_type)
def end_time(self):
return parse_datetime(
self.content["identity/TimeInterval"].split("/", 1)[1]
)
def tags(self):
return {}
def amount(self):
return float(self.content["lineItem/BlendedCost"])
def new_metric_ledger():
return MetricLedger([
# EC2
TsInstanceType(),
TsEbsStorage(),
TsEbsPiops(),
TsEbsIops(),
TsEbsSnapshot(),
# RDS
TsRdsInstanceType(),
TsRdsStorage(),
TsRdsPiops(),
# ElastiCache
TsElasticacheInstanceType(),
# Total
TsRegionTotal(),
])
def generate_metrics(csv_file, output_file):
"""Generates metrics from the given CSV and writes them to the given
file-like object."""
reader = csv.reader(csv_file)
col_names = reader.next()
# formatter = MetricFormatter()
ledger = new_metric_ledger()
logging.info("Calculating billing metrics")
for row_list in reader:
row = Row(col_names, row_list)
ledger.process(row)
ledger.output(output_file)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
if os.getenv("REGION_NAME") != '':
region_name = os.getenv("REGION_NAME")
else:
region_name = 'us-west-1'
try:
tempdir = tempfile.mkdtemp(".awsbill")
csv_file = open_csv(tempdir, region_name)
output_file = open_output()
generate_metrics(csv_file, output_file)
logging.info("Removing temp directory '{0}'".format(tempdir))
shutil.rmtree(tempdir)
logging.info("Mission complete.")
except Exception, e:
logging.exception(e)
|
|
from datetime import timedelta
from datetime import datetime
from collections import defaultdict
import requests
import os
import urllib
import logging
import arrow
from impactstoryanalytics.widgets.widget import Widget
from impactstoryanalytics.lib import mixpanel_export
import uservoice
logger = logging.getLogger("impactstoryanalytics.widget_api_helpers")
## Utility functions
def get_raw_dataclip_data(query_url):
#example query_url: "https://dataclips.heroku.com/feblvvoknanzuiumyiawutmqdwbo.json"
raw_data = requests.get(query_url).json()
#print raw_data
return raw_data
def perc(num, den, round_to=2):
try:
return round(100 * num / den, round_to)
except ZeroDivisionError:
return None
class Converter():
@classmethod
def from_x_y_format(cls, lines):
events = defaultdict(dict)
for line in lines:
event_name = line["name"]
new_events_dict = cls.events_dict_from_line(line)
events = cls.merge_new_events_dict(events, new_events_dict, event_name)
events_list = cls.events_list_from_dict(events)
return events_list
@classmethod
def events_dict_from_line(cls, line):
ts_values = zip(line["x"], line["y"])
events_dict = {}
for ts_value in ts_values:
timestamp, value = ts_value
events_dict[timestamp] = value
return events_dict
@classmethod
def merge_new_events_dict(cls, old_events_dict, new_events_dict, event_name):
for ts, value in new_events_dict.iteritems():
old_events_dict[ts][event_name] = value
return old_events_dict
@classmethod
def events_list_from_dict(cls, events_dict):
events_list = []
for ts in sorted(events_dict.keys()):
dict_to_add = events_dict[ts]
dict_to_add["start_iso"] = arrow.get(ts).isoformat(" ")
events_list.append(dict_to_add)
return events_list
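# Illustrative sketch of the shapes Converter expects (assumed input, not taken
# from a real payload): from_x_y_format() merges per-series {"name", "x", "y"}
# lines into one dict per timestamp, keyed by series name and stamped with an
# arrow-formatted "start_iso":
#
#   lines = [
#       {"name": "signups", "x": [1388534400, 1388620800], "y": [3, 5]},
#       {"name": "logins",  "x": [1388534400, 1388620800], "y": [9, 12]},
#   ]
#   Converter.from_x_y_format(lines)
#   # -> roughly [{"signups": 3, "logins": 9,  "start_iso": "2014-01-01 00:00:00+00:00"},
#   #             {"signups": 5, "logins": 12, "start_iso": "2014-01-02 00:00:00+00:00"}]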
class Keenio():
def __init__(self, queries, shared_params={}):
default_params = {
"timeframe": "this_30_days",
"interval": "daily",
"timezone": 0,
}
url_roots = {
"context" : "https://api.keen.io/3.0/projects/51df37f0897a2c7fcd000000/queries",
"production": "https://api.keen.io/3.0/projects/51d858213843314922000002/queries"
}
api_keys = {
"context" : "b915f0ca9fcbe1cc4760640adf9f09fa1d330f74c763bfd1aa867d6148f528055a3f97afc6b111e8905ef78bfe7f97d1d2dd2b7ddbb0f9ed8e586fd69d79f12f2215d06298924631d8ccfa7a12845dde94921855ae223c69ad26789dca2ec5fd26296a80af72c3a014df5554948bac8e",
"production": "69023dd079bdb913522954c0f9bb010766be7e87a543674f8ee5d3a66e9b127f5ee641546858bf2c260af4831cd2f7bba4e37c22efb4b21b57bab2a36b9e8e3eccd57db3c75114ba0f788013a08f404738535e9a7eb8a29a30592095e5347e446cf61d50d5508a624934584e17a436ba"
}
self.queries = queries
for query in self.queries:
#set in priority order, highest priority last
self.queries[query]["params"] = dict(default_params.items() + shared_params.items() + queries[query]["params"].items())
#print self.queries[query]["params"]
for query in self.queries:
self.queries[query]["url"] = url_roots[self.queries[query]["project"]]
self.queries[query]["url"] += "/" + self.queries[query]["analysis"]
self.queries[query]["url"] += "?api_key=" + api_keys[self.queries[query]["project"]]
self.queries[query]["url"] += "&" + urllib.urlencode(self.queries[query]["params"])
print self.queries[query]["url"]
self.timebins = defaultdict(dict)
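    # Illustrative sketch of the expected `queries` shape (inferred from the
    # lookups above, not copied from a caller): each entry names a Keen
    # project, an analysis type, and per-query params that get layered under
    # the defaults and any shared_params:
    #
    #   Keenio({
    #       "profiles_created": {
    #           "project": "production",
    #           "analysis": "count",
    #           "params": {"event_collection": "Create profile"},
    #       },
    #   })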
def timebin_extraction_data(self, raw_data):
pans = Widget.get_time_pan_list(100)
for row_from_keen in raw_data:
iso_time = row_from_keen["keen"]["timestamp"]
time = arrow.get(str(iso_time), 'YYYY-MM-DDTHH:mm:ss')
for key in row_from_keen.keys():
if key not in ["keen", "userId"]:
pans.stomp_to_pan(time, key, row_from_keen[key])
return pans.replace_NAs_with_zeroes().as_list()
def limit_to_timeframe(self, response, query_name):
try:
timeframe = self.queries[query_name]["params"]["timeframe"]
except KeyError:
#no timeframe so no change
return response
if ("this" in timeframe):
end_index = None
end_index_int = 0
else:
end_index = -1
end_index_int = -1
if ("30_days" in timeframe):
start_index = -30 - end_index_int
elif ("7_days" in timeframe):
start_index = -7 - end_index_int
response = response[start_index:end_index]
return response
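    # Worked example of the slicing above (sketch, not from the original): for
    # a query configured with timeframe "this_30_days", end_index is None and
    # start_index is -30, so the extraction rows are trimmed to response[-30:],
    # i.e. the 30 most recent daily bins including the partial current day.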
    def get_raw_data(self, return_raw_response=False):
        if return_raw_response:
            # short-circuit before issuing any per-query requests twice
            return self.get_raw_raw_data_dict()
        response = []
        for query_name in self.queries:
            print "sending a query to keenio: " + query_name
            r = requests.get(self.queries[query_name]["url"])
            #print r.text
            raw_data = r.json()["result"]
if self.queries[query_name]["analysis"] == "extraction":
response = self.timebin_extraction_data(raw_data)
#keenio extraction doesn't respect timeframe so do it ourselves
response = self.limit_to_timeframe(response, query_name)
else:
for row_from_keen in raw_data:
new_row = self.create_row(row_from_keen, query_name)
self.timebins[new_row["start_iso"]].update(new_row)
if not response:
response = self.timebins_as_list()
if "this" in self.queries[self.queries.keys()[0]]["params"]["timeframe"]:
response[-1]["end_iso"] = datetime.utcnow().isoformat()
return response
def get_raw_raw_data_dict(self):
response = {}
for query_name in self.queries:
print "sending a query to keenio: " + query_name
r = requests.get(self.queries[query_name]["url"])
raw_data = r.json()["result"]
response[query_name] = raw_data
return response
def create_row(self, row_from_keen, value_name):
return {
"start_iso": row_from_keen["timeframe"]["start"],
"end_iso": row_from_keen["timeframe"]["end"],
value_name: row_from_keen["value"]
}
def timebins_as_list(self):
ret = []
for k in sorted(self.timebins.keys()):
ret.append(self.timebins[k])
return ret
@classmethod
def ungroup(cls, rows, dict_key, group_by, prepend_group_name=False):
for row in rows:
for groupDict in row[dict_key]:
key = groupDict[group_by]
if prepend_group_name:
key = group_by + "_" + str(key)
val = groupDict["result"]
row[key] = val
del row[dict_key]
return rows
class Mixpanel():
@classmethod
def get_funnel_data(cls, api, funnel, funnel_params):
logger.info("Getting funnel data for " + funnel["name"])
funnel_params["funnel_id"] = funnel["funnel_id"]
funnel_data = api.request(['funnels'], funnel_params)
#print json.dumps(funnel_data, indent=4)
logger.info("found data")
return funnel_data["data"]
@classmethod
def get_funnels(cls, api):
funnels = api.request(['funnels', 'list'], {})
return funnels
@classmethod
def get_data(cls, funnel_name=None):
api = mixpanel_export.Mixpanel(
api_key = os.getenv("MIXPANEL_API_KEY"),
api_secret = os.getenv("MIXPANEL_API_SECRET")
)
funnels = cls.get_funnels(api)
funnel_params = {
# The first date in yyyy-mm-dd format from which a user can begin the first step in the funnel. This date is inclusive.
"to_date": datetime.utcnow().isoformat()[0:10] # today
,"from_date": (datetime.utcnow() - timedelta(days=30)).isoformat()[0:10]
# The number of days each user has to complete the funnel, starting from the time they
# triggered the first step in the funnel. May not be greater than 60 days.
# Note that we will query for events past the end of to_date to look for funnel completions.
#The default value is 14.
,"length": 1
# The number of days you want your results bucketed into. The default value is 1
,"interval": 1
}
response = {}
for funnel in funnels:
if funnel_name:
if (funnel_name != funnel["name"]):
continue
response[funnel["name"]] = cls.get_funnel_data(api, funnel, funnel_params)
return response
class Uservoice():
@classmethod
def get_uservoice_owner(cls):
SUBDOMAIN_NAME = 'impactstory'
API_KEY = os.getenv("USERVOICE_API_KEY")
API_SECRET = os.getenv("USERVOICE_API_SECRET")
client = uservoice.Client(SUBDOMAIN_NAME, API_KEY, API_SECRET)
owner = client.login_as_owner()
return owner
@classmethod
def get_ticket_stats(cls):
logger.info("Getting uservoice ticket stats")
owner = cls.get_uservoice_owner()
api_response = owner.get("/api/v1/reports/agent_backlog.json")
interesting_fields = [
"without_response_count",
"waiting_for_agent_count",
"total_count",
"median_open_time"
]
ticket_dict = dict((field, 0) for field in interesting_fields)
median_open_days = []
for agent in api_response["entries"]:
for field in interesting_fields:
if field == "median_open_time":
median_open_days += [open_time/(60.0*60*24) for open_time in agent["open_times"]]
else:
try:
ticket_dict[field] += agent[field]
except KeyError:
ticket_dict[field] += 0
median_open_days.sort()
try:
median_days = median_open_days[int(len(median_open_days)/2)]
ticket_dict["median_open_days"] = round(median_days, 1)
except IndexError:
ticket_dict["median_open_days"] = 0
logger.info("Found uservoice tickets: {all} total, {user} where a user answered last".format(
all=ticket_dict["total_count"],
user=ticket_dict["waiting_for_agent_count"]))
return ticket_dict
@classmethod
def get_ticket_details(cls):
logger.info("Getting uservoice ticket details")
owner = cls.get_uservoice_owner()
tickets = owner.get("/api/v1/tickets?state=open&per_page=100")["tickets"]
return tickets
@classmethod
def get_suggestion_counts(cls):
logger.info("Getting uservoice open suggestion count")
owner = cls.get_uservoice_owner()
suggestions_active = owner.get("/api/v1/suggestions?filter=active&per_page=1000")["suggestions"]
suggestions_inbox = owner.get("/api/v1/suggestions?filter=inbox&per_page=1000")["suggestions"]
suggestions = suggestions_active + suggestions_inbox
suggestion_dict = {}
for suggestion in suggestions:
status = "inbox"
if suggestion["status"]:
status = suggestion["status"]["name"]
suggestion_dict[status] = 1 + suggestion_dict.get(status, 0)
logger.info("Found uservoice suggestions: {total} total".format(
total=len(suggestions)))
return(suggestion_dict)
@classmethod
def get_closed_suggestion_count(cls):
logger.info("Getting uservoice closed suggestion count")
owner = cls.get_uservoice_owner()
closed_suggestions = owner.get("/api/v1/suggestions?filter=closed&per_page=1000")["suggestions"]
logger.info("Found uservoice suggestions: {total} total".format(
total=len(closed_suggestions)))
return(closed_suggestions)
@classmethod
def get_suggestion_details(cls):
logger.info("Getting uservoice suggestion details")
owner = cls.get_uservoice_owner()
suggestions_active = owner.get("/api/v1/suggestions?filter=active&per_page=1000")["suggestions"]
suggestions_inbox = owner.get("/api/v1/suggestions?filter=inbox&per_page=1000")["suggestions"]
suggestions = suggestions_active + suggestions_inbox
return suggestions
class Couchdb():
@classmethod
def get_view(cls, full_view_name, reduce_state=False):
logger.info("getting view from couch")
(design_doc_name, view_name) = full_view_name.split("/")
logger.info("full_view_name: " + full_view_name)
if reduce_state:
couch_query = "_design/{design_doc_name}/_view/{view_name}?reduce=true&group=true".format(
design_doc_name=design_doc_name,
view_name=view_name)
else:
couch_query = "_design/{design_doc_name}/_view/{view_name}".format(
design_doc_name=design_doc_name,
view_name=view_name)
logger.info("couch_querycouch_query: " + couch_query)
url = "/".join([
os.getenv("CLOUDANT_URL"),
os.getenv("CLOUDANT_DB"),
couch_query
])
logger.info("couchdb url: " + url)
response = requests.get(url).json()
return response["rows"]
|
|
#!/usr/bin/env python
"""Quick hack of 'modern' OpenGL example using pysdl2 and pyopengl
Implementation of:
http://www.learnopengl.com/#!Getting-started/Hello-Triangle
Compare to
https://github.com/neutralord/pyopengl-experiments/blob/master/red_book_samples/uniform.py
https://github.com/neutralord/pyopengl-experiments
http://schi.iteye.com/blog/1969710
"""
import glfw
import sys
import ctypes
import numpy
from OpenGL import GL, GLU
from OpenGL.GL import shaders
from OpenGL.arrays import vbo
from numpy import array
shaderProgram = None
VAO = None
VBO = None
EBO = None
indexData = None
### VERTEX SHADER
VERTEX = """
#version 330
layout (location = 0) in vec3 position;
void main()
{
gl_Position = vec4(position.x, position.y, position.z, 1.0);
}
"""
### FRAGMENT SHADER
FRAGMENT = """
#version 330
out vec4 color;
void main()
{
color = vec4(1.0f, 0.5f, 0.2f, 1.0f);
}
"""
def initialize():
    global shaderProgram
    global VAO
    global VBO
    global EBO
    global indexData
vertexShader = shaders.compileShader(VERTEX, GL.GL_VERTEX_SHADER)
fragmentShader = shaders.compileShader(FRAGMENT, GL.GL_FRAGMENT_SHADER)
shaderProgram = shaders.compileProgram(vertexShader, fragmentShader)
# Vertex Data in an array - 2 Triangles (Duplicate Vertices!)
'''
vertexData = numpy.array([
# First Triangle
0.5, 0.5, 0.0, # Top Right
0.5, -0.5, 0.0, # Bottom Right
-0.5, 0.5, 0.0, # Top Left
# Second Triangle
0.5, -0.5, 0.0, # Bottom Right
-0.5, -0.5, 0.0, # Bottom Left
-0.5, 0.5, 0.0, # Top Left
], dtype=numpy.float32)
'''
# Same Data as EBO w/ Indices for buffers
vertexData = numpy.array([
0.5, 0.5, 0.0, # Top Right
0.5, -0.5, 0.0, # Bottom Right
-0.5, -0.5, 0.0, # Bottom Left
-0.5, 0.5, 0.0, # Top Left
], dtype=numpy.float32)
indexData = numpy.array([
0, 1, 3, # First Triangle
1, 2, 3, # Second Triangle
], dtype=numpy.uint32)
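    # The index buffer reuses the four vertices above: triangle (0, 1, 3) is
    # Top Right / Bottom Right / Top Left, and triangle (1, 2, 3) is
    # Bottom Right / Bottom Left / Top Left, so the shared edge (1, 3) is
    # stored once instead of duplicating its vertices.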
# Core OpenGL requires that at least one OpenGL vertex array be bound
VAO = GL.glGenVertexArrays(1)
GL.glBindVertexArray(VAO)
# Need VBO for triangle vertices
VBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, VBO)
GL.glBufferData(GL.GL_ARRAY_BUFFER, vertexData.nbytes, vertexData, GL.GL_STATIC_DRAW)
# Can Make our lives easier, this is the same
'''
VBO = vbo.VBO(vertexData)
VBO.bind()
'''
# We make an EBO now
EBO = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, EBO)
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, indexData.nbytes, indexData, GL.GL_STATIC_DRAW)
'''
EBO = vbo.VBO(indexData, target=GL.GL_ELEMENT_ARRAY_BUFFER)
EBO.bind()
'''
# enable array and set up data (last 0 for autopacking)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glEnableVertexAttribArray(0)
# Unbind so we don't mess w/ them
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindVertexArray(0)
# Wireframe Mode
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE);
def render():
global shaderProgram
global VAO
global EBO
global indexData
GL.glClearColor(0, 0, 0, 1)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
# active shader program
GL.glUseProgram(shaderProgram)
try:
GL.glBindVertexArray(VAO)
# Draw a triangle
# GL.glDrawArrays(GL.GL_TRIANGLES, 0, 3)
        # draw both triangles via the index buffer: mode, index count (indexData.size == 6), index type, offset
GL.glDrawElements(GL.GL_TRIANGLES, indexData.size, GL.GL_UNSIGNED_INT, None)
finally:
# Unbind when we finish
GL.glBindVertexArray(0)
GL.glUseProgram(0)
def main():
# Initialize the library
if not glfw.init():
return
# Set some window hints
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3);
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3);
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE);
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE);
glfw.window_hint(glfw.SAMPLES, 16)
# This works as expected
glfw.window_hint(glfw.RESIZABLE, 0)
# These should work, but don't :(
# could control focus w/ http://crunchbang.org/forums/viewtopic.php?id=22226
# ended up using xdotool, see run.py
glfw.window_hint(glfw.FOCUSED, 0)
# I've set 'shader-*' to raise in openbox-rc as workaround
# Looking at the code and confirming version 3.1.2 and it should work
glfw.window_hint(glfw.FLOATING, 1)
# Create a windowed mode window and its OpenGL context
window = glfw.create_window(300, 300, "shader-test", None, None)
if not window:
glfw.terminate()
return
# Move Window
glfw.set_window_pos(window, 1600, 50)
# Make the window's context current
glfw.make_context_current(window)
# vsync
glfw.swap_interval(1)
# Setup GL shaders, data, etc.
initialize()
# Loop until the user closes the window
while not glfw.window_should_close(window):
# Render here, e.g. using pyOpenGL
render()
# Swap front and back buffers
glfw.swap_buffers(window)
# Poll for and process events
glfw.poll_events()
glfw.terminate()
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
from base64 import b64encode
from hashlib import sha1
import os
import socket
import ssl
from ws4py import WS_KEY, WS_VERSION
from ws4py.exc import HandshakeError
from ws4py.websocket import WebSocket
from ws4py.compat import urlsplit
__all__ = ['WebSocketBaseClient']
class WebSocketBaseClient(WebSocket):
def __init__(self, url, protocols=None, extensions=None,
heartbeat_freq=None, ssl_options=None, headers=None):
"""
A websocket client that implements :rfc:`6455` and provides a simple
interface to communicate with a websocket server.
This class works on its own but will block if not run in
its own thread.
When an instance of this class is created, a :py:mod:`socket`
is created. If the connection is a TCP socket,
        Nagle's algorithm is disabled.
The address of the server will be extracted from the given
websocket url.
The websocket key is randomly generated, reset the
`key` attribute if you want to provide yours.
For instance to create a TCP client:
.. code-block:: python
>>> from websocket.client import WebSocketBaseClient
>>> ws = WebSocketBaseClient('ws://localhost/ws')
Here is an example for a TCP client over SSL:
.. code-block:: python
>>> from websocket.client import WebSocketBaseClient
>>> ws = WebSocketBaseClient('wss://localhost/ws')
Finally an example of a Unix-domain connection:
.. code-block:: python
>>> from websocket.client import WebSocketBaseClient
>>> ws = WebSocketBaseClient('ws+unix:///tmp/my.sock')
Note that in this case, the initial Upgrade request
will be sent to ``/``. You may need to change this
        by setting the resource explicitly before connecting:
.. code-block:: python
>>> from websocket.client import WebSocketBaseClient
>>> ws = WebSocketBaseClient('ws+unix:///tmp/my.sock')
>>> ws.resource = '/ws'
>>> ws.connect()
You may provide extra headers by passing a list of tuples
which must be unicode objects.
"""
self.url = url
self.host = None
self.scheme = None
self.port = None
self.unix_socket_path = None
self.resource = None
self.ssl_options = ssl_options or {}
self.extra_headers = headers or []
self._parse_url()
if self.unix_socket_path:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
else:
# Let's handle IPv4 and IPv6 addresses
# Simplified from CherryPy's code
try:
family, socktype, proto, canonname, sa = socket.getaddrinfo(self.host, self.port,
socket.AF_UNSPEC,
socket.SOCK_STREAM,
0, socket.AI_PASSIVE)[0]
except socket.gaierror:
family = socket.AF_INET
if self.host.startswith('::'):
family = socket.AF_INET6
socktype = socket.SOCK_STREAM
proto = 0
canonname = ""
sa = (self.host, self.port, 0, 0)
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and \
self.host.startswith('::'):
try:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
pass
WebSocket.__init__(self, sock, protocols=protocols,
extensions=extensions,
heartbeat_freq=heartbeat_freq)
self.stream.always_mask = True
self.stream.expect_masking = False
self.key = b64encode(os.urandom(16))
    # Adapted from: https://github.com/liris/websocket-client/blob/master/websocket.py#L105
def _parse_url(self):
"""
Parses a URL which must have one of the following forms:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
In the first two cases, the ``host`` and ``port``
attributes will be set to the parsed values. If no port
        is explicitly provided, it will be either 80 or 443
based on the scheme. Also, the ``resource`` attribute is
set to the path segment of the URL (alongside any querystring).
In addition, if the scheme is ``ws+unix``, the
``unix_socket_path`` attribute is set to the path to
the Unix socket while the ``resource`` attribute is
set to ``/``.
"""
# Python 2.6.1 and below don't parse ws or wss urls properly. netloc is empty.
# See: https://github.com/Lawouach/WebSocket-for-Python/issues/59
scheme, url = self.url.split(":", 1)
parsed = urlsplit(url, scheme="http")
if parsed.hostname:
self.host = parsed.hostname
elif '+unix' in scheme:
self.host = 'localhost'
else:
raise ValueError("Invalid hostname from: %s", self.url)
if parsed.port:
self.port = parsed.port
if scheme == "ws":
if not self.port:
self.port = 80
elif scheme == "wss":
if not self.port:
self.port = 443
elif scheme in ('ws+unix', 'wss+unix'):
pass
else:
raise ValueError("Invalid scheme: %s" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if '+unix' in scheme:
self.unix_socket_path = resource
resource = '/'
if parsed.query:
resource += "?" + parsed.query
self.scheme = scheme
self.resource = resource
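    # Illustrative results of _parse_url() (a sketch based on the code above):
    #
    #   url = 'ws://localhost:8080/ws?x=1'
    #     -> host='localhost', port=8080, resource='/ws?x=1',
    #        unix_socket_path=None
    #   url = 'ws+unix:///tmp/my.sock'
    #     -> host='localhost', port=None, resource='/',
    #        unix_socket_path='/tmp/my.sock'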
@property
def bind_addr(self):
"""
        Returns the Unix socket path if one was parsed, or a tuple
``(host, port)`` depending on the initial
URL's scheme.
"""
return self.unix_socket_path or (self.host, self.port)
def close(self, code=1000, reason=''):
"""
Initiate the closing handshake with the server.
"""
if not self.client_terminated:
self.client_terminated = True
self._write(self.stream.close(code=code, reason=reason).single(mask=True))
def connect(self):
"""
Connects this websocket and starts the upgrade handshake
with the remote endpoint.
"""
if self.scheme == "wss":
# default port is now 443; upgrade self.sender to send ssl
self.sock = ssl.wrap_socket(self.sock, **self.ssl_options)
self.sock.connect(self.bind_addr)
self._write(self.handshake_request)
response = b''
doubleCLRF = b'\r\n\r\n'
while True:
bytes = self.sock.recv(128)
if not bytes:
break
response += bytes
if doubleCLRF in response:
break
if not response:
self.close_connection()
raise HandshakeError("Invalid response")
headers, _, body = response.partition(doubleCLRF)
response_line, _, headers = headers.partition(b'\r\n')
try:
self.process_response_line(response_line)
self.protocols, self.extensions = self.process_handshake_header(headers)
except HandshakeError:
self.close_connection()
raise
self.handshake_ok()
if body:
self.process(body)
@property
def handshake_headers(self):
"""
List of headers appropriate for the upgrade
handshake.
"""
headers = [
('Host', self.host),
('Connection', 'Upgrade'),
('Upgrade', 'websocket'),
('Sec-WebSocket-Key', self.key.decode('utf-8')),
('Origin', self.url),
('Sec-WebSocket-Version', str(max(WS_VERSION)))
]
if self.protocols:
headers.append(('Sec-WebSocket-Protocol', ','.join(self.protocols)))
if self.extra_headers:
headers.extend(self.extra_headers)
return headers
@property
def handshake_request(self):
"""
Prepare the request to be sent for the upgrade handshake.
"""
headers = self.handshake_headers
request = [("GET %s HTTP/1.1" % self.resource).encode('utf-8')]
for header, value in headers:
request.append(("%s: %s" % (header, value)).encode('utf-8'))
request.append(b'\r\n')
return b'\r\n'.join(request)
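    # Sketch of the resulting bytes (header order and values depend on the
    # instance; shown only to make the CRLF framing visible):
    #
    #   GET /ws HTTP/1.1\r\n
    #   Host: localhost\r\n
    #   Connection: Upgrade\r\n
    #   Upgrade: websocket\r\n
    #   Sec-WebSocket-Key: <random base64 key>\r\n
    #   ...\r\n
    #   \r\n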
def process_response_line(self, response_line):
"""
        Ensure that we received an HTTP `101` status code in
        response to our request, and raise :exc:`HandshakeError` otherwise.
"""
protocol, code, status = response_line.split(b' ', 2)
if code != b'101':
raise HandshakeError("Invalid response status: %s %s" % (code, status))
def process_handshake_header(self, headers):
"""
Read the upgrade handshake's response headers and
validate them against :rfc:`6455`.
"""
protocols = []
extensions = []
headers = headers.strip()
for header_line in headers.split(b'\r\n'):
header, value = header_line.split(b':', 1)
header = header.strip().lower()
value = value.strip().lower()
if header == 'upgrade' and value != 'websocket':
raise HandshakeError("Invalid Upgrade header: %s" % value)
elif header == 'connection' and value != 'upgrade':
raise HandshakeError("Invalid Connection header: %s" % value)
elif header == 'sec-websocket-accept':
match = b64encode(sha1(self.key.encode('utf-8') + WS_KEY).digest())
if value != match.lower():
raise HandshakeError("Invalid challenge response: %s" % value)
elif header == 'sec-websocket-protocol':
protocols = ','.join(value)
elif header == 'sec-websocket-extensions':
extensions = ','.join(value)
return protocols, extensions
|
|
"""
Virtual gateway for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
import collections
import itertools
import logging
import os
import traceback
from homeassistant.components.system_log import LogEntry, _figure_out_source
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from ..api import async_get_device_info
from .channels import MAINS_POWERED, ZDOChannel
from .const import (
ADD_DEVICE_RELAY_LOGGERS, ATTR_MANUFACTURER, BELLOWS, CONF_BAUDRATE,
CONF_DATABASE, CONF_RADIO_TYPE, CONF_USB_PATH, CONTROLLER, CURRENT,
DATA_ZHA, DATA_ZHA_BRIDGE_ID, DATA_ZHA_CORE_COMPONENT, DATA_ZHA_GATEWAY,
DATA_ZHA_RADIO, DEBUG_LEVELS, DEFAULT_BAUDRATE, DEFAULT_DATABASE_NAME,
DEVICE_FULL_INIT, DEVICE_INFO, DEVICE_JOINED, DEVICE_REMOVED, DOMAIN, IEEE,
LOG_ENTRY, LOG_OUTPUT, MODEL, NWK, ORIGINAL, RADIO, RADIO_DESCRIPTION,
RAW_INIT, SIGNAL_REMOVE, SIGNATURE, TYPE, ZHA, ZHA_GW_MSG, ZIGPY,
ZIGPY_DECONZ, ZIGPY_XBEE)
from .device import DeviceStatus, ZHADevice
from .discovery import (
async_create_device_entity, async_dispatch_discovery_info,
async_process_endpoint)
from .patches import apply_application_controller_patch
from .registries import RADIO_TYPES
from .store import async_get_registry
_LOGGER = logging.getLogger(__name__)
EntityReference = collections.namedtuple(
'EntityReference', 'reference_id zha_device cluster_channels device_info')
class ZHAGateway:
"""Gateway that handles events that happen on the ZHA Zigbee network."""
def __init__(self, hass, config):
"""Initialize the gateway."""
self._hass = hass
self._config = config
self._component = EntityComponent(_LOGGER, DOMAIN, hass)
self._devices = {}
self._device_registry = collections.defaultdict(list)
self.zha_storage = None
self.application_controller = None
self.radio_description = None
hass.data[DATA_ZHA][DATA_ZHA_CORE_COMPONENT] = self._component
hass.data[DATA_ZHA][DATA_ZHA_GATEWAY] = self
self._log_levels = {
ORIGINAL: async_capture_log_levels(),
CURRENT: async_capture_log_levels()
}
self.debug_enabled = False
self._log_relay_handler = LogRelayHandler(hass, self)
async def async_initialize(self, config_entry):
"""Initialize controller and connect radio."""
self.zha_storage = await async_get_registry(self._hass)
usb_path = config_entry.data.get(CONF_USB_PATH)
baudrate = self._config.get(CONF_BAUDRATE, DEFAULT_BAUDRATE)
radio_type = config_entry.data.get(CONF_RADIO_TYPE)
radio_details = RADIO_TYPES[radio_type][RADIO]()
radio = radio_details[RADIO]
self.radio_description = RADIO_TYPES[radio_type][RADIO_DESCRIPTION]
await radio.connect(usb_path, baudrate)
self._hass.data[DATA_ZHA][DATA_ZHA_RADIO] = radio
if CONF_DATABASE in self._config:
database = self._config[CONF_DATABASE]
else:
database = os.path.join(
self._hass.config.config_dir, DEFAULT_DATABASE_NAME)
self.application_controller = radio_details[CONTROLLER](
radio, database)
apply_application_controller_patch(self)
self.application_controller.add_listener(self)
await self.application_controller.startup(auto_form=True)
self._hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID] = str(
self.application_controller.ieee)
init_tasks = []
for device in self.application_controller.devices.values():
init_tasks.append(self.async_device_initialized(device, False))
await asyncio.gather(*init_tasks)
def device_joined(self, device):
"""Handle device joined.
At this point, no information about the device is known other than its
        address.
"""
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: DEVICE_JOINED,
NWK: device.nwk,
IEEE: str(device.ieee)
}
)
def raw_device_initialized(self, device):
"""Handle a device initialization without quirks loaded."""
endpoint_ids = device.endpoints.keys()
ept_id = next((ept_id for ept_id in endpoint_ids if ept_id != 0), None)
manufacturer = 'Unknown'
model = 'Unknown'
if ept_id is not None:
manufacturer = device.endpoints[ept_id].manufacturer
model = device.endpoints[ept_id].model
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: RAW_INIT,
NWK: device.nwk,
IEEE: str(device.ieee),
MODEL: model,
ATTR_MANUFACTURER: manufacturer,
SIGNATURE: device.get_signature()
}
)
def device_initialized(self, device):
"""Handle device joined and basic information discovered."""
self._hass.async_create_task(
self.async_device_initialized(device, True))
def device_left(self, device):
"""Handle device leaving the network."""
pass
def device_removed(self, device):
"""Handle device being removed from the network."""
        ieee = device.ieee
        device = self._devices.pop(ieee, None)
        self._device_registry.pop(ieee, None)
if device is not None:
device_info = async_get_device_info(self._hass, device)
self._hass.async_create_task(device.async_unsub_dispatcher())
async_dispatcher_send(
self._hass,
"{}_{}".format(SIGNAL_REMOVE, str(device.ieee))
)
if device_info is not None:
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: DEVICE_REMOVED,
DEVICE_INFO: device_info
}
)
def get_device(self, ieee):
"""Return ZHADevice for given ieee."""
return self._devices.get(ieee)
def get_entity_reference(self, entity_id):
"""Return entity reference for given entity_id if found."""
for entity_reference in itertools.chain.from_iterable(
self.device_registry.values()):
if entity_id == entity_reference.reference_id:
return entity_reference
@property
def devices(self):
"""Return devices."""
return self._devices
@property
def device_registry(self):
"""Return entities by ieee."""
return self._device_registry
def register_entity_reference(
self, ieee, reference_id, zha_device, cluster_channels,
device_info):
"""Record the creation of a hass entity associated with ieee."""
self._device_registry[ieee].append(
EntityReference(
reference_id=reference_id,
zha_device=zha_device,
cluster_channels=cluster_channels,
device_info=device_info
)
)
@callback
def async_enable_debug_mode(self):
"""Enable debug mode for ZHA."""
self._log_levels[ORIGINAL] = async_capture_log_levels()
async_set_logger_levels(DEBUG_LEVELS)
self._log_levels[CURRENT] = async_capture_log_levels()
for logger_name in ADD_DEVICE_RELAY_LOGGERS:
logging.getLogger(logger_name).addHandler(self._log_relay_handler)
self.debug_enabled = True
@callback
def async_disable_debug_mode(self):
"""Disable debug mode for ZHA."""
async_set_logger_levels(self._log_levels[ORIGINAL])
self._log_levels[CURRENT] = async_capture_log_levels()
for logger_name in ADD_DEVICE_RELAY_LOGGERS:
logging.getLogger(logger_name).removeHandler(
self._log_relay_handler)
self.debug_enabled = False
@callback
def _async_get_or_create_device(self, zigpy_device, is_new_join):
"""Get or create a ZHA device."""
zha_device = self._devices.get(zigpy_device.ieee)
if zha_device is None:
zha_device = ZHADevice(self._hass, zigpy_device, self)
self._devices[zigpy_device.ieee] = zha_device
if not is_new_join:
entry = self.zha_storage.async_get_or_create(zha_device)
zha_device.async_update_last_seen(entry.last_seen)
zha_device.set_power_source(entry.power_source)
return zha_device
@callback
def async_device_became_available(
self, sender, is_reply, profile, cluster, src_ep, dst_ep, tsn,
command_id, args):
"""Handle tasks when a device becomes available."""
self.async_update_device(sender)
@callback
def async_update_device(self, sender):
"""Update device that has just become available."""
if sender.ieee in self.devices:
device = self.devices[sender.ieee]
# avoid a race condition during new joins
if device.status is DeviceStatus.INITIALIZED:
device.update_available(True)
async def async_update_device_storage(self):
"""Update the devices in the store."""
for device in self.devices.values():
self.zha_storage.async_update(device)
await self.zha_storage.async_save()
async def async_device_initialized(self, device, is_new_join):
"""Handle device joined and basic information discovered (async)."""
zha_device = self._async_get_or_create_device(device, is_new_join)
discovery_infos = []
for endpoint_id, endpoint in device.endpoints.items():
async_process_endpoint(
self._hass, self._config, endpoint_id, endpoint,
discovery_infos, device, zha_device, is_new_join
)
if endpoint_id != 0:
for cluster in endpoint.in_clusters.values():
cluster.bind_only = False
for cluster in endpoint.out_clusters.values():
cluster.bind_only = True
if is_new_join:
# configure the device
await zha_device.async_configure()
zha_device.update_available(True)
elif zha_device.power_source is not None\
and zha_device.power_source == MAINS_POWERED:
# the device isn't a battery powered device so we should be able
# to update it now
_LOGGER.debug(
"attempting to request fresh state for %s %s",
zha_device.name,
"with power source: {}".format(
ZDOChannel.POWER_SOURCES.get(zha_device.power_source)
)
)
await zha_device.async_initialize(from_cache=False)
else:
await zha_device.async_initialize(from_cache=True)
for discovery_info in discovery_infos:
async_dispatch_discovery_info(
self._hass,
is_new_join,
discovery_info
)
device_entity = async_create_device_entity(zha_device)
await self._component.async_add_entities([device_entity])
if is_new_join:
device_info = async_get_device_info(self._hass, zha_device)
async_dispatcher_send(
self._hass,
ZHA_GW_MSG,
{
TYPE: DEVICE_FULL_INIT,
DEVICE_INFO: device_info
}
)
@callback
def async_capture_log_levels():
"""Capture current logger levels for ZHA."""
return {
BELLOWS: logging.getLogger(BELLOWS).getEffectiveLevel(),
ZHA: logging.getLogger(ZHA).getEffectiveLevel(),
ZIGPY: logging.getLogger(ZIGPY).getEffectiveLevel(),
ZIGPY_XBEE: logging.getLogger(ZIGPY_XBEE).getEffectiveLevel(),
ZIGPY_DECONZ: logging.getLogger(ZIGPY_DECONZ).getEffectiveLevel(),
}
@callback
def async_set_logger_levels(levels):
"""Set logger levels for ZHA."""
logging.getLogger(BELLOWS).setLevel(levels[BELLOWS])
logging.getLogger(ZHA).setLevel(levels[ZHA])
logging.getLogger(ZIGPY).setLevel(levels[ZIGPY])
logging.getLogger(ZIGPY_XBEE).setLevel(levels[ZIGPY_XBEE])
logging.getLogger(ZIGPY_DECONZ).setLevel(levels[ZIGPY_DECONZ])
class LogRelayHandler(logging.Handler):
"""Log handler for error messages."""
def __init__(self, hass, gateway):
"""Initialize a new LogErrorHandler."""
super().__init__()
self.hass = hass
self.gateway = gateway
def emit(self, record):
"""Relay log message via dispatcher."""
stack = []
if record.levelno >= logging.WARN:
if not record.exc_info:
stack = [f for f, _, _, _ in traceback.extract_stack()]
entry = LogEntry(record, stack,
_figure_out_source(record, stack, self.hass))
async_dispatcher_send(
self.hass,
ZHA_GW_MSG,
{
TYPE: LOG_OUTPUT,
LOG_ENTRY: entry.to_dict()
}
)
|
|
import pygame,random,sys,os
from pygame.locals import *
pygame.init()
# speed of the game
FPS=13.0
fpsClock=pygame.time.Clock()
scr_width = 610
scr_height = 480
screen = pygame.display.set_mode((scr_width,scr_height))
# initial state of the snake
n_blocks = 5
n_blocks1 = 5
origin = [50,50]
origin1 = [scr_width-50,50]
eaten = 1
eaten1 = 1
score = 0
score1 = 0
t = -1
class pause_scr_item(pygame.font.Font):
def __init__(self, text, font=None, font_size=48, font_color=(255, 255, 255), (pos_x, pos_y)=(0, 0)):
pygame.font.Font.__init__(self, font, font_size)
self.text = text
self.font_size = font_size
self.font_color = font_color
self.label = self.render(self.text, 1, self.font_color)
self.width = self.label.get_rect().width
self.height = self.label.get_rect().height
self.dimensions = (self.width, self.height)
self.pos_x = pos_x
self.pos_y = pos_y
self.position = pos_x, pos_y
def set_position(self, x, y):
self.position = (x, y)
self.pos_x = x
self.pos_y = y
def set_font_color(self, rgb_tuple):
self.font_color = rgb_tuple
self.label = self.render(self.text, 1, self.font_color)
def is_mouse_selection(self, (posx, posy)):
if (posx >= self.pos_x and posx <= self.pos_x + self.width) and (posy >= self.pos_y and posy <= self.pos_y + self.height):
return True
return False
class Background(pygame.sprite.Sprite):
def __init__(self, image_file, location):
pygame.sprite.Sprite.__init__(self) #call Sprite initializer
self.image = pygame.image.load(image_file)
self.rect = self.image.get_rect()
self.rect.left, self.rect.top = location
class snake_block:
'Defines the snake'
color = [255,255,255]
width = 5
height = 5
x = 0
y = 0
class food_block:
'Food and parameters'
color = (250,0,0)
width = 5
height = 5
x = 80
y = 80
#initializing snake with 200 snake blocks
snake=[]
snake1=[]
food = food_block()
# initializing for a new game
def init():
global t, snake, food, eaten, score, FPS, n_blocks, origin, snake1, eaten1, score1, n_blocks1, origin1
hs = open("hs.txt","w")
hs.write("0")
hs.close()
snake = [snake_block() for _ in xrange(200)]
snake1 = [snake_block() for _ in xrange(200)]
food = food_block()
n_blocks = 5
n_blocks1 = 5
score = 0
score1 = 0
t = -1
origin[0] += n_blocks*5
for i in xrange(n_blocks):
snake[i].x = origin[0] - (snake[i].width*i)
snake[i].y = origin[1]
snake[i].color = [0,250,0]
origin1[0] += n_blocks1*5
for i in xrange(n_blocks1):
snake1[i].x = origin1[0] - (snake1[i].width*i)
snake1[i].y = origin1[1]
snake1[i].color = [0,0,220]
place_food()
eaten = 0
eaten1 = 0
FPS=13.0
# function to randomly place food
def place_food():
food.x = random.randrange(0, scr_width, 5)
food.y = 200
#food.y = random.randrange(45, scr_height, 5)
# function to move the snake blocks by following the head block
def follow(snake, n_blocks):
prev_x = snake[0].x
prev_y = snake[0].y
for i in range(1, n_blocks):
prex=snake[i].x
prey=snake[i].y
snake[i].x = prev_x
snake[i].y = prev_y
prev_x = prex
prev_y = prey
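# Illustrative trace of follow() (a sketch): with the head at x=60 and the next
# two blocks at x=55 and x=50 on the same row, follow() shifts every block into
# the position of the block ahead of it, and the move_* helpers then advance
# the head by one block width:
#
#   before follow:                      x = [60, 55, 50]
#   after follow:                       x = [60, 60, 55]
#   after move_right advances the head: x = [65, 60, 55]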
def move_right(snake,cur_dir,n_blocks):
if cur_dir != "LEFT":
follow(snake,n_blocks)
snake[0].x = (snake[0].x+snake[0].width)%(scr_width+5)
else: move_left(snake, cur_dir,n_blocks)
def move_left(snake,cur_dir,n_blocks):
if cur_dir != "RIGHT":
follow(snake,n_blocks)
snake[0].x = (snake[0].x-snake[0].width+scr_width+5)%(scr_width+5)
else: move_right(snake, cur_dir,n_blocks)
def move_up(snake,cur_dir,n_blocks):
if cur_dir != "DOWN":
follow(snake,n_blocks)
snake[0].y = (snake[0].y-snake[0].height+scr_height+5)%(scr_height+5)
else: move_down(snake, cur_dir,n_blocks)
def move_down(snake,cur_dir,n_blocks):
if cur_dir != "UP":
follow(snake,n_blocks)
snake[0].y = (snake[0].y+snake[0].height)%(scr_height+5)
else: move_up(snake, cur_dir,n_blocks)
def game_over(snake,n_blocks,mode):
global t
hs = open("hs.txt","r")
    hscore = int(hs.read())
hs.close()
if score > hscore:
hs = open("hs.txt","w")
        hs.write(str(score))
hs.close()
if n_blocks <=2 : return
if mode == "multi":
for i in xrange(1,n_blocks):
if snake[0].x == snake[i].x and snake[0].y == snake[i].y:
t = FPS*10
snake[0].x = -1
for _ in xrange(n_blocks):
snake[_].color = (0,0,0)
break
else:
for i in xrange(1,n_blocks):
if snake[0].x == snake[i].x and snake[0].y == snake[i].y:
display_game_over_screen(mode)
def display_game_over_screen(mode):
global hscore
hs = open("hs.txt","r")
    hscore = int(hs.read())
hs.close()
gover_font = pygame.font.SysFont(None, 48)
other_font = pygame.font.SysFont(None, 40)
gameover = gover_font.render("Gameover!", True, (255,255,255))
BackGround = Background("./s.jpg", [0,0])
if mode == "multi":
scored2 = other_font.render("Score: %d"%score1, True, (0,0,255))
scored2_pos = [(scr_width/2) - (scored2.get_rect().width / 2), (scr_height / 2) + (scored2.get_rect().height)/2 + 6]
scored= other_font.render("Score: %d"%score, True,(0,255,0))
play_again = other_font.render("Play Again?", True, (255,255,255))
quit = other_font.render("Quit", True, (255,255,255))
high_score = other_font.render("High Score: %d"%hscore, True, (255,255,255))
gameover_pos = [(scr_width / 2) - (gameover.get_rect().width / 2), (scr_height / 2) - (5*gameover.get_rect().height)/2]
high_score_pos = [(scr_width / 2) - (high_score.get_rect().width / 2), (scr_height/2) - (3*high_score.get_rect().height)/2 + 2]
scored_pos = [(scr_width / 2) - (scored.get_rect().width / 2), (scr_height / 2) - (scored.get_rect().height)/2 + 4]
play_again_pos = [(scr_width / 2) - (play_again.get_rect().width / 2), (scr_height / 2) + (3*play_again.get_rect().height)/2 + 8]
quit_pos = [(scr_width / 2) - (quit.get_rect().width / 2), (scr_height / 2) + (5*quit.get_rect().height)/2 + 10]
loop = True
while loop:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
if pos[0] >= play_again_pos[0] and pos[0] <= play_again_pos[0] + play_again.get_rect().width and pos[1] >= play_again_pos[1] and pos[1] <= play_again_pos[1] + play_again.get_rect().height: run(mode)
if pos[0] >= quit_pos[0] and pos[0] <= quit_pos[0] + quit.get_rect().width and pos[1] >= quit_pos[1] and pos[1] <= quit_pos[1] + quit.get_rect().height:
pygame.quit()
sys.exit()
screen.fill((0,0,0))
screen.blit(BackGround.image, BackGround.rect)
screen.blit(gameover, gameover_pos)
screen.blit(high_score, high_score_pos)
screen.blit(scored, scored_pos)
if mode=="multi":
screen.blit(scored2, scored2_pos)
screen.blit(play_again, play_again_pos)
screen.blit(quit, quit_pos)
pygame.display.flip()
def score_toolbar(mode):
t_height = 40
t_width = scr_width
global t
font = pygame.font.SysFont(None, t_height/2)
score_txt = font.render("S1: %d"%score, True, (0,255,0))
score_txt_pos = [10, t_height/2 - score_txt.get_rect().height/2]
screen.blit(score_txt, score_txt_pos)
if mode=="multi":
score1_txt = font.render("S2: %d"%score1, True, (0,0,255))
score1_txt_pos = [t_width-50, t_height/2 - score1_txt.get_rect().height/2]
time_txt = font.render("Time Left: %0.2f"%(t/FPS), True,(255,255,255))
time_position = [(t_width/2)-time_txt.get_rect().width/2, t_height/2 - time_txt.get_rect().height/2]
screen.blit(score1_txt, score1_txt_pos)
if t >= 0 :
screen.blit(time_txt, time_position)
def draw(mode):
global snake, snake1, eaten, n_blocks, FPS, score, eaten1, n_blocks1, score1
BLACK = [0,0,0]
WHITE = [255,255,255]
GREEN = [0,250,0]
BLUE = [0,0,220]
screen.fill(BLACK)
score_toolbar(mode)
for i in xrange(n_blocks):
pygame.draw.rect(screen,snake[i].color,(((snake[i].x%(scr_width+5)),(snake[i].y%(scr_height+5))),(snake[i].width,snake[i].height)))
eaten = snake[0].x == food.x and snake[0].y == food.y
if eaten:
place_food()
eaten = 0
eaten1=0
n_blocks += 1
snake[n_blocks-1].x=snake[n_blocks-2].x # adding new block when food is consumed at the last block position
snake[n_blocks-1].y=snake[n_blocks-2].y
snake[n_blocks-1].color = GREEN
FPS += 0.75 # increasing speed after every food consumption
score += 10
if mode=="multi":
for i in xrange(n_blocks1):
pygame.draw.rect(screen,snake1[i].color,(((snake1[i].x%(scr_width+5)),(snake1[i].y%(scr_height+5))),(snake1[i].width,snake1[i].height)))
eaten1 = snake1[0].x == food.x and snake1[0].y == food.y
if eaten1:
place_food()
eaten1 = 0
eaten=0
n_blocks1 += 1
snake1[n_blocks1-1].x=snake1[n_blocks1-2].x # adding new block when food is consumed at the last block position
snake1[n_blocks1-1].y=snake1[n_blocks1-2].y
snake1[n_blocks1-1].color = BLUE
FPS += 0.75 # increasing speed after every food consumption
score1 += 10
pygame.draw.rect(screen,food.color,((food.x, food.y), (food.width, food.height)))
pygame.display.update()
fpsClock.tick(FPS)
# pause the game
def pause():
loop = True
items_arr = ['Resume', 'New Game', 'Quit']
items = []
BackGround = Background("./s.jpg", [0,0])
for index,item in enumerate(items_arr):
menu_item = pause_scr_item(item)
t_h = len(items_arr) * menu_item.height
pos_x = (scr_width / 2) - (menu_item.width / 2)
pos_y = (scr_height / 2) - (t_h / 2) + ((index * 2) + index * menu_item.height)
menu_item.set_position(pos_x, pos_y)
items.append(menu_item)
screen.fill([0,0,0])
while loop:
pygame.time.Clock().tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
for item in items:
if item.is_mouse_selection(pos):
loop = False
if item.text == "Quit":
pygame.quit()
sys.exit()
if item.text == "New Game":
pygame.quit()
os.system("python main.py")
sys.exit()
screen.fill([0,0,0])
screen.blit(BackGround.image, BackGround.rect)
for item in items:
if item.is_mouse_selection(pygame.mouse.get_pos()):
item.set_font_color((255,255,255))
item.set_bold(True)
else:
item.set_font_color((255,255,255))
item.set_bold(False)
screen.blit(item.label, item.position)
pygame.display.flip()
def run(mode):
p_right = 0
p_left = 0
p_up = 0
p_down = 0
p1_right = 0
p1_left = 0
p1_up = 0
p1_down = 0
cur_dir = "RIGHT"
prev_dir = ""
cur_dir1 = "RIGHT"
prev_dir1 = ""
global t
main_loop = True
init()
while main_loop:
draw(mode)
pygame.image.save(screen, "s.jpg")
if t==-1:
game_over(snake,n_blocks,mode)
game_over(snake1,n_blocks1,mode)
if t > 0:
t-=1
elif t!=-1:
display_game_over_screen(mode)
for event in pygame.event.get():
if event.type ==pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == K_DOWN and cur_dir != "UP":
p_down=1
prev_dir = cur_dir
cur_dir = "DOWN"
p_left=0
p_right=0
p_up=0
elif event.key == K_UP and cur_dir != "DOWN":
p_up=1
prev_dir = cur_dir
cur_dir = "UP"
p_down=0
p_left=0
p_right=0
elif event.key == K_LEFT and cur_dir != "RIGHT":
p_left=1
prev_dir = cur_dir
cur_dir = "LEFT"
p_right=0
p_up=0
p_down=0
elif event.key == K_RIGHT and cur_dir != "LEFT":
p_right=1
prev_dir = cur_dir
cur_dir = "RIGHT"
p_up=0
p_left=0
p_down=0
elif event.key == K_s and cur_dir1 != "UP":
p1_down=1
prev_dir1 = cur_dir1
cur_dir1 = "DOWN"
p1_left=0
p1_right=0
p1_up=0
elif event.key == K_w and cur_dir1 != "DOWN":
p1_up=1
prev_dir1 = cur_dir1
cur_dir1 = "UP"
p1_down=0
p1_left=0
p1_right=0
elif event.key == K_a and cur_dir1 != "RIGHT":
p1_left=1
prev_dir1 = cur_dir1
cur_dir1 = "LEFT"
p1_right=0
p1_up=0
p1_down=0
elif event.key == K_d and cur_dir1 != "LEFT":
p1_right=1
prev_dir1 = cur_dir1
cur_dir1 = "RIGHT"
p1_up=0
p1_left=0
p1_down=0
elif event.key == K_ESCAPE:
pause()
if p_left:
move_left(snake, prev_dir,n_blocks)
elif p_right:
move_right(snake, prev_dir,n_blocks)
elif p_up:
move_up(snake, prev_dir,n_blocks)
elif p_down:
move_down(snake, prev_dir,n_blocks)
if p1_left:
move_left(snake1, prev_dir1,n_blocks1)
elif p1_right:
move_right(snake1, prev_dir1,n_blocks1)
elif p1_up:
move_up(snake1, prev_dir1,n_blocks1)
elif p1_down:
move_down(snake1, prev_dir1,n_blocks1)
#-----------------------------------------------------------------------
def move_min_horizontal(cur_dir, prev_dir):
if food.x>snake[0].x:
print cur_dir
if (food.x - snake[0].x) < (scr_width - food.x) + snake[0].x and cur_dir!="LEFT":
prev_dir = cur_dir
cur_dir = "RIGHT"
move_right(snake, prev_dir, n_blocks)
elif cur_dir!="RIGHT":
prev_dir = cur_dir
cur_dir = "LEFT"
move_left(snake, prev_dir, n_blocks)
else:
prev_dir = cur_dir
cur_dir="DOWN"
move_down(snake, prev_dir, n_blocks)
else:
print cur_dir
if (-food.x + snake[0].x) < (scr_width + food.x) - snake[0].x and cur_dir!="RIGHT":
prev_dir = cur_dir
cur_dir = "LEFT"
move_left(snake, prev_dir, n_blocks)
elif cur_dir!="LEFT":
prev_dir = cur_dir
cur_dir = "RIGHT"
move_right(snake, prev_dir, n_blocks)
else:
prev_dir = cur_dir
cur_dir = "DOWN"
move_down(snake, prev_dir, n_blocks)
return [cur_dir, prev_dir]
def move_min_vertical(cur_dir, prev_dir):
if food.y>snake[0].y:
if (food.y - snake[0].y) < (scr_height - food.y) + snake[0].y and cur_dir!="UP":
prev_dir = cur_dir
cur_dir = "DOWN"
move_down(snake, prev_dir, n_blocks)
elif cur_dir!="UP" and cur_dir!="DOWN":
prev_dir = cur_dir
cur_dir = "UP"
move_up(snake,prev_dir, n_blocks)
else:
prev_dir = cur_dir
cur_dir = "RIGHT"
move_right(snake, prev_dir, n_blocks)
else:
if (-food.y + snake[0].y) < (scr_height + food.y) - snake[0].y and cur_dir!="DOWN":
prev_dir = cur_dir
cur_dir = "UP"
move_up(snake,prev_dir, n_blocks)
elif cur_dir!="UP" and cur_dir!="DOWN":
prev_dir = cur_dir
cur_dir = "DOWN"
move_down(snake, prev_dir, n_blocks)
else:
prev_dir = cur_dir
cur_dir = "RIGHT"
move_right(snake, prev_dir, n_blocks)
return [cur_dir, prev_dir]
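# Sketch of the CPU strategy above (not part of the original comments): on each
# axis the helper compares the direct distance to the food against the
# wrap-around distance through the screen edge and steers the shorter way,
# falling back to a perpendicular move when the chosen turn would reverse the
# snake. For example, with scr_width=610, head x=50 and food x=500, the direct
# distance right is 450 but the wrap-around distance left is 610-500+50=160, so
# move_min_horizontal() turns the snake LEFT (or dodges DOWN if it is already
# heading RIGHT, since reversing is disallowed).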
def cpu_player():
p_right = 0
p_left = 0
p_up = 0
p_down = 0
cur_dir = ""
prev_dir = ""
global t
main_loop = True
init()
while main_loop:
draw("")
pygame.image.save(screen, "s.jpg")
if t==-1:
print cur_dir
game_over(snake,n_blocks,"")
if t > 0:
t-=1
elif t!=-1:
display_game_over_screen("")
for event in pygame.event.get():
if event.type ==pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
pause()
print food.x, food.y
print snake[0].x, snake[0].y
print "-----------"
if food.y == snake[0].y or food.x != snake[0].x:
cur_dir, prev_dir = move_min_horizontal(cur_dir, prev_dir)
else:
cur_dir, prev_dir = move_min_vertical(cur_dir, prev_dir)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from shutil import rmtree
import tempfile
import unittest
import numpy as np
from pyspark.ml.classification import DecisionTreeClassifier, LogisticRegression, \
MultilayerPerceptronClassifier, OneVsRest
from pyspark.ml.clustering import DistributedLDAModel, KMeans, LocalLDAModel, LDA, LDAModel
from pyspark.ml.fpm import FPGrowth
from pyspark.ml.linalg import Matrices, Vectors
from pyspark.ml.recommendation import ALS
from pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression
from pyspark.sql import Row
from pyspark.testing.mlutils import SparkSessionTestCase
class LogisticRegressionTest(SparkSessionTestCase):
def test_binomial_logistic_regression_with_bound(self):
df = self.spark.createDataFrame(
[(1.0, 1.0, Vectors.dense(0.0, 5.0)),
(0.0, 2.0, Vectors.dense(1.0, 2.0)),
(1.0, 3.0, Vectors.dense(2.0, 1.0)),
(0.0, 4.0, Vectors.dense(3.0, 3.0)), ], ["label", "weight", "features"])
lor = LogisticRegression(regParam=0.01, weightCol="weight",
lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),
upperBoundsOnIntercepts=Vectors.dense(0.0))
model = lor.fit(df)
self.assertTrue(
np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))
def test_multinomial_logistic_regression_with_bound(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lor = LogisticRegression(regParam=0.01,
lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),
upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))
model = lor.fit(df)
expected = [[4.593, 4.5516, 9.0099, 12.2904],
[1.0, 8.1093, 7.0, 10.0],
[3.041, 5.0, 8.0, 11.0]]
for i in range(0, len(expected)):
self.assertTrue(
np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))
self.assertTrue(
np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))
class MultilayerPerceptronClassifierTest(SparkSessionTestCase):
def test_raw_and_probability_prediction(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[4, 5, 4, 3],
blockSize=128, seed=123)
model = mlp.fit(df)
test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()
result = model.transform(test).head()
expected_prediction = 2.0
expected_probability = [0.0, 0.0, 1.0]
expected_rawPrediction = [-11.6081922998, -8.15827998691, 22.17757045]
        self.assertEqual(result.prediction, expected_prediction)
self.assertTrue(np.allclose(result.probability, expected_probability, atol=1E-4))
self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1E-4))
class OneVsRestTests(SparkSessionTestCase):
def test_copy(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
ovr1 = ovr.copy({lr.maxIter: 10})
self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
model = ovr.fit(df)
model1 = model.copy({model.predictionCol: "indexed"})
self.assertEqual(model1.getPredictionCol(), "indexed")
def test_output_columns(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, parallelism=1)
model = ovr.fit(df)
output = model.transform(df)
self.assertEqual(output.columns, ["label", "features", "rawPrediction", "prediction"])
def test_parallelism_doesnt_change_output(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=1)
modelPar1 = ovrPar1.fit(df)
ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=2)
modelPar2 = ovrPar2.fit(df)
for i, model in enumerate(modelPar1.models):
self.assertTrue(np.allclose(model.coefficients.toArray(),
modelPar2.models[i].coefficients.toArray(), atol=1E-4))
self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1E-4))
def test_support_for_weightCol(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),
(1.0, Vectors.sparse(2, [], []), 1.0),
(2.0, Vectors.dense(0.5, 0.5), 1.0)],
["label", "features", "weight"])
# classifier inherits hasWeightCol
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, weightCol="weight")
self.assertIsNotNone(ovr.fit(df))
# classifier doesn't inherit hasWeightCol
dt = DecisionTreeClassifier()
ovr2 = OneVsRest(classifier=dt, weightCol="weight")
self.assertIsNotNone(ovr2.fit(df))
class KMeansTests(SparkSessionTestCase):
def test_kmeans_cosine_distance(self):
data = [(Vectors.dense([1.0, 1.0]),), (Vectors.dense([10.0, 10.0]),),
(Vectors.dense([1.0, 0.5]),), (Vectors.dense([10.0, 4.4]),),
(Vectors.dense([-1.0, 1.0]),), (Vectors.dense([-100.0, 90.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=3, seed=1, distanceMeasure="cosine")
model = kmeans.fit(df)
result = model.transform(df).collect()
self.assertTrue(result[0].prediction == result[1].prediction)
self.assertTrue(result[2].prediction == result[3].prediction)
self.assertTrue(result[4].prediction == result[5].prediction)
class LDATest(SparkSessionTestCase):
def _compare(self, m1, m2):
"""
Temp method for comparing instances.
TODO: Replace with generic implementation once SPARK-14706 is merged.
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
if m1.isDefined(p):
self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
self.assertEqual(p.parent, m2.getParam(p.name).parent)
if isinstance(m1, LDAModel):
self.assertEqual(m1.vocabSize(), m2.vocabSize())
self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())
def test_persistence(self):
# Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
df = self.spark.createDataFrame([
[1, Vectors.dense([0.0, 1.0])],
[2, Vectors.sparse(2, {0: 1.0})],
], ["id", "features"])
# Fit model
lda = LDA(k=2, seed=1, optimizer="em")
distributedModel = lda.fit(df)
self.assertTrue(distributedModel.isDistributed())
localModel = distributedModel.toLocal()
self.assertFalse(localModel.isDistributed())
# Define paths
path = tempfile.mkdtemp()
lda_path = path + "/lda"
dist_model_path = path + "/distLDAModel"
local_model_path = path + "/localLDAModel"
# Test LDA
lda.save(lda_path)
lda2 = LDA.load(lda_path)
self._compare(lda, lda2)
# Test DistributedLDAModel
distributedModel.save(dist_model_path)
distributedModel2 = DistributedLDAModel.load(dist_model_path)
self._compare(distributedModel, distributedModel2)
# Test LocalLDAModel
localModel.save(local_model_path)
localModel2 = LocalLDAModel.load(local_model_path)
self._compare(localModel, localModel2)
# Clean up
try:
rmtree(path)
except OSError:
pass
class FPGrowthTests(SparkSessionTestCase):
def setUp(self):
super(FPGrowthTests, self).setUp()
self.data = self.spark.createDataFrame(
[([1, 2], ), ([1, 2], ), ([1, 2, 3], ), ([1, 3], )],
["items"])
def test_association_rules(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_association_rules = self.spark.createDataFrame(
[([3], [1], 1.0, 1.0), ([2], [1], 1.0, 1.0)],
["antecedent", "consequent", "confidence", "lift"]
)
actual_association_rules = fpm.associationRules
self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)
self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)
def test_freq_itemsets(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_freq_itemsets = self.spark.createDataFrame(
[([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],
["items", "freq"]
)
actual_freq_itemsets = fpm.freqItemsets
self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)
self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)
def tearDown(self):
del self.data
class ALSTest(SparkSessionTestCase):
def test_storage_levels(self):
df = self.spark.createDataFrame(
[(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
["user", "item", "rating"])
als = ALS().setMaxIter(1).setRank(1)
# test default params
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als.getFinalStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "MEMORY_AND_DISK")
# test non-default params
als.setIntermediateStorageLevel("MEMORY_ONLY_2")
als.setFinalStorageLevel("DISK_ONLY")
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als.getFinalStorageLevel(), "DISK_ONLY")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "DISK_ONLY")
class GeneralizedLinearRegressionTest(SparkSessionTestCase):
def test_tweedie_distribution(self):
df = self.spark.createDataFrame(
[(1.0, Vectors.dense(0.0, 0.0)),
(1.0, Vectors.dense(1.0, 2.0)),
(2.0, Vectors.dense(0.0, 0.0)),
(2.0, Vectors.dense(1.0, 1.0)), ], ["label", "features"])
glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.6)
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))
model2 = glr.setLinkPower(-1.0).fit(df)
self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))
self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))
def test_offset(self):
df = self.spark.createDataFrame(
[(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], ["label", "weight", "offset", "features"])
glr = GeneralizedLinearRegression(family="poisson", weightCol="weight", offsetCol="offset")
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],
atol=1E-4))
self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))
class LinearRegressionTest(SparkSessionTestCase):
def test_linear_regression_with_huber_loss(self):
data_path = "data/mllib/sample_linear_regression_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lir = LinearRegression(loss="huber", epsilon=2.0)
model = lir.fit(df)
expectedCoefficients = [0.136, 0.7648, -0.7761, 2.4236, 0.537,
1.2612, -0.333, -0.5694, -0.6311, 0.6053]
expectedIntercept = 0.1607
expectedScale = 9.758
self.assertTrue(
np.allclose(model.coefficients.toArray(), expectedCoefficients, atol=1E-3))
self.assertTrue(np.isclose(model.intercept, expectedIntercept, atol=1E-3))
self.assertTrue(np.isclose(model.scale, expectedScale, atol=1E-3))
if __name__ == "__main__":
from pyspark.ml.tests.test_algorithms import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
"""
SMlib.plugins
=================
Here, 'plugins' are widgets designed specifically for SM.
These plugins inherit the following classes
(SMPluginMixin & SMPluginWidget)
"""
from PyQt4.QtGui import (QDockWidget, QWidget, QShortcut, QCursor, QKeySequence, QMainWindow, QApplication)
from PyQt4.QtCore import SIGNAL, Qt, QObject, pyqtSignal
# Local imports
from SMlib.utils.qthelpers import toggle_actions, get_icon
from SMlib.config import CONF
from SMlib.configs.userconfig import NoDefault
from SMlib.configs.guiconfig import get_font, set_font
from SMlib.plugins.configdialog import SMConfigPage
class PluginConfigPage(SMConfigPage):
"""Plugin configuration dialog box page widget"""
def __init__(self, plugin, parent):
self.plugin = plugin
self.get_option = plugin.get_option
self.set_option = plugin.set_option
self.get_name = plugin.get_plugin_title
self.get_icon = plugin.get_plugin_icon
self.get_font = plugin.get_plugin_font
self.set_font = plugin.set_plugin_font
self.apply_settings = plugin.apply_plugin_settings
SMConfigPage.__init__(self, parent)
class SMPluginMixin(object):
"""
Useful methods to bind widgets to the main window
See SMPluginWidget class for required widget interface
Signals:
    sig_option_changed
        Example: plugin.sig_option_changed.emit('show_all', checked)
    show_message(QString, int)
"""
CONF_SECTION = None
CONFIGWIDGET_CLASS = None
ALLOWED_AREAS = Qt.AllDockWidgetAreas
LOCATION = Qt.LeftDockWidgetArea
FEATURES = QDockWidget.DockWidgetClosable | \
QDockWidget.DockWidgetFloatable | \
QDockWidget.DockWidgetMovable
DISABLE_ACTIONS_WHEN_HIDDEN = True
sig_option_changed = None
def __init__(self, main):
"""Bind widget to a QMainWindow instance"""
super(SMPluginMixin, self).__init__()
assert self.CONF_SECTION is not None
self.main = main
self.default_margins = None
self.plugin_actions = None
self.dockwidget = None
self.mainwindow = None
self.ismaximized = False
self.isvisible = False
def initialize_plugin(self):
"""Initialize plugin: connect signals, setup actions, ..."""
self.plugin_actions = self.get_plugin_actions()
QObject.connect(self, SIGNAL('show_message(QString,int)'),
self.show_message)
QObject.connect(self, SIGNAL('update_plugin_title()'),
self.__update_plugin_title)
if self.sig_option_changed is not None:
self.sig_option_changed.connect(self.set_option)
self.setWindowTitle(self.get_plugin_title())
def on_first_registration(self):
"""Action to be performed on first plugin registration"""
# Was written to handle the very first plugin position in Spyder's
# main window layout, but this could also be used for other things
# (see for example the IPython console plugin for which this method
# had to be written to handle the fact that this plugin was
# introduced between v2.1 and v2.2)
raise NotImplementedError
def initialize_plugin_in_mainwindow_layout(self):
"""If this is the first time the plugin is shown, perform actions to
initialize plugin position in Spyder's window layout"""
if self.get_option('first_time', True):
try:
self.on_first_registration()
except NotImplementedError:
return
self.set_option('first_time', False)
def update_margins(self):
layout = self.layout()
if self.default_margins is None:
self.default_margins = layout.getContentsMargins()
if CONF.get('main', 'use_custom_margin', True):
margin = CONF.get('main', 'custom_margin', 0)
layout.setContentsMargins(*[margin]*4)
else:
layout.setContentsMargins(*self.default_margins)
def __update_plugin_title(self):
"""Update plugin title, i.e. dockwidget or mainwindow title"""
if self.dockwidget is not None:
win = self.dockwidget
elif self.mainwindow is not None:
win = self.mainwindow
else:
return
win.setWindowTitle(self.get_plugin_title())
def create_dockwidget(self):
"""Add to parent QMainWindow as a dock widget"""
# It is not yet clear why the following does not work...
# (see Issue #880)
## # Using Qt.Window window flags solves Issue #880 (detached dockwidgets
## # are not painted after restarting Spyder and restoring their hexstate)
## # but it does not work with PyQt <=v4.7 (dockwidgets can't be docked)
## # or non-Windows platforms (lot of warnings are printed out)
## # (so in those cases, we use the default window flags: Qt.Widget):
## flags = Qt.Widget if is_old_pyqt or os.name != 'nt' else Qt.Window
dock = QDockWidget(self.get_plugin_title(), self.main)#, flags)
dock.setObjectName(self.__class__.__name__+"_dw")
dock.setAllowedAreas(self.ALLOWED_AREAS)
dock.setFeatures(self.FEATURES)
dock.setWidget(self)
self.update_margins()
self.connect(dock, SIGNAL('visibilityChanged(bool)'),
self.visibility_changed)
self.dockwidget = dock
short = self.get_option("shortcut", None)
if short is not None:
shortcut = QShortcut(QKeySequence(short),
self.main, self.switch_to_plugin)
self.register_shortcut(shortcut, "_",
"Switch to %s" % self.CONF_SECTION,
default=short)
return (dock, self.LOCATION)
def create_mainwindow(self):
"""
Create a QMainWindow instance containing this plugin
Note: this method is currently not used
"""
self.mainwindow = mainwindow = QMainWindow()
mainwindow.setAttribute(Qt.WA_DeleteOnClose)
icon = self.get_plugin_icon()
if isinstance(icon, basestring):
icon = get_icon(icon)
mainwindow.setWindowIcon(icon)
mainwindow.setWindowTitle(self.get_plugin_title())
mainwindow.setCentralWidget(self)
self.refresh_plugin()
return mainwindow
def create_configwidget(self, parent):
"""Create configuration dialog box page widget"""
if self.CONFIGWIDGET_CLASS is not None:
configwidget = self.CONFIGWIDGET_CLASS(self, parent)
configwidget.initialize()
return configwidget
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
raise NotImplementedError
def register_shortcut(self, qaction_or_qshortcut, context, name,
default=NoDefault):
"""
Register QAction or QShortcut to SM main application,
with shortcut (context, name, default)
"""
self.main.register_shortcut(qaction_or_qshortcut,
context, name, default)
def register_widget_shortcuts(self, context, widget):
"""
Register widget shortcuts
widget interface must have a method called 'get_shortcut_data'
"""
for qshortcut, name, default in widget.get_shortcut_data():
self.register_shortcut(qshortcut, context, name, default)
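# Hedged sketch (not part of the original code): based on the loop above,
# get_shortcut_data() is assumed to return an iterable of
# (QShortcut, name, default) tuples, e.g.
#     def get_shortcut_data(self):
#         return [(self.find_next_sc, "find next", "F3"),
#                 (self.find_previous_sc, "find previous", "Shift+F3")]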
def switch_to_plugin(self):
"""Switch to plugin
This method is called when pressing plugin's shortcut key"""
if not self.ismaximized:
self.dockwidget.show()
self.visibility_changed(True)
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
if enable:
self.dockwidget.raise_()
widget = self.get_focus_widget()
if widget is not None:
widget.setFocus()
visible = self.dockwidget.isVisible() or self.ismaximized
if self.DISABLE_ACTIONS_WHEN_HIDDEN:
toggle_actions(self.plugin_actions, visible)
self.isvisible = enable and visible
if self.isvisible:
self.refresh_plugin() # To give focus to the plugin's widget
def set_option(self, option, value):
"""
Set a plugin option in configuration file
Use a SIGNAL to call it, e.g.:
plugin.sig_option_changed.emit('show_all', checked)
"""
CONF.set(self.CONF_SECTION, str(option), value)
def get_option(self, option, default=NoDefault):
"""Get a plugin option from configuration file"""
return CONF.get(self.CONF_SECTION, option, default)
def get_plugin_font(self, option=None):
"""Return plugin font option"""
return get_font(self.CONF_SECTION, option)
def set_plugin_font(self, font, option=None):
"""Set plugin font option"""
set_font(font, self.CONF_SECTION, option)
def show_message(self, message, timeout=0):
"""Show message in main window's status bar"""
self.main.statusBar().showMessage(message, timeout)
def starting_long_process(self, message):
"""
Show a message in the main window's status bar
and change the mouse cursor to Qt.WaitCursor
"""
self.show_message(message)
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
def ending_long_process(self, message=""):
"""
Clear the main window's status bar
and restore the mouse cursor
"""
QApplication.restoreOverrideCursor()
self.show_message(message, timeout=2000)
QApplication.processEvents()
def set_default_color_scheme(self, name='SM'):
"""Set default color scheme (only once)"""
color_scheme_name = self.get_option('color_scheme_name', None)
if color_scheme_name is None:
names = CONF.get("color_schemes", "names")
if name not in names:
name = names[0]
self.set_option('color_scheme_name', name)
class SMPluginWidget(QWidget, SMPluginMixin):
"""
SM base widget class
SM's widgets either inherit this class or reimplement its interface
"""
#sig_option_changed = Signal(str, object)
sig_option_changed = pyqtSignal(str, object)
def __init__(self, parent):
QWidget.__init__(self, parent)
SMPluginMixin.__init__(self, parent)
def get_plugin_title(self):
"""
Return plugin title
Note: after some thinking, it appears that using a method
is more flexible here than using a class attribute
"""
raise NotImplementedError
def get_plugin_icon(self):
"""
Return plugin icon (QIcon instance)
Note: this is required for plugins creating a main window
(see SMPluginMixin.create_mainwindow)
and for configuration dialog widgets creation
"""
return get_icon('qt.png')
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
pass
def closing_plugin(self, cancelable=False):
"""
Perform actions before parent main window is closed
Return True if the plugin may be closed immediately, False otherwise
Note: returned value is ignored if *cancelable* is False
"""
raise NotImplementedError
def refresh_plugin(self):
"""Refresh widget"""
raise NotImplementedError
def get_plugin_actions(self):
"""
Return a list of actions related to plugin
Note: these actions will be enabled when plugin's dockwidget is visible
and they will be disabled when it's hidden
"""
raise NotImplementedError
def register_plugin(self):
"""Register plugin in Spyder's main window"""
raise NotImplementedError
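# Illustrative sketch, not part of the original module: a minimal concrete
# plugin implementing the abstract SMPluginWidget interface above.  The class
# name, the CONF_SECTION value and the main window's add_dockwidget() helper
# are assumptions made for illustration only.
class ExamplePluginSketch(SMPluginWidget):
    CONF_SECTION = 'example_plugin'

    def get_plugin_title(self):
        return "Example plugin"

    def get_plugin_actions(self):
        # No toolbar or menu actions in this sketch
        return []

    def refresh_plugin(self):
        pass

    def closing_plugin(self, cancelable=False):
        return True

    def register_plugin(self):
        # add_dockwidget() is an assumed main-window helper; adapt it to the
        # real SM main window API
        self.main.add_dockwidget(self)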
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RFC 2328 OSPF version 2
"""
import six
import struct
try:
# Python 3
from functools import reduce
except ImportError:
# Python 2
pass
from ryu.lib.stringify import StringifyMixin
from ryu.lib.packet import packet_base
from ryu.lib.packet import packet_utils
from ryu.lib.packet import stream_parser
from ryu.lib import addrconv
import logging
_VERSION = 2
OSPF_MSG_UNKNOWN = 0
OSPF_MSG_HELLO = 1
OSPF_MSG_DB_DESC = 2
OSPF_MSG_LS_REQ = 3
OSPF_MSG_LS_UPD = 4
OSPF_MSG_LS_ACK = 5
OSPF_UNKNOWN_LSA = 0
OSPF_ROUTER_LSA = 1
OSPF_NETWORK_LSA = 2
OSPF_SUMMARY_LSA = 3
OSPF_ASBR_SUMMARY_LSA = 4
OSPF_AS_EXTERNAL_LSA = 5
OSPF_AS_NSSA_LSA = 7 # RFC 3101
OSPF_OPAQUE_LINK_LSA = 9 # RFC 5250
OSPF_OPAQUE_AREA_LSA = 10 # RFC 5250
OSPF_OPAQUE_AS_LSA = 11 # RFC 5250
OSPF_OPTION_T = 1 # Obsolete
OSPF_OPTION_E = 1 << 1 # RFC 2328
OSPF_OPTION_MC = 1 << 2 # RFC 1584
OSPF_OPTION_NP = 1 << 3 # RFC 3101
OSPF_OPTION_EA = 1 << 4 # Obsolete
OSPF_OPTION_DC = 1 << 5 # RFC 2370
OSPF_OPTION_DN = 1 << 7 # RFC 2567
LSA_LINK_TYPE_P2P = 1
LSA_LINK_TYPE_TRANSIT = 2
LSA_LINK_TYPE_STUB = 3
LSA_LINK_TYPE_VL = 4
ROUTER_LSA_BORDER = 0x01 # The router is an ABR
ROUTER_LSA_EXTERNAL = 0x02 # The router is an ASBR
ROUTER_LSA_VIRTUAL = 0x04 # The router has a VL in this area
ROUTER_LSA_NT = 0x10 # The router always translates Type-7
ROUTER_LSA_SHORTCUT = 0x20 # Shortcut-ABR specific flag
AS_EXTERNAL_METRIC = 0x80
OSPF_OPAQUE_TYPE_UNKNOWN = 0
OSPF_OPAQUE_TYPE_EXTENDED_PREFIX_LSA = 7
OSPF_OPAQUE_TYPE_EXTENDED_LINK_LSA = 8
OSPF_EXTENDED_PREFIX_TLV = 1
OSPF_EXTENDED_PREFIX_SID_SUBTLV = 2
class InvalidChecksum(Exception):
pass
class _TypeDisp(object):
_TYPES = {}
_REV_TYPES = None
_UNKNOWN_TYPE = None
@classmethod
def register_unknown_type(cls):
def _register_type(subcls):
cls._UNKNOWN_TYPE = subcls
return subcls
return _register_type
@classmethod
def register_type(cls, type_):
cls._TYPES = cls._TYPES.copy()
def _register_type(subcls):
cls._TYPES[type_] = subcls
cls._REV_TYPES = None
return subcls
return _register_type
@classmethod
def _lookup_type(cls, type_):
try:
return cls._TYPES[type_]
except KeyError:
return cls._UNKNOWN_TYPE
@classmethod
def _rev_lookup_type(cls, targ_cls):
if cls._REV_TYPES is None:
rev = dict((v, k) for k, v in cls._TYPES.items())
cls._REV_TYPES = rev
return cls._REV_TYPES[targ_cls]
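# Note added for clarity (not in the original source): _TypeDisp gives each
# family of classes a registry keyed by its on-wire type code, so that, e.g.,
#     LSA._lookup_type(OSPF_ROUTER_LSA)  -> RouterLSA
#     LSA._rev_lookup_type(RouterLSA)    -> OSPF_ROUTER_LSA
# Type codes that were never registered fall back to the class registered via
# register_unknown_type() (None if nothing was registered).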
class LSAHeader(StringifyMixin):
_HDR_PACK_STR = '!HBB4s4sIHH'
_HDR_LEN = struct.calcsize(_HDR_PACK_STR)
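# Field layout of _HDR_PACK_STR ('!HBB4s4sIHH'), added as a reading aid:
#   H  ls_age      B  options    B  type_
#   4s id_ (for opaque LSAs: opaque_type in the top byte, opaque_id below)
#   4s adv_router  I  ls_seqnum  H  checksum  H  length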
def __init__(self, ls_age=0, options=0, type_=OSPF_UNKNOWN_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=0, length=0, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN,
opaque_id=0):
self.ls_age = ls_age
self.options = options
self.type_ = type_
if self.type_ < OSPF_OPAQUE_LINK_LSA:
self.id_ = id_
else:
self.opaque_type = opaque_type
self.opaque_id = opaque_id
self.adv_router = adv_router
self.ls_seqnum = ls_seqnum
self.checksum = checksum
self.length = length
@classmethod
def parser(cls, buf):
if len(buf) < cls._HDR_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._HDR_LEN))
(ls_age, options, type_, id_, adv_router, ls_seqnum, checksum,
length,) = struct.unpack_from(cls._HDR_PACK_STR, six.binary_type(buf))
adv_router = addrconv.ipv4.bin_to_text(adv_router)
rest = buf[cls._HDR_LEN:]
lsacls = LSA._lookup_type(type_)
value = {
"ls_age": ls_age,
"options": options,
"type_": type_,
"adv_router": adv_router,
"ls_seqnum": ls_seqnum,
"checksum": checksum,
"length": length,
}
if issubclass(lsacls, OpaqueLSA):
(id_,) = struct.unpack_from('!I', id_)
value['opaque_type'] = (id_ & 0xff000000) >> 24
value['opaque_id'] = (id_ & 0xffffff)
else:
value['id_'] = addrconv.ipv4.bin_to_text(id_)
return value, rest
def serialize(self):
if self.type_ < OSPF_OPAQUE_LINK_LSA:
id_ = addrconv.ipv4.text_to_bin(self.id_)
else:
id_ = (self.opaque_type << 24) + self.opaque_id
(id_,) = struct.unpack_from('4s', struct.pack('!I', id_))
adv_router = addrconv.ipv4.text_to_bin(self.adv_router)
return bytearray(struct.pack(self._HDR_PACK_STR, self.ls_age,
self.options, self.type_, id_, adv_router,
self.ls_seqnum, self.checksum, self.length))
class LSA(_TypeDisp, StringifyMixin):
def __init__(self, ls_age=0, options=0, type_=OSPF_UNKNOWN_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=0, length=0, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN,
opaque_id=0):
if type_ < OSPF_OPAQUE_LINK_LSA:
self.header = LSAHeader(ls_age, options, type_, id_, adv_router,
ls_seqnum, 0, 0)
else:
self.header = LSAHeader(ls_age, options, type_, 0, adv_router,
ls_seqnum, 0, 0, opaque_type, opaque_id)
if not (checksum or length):
tail = self.serialize_tail()
length = self.header._HDR_LEN + len(tail)
if not checksum:
head = self.header.serialize()
checksum = packet_utils.fletcher_checksum(head[2:], 14)
self.header.length = length
self.header.checksum = checksum
@classmethod
def parser(cls, buf):
hdr, rest = LSAHeader.parser(buf)
if len(buf) < hdr['length']:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), hdr['length']))
# exclude ls_age for checksum calculation
csum = packet_utils.fletcher_checksum(buf[2:hdr['length']], 14)
if csum != hdr['checksum']:
raise InvalidChecksum("header has %d, but calculated value is %d"
% (hdr['checksum'], csum))
subcls = cls._lookup_type(hdr['type_'])
body = rest[:hdr['length'] - LSAHeader._HDR_LEN]
rest = rest[hdr['length'] - LSAHeader._HDR_LEN:]
if issubclass(subcls, OpaqueLSA):
kwargs = subcls.parser(body, hdr['opaque_type'])
else:
kwargs = subcls.parser(body)
kwargs.update(hdr)
return subcls(**kwargs), subcls, rest
def serialize(self):
tail = self.serialize_tail()
self.header.length = self.header._HDR_LEN + len(tail)
head = self.header.serialize()
# exclude ls_age for checksum calculation
csum = packet_utils.fletcher_checksum(head[2:] + tail, 14)
self.header.checksum = csum
struct.pack_into("!H", head, 16, csum)
return head + tail
@LSA.register_type(OSPF_ROUTER_LSA)
class RouterLSA(LSA):
_PACK_STR = '!BBH'
_PACK_LEN = struct.calcsize(_PACK_STR) # 4bytes
class Link(StringifyMixin):
_PACK_STR = '!4s4sBBH'
_PACK_LEN = struct.calcsize(_PACK_STR) # 12bytes
def __init__(self, id_='0.0.0.0', data='0.0.0.0',
type_=LSA_LINK_TYPE_STUB, tos=0, metric=10):
self.id_ = id_
self.data = data
self.type_ = type_
self.tos = tos
self.metric = metric
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
link = buf[:cls._PACK_LEN]
rest = buf[cls._PACK_LEN:]
(id_, data, type_, tos, metric) = \
struct.unpack_from(cls._PACK_STR, six.binary_type(link))
id_ = addrconv.ipv4.bin_to_text(id_)
data = addrconv.ipv4.bin_to_text(data)
return cls(id_, data, type_, tos, metric), rest
def serialize(self):
id_ = addrconv.ipv4.text_to_bin(self.id_)
data = addrconv.ipv4.text_to_bin(self.data)
return bytearray(struct.pack(self._PACK_STR, id_, data, self.type_,
self.tos, self.metric))
def __init__(self, ls_age=0, options=0, type_=OSPF_ROUTER_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, flags=0, links=None):
links = links if links else []
self.flags = flags
self.links = links
super(RouterLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
links = []
hdr = buf[:cls._PACK_LEN]
buf = buf[cls._PACK_LEN:]
(flags, padding, num) = struct.unpack_from(cls._PACK_STR,
six.binary_type(hdr))
while buf:
link, buf = cls.Link.parser(buf)
links.append(link)
assert(len(links) == num)
return {
"flags": flags,
"links": links,
}
def serialize_tail(self):
head = bytearray(struct.pack(self._PACK_STR, self.flags, 0,
len(self.links)))
try:
return head + reduce(lambda a, b: a + b,
(link.serialize() for link in self.links))
except TypeError:
return head
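# Hedged usage sketch (not part of the original module): build a Router-LSA
# with a single stub link and parse it back.  The addresses and metric below
# are illustrative values only.
def _example_router_lsa_roundtrip():
    link = RouterLSA.Link(id_='10.0.0.0', data='255.255.255.0',
                          type_=LSA_LINK_TYPE_STUB, metric=1)
    lsa = RouterLSA(adv_router='192.0.2.1', id_='192.0.2.1', links=[link])
    parsed, _subcls, _rest = LSA.parser(lsa.serialize())
    return parsed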
@LSA.register_type(OSPF_NETWORK_LSA)
class NetworkLSA(LSA):
_PACK_STR = '!4s'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, ls_age=0, options=0, type_=OSPF_NETWORK_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, mask='0.0.0.0', routers=None):
routers = routers if routers else []
self.mask = mask
self.routers = routers
super(NetworkLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
binmask = buf[:cls._PACK_LEN]
(mask,) = struct.unpack_from(cls._PACK_STR, six.binary_type(binmask))
mask = addrconv.ipv4.bin_to_text(mask)
buf = buf[cls._PACK_LEN:]
routers = []
while buf:
binrouter = buf[:cls._PACK_LEN]
(router,) = struct.unpack_from(cls._PACK_STR,
six.binary_type(binrouter))
router = addrconv.ipv4.bin_to_text(router)
routers.append(router)
buf = buf[cls._PACK_LEN:]
return {
"mask": mask,
"routers": routers,
}
def serialize_tail(self):
mask = addrconv.ipv4.text_to_bin(self.mask)
routers = [addrconv.ipv4.text_to_bin(
router) for router in self.routers]
return bytearray(struct.pack("!" + "4s" * (1 + len(routers)), mask,
*routers))
@LSA.register_type(OSPF_SUMMARY_LSA)
class SummaryLSA(LSA):
_PACK_STR = '!4sBBH'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, ls_age=0, options=0, type_=OSPF_SUMMARY_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, mask='0.0.0.0', tos=0, metric=0):
self.mask = mask
self.tos = tos
self.metric = metric
super(SummaryLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
buf = buf[:cls._PACK_LEN]
(mask, tos, metric_fst, metric_lst) = struct.unpack_from(
cls._PACK_STR, six.binary_type(buf))
mask = addrconv.ipv4.bin_to_text(mask)
metric = metric_fst << 16 | (metric_lst & 0xffff)
return {
"mask": mask,
"tos": tos,
"metric": metric,
}
def serialize_tail(self):
mask = addrconv.ipv4.text_to_bin(self.mask)
metric_fst = (self.metric >> 16) & 0xff
metric_lst = self.metric & 0xffff
# Pack the 24-bit metric as a high byte plus a low 16-bit word, matching the
# '!4sBBH' layout unpacked in parser().
return bytearray(struct.pack(self._PACK_STR, mask, self.tos,
                             metric_fst, metric_lst))
@LSA.register_type(OSPF_ASBR_SUMMARY_LSA)
class ASBRSummaryLSA(LSA):
pass
@LSA.register_type(OSPF_AS_EXTERNAL_LSA)
class ASExternalLSA(LSA):
class ExternalNetwork(StringifyMixin):
_PACK_STR = '!4sBBH4sI'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, mask='0.0.0.0', flags=0, metric=0,
fwd_addr='0.0.0.0', tag=0):
self.mask = mask
self.flags = flags
self.metric = metric
self.fwd_addr = fwd_addr
self.tag = tag
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
ext_nw = buf[:cls._PACK_LEN]
rest = buf[cls._PACK_LEN:]
(mask, flags, metric_fst, metric_lst, fwd_addr,
tag) = struct.unpack_from(cls._PACK_STR, six.binary_type(ext_nw))
mask = addrconv.ipv4.bin_to_text(mask)
metric = metric_fst << 16 | (metric_lst & 0xffff)
fwd_addr = addrconv.ipv4.bin_to_text(fwd_addr)
return cls(mask, flags, metric, fwd_addr, tag), rest
def serialize(self):
mask = addrconv.ipv4.text_to_bin(self.mask)
metric_fst = (self.metric >> 16) & 0xff
metric_lst = self.metric & 0xffff
fwd_addr = addrconv.ipv4.text_to_bin(self.fwd_addr)
return bytearray(struct.pack(self._PACK_STR, mask, self.flags,
metric_fst, metric_lst, fwd_addr,
self.tag))
def __init__(self, ls_age=0, options=0, type_=OSPF_AS_EXTERNAL_LSA,
id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0,
checksum=None, length=None, extnws=None):
extnws = extnws if extnws else []
self.extnws = extnws
super(ASExternalLSA, self).__init__(ls_age, options, type_, id_,
adv_router, ls_seqnum, checksum,
length)
@classmethod
def parser(cls, buf):
extnws = []
while buf:
extnw, buf = cls.ExternalNetwork.parser(buf)
extnws.append(extnw)
return {
"extnws": extnws,
}
def serialize_tail(self):
return reduce(lambda a, b: a + b,
(extnw.serialize() for extnw in self.extnws))
@LSA.register_type(OSPF_AS_NSSA_LSA)
class NSSAExternalLSA(LSA):
pass
class ExtendedPrefixTLV(StringifyMixin, _TypeDisp):
pass
@ExtendedPrefixTLV.register_type(OSPF_EXTENDED_PREFIX_TLV)
class ExtendedPrefixTLV(ExtendedPrefixTLV):
_VALUE_PACK_STR = '!HHBBBB4s'
_VALUE_PACK_LEN = struct.calcsize(_VALUE_PACK_STR)
_VALUE_FIELDS = ['route_type', 'prefix_length', 'address_family', '_pad',
                 'prefix']
def __init__(self, type_=OSPF_EXTENDED_PREFIX_TLV, length=0, route_type=0,
address_family=0, prefix='0.0.0.0/0'):
self.type_ = type_
self.length = length
self.route_type = route_type
self.address_family = address_family
self.prefix = prefix
@classmethod
def parser(cls, buf):
rest = buf[cls._VALUE_PACK_LEN:]
buf = buf[:cls._VALUE_PACK_LEN]
(type_, length, route_type, prefix_length, address_family, _pad,
prefix) = struct.unpack_from(cls._VALUE_PACK_STR, buf)
prefix = addrconv.ipv4.bin_to_text(prefix)
prefix = "%s/%d" % (prefix, prefix_length)
return cls(type_, length, route_type, address_family, prefix), rest
def serialize(self):
prefix, prefix_length = self.prefix.split('/')
prefix = addrconv.ipv4.text_to_bin(prefix)
prefix_length = int(prefix_length)
return struct.pack(self._VALUE_PACK_STR, OSPF_EXTENDED_PREFIX_TLV,
self._VALUE_PACK_LEN - 4, self.route_type,
prefix_length, self.address_family, 0, prefix)
@ExtendedPrefixTLV.register_type(OSPF_EXTENDED_PREFIX_SID_SUBTLV)
class PrefixSIDSubTLV(ExtendedPrefixTLV):
_VALUE_PACK_STR = '!HHBBBBHHI'
_VALUE_PACK_LEN = struct.calcsize(_VALUE_PACK_STR)
_VALUE_FIELDS = ['flags', 'mt_id', 'algorithm', '_pad', 'range_size',
'_pad', 'index']
def __init__(self, type_=OSPF_EXTENDED_PREFIX_SID_SUBTLV, length=0,
flags=0, mt_id=0, algorithm=0, range_size=0, index=0):
self.type_ = type_
self.length = length
self.flags = flags
self.mt_id = mt_id
self.algorithm = algorithm
self.range_size = range_size
self.index = index
@classmethod
def parser(cls, buf):
rest = buf[cls._VALUE_PACK_LEN:]
buf = buf[:cls._VALUE_PACK_LEN]
(type_, length, flags, mt_id, algorithm, _pad, range_size, _pad,
index) = struct.unpack_from(cls._VALUE_PACK_STR, buf)
return cls(type_, length, flags, mt_id, algorithm, range_size,
index), rest
def serialize(self):
return struct.pack(self._VALUE_PACK_STR,
OSPF_EXTENDED_PREFIX_SID_SUBTLV,
self._VALUE_PACK_LEN - 4, self.flags, self.mt_id,
self.algorithm, 0, self.range_size, 0, self.index)
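# Note added for clarity (not in the original source): in the two serialize()
# methods above, the TLV length field is written as _VALUE_PACK_LEN - 4, i.e.
# the length of the value portion only, excluding the 4-byte type/length
# header.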
class OpaqueBody(StringifyMixin, _TypeDisp):
def __init__(self, tlvs=None):
tlvs = tlvs if tlvs else []
self.tlvs = tlvs
def serialize(self):
return reduce(lambda a, b: a + b,
(tlv.serialize() for tlv in self.tlvs))
@OpaqueBody.register_type(OSPF_OPAQUE_TYPE_EXTENDED_PREFIX_LSA)
class ExtendedPrefixOpaqueBody(OpaqueBody):
@classmethod
def parser(cls, buf):
buf = six.binary_type(buf)
tlvs = []
while buf:
(type_, length) = struct.unpack_from('!HH', buf)
if len(buf[struct.calcsize('!HH'):]) < length:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), length))
tlvcls = ExtendedPrefixTLV._lookup_type(type_)
if tlvcls:
tlv, buf = tlvcls.parser(buf)
tlvs.append(tlv)
return cls(tlvs)
@OpaqueBody.register_type(OSPF_OPAQUE_TYPE_EXTENDED_LINK_LSA)
class ExtendedLinkOpaqueBody(OpaqueBody):
@classmethod
def parser(cls, buf):
buf = six.binary_type(buf)
tlvs = []
while buf:
(type_, length) = struct.unpack_from('!HH', buf)
if len(buf[struct.calcsize('!HH'):]) < length:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), length))
tlvcls = ExtendedLinkTLV._lookup_type(type_)
if tlvcls:
tlv, buf = tlvcls.parser(buf)
tlvs.append(tlv)
return cls(tlvs)
class OpaqueLSA(LSA):
@classmethod
def parser(cls, buf, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN):
opaquecls = OpaqueBody._lookup_type(opaque_type)
if opaquecls:
data = opaquecls.parser(buf)
else:
data = buf
return {'data': data}
def serialize_tail(self):
if isinstance(self.data, OpaqueBody):
return self.data.serialize()
else:
return self.data
@LSA.register_type(OSPF_OPAQUE_LINK_LSA)
class LocalOpaqueLSA(OpaqueLSA):
def __init__(self, ls_age=0, options=0, type_=OSPF_OPAQUE_LINK_LSA,
adv_router='0.0.0.0', ls_seqnum=0, checksum=0, length=0,
opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN, opaque_id=0, data=None):
self.data = data
super(LocalOpaqueLSA, self).__init__(ls_age, options, type_, 0,
adv_router, ls_seqnum, checksum,
length, opaque_type, opaque_id)
@LSA.register_type(OSPF_OPAQUE_AREA_LSA)
class AreaOpaqueLSA(OpaqueLSA):
def __init__(self, ls_age=0, options=0, type_=OSPF_OPAQUE_AREA_LSA,
adv_router='0.0.0.0', ls_seqnum=0, checksum=0, length=0,
opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN, opaque_id=0, data=None):
self.data = data
super(AreaOpaqueLSA, self).__init__(ls_age, options, type_, 0,
adv_router, ls_seqnum, checksum,
length, opaque_type, opaque_id)
@LSA.register_type(OSPF_OPAQUE_AS_LSA)
class ASOpaqueLSA(OpaqueLSA):
def __init__(self, ls_age=0, options=0, type_=OSPF_OPAQUE_AS_LSA,
adv_router='0.0.0.0', ls_seqnum=0, checksum=0, length=0,
opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN, opaque_id=0, data=None):
self.data = data
super(ASOpaqueLSA, self).__init__(ls_age, options, type_, 0,
adv_router, ls_seqnum, checksum,
length, opaque_type, opaque_id)
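# Hedged sketch (not part of the original module): wrapping an extended-prefix
# TLV in an area-scoped opaque LSA.  All field values are illustrative only.
def _example_extended_prefix_lsa():
    tlv = ExtendedPrefixTLV(route_type=1, address_family=0,
                            prefix='198.51.100.0/24')
    body = ExtendedPrefixOpaqueBody(tlvs=[tlv])
    return AreaOpaqueLSA(adv_router='192.0.2.1',
                         opaque_type=OSPF_OPAQUE_TYPE_EXTENDED_PREFIX_LSA,
                         opaque_id=1, data=body)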
class OSPFMessage(packet_base.PacketBase, _TypeDisp):
"""Base class for OSPF version 2 messages.
"""
_HDR_PACK_STR = '!BBH4s4sHHQ'
_HDR_LEN = struct.calcsize(_HDR_PACK_STR)
def __init__(self, type_, length=None, router_id='0.0.0.0',
area_id='0.0.0.0', au_type=1, authentication=0, checksum=None,
version=_VERSION):
self.version = version
self.type_ = type_
self.length = length
self.router_id = router_id
self.area_id = area_id
self.checksum = checksum
self.au_type = au_type
self.authentication = authentication
@classmethod
def _parser(cls, buf):
if len(buf) < cls._HDR_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._HDR_LEN))
(version, type_, length, router_id, area_id, checksum, au_type,
authentication) = struct.unpack_from(cls._HDR_PACK_STR,
six.binary_type(buf))
# Exclude checksum and authentication field for checksum validation.
if packet_utils.checksum(buf[:12] + buf[14:16] + buf[cls._HDR_LEN:]) \
!= checksum:
raise InvalidChecksum
if len(buf) < length:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), length))
router_id = addrconv.ipv4.bin_to_text(router_id)
area_id = addrconv.ipv4.bin_to_text(area_id)
binmsg = buf[cls._HDR_LEN:length]
rest = buf[length:]
subcls = cls._lookup_type(type_)
kwargs = subcls.parser(binmsg)
return subcls(length, router_id, area_id, au_type, int(authentication),
checksum, version, **kwargs), None, rest
@classmethod
def parser(cls, buf):
try:
return cls._parser(buf)
except:
return None, None, buf
def serialize(self, payload=None, prev=None):
tail = self.serialize_tail()
self.length = self._HDR_LEN + len(tail)
head = bytearray(struct.pack(self._HDR_PACK_STR, self.version,
self.type_, self.length,
addrconv.ipv4.text_to_bin(self.router_id),
addrconv.ipv4.text_to_bin(self.area_id), 0,
self.au_type, self.authentication))
buf = head + tail
csum = packet_utils.checksum(buf[:12] + buf[14:16] +
buf[self._HDR_LEN:])
self.checksum = csum
struct.pack_into("!H", buf, 12, csum)
return buf
# alias
ospf = OSPFMessage
@OSPFMessage.register_type(OSPF_MSG_HELLO)
class OSPFHello(OSPFMessage):
_PACK_STR = '!4sHBBI4s4s' # + neighbors
_PACK_LEN = struct.calcsize(_PACK_STR)
_MIN_LEN = OSPFMessage._HDR_LEN + _PACK_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
mask='0.0.0.0', hello_interval=10, options=0, priority=1,
dead_interval=40, designated_router='0.0.0.0',
backup_router='0.0.0.0', neighbors=None):
neighbors = neighbors if neighbors else []
super(OSPFHello, self).__init__(OSPF_MSG_HELLO, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.mask = mask
self.hello_interval = hello_interval
self.options = options
self.priority = priority
self.dead_interval = dead_interval
self.designated_router = designated_router
self.backup_router = backup_router
self.neighbors = neighbors
@classmethod
def parser(cls, buf):
(mask, hello_interval, options, priority, dead_interval,
designated_router, backup_router) = struct.unpack_from(cls._PACK_STR,
six.binary_type(buf))
mask = addrconv.ipv4.bin_to_text(mask)
designated_router = addrconv.ipv4.bin_to_text(designated_router)
backup_router = addrconv.ipv4.bin_to_text(backup_router)
neighbors = []
binneighbors = buf[cls._PACK_LEN:len(buf)]
while binneighbors:
n = binneighbors[:4]
n = addrconv.ipv4.bin_to_text(six.binary_type(n))
binneighbors = binneighbors[4:]
neighbors.append(n)
return {
"mask": mask,
"hello_interval": hello_interval,
"options": options,
"priority": priority,
"dead_interval": dead_interval,
"designated_router": designated_router,
"backup_router": backup_router,
"neighbors": neighbors,
}
def serialize_tail(self):
head = bytearray(struct.pack(self._PACK_STR,
addrconv.ipv4.text_to_bin(self.mask),
self.hello_interval, self.options, self.priority,
self.dead_interval,
addrconv.ipv4.text_to_bin(self.designated_router),
addrconv.ipv4.text_to_bin(self.backup_router)))
try:
return head + reduce(lambda a, b: a + b,
(addrconv.ipv4.text_to_bin(
n) for n in self.neighbors))
except TypeError:
return head
@OSPFMessage.register_type(OSPF_MSG_DB_DESC)
class OSPFDBDesc(OSPFMessage):
_PACK_STR = '!HBBI' # + LSA_HEADERS
_PACK_LEN = struct.calcsize(_PACK_STR)
_MIN_LEN = OSPFMessage._HDR_LEN + _PACK_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
mtu=1500, options=0, i_flag=0, m_flag=0, ms_flag=0,
sequence_number=0, lsa_headers=None):
lsa_headers = lsa_headers if lsa_headers else []
super(OSPFDBDesc, self).__init__(OSPF_MSG_DB_DESC, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.mtu = mtu
self.options = options
self.i_flag = i_flag
self.m_flag = m_flag
self.ms_flag = ms_flag
self.sequence_number = sequence_number
self.lsa_headers = lsa_headers
@classmethod
def parser(cls, buf):
(mtu, options, flags,
sequence_number) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf))
i_flag = (flags >> 2) & 0x1
m_flag = (flags >> 1) & 0x1
ms_flag = flags & 0x1
lsahdrs = []
buf = buf[cls._PACK_LEN:]
while buf:
kwargs, buf = LSAHeader.parser(buf)
lsahdrs.append(LSAHeader(**kwargs))
return {
"mtu": mtu,
"options": options,
"i_flag": i_flag,
"m_flag": m_flag,
"ms_flag": ms_flag,
"sequence_number": sequence_number,
"lsa_headers": lsahdrs,
}
def serialize_tail(self):
flags = ((self.i_flag & 0x1) << 2) ^ \
((self.m_flag & 0x1) << 1) ^ \
(self.ms_flag & 0x1)
head = bytearray(struct.pack(self._PACK_STR, self.mtu,
self.options, flags,
self.sequence_number))
try:
return head + reduce(lambda a, b: a + b,
(hdr.serialize() for hdr in self.lsa_headers))
except TypeError:
return head
@OSPFMessage.register_type(OSPF_MSG_LS_REQ)
class OSPFLSReq(OSPFMessage):
_MIN_LEN = OSPFMessage._HDR_LEN
class Request(StringifyMixin):
_PACK_STR = '!I4s4s'
_PACK_LEN = struct.calcsize(_PACK_STR)
def __init__(self, type_=OSPF_UNKNOWN_LSA, id_='0.0.0.0',
adv_router='0.0.0.0'):
self.type_ = type_
self.id = id_
self.adv_router = adv_router
@classmethod
def parser(cls, buf):
if len(buf) < cls._PACK_LEN:
raise stream_parser.StreamParser.TooSmallException(
'%d < %d' % (len(buf), cls._PACK_LEN))
link = buf[:cls._PACK_LEN]
rest = buf[cls._PACK_LEN:]
(type_, id_, adv_router) = struct.unpack_from(cls._PACK_STR,
six.binary_type(link))
id_ = addrconv.ipv4.bin_to_text(id_)
adv_router = addrconv.ipv4.bin_to_text(adv_router)
return cls(type_, id_, adv_router), rest
def serialize(self):
id_ = addrconv.ipv4.text_to_bin(self.id)
adv_router = addrconv.ipv4.text_to_bin(self.adv_router)
return bytearray(struct.pack(self._PACK_STR, self.type_,
id_, adv_router))
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
lsa_requests=None):
lsa_requests = lsa_requests if lsa_requests else []
super(OSPFLSReq, self).__init__(OSPF_MSG_LS_REQ, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.lsa_requests = lsa_requests
@classmethod
def parser(cls, buf):
reqs = []
while buf:
req, buf = cls.Request.parser(buf)
reqs.append(req)
return {
"lsa_requests": reqs,
}
def serialize_tail(self):
return reduce(lambda a, b: a + b,
(req.serialize() for req in self.lsa_requests))
@OSPFMessage.register_type(OSPF_MSG_LS_UPD)
class OSPFLSUpd(OSPFMessage):
_PACK_STR = '!I'
_PACK_LEN = struct.calcsize(_PACK_STR)
_MIN_LEN = OSPFMessage._HDR_LEN + _PACK_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
lsas=None):
lsas = lsas if lsas else []
super(OSPFLSUpd, self).__init__(OSPF_MSG_LS_UPD, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.lsas = lsas
@classmethod
def parser(cls, buf):
binnum = buf[:cls._PACK_LEN]
(num,) = struct.unpack_from(cls._PACK_STR, six.binary_type(binnum))
buf = buf[cls._PACK_LEN:]
lsas = []
while buf:
lsa, _cls, buf = LSA.parser(buf)
lsas.append(lsa)
assert(len(lsas) == num)
return {
"lsas": lsas,
}
def serialize_tail(self):
head = bytearray(struct.pack(self._PACK_STR, len(self.lsas)))
try:
return head + reduce(lambda a, b: a + b,
(lsa.serialize() for lsa in self.lsas))
except TypeError:
return head
@OSPFMessage.register_type(OSPF_MSG_LS_ACK)
class OSPFLSAck(OSPFMessage):
_MIN_LEN = OSPFMessage._HDR_LEN
def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0',
au_type=1, authentication=0, checksum=None, version=_VERSION,
lsa_headers=None):
lsa_headers = lsa_headers if lsa_headers else []
super(OSPFLSAck, self).__init__(OSPF_MSG_LS_ACK, length, router_id,
area_id, au_type, authentication,
checksum, version)
self.lsa_headers = lsa_headers
@classmethod
def parser(cls, buf):
lsahdrs = []
while buf:
kwargs, buf = LSAHeader.parser(buf)
lsahdrs.append(LSAHeader(**kwargs))
return {
"lsa_headers": lsahdrs,
}
def serialize_tail(self):
return reduce(lambda a, b: a + b,
(hdr.serialize() for hdr in self.lsa_headers))
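# Hedged usage sketch (not part of the original module): serialize an OSPF
# Hello and parse it back through OSPFMessage.parser().  The router-id,
# area-id, mask and neighbor address are illustrative values only.
def _example_hello_roundtrip():
    hello = OSPFHello(router_id='192.0.2.1', area_id='0.0.0.0',
                      mask='255.255.255.0', neighbors=['192.0.2.2'])
    buf = hello.serialize()
    parsed, _cls, _rest = OSPFMessage.parser(six.binary_type(buf))
    return parsed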
|
|
# -*- coding: utf-8 -*-
"""
Unit tests for the conversion module.
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import neo
import numpy as np
import quantities as pq
from numpy.testing import (assert_array_almost_equal, assert_array_equal)
import elephant.conversion as cv
from elephant.utils import get_common_start_stop_times
from elephant.spike_train_generation import homogeneous_poisson_process
def get_nearest(times, time):
return (np.abs(times - time)).argmin()
class binarize_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_1d = np.array([1.23, 0.3, 0.87, 0.56])
def test_binarize_with_spiketrain_exact(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms',
t_stop=10.0, sampling_rate=100)
times = np.arange(0, 10. + .01, .01)
target = np.zeros_like(times).astype('bool')
for time in self.test_array_1d:
target[get_nearest(times, time)] = True
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_spiketrain_exact_set_ends(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms',
t_stop=10.0, sampling_rate=100)
times = np.arange(5., 10. + .01, .01)
target = np.zeros_like(times).astype('bool')
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True, t_start=5., t_stop=10.)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_spiketrain_round(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms',
t_stop=10.0, sampling_rate=10.0)
times = np.arange(0, 10. + .1, .1)
target = np.zeros_like(times).astype('bool')
for time in np.round(self.test_array_1d, 1):
target[get_nearest(times, time)] = True
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_quantities_exact(self):
st = pq.Quantity(self.test_array_1d, units='ms')
times = np.arange(0, 1.23 + .01, .01)
target = np.zeros_like(times).astype('bool')
for time in self.test_array_1d:
target[get_nearest(times, time)] = True
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True,
sampling_rate=100. * pq.kHz)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_quantities_exact_set_ends(self):
st = pq.Quantity(self.test_array_1d, units='ms')
times = np.arange(0, 10. + .01, .01)
target = np.zeros_like(times).astype('bool')
for time in self.test_array_1d:
target[get_nearest(times, time)] = True
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True, t_stop=10.,
sampling_rate=100. * pq.kHz)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_quantities_round_set_ends(self):
st = pq.Quantity(self.test_array_1d, units='ms')
times = np.arange(5., 10. + .1, .1)
target = np.zeros_like(times).astype('bool')
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True, t_start=5., t_stop=10.,
sampling_rate=10. * pq.kHz)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_plain_array_exact(self):
st = self.test_array_1d
times = np.arange(0, 1.23 + .01, .01)
target = np.zeros_like(times).astype('bool')
for time in self.test_array_1d:
target[get_nearest(times, time)] = True
res, tres = cv.binarize(st, return_times=True, sampling_rate=100)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_plain_array_exact_set_ends(self):
st = self.test_array_1d
times = np.arange(0, 10. + .01, .01)
target = np.zeros_like(times).astype('bool')
for time in self.test_array_1d:
target[get_nearest(times, time)] = True
res, tres = cv.binarize(st, return_times=True, t_stop=10.,
sampling_rate=100.)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_no_time(self):
st = self.test_array_1d
times = np.arange(0, 1.23 + .01, .01)
target = np.zeros_like(times).astype('bool')
for time in self.test_array_1d:
target[get_nearest(times, time)] = True
res0, tres = cv.binarize(st, return_times=True, sampling_rate=100)
res1 = cv.binarize(st, return_times=False, sampling_rate=100)
res2 = cv.binarize(st, sampling_rate=100)
assert_array_almost_equal(res0, res1, decimal=9)
assert_array_almost_equal(res0, res2, decimal=9)
def test_binariz_rate_with_plain_array_and_units_typeerror(self):
st = self.test_array_1d
self.assertRaises(TypeError, cv.binarize, st,
t_start=pq.Quantity(0, 'ms'),
sampling_rate=10.)
self.assertRaises(TypeError, cv.binarize, st,
t_stop=pq.Quantity(10, 'ms'),
sampling_rate=10.)
self.assertRaises(TypeError, cv.binarize, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=pq.Quantity(10, 'ms'),
sampling_rate=10.)
self.assertRaises(TypeError, cv.binarize, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=10.,
sampling_rate=10.)
self.assertRaises(TypeError, cv.binarize, st,
t_start=0.,
t_stop=pq.Quantity(10, 'ms'),
sampling_rate=10.)
self.assertRaises(TypeError, cv.binarize, st,
sampling_rate=10. * pq.Hz)
def test_binariz_without_sampling_rate_valueerror(self):
st0 = self.test_array_1d
st1 = pq.Quantity(st0, 'ms')
self.assertRaises(ValueError, cv.binarize, st0)
self.assertRaises(ValueError, cv.binarize, st0,
t_start=0)
self.assertRaises(ValueError, cv.binarize, st0,
t_stop=10)
self.assertRaises(ValueError, cv.binarize, st0,
t_start=0, t_stop=10)
self.assertRaises(ValueError, cv.binarize, st1,
t_start=pq.Quantity(0, 'ms'), t_stop=10.)
self.assertRaises(ValueError, cv.binarize, st1,
t_start=0., t_stop=pq.Quantity(10, 'ms'))
self.assertRaises(ValueError, cv.binarize, st1)
def test_bin_edges_empty_binned_spiketrain(self):
st = neo.SpikeTrain(times=np.array([2.5]) * pq.s, t_start=0 * pq.s,
t_stop=3 * pq.s)
with self.assertWarns(UserWarning):
bst = cv.BinnedSpikeTrain(st, bin_size=2 * pq.s, t_start=0 * pq.s,
t_stop=3 * pq.s)
assert_array_equal(bst.bin_edges, [0., 2.] * pq.s)
assert_array_equal(bst.spike_indices, [[]]) # no binned spikes
self.assertEqual(bst.get_num_of_spikes(), 0)
class BinnedSpikeTrainTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.bin_size = 1 * pq.s
self.tolerance = 1e-8
def test_binarize(self):
spiketrains = [self.spiketrain_a, self.spiketrain_b,
self.spiketrain_a, self.spiketrain_b]
for sparse_format in ("csr", "csc"):
bst = cv.BinnedSpikeTrain(spiketrains=spiketrains,
bin_size=self.bin_size,
sparse_format=sparse_format)
bst_bin = bst.binarize(copy=True)
bst_copy = bst.copy()
assert_array_equal(bst_bin.to_array(), bst.to_bool_array())
bst_copy.sparse_matrix.data[:] = 1
self.assertEqual(bst_bin, bst_copy)
def test_slice(self):
spiketrains = [self.spiketrain_a, self.spiketrain_b,
self.spiketrain_a, self.spiketrain_b]
bst = cv.BinnedSpikeTrain(spiketrains=spiketrains,
bin_size=self.bin_size)
self.assertEqual(bst[:, :], bst)
self.assertEqual(bst[1:], cv.BinnedSpikeTrain(spiketrains[1:],
bin_size=self.bin_size))
self.assertEqual(bst[:, :4], bst.time_slice(t_stop=4 * pq.s))
self.assertEqual(bst[:, 1:-1], cv.BinnedSpikeTrain(
spiketrains, bin_size=self.bin_size,
t_start=1 * pq.s, t_stop=9 * pq.s
))
self.assertEqual(bst[0, 0], cv.BinnedSpikeTrain(
neo.SpikeTrain([0.5, 0.7], t_stop=1, units='s'),
bin_size=self.bin_size
))
# 2-second stride: keep the [0..1, 2..3, 4..5, 6..7] intervals
self.assertEqual(bst[0, ::2], cv.BinnedSpikeTrain(
neo.SpikeTrain([0.5, 0.7, 4.3, 6.7], t_stop=10, units='s'),
bin_size=2 * self.bin_size
))
bst_copy = bst.copy()
bst_copy[:] = 1
assert_array_equal(bst_copy.sparse_matrix.todense(), 1)
def test_time_slice(self):
spiketrains = [self.spiketrain_a, self.spiketrain_b]
bst = cv.BinnedSpikeTrain(spiketrains=spiketrains,
bin_size=self.bin_size)
bst_equal = bst.time_slice(t_start=bst.t_start - 5 * pq.s,
t_stop=bst.t_stop + 5 * pq.s)
self.assertEqual(bst_equal, bst)
bst_same = bst.time_slice(t_start=None, t_stop=None)
self.assertIs(bst_same, bst)
bst_copy = bst.time_slice(t_start=None, t_stop=None, copy=True)
self.assertIsNot(bst_copy, bst)
self.assertEqual(bst_copy, bst)
bst_empty = bst.time_slice(t_start=0.2 * pq.s, t_stop=0.3 * pq.s)
self.assertEqual(bst_empty.n_bins, 0)
t_range = np.arange(0, 10, self.bin_size.item()) * pq.s
for i, t_start in enumerate(t_range[:-1]):
for t_stop in t_range[i + 1:]:
bst_ij = bst.time_slice(t_start=t_start, t_stop=t_stop)
bst_ij2 = bst_ij.time_slice(t_start=t_start, t_stop=t_stop)
self.assertEqual(bst_ij2, bst_ij)
self.assertEqual(bst_ij2.tolerance, bst.tolerance)
sts = [st.time_slice(t_start=t_start, t_stop=t_stop)
for st in spiketrains]
bst_ref = cv.BinnedSpikeTrain(sts, bin_size=self.bin_size)
self.assertEqual(bst_ij, bst_ref)
# invalid input: not a quantity
self.assertRaises(TypeError, bst.time_slice, t_start=2)
def test_to_spike_trains(self):
np.random.seed(1)
spiketrains = [homogeneous_poisson_process(rate=10 * pq.Hz,
t_start=-1 * pq.s,
t_stop=10 * pq.s)]
for sparse_format in ("csr", "csc"):
bst1 = cv.BinnedSpikeTrain(
spiketrains=[self.spiketrain_a, self.spiketrain_b],
bin_size=self.bin_size, sparse_format=sparse_format
)
bst2 = cv.BinnedSpikeTrain(spiketrains=spiketrains,
bin_size=300 * pq.ms,
sparse_format=sparse_format)
for bst in (bst1, bst2):
for spikes in ("random", "left", "center"):
spiketrains_gen = bst.to_spike_trains(spikes=spikes,
annotate_bins=True)
for st, indices in zip(spiketrains_gen, bst.spike_indices):
# check sorted
self.assertTrue((np.diff(st.magnitude) > 0).all())
assert_array_equal(st.array_annotations['bins'],
indices)
self.assertEqual(st.annotations['bin_size'],
bst.bin_size)
self.assertEqual(st.t_start, bst.t_start)
self.assertEqual(st.t_stop, bst.t_stop)
bst_same = cv.BinnedSpikeTrain(spiketrains_gen,
bin_size=bst.bin_size,
sparse_format=sparse_format)
self.assertEqual(bst_same, bst)
# invalid mode
self.assertRaises(ValueError, bst.to_spike_trains,
spikes='right')
def test_get_num_of_spikes(self):
spiketrains = [self.spiketrain_a, self.spiketrain_b]
for spiketrain in spiketrains:
binned = cv.BinnedSpikeTrain(spiketrain, n_bins=10,
bin_size=1 * pq.s, t_start=0 * pq.s)
self.assertEqual(binned.get_num_of_spikes(),
len(binned.spike_indices[0]))
for sparse_format in ("csr", "csc"):
binned_matrix = cv.BinnedSpikeTrain(spiketrains, n_bins=10,
bin_size=1 * pq.s,
sparse_format=sparse_format)
n_spikes_per_row = binned_matrix.get_num_of_spikes(axis=1)
n_spikes_per_row_from_indices = list(
map(len, binned_matrix.spike_indices))
assert_array_equal(n_spikes_per_row, n_spikes_per_row_from_indices)
self.assertEqual(binned_matrix.get_num_of_spikes(),
sum(n_spikes_per_row_from_indices))
def test_binned_spiketrain_sparse(self):
a = neo.SpikeTrain([1.7, 1.8, 4.3] * pq.s, t_stop=10.0 * pq.s)
b = neo.SpikeTrain([1.7, 1.8, 4.3] * pq.s, t_stop=10.0 * pq.s)
bin_size = 1 * pq.s
nbins = 10
x = cv.BinnedSpikeTrain([a, b], n_bins=nbins, bin_size=bin_size,
t_start=0 * pq.s)
x_sparse = [2, 1, 2, 1]
assert_array_equal(x.sparse_matrix.data, x_sparse)
assert_array_equal(x.spike_indices, [[1, 1, 4], [1, 1, 4]])
def test_binned_spiketrain_shape(self):
a = self.spiketrain_a
x = cv.BinnedSpikeTrain(a, n_bins=10,
bin_size=self.bin_size,
t_start=0 * pq.s)
x_bool = cv.BinnedSpikeTrain(a, n_bins=10, bin_size=self.bin_size,
t_start=0 * pq.s)
self.assertEqual(x.to_array().shape, (1, 10))
self.assertEqual(x_bool.to_bool_array().shape, (1, 10))
# shape of the matrix for a list of spike trains
def test_binned_spiketrain_shape_list(self):
a = self.spiketrain_a
b = self.spiketrain_b
c = [a, b]
nbins = 5
x = cv.BinnedSpikeTrain(c, n_bins=nbins, t_start=0 * pq.s,
t_stop=10.0 * pq.s)
x_bool = cv.BinnedSpikeTrain(c, n_bins=nbins, t_start=0 * pq.s,
t_stop=10.0 * pq.s)
self.assertEqual(x.to_array().shape, (2, 5))
self.assertEqual(x_bool.to_bool_array().shape, (2, 5))
def test_binned_spiketrain_neg_times(self):
a = neo.SpikeTrain(
[-6.5, 0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
t_start=-6.5 * pq.s, t_stop=10.0 * pq.s)
bin_size = self.bin_size
nbins = 16
x = cv.BinnedSpikeTrain(a, n_bins=nbins, bin_size=bin_size,
t_start=-6.5 * pq.s)
y = [[1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0]]
assert_array_equal(x.to_bool_array(), y)
def test_binned_spiketrain_neg_times_list(self):
a = neo.SpikeTrain(
[-6.5, 0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
t_start=-7 * pq.s, t_stop=7 * pq.s)
b = neo.SpikeTrain(
[-0.1, -0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s,
t_start=-1 * pq.s, t_stop=8 * pq.s)
spiketrains = [a, b]
# not the same t_start and t_stop
self.assertRaises(ValueError, cv.BinnedSpikeTrain,
spiketrains=spiketrains,
bin_size=self.bin_size)
t_start, t_stop = get_common_start_stop_times(spiketrains)
self.assertEqual(t_start, -1 * pq.s)
self.assertEqual(t_stop, 7 * pq.s)
x_bool = cv.BinnedSpikeTrain(spiketrains, bin_size=self.bin_size,
t_start=t_start, t_stop=t_stop)
y_bool = [[0, 1, 1, 0, 1, 1, 1, 1],
[1, 0, 1, 1, 0, 1, 1, 0]]
assert_array_equal(x_bool.to_bool_array(), y_bool)
# check spike_indices and the binned matrix for a single spike train
def test_binned_spiketrain_indices(self):
a = self.spiketrain_a
bin_size = self.bin_size
nbins = 10
x = cv.BinnedSpikeTrain(a, n_bins=nbins, bin_size=bin_size,
t_start=0 * pq.s)
x_bool = cv.BinnedSpikeTrain(a, n_bins=nbins, bin_size=bin_size,
t_start=0 * pq.s)
y_matrix = [[2., 1., 0., 1., 1., 1., 1., 0., 0., 0.]]
y_bool_matrix = [[1., 1., 0., 1., 1., 1., 1., 0., 0., 0.]]
assert_array_equal(x.to_array(), y_matrix)
assert_array_equal(x_bool.to_bool_array(), y_bool_matrix)
s = x_bool.to_sparse_bool_array()[
x_bool.to_sparse_bool_array().nonzero()]
assert_array_equal(s, [[True] * 6])
def test_binned_spiketrain_list(self):
a = self.spiketrain_a
b = self.spiketrain_b
bin_size = self.bin_size
nbins = 10
c = [a, b]
x = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size,
t_start=0 * pq.s)
x_bool = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size,
t_start=0 * pq.s)
y_matrix = [[2, 1, 0, 1, 1, 1, 1, 0, 0, 0],
[2, 1, 1, 0, 1, 1, 0, 0, 1, 0]]
y_matrix_bool = [[1, 1, 0, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 1, 1, 0, 0, 1, 0]]
assert_array_equal(x.to_array(), y_matrix)
assert_array_equal(x_bool.to_bool_array(), y_matrix_bool)
# t_stop is None
def test_binned_spiketrain_list_t_stop(self):
a = self.spiketrain_a
b = self.spiketrain_b
c = [a, b]
bin_size = self.bin_size
nbins = 10
x = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size,
t_start=0 * pq.s,
t_stop=None)
x_bool = cv.BinnedSpikeTrain(c, n_bins=nbins, bin_size=bin_size,
t_start=0 * pq.s)
self.assertEqual(x.t_stop, 10 * pq.s)
self.assertEqual(x_bool.t_stop, 10 * pq.s)
# Test number of bins
def test_binned_spiketrain_list_numbins(self):
a = self.spiketrain_a
b = self.spiketrain_b
c = [a, b]
bin_size = 1 * pq.s
x = cv.BinnedSpikeTrain(c, bin_size=bin_size, t_start=0 * pq.s,
t_stop=10. * pq.s)
x_bool = cv.BinnedSpikeTrain(c, bin_size=bin_size, t_start=0 * pq.s,
t_stop=10. * pq.s)
self.assertEqual(x.n_bins, 10)
self.assertEqual(x_bool.n_bins, 10)
def test_binned_spiketrain_matrix(self):
# Init
a = self.spiketrain_a
b = self.spiketrain_b
x_bool_a = cv.BinnedSpikeTrain(a, bin_size=pq.s, t_start=0 * pq.s,
t_stop=10. * pq.s)
x_bool_b = cv.BinnedSpikeTrain(b, bin_size=pq.s, t_start=0 * pq.s,
t_stop=10. * pq.s)
# Assumed results
y_matrix_a = [[2, 1, 0, 1, 1, 1, 1, 0, 0, 0]]
y_matrix_bool_a = [[1, 1, 0, 1, 1, 1, 1, 0, 0, 0]]
y_matrix_bool_b = [[1, 1, 1, 0, 1, 1, 0, 0, 1, 0]]
# Asserts
assert_array_equal(x_bool_a.to_bool_array(), y_matrix_bool_a)
assert_array_equal(x_bool_b.to_bool_array(), y_matrix_bool_b)
assert_array_equal(x_bool_a.to_array(), y_matrix_a)
# Test if t_start is calculated correctly
def test_binned_spiketrain_parameter_calc_tstart(self):
x = cv.BinnedSpikeTrain(self.spiketrain_a, bin_size=1 * pq.s,
n_bins=10, t_stop=10. * pq.s)
self.assertEqual(x.t_start, 0. * pq.s)
self.assertEqual(x.t_stop, 10. * pq.s)
self.assertEqual(x.bin_size, 1 * pq.s)
self.assertEqual(x.n_bins, 10)
    # Test that an error is raised when n_bins is not an integer
def test_binned_spiketrain_n_bins_not_int(self):
a = self.spiketrain_a
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, bin_size=pq.s,
n_bins=1.4, t_start=0 * pq.s,
t_stop=10. * pq.s)
def test_to_array(self):
x = cv.BinnedSpikeTrain(self.spiketrain_a, bin_size=1 * pq.s,
n_bins=10, t_stop=10. * pq.s)
arr_float = x.to_array(dtype=np.float32)
assert_array_equal(arr_float, x.to_array().astype(np.float32))
# Test if error is raised when providing insufficient number of
# parameters
def test_binned_spiketrain_insufficient_arguments(self):
a = self.spiketrain_a
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a)
self.assertRaises(
ValueError,
cv.BinnedSpikeTrain,
a,
bin_size=1 * pq.s,
t_start=0 * pq.s,
t_stop=0 * pq.s)
def test_different_input_types(self):
a = self.spiketrain_a
q = [1, 2, 3] * pq.s
self.assertRaises(ValueError, cv.BinnedSpikeTrain,
spiketrains=[a, q], bin_size=pq.s)
def test_get_start_stop(self):
a = self.spiketrain_a
b = neo.SpikeTrain(
[-0.1, -0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s,
t_start=-1 * pq.s, t_stop=8 * pq.s)
start, stop = get_common_start_stop_times(a)
self.assertEqual(start, a.t_start)
self.assertEqual(stop, a.t_stop)
start, stop = get_common_start_stop_times([a, b])
self.assertEqual(start, a.t_start)
self.assertEqual(stop, b.t_stop)
def test_consistency_errors(self):
a = self.spiketrain_a
b = neo.SpikeTrain([-2, -1] * pq.s, t_start=-2 * pq.s,
t_stop=-1 * pq.s)
self.assertRaises(TypeError, cv.BinnedSpikeTrain, [a, b], t_start=5,
t_stop=0, bin_size=pq.s, n_bins=10)
b = neo.SpikeTrain([-7, -8, -9] * pq.s, t_start=-9 * pq.s,
t_stop=-7 * pq.s)
self.assertRaises(TypeError, cv.BinnedSpikeTrain, b, t_start=None,
t_stop=10, bin_size=pq.s, n_bins=10)
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, t_start=0 * pq.s,
t_stop=10 * pq.s, bin_size=3 * pq.s, n_bins=10)
b = neo.SpikeTrain([-4, -2, 0, 1] * pq.s, t_start=-4 * pq.s,
t_stop=1 * pq.s)
self.assertRaises(
TypeError,
cv.BinnedSpikeTrain,
b,
bin_size=-2 * pq.s,
t_start=-4 * pq.s,
t_stop=0 * pq.s)
# Test edges
def test_binned_spiketrain_bin_edges(self):
a = self.spiketrain_a
x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s, n_bins=10,
t_stop=10. * pq.s)
# Test all edges
assert_array_equal(x.bin_edges, [float(i) for i in range(11)])
# Test center edges
assert_array_equal(x.bin_centers, np.arange(0, 10) + 0.5)
# Test for different units but same times
def test_binned_spiketrain_different_units(self):
a = self.spiketrain_a
b = a.rescale(pq.ms)
bin_size = 1 * pq.s
xa = cv.BinnedSpikeTrain(a, bin_size=bin_size)
xb = cv.BinnedSpikeTrain(b, bin_size=bin_size.rescale(pq.ms))
assert_array_equal(xa.to_array(), xb.to_array())
assert_array_equal(xa.to_bool_array(), xb.to_bool_array())
assert_array_equal(xa.sparse_matrix.data,
xb.sparse_matrix.data)
assert_array_equal(xa.bin_edges, xb.bin_edges)
def test_binary_to_binned_matrix(self):
a = [[1, 0, 0, 0], [0, 1, 1, 0]]
x = cv.BinnedSpikeTrain(a, t_start=0 * pq.s, t_stop=5 * pq.s)
# Check for correctness with different init params
assert_array_equal(x.to_array(), a)
assert_array_equal(x.to_bool_array(), a)
self.assertEqual(x.n_bins, 4)
self.assertEqual(x.bin_size, 1.25 * pq.s)
x = cv.BinnedSpikeTrain(a, t_start=1 * pq.s, bin_size=2 * pq.s)
assert_array_equal(x.to_array(), a)
assert_array_equal(x.to_bool_array(), a)
self.assertEqual(x.t_stop, 9 * pq.s)
x = cv.BinnedSpikeTrain(a, t_stop=9 * pq.s, bin_size=2 * pq.s)
self.assertEqual(x.t_start, 1 * pq.s)
# Raise error
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a,
t_start=5 * pq.s, t_stop=0 * pq.s, bin_size=pq.s,
n_bins=10)
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, t_start=0 * pq.s,
t_stop=10 * pq.s, bin_size=3 * pq.s, n_bins=10)
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a,
bin_size=-2 * pq.s, t_start=-4 * pq.s,
t_stop=0 * pq.s)
# Check binary property
self.assertTrue(x.is_binary)
def test_binned_to_binned(self):
a = self.spiketrain_a
x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s).to_array()
y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s)
assert_array_equal(y.to_array(), x)
# test with a list
x = cv.BinnedSpikeTrain([[0, 1, 2, 3]], bin_size=1 * pq.s,
t_stop=3 * pq.s).to_array()
y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s)
assert_array_equal(y.to_array(), x)
# test with a numpy array
a = np.array([[0, 1, 2, 3], [1, 2, 2.5, 3]])
x = cv.BinnedSpikeTrain(a, bin_size=1 * pq.s,
t_stop=3 * pq.s).to_array()
y = cv.BinnedSpikeTrain(x, bin_size=1 * pq.s, t_start=0 * pq.s)
assert_array_equal(y.to_array(), x)
# Check binary property
self.assertFalse(y.is_binary)
# Raise Errors
# give a strangely shaped matrix as input (not MxN)
a = np.array([[0, 1, 2, 3], [1, 2, 3]], dtype=object)
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, t_start=0 * pq.s,
bin_size=1 * pq.s)
# Give no t_start or t_stop
a = np.array([[0, 1, 2, 3], [1, 2, 3, 4]])
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a,
bin_size=1 * pq.s)
# Input format not supported
a = np.array(([0, 1, 2], [0, 1, 2, 3, 4]), dtype=object)
self.assertRaises(ValueError, cv.BinnedSpikeTrain, a,
bin_size=1 * pq.s)
    def test_binned_spiketrain_different_input_units(self):
train = neo.SpikeTrain(times=np.array([1.001, 1.002, 1.005]) * pq.s,
t_start=1 * pq.s, t_stop=1.01 * pq.s)
bst = cv.BinnedSpikeTrain(train,
t_start=1 * pq.s, t_stop=1.01 * pq.s,
bin_size=1 * pq.ms)
self.assertEqual(bst.units, pq.s)
        target_edges = np.array([1000, 1001, 1002, 1003, 1004, 1005, 1006,
                                 1007, 1008, 1009, 1010],
                                dtype=float) * pq.ms
        target_centers = np.array(
            [1000.5, 1001.5, 1002.5, 1003.5, 1004.5, 1005.5, 1006.5, 1007.5,
             1008.5, 1009.5], dtype=float) * pq.ms
assert_array_almost_equal(bst.bin_edges, target_edges)
assert_array_almost_equal(bst.bin_centers, target_centers)
bst = cv.BinnedSpikeTrain(train,
t_start=1 * pq.s, t_stop=1010 * pq.ms,
bin_size=1 * pq.ms)
self.assertEqual(bst.units, pq.s)
assert_array_almost_equal(bst.bin_edges, target_edges)
assert_array_almost_equal(bst.bin_centers, target_centers)
def test_rescale(self):
train = neo.SpikeTrain(times=np.array([1.001, 1.002, 1.005]) * pq.s,
t_start=1 * pq.s, t_stop=1.01 * pq.s)
bst = cv.BinnedSpikeTrain(train, t_start=1 * pq.s,
t_stop=1.01 * pq.s,
bin_size=1 * pq.ms)
self.assertEqual(bst.units, pq.s)
self.assertEqual(bst._t_start, 1) # 1 s
self.assertEqual(bst._t_stop, 1.01) # 1.01 s
self.assertEqual(bst._bin_size, 0.001) # 0.001 s
bst.rescale(units='ms')
self.assertEqual(bst.units, pq.ms)
self.assertEqual(bst._t_start, 1000) # 1 s
self.assertEqual(bst._t_stop, 1010) # 1.01 s
self.assertEqual(bst._bin_size, 1) # 0.001 s
def test_repr(self):
train = neo.SpikeTrain(times=np.array([1.001, 1.002, 1.005]) * pq.s,
t_start=1 * pq.s, t_stop=1.01 * pq.s)
bst = cv.BinnedSpikeTrain(train, t_start=1 * pq.s,
t_stop=1.01 * pq.s,
bin_size=1 * pq.ms)
self.assertEqual(repr(bst), "BinnedSpikeTrain(t_start=1.0 s, "
"t_stop=1.01 s, bin_size=0.001 s; "
"shape=(1, 10), format=csr_matrix)")
def test_binned_sparsity(self):
train = neo.SpikeTrain(np.arange(10), t_stop=10 * pq.s, units=pq.s)
bst = cv.BinnedSpikeTrain(train, n_bins=100)
self.assertAlmostEqual(bst.sparsity, 0.1)
# Test fix for rounding errors
def test_binned_spiketrain_rounding(self):
train = neo.SpikeTrain(times=np.arange(120000) / 30000. * pq.s,
t_start=0 * pq.s, t_stop=4 * pq.s)
with self.assertWarns(UserWarning):
bst = cv.BinnedSpikeTrain(train,
t_start=0 * pq.s, t_stop=4 * pq.s,
bin_size=1. / 30000. * pq.s)
assert_array_equal(bst.to_array().nonzero()[1],
np.arange(120000))
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.
"""
Definition of the Request object, which acts as a kind of bridge between
what the user wants and what the plugins can provide.
"""
import os
from io import BytesIO
import zipfile
import tempfile
import shutil
import enum
import warnings
from ..core import urlopen, get_remote_file
from pathlib import Path
from urllib.parse import urlparse
from typing import Optional
# URI types
URI_BYTES = 1
URI_FILE = 2
URI_FILENAME = 3
URI_ZIPPED = 4
URI_HTTP = 5
URI_FTP = 6
class IOMode(str, enum.Enum):
"""Available Image modes
This is a helper enum for ``Request.Mode`` which is a composite of a
``Request.ImageMode`` and ``Request.IOMode``. The IOMode that tells the
plugin if the resource should be read from or written to. Available values are
- read ("r"): Read from the specified resource
- write ("w"): Write to the specified resource
"""
read = "r"
write = "w"
class ImageMode(str, enum.Enum):
"""Available Image modes
    This is a helper enum for ``Request.Mode``, which is a composite of a
    ``Request.ImageMode`` and a ``Request.IOMode``. The image mode tells the
    plugin the desired (and expected) image shape. Available values are:
    - single_image ("i"): Return a single image extending in two spatial
dimensions
    - multi_image ("I"): Return a list of images extending in two spatial
dimensions
- single_volume ("v"): Return an image extending into multiple dimensions.
      E.g. three spatial dimensions for image stacks, or two spatial and one
time dimension for videos
- multi_volume ("V"): Return a list of images extending into multiple
dimensions.
- any_mode ("?"): Return an image in any format (the plugin decides the
appropriate action).
"""
single_image = "i"
multi_image = "I"
single_volume = "v"
multi_volume = "V"
any_mode = "?"
@enum.unique
class Mode(str, enum.Enum):
"""The mode to use when interacting with the resource
``Request.Mode`` is a composite of ``Request.ImageMode`` and
    ``Request.IOMode``. The image mode tells the plugin the desired (and
    expected) image shape, and the IO mode tells the plugin how the resource
    should be interacted with. For a detailed description of the
available modes, see the documentation for ``Request.ImageMode`` and
``Request.IOMode`` respectively.
Available modes are all combinations of ``Request.IOMode`` and ``Request.ImageMode``:
- read_single_image ("ri")
- read_multi_image ("rI")
- read_single_volume ("rv")
- read_multi_volume ("rV")
- read_any ("r?")
- write_single_image ("wi")
- write_multi_image ("wI")
- write_single_volume ("wv")
- write_multi_volume ("wV")
- write_any ("w?")
Examples
--------
>>> Request.Mode("rI") # a list of simple images should be read from the resource
>>> Request.Mode("wv") # a single volume should be written to the resource
"""
read_single_image = "ri"
read_multi_image = "rI"
read_single_volume = "rv"
read_multi_volume = "rV"
read_any = "r?"
write_single_image = "wi"
write_multi_image = "wI"
write_single_volume = "wv"
write_multi_volume = "wV"
write_any = "w?"
@classmethod
def _missing_(cls, value):
"""Enable Mode("r") and Mode("w")
The sunder method ``_missing_`` is called whenever the constructor fails
to directly look up the corresponding enum value from the given input.
In our case, we use it to convert the modes "r" and "w" (from the v3
API) into their legacy versions "r?" and "w?".
More info on _missing_:
https://docs.python.org/3/library/enum.html#supported-sunder-names
"""
if value == "r":
return cls("r?")
elif value == "w":
return cls("w?")
else:
raise ValueError(f"{value} is no valid Mode.")
@property
def io_mode(self) -> IOMode:
return IOMode(self.value[0])
@property
def image_mode(self) -> ImageMode:
return ImageMode(self.value[1])
def __getitem__(self, key):
"""For backwards compatibility with the old non-enum modes"""
if key == 0:
return self.io_mode
elif key == 1:
return self.image_mode
else:
raise IndexError(f"Mode has no item {key}")
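# A minimal illustration of how the mode enums compose (not part of the
# original module; shown only to make the relationships concrete):
#
#   >>> Mode("rI") is Mode.read_multi_image
#   True
#   >>> Mode("w") is Mode.write_any            # resolved via Mode._missing_
#   True
#   >>> Mode("wv").io_mode is IOMode.write
#   True
#   >>> Mode("wv")[1] is ImageMode.single_volume
#   True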
SPECIAL_READ_URIS = "<video", "<screen>", "<clipboard>"
# The user can use this string in a write call to get the data back as bytes.
RETURN_BYTES = "<bytes>"
# Example images that will be auto-downloaded
EXAMPLE_IMAGES = {
"astronaut.png": "Image of the astronaut Eileen Collins",
"camera.png": "A grayscale image of a photographer",
"checkerboard.png": "Black and white image of a chekerboard",
"wood.jpg": "A (repeatable) texture of wooden planks",
"bricks.jpg": "A (repeatable) texture of stone bricks",
"clock.png": "Photo of a clock with motion blur (Stefan van der Walt)",
"coffee.png": "Image of a cup of coffee (Rachel Michetti)",
"chelsea.png": "Image of Stefan's cat",
"wikkie.png": "Image of Almar's cat",
"coins.png": "Image showing greek coins from Pompeii",
"horse.png": "Image showing the silhouette of a horse (Andreas Preuss)",
"hubble_deep_field.png": "Photograph taken by Hubble telescope (NASA)",
"immunohistochemistry.png": "Immunohistochemical (IHC) staining",
"moon.png": "Image showing a portion of the surface of the moon",
"page.png": "A scanned page of text",
"text.png": "A photograph of handdrawn text",
"chelsea.zip": "The chelsea.png in a zipfile (for testing)",
"chelsea.bsdf": "The chelsea.png in a BSDF file(for testing)",
"newtonscradle.gif": "Animated GIF of a newton's cradle",
"cockatoo.mp4": "Video file of a cockatoo",
"stent.npz": "Volumetric image showing a stented abdominal aorta",
"meadow_cube.jpg": "A cubemap image of a meadow, e.g. to render a skybox.",
}
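# Usage sketch (illustration only, not part of this module): standard images
# are addressed with the "imageio:" scheme, handled in ``Request._parse_uri``
# below. On first use the file is downloaded and cached locally.
#
#   req = Request("imageio:chelsea.png", "ri")
#   png_bytes = req.get_file().read()
#   req.finish()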
class Request(object):
"""ImageResource handling utility.
Represents a request for reading or saving an image resource. This
    object wraps information about that request and acts as an interface
    for the plugins to several resources; it allows the user to read
    from filenames, files, http, zipfiles, raw bytes, etc., but offers
a simple interface to the plugins via ``get_file()`` and
``get_local_filename()``.
For each read/write operation a single Request instance is used and passed
to the can_read/can_write method of a format, and subsequently to
the Reader/Writer class. This allows rudimentary passing of
information between different formats and between a format and
associated reader/writer.
Parameters
----------
uri : {str, bytes, file}
The resource to load the image from.
mode : str
The first character is "r" or "w", indicating a read or write
request. The second character is used to indicate the kind of data:
"i" for an image, "I" for multiple images, "v" for a volume,
"V" for multiple volumes, "?" for don't care.
"""
def __init__(self, uri, mode, *, format_hint: str = None, **kwargs):
# General
self.raw_uri = uri
self._uri_type = None
self._filename = None
self._extension = None
self._format_hint = None
self._kwargs = kwargs
self._result = None # Some write actions may have a result
# To handle the user-side
self._filename_zip = None # not None if a zipfile is used
self._bytes = None # Incoming bytes
self._zipfile = None # To store a zipfile instance (if used)
# To handle the plugin side
self._file = None # To store the file instance
self._file_is_local = False # whether the data needs to be copied at end
self._filename_local = None # not None if using tempfile on this FS
self._firstbytes = None # For easy header parsing
# To store formats that may be able to fulfil this request
# self._potential_formats = []
# Check mode
try:
self._mode = Mode(mode)
except ValueError:
raise ValueError(f"Invalid Request.Mode: {mode}")
# Parse what was given
self._parse_uri(uri)
# Set extension
if self._filename is not None:
if self._uri_type in (URI_FILENAME, URI_ZIPPED):
path = self._filename
else:
path = urlparse(self._filename).path
ext = Path(path).suffix.lower()
self._extension = ext if ext != "" else None
self.format_hint = format_hint
def _parse_uri(self, uri):
"""Try to figure our what we were given"""
is_read_request = self.mode.io_mode is IOMode.read
is_write_request = self.mode.io_mode is IOMode.write
if isinstance(uri, str):
# Explicit
if uri.startswith("imageio:"):
if is_write_request:
raise RuntimeError("Cannot write to the standard images.")
fn = uri.split(":", 1)[-1].lower()
fn, _, zip_part = fn.partition(".zip/")
if zip_part:
fn += ".zip"
if fn not in EXAMPLE_IMAGES:
raise ValueError("Unknown standard image %r." % fn)
self._uri_type = URI_FILENAME
self._filename = get_remote_file("images/" + fn, auto=True)
if zip_part:
self._filename += "/" + zip_part
elif uri.startswith("http://") or uri.startswith("https://"):
self._uri_type = URI_HTTP
self._filename = uri
elif uri.startswith("ftp://") or uri.startswith("ftps://"):
self._uri_type = URI_FTP
self._filename = uri
elif uri.startswith("file://"):
self._uri_type = URI_FILENAME
self._filename = uri[7:]
elif uri.startswith(SPECIAL_READ_URIS) and is_read_request:
self._uri_type = URI_BYTES
self._filename = uri
elif uri.startswith(RETURN_BYTES) and is_write_request:
self._uri_type = URI_BYTES
self._filename = uri
else:
self._uri_type = URI_FILENAME
self._filename = uri
elif isinstance(uri, memoryview) and is_read_request:
self._uri_type = URI_BYTES
self._filename = "<bytes>"
self._bytes = uri.tobytes()
elif isinstance(uri, bytes) and is_read_request:
self._uri_type = URI_BYTES
self._filename = "<bytes>"
self._bytes = uri
elif isinstance(uri, Path):
self._uri_type = URI_FILENAME
self._filename = str(uri)
# Files
elif is_read_request:
if hasattr(uri, "read") and hasattr(uri, "close"):
self._uri_type = URI_FILE
self._filename = "<file>"
self._file = uri # Data must be read from here
elif is_write_request:
if hasattr(uri, "write") and hasattr(uri, "close"):
self._uri_type = URI_FILE
self._filename = "<file>"
self._file = uri # Data must be written here
# Expand user dir
if self._uri_type == URI_FILENAME and self._filename.startswith("~"):
self._filename = os.path.expanduser(self._filename)
# Check if a zipfile
if self._uri_type == URI_FILENAME:
            # Search for zip extension followed by a path separator
for needle in [".zip/", ".zip\\"]:
zip_i = self._filename.lower().find(needle)
if zip_i > 0:
zip_i += 4
zip_path = self._filename[:zip_i]
if os.path.isdir(zip_path):
pass # is an existing dir (see #548)
elif is_write_request or os.path.isfile(zip_path):
self._uri_type = URI_ZIPPED
self._filename_zip = (
zip_path,
self._filename[zip_i:].lstrip("/\\"),
)
break
# Check if we could read it
if self._uri_type is None:
uri_r = repr(uri)
if len(uri_r) > 60:
uri_r = uri_r[:57] + "..."
raise IOError("Cannot understand given URI: %s." % uri_r)
# Check if this is supported
noWriting = [URI_HTTP, URI_FTP]
if is_write_request and self._uri_type in noWriting:
raise IOError("imageio does not support writing to http/ftp.")
# Deprecated way to load standard images, give a sensible error message
if is_read_request and self._uri_type in [URI_FILENAME, URI_ZIPPED]:
fn = self._filename
if self._filename_zip:
fn = self._filename_zip[0]
if (not os.path.exists(fn)) and (fn in EXAMPLE_IMAGES):
raise IOError(
"No such file: %r. This file looks like one of "
"the standard images, but from imageio 2.1, "
"standard images have to be specified using "
'"imageio:%s".' % (fn, fn)
)
# Make filename absolute
if self._uri_type in [URI_FILENAME, URI_ZIPPED]:
if self._filename_zip:
self._filename_zip = (
os.path.abspath(self._filename_zip[0]),
self._filename_zip[1],
)
else:
self._filename = os.path.abspath(self._filename)
# Check whether file name is valid
if self._uri_type in [URI_FILENAME, URI_ZIPPED]:
fn = self._filename
if self._filename_zip:
fn = self._filename_zip[0]
if is_read_request:
                # Reading: check that the file exists (a directory is also allowed)
if not os.path.exists(fn):
raise FileNotFoundError("No such file: '%s'" % fn)
else:
# Writing: check that the directory to write to does exist
dn = os.path.dirname(fn)
if not os.path.exists(dn):
raise FileNotFoundError("The directory %r does not exist" % dn)
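    # Rough mapping of accepted inputs to URI types (illustrative summary of
    # the branches above, not exhaustive; file names are hypothetical):
    #
    #   "imageio:chelsea.png"       -> URI_FILENAME (auto-downloaded standard image)
    #   "http://example.com/a.png"  -> URI_HTTP
    #   "archive.zip/img/a.png"     -> URI_ZIPPED (if archive.zip is a file)
    #   b"..." (read requests only) -> URI_BYTES
    #   open("a.png", "rb")         -> URI_FILE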
@property
def filename(self):
"""Name of the ImageResource.
The uri for which reading/saving was requested. This
can be a filename, an http address, or other resource
identifier. Do not rely on the filename to obtain the data,
but use ``get_file()`` or ``get_local_filename()`` instead.
"""
return self._filename
@property
def extension(self) -> str:
"""The (lowercase) extension of the requested filename.
        Suffixes in URLs are stripped. Can be None if the request is
not based on a filename.
"""
return self._extension
@property
def format_hint(self) -> Optional[str]:
return self._format_hint
@format_hint.setter
def format_hint(self, format: str) -> None:
self._format_hint = format
if self._extension is None:
self._extension = format
@property
def mode(self):
"""The mode of the request. The first character is "r" or "w",
indicating a read or write request. The second character is
used to indicate the kind of data:
"i" for an image, "I" for multiple images, "v" for a volume,
"V" for multiple volumes, "?" for don't care.
"""
return self._mode
@property
def kwargs(self):
"""The dict of keyword arguments supplied by the user."""
return self._kwargs
# For obtaining data
def get_file(self):
"""get_file()
Get a file object for the resource associated with this request.
If this is a reading request, the file is in read mode,
otherwise in write mode. This method is not thread safe. Plugins
should not close the file when done.
This is the preferred way to read/write the data. But if a
        format cannot handle file-like objects, it should use
        ``get_local_filename()``.
"""
want_to_write = self.mode.io_mode is IOMode.write
# Is there already a file?
# Either _uri_type == URI_FILE, or we already opened the file,
# e.g. by using firstbytes
if self._file is not None:
return self._file
if self._uri_type == URI_BYTES:
if want_to_write:
# Create new file object, we catch the bytes in finish()
self._file = BytesIO()
self._file_is_local = True
else:
self._file = BytesIO(self._bytes)
elif self._uri_type == URI_FILENAME:
if want_to_write:
self._file = open(self.filename, "wb")
else:
self._file = open(self.filename, "rb")
elif self._uri_type == URI_ZIPPED:
# Get the correct filename
filename, name = self._filename_zip
if want_to_write:
# Create new file object, we catch the bytes in finish()
self._file = BytesIO()
self._file_is_local = True
else:
# Open zipfile and open new file object for specific file
self._zipfile = zipfile.ZipFile(filename, "r")
self._file = self._zipfile.open(name, "r")
self._file = SeekableFileObject(self._file)
        elif self._uri_type in [URI_HTTP, URI_FTP]:
assert not want_to_write # This should have been tested in init
timeout = os.getenv("IMAGEIO_REQUEST_TIMEOUT")
if timeout is None or not timeout.isdigit():
timeout = 5
self._file = urlopen(self.filename, timeout=float(timeout))
self._file = SeekableFileObject(self._file)
return self._file
def get_local_filename(self):
"""get_local_filename()
If the filename is an existing file on this filesystem, return
that. Otherwise a temporary file is created on the local file
system which can be used by the format to read from or write to.
"""
if self._uri_type == URI_FILENAME:
return self._filename
else:
# Get filename
if self._uri_type in (URI_HTTP, URI_FTP):
ext = os.path.splitext(self._filename.split("?")[0])[1]
else:
ext = os.path.splitext(self._filename)[1]
self._filename_local = tempfile.mktemp(ext, "imageio_")
# Write stuff to it?
if self.mode.io_mode == IOMode.read:
with open(self._filename_local, "wb") as file:
shutil.copyfileobj(self.get_file(), file)
return self._filename_local
def finish(self) -> None:
"""Wrap up this request.
Finishes any pending reads or writes, closes any open files and frees
any resources allocated by this request.
"""
if self.mode.io_mode == IOMode.write:
# See if we "own" the data and must put it somewhere
bytes = None
if self._filename_local:
bytes = Path(self._filename_local).read_bytes()
elif self._file_is_local:
self._file_is_local = False
bytes = self._file.getvalue()
# Put the data in the right place
if bytes is not None:
if self._uri_type == URI_BYTES:
self._result = bytes # Picked up by imread function
elif self._uri_type == URI_FILE:
self._file.write(bytes)
elif self._uri_type == URI_ZIPPED:
zf = zipfile.ZipFile(self._filename_zip[0], "a")
zf.writestr(self._filename_zip[1], bytes)
zf.close()
# elif self._uri_type == URI_FILENAME: -> is always direct
# elif self._uri_type == URI_FTP/HTTP: -> write not supported
# Close open files that we know of (and are responsible for)
if self._file and self._uri_type != URI_FILE:
self._file.close()
self._file = None
if self._zipfile:
self._zipfile.close()
self._zipfile = None
# Remove temp file
if self._filename_local:
try:
os.remove(self._filename_local)
except Exception: # pragma: no cover
warnings.warn(
"Failed to delete the temporary file at "
f"`{self._filename_local}`. Please report this issue."
)
self._filename_local = None
# Detach so gc can clean even if a reference of self lingers
self._bytes = None
def get_result(self):
"""For internal use. In some situations a write action can have
a result (bytes data). That is obtained with this function.
"""
# Is there a reason to disallow reading multiple times?
self._result, res = None, self._result
return res
@property
def firstbytes(self):
"""The first 256 bytes of the file. These can be used to
parse the header to determine the file-format.
"""
if self._firstbytes is None:
self._read_first_bytes()
return self._firstbytes
def _read_first_bytes(self, N=256):
if self._bytes is not None:
self._firstbytes = self._bytes[:N]
else:
# Prepare
try:
f = self.get_file()
except IOError:
if os.path.isdir(self.filename): # A directory, e.g. for DICOM
self._firstbytes = bytes()
return
raise
try:
i = f.tell()
except Exception:
i = None
# Read
self._firstbytes = read_n_bytes(f, N)
# Set back
try:
if i is None:
raise Exception("cannot seek with None")
f.seek(i)
except Exception:
# Prevent get_file() from reusing the file
self._file = None
# If the given URI was a file object, we have a problem,
if self._uri_type == URI_FILE:
raise IOError("Cannot seek back after getting firstbytes!")
def read_n_bytes(f, N):
"""read_n_bytes(file, n)
    Read n bytes from the given file, or fewer if the file has fewer
    bytes. Returns zero bytes if the file is closed.
"""
bb = bytes()
while len(bb) < N:
extra_bytes = f.read(N - len(bb))
if not extra_bytes:
break
bb += extra_bytes
return bb
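# Minimal illustration (not part of the original module): ``read_n_bytes``
# keeps calling ``read`` until it has n bytes or the stream is exhausted, so
# short reads (e.g. from sockets) are handled transparently.
#
#   >>> from io import BytesIO
#   >>> read_n_bytes(BytesIO(b"abcdef"), 4)
#   b'abcd'
#   >>> read_n_bytes(BytesIO(b"ab"), 4)
#   b'ab'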
class SeekableFileObject:
"""A readonly wrapper file object that add support for seeking, even if
the wrapped file object does not. The allows us to stream from http and
still use Pillow.
"""
def __init__(self, f):
self.f = f
self._i = 0 # >=0 but can exceed buffer
self._buffer = b""
self._have_all = False
self.closed = False
def read(self, n=None):
# Fix up n
if n is None:
pass
else:
n = int(n)
if n < 0:
n = None
# Can and must we read more?
if not self._have_all:
more = b""
if n is None:
more = self.f.read()
self._have_all = True
else:
want_i = self._i + n
want_more = want_i - len(self._buffer)
if want_more > 0:
more = self.f.read(want_more)
if len(more) < want_more:
self._have_all = True
self._buffer += more
# Read data from buffer and update pointer
if n is None:
res = self._buffer[self._i :]
else:
res = self._buffer[self._i : self._i + n]
self._i += len(res)
return res
def tell(self):
return self._i
def seek(self, i, mode=0):
# Mimic BytesIO behavior
# Get the absolute new position
i = int(i)
if mode == 0:
if i < 0:
raise ValueError("negative seek value " + str(i))
real_i = i
elif mode == 1:
real_i = max(0, self._i + i) # negative ok here
elif mode == 2:
if not self._have_all:
self.read()
real_i = max(0, len(self._buffer) + i)
else:
            raise ValueError("invalid whence (%s, should be 0, 1 or 2)" % mode)
# Read some?
if real_i <= len(self._buffer):
pass # no need to read
elif not self._have_all:
assert real_i > self._i # if we don't have all, _i cannot be > _buffer
self.read(real_i - self._i) # sets self._i
self._i = real_i
return self._i
def close(self):
self.closed = True
self.f.close()
def isatty(self):
return False
def seekable(self):
return True
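# Minimal illustration (not part of the original module): wrapping a stream so
# that header bytes can be re-read after inspection. BytesIO stands in for a
# non-seekable object such as an HTTP response.
#
#   >>> from io import BytesIO
#   >>> f = SeekableFileObject(BytesIO(b"abcdef"))
#   >>> f.read(3)
#   b'abc'
#   >>> f.seek(0)
#   0
#   >>> f.read()
#   b'abcdef'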
class InitializationError(Exception):
"""The plugin could not initialize from the given request.
    This is an _internal_ error that is raised by plugins that fail to handle
a given request. We use this to differentiate incompatibility between
a plugin and a request from an actual error/bug inside a plugin.
"""
pass
|
|
# pylint: disable=E1101,F0401
from ..external.qt.QtCore import (QAbstractItemModel, QModelIndex,
QObject, Qt, QTimer, Signal)
from ..external.qt.QtGui import (QFont, QTreeView, QItemSelectionModel,
QAbstractItemView, QStyledItemDelegate)
from .qtutil import layer_icon
from .mime import LAYERS_MIME_TYPE, PyMimeData
from ..core.decorators import memoize
from ..core import message as m
from ..core.hub import HubListener
from .. import core
from .widgets.style_dialog import StyleDialog
DATA_IDX = 0
SUBSET_IDX = 1
def full_edit_factory(item, pos):
StyleDialog.dropdown_editor(item, pos)
def restricted_edit_factory(item, pos):
StyleDialog.dropdown_editor(item, pos, edit_label=False)
class Item(object):
edit_factory = None
glue_data = None
flags = Qt.ItemIsEnabled
tooltip = None
def font(self):
return QFont()
def icon(self):
return None
@property
def label(self):
return self._label
class DataCollectionItem(Item):
def __init__(self, dc):
self.dc = dc
self.row = 0
self.column = 0
self.parent = None
self._label = ''
self.children_count = 2
@memoize
def child(self, row):
if row == DATA_IDX:
return DataListItem(self.dc, self)
if row == SUBSET_IDX:
return SubsetListItem(self.dc, self)
return None
class DataListItem(Item):
def __init__(self, dc, parent):
self.dc = dc
self.parent = parent
self.row = DATA_IDX
self.column = 0
self._label = 'Data'
@memoize
def child(self, row):
if row < len(self.dc):
return DataItem(self.dc, row, self)
@property
def children_count(self):
return len(self.dc)
def font(self):
result = QFont()
result.setBold(True)
return result
class DataItem(Item):
edit_factory = full_edit_factory
flags = (Qt.ItemIsSelectable | Qt.ItemIsEnabled |
Qt.ItemIsDragEnabled)
def __init__(self, dc, row, parent):
self.dc = dc
self.row = row
self.parent = parent
self.column = 0
self.children_count = 0
@property
def data(self):
return self.dc[self.row]
@property
def glue_data(self):
return self.data
@property
def label(self):
return self.data.label
@label.setter
def label(self, value):
self.data.label = value
@property
def style(self):
return self.data.style
def icon(self):
return layer_icon(self.data)
class SubsetListItem(Item):
def __init__(self, dc, parent):
self.dc = dc
self.parent = parent
self.row = SUBSET_IDX
self._label = 'Subsets'
self.column = 0
@memoize
def child(self, row):
        if row < len(self.dc.subset_groups):
return SubsetGroupItem(self.dc, row, self)
@property
def children_count(self):
return len(self.dc.subset_groups)
def font(self):
result = QFont()
result.setBold(True)
return result
class SubsetGroupItem(Item):
edit_factory = full_edit_factory
flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
def __init__(self, dc, row, parent):
self.parent = parent
self.dc = dc
self.row = row
self.column = 0
@property
def subset_group(self):
return self.dc.subset_groups[self.row]
@property
def glue_data(self):
return self.subset_group
@property
def label(self):
return self.subset_group.label
@label.setter
def label(self, value):
self.subset_group.label = value
@property
def tooltip(self):
if type(self.subset_group.subset_state) == core.subset.SubsetState:
return "Empty subset"
atts = self.subset_group.subset_state.attributes
atts = [a for a in atts if isinstance(a, core.ComponentID)]
if len(atts) > 0:
lbl = ', '.join(a.label for a in atts)
return "Selection on %s" % lbl
@property
def style(self):
return self.subset_group.style
@property
def children_count(self):
return len(self.subset_group.subsets)
@memoize
def child(self, row):
return SubsetItem(self.dc, self.subset_group, row, self)
def icon(self):
return layer_icon(self.subset_group)
class SubsetItem(Item):
edit_factory = restricted_edit_factory
flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled
def __init__(self, dc, subset_group, subset_idx, parent):
self.parent = parent
self.subset_group = subset_group
self.row = subset_idx
self.parent = parent
self.children_count = 0
self.column = 0
@property
def subset(self):
return self.subset_group.subsets[self.row]
@property
def label(self):
return self.subset.verbose_label
def icon(self):
return layer_icon(self.subset)
@property
def style(self):
return self.subset.style
@property
def glue_data(self):
return self.subset
class DataCollectionModel(QAbstractItemModel, HubListener):
new_item = Signal(QModelIndex)
def __init__(self, data_collection, parent=None):
QAbstractItemModel.__init__(self, parent)
HubListener.__init__(self)
self.data_collection = data_collection
self.root = DataCollectionItem(data_collection)
self._items = {} # map hashes of Model pointers to model items
# without this reference, PySide clobbers instance
# data of model items
self.register_to_hub(self.data_collection.hub)
self.setSupportedDragActions(Qt.CopyAction)
def index(self, row, column, parent=QModelIndex()):
if column != 0:
return QModelIndex()
if not parent.isValid():
parent_item = self.root
else:
parent_item = self._get_item(parent)
if parent_item is None:
return QModelIndex()
child_item = parent_item.child(row)
if child_item:
return self._make_index(row, column, child_item)
else:
return QModelIndex()
def _get_item(self, index):
if not index.isValid():
return None
return self._items.get(id(index.internalPointer()), None)
def _make_index(self, row, column, item):
if item is not None:
result = self.createIndex(row, column, item)
self._items[id(result.internalPointer())] = item
assert result.internalPointer() is item
return result
return self.createIndex(row, column)
def to_indices(self, items):
"""Translate a list of Data, Subset, or SubsetGroups
to a list of indices"""
result = []
for item in items:
if isinstance(item, core.Data):
idx = self.data_index(list(self.data_collection).index(item))
elif isinstance(item, core.SubsetGroup):
idx = self.subsets_index(
self.data_collection.subset_groups.index(item))
elif isinstance(item, core.subset_group.GroupedSubset):
grp = item.group
idx = self.subsets_index(
self.data_collection.subset_groups.index(grp))
row = list(self.data_collection).index(item.data)
                idx = self.index(row, 0, idx)
else:
raise NotImplementedError(type(item))
result.append(idx)
return result
def flags(self, index=QModelIndex()):
if not index.isValid():
return Qt.NoItemFlags
return self._get_item(index).flags
def data(self, index, role):
if not index.isValid():
return
dispatch = {
Qt.DisplayRole: self._display_data,
Qt.FontRole: self._font_data,
Qt.DecorationRole: self._icon_data,
Qt.UserRole: self._user_data,
Qt.ToolTipRole: self._tooltip_data}
if role in dispatch:
return dispatch[role](index)
def setData(self, index, value, role=Qt.EditRole):
if role != Qt.EditRole:
return False
try:
self._get_item(index).label = value
return True
except AttributeError:
return False
def _tooltip_data(self, index):
tooltip = self._get_item(index).tooltip
return tooltip
def _user_data(self, index):
return self._get_item(index)
def _display_data(self, index):
return self._get_item(index).label
def _font_data(self, index):
item = self._get_item(index)
return item.font()
def _icon_data(self, index):
return self._get_item(index).icon()
def headerData(self, section, orientation, role=Qt.DisplayRole):
return ''
def data_index(self, data_number=None):
"""
Fetch the QModelIndex for a given data index,
or the index for the parent data item
:param data_number: position of data set to fetch, or None
"""
base = self.index(DATA_IDX, 0)
if data_number is None:
return base
return self.index(data_number, 0, base)
def subsets_index(self, subset_number=None):
"""
Fetch the QModelIndex for a given subset,
or the index for the parent subset item
        :param subset_number: position of subset group to fetch, or None
"""
base = self.index(SUBSET_IDX, 0)
assert isinstance(self._get_item(base), SubsetListItem)
if subset_number is None:
return base
return self.index(subset_number, 0, base)
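    # Usage sketch (illustration only, assuming a DataCollection with at least
    # two datasets has been wrapped by this model):
    #
    #   model = DataCollectionModel(data_collection)
    #   branch = model.data_index()           # QModelIndex of the "Data" branch
    #   second = model.data_index(1)          # QModelIndex of the second dataset
    #   model.data(second, Qt.DisplayRole)    # -> that dataset's label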
def rowCount(self, index=QModelIndex()):
item = self._get_item(index)
if item is None:
return self.root.children_count
return item.children_count
def parent(self, index=None):
if index is None: # overloaded QObject.parent()
return QObject.parent(self)
item = self._get_item(index)
if item is None:
return QModelIndex()
return self._make_index(item.row, item.column, item.parent)
def columnCount(self, index):
return 1
def register_to_hub(self, hub):
for msg in [m.DataCollectionDeleteMessage,
m.SubsetDeleteMessage]:
hub.subscribe(self, msg, lambda x: self.invalidate())
hub.subscribe(self, m.DataCollectionAddMessage, self._on_add_data)
hub.subscribe(self, m.SubsetCreateMessage, self._on_add_subset)
def _on_add_data(self, message):
self.invalidate()
idx = self.data_index(len(self.data_collection) - 1)
self.new_item.emit(idx)
def _on_add_subset(self, message):
self.invalidate()
idx = self.subsets_index(len(self.data_collection.subset_groups) - 1)
self.new_item.emit(idx)
def invalidate(self):
self.root = DataCollectionItem(self.data_collection)
self._items.clear()
self.reset()
self.layoutChanged.emit()
def glue_data(self, indices):
""" Given a list of indices, return a list of all selected
Data, Subset, and SubsetGroup objects.
"""
items = [self._get_item(idx) for idx in indices]
items = [item.glue_data for item in items]
return items
def mimeData(self, indices):
data = self.glue_data(indices)
result = PyMimeData(data, **{LAYERS_MIME_TYPE: data})
self._mime = result # hold reference to prevent segfault
return result
def mimeTypes(self):
return [LAYERS_MIME_TYPE]
class DataCollectionView(QTreeView):
selection_changed = Signal()
def __init__(self, parent=None):
super(DataCollectionView, self).__init__(parent)
self.doubleClicked.connect(self._edit)
        # this keeps the full-row selection highlight in sync
self.pressed.connect(lambda x: self.viewport().update())
# only edit label on model.new_item
self.setItemDelegate(LabeledDelegate())
self.setEditTriggers(self.NoEditTriggers)
self._timer = QTimer(self)
self._timer.timeout.connect(self.viewport().update)
self._timer.start(1000)
def selected_layers(self):
idxs = self.selectedIndexes()
return self._model.glue_data(idxs)
def set_selected_layers(self, layers):
sm = self.selectionModel()
idxs = self._model.to_indices(layers)
self.select_indices(*idxs)
def select_indices(self, *indices):
sm = self.selectionModel()
sm.clearSelection()
for idx in indices:
sm.select(idx, sm.Select)
def set_data_collection(self, data_collection):
self._model = DataCollectionModel(data_collection)
self.setModel(self._model)
sm = QItemSelectionModel(self._model)
sm.selectionChanged.connect(lambda *args:
self.selection_changed.emit())
self.setSelectionModel(sm)
self.setRootIsDecorated(False)
self.setExpandsOnDoubleClick(False)
self.expandToDepth(0)
self._model.layoutChanged.connect(lambda: self.expandToDepth(0))
self._model.layoutChanged.connect(self.selection_changed.emit)
self._model.new_item.connect(self.select_indices)
self._model.new_item.connect(self.edit_label)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setDragEnabled(True)
self.setDropIndicatorShown(True)
self.setDragDropMode(QAbstractItemView.DragOnly)
def edit_label(self, index):
if not (self._model.flags(index) & Qt.ItemIsEditable):
return
self.edit(index)
def _edit(self, index):
item = self._model.data(index, role=Qt.UserRole)
if item is None or item.edit_factory is None:
return
rect = self.visualRect(index)
pos = self.mapToGlobal(rect.bottomLeft())
pos.setY(pos.y() + 1)
item.edit_factory(pos)
class LabeledDelegate(QStyledItemDelegate):
""" Add placeholder text to default delegate """
def setEditorData(self, editor, index):
super(LabeledDelegate, self).setEditorData(editor, index)
label = index.model().data(index, role=Qt.DisplayRole)
editor.selectAll()
editor.setText(label)
if __name__ == "__main__":
from glue.qt import get_qapp
from glue.external.qt.QtGui import QTreeView
from glue.core import Data, DataCollection
app = get_qapp()
dc = DataCollection()
dc.append(Data(label='w'))
view = DataCollectionView()
view.set_data_collection(dc)
view.show()
view.raise_()
dc.extend([Data(label='x', x=[1, 2, 3]),
Data(label='y', y=[1, 2, 3]),
Data(label='z', z=[1, 2, 3])])
app.exec_()
|
|
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import hmac
import itertools
import json
import re
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
sanitized_Request,
)
class VikiBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/'
_API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com'
_API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s'
_APP = '65535a'
_APP_VERSION = '2.2.5.1428709186'
_APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)'
_NETRC_MACHINE = 'viki'
_token = None
_ERRORS = {
'geo': 'Sorry, this content is not available in your region.',
'upcoming': 'Sorry, this content is not yet available.',
# 'paywall': 'paywall',
}
def _prepare_call(self, path, timestamp=None, post_data=None):
path += '?' if '?' not in path else '&'
if not timestamp:
timestamp = int(time.time())
query = self._API_QUERY_TEMPLATE % (path, self._APP, timestamp)
if self._token:
query += '&token=%s' % self._token
sig = hmac.new(
self._APP_SECRET.encode('ascii'),
query.encode('ascii'),
hashlib.sha1
).hexdigest()
url = self._API_URL_TEMPLATE % (query, sig)
return sanitized_Request(
url, json.dumps(post_data).encode('utf-8')) if post_data else url
def _call_api(self, path, video_id, note, timestamp=None, post_data=None):
resp = self._download_json(
self._prepare_call(path, timestamp, post_data), video_id, note)
error = resp.get('error')
if error:
if error == 'invalid timestamp':
resp = self._download_json(
self._prepare_call(path, int(resp['current_timestamp']), post_data),
video_id, '%s (retry)' % note)
error = resp.get('error')
if error:
self._raise_error(resp['error'])
return resp
def _raise_error(self, error):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error),
expected=True)
def _check_errors(self, data):
for reason, status in data.get('blocking', {}).items():
if status and reason in self._ERRORS:
raise ExtractorError('%s said: %s' % (
self.IE_NAME, self._ERRORS[reason]), expected=True)
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_form = {
'login_id': username,
'password': password,
}
login = self._call_api(
'sessions.json', None,
'Logging in as %s' % username, post_data=login_form)
self._token = login.get('token')
if not self._token:
self.report_warning('Unable to get session token, login has probably failed')
@staticmethod
def dict_selection(dict_obj, preferred_key, allow_fallback=True):
if preferred_key in dict_obj:
return dict_obj.get(preferred_key)
if not allow_fallback:
return
filtered_dict = list(filter(None, [dict_obj.get(k) for k in dict_obj.keys()]))
return filtered_dict[0] if filtered_dict else None
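    # Behaviour sketch for ``dict_selection`` (illustration only, not part of
    # the extractor): prefer the requested key, otherwise fall back to the
    # first non-empty value when allowed.
    #
    #   >>> VikiBaseIE.dict_selection({'en': 'Heirs', 'ko': 'Sangsokjadeul'}, 'en')
    #   'Heirs'
    #   >>> VikiBaseIE.dict_selection({'ko': 'Sangsokjadeul'}, 'en')
    #   'Sangsokjadeul'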
class VikiIE(VikiBaseIE):
IE_NAME = 'viki'
_VALID_URL = r'%s(?:videos|player)/(?P<id>[0-9]+v)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
'info_dict': {
'id': '1023585v',
'ext': 'mp4',
'title': 'Heirs Episode 14',
'uploader': 'SBS',
'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e',
'upload_date': '20131121',
'age_limit': 13,
},
'skip': 'Blocked in the US',
}, {
# clip
'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
'info_dict': {
'id': '1067139v',
'ext': 'mp4',
'title': "'The Avengers: Age of Ultron' Press Conference",
'description': 'md5:d70b2f9428f5488321bfe1db10d612ea',
'duration': 352,
'timestamp': 1430380829,
'upload_date': '20150430',
'uploader': 'Arirang TV',
'like_count': int,
'age_limit': 0,
}
}, {
'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi',
'info_dict': {
'id': '1048879v',
'ext': 'mp4',
'title': 'Ankhon Dekhi',
'duration': 6512,
'timestamp': 1408532356,
'upload_date': '20140820',
'uploader': 'Spuul',
'like_count': int,
'age_limit': 13,
},
'skip': 'Blocked in the US',
}, {
# episode
'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
'md5': '5fa476a902e902783ac7a4d615cdbc7a',
'info_dict': {
'id': '44699v',
'ext': 'mp4',
'title': 'Boys Over Flowers - Episode 1',
'description': 'md5:b89cf50038b480b88b5b3c93589a9076',
'duration': 4204,
'timestamp': 1270496524,
'upload_date': '20100405',
'uploader': 'group8',
'like_count': int,
'age_limit': 13,
}
}, {
# youtube external
'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1',
'md5': '63f8600c1da6f01b7640eee7eca4f1da',
'info_dict': {
'id': '50562v',
'ext': 'webm',
'title': 'Poor Nastya [COMPLETE] - Episode 1',
'description': '',
'duration': 606,
'timestamp': 1274949505,
'upload_date': '20101213',
'uploader': 'ad14065n',
'uploader_id': 'ad14065n',
'like_count': int,
'age_limit': 13,
}
}, {
'url': 'http://www.viki.com/player/44699v',
'only_matching': True,
}, {
# non-English description
'url': 'http://www.viki.com/videos/158036v-love-in-magic',
'md5': '1713ae35df5a521b31f6dc40730e7c9c',
'info_dict': {
'id': '158036v',
'ext': 'mp4',
'uploader': 'I Planet Entertainment',
'upload_date': '20111122',
'timestamp': 1321985454,
'description': 'md5:44b1e46619df3a072294645c770cef36',
'title': 'Love In Magic',
'age_limit': 13,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._call_api(
'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
self._check_errors(video)
title = self.dict_selection(video.get('titles', {}), 'en', allow_fallback=False)
if not title:
title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
container_titles = video.get('container', {}).get('titles', {})
container_title = self.dict_selection(container_titles, 'en')
title = '%s - %s' % (container_title, title)
description = self.dict_selection(video.get('descriptions', {}), 'en')
duration = int_or_none(video.get('duration'))
timestamp = parse_iso8601(video.get('created_at'))
uploader = video.get('author')
like_count = int_or_none(video.get('likes', {}).get('count'))
age_limit = parse_age_limit(video.get('rating'))
thumbnails = []
for thumbnail_id, thumbnail in video.get('images', {}).items():
thumbnails.append({
'id': thumbnail_id,
'url': thumbnail.get('url'),
})
subtitles = {}
for subtitle_lang, _ in video.get('subtitle_completions', {}).items():
subtitles[subtitle_lang] = [{
'ext': subtitles_format,
'url': self._prepare_call(
'videos/%s/subtitles/%s.%s' % (video_id, subtitle_lang, subtitles_format)),
} for subtitles_format in ('srt', 'vtt')]
result = {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'like_count': like_count,
'age_limit': age_limit,
'thumbnails': thumbnails,
'subtitles': subtitles,
}
streams = self._call_api(
'videos/%s/streams.json' % video_id, video_id,
'Downloading video streams JSON')
if 'external' in streams:
result.update({
'_type': 'url_transparent',
'url': streams['external']['url'],
})
return result
formats = []
for format_id, stream_dict in streams.items():
height = int_or_none(self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None))
for protocol, format_dict in stream_dict.items():
                # rtmps URLs do not seem to work
if protocol == 'rtmps':
continue
format_url = format_dict['url']
if format_id == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
format_url, video_id, 'mp4',
entry_protocol='m3u8_native',
m3u8_id='m3u8-%s' % protocol, fatal=False)
# Despite CODECS metadata in m3u8 all video-only formats
# are actually video+audio
for f in m3u8_formats:
if f.get('acodec') == 'none' and f.get('vcodec') != 'none':
f['acodec'] = None
formats.extend(m3u8_formats)
elif format_url.startswith('rtmp'):
mobj = re.search(
r'^(?P<url>rtmp://[^/]+/(?P<app>.+?))/(?P<playpath>mp4:.+)$',
format_url)
if not mobj:
continue
formats.append({
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
'url': mobj.group('url'),
'play_path': mobj.group('playpath'),
'app': mobj.group('app'),
'page_url': url,
})
else:
formats.append({
'url': format_url,
'format_id': '%s-%s' % (format_id, protocol),
'height': height,
})
self._sort_formats(formats)
result['formats'] = formats
return result
class VikiChannelIE(VikiBaseIE):
IE_NAME = 'viki:channel'
_VALID_URL = r'%s(?:tv|news|movies|artists)/(?P<id>[0-9]+c)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/tv/50c-boys-over-flowers',
'info_dict': {
'id': '50c',
'title': 'Boys Over Flowers',
'description': 'md5:ecd3cff47967fe193cff37c0bec52790',
},
'playlist_mincount': 71,
}, {
'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
'info_dict': {
'id': '1354c',
'title': 'Poor Nastya [COMPLETE]',
'description': 'md5:05bf5471385aa8b21c18ad450e350525',
},
'playlist_count': 127,
}, {
'url': 'http://www.viki.com/news/24569c-showbiz-korea',
'only_matching': True,
}, {
'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005',
'only_matching': True,
}, {
'url': 'http://www.viki.com/artists/2141c-shinee',
'only_matching': True,
}]
_PER_PAGE = 25
def _real_extract(self, url):
channel_id = self._match_id(url)
channel = self._call_api(
'containers/%s.json' % channel_id, channel_id,
'Downloading channel JSON')
self._check_errors(channel)
title = self.dict_selection(channel['titles'], 'en')
description = self.dict_selection(channel['descriptions'], 'en')
entries = []
for video_type in ('episodes', 'clips', 'movies'):
for page_num in itertools.count(1):
page = self._call_api(
'containers/%s/%s.json?per_page=%d&sort=number&direction=asc&with_paging=true&page=%d'
% (channel_id, video_type, self._PER_PAGE, page_num), channel_id,
'Downloading %s JSON page #%d' % (video_type, page_num))
for video in page['response']:
video_id = video['id']
entries.append(self.url_result(
'http://www.viki.com/videos/%s' % video_id, 'Viki'))
if not page['pagination']['next']:
break
return self.playlist_result(entries, channel_id, title, description)
|
|
# Copyright: See the LICENSE file.
import datetime
import decimal
import unittest
import warnings
from unittest import mock
from factory import fuzzy, random
from . import utils
class FuzzyAttributeTestCase(unittest.TestCase):
def test_simple_call(self):
d = fuzzy.FuzzyAttribute(lambda: 10)
res = utils.evaluate_declaration(d)
self.assertEqual(10, res)
res = utils.evaluate_declaration(d)
self.assertEqual(10, res)
class FuzzyChoiceTestCase(unittest.TestCase):
def test_unbiased(self):
options = [1, 2, 3]
d = fuzzy.FuzzyChoice(options)
res = utils.evaluate_declaration(d)
self.assertIn(res, options)
def test_mock(self):
options = [1, 2, 3]
fake_choice = lambda d: sum(d)
d = fuzzy.FuzzyChoice(options)
with mock.patch('factory.random.randgen.choice', fake_choice):
res = utils.evaluate_declaration(d)
self.assertEqual(6, res)
def test_generator(self):
def options():
yield from range(3)
d = fuzzy.FuzzyChoice(options())
res = utils.evaluate_declaration(d)
self.assertIn(res, [0, 1, 2])
# And repeat
res = utils.evaluate_declaration(d)
self.assertIn(res, [0, 1, 2])
def test_lazy_generator(self):
class Gen:
def __init__(self, options):
self.options = options
self.unrolled = False
def __iter__(self):
self.unrolled = True
return iter(self.options)
opts = Gen([1, 2, 3])
d = fuzzy.FuzzyChoice(opts)
self.assertFalse(opts.unrolled)
res = utils.evaluate_declaration(d)
self.assertIn(res, [1, 2, 3])
self.assertTrue(opts.unrolled)
def test_getter(self):
options = [('a', 1), ('b', 2), ('c', 3)]
d = fuzzy.FuzzyChoice(options, getter=lambda x: x[1])
res = utils.evaluate_declaration(d)
self.assertIn(res, [1, 2, 3])
class FuzzyIntegerTestCase(unittest.TestCase):
def test_definition(self):
"""Tests all ways of defining a FuzzyInteger."""
fuzz = fuzzy.FuzzyInteger(2, 3)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertIn(res, [2, 3])
fuzz = fuzzy.FuzzyInteger(4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertIn(res, [0, 1, 2, 3, 4])
def test_biased(self):
fake_randrange = lambda low, high, step: (low + high) * step
fuzz = fuzzy.FuzzyInteger(2, 8)
with mock.patch('factory.random.randgen.randrange', fake_randrange):
res = utils.evaluate_declaration(fuzz)
self.assertEqual((2 + 8 + 1) * 1, res)
def test_biased_high_only(self):
fake_randrange = lambda low, high, step: (low + high) * step
fuzz = fuzzy.FuzzyInteger(8)
with mock.patch('factory.random.randgen.randrange', fake_randrange):
res = utils.evaluate_declaration(fuzz)
self.assertEqual((0 + 8 + 1) * 1, res)
def test_biased_with_step(self):
fake_randrange = lambda low, high, step: (low + high) * step
fuzz = fuzzy.FuzzyInteger(5, 8, 3)
with mock.patch('factory.random.randgen.randrange', fake_randrange):
res = utils.evaluate_declaration(fuzz)
self.assertEqual((5 + 8 + 1) * 3, res)
class FuzzyDecimalTestCase(unittest.TestCase):
def test_definition(self):
"""Tests all ways of defining a FuzzyDecimal."""
fuzz = fuzzy.FuzzyDecimal(2.0, 3.0)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(
decimal.Decimal('2.0') <= res <= decimal.Decimal('3.0'),
"value %d is not between 2.0 and 3.0" % res,
)
fuzz = fuzzy.FuzzyDecimal(4.0)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(
decimal.Decimal('0.0') <= res <= decimal.Decimal('4.0'),
"value %d is not between 0.0 and 4.0" % res,
)
fuzz = fuzzy.FuzzyDecimal(1.0, 4.0, precision=5)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(
decimal.Decimal('1.0') <= res <= decimal.Decimal('4.0'),
"value %d is not between 1.0 and 4.0" % res,
)
            self.assertEqual(-5, res.as_tuple().exponent)
def test_biased(self):
fake_uniform = lambda low, high: low + high
fuzz = fuzzy.FuzzyDecimal(2.0, 8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(decimal.Decimal('10.0'), res)
def test_biased_high_only(self):
fake_uniform = lambda low, high: low + high
fuzz = fuzzy.FuzzyDecimal(8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(decimal.Decimal('8.0'), res)
def test_precision(self):
fake_uniform = lambda low, high: low + high + 0.001
fuzz = fuzzy.FuzzyDecimal(8.0, precision=3)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(decimal.Decimal('8.001').quantize(decimal.Decimal(10) ** -3), res)
def test_no_approximation(self):
"""We should not go through floats in our fuzzy calls unless actually needed."""
fuzz = fuzzy.FuzzyDecimal(0, 10)
decimal_context = decimal.getcontext()
old_traps = decimal_context.traps[decimal.FloatOperation]
try:
decimal_context.traps[decimal.FloatOperation] = True
utils.evaluate_declaration(fuzz)
finally:
decimal_context.traps[decimal.FloatOperation] = old_traps
class FuzzyFloatTestCase(unittest.TestCase):
def test_definition(self):
"""Tests all ways of defining a FuzzyFloat."""
fuzz = fuzzy.FuzzyFloat(2.0, 3.0)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(2.0 <= res <= 3.0, "value %d is not between 2.0 and 3.0" % res)
fuzz = fuzzy.FuzzyFloat(4.0)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(0.0 <= res <= 4.0, "value %d is not between 0.0 and 4.0" % res)
        fuzz = fuzzy.FuzzyFloat(1.0, 4.0, precision=5)
        for _i in range(20):
            res = utils.evaluate_declaration(fuzz)
            self.assertTrue(1.0 <= res <= 4.0, "value %d is not between 1.0 and 4.0" % res)
            # formatting at the configured precision must not change the value
            self.assertEqual(res, float('%.5g' % res))
def test_biased(self):
fake_uniform = lambda low, high: low + high
fuzz = fuzzy.FuzzyFloat(2.0, 8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(10.0, res)
def test_biased_high_only(self):
fake_uniform = lambda low, high: low + high
fuzz = fuzzy.FuzzyFloat(8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(8.0, res)
def test_default_precision(self):
fake_uniform = lambda low, high: low + high + 0.000000000000011
fuzz = fuzzy.FuzzyFloat(8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(8.00000000000001, res)
def test_precision(self):
fake_uniform = lambda low, high: low + high + 0.001
fuzz = fuzzy.FuzzyFloat(8.0, precision=4)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(8.001, res)
class FuzzyDateTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Setup useful constants
cls.jan1 = datetime.date(2013, 1, 1)
cls.jan3 = datetime.date(2013, 1, 3)
cls.jan31 = datetime.date(2013, 1, 31)
def test_accurate_definition(self):
"""Tests all ways of defining a FuzzyDate."""
fuzz = fuzzy.FuzzyDate(self.jan1, self.jan31)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan31)
def test_partial_definition(self):
"""Test defining a FuzzyDate without passing an end date."""
with utils.mocked_date_today(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDate(self.jan1)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan3)
def test_invalid_definition(self):
with self.assertRaises(ValueError):
fuzzy.FuzzyDate(self.jan31, self.jan1)
def test_invalid_partial_definition(self):
with utils.mocked_date_today(self.jan1, fuzzy):
with self.assertRaises(ValueError):
fuzzy.FuzzyDate(self.jan31)
def test_biased(self):
"""Tests a FuzzyDate with a biased random.randint."""
fake_randint = lambda low, high: (low + high) // 2
fuzz = fuzzy.FuzzyDate(self.jan1, self.jan31)
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.date(2013, 1, 16), res)
def test_biased_partial(self):
"""Tests a FuzzyDate with a biased random and implicit upper bound."""
with utils.mocked_date_today(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDate(self.jan1)
fake_randint = lambda low, high: (low + high) // 2
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.date(2013, 1, 2), res)
class FuzzyNaiveDateTimeTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Setup useful constants
cls.jan1 = datetime.datetime(2013, 1, 1)
cls.jan3 = datetime.datetime(2013, 1, 3)
cls.jan31 = datetime.datetime(2013, 1, 31)
def test_accurate_definition(self):
"""Tests explicit definition of a FuzzyNaiveDateTime."""
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan31)
def test_partial_definition(self):
"""Test defining a FuzzyNaiveDateTime without passing an end date."""
with utils.mocked_datetime_now(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan3)
def test_aware_start(self):
"""Tests that a timezone-aware start datetime is rejected."""
with self.assertRaises(ValueError):
fuzzy.FuzzyNaiveDateTime(self.jan1.replace(tzinfo=datetime.timezone.utc), self.jan31)
def test_aware_end(self):
"""Tests that a timezone-aware end datetime is rejected."""
with self.assertRaises(ValueError):
fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31.replace(tzinfo=datetime.timezone.utc))
def test_force_year(self):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_year=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.year)
def test_force_month(self):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_month=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.month)
def test_force_day(self):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_day=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.day)
def test_force_hour(self):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_hour=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.hour)
def test_force_minute(self):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_minute=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.minute)
def test_force_second(self):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_second=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.second)
def test_force_microsecond(self):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31, force_microsecond=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.microsecond)
def test_invalid_definition(self):
with self.assertRaises(ValueError):
fuzzy.FuzzyNaiveDateTime(self.jan31, self.jan1)
def test_invalid_partial_definition(self):
with utils.mocked_datetime_now(self.jan1, fuzzy):
with self.assertRaises(ValueError):
fuzzy.FuzzyNaiveDateTime(self.jan31)
def test_biased(self):
"""Tests a FuzzyDate with a biased random.randint."""
fake_randint = lambda low, high: (low + high) // 2
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1, self.jan31)
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.datetime(2013, 1, 16), res)
def test_biased_partial(self):
"""Tests a FuzzyDate with a biased random and implicit upper bound."""
with utils.mocked_datetime_now(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyNaiveDateTime(self.jan1)
fake_randint = lambda low, high: (low + high) // 2
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.datetime(2013, 1, 2), res)
class FuzzyDateTimeTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Setup useful constants
cls.jan1 = datetime.datetime(2013, 1, 1, tzinfo=datetime.timezone.utc)
cls.jan3 = datetime.datetime(2013, 1, 3, tzinfo=datetime.timezone.utc)
cls.jan31 = datetime.datetime(2013, 1, 31, tzinfo=datetime.timezone.utc)
def test_accurate_definition(self):
"""Tests explicit definition of a FuzzyDateTime."""
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan31)
def test_partial_definition(self):
"""Test defining a FuzzyDateTime without passing an end date."""
with utils.mocked_datetime_now(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDateTime(self.jan1)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan3)
def test_invalid_definition(self):
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan31, self.jan1)
def test_invalid_partial_definition(self):
with utils.mocked_datetime_now(self.jan1, fuzzy):
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan31)
def test_naive_start(self):
"""Tests that a timezone-naive start datetime is rejected."""
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan1.replace(tzinfo=None), self.jan31)
def test_naive_end(self):
"""Tests that a timezone-naive end datetime is rejected."""
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan1, self.jan31.replace(tzinfo=None))
def test_force_year(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_year=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.year)
def test_force_month(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_month=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.month)
def test_force_day(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_day=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.day)
def test_force_hour(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_hour=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.hour)
def test_force_minute(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_minute=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.minute)
def test_force_second(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_second=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.second)
def test_force_microsecond(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_microsecond=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.microsecond)
def test_biased(self):
"""Tests a FuzzyDate with a biased random.randint."""
fake_randint = lambda low, high: (low + high) // 2
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31)
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.datetime(2013, 1, 16, tzinfo=datetime.timezone.utc), res)
def test_biased_partial(self):
"""Tests a FuzzyDate with a biased random and implicit upper bound."""
with utils.mocked_datetime_now(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDateTime(self.jan1)
fake_randint = lambda low, high: (low + high) // 2
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.datetime(2013, 1, 2, tzinfo=datetime.timezone.utc), res)
class FuzzyTextTestCase(unittest.TestCase):
def test_unbiased(self):
chars = ['a', 'b', 'c']
fuzz = fuzzy.FuzzyText(prefix='pre', suffix='post', chars=chars, length=12)
res = utils.evaluate_declaration(fuzz)
self.assertEqual('pre', res[:3])
self.assertEqual('post', res[-4:])
self.assertEqual(3 + 12 + 4, len(res))
for char in res[3:-4]:
self.assertIn(char, chars)
def test_mock(self):
fake_choice = lambda chars: chars[0]
chars = ['a', 'b', 'c']
fuzz = fuzzy.FuzzyText(prefix='pre', suffix='post', chars=chars, length=4)
with mock.patch('factory.random.randgen.choice', fake_choice):
res = utils.evaluate_declaration(fuzz)
self.assertEqual('preaaaapost', res)
def test_generator(self):
def options():
yield 'a'
yield 'b'
yield 'c'
fuzz = fuzzy.FuzzyText(chars=options(), length=12)
res = utils.evaluate_declaration(fuzz)
self.assertEqual(12, len(res))
for char in res:
self.assertIn(char, ['a', 'b', 'c'])
class FuzzyRandomTestCase(unittest.TestCase):
def test_seeding(self):
fuzz = fuzzy.FuzzyInteger(1, 1000)
random.reseed_random(42)
value = utils.evaluate_declaration(fuzz)
random.reseed_random(42)
value2 = utils.evaluate_declaration(fuzz)
self.assertEqual(value, value2)
def test_seeding_warning(self):
with warnings.catch_warnings(record=True) as w:
# Do not turn expected warning into an error.
warnings.filterwarnings("default", category=UserWarning, module=r"tests\.test_fuzzy")
fuzz = fuzzy.FuzzyDate(datetime.date(2013, 1, 1))
utils.evaluate_declaration(fuzz)
self.assertEqual(1, len(w))
self.assertIn('factory_boy/issues/331', str(w[-1].message))
def test_reset_state(self):
fuzz = fuzzy.FuzzyInteger(1, 1000)
state = random.get_random_state()
value = utils.evaluate_declaration(fuzz)
random.set_random_state(state)
value2 = utils.evaluate_declaration(fuzz)
self.assertEqual(value, value2)
|
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import getpass as gp
import yaml
if "win32" in sys.platform:
import colorama
colorama.init()
import re
import tempfile
import subprocess
import codecs
import unicodedata
import shlex
import logging
log = logging.getLogger(__name__)
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
STDIN = sys.stdin
STDERR = sys.stderr
STDOUT = sys.stdout
TEST = False
__cached_tz = None
WARNING_COLOR = "\033[33m"
ERROR_COLOR = "\033[31m"
RESET_COLOR = "\033[0m"
# Based on Segtok by Florian Leitner
# https://github.com/fnl/segtok
SENTENCE_SPLITTER = re.compile(r"""
( # A sentence ends at one of two sequences:
[.!?\u203C\u203D\u2047\u2048\u2049\u3002\uFE52\uFE57\uFF01\uFF0E\uFF1F\uFF61] # Either, a sequence starting with a sentence terminal,
[\'\u2019\"\u201D]? # an optional right quote,
[\]\)]* # optional closing brackets and
\s+ # a sequence of required spaces.
| # Otherwise,
    \n  # a sentence also terminates at a newline.
)""", re.UNICODE | re.VERBOSE)
class UserAbort(Exception):
pass
def getpass(prompt="Password: "):
if not TEST:
return gp.getpass(bytes(prompt))
else:
return py23_input(prompt)
def get_password(validator, keychain=None, max_attempts=3):
pwd_from_keychain = keychain and get_keychain(keychain)
password = pwd_from_keychain or getpass()
result = validator(password)
# Password is bad:
if result is None and pwd_from_keychain:
set_keychain(keychain, None)
attempt = 1
while result is None and attempt < max_attempts:
prompt("Wrong password, try again.")
password = getpass()
result = validator(password)
attempt += 1
if result is not None:
return result
else:
prompt("Extremely wrong password.")
sys.exit(1)
def get_keychain(journal_name):
import keyring
return keyring.get_password('jrnl', journal_name)
def set_keychain(journal_name, password):
import keyring
if password is None:
try:
keyring.delete_password('jrnl', journal_name)
except:
pass
elif not TEST:
keyring.set_password('jrnl', journal_name, password)
def u(s):
"""Mock unicode function for python 2 and 3 compatibility."""
if not isinstance(s, str):
s = str(s)
return s if PY3 or type(s) is unicode else s.decode("utf-8")
def py2encode(s):
"""Encodes to UTF-8 in Python 2 but not in Python 3."""
return s.encode("utf-8") if PY2 and type(s) is unicode else s
def bytes(s):
"""Returns bytes, no matter what."""
if PY3:
return s.encode("utf-8") if type(s) is not bytes else s
return s.encode("utf-8") if type(s) is unicode else s
def prnt(s):
"""Encode and print a string"""
STDOUT.write(u(s + "\n"))
def prompt(msg):
"""Prints a message to the std err stream defined in util."""
if not msg:
return
if not msg.endswith("\n"):
msg += "\n"
STDERR.write(u(msg))
def py23_input(msg=""):
prompt(msg)
return STDIN.readline().strip()
def py23_read(msg=""):
print(msg)
return STDIN.read()
def yesno(prompt, default=True):
prompt = prompt.strip() + (" [Y/n]" if default else " [y/N]")
raw = py23_input(prompt)
return {'y': True, 'n': False}.get(raw.lower(), default)
def load_config(config_path):
"""Tries to load a config file from YAML.
"""
with open(config_path) as f:
return yaml.load(f, Loader=yaml.FullLoader)
def scope_config(config, journal_name):
if journal_name not in config['journals']:
return config
config = config.copy()
journal_conf = config['journals'].get(journal_name)
if type(journal_conf) is dict: # We can override the default config on a by-journal basis
log.debug('Updating configuration with specific journal overrides %s', journal_conf)
config.update(journal_conf)
else: # But also just give them a string to point to the journal file
config['journal'] = journal_conf
config.pop('journals')
return config
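# Illustrative sketch (not part of the original module) of the two shapes a
# journals entry may take in the YAML config handled by scope_config(); the
# journal names, paths and the 'encrypt' key are assumptions for demonstration:
#
#   journals:
#     default: ~/journal.txt     # plain string -> stored as config['journal']
#     work:                      # mapping      -> merged over the top-level config
#       journal: ~/work.txt
#       encrypt: true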
def get_text_from_editor(config, template=""):
filehandle, tmpfile = tempfile.mkstemp(prefix="jrnl", text=True, suffix=".txt")
with codecs.open(tmpfile, 'w', "utf-8") as f:
if template:
f.write(template)
try:
subprocess.call(shlex.split(config['editor'], posix="win" not in sys.platform) + [tmpfile])
except AttributeError:
subprocess.call(config['editor'] + [tmpfile])
with codecs.open(tmpfile, "r", "utf-8") as f:
raw = f.read()
os.close(filehandle)
os.remove(tmpfile)
if not raw:
prompt('[Nothing saved to file]')
return raw
def colorize(string):
"""Returns the string wrapped in cyan ANSI escape"""
return u"\033[36m{}\033[39m".format(string)
def slugify(string):
"""Slugifies a string.
Based on public domain code from https://github.com/zacharyvoase/slugify
and ported to deal with all kinds of python 2 and 3 strings
"""
string = u(string)
ascii_string = str(unicodedata.normalize('NFKD', string).encode('ascii', 'ignore'))
if PY3:
ascii_string = ascii_string[1:] # removed the leading 'b'
no_punctuation = re.sub(r'[^\w\s-]', '', ascii_string).strip().lower()
slug = re.sub(r'[-\s]+', '-', no_punctuation)
return u(slug)
def int2byte(i):
"""Converts an integer to a byte.
This is equivalent to chr() in Python 2 and bytes((i,)) in Python 3."""
return chr(i) if PY2 else bytes((i,))
def byte2int(b):
"""Converts a byte to an integer.
This is equivalent to ord(bs[0]) on Python 2 and bs[0] on Python 3."""
    return ord(b) if PY2 else b
def split_title(text):
"""Splits the first sentence off from a text."""
punkt = SENTENCE_SPLITTER.search(text)
if not punkt:
return text, ""
return text[:punkt.end()].strip(), text[punkt.end():].strip()
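# Illustrative sketch (not part of the original module): how SENTENCE_SPLITTER
# drives split_title(). The sample strings are assumptions chosen purely for
# demonstration.
def _split_title_example():
    """Show that the first sentence becomes the title and the rest the body."""
    title, body = split_title("Went hiking today. The weather was great.")
    assert title == "Went hiking today."
    assert body == "The weather was great."
    # A newline also ends the title, even without sentence punctuation.
    title, body = split_title("Shopping list\nmilk, eggs, bread")
    assert title == "Shopping list"
    assert body == "milk, eggs, bread"
    return title, body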
|
|
#! /usr/bin/env python
# Last Change: Mon Aug 20 08:00 PM 2007 J
from __future__ import division, print_function, absolute_import
import re
import itertools
import datetime
from functools import partial
import numpy as np
from scipy.lib.six import next
"""A module to read arff files."""
__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
# An Arff file is basically two parts:
# - header
# - data
#
# A header has each of its components starting by @META where META is one of
# the keyword (attribute of relation, for now).
# TODO:
# - both integers and reals are treated as numeric -> the integer info is lost!
# - Replace ValueError by ParseError or something
# We can now handle the following:
# - numeric and nominal attributes
# - date attributes (without time zone information)
# - missing values for numeric attributes
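# A minimal sketch of the kind of file this reader expects (illustrative only,
# not shipped with the module; the relation and attribute names are assumptions):
#
#   % a comment line
#   @relation weather
#   @attribute temperature numeric
#   @attribute outlook {sunny, rainy}
#   @data
#   21.5, sunny
#   ?, rainy
#
# '?' marks a missing value: safe_float() turns it into NaN and safe_nominal()
# passes it through unchanged.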
r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
# To get attributes name enclosed with ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get attributes name enclosed with '', possibly spread across multilines
r_mcomattrval = re.compile(r"'([..\n]+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
#-------------------------
# Module defined exception
#-------------------------
class ArffError(IOError):
pass
class ParseArffError(ArffError):
pass
#------------------
# Various utilities
#------------------
# An attribute is defined as @attribute name value
def parse_type(attrtype):
"""Given an arff attribute value (meta data), returns its type.
Expect the value to be a name."""
uattribute = attrtype.lower().strip()
if uattribute[0] == '{':
return 'nominal'
elif uattribute[:len('real')] == 'real':
return 'numeric'
elif uattribute[:len('integer')] == 'integer':
return 'numeric'
elif uattribute[:len('numeric')] == 'numeric':
return 'numeric'
elif uattribute[:len('string')] == 'string':
return 'string'
elif uattribute[:len('relational')] == 'relational':
return 'relational'
elif uattribute[:len('date')] == 'date':
return 'date'
else:
raise ParseArffError("unknown attribute %s" % uattribute)
def get_nominal(attribute):
"""If attribute is nominal, returns a list of the values"""
return attribute.split(',')
def read_data_list(ofile):
"""Read each line of the iterable and put it in a list."""
data = [next(ofile)]
if data[0].strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
data.extend([i for i in ofile])
return data
def get_ndata(ofile):
"""Read the whole file to get number of data attributes."""
data = [next(ofile)]
loc = 1
if data[0].strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
for i in ofile:
loc += 1
return loc
def maxnomlen(atrv):
"""Given a string containing a nominal type definition, returns the
string len of the biggest component.
A nominal type is defined as seomthing framed between brace ({}).
Parameters
----------
atrv : str
Nominal type definition
Returns
-------
slen : int
length of longest component
Examples
--------
maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of
ratata, the longest nominal value).
>>> maxnomlen("{floup, bouga, fl, ratata}")
6
"""
nomtp = get_nom_val(atrv)
return max(len(i) for i in nomtp)
def get_nom_val(atrv):
"""Given a string containing a nominal type, returns a tuple of the
possible values.
A nominal type is defined as something framed between braces ({}).
Parameters
----------
atrv : str
Nominal type definition
Returns
-------
poss_vals : tuple
possible values
Examples
--------
>>> get_nom_val("{floup, bouga, fl, ratata}")
('floup', 'bouga', 'fl', 'ratata')
"""
r_nominal = re.compile('{(.+)}')
m = r_nominal.match(atrv)
if m:
return tuple(i.strip() for i in m.group(1).split(','))
else:
raise ValueError("This does not look like a nominal string")
def get_date_format(atrv):
r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
m = r_date.match(atrv)
if m:
pattern = m.group(1).strip()
# convert time pattern from Java's SimpleDateFormat to C's format
datetime_unit = None
if "yyyy" in pattern:
pattern = pattern.replace("yyyy", "%Y")
datetime_unit = "Y"
elif "yy":
pattern = pattern.replace("yy", "%y")
datetime_unit = "Y"
if "MM" in pattern:
pattern = pattern.replace("MM", "%m")
datetime_unit = "M"
if "dd" in pattern:
pattern = pattern.replace("dd", "%d")
datetime_unit = "D"
if "HH" in pattern:
pattern = pattern.replace("HH", "%H")
datetime_unit = "h"
if "mm" in pattern:
pattern = pattern.replace("mm", "%M")
datetime_unit = "m"
if "ss" in pattern:
pattern = pattern.replace("ss", "%S")
datetime_unit = "s"
if "z" in pattern or "Z" in pattern:
raise ValueError("Date type attributes with time zone not supported, yet")
if datetime_unit is None:
raise ValueError("Invalid or unsupported date format")
return pattern, datetime_unit
else:
raise ValueError("Invalid or no date format")
def go_data(ofile):
"""Skip header.
the first next() call of the returned iterator will be the @data line"""
return itertools.dropwhile(lambda x: not r_datameta.match(x), ofile)
#----------------
# Parsing header
#----------------
def tokenize_attribute(iterable, attribute):
"""Parse a raw string in header (eg starts by @attribute).
Given a raw string attribute, try to get the name and type of the
attribute. Constraints:
* The first line must start with @attribute (case insensitive, and
space like characters before @attribute are allowed)
* Works also if the attribute is spread on multilines.
* Works if empty lines or comments are in between
Parameters
----------
attribute : str
the attribute string.
Returns
-------
name : str
name of the attribute
value : str
value of the attribute
next : str
next line to be parsed
Examples
--------
If attribute is a string defined in python as r"floupi real", will
return floupi as name, and real as value.
>>> iterable = iter([0] * 10) # dummy iterator
>>> tokenize_attribute(iterable, r"@attribute floupi real")
('floupi', 'real', 0)
If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
and real as value.
>>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real ")
('floupi 2', 'real', 0)
"""
sattr = attribute.strip()
mattr = r_attribute.match(sattr)
if mattr:
# atrv is everything after @attribute
atrv = mattr.group(1)
if r_comattrval.match(atrv):
name, type = tokenize_single_comma(atrv)
next_item = next(iterable)
elif r_wcomattrval.match(atrv):
name, type = tokenize_single_wcomma(atrv)
next_item = next(iterable)
else:
# Not sure we should support this, as it does not seem supported by
# weka.
raise ValueError("multi line not supported yet")
#name, type, next_item = tokenize_multilines(iterable, atrv)
else:
raise ValueError("First line unparsable: %s" % sattr)
if type == 'relational':
raise ValueError("relational attributes not supported yet")
return name, type, next_item
def tokenize_single_comma(val):
# XXX we match twice the same string (here and at the caller level). It is
# stupid, but it is easier for now...
m = r_comattrval.match(val)
if m:
try:
name = m.group(1).strip()
type = m.group(2).strip()
except IndexError:
raise ValueError("Error while tokenizing attribute")
else:
raise ValueError("Error while tokenizing single %s" % val)
return name, type
def tokenize_single_wcomma(val):
# XXX we match twice the same string (here and at the caller level). It is
# stupid, but it is easier for now...
m = r_wcomattrval.match(val)
if m:
try:
name = m.group(1).strip()
type = m.group(2).strip()
except IndexError:
raise ValueError("Error while tokenizing attribute")
else:
raise ValueError("Error while tokenizing single %s" % val)
return name, type
def read_header(ofile):
"""Read the header of the iterable ofile."""
i = next(ofile)
# Pass first comments
while r_comment.match(i):
i = next(ofile)
# Header is everything up to DATA attribute ?
relation = None
attributes = []
while not r_datameta.match(i):
m = r_headerline.match(i)
if m:
isattr = r_attribute.match(i)
if isattr:
name, type, i = tokenize_attribute(ofile, i)
attributes.append((name, type))
else:
isrel = r_relation.match(i)
if isrel:
relation = isrel.group(1)
else:
raise ValueError("Error parsing line %s" % i)
i = next(ofile)
else:
i = next(ofile)
return relation, attributes
#--------------------
# Parsing actual data
#--------------------
def safe_float(x):
"""given a string x, convert it to a float. If the stripped string is a ?,
return a Nan (missing value).
Parameters
----------
x : str
string to convert
Returns
-------
f : float
where float can be nan
Examples
--------
>>> safe_float('1')
1.0
>>> safe_float('1\\n')
1.0
>>> safe_float('?\\n')
nan
"""
if '?' in x:
return np.nan
else:
return np.float(x)
def safe_nominal(value, pvalue):
svalue = value.strip()
if svalue in pvalue:
return svalue
elif svalue == '?':
return svalue
else:
raise ValueError("%s value not in %s" % (str(svalue), str(pvalue)))
def safe_date(value, date_format, datetime_unit):
date_str = value.strip().strip("'").strip('"')
if date_str == '?':
return np.datetime64('NaT', datetime_unit)
else:
dt = datetime.datetime.strptime(date_str, date_format)
return np.datetime64(dt).astype("datetime64[%s]" % datetime_unit)
def get_delim(line):
"""Given a string representing a line of data, check whether the
delimiter is ',' or space.
Parameters
----------
line : str
line of data
Returns
-------
delim : {',', ' '}
Examples
--------
>>> get_delim(',')
','
>>> get_delim(' ')
' '
>>> get_delim(', ')
','
>>> get_delim('x')
Traceback (most recent call last):
...
ValueError: delimiter not understood: x
"""
if ',' in line:
return ','
if ' ' in line:
return ' '
raise ValueError("delimiter not understood: " + line)
class MetaData(object):
"""Small container to keep useful informations on a ARFF dataset.
Knows about attributes names and types.
Examples
--------
data, meta = loadarff('iris.arff')
    # This will print the attribute names of the iris.arff dataset
    for i in meta:
        print(i)
# This works too
meta.names()
# Getting attribute type
types = meta.types()
Notes
-----
Also maintains the list of attributes in order, i.e. doing for i in
meta, where meta is an instance of MetaData, will return the
different attribute names in the order they were defined.
"""
def __init__(self, rel, attr):
self.name = rel
# We need the dictionary to be ordered
# XXX: may be better to implement an ordered dictionary
self._attributes = {}
self._attrnames = []
for name, value in attr:
tp = parse_type(value)
self._attrnames.append(name)
if tp == 'nominal':
self._attributes[name] = (tp, get_nom_val(value))
elif tp == 'date':
self._attributes[name] = (tp, get_date_format(value)[0])
else:
self._attributes[name] = (tp, None)
def __repr__(self):
msg = ""
msg += "Dataset: %s\n" % self.name
for i in self._attrnames:
msg += "\t%s's type is %s" % (i, self._attributes[i][0])
if self._attributes[i][1]:
msg += ", range is %s" % str(self._attributes[i][1])
msg += '\n'
return msg
def __iter__(self):
return iter(self._attrnames)
def __getitem__(self, key):
return self._attributes[key]
def names(self):
"""Return the list of attribute names."""
return self._attrnames
def types(self):
"""Return the list of attribute types."""
attr_types = [self._attributes[name][0] for name in self._attrnames]
return attr_types
def loadarff(f):
"""
Read an arff file.
The data is returned as a record array, which can be accessed much like
a dictionary of numpy arrays. For example, if one of the attributes is
called 'pressure', then its first 10 data points can be accessed from the
``data`` record array like so: ``data['pressure'][0:10]``
Parameters
----------
f : file-like or str
File-like object to read from, or filename to open.
Returns
-------
data : record array
The data of the arff file, accessible by attribute names.
meta : `MetaData`
Contains information about the arff file such as name and
type of attributes, the relation (name of the dataset), etc...
Raises
------
`ParseArffError`
This is raised if the given file is not ARFF-formatted.
NotImplementedError
The ARFF file has an attribute which is not supported yet.
Notes
-----
    This function should be able to read most arff files. Not
    implemented functionality includes:
    * string type attributes
    * relational type attributes
    It can read files with numeric, nominal and date attributes. It cannot read
files with sparse data ({} in the file). However, this function can
read files with missing data (? in the file), representing the data
points as NaNs.
"""
if hasattr(f, 'read'):
ofile = f
else:
ofile = open(f, 'rt')
try:
return _loadarff(ofile)
finally:
if ofile is not f: # only close what we opened
ofile.close()
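# Illustrative usage sketch (the file name below is an assumption; no data file
# is shipped with this module):
def _loadarff_example(path='iris.arff'):
    data, meta = loadarff(path)
    # Columns are addressed by the attribute names declared in the header.
    first_column = data[meta.names()[0]]
    return first_column, meta.types()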
def _loadarff(ofile):
# Parse the header file
try:
rel, attr = read_header(ofile)
except ValueError as e:
msg = "Error while parsing header, error was: " + str(e)
raise ParseArffError(msg)
# Check whether we have a string attribute (not supported yet)
hasstr = False
for name, value in attr:
type = parse_type(value)
if type == 'string':
hasstr = True
meta = MetaData(rel, attr)
# XXX The following code is not great
# Build the type descriptor descr and the list of convertors to convert
# each attribute to the suitable type (which should match the one in
# descr).
# This can be used once we want to support integer as integer values and
# not as numeric anymore (using masked arrays ?).
acls2dtype = {'real': np.float, 'integer': np.float, 'numeric': np.float}
acls2conv = {'real': safe_float, 'integer': safe_float, 'numeric': safe_float}
descr = []
convertors = []
if not hasstr:
for name, value in attr:
type = parse_type(value)
if type == 'date':
date_format, datetime_unit = get_date_format(value)
descr.append((name, "datetime64[%s]" % datetime_unit))
convertors.append(partial(safe_date, date_format=date_format, datetime_unit=datetime_unit))
elif type == 'nominal':
n = maxnomlen(value)
descr.append((name, 'S%d' % n))
pvalue = get_nom_val(value)
convertors.append(partial(safe_nominal, pvalue=pvalue))
else:
descr.append((name, acls2dtype[type]))
convertors.append(safe_float)
#dc.append(acls2conv[type])
#sdescr.append((name, acls2sdtype[type]))
else:
# How to support string efficiently ? Ideally, we should know the max
# size of the string before allocating the numpy array.
raise NotImplementedError("String attributes not supported yet, sorry")
ni = len(convertors)
# Get the delimiter from the first line of data:
def next_data_line(row_iter):
"""Assumes we are already in the data part (eg after @data)."""
raw = next(row_iter)
while r_empty.match(raw):
raw = next(row_iter)
while r_comment.match(raw):
raw = next(row_iter)
return raw
try:
try:
dtline = next_data_line(ofile)
delim = get_delim(dtline)
except ValueError as e:
raise ParseArffError("Error while parsing delimiter: " + str(e))
finally:
ofile.seek(0, 0)
ofile = go_data(ofile)
# skip the @data line
next(ofile)
def generator(row_iter, delim=','):
        # TODO: this is where we are spending time (~80%). I think things
# could be made more efficiently:
# - We could for example "compile" the function, because some values
# do not change here.
# - The function to convert a line to dtyped values could also be
# generated on the fly from a string and be executed instead of
# looping.
# - The regex are overkill: for comments, checking that a line starts
# by % should be enough and faster, and for empty lines, same thing
# --> this does not seem to change anything.
        # We do not abstract skipping comments and empty lines for performance
        # reasons.
raw = next(row_iter)
while r_empty.match(raw):
raw = next(row_iter)
while r_comment.match(raw):
raw = next(row_iter)
# 'compiling' the range since it does not change
# Note, I have already tried zipping the converters and
# row elements and got slightly worse performance.
elems = list(range(ni))
row = raw.split(delim)
yield tuple([convertors[i](row[i]) for i in elems])
for raw in row_iter:
while r_comment.match(raw):
raw = next(row_iter)
while r_empty.match(raw):
raw = next(row_iter)
row = raw.split(delim)
yield tuple([convertors[i](row[i]) for i in elems])
a = generator(ofile, delim=delim)
# No error should happen here: it is a bug otherwise
data = np.fromiter(a, descr)
return data, meta
#-----
# Misc
#-----
def basic_stats(data):
nbfac = data.size * 1. / (data.size - 1)
return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac
def print_attribute(name, tp, data):
type = tp[0]
if type == 'numeric' or type == 'real' or type == 'integer':
min, max, mean, std = basic_stats(data)
print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std))
else:
msg = name + ",{"
for i in range(len(tp[1])-1):
msg += tp[1][i] + ","
msg += tp[1][-1]
msg += "}"
print(msg)
def test_weka(filename):
data, meta = loadarff(filename)
print(len(data.dtype))
print(data.size)
for i in meta:
print_attribute(i,meta[i],data[i])
# make sure nose does not find this as a test
test_weka.__test__ = False
if __name__ == '__main__':
#import glob
#for i in glob.glob('arff.bak/data/*'):
# relation, attributes = read_header(open(i))
# print "Parsing header of %s: relation %s, %d attributes" % (i,
# relation, len(attributes))
import sys
filename = sys.argv[1]
#filename = 'arff.bak/data/pharynx.arff'
#floupi(filename)
test_weka(filename)
#gf = []
#wf = []
#for i in glob.glob('arff.bak/data/*'):
# try:
# print "=============== reading %s ======================" % i
# floupi(i)
# gf.append(i)
# except ValueError, e:
# print "!!!! Error parsing the file !!!!!"
# print e
# wf.append(i)
# except IndexError, e:
# print "!!!! Error parsing the file !!!!!"
# print e
# wf.append(i)
# except ArffError, e:
# print "!!!! Error parsing the file !!!!!"
# print e
# wf.append(i)
#print "%d good files" % len(gf)
#print "%d bad files" % len(wf)
|
|
import mimetypes
import os
import random
import time
from email import Charset, Encoders
from email.generator import Generator
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.Header import Header
from email.Utils import formatdate, getaddresses, formataddr, parseaddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import smart_str, force_unicode
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, None, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<[email protected]>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = set([
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
])
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_unicode(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val = val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding)
for addr in getaddresses((val,)))
else:
val = str(Header(val, encoding))
else:
if name.lower() == 'subject':
val = Header(val)
return name, val
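# Illustrative sketch (not part of the original module): why the newline check
# above matters. A value containing CR/LF could otherwise smuggle extra headers
# into the outgoing message; the address below is a throwaway example.
def _forbid_multi_line_headers_example():
    try:
        forbid_multi_line_headers('Subject', 'hi\r\nBcc: [email protected]', 'utf-8')
    except BadHeaderError:
        return True  # the injected header was rejected
    return False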
def sanitize_address(addr, encoding):
if isinstance(addr, basestring):
addr = parseaddr(force_unicode(addr))
nm, addr = addr
nm = str(Header(nm, encoding))
try:
addr = addr.encode('ascii')
except UnicodeEncodeError: # IDN
if u'@' in addr:
localpart, domain = addr.split(u'@', 1)
localpart = str(Header(localpart, encoding))
domain = domain.encode('idna')
addr = '@'.join([localpart, domain])
else:
addr = str(Header(addr, encoding))
return formataddr((nm, addr))
class SafeMIMEText(MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def as_string(self, unixfrom=False):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = Generator(fp, mangle_from_ = False)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
class SafeMIMEMultipart(MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
def as_string(self, unixfrom=False):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = Generator(fp, mangle_from_ = False)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if cc:
assert not isinstance(cc, basestring), '"cc" argument must be a list or tuple'
self.cc = list(cc)
else:
self.cc = []
if bcc:
assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(smart_str(self.body, encoding),
self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = ', '.join(self.to)
if self.cc:
msg['Cc'] = ', '.join(self.cc)
        # Email header names are case-insensitive (RFC 5322), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() == 'from': # From is already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return self.to + self.cc + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content == mimetype == None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
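    # Illustrative usage of the two calling conventions accepted above (file
    # name and content are assumptions, not part of the original module):
    #   msg.attach('report.txt', 'plain text content', 'text/plain')
    #   msg.attach(MIMEText('already built', 'plain', 'utf-8'))  # any MIMEBase instance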
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(smart_str(content, encoding), subtype, encoding)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers, cc)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
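# Illustrative usage sketch (addresses and content are assumptions, and Django
# settings such as DEFAULT_CHARSET must already be configured; this helper is
# not part of the original module):
def _email_multi_alternatives_example():
    msg = EmailMultiAlternatives(
        subject='Hello',
        body='Plain-text body',
        from_email='[email protected]',
        to=['[email protected]'],
    )
    msg.attach_alternative('<p>HTML body</p>', 'text/html')
    return msg.message()  # builds the multipart/alternative MIME message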
|
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
while True:
cmd_return_code = cmd_proc.poll()
if cmd_return_code != None:
break
if not cmd.endswith("&"):
while True:
line = cmd_proc.stdout.readline().strip("\r\n")
print line
if not line or line.find("daemon started") >= 0:
break
output.append(line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 4:
continue
name = pkg_infos[5]
name = name.lstrip('[').rstrip(']')
print "name is: %s" % name
if pkg_name == name:
test_pkg_id = pkg_infos[3]
test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
print test_pkg_id
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith(".wgt"):
continue
elif item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, PKG_SRC_DIR+"/"+item_name):
#if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import requests
import sys
from neutron.tests import base
with mock.patch.dict(sys.modules, {
'networking_cisco': mock.Mock(),
'networking_cisco.plugins': mock.Mock().plugins,
'networking_cisco.plugins.cisco': mock.Mock().cisco,
'networking_cisco.plugins.cisco.cfg_agent': mock.Mock().cfg_agent,
'networking_cisco.plugins.cisco.cfg_agent.device_drivers':
mock.Mock().device_drivers,
}):
from neutron_fwaas.services.firewall.drivers.cisco import csr_acl_driver
FAKE_ACL_ID = 'acl123'
FAKE_FW = {
'id': '123456789',
'admin_state_up': True,
'vendor_ext': {
'acl_id': FAKE_ACL_ID,
'host_mngt_ip': '192.169.101.5',
'host_usr_nm': 'lab',
'host_usr_pw': 'lab',
'if_list': [
{
'direction': 'inside',
'port': {
'id': 'fake_port_id',
'hosting_info': {
# map to interface GigabitEthernet3.101
'segmentation_id': 101,
'hosting_port_name': 't2_p:1',
},
},
},
]
},
'firewall_rule_list': [
{
'enabled': True,
'name': 'r1',
'ip_version': 4,
'protocol': 'tcp',
'action': 'allow',
'source_port': '3001',
'destination_port': '3001',
},
]
}
class TestCsrAclDriver(base.BaseTestCase):
def setUp(self):
super(TestCsrAclDriver, self).setUp()
self.csr = mock.Mock()
self.csracl = csr_acl_driver.CsrAclDriver()
self.csracl._get_csr_host = mock.Mock(return_value=self.csr)
self.acl_data = self.csracl._get_acl_rule_data(FAKE_FW)
self.aclapi_response = 'https://' + FAKE_FW[
'vendor_ext']['host_mngt_ip'] + '/' + FAKE_ACL_ID
def _set_csracl_mocks(self):
self.csracl._post_acl = mock.Mock()
self.csracl._post_acl_to_interfaces = mock.Mock()
self.csracl._delete_acl = mock.Mock()
self.csracl._put_acl = mock.Mock()
self.csracl._delete_acl_on_interface = mock.Mock()
self.csracl._get_acl_interface = mock.Mock()
def _set_csr_mocks(self):
self.csr.post_request = mock.Mock()
self.csr.delete_request = mock.Mock()
self.csr.get_request = mock.Mock()
self.csr.put_request = mock.Mock()
def _test_post_acl(self):
self._set_csr_mocks()
self.csr.post_request.return_value = self.aclapi_response
acl_id = self.csracl._post_acl(self.csr, self.acl_data)
self.csr.post_request.assert_called_once_with('acl', self.acl_data)
if self.csr.status == requests.codes.CREATED:
self.assertEqual(FAKE_ACL_ID, acl_id)
else:
self.assertEqual('', acl_id)
def test_post_acl_error(self):
self.csr.status = requests.codes.SERVER_ERROR
self._test_post_acl()
def test_post_acl(self):
self.csr.status = requests.codes.CREATED
self._test_post_acl()
def _test_delete_acl(self):
self._set_csr_mocks()
success = self.csracl._delete_acl(self.csr, FAKE_ACL_ID)
self.csr.delete_request.assert_called_once_with('acl/' + FAKE_ACL_ID)
if self.csr.status == requests.codes.NO_CONTENT:
self.assertTrue(success)
else:
self.assertEqual(False, success)
def test_delete_acl_error(self):
self.csr.status = requests.codes.SERVER_ERROR
self._test_delete_acl()
def test_delete_acl(self):
self.csr.status = requests.codes.NO_CONTENT
self._test_delete_acl()
def _test_put_acl(self):
self._set_csr_mocks()
success = self.csracl._put_acl(
self.csr, FAKE_ACL_ID, self.acl_data)
self.csr.put_request.assert_called_once_with(
'acl/' + FAKE_ACL_ID, self.acl_data)
if self.csr.status == requests.codes.NO_CONTENT:
self.assertTrue(success)
else:
self.assertEqual(False, success)
def test_put_acl_error(self):
self.csr.status = requests.codes.SERVER_ERROR
self._test_put_acl()
def test_put_acl(self):
self.csr.status = requests.codes.NO_CONTENT
self._test_put_acl()
def _test_post_acl_to_interfaces(self):
self._set_csr_mocks()
self.csr.post_request.return_value = 'fake_post_response'
status_data = {
'fw_id': FAKE_FW['id'],
'acl_id': FAKE_ACL_ID,
'if_list': []
}
firewall_interface = FAKE_FW['vendor_ext']['if_list'][0]
interface_name = self.csracl._get_interface_name_from_hosting_port(
firewall_interface['port'])
acl_interface_data = {
'if-id': interface_name,
'direction': firewall_interface['direction']}
api = 'acl/' + FAKE_ACL_ID + '/interfaces'
self.csracl._post_acl_to_interfaces(FAKE_FW, self.csr,
FAKE_ACL_ID, status_data)
self.csr.post_request.assert_called_once_with(api, acl_interface_data)
if self.csr.status == requests.codes.CREATED:
self.assertEqual(
[{'port_id': firewall_interface['port']['id'],
'status': 'OK'}],
status_data['if_list'])
else:
self.assertEqual(
[{'port_id': firewall_interface['port']['id'],
'status': 'ERROR'}],
status_data['if_list'])
def test_post_acl_to_interfaces_error(self):
self.csr.status = requests.codes.SERVER_ERROR
self._test_post_acl_to_interfaces()
def test_post_acl_to_interfaces(self):
self.csr.status = requests.codes.CREATED
self._test_post_acl_to_interfaces()
def test_delete_acl_on_interface(self):
self._set_csr_mocks()
self.csr.status = requests.codes.NO_CONTENT
csr_acl_interfaces = [
{
'acl-id': FAKE_ACL_ID,
'if-id': 'GigabitEthernet3.101',
'direction': 'inside'
}
]
api = 'acl/%s/interfaces/%s_%s' % (
FAKE_ACL_ID, csr_acl_interfaces[0]['if-id'],
csr_acl_interfaces[0]['direction'])
self.csracl._delete_acl_on_interface(
self.csr, FAKE_ACL_ID, csr_acl_interfaces)
self.csr.delete_request.assert_called_once_with(api)
def _test_get_acl_interface(self):
self._set_csr_mocks()
api = 'acl/%s/interfaces' % FAKE_ACL_ID
get_rsp = {'items': [{'fake_k1': 'fake_d1'}]}
self.csr.get_request.return_value = get_rsp
rsp = self.csracl._get_acl_interface(self.csr, FAKE_ACL_ID)
self.csr.get_request.assert_called_once_with(api)
if self.csr.status == requests.codes.OK:
self.assertEqual(get_rsp['items'], rsp)
else:
self.assertEqual('', rsp)
def test_get_acl_interface_err(self):
self.csr.status = requests.codes.SERVER_ERROR
self._test_get_acl_interface()
def test_get_acl_interface(self):
self.csr.status = requests.codes.OK
self._test_get_acl_interface()
def test_create_firewall_admin_state_not_up(self):
firewall = copy.deepcopy(FAKE_FW)
firewall['admin_state_up'] = False
self._set_csracl_mocks()
self.csracl._post_acl.return_value = FAKE_ACL_ID
success, status = self.csracl.create_firewall(None, None, firewall)
self.csracl._post_acl.assert_called_once_with(self.csr, self.acl_data)
self.assertTrue(success)
self.assertEqual(
{'fw_id': FAKE_FW['id'], 'acl_id': FAKE_ACL_ID, 'if_list': []},
status)
def test_create_firewall_post_acl_error(self):
self._set_csracl_mocks()
self.csracl._post_acl.return_value = ''
success, status = self.csracl.create_firewall(None, None, FAKE_FW)
self.csracl._post_acl.assert_called_once_with(self.csr, self.acl_data)
self.assertEqual(False, success)
def test_create_firewall(self):
self._set_csracl_mocks()
self.csracl._post_acl.return_value = FAKE_ACL_ID
status_data = {
'fw_id': FAKE_FW['id'],
'acl_id': FAKE_ACL_ID,
'if_list': []
}
success, status = self.csracl.create_firewall(None, None, FAKE_FW)
self.csracl._post_acl.assert_called_once_with(self.csr, self.acl_data)
self.csracl._post_acl_to_interfaces.assert_called_once_with(
FAKE_FW, self.csr, FAKE_ACL_ID, status_data)
self.assertTrue(success)
def _test_delete_firewall(self, delete_acl_success):
self._set_csracl_mocks()
self.csracl._delete_acl.return_value = delete_acl_success
success = self.csracl.delete_firewall(None, None, FAKE_FW)
self.csracl._delete_acl.assert_called_once_with(self.csr, FAKE_ACL_ID)
self.assertEqual(delete_acl_success, success)
def test_delete_firewall(self):
self._test_delete_firewall(True)
def test_delete_firewall_error(self):
self._test_delete_firewall(False)
def test_update_firewall_put_acl_error(self):
self._set_csracl_mocks()
self.csracl._put_acl.return_value = False
acldata = self.acl_data
acldata['acl-id'] = FAKE_ACL_ID
success, status = self.csracl.update_firewall(None, None, FAKE_FW)
self.csracl._put_acl.assert_called_once_with(
self.csr, FAKE_ACL_ID, acldata)
self.assertEqual(False, success)
def _test_update_firewall(self, admin_stat_up):
firewall = copy.deepcopy(FAKE_FW)
firewall['admin_state_up'] = admin_stat_up
self._set_csracl_mocks()
self.csracl._put_acl.return_value = True
acldata = self.acl_data
acldata['acl-id'] = FAKE_ACL_ID
fake_acl_interface_list = [{'if-id': 'GigabitEthernet3.101'}]
self.csracl._get_acl_interface.return_value = fake_acl_interface_list
status_data = {
'fw_id': firewall['id'],
'acl_id': FAKE_ACL_ID,
'if_list': []
}
success, status = self.csracl.update_firewall(None, None, firewall)
self.csracl._put_acl.assert_called_once_with(
self.csr, FAKE_ACL_ID, acldata)
self.csracl._get_acl_interface.assert_called_once_with(
self.csr, FAKE_ACL_ID)
self.csracl._delete_acl_on_interface.assert_called_once_with(
self.csr, FAKE_ACL_ID, fake_acl_interface_list)
self.assertTrue(success)
if not admin_stat_up:
self.assertEqual(status_data, status)
else:
self.csracl._post_acl_to_interfaces.assert_called_once_with(
firewall, self.csr, FAKE_ACL_ID, status_data)
def test_update_firewall_admin_state_not_up(self):
self._test_update_firewall(False)
def test_update_firewall(self):
self._test_update_firewall(True)
class TestCsrAclDriverValidation(base.BaseTestCase):
def setUp(self):
super(TestCsrAclDriverValidation, self).setUp()
self.csracl = csr_acl_driver.CsrAclDriver()
self.firewall = copy.deepcopy(FAKE_FW)
def test_create_firewall_no_admin_state(self):
del self.firewall['admin_state_up']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_vendor_ext(self):
del self.firewall['vendor_ext']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_host_mngt_ip(self):
del self.firewall['vendor_ext']['host_mngt_ip']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_host_usr_name(self):
del self.firewall['vendor_ext']['host_usr_nm']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_host_usr_password(self):
del self.firewall['vendor_ext']['host_usr_pw']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_if_list(self):
del self.firewall['vendor_ext']['if_list']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_direction(self):
del self.firewall['vendor_ext']['if_list'][0]['direction']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_invalid_direction(self):
self.firewall['vendor_ext']['if_list'][0]['direction'] = 'dir'
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_port(self):
del self.firewall['vendor_ext']['if_list'][0]['port']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_host_info(self):
del self.firewall['vendor_ext']['if_list'][0]['port']['hosting_info']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_segmentation_id(self):
del self.firewall['vendor_ext']['if_list'][0]['port']['hosting_info'][
'segmentation_id']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_host_port_name(self):
del self.firewall['vendor_ext']['if_list'][0]['port']['hosting_info'][
'hosting_port_name']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_invalid_host_port_name(self):
self.firewall['vendor_ext']['if_list'][0]['port']['hosting_info'][
'hosting_port_name'] = 't3_p:1'
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_no_rule_list(self):
del self.firewall['firewall_rule_list']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_rule_no_name(self):
del self.firewall['firewall_rule_list'][0]['name']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_rule_no_ip_version(self):
del self.firewall['firewall_rule_list'][0]['ip_version']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_rule_not_ipv4(self):
self.firewall['firewall_rule_list'][0]['ip_version'] = 6
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_rule_no_protocol(self):
del self.firewall['firewall_rule_list'][0]['protocol']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_rule_no_action(self):
del self.firewall['firewall_rule_list'][0]['action']
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_create_firewall_rule_invalid_action(self):
self.firewall['firewall_rule_list'][0]['action'] = 'action'
success, status = self.csracl.create_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_update_firewall_no_acl_id(self):
del self.firewall['vendor_ext']['acl_id']
success, status = self.csracl.update_firewall(
None, None, self.firewall)
self.assertEqual(False, success)
def test_delete_firewall_no_acl_id(self):
del self.firewall['vendor_ext']['acl_id']
success = self.csracl.delete_firewall(None, None, self.firewall)
self.assertEqual(False, success)
|
|
from os import path
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from frozendict import frozendict
# noinspection PyProtectedMember
from vms.models.base import _JsonPickleModel, _StatusModel, _VmDiskModel, _ScheduleModel
from vms.models.dc import Dc
from vms.models.vm import Vm
from vms.models.node import Node
from vms.models.storage import get_cached_size, clear_cached_size, NodeStorage
class BackupDefine(_VmDiskModel, _ScheduleModel):
"""
Virtual Machine backup definition and schedule.
"""
DATASET = 1
FILE = 2
TYPE = (
(DATASET, _('Dataset')),
(FILE, _('File')),
)
NONE = 0
GZIP = 1
BZIP2 = 2
XZ = 3
COMPRESSION = (
(NONE, _('None')),
(GZIP, 'gzip'),
(BZIP2, 'bzip2'),
(XZ, 'xz'),
)
FILE_SUFFIX = frozendict({
NONE: 'zfs',
GZIP: 'zfs.gz',
BZIP2: 'zfs.bz2',
XZ: 'zfs.xz',
})
# id (implicit), Inherited: disk_id, schedule (property), active (property)
vm = models.ForeignKey(Vm, verbose_name=_('Server'))
name = models.CharField(_('Name'), max_length=16) # User ID
node = models.ForeignKey(Node, verbose_name=_('Node')) # Where?
zpool = models.ForeignKey(NodeStorage, verbose_name=_('Zpool')) # Where?
type = models.SmallIntegerField(_('Type'), choices=TYPE, default=DATASET)
desc = models.CharField(_('Description'), max_length=128, blank=True)
bwlimit = models.IntegerField(_('Bandwidth limit'), blank=True, null=True) # bytes
retention = models.IntegerField(_('Retention')) # max count
compression = models.SmallIntegerField(_('Compression'), choices=COMPRESSION, default=NONE)
fsfreeze = models.BooleanField(_('Application-Consistent?'), default=False)
class Meta:
app_label = 'vms'
verbose_name = _('Backup definition')
verbose_name_plural = _('Backup definitions')
unique_together = (('vm', 'disk_id', 'name'),)
def __unicode__(self):
return '%s-disk%s %s/%s' % (self.vm_id, self.disk_id, self.name, self.retention)
@property
def web_data(self):
"""Return dict used in html templates"""
return {
'name': self.name,
'disk_id': self.array_disk_id,
'node': self.node.hostname,
'zpool': self.zpool.zpool,
'type': self.type,
'bwlimit': self.bwlimit,
'compression': self.compression,
'schedule': self.schedule,
'retention': self.retention,
'active': self.active,
'fsfreeze': self.fsfreeze,
'desc': self.desc,
}
def _new_periodic_task(self):
"""Return new instance of PeriodicTask"""
return self.PT(name='backup-%s-%s-disk%s' % (self.name, self.vm_id, self.disk_id),
task='api.vm.backup.tasks.vm_backup_beat', args='[%d]' % self.id,
queue='mgmt', expires=None) # expires bug: https://github.com/celery/django-celery/pull/271
def generate_backup_name(self):
"""Create name for new backup"""
return '%s-%s' % (self.name, timezone.now().strftime('%Y%m%d_%H%M%S'))
class Backup(_VmDiskModel, _StatusModel, _JsonPickleModel):
"""
List of backups.
"""
_cache_status = False # _StatusModel
_disk_size = None # Disk size cache
_disks = None # Disk list cache
# Used in NodeStorage.size_backups
BACKUP_SIZE_TOTAL_KEY = 'backup-size-total:%s' # %s = zpool.id (NodeStorage)
BACKUP_SIZE_DC_KEY = 'backup-size-dc:%s:%s' # %s = dc.id:zpool.id (NodeStorage)
BACKUP_SIZE_TOTAL_DC_KEY = 'backup-size-total-dc:%s' # %s = dc.id
BACKUP_SIZE_TOTAL_VM_KEY = 'backup-size-total-vm:%s' # %s = vm.uuid
DATASET = BackupDefine.DATASET
FILE = BackupDefine.FILE
TYPE = BackupDefine.TYPE
OK = 1
PENDING = 2
RESTORE = 3
LOST = 4
STATUS = (
(OK, _('ok')),
(PENDING, _('pending')),
(RESTORE, _('restore')),
(LOST, _('lost')),
)
LOCKED = frozenset([PENDING, RESTORE])
# id (implicit), Inherited: status_change, created, changed, json, disk_id
dc = models.ForeignKey(Dc, verbose_name=_('Datacenter'))
vm = models.ForeignKey(Vm, verbose_name=_('Server'), null=True, blank=True, on_delete=models.SET_NULL)
vm_hostname = models.CharField(_('Server hostname'), max_length=128) # original hostname
vm_disk_id = models.SmallIntegerField('Array disk ID') # json disk_id
define = models.ForeignKey(BackupDefine, verbose_name=_('Backup definition'), null=True, blank=True,
on_delete=models.SET_NULL)
name = models.CharField(_('Name'), max_length=32) # define name + timestamp
status = models.SmallIntegerField(_('Status'), choices=STATUS)
file_path = models.CharField(_('File path'), max_length=255, blank=True)
manifest_path = models.CharField(_('Manifest path'), max_length=255, blank=True)
checksum = models.CharField(_('Checksum'), max_length=40, blank=True)
node = models.ForeignKey(Node, verbose_name=_('Node'))
zpool = models.ForeignKey(NodeStorage, verbose_name=_('Zpool'))
type = models.SmallIntegerField(_('Type'), choices=TYPE)
size = models.BigIntegerField(_('Size'), null=True, blank=True) # bytes
time = models.IntegerField(_('Duration'), null=True, blank=True) # seconds
note = models.CharField(_('Note'), max_length=255, blank=True)
last = models.BooleanField(_('Last?'), default=False) # TODO: index?
fsfreeze = models.BooleanField(_('Application-Consistent?'), default=False)
class Meta:
app_label = 'vms'
verbose_name = _('Backup')
verbose_name_plural = _('Backups')
unique_together = (('vm_hostname', 'vm_disk_id', 'name'),)
# index_together = (('created',),)
def __unicode__(self):
return '%s-disk%s@%s' % (self.vm_hostname, self.disk_id, self.name)
def get_disk_map(self): # See _VmDiskModel
"""Return real_disk_id -> disk_id mapping"""
return Vm.get_disk_map(self.json)
def _get_disks(self):
if self._disks is None:
json = self.json
brand = json.get('brand', 'kvm')
is_hvm = brand == 'kvm' or brand == 'bhyve'
self._disks = Vm.parse_json_disks(json['uuid'], json, is_hvm)
return self._disks
@property
def locked(self):
return self.status in self.LOCKED
@property
def array_disk_id(self):
"""Faster array_disk_id"""
return int(self.vm_disk_id) + 1
@property
def vm_uuid(self):
"""VM uuid"""
if self.vm:
return self.vm.uuid
return self.json['uuid']
@property
def vm_hostname_real(self):
"""Real VM hostname"""
if self.vm:
return self.vm.hostname
return self.vm_hostname
@property
def vm_brand(self):
"""VM brand"""
return self.json.get('brand', 'kvm')
@property
def disk_size(self):
"""Return disk size in MB"""
if self._disk_size is None:
self._disk_size = self._get_disks()[int(self.vm_disk_id)]['size']
return self._disk_size
@property
def zfs_filesystem(self):
"""Return zfs_filesystem of VM's disk this backup is for"""
return self._get_disks()[int(self.vm_disk_id)]['zfs_filesystem']
@property
def zfs_filesystem_real(self):
"""Return zfs_filesystem of VM's disk this backup is for"""
if self.vm:
try:
vm_disks = self.vm.json_active_get_disks()
disk_map = self.vm.get_disk_map(vm_disks)
return vm_disks[disk_map[self.disk_id]]['zfs_filesystem']
except (IndexError, KeyError):
return self.zfs_filesystem
else:
return self.zfs_filesystem
@property # Gui helper
def bkpid(self):
return '%s_%s' % (self.array_disk_id, self.name)
@property
def snap_name(self):
"""Return snapshot name used for dataset backup"""
return 'is-%d' % self.id
@property
def file_name(self):
"""Return backup file name"""
assert self.name
define = self.define
return '%s-full.%s' % (self.name, define.FILE_SUFFIX[define.compression])
def create_file_path(self):
"""Return backup file path"""
# /zones/backups/file/<uuid>/disk0/<file_name>.zfs
return path.join('/', self.zpool.zpool, self.dc.settings.VMS_VM_BACKUP_FILE_DIR, self.vm_uuid,
'disk%s' % self.disk_id, self.file_name)
def create_dataset_path(self):
"""Return backup dataset"""
# zones/backups/ds/<uuid>-disk0
return path.join(self.zpool.zpool, self.dc.settings.VMS_VM_BACKUP_DS_DIR,
'%s-disk%s' % (self.vm_uuid, self.disk_id))
def create_file_manifest_path(self):
"""Return backup file manifest path"""
# /zones/backups/manifests/file/<uuid>/disk0/<file_name>.zfs.json
return path.join('/', self.zpool.zpool, self.dc.settings.VMS_VM_BACKUP_MANIFESTS_FILE_DIR, self.vm_uuid,
'disk%s' % self.disk_id, '%s.json' % self.file_name)
def create_dataset_manifest_path(self):
"""Return backup dataset manifest path"""
# zones/backups/manifests/ds/<uuid>-disk0/<snap_name>.json
return path.join('/', self.zpool.zpool, self.dc.settings.VMS_VM_BACKUP_MANIFESTS_DS_DIR,
'%s-disk%s' % (self.vm_uuid, self.disk_id), '%s.json' % self.snap_name)
@classmethod
def get_total_dc_size(cls, dc):
"""Return cumulative backup size for one DC"""
key = cls.BACKUP_SIZE_TOTAL_DC_KEY % dc.id
qs = cls.objects.filter(dc=dc).exclude(status__in=(cls.PENDING, cls.LOST), size__isnull=True)
return get_cached_size(key, qs)
@classmethod
def get_total_vm_size(cls, vm):
"""Return cumulative backup size for one VM"""
key = cls.BACKUP_SIZE_TOTAL_VM_KEY % vm.uuid
qs = cls.objects.filter(vm=vm).exclude(status__in=(cls.PENDING, cls.LOST), size__isnull=True)
return get_cached_size(key, qs)
@classmethod
def clear_total_dc_size(cls, dc):
return clear_cached_size(cls.BACKUP_SIZE_TOTAL_DC_KEY % getattr(dc, 'id', dc))
@classmethod
def clear_total_vm_size(cls, vm):
return clear_cached_size(cls.BACKUP_SIZE_TOTAL_VM_KEY % getattr(vm, 'uuid', vm))
@classmethod
def update_resources(cls, ns, vm, dc):
"""Update NodeStorage and Storage size_free"""
ns.save(update_resources=True, update_dcnode_resources=True, recalculate_vms_size=False,
recalculate_snapshots_size=False, recalculate_images_size=False, recalculate_backups_size=True,
recalculate_dc_backups_size=(dc,))
cls.clear_total_dc_size(dc)
if vm:
cls.clear_total_vm_size(vm)
def update_zpool_resources(self):
"""Used by backup callback tasks"""
# noinspection PyTypeChecker
self.update_resources(self.zpool, self.vm, self.dc) # Broken PyCharm inspection
|
|
"""Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
class RawConfigParser:
def __init__(self, defaults=None):
self._sections = {}
self._defaults = {}
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists.
"""
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = {}
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = {}
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key != "__name__":
fp.write("%s = %s\n" %
(key, str(value).replace('\n', '\n\t')))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in the setup file each contain a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = {'__name__': sectname}
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e[0])
else:
break
if "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_match = re.compile(r"%\(([^)]+)\)s").match
def _interpolate_some(self, option, accum, rest, section, map, depth):
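# Scans `rest` left to right: literal text is appended to `accum`, "%%" becomes
# a literal "%", and "%(name)s" is looked up in `map` and interpolated
# recursively, erroring out past MAX_INTERPOLATION_DEPTH.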
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value):
"""Set an option. Extend ConfigParser.set: check for string values."""
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
ConfigParser.set(self, section, option, value)
|
|
import time
import json
import numpy as np
__author__ = "Chang Gao, Liyan Chen"
__copyright__ = "Copyright (c) 2017 Malmactor"
__license__ = "MIT"
class spos:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class pos:
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def __str__(self):
return str(self.x) + ' ' + str(self.y) + ' ' + str(self.z)
def eltadd(self, rhs):
return pos(self.x+rhs.x, self.y+rhs.y, self.z+rhs.z)
def shiftcheck(self, rhs):
"""Check if it will shift into another block with given dx and dy.
ret: 0-8, indicating the target block in the [-1, 1] x [-1, 1] neighbourhood
"""
dx = int(self.x)-int(rhs.x)+1
dy = int(self.y)-int(rhs.y)+1
return dx + 3*dy
class RigidEntity:
def __init__(self, x=0, y=0, z=0, config=None):
self.config = config
self.dtype = "float16" if config is None else config["dtype"]
self.delta_t = 1.0 if config is None else config["delta_t"]
self.delta_mat = np.identity(3, self.dtype)
self.delta_mat[1, 0] = self.delta_t
self.delta_mat[2, 1] = self.delta_t
self.delta_mat[2, 0] = self.delta_t ** 2 / 2.0
self.state = np.zeros([3, 3], dtype=self.dtype)
self.state[:, 0] = [x, y, z]
def __str__(self):
return ' '.join(map(lambda num: str(num), self.state[:, 0]))
def __getattr__(self, item):
mapping = {"x": self.state[0, 0], "y": self.state[1, 0], "z": self.state[2, 0]}
if item in mapping:
return mapping[item]
else:
raise AttributeError("object has no attribute \'{:}\'".format(str(item)))
def eltadd(self, rhs):
result = self.__class__(config=self.config)
result.state = self.state + rhs.state
return result
def shiftcheck(self, rhs):
"""Check if it will shift into another block with given dx and dy.
ret: 0-8, indicating the target block in the [-1, 1] x [-1, 1] neighbourhood
"""
dx = int(self.x)-int(rhs.x)+1
dy = int(self.y)-int(rhs.y)+1
return dx + 3*dy
def update(self):
self.state = np.dot(self.state, self.delta_mat)
def reaction(self, op):
op(self.state)
class CollisionResolver:
def __init__(self, passive_box, active_box):
pass
class Actor:
def __init__(self, host):
self.host = host
self.state = host.getWorldState()
self.jump_status = 0 # Begin 0~1 Finish, 0 means not jumping, 0.5 for highest point
self.move_status = 0 # Begin 0~1 Finish, 0 means not moving left/right
self.jump_type = 0 # 0 for low, 1 for med, 2 for high
self.move_type = 0 # 0 for left, 1 for right
self.bound = [False, False, False, False] # surrounding boundaries [l, r, u, d]
def current_pos(self):
obs = json.loads(self.state.observations[-1].text)
return RigidEntity(obs[u'XPos'], obs[u'YPos'], obs[u'ZPos'])
def lowjumpfuc(self):
return 0.3-0.5*self.jump_status
def midjumpfuc(self):
return 0.39-0.65*self.jump_status
def highjumpfuc(self):
return 0.78-1.3*self.jump_status
def movefuc(self):
return (self.move_type*2 - 1)*(0.3025-(self.jump_status-0.55)*(self.jump_status-0.55))
def pos_shift(self):
dx = 0
dy = 0
# 1. for jumping
if self.jump_status > 0:
if self.jump_type == 0:
dy += self.lowjumpfuc()
elif self.jump_type == 1:
dy += self.midjumpfuc()
else:
dy += self.highjumpfuc()
# 2. for moving left/right
if self.move_status > 0:
dx += self.movefuc()
return RigidEntity(dx, dy, 0)
def get_action(self):
"""Get next action.
ret: {0: freeze, 1: left, 2: right, 3: low jump, 4: mid jump, 5: high jump}
"""
#### TODO ####
# if numpy.random.randint(5) == 0:
# return 2
return 4
def die(self):
#### TODO ####
pass
def boundcheck(self):
"""Check boundaries in all four directions [l ,r, u, d]."""
self.bound = [False, False, False, True]
# 1. get grid info
obs = json.loads(self.state.observations[-1].text)
grid = obs.get(u'floor3x3', 0)
print(grid)
# 2. get next integer position
nextintpos = self.current_pos().shiftcheck(self.current_pos().eltadd(self.pos_shift()))
# 3. test if a bound is there
if grid[nextintpos] != u'air':
if nextintpos % 3 == 0:
self.bound[0] = True
elif nextintpos % 3 == 2:
self.bound[1] = True
if int(nextintpos / 3) == 0:
self.bound[3] = True
elif int(nextintpos / 3) == 2:
self.bound[2] = True
print(self.bound)
def run_1(self):
self.state = self.host.getWorldState()
if self.state.number_of_observations_since_last_state > 0:
# get next action
actnum = self.get_action()
self.boundcheck()
body = self.current_pos()
def jump_init_velocity(state):
state[1, 1] = 4.0 / 16.0
state[1, 2] = 2.0 / 16.0 / 16.0
body.reaction(jump_init_velocity)
while True:
self.state = self.host.getWorldState()
if self.state.number_of_observations_since_last_state > 0:
# get next action
actnum = self.get_action()
self.host.sendCommand("tp " + str(body.x) + " " + str(body.y) + " " + str(body.z))
body.update()
def run(self):
turn = 0
def jump_init_velocity(state):
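# seed the jump: upward y-velocity of 4/16 blocks per tick, constant
# downward y-acceleration (gravity) of 2/16/16, and clear any leftover
# z-axis velocity/acceleration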
state[1, 1] = 4.0 / 16.0
state[1, 2] = -2.0 / 16.0 / 16.0
state[2, 1:3] = 0.0
while True:
self.state = self.host.getWorldState()
if self.state.number_of_observations_since_last_state > 0:
# get next action
actnum = self.get_action()
# 1. check all prerequisite for next movement
# -- 1.1 check if already in air
if actnum > 2:
if self.jump_status != 0 or not self.bound[3]:
actnum = 0
else:
self.jump_type = actnum-3
self.jump_status = 0.05
# -- 1.2 check if already moving left/right
elif actnum <= 2 and actnum > 0:
if self.move_status != 0:
actnum = 0
else:
self.move_type = actnum
self.move_status = 0.05
print("action to take: " + str(actnum))
# 2. check all boundaries for next movement
self.boundcheck()
# -- 2.1 if jumping up and hits block
if self.jump_status < 0.5 and self.bound[2]:
self.jump_status = 0.5
# -- 2.2 if jumping down and hits block
elif self.jump_status >= 0.5 and self.bound[3]:
self.jump_status = 0
# -- 2.3 if moving left and hits block
if self.move_type == 0 and self.move_status != 0 and self.bound[0]:
self.move_status = 0
# -- 2.4 if moving right and hits block
elif self.move_type == 1 and self.move_status != 0 and self.bound[1]:
self.move_status = 0
# 3. calc next position
if turn == 0:
curpos = self.current_pos()
else:
curpos = nextpos
nextpos = curpos.eltadd(self.pos_shift())
if turn == 0:
body = self.current_pos()
body.state = nextpos.state
body.reaction(jump_init_velocity)
print(body.state)
print(body.delta_mat)
print("current: " + str(curpos))
print("next: " + str(nextpos))
print "body:", str(body)
print
# 4. action
#self.host.sendCommand("tp " + str(nextpos.x) + " " + str(nextpos.y) + " " + str(nextpos.z))
self.host.sendCommand("tp " + str(body.x) + " " + str(body.y) + " " + str(body.z))
body.update()
# 5. reset status
# -- 5.1 continue to jump
if self.jump_status > 0 and self.jump_status < 1:
self.jump_status += 0.05
# -- 5.2 continue to move left/right
if self.move_status > 0 and self.move_status < 1:
self.move_status += 0.05
# -- 5.3 die if the actor falls out of the world
if nextpos.y < 0:
return self.die()
# -- 5.4 stop moving left/right if finish moving
if self.move_status == 1:
self.move_status = 0
turn += 1
|
|
from django.test import TestCase
from unittest2 import skipIf
from django.db import connection
import json
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.test import missing_url
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from sqlshare_rest.test.api.base import BaseAPITest
from sqlshare_rest.dao.dataset import create_dataset_from_query
@skipIf(missing_url("sqlshare_view_dataset_list"), "SQLShare REST URLs not configured")
@override_settings(MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
)
class TagAPITest(BaseAPITest):
def setUp(self):
super(TagAPITest, self).setUp()
# Try to cleanup from any previous test runs...
self.remove_users = []
self.client = Client()
def test_owner_tags(self):
owner = "tag_owner"
dataset_name = "super_tagged"
self.remove_users.append(owner)
backend = get_backend()
backend.get_user(owner)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset_tags", kwargs={ 'owner': owner,
'name': dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "[]")
data = [
{ "name": owner, "tags": [ "tag1", "tag2" ] }
]
response = self.client.put(url, data=json.dumps(data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, data)
data = [
{ "name": owner, "tags": [ "tag1" ] }
]
response = self.client.put(url, data=json.dumps(data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, data)
data = [
{ "name": owner, "tags": [ ] }
]
response = self.client.put(url, data=json.dumps(data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, [])
def test_multi_user(self):
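# Exercises the tag permission model end to end: tagging requires dataset
# access, a non-owner may only change their own tag list, and the owner can
# remove tags from other users' lists but cannot add tags on their behalf.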
owner = "tag_owner2"
user1 = "tag_user1"
user2 = "tag_user2"
backend = get_backend()
backend.get_user(owner)
backend.get_user(user1)
backend.get_user(user2)
dataset_name = "super_tagged_2"
self.remove_users.append(owner)
self.remove_users.append(user1)
self.remove_users.append(user2)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset_tags", kwargs={ 'owner': owner,
'name': dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(user1)
user2_auth_headers = self.get_auth_header_for_username(user2)
# Make sure a user can't tag before permission is granted:
data = [
{ "name": user1, "tags": [ "tag1", "tag2" ] }
]
response = self.client.put(url, data=json.dumps(data), **user1_auth_headers)
self.assertEquals(response.status_code, 403)
# Grant access to user1
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
new_data = { "accounts": [ user1 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# Now make sure user1 can add their tags:
data = [
{ "name": user1, "tags": [ "tag1", "tag2" ] }
]
response = self.client.put(url, data=json.dumps(data), **user1_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, data)
# Now make sure user1 can't update the owner's tags:
data2 = [
{ "name": owner, "tags": [ "tag3", "tag4" ] },
{ "name": user1, "tags": [ "tag1", "tag2" ] },
]
response = self.client.put(url, data=json.dumps(data2), **user1_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, data)
# Make sure the owner can set those same values:
response = self.client.put(url, data=json.dumps(data2), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, data2)
# Now make sure the owner can't add tags as user1
data3 = [
{ "name": owner, "tags": [ "tag3", "tag4" ] },
{ "name": user1, "tags": [ "tag1", "tag2", "tag5" ] },
]
response = self.client.put(url, data=json.dumps(data3), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, data2)
# Make sure the owner can remove a tag from user1, and add it to their list
data4 = [
{ "name": owner, "tags": [ "tag1", "tag3", "tag4" ] },
{ "name": user1, "tags": [ "tag2" ] },
]
response = self.client.put(url, data=json.dumps(data4), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
self.assertEquals(response_data, data4)
# Grant user2 access...
new_data = { "accounts": [ user1, user2 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# make sure user2 can add a tag, but not remove user1's
data5 = [
{ "name": owner, "tags": [ "tag1", "tag4" ] },
{ "name": user1, "tags": [ ] },
{ "name": user2, "tags": [ "tag99" ] },
]
response = self.client.put(url, data=json.dumps(data5), **user2_auth_headers)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content.decode("utf-8"))
data5_correct = [
{ "name": owner, "tags": [ "tag1", "tag3", "tag4" ] },
{ "name": user1, "tags": [ "tag2" ] },
{ "name": user2, "tags": [ "tag99" ] },
]
self.assertEquals(response_data, data5_correct)
# Make sure all this data is in the dataset api itself:
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name })
response = self.client.get(url, **owner_auth_headers)
tags = json.loads(response.content.decode("utf-8"))["tags"]
self.assertEquals(tags, data5_correct)
|
|
# python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used with SpannerAdminApi to manage Spanner schema updates."""
import abc
from typing import Iterable, List, Optional, Type
from spanner_orm import condition
from spanner_orm import error
from spanner_orm import field
from spanner_orm import foreign_key_relationship
from spanner_orm import index
from spanner_orm import model
from spanner_orm.admin import api
from spanner_orm.admin import index_column
from spanner_orm.admin import metadata
class MigrationUpdate(abc.ABC):
"""Base class for all updates that can happen in a migration."""
@abc.abstractmethod
def execute(self) -> None:
"""Executes the update."""
raise NotImplementedError
class NoUpdate(MigrationUpdate):
"""Update that does nothing, for migrations that don't update db schemas."""
def execute(self) -> None:
"""See base class."""
class SchemaUpdate(MigrationUpdate, abc.ABC):
"""Base class for specifying schema updates."""
@abc.abstractmethod
def ddl(self) -> str:
raise NotImplementedError
def execute(self) -> None:
self.validate()
api.spanner_admin_api().update_schema(self.ddl())
def validate(self) -> None:
pass # TODO(dseomn): Remove this method.
class CreateTable(SchemaUpdate):
"""Update that allows creating a new table."""
def __init__(self, model_: Type[model.Model]):
self._model = model_
def ddl(self) -> str:
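# Emits: CREATE TABLE <table> (<col> <ddl>, ..., <foreign key DDL>) PRIMARY KEY (<keys>)
# [, INTERLEAVE IN PARENT <parent> ON DELETE CASCADE]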
key_fields = [
'{} {}'.format(name, field.ddl())
for name, field in self._model.fields.items()
]
key_fields_ddl = ', '.join(key_fields)
for relation in self._model.foreign_key_relations.values():
key_fields_ddl += f', {relation.ddl}'
index_ddl = 'PRIMARY KEY ({})'.format(', '.join(self._model.primary_keys))
statement = 'CREATE TABLE {} ({}) {}'.format(self._model.table,
key_fields_ddl, index_ddl)
if self._model.interleaved:
statement += ', INTERLEAVE IN PARENT {parent} ON DELETE CASCADE'.format(
parent=self._model.interleaved.table)
return statement
def validate(self) -> None:
if not self._model.table:
raise error.SpannerError('New table has no name')
existing_model = metadata.SpannerMetadata.model(self._model.table)
if existing_model:
raise error.SpannerError('Table {} already exists'.format(
self._model.table))
if self._model.interleaved:
self._validate_parent()
self._validate_primary_keys()
if self._model.indexes.keys() - {index.Index.PRIMARY_INDEX}:
raise error.SpannerError(
'Secondary indexes cannot be created by CreateTable; use CreateIndex '
'in a separate migration.')
def _validate_parent(self) -> None:
"""Verifies that the parent table information is valid."""
parent_primary_keys = self._model.interleaved.primary_keys
primary_keys = self._model.primary_keys
message = 'Table {} is not a child of parent table {}'.format(
self._model.table, self._model.interleaved.table)
for parent_key, key in zip(parent_primary_keys, primary_keys):
if parent_key != key:
raise error.SpannerError(message)
if len(parent_primary_keys) > len(primary_keys):
raise error.SpannerError(message)
def _validate_primary_keys(self) -> None:
"""Verifies that the primary key data is valid."""
if not self._model.primary_keys:
raise error.SpannerError('Table {} has no primary key'.format(
self._model.table))
for key in self._model.primary_keys:
if key not in self._model.fields:
raise error.SpannerError(
'Table {} column {} in primary key but not in schema'.format(
self._model.table, key))
class DropTable(SchemaUpdate):
"""Update for dropping an existing table."""
def __init__(self, table_name: str):
self._table = table_name
def ddl(self) -> str:
return 'DROP TABLE {}'.format(self._table)
class AddColumn(SchemaUpdate):
"""Update for adding a column to an existing table.
Only supports adding nullable columns
"""
def __init__(self, table_name: str, column_name: str, field_: field.Field):
self._table = table_name
self._column = column_name
self._field = field_
def ddl(self) -> str:
return 'ALTER TABLE {} ADD COLUMN {} {}'.format(self._table, self._column,
self._field.ddl())
def validate(self) -> None:
model_ = metadata.SpannerMetadata.model(self._table)
if not model_:
raise error.SpannerError('Table {} does not exist'.format(self._table))
if not self._field.nullable():
raise error.SpannerError('Column {} is not nullable'.format(self._column))
if self._field.primary_key():
raise error.SpannerError('Column {} is a primary key'.format(
self._column))
class DropColumn(SchemaUpdate):
"""Update for dropping a column from an existing table."""
def __init__(self, table_name: str, column_name: str):
self._table = table_name
self._column = column_name
def ddl(self) -> str:
return 'ALTER TABLE {} DROP COLUMN {}'.format(self._table, self._column)
def validate(self) -> None:
model_ = metadata.SpannerMetadata.model(self._table)
if not model_:
raise error.SpannerError('Table {} does not exist'.format(self._table))
if self._column not in model_.fields:
raise error.SpannerError('Column {} does not exist on {}'.format(
self._column, self._table))
# Verify no indices exist on the column we're trying to drop
num_indexed_columns = index_column.IndexColumnSchema.count(
condition.equal_to('column_name', self._column),
condition.equal_to('table_name', self._table),
)
if num_indexed_columns > 0:
raise error.SpannerError('Column {} is indexed'.format(self._column))
class AlterColumn(SchemaUpdate):
"""Update for altering a column an existing table.
Only supports changing the nullability of a column
"""
def __init__(self, table_name: str, column_name: str, field_: field.Field):
self._table = table_name
self._column = column_name
self._field = field_
def ddl(self) -> str:
return 'ALTER TABLE {} ALTER COLUMN {} {}'.format(self._table, self._column,
self._field.ddl())
def validate(self) -> None:
model_ = metadata.SpannerMetadata.model(self._table)
if not model_:
raise error.SpannerError('Table {} does not exist'.format(self._table))
if self._column not in model_.fields:
raise error.SpannerError('Column {} does not exist on {}'.format(
self._column, self._table))
if self._column in model_.primary_keys:
raise error.SpannerError('Column {} is a primary key on {}'.format(
self._column, self._table))
old_field = model_.fields[self._column]
# Validate that the only alteration is to change column nullability
if self._field.field_type() != old_field.field_type():
raise error.SpannerError('Column {} is changing type'.format(
self._column))
if self._field.nullable() == old_field.nullable():
raise error.SpannerError('Column {} has no changes'.format(self._column))
class CreateIndex(SchemaUpdate):
"""Update for creating an index on an existing table."""
def __init__(self,
table_name: str,
index_name: str,
columns: Iterable[str],
interleaved: Optional[str] = None,
null_filtered: bool = False,
unique: bool = False,
storing_columns: Optional[Iterable[str]] = None):
self._table = table_name
self._index = index_name
self._columns = columns
self._parent_table = interleaved
self._null_filtered = null_filtered
self._unique = unique
self._storing_columns = storing_columns or []
def ddl(self) -> str:
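# Emits: CREATE [UNIQUE] [NULL_FILTERED] INDEX <name> ON <table> (<cols>)
# [ STORING (<cols>)] [, INTERLEAVE IN <parent>]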
statement = 'CREATE'
if self._unique:
statement += ' UNIQUE'
if self._null_filtered:
statement += ' NULL_FILTERED'
statement += (f' INDEX {self._index} '
f'ON {self._table} ({", ".join(self._columns)})')
if self._storing_columns:
statement += ' STORING ({})'.format(', '.join(self._storing_columns))
if self._parent_table:
statement += ', INTERLEAVE IN {}'.format(self._parent_table)
return statement
def validate(self) -> None:
model_ = metadata.SpannerMetadata.model(self._table)
if not model_:
raise error.SpannerError('Table {} does not exist'.format(self._table))
if not self._columns:
raise error.SpannerError('Index {} has no columns'.format(self._index))
if self._index in model_.indexes:
raise error.SpannerError('Index {} already exists'.format(self._index))
self._validate_columns(model_)
if self._parent_table:
self._validate_parent(model_)
def _validate_columns(self, model_: Type[model.Model]) -> None:
"""Verifies all columns exist and are not part of the primary key."""
for column in self._columns:
if column not in model_.columns:
raise error.SpannerError('Table {} has no column {}'.format(
self._table, column))
for column in self._storing_columns:
if column not in model_.columns:
raise error.SpannerError('Table {} has no column {}'.format(
self._table, column))
if column in model_.primary_keys:
raise error.SpannerError('{} is part of the primary key for {}'.format(
column, self._table))
def _validate_parent(self, model_: Type[model.Model]) -> None:
"""Verifies this index can be interleaved in the parent table."""
parent = model_.interleaved
while parent:
if parent == self._parent_table:
break
parent = parent.interleaved
if not parent:
raise error.SpannerError('{} is not a parent of table {}'.format(
self._parent_table, self._table))
class DropIndex(SchemaUpdate):
"""Update for dropping a secondary index on an existing table."""
def __init__(self, table_name: str, index_name: str):
self._table = table_name
self._index = index_name
def ddl(self) -> str:
return 'DROP INDEX {}'.format(self._index)
def validate(self) -> None:
model_ = metadata.SpannerMetadata.model(self._table)
if not model_:
raise error.SpannerError('Table {} does not exist'.format(self._table))
db_index = model_.indexes.get(self._index)
if not db_index:
raise error.SpannerError('Index {} does not exist'.format(self._index))
if db_index.primary:
raise error.SpannerError('Index {} is the primary index'.format(
self._index))
class ExecutePartitionedDml(MigrationUpdate):
"""Update for running arbitrary partitioned DML.
NOTE: Partitioned DML queries should be idempotent. See
https://cloud.google.com/spanner/docs/dml-partitioned for details, and more
information about partitioned DML.
"""
def __init__(self, dml: str):
self._dml = dml
def execute(self) -> None:
"""See base class."""
api.spanner_admin_api().execute_partitioned_dml(self._dml)
def model_creation_ddl(model_: Type[model.Model]) -> List[str]:
"""Returns the list of ddl statements needed to create the model's table."""
ddl_list = [CreateTable(model_).ddl()]
for model_index in model_.indexes.values():
if model_index.primary:
continue
create_index = CreateIndex(
model_.table,
model_index.name,
model_index.columns,
interleaved=model_index.parent,
storing_columns=model_index.storing_columns)
ddl_list.append(create_index.ddl())
return ddl_list
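# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the migration API). The update
# classes above separate DDL generation (ddl()) from validation (validate()),
# so statements can be previewed without a live Spanner connection; the table,
# column and index names below are hypothetical.
if __name__ == '__main__':
    print(DropColumn('users', 'nickname').ddl())
    # -> ALTER TABLE users DROP COLUMN nickname
    print(CreateIndex('users', 'users_by_email', ['email'], unique=True).ddl())
    # -> CREATE UNIQUE INDEX users_by_email ON users (email)
    print(DropIndex('users', 'users_by_email').ddl())
    # -> DROP INDEX users_by_email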
|
|
import django_includes
from qurkexp.estimation.models import *
run_defs = {
'complete_test_vars_change10': { # and change1,...
'dataset': 'shape_blue_.1',
'vals_to_estimate': ["blue", "green"],
'num_batches': 5,
'batch_size': 10,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'complete_test_gtav_vars_change6': { # and change1,...
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1,
'batch_size': 100,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'complete_test_wgat_vars_change6': { # and change1,...
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 2,
'batch_size': 100,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'complete_test_gtav_batch_vars_change7': { # and change1,...
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 5,
'batch_size': 3,
'display_style': 'batch',
'assignments': 2,
'price': 0.01
},
'shape_blue_.1_test10': { # and test1,...
'dataset': 'shape_blue_.1',
'vals_to_estimate': ["blue"],
'num_batches': 5,
'batch_size': 10,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_bluered_test1': { # and test1,...
'dataset': 'shape_blue_.1',
'vals_to_estimate': ["blue", "red"],
'num_batches': 20,
'batch_size': 10,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_bluered_.1_200_10_real1': { # REAL
'dataset': 'shape_blue_.1',
'vals_to_estimate': ["blue", "red"],
'num_batches': 200,
'batch_size': 10,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_bluered_.1_200_100_real1': { # REAL
'dataset': 'shape_blue_.1',
'vals_to_estimate': ["blue", "red"],
'num_batches': 200,
'batch_size': 100,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_bluered_.1_200_50_real1': { # REAL notdone
'dataset': 'shape_blue_.1',
'vals_to_estimate': ["blue", "red"],
'num_batches': 200,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_bluered_.5_200_50_real1': { # REAL notdone
'dataset': 'shape_blue_.5',
'vals_to_estimate': ["blue", "red"],
'num_batches': 200,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_bluered_.5_200_100_real1': { # REAL
'dataset': 'shape_blue_.5',
'vals_to_estimate': ["blue", "red"],
'num_batches': 200,
'batch_size': 100,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_bluered_.5_200_10_real1': { # REAL notdone
'dataset': 'shape_blue_.5',
'vals_to_estimate': ["blue", "red"],
'num_batches': 200,
'batch_size': 10,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_male_real': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male"],
'num_batches': 200,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_female_real': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["female"],
'num_batches': 200,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_batch_real': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 10,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male_.1_batch5_real': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 5,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male_.1_male_real3': { # real2 messed up (ran in sandbox)
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 100,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_male_real4': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_male_real5': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 10,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
##########################################################################
# Experiments Friday March 23, 2012
##########################################################################
'gtav_male_.1_size150': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 150,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size125': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 125,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size100': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 100,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size75': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 75,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size50': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size25': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 25,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size10': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 10,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size5': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 5,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_batch_size10': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 10,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male_.1_batch_size10_noredundancy': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 10,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_batch_size5': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 5,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male_.1_batch_size5_noredundancy': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 5,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male_.01_size50': {
'dataset': 'gtav_male_.01',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.25_size50': {
'dataset': 'gtav_male_.25',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.5_size50': {
'dataset': 'gtav_male_.5',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.75_size50': {
'dataset': 'gtav_male_.75',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.9_size50': {
'dataset': 'gtav_male_.9',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.99_size50': {
'dataset': 'gtav_male_.99',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
# Monday morning 3/26
'gtav_male_.1_batch_size20': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male_.1_batch_size20_noredundancy': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_batch_size15': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 15,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male_.1_batch_size15_noredundancy': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 15,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
# Tuesday 3/27
'gtav_male_.1_size150_2': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 150,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male_.1_size125_2': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 125,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
# Wednesday, 3/28
'wgat_normal_batch_size20': { #fluke
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'wgat_normal_batch_size5': { #good
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 200,
'batch_size': 5,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'wgat_normal_batch_size5_noredundancy': { #fluke
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 1000,
'batch_size': 5,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'wgat_normal_batch_size20_noredundancy': { #good
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
# Thursday 3/29
'wgat_normal_size50': { # i had to kill the last three of these because they had a comment in them:/
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 500,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'wgat_normal_size20': {
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 500,
'batch_size': 20,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'wgat_normal_size5': {
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 500,
'batch_size': 5,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'wgat_normal2_batch_size20_noredundancy': {
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 500,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'wgat_normal2_batch_size5_noredundancy': {
'dataset': 'wgat_normal',
'vals_to_estimate': ["IS", "ME", "QF"],
'num_batches': 500,
'batch_size': 5,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'shape_yellowoutline_.1_size50': {
'dataset': 'shape_yellowoutline_.1',
'vals_to_estimate': ["yellow", "orange"],
'num_batches': 500,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_triangle_.1_size50': {
'dataset': 'shape_triangle_.1',
'vals_to_estimate': ["triangle", "circle"],
'num_batches': 500,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_yellowoutline2_.1_size50': {
'dataset': 'shape_yellowoutline_.1',
'vals_to_estimate': ["yellow", "orange"],
'num_batches': 500,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_triangle2_.1_size50': {
'dataset': 'shape_triangle_.1',
'vals_to_estimate': ["triangle", "circle"],
'num_batches': 500,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.01_size50': {
'dataset': 'gtav_male_.01',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.1_size50': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.25_size50': {
'dataset': 'gtav_male_.25',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.5_size50': {
'dataset': 'gtav_male_.5',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.75_size50': {
'dataset': 'gtav_male_.75',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.9_size50': {
'dataset': 'gtav_male_.9',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.99_size50': {
'dataset': 'gtav_male_.99',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_yellowoutline3_.1_size50': {
'dataset': 'shape_yellowoutline_.1',
'vals_to_estimate': ["yellow", "orange"],
'num_batches': 500,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
'shape_triangle3_.1_size50': {
'dataset': 'shape_triangle_.1',
'vals_to_estimate': ["triangle", "circle"],
'num_batches': 500,
'batch_size': 50,
'display_style': 'tile',
'assignments': 1,
'price': 0.01
},
# Friday March 30, 2012
'gtav_male2_.01_batch_size20_noredundancy': {
'dataset': 'gtav_male_.01',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.01_batch_size20': {
'dataset': 'gtav_male_.01',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male2_.1_batch_size20_noredundancy': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.1_batch_size20': {
'dataset': 'gtav_male_.1',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male2_.25_batch_size20_noredundancy': {
'dataset': 'gtav_male_.25',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.25_batch_size20': {
'dataset': 'gtav_male_.25',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male2_.5_batch_size20_noredundancy': {
'dataset': 'gtav_male_.5',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.5_batch_size20': {
'dataset': 'gtav_male_.5',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male2_.75_batch_size20_noredundancy': {
'dataset': 'gtav_male_.75',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.75_batch_size20': {
'dataset': 'gtav_male_.75',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male2_.9_batch_size20_noredundancy': {
'dataset': 'gtav_male_.9',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.9_batch_size20': {
'dataset': 'gtav_male_.9',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
'gtav_male2_.99_batch_size20_noredundancy': {
'dataset': 'gtav_male_.99',
'vals_to_estimate': ["male", "female"],
'num_batches': 1000,
'batch_size': 20,
'display_style': 'batch',
'assignments': 1,
'price': 0.01
},
'gtav_male2_.99_batch_size20': {
'dataset': 'gtav_male_.99',
'vals_to_estimate': ["male", "female"],
'num_batches': 200,
'batch_size': 20,
'display_style': 'batch',
'assignments': 5,
'price': 0.01
},
}
def load_run(run_name):
if run_name not in run_defs:
raise Exception("run_name not in experiment list (runs.py)")
ds = run_defs[run_name]
return (run_name, ds['dataset'], ds['vals_to_estimate'], ds['num_batches'], ds['batch_size'], ds['display_style'], ds['assignments'], ds['price'])
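# Usage sketch (illustrative): load_run() looks a run name up in run_defs and
# returns its definition as a flat tuple for unpacking; the run name below is
# one of the keys defined above.
if __name__ == '__main__':
    (name, dataset, vals_to_estimate, num_batches, batch_size,
     display_style, assignments, price) = load_run('gtav_male_.1_size50')
    print name, dataset, num_batches, batch_size, price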
|
|
import lasagne
import numpy as np
import theano
import theano.tensor as T
from sklearn import metrics
import layers
import params
import util
class NodeClassificationDCNN(object):
"""A DCNN model for node classification.
This is a shallow model.
(K, X) -> DCNN -> Dense -> Out
"""
def __init__(self, parameters, A):
self.params = parameters
self.var_K = T.tensor3('Apow')
self.var_X = T.matrix('X')
self.var_Y = T.imatrix('Y')
self.l_in_k = lasagne.layers.InputLayer((None, self.params.num_hops + 1, self.params.num_nodes), input_var=self.var_K)
self.l_in_x = lasagne.layers.InputLayer((self.params.num_nodes, self.params.num_features), input_var=self.var_X)
self._compute_diffusion_kernel(A)
# Overridable to customize init behavior.
self._register_model_layers()
loss_fn = params.loss_map[self.params.loss_fn]
update_fn = params.update_map[self.params.update_fn]
prediction = lasagne.layers.get_output(self.l_out)
self._loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y), mode='mean')
model_parameters = lasagne.layers.get_all_params(self.l_out)
self._updates = update_fn(self._loss, model_parameters, learning_rate=self.params.learning_rate)
if self.params.momentum:
self._updates = lasagne.updates.apply_momentum(self._updates, model_parameters)
self.apply_loss_and_update = theano.function([self.var_K, self.var_X, self.var_Y], self._loss, updates=self._updates)
self.apply_loss = theano.function([self.var_K, self.var_X, self.var_Y], self._loss)
def _compute_diffusion_kernel(self, A):
self.K = util.A_to_diffusion_kernel(A, self.params.num_hops)
def _register_model_layers(self):
self.l_dcnn = layers.DCNNLayer(
[self.l_in_k, self.l_in_x],
self.params,
1,
)
self.l_out = lasagne.layers.DenseLayer(
self.l_dcnn,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity],
)
def train_step(self, X, Y, batch_indices):
return self.apply_loss_and_update(
self.K[batch_indices, :, :], X, Y[batch_indices, :]
)
def validation_step(self, X, Y, valid_indices):
return self.apply_loss(
self.K[valid_indices, :, :], X, Y[valid_indices, :]
)
def fit(self, X, Y, train_indices, valid_indices):
num_nodes = X.shape[0]
print 'Training model...'
validation_losses = []
validation_loss_window = np.zeros(self.params.stop_window_size)
validation_loss_window[:] = float('+inf')
for epoch in range(self.params.num_epochs):
train_loss = 0.0
np.random.shuffle(train_indices)
num_batch = num_nodes // self.params.batch_size
for batch in range(num_batch):
start = batch * self.params.batch_size
end = min((batch + 1) * self.params.batch_size, train_indices.shape[0])
if start < end:
train_loss += self.train_step(X, Y, train_indices[start:end])
train_loss /= num_batch
valid_loss = self.validation_step(X, Y, valid_indices)
print "Epoch %d mean training error: %.6f" % (epoch, train_loss)
print "Epoch %d validation error: %.6f" % (epoch, valid_loss)
if self.params.print_train_accuracy:
predictions = self.predict(X, train_indices)
actuals = Y[train_indices, :].argmax(1)
print "Epoch %d training accuracy: %.4f" % (epoch, metrics.accuracy_score(predictions, actuals))
if self.params.print_valid_accuracy:
predictions = self.predict(X, valid_indices)
actuals = Y[valid_indices, :].argmax(1)
print "Epoch %d validation accuracy: %.4f" % (epoch, metrics.accuracy_score(predictions, actuals), )
validation_losses.append(valid_loss)
if self.params.stop_early:
if valid_loss >= validation_loss_window.mean():
print 'Validation loss did not decrease. Stopping early.'
break
validation_loss_window[epoch % self.params.stop_window_size] = valid_loss
def predict(self, X, prediction_indices):
pred = lasagne.layers.get_output(self.l_out)
# Create a function that applies the model to data to predict a class
pred_fn = theano.function([self.var_K, self.var_X], T.argmax(pred, axis=1))
# Return the predictions
predictions = pred_fn(self.K[prediction_indices, :, :], X)
return predictions
class TrueSparseNodeClassificationDCNN(NodeClassificationDCNN):
"""A DCNN model for node classification with truly sparse pre-thresholding.
This is a shallow model.
(K, X) -> DCNN -> Dense -> Out
"""
def _compute_diffusion_kernel(self, A):
self.K = util.sparse_A_to_diffusion_kernel(
A,
self.params.num_hops
)
def _register_model_layers(self):
input = self.l_in_k + [self.l_in_x]
self.l_dcnn = layers.SparseDCNNLayer(
input,
self.params,
1,
)
self.l_out = lasagne.layers.DenseLayer(
self.l_dcnn,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity],
)
def __init__(self, parameters, A):
self.params = parameters
self.var_K = []
for i in range(self.params.num_hops + 1):
self.var_K.append(T.matrix('K_%d' % i))
self.var_X = T.matrix('X')
self.var_Y = T.imatrix('Y')
self.l_in_k = [lasagne.layers.InputLayer((None, self.params.num_nodes), input_var=vK) for vK in self.var_K]
self.l_in_x = lasagne.layers.InputLayer((self.params.num_nodes, self.params.num_features), input_var=self.var_X)
self._compute_diffusion_kernel(A)
# Overridable to customize init behavior.
self._register_model_layers()
loss_fn = params.loss_map[self.params.loss_fn]
update_fn = params.update_map[self.params.update_fn]
prediction = lasagne.layers.get_output(self.l_out)
self._loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y), mode='mean')
model_parameters = lasagne.layers.get_all_params(self.l_out)
self._updates = update_fn(self._loss, model_parameters, learning_rate=self.params.learning_rate)
if self.params.momentum:
self._updates = lasagne.updates.apply_momentum(self._updates, model_parameters)
self.apply_loss_and_update = theano.function(self.var_K + [self.var_X, self.var_Y], self._loss, updates=self._updates)
self.apply_loss = theano.function(self.var_K + [self.var_X, self.var_Y], self._loss)
def train_step(self, X, Y, batch_indices):
#inputs = [k[batch_indices, :] for k in self.K] + [X, Y[batch_indices, :]]
inputs = self.K + [X, Y[batch_indices, :]]
return self.apply_loss_and_update(
*inputs
)
def validation_step(self, X, Y, valid_indices):
# self.K is a list of per-hop kernel matrices in this class, so pass the hops
# positionally, mirroring train_step above.
inputs = self.K + [X, Y[valid_indices, :]]
return self.apply_loss(*inputs)
class PostSparseNodeClassificationDCNN(NodeClassificationDCNN):
def _compute_diffusion_kernel(self, A):
self.K = util.A_to_post_sparse_diffusion_kernel(
A,
self.params.num_hops,
self.params.diffusion_threshold
)
class PreSparseNodeClassificationDCNN(NodeClassificationDCNN):
def _compute_diffusion_kernel(self, A):
self.K = util.A_to_pre_sparse_diffusion_kernel(
A,
self.params.num_hops,
self.params.diffusion_threshold
)
class DeepNodeClassificationDCNN(NodeClassificationDCNN):
"""A Deep DCNN model for node classification.
This model allows for several DCNN layers.
(K, X) -> DCNN -> DCNN -> ... -> DCNN -> Dense -> Out
"""
def __init__(self, parameters, A):
self.params = parameters
# Prepare indices input.
self.var_K = T.tensor3('Apow')
self.var_X = T.matrix('X')
self.var_I = T.ivector('I')
self.var_Y = T.imatrix('Y')
self.l_in_k = lasagne.layers.InputLayer((None, self.params.num_hops + 1, self.params.num_nodes),
input_var=self.var_K)
self.l_in_x = lasagne.layers.InputLayer((self.params.num_nodes, self.params.num_features), input_var=self.var_X)
self.l_indices = lasagne.layers.InputLayer(
(None,),
input_var=self.var_I
)
self.K = util.A_to_diffusion_kernel(A, self.params.num_hops)
# Overridable to customize init behavior.
self._register_model_layers()
loss_fn = params.loss_map[self.params.loss_fn]
update_fn = params.update_map[self.params.update_fn]
prediction = lasagne.layers.get_output(self.l_out)
self._loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y), mode='mean')
model_parameters = lasagne.layers.get_all_params(self.l_out)
self._updates = update_fn(self._loss, model_parameters, learning_rate=self.params.learning_rate)
if self.params.momentum:
self._updates = lasagne.updates.apply_momentum(self._updates, model_parameters)
self.apply_loss_and_update = theano.function([self.var_K, self.var_X, self.var_I, self.var_Y], self._loss,
updates=self._updates)
self.apply_loss = theano.function([self.var_K, self.var_X, self.var_I, self.var_Y], self._loss)
def _register_model_layers(self):
features_layer = self.l_in_x
num_features = self.params.num_features
for i in range(self.params.num_dcnn_layers):
l_dcnn = layers.DCNNLayer(
[self.l_in_k, features_layer],
self.params,
i + 1,
num_features=num_features,
)
num_features *= (self.params.num_hops + 1)
features_layer = lasagne.layers.ReshapeLayer(
l_dcnn,
(-1, num_features)
)
self.l_slice = layers.ArrayIndexLayer(
[features_layer, self.l_indices]
)
self.l_out = lasagne.layers.DenseLayer(
self.l_slice,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity],
)
def train_step(self, X, Y, batch_indices):
return self.apply_loss_and_update(
self.K, X, batch_indices, Y[batch_indices, :]
)
def validation_step(self, X, Y, valid_indices):
return self.apply_loss(
self.K, X, valid_indices, Y[valid_indices, :]
)
def predict(self, X, prediction_indices):
pred = lasagne.layers.get_output(self.l_out)
# Create a function that applies the model to data to predict a class
pred_fn = theano.function([self.var_K, self.var_X, self.var_I], T.argmax(pred, axis=1))
# Return the predictions
predictions = pred_fn(self.K, X, prediction_indices)
return predictions
class DeepDenseNodeClassificationDCNN(NodeClassificationDCNN):
"""A Deep DCNN model for node classification.
Composed of one DCNN layer for the input followed by several dense layers.
(K, X) -> DCNN -> Dense -> Dense -> ... -> Dense -> Out
"""
def _register_model_layers(self):
self.l_dcnn = layers.DCNNLayer(
[self.l_in_k, self.l_in_x],
self.params,
1,
)
input = self.l_dcnn
for i in range(self.params.num_dense_layers):
l_dense = lasagne.layers.DenseLayer(
input,
num_units=self.params.dense_layer_size,
nonlinearity=params.nonlinearity_map[self.params.dense_nonlinearity],
)
input = l_dense
self.l_out = lasagne.layers.DenseLayer(
input,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity],
)
class GraphClassificationDCNN(object):
"""A DCNN for graph classification.
DCNN Activations are mean-reduced across nodes.
(P, X) -> DCNN -> Dense -> Out
"""
def __init__(self, parameters):
self.params = parameters
self.var_A = T.matrix('A')
self.var_X = T.matrix('X')
self.var_Y = T.imatrix('Y')
self.l_in_a = lasagne.layers.InputLayer((None, None), input_var=self.var_A)
self.l_in_x = lasagne.layers.InputLayer((None, self.params.num_features), input_var=self.var_X)
# Overridable to customize init behavior.
self._register_model_layers()
loss_fn = params.loss_map[self.params.loss_fn]
update_fn = params.update_map[self.params.update_fn]
prediction = lasagne.layers.get_output(self.l_out)
loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y), mode='mean')
model_parameters = lasagne.layers.get_all_params(self.l_out)
self._updates = update_fn(loss, model_parameters, learning_rate=self.params.learning_rate)
if self.params.momentum:
self._updates = lasagne.updates.apply_momentum(self._updates, model_parameters)
self.apply_loss_and_update = theano.function([self.var_A, self.var_X, self.var_Y], loss, updates=self._updates)
self.apply_loss = theano.function([self.var_A, self.var_X, self.var_Y], loss)
pred = lasagne.layers.get_output(self.l_out)
self.pred_fn = theano.function([self.var_A, self.var_X], T.argmax(pred, axis=1))
def _register_model_layers(self):
self.l_dcnn = layers.AggregatedDCNNLayer(
[self.l_in_a, self.l_in_x],
self.params,
1,
)
self.l_out = lasagne.layers.DenseLayer(
self.l_dcnn,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity],
)
def train_step(self, a, x, y):
return self.apply_loss_and_update(
a, x, y
)
def validation_step(self, a, x, y):
return self.apply_loss(
a, x, y
)
def fit(self, A, X, Y, train_indices, valid_indices):
print 'Training model...'
validation_losses = []
validation_loss_window = np.zeros(self.params.stop_window_size)
validation_loss_window[:] = float('+inf')
for epoch in range(self.params.num_epochs):
np.random.shuffle(train_indices)
train_loss = 0.0
for index in train_indices:
train_loss += self.train_step(A[index], X[index], Y[index])
train_loss /= len(train_indices)
valid_loss = 0.0
for index in valid_indices:
valid_loss += self.validation_step(A[index], X[index], Y[index])
valid_loss /= len(valid_indices)
print "Epoch %d mean training error: %.6f" % (epoch, train_loss)
print "Epoch %d mean validation error: %.6f" % (epoch, valid_loss)
if np.isnan(train_loss) or np.isnan(valid_loss):
raise ValueError
train_acc = 0.0
if self.params.print_train_accuracy:
for index in train_indices:
pred = self.predict(A[index], X[index])
actual = Y[index].argmax()
if pred == actual:
train_acc += 1.0
train_acc /= len(train_indices)
print "Epoch %d training accuracy: %.4f" % (epoch, train_acc)
valid_acc = 0.0
if self.params.print_valid_accuracy:
for index in valid_indices:
pred = self.predict(A[index], X[index])
actual = Y[index].argmax()
if pred == actual:
valid_acc += 1.0
valid_acc /= len(valid_indices)
print "Epoch %d validation accuracy: %.4f" % (epoch, valid_acc)
validation_losses.append(valid_loss)
if self.params.stop_early:
if valid_loss >= validation_loss_window.mean():
print 'Validation loss did not decrease. Stopping early.'
break
validation_loss_window[epoch % self.params.stop_window_size] = valid_loss
def predict(self, a, x):
# Return the predictions
predictions = self.pred_fn(a, x)
return predictions
class GraphClassificationFeatureAggregatedDCNN(GraphClassificationDCNN):
"""A DCNN for graph classification.
DCNN Activations are mean-reduced across both nodes and features.
(P, X) -> DCNN -> Dense -> Out
"""
def _register_model_layers(self):
self.l_dcnn = layers.AggregatedFeaturesDCNNLayer(
[self.l_in_a, self.l_in_x],
self.params,
1,
)
self.l_out = lasagne.layers.DenseLayer(
self.l_dcnn,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity],
)
class DeepGraphClassificationDCNN(GraphClassificationDCNN):
"""A Deep DCNN for graph classification.
DCNN Activations are mean-reduced across nodes. Several DCNN layers.
(P, X) -> DCNN -> DCNN -> ... -> DCNN -> Dense -> Out
"""
def _register_model_layers(self):
features_layer = self.l_in_x
num_features = self.params.num_features
for i in range(self.params.num_dcnn_layers - 1):
l_dcnn = layers.UnaggregatedDCNNLayer(
[self.l_in_a, features_layer],
self.params,
i + 1,
num_features=num_features
)
features_layer = l_dcnn
num_features *= (self.params.num_hops + 1)
l_dcnn = layers.AggregatedDCNNLayer(
[self.l_in_a, features_layer],
self.params,
i + 1,
num_features=num_features,
)
self.l_out = lasagne.layers.DenseLayer(
l_dcnn,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity],
)
class DeepGraphClassificationDCNNWithReduction(DeepGraphClassificationDCNN):
"""A Deep DCNN for graph classification with a trivial reduction layer.
DCNN Activations are mean-reduced across nodes. Several DCNN layers.
(P, X) -> DCNN -> Reduction -> DCNN -> ... -> DCNN -> Dense -> Out
"""
def _register_model_layers(self):
graph_layer = self.l_in_a
features_layer = self.l_in_x
num_features = self.params.num_features
for i in range(self.params.num_dcnn_layers - 1):
l_dcnn = layers.UnaggregatedDCNNLayer(
[graph_layer, features_layer],
self.params,
i,
num_features=num_features
)
features_layer = l_dcnn
num_features *= (self.params.num_hops + 1)
graph_layer = layers.GraphReductionLayer(
[graph_layer, features_layer],
self.params,
)
l_dcnn = layers.AggregatedDCNNLayer(
[graph_layer, features_layer],
self.params,
i,
num_features=num_features,
)
self.l_out = lasagne.layers.DenseLayer(
l_dcnn,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity]
)
class DeepGraphClassificationDCNNWithKronReduction(DeepGraphClassificationDCNN):
"""A Deep DCNN for graph classification with a learnable reduction layer.
DCNN Activations are mean-reduced across nodes. Several DCNN layers.
(P, X) -> DCNN -> Reduction -> DCNN -> ... -> DCNN -> Dense -> Out
"""
def _register_model_layers(self):
graph_layer = self.l_in_a
features_layer = self.l_in_x
num_features = self.params.num_features
for i in range(self.params.num_dcnn_layers - 1):
l_dcnn = layers.UnaggregatedDCNNLayer(
[graph_layer, features_layer],
self.params,
i,
num_features=num_features
)
features_layer = l_dcnn
num_features *= (self.params.num_hops + 1)
eigenvec_layer = layers.SmallestEigenvecLayer(
[graph_layer],
self.params
)
graph_layer = layers.KronReductionLayerA(
[graph_layer, eigenvec_layer],
self.params,
)
features_layer = layers.KronReductionLayerX(
[graph_layer, features_layer, eigenvec_layer],
self.params,
)
l_dcnn = layers.AggregatedDCNNLayer(
[graph_layer, features_layer],
self.params,
i,
num_features=num_features,
)
self.l_out = lasagne.layers.DenseLayer(
l_dcnn,
num_units=self.params.num_classes,
nonlinearity=params.nonlinearity_map[self.params.out_nonlinearity]
)
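# Usage sketch (illustrative). `parameters` stands for whatever hyperparameter
# object this repository's `params` module builds; it must expose the
# attributes referenced above (num_nodes, num_features, num_hops, num_classes,
# batch_size, num_epochs, learning_rate, loss_fn, update_fn, ...).
#
#   A = ...   # (num_nodes, num_nodes) adjacency matrix
#   X = ...   # (num_nodes, num_features) feature matrix, float32
#   Y = ...   # (num_nodes, num_classes) one-hot labels, int32 (var_Y is an imatrix)
#   model = NodeClassificationDCNN(parameters, A)
#   model.fit(X, Y, train_indices, valid_indices)
#   predictions = model.predict(X, test_indices)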
|
|
# pylint:disable=no-member
import pickle
import logging
from collections import defaultdict
import networkx
from ...protos import cfg_pb2, primitives_pb2
from ...serializable import Serializable
from ...utils.enums_conv import cfg_jumpkind_to_pb, cfg_jumpkind_from_pb
from ...errors import AngrCFGError
from .cfg_node import CFGNode
from .memory_data import MemoryData
from ...misc.ux import once
l = logging.getLogger(name=__name__)
class CFGModel(Serializable):
"""
This class describes a Control Flow Graph for a specific range of code.
"""
__slots__ = ('ident', 'graph', 'jump_tables', 'memory_data', 'insn_addr_to_memory_data', 'references',
'_nodes_by_addr', '_nodes', '_cfg_manager', '_iropt_level', )
def __init__(self, ident, cfg_manager=None):
self.ident = ident
self._cfg_manager = cfg_manager
# Necessary settings
self._iropt_level = None
# The graph
self.graph = networkx.DiGraph()
# Jump tables
self.jump_tables = { }
# Memory references
# A mapping between address and the actual data in memory
self.memory_data = { }
# A mapping between address of the instruction that's referencing the memory data and the memory data itself
self.insn_addr_to_memory_data = { }
# Lists of CFGNodes indexed by the address of each block. Don't serialize
self._nodes_by_addr = defaultdict(list)
# CFGNodes dict indexed by block ID. Don't serialize
self._nodes = { }
#
# Properties
#
@property
def project(self):
if self._cfg_manager is None:
return None
return self._cfg_manager._kb._project
#
# Serialization
#
@classmethod
def _get_cmsg(cls):
return cfg_pb2.CFG()
def serialize_to_cmessage(self):
if "Emulated" in self.ident:
raise NotImplementedError("Serializing a CFGEmulated instance is currently not supported.")
cmsg = self._get_cmsg()
cmsg.ident = self.ident
# nodes
nodes = [ ]
for n in self.graph.nodes():
nodes.append(n.serialize_to_cmessage())
cmsg.nodes.extend(nodes)
# edges
edges = [ ]
for src, dst, data in self.graph.edges(data=True):
edge = primitives_pb2.Edge()
edge.src_ea = src.addr
edge.dst_ea = dst.addr
for k, v in data.items():
if k == 'jumpkind':
edge.jumpkind = cfg_jumpkind_to_pb(v)
elif k == 'ins_addr':
edge.ins_addr = v if v is not None else -1
elif k == 'stmt_idx':
edge.stmt_idx = v if v is not None else -1
else:
edge.data[k] = pickle.dumps(v)
edges.append(edge)
cmsg.edges.extend(edges)
# memory data
memory_data = [ ]
for data in self.memory_data.values():
memory_data.append(data.serialize_to_cmessage())
cmsg.memory_data.extend(memory_data)
return cmsg
@classmethod
def parse_from_cmessage(cls, cmsg, cfg_manager=None): # pylint:disable=arguments-differ
if cfg_manager is None:
# create a new model unassociated from any project
model = cls(cmsg.ident)
else:
model = cfg_manager.new_model(cmsg.ident)
# nodes
for node_pb2 in cmsg.nodes:
node = CFGNode.parse_from_cmessage(node_pb2, cfg=model)
model._nodes[node.block_id] = node
model._nodes_by_addr[node.addr].append(node)
model.graph.add_node(node)
if len(model._nodes_by_addr[node.addr]) > 1:
if once("cfg_model_parse_from_cmessage many nodes at addr"):
l.warning("Importing a CFG with more than one node for a given address is currently unsupported. "
"The resulting graph may be broken.")
# edges
for edge_pb2 in cmsg.edges:
# more than one node at a given address is unsupported, grab the first one
src = model._nodes_by_addr[edge_pb2.src_ea][0]
dst = model._nodes_by_addr[edge_pb2.dst_ea][0]
data = { }
for k, v in edge_pb2.data.items():
data[k] = pickle.loads(v)
data['jumpkind'] = cfg_jumpkind_from_pb(edge_pb2.jumpkind)
data['ins_addr'] = edge_pb2.ins_addr if edge_pb2.ins_addr != -1 else None
data['stmt_idx'] = edge_pb2.stmt_idx if edge_pb2.stmt_idx != -1 else None
model.graph.add_edge(src, dst, **data)
# memory data
for data_pb2 in cmsg.memory_data:
md = MemoryData.parse_from_cmessage(data_pb2)
model.memory_data[md.addr] = md
return model
#
# Other methods
#
def copy(self):
model = CFGModel(self.ident, cfg_manager=self._cfg_manager)
model.graph = networkx.DiGraph(self.graph)
model.jump_tables = self.jump_tables.copy()
model.memory_data = self.memory_data.copy()
model.insn_addr_to_memory_data = self.insn_addr_to_memory_data.copy()
model._nodes_by_addr = self._nodes_by_addr.copy()
model._nodes = self._nodes.copy()
return model
#
# CFG View
#
def get_node(self, block_id):
"""
Get a single node from node key.
:param BlockID block_id: Block ID of the node.
:return: The CFGNode
:rtype: CFGNode
"""
if block_id in self._nodes:
return self._nodes[block_id]
return None
def get_any_node(self, addr, is_syscall=None, anyaddr=False, force_fastpath=False):
"""
Get an arbitrary CFGNode (without considering their contexts) from our graph.
:param int addr: Address of the beginning of the basic block. Set anyaddr to True to support arbitrary
address.
:param bool is_syscall: Whether you want to get the syscall node or any other node. This is due to the fact that
syscall SimProcedures have the same address as the target they return to.
None means get either, True means get a syscall node, False means get something that isn't
a syscall node.
:param bool anyaddr: If anyaddr is True, then addr doesn't have to be the beginning address of a basic
block. By default the entire graph.nodes() will be iterated, and the first node
containing the specific address is returned, which is slow. If you need to do many such
queries, you may first call `generate_index()` to create some indices that may speed up the
query.
:param bool force_fastpath: If force_fastpath is True, it will only perform a dict lookup in the _nodes_by_addr
dict.
:return: A CFGNode if there is any that satisfies given conditions, or None otherwise
"""
# fastpath: directly look in the nodes list
if not anyaddr:
try:
return self._nodes_by_addr[addr][0]
except (KeyError, IndexError):
pass
if force_fastpath:
return None
# slower path
#if self._node_lookup_index is not None:
# pass
# the slowest path
# try to show a warning first
# TODO: re-enable it once the segment tree is implemented
#if self._node_lookup_index_warned == False:
# l.warning('Calling get_any_node() with anyaddr=True is slow on large programs. '
# 'For better performance, you may first call generate_index() to generate some indices that may '
# 'speed the node lookup.')
# self._node_lookup_index_warned = True
for n in self.graph.nodes():
if self.ident == "CFGEmulated":
cond = n.looping_times == 0
else:
cond = True
if anyaddr and n.size is not None:
cond = cond and (addr == n.addr or n.addr <= addr < n.addr + n.size)
else:
cond = cond and (addr == n.addr)
if cond:
if is_syscall is None:
return n
if n.is_syscall == is_syscall:
return n
return None
def get_all_nodes(self, addr, is_syscall=None, anyaddr=False):
"""
Get all CFGNodes whose address is the specified one.
:param addr: Address of the node
:param is_syscall: True returns the syscall node, False returns the normal CFGNode, None returns both
:return: all CFGNodes
"""
results = [ ]
for cfg_node in self.graph.nodes():
if cfg_node.addr == addr or (anyaddr and
cfg_node.size is not None and
cfg_node.addr <= addr < (cfg_node.addr + cfg_node.size)
):
if is_syscall and cfg_node.is_syscall:
results.append(cfg_node)
elif is_syscall is False and not cfg_node.is_syscall:
results.append(cfg_node)
elif is_syscall is None:
results.append(cfg_node)
return results
def nodes(self):
"""
An iterator of all nodes in the graph.
:return: The iterator.
:rtype: iterator
"""
return self.graph.nodes()
def get_predecessors(self, cfgnode, excluding_fakeret=True, jumpkind=None):
"""
Get predecessors of a node in the control flow graph.
:param CFGNode cfgnode: The node.
:param bool excluding_fakeret: True if you want to exclude all predecessors that are connected to the node
with a fakeret edge.
:param str or None jumpkind: Only return predecessors with the specified jumpkind. This argument will be
ignored if set to None.
:return: A list of predecessors
:rtype: list
"""
if excluding_fakeret and jumpkind == 'Ijk_FakeRet':
return [ ]
if not excluding_fakeret and jumpkind is None:
# fast path
if cfgnode in self.graph:
return list(self.graph.predecessors(cfgnode))
return [ ]
predecessors = []
for pred, _, data in self.graph.in_edges([cfgnode], data=True):
jk = data['jumpkind']
if jumpkind is not None:
if jk == jumpkind:
predecessors.append(pred)
elif excluding_fakeret:
if jk != 'Ijk_FakeRet':
predecessors.append(pred)
else:
predecessors.append(pred)
return predecessors
def get_successors(self, node, excluding_fakeret=True, jumpkind=None):
"""
Get successors of a node in the control flow graph.
:param CFGNode node: The node.
:param bool excluding_fakeret: True if you want to exclude all successors that are connected to the node
with a fakeret edge.
:param str or None jumpkind: Only return successors with the specified jumpkind. This argument will be
ignored if set to None.
:return: A list of successors
:rtype: list
"""
if jumpkind is not None:
if excluding_fakeret and jumpkind == 'Ijk_FakeRet':
return [ ]
if not excluding_fakeret and jumpkind is None:
# fast path
if node in self.graph:
return list(self.graph.successors(node))
return [ ]
successors = []
for _, suc, data in self.graph.out_edges([node], data=True):
jk = data['jumpkind']
if jumpkind is not None:
if jumpkind == jk:
successors.append(suc)
elif excluding_fakeret:
if jk != 'Ijk_FakeRet':
successors.append(suc)
else:
successors.append(suc)
return successors
def get_successors_and_jumpkind(self, node, excluding_fakeret=True):
"""
Get a list of tuples where the first element is the successor of the CFG node and the second element is the
jumpkind of the successor.
:param CFGNode node: The node.
:param bool excluding_fakeret: True if you want to exclude all successors that are fall-through successors.
:return: A list of successors and their corresponding jumpkinds.
:rtype: list
"""
successors = []
for _, suc, data in self.graph.out_edges([node], data=True):
if not excluding_fakeret or data['jumpkind'] != 'Ijk_FakeRet':
successors.append((suc, data['jumpkind']))
return successors
def get_all_predecessors(self, cfgnode):
"""
Get all predecessors of a specific node on the control flow graph.
:param CFGNode cfgnode: The CFGNode object
:return: A list of predecessors in the CFG
:rtype: list
"""
s = set()
for child, parent in networkx.dfs_predecessors(self.graph, cfgnode).items():
s.add(child)
s.add(parent)
return list(s)
def get_all_successors(self, cfgnode):
s = set()
for parent, children in networkx.dfs_successors(self.graph, cfgnode).items():
s.add(parent)
s = s.union(children)
return list(s)
def get_branching_nodes(self):
"""
Returns all nodes that have an out degree >= 2.
"""
nodes = set()
for n in self.graph.nodes():
if self.graph.out_degree(n) >= 2:
nodes.add(n)
return nodes
def get_exit_stmt_idx(self, src_block, dst_block):
"""
Get the corresponding exit statement ID for control flow to reach destination block from source block. The exit
statement ID was put on the edge when creating the CFG.
Note that there must be a direct edge between the two blocks, otherwise an exception will be raised.
:return: The exit statement ID
"""
if not self.graph.has_edge(src_block, dst_block):
raise AngrCFGError('Edge (%s, %s) does not exist in CFG' % (src_block, dst_block))
return self.graph[src_block][dst_block]['stmt_idx']
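# Usage sketch (illustrative). CFGModel instances are normally produced by a CFG
# analysis; `model` below stands for an existing, populated model (serialization
# is unsupported for CFGEmulated idents, see serialize_to_cmessage above), and
# 0x400000 is just an example address.
#
#   cmsg = model.serialize_to_cmessage()           # protobuf message
#   restored = CFGModel.parse_from_cmessage(cmsg)  # standalone copy, no cfg_manager
#   node = restored.get_any_node(0x400000, anyaddr=True)
#   succs = restored.get_successors_and_jumpkind(node) if node is not None else []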
|
|
'''
Commander Keen 1-3 support for Tiled
2014 <[email protected]>
http://imgur.com/9qaOUWy
'''
import os
import os.path
from tiled import *
from tiled.qt import *
from keenlib.ted15 import load
from keenlib.ted15 import save
class KeenVorticons(Plugin):
@classmethod
def nameFilter(cls):
return 'Keen 1-3 Level (level??.ck? LEVEL??.CK? level??.CK? LEVEL??.ck?)'
@classmethod
def supportsFile(cls, f):
if not f[:-1].lower().endswith('.ck'):
return False
return True
@classmethod
def read(cls, f):
level = load(f)
map = Tiled.Map(Tiled.Map.Orthogonal, level['width'], level['height'], 16, 16)
tset = Tiled.Tileset('Tiles', 16, 16, 0, 0)
directory = os.path.dirname(f)
episode = int(f[-1])
is_world = int(os.path.splitext(os.path.basename(f))[0].lower()[-2:]) >= 80
tileImageName = caseInsensitiveFile(directory, '{}TIL0000.BMP'.format(episode))
if tileImageName is None:
print 'No tileset file found'
return map
tileImage = QImage()
if not tileImage.load(os.path.join(directory, tileImageName), 'BMP'):
print 'Unable to open {}'.format(tileImageName)
return map
tset.loadFromImage(tileImage, tileImageName)
tileLayer = Tiled.TileLayer('Tile Plane', 0,0, level['width'],
level['height'])
spriteLayer = Tiled.ObjectGroup('Sprite Plane', 0,0, level['width'],
level['height'])
for y in xrange(level['height']):
for x in xrange(level['width']):
index = level['tiles'][y][x]
tileLayer.setCell(x, y, Tiled.Cell(tset.tileAt(index)))
sprite_size = QSizeF(16,16)
for y in range(level['height']):
for x in range(level['width']):
spriteNum = level['sprites'][y][x]
sprite = None
pos = QPointF(x*16, y*16)
def create_sprite(name, type):
return Tiled.MapObject(name, type, pos, sprite_size)
if is_world:
if spriteNum == 0:
pass
elif spriteNum == 255:
sprite = create_sprite(sprite_name(spriteNum, episode), 'Player')
elif spriteNum == 20:
sprite = create_sprite('Ship', 'Ship')
elif spriteNum & 0x8000:
sprite = create_sprite('Barricade (Level {})'.format(spriteNum & 0x7FFF), 'Barricade')
sprite.setProperty('Level', '{}'.format(spriteNum & 0x7FFF))
elif spriteNum < 32:
sprite = create_sprite('Level {}'.format(spriteNum), 'Level')
sprite.setProperty('Level', '{}'.format(spriteNum))
else:
sprite = create_sprite('Special {}'.format(hex(spriteNum)), 'Special')
sprite.setProperty('ID', '{}'.format(spriteNum))
else:
if spriteNum == 0:
pass
elif spriteNum == 255:
sprite = Tiled.MapObject(sprite_name(spriteNum, episode), 'Player', pos, QSizeF(16,32))
elif spriteNum > 32:
sprite = create_sprite('Switch ({})'.format(hex(spriteNum)), 'Switch')
sprite.setProperty('Target', '{}, {}'.format(*load_coordinates(spriteNum)))
else:
sprite = create_sprite(sprite_name(spriteNum, episode), 'Enemy')
sprite.setProperty('ID', '{}'.format(spriteNum))
if sprite is not None:
spriteLayer.addObject(sprite)
map.addLayer(tileLayer)
map.addLayer(spriteLayer)
map.addTileset(tset)
return map
@classmethod
def write(cls, m, f):
tileLayer_ = None
spriteLayer = None
for i in range(m.layerCount()):
if isTileLayerAt(m, i):
tileLayer_ = tileLayerAt(m, i)
elif isObjectGroupAt(m, i):
spriteLayer = objectGroupAt(m, i)
if tileLayer_ is None:
print 'Must have one TileLayer!'
return False
print tileLayer_
if spriteLayer is None:
print 'Must have one ObjectGroup!'
return False
level = {'width':tileLayer_.width(), 'height':tileLayer_.height(), 'tiles':[], 'sprites':[]}
for y in range(tileLayer_.height()):
level['tiles'].append([])
level['sprites'].append([])
for x in range(tileLayer_.width()):
cell = tileLayer_.cellAt(x, y)
level['tiles'][y].append(cell.tile.id())
level['sprites'][y].append(0)
for i in range(spriteLayer.objectCount()):
item = spriteLayer.objectAt(i)
x = int(item.x()/16)
y = int(item.y()/16)
if item.type() == 'Player':
level['sprites'][y][x] = 255
elif item.type() == 'Enemy' or item.type() == 'Special':
level['sprites'][y][x] = int(item.property('ID'))
elif item.type() == 'Switch':
pair = item.property('Target').split(', ')
pair[0] = int(pair[0])
pair[1] = int(pair[1])
level['sprites'][y][x] = dump_coordinates(pair)
elif item.type() == 'Ship':
level['sprites'][y][x] = 20
elif item.type() == 'Level':
level['sprites'][y][x] = int(item.property('Level'))
elif item.type() == 'Barricade':
level['sprites'][y][x] = int(item.property('Level')) | 0x8000
save(f, level)
return True
def caseInsensitiveFile(path, name):
candidates = os.listdir(path)
for item in candidates:
if item.lower() == name.lower():
return item
return None
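# Illustrative: caseInsensitiveFile() returns the directory entry whose name
# matches regardless of case, or None if nothing matches, e.g.
#   caseInsensitiveFile(directory, '1TIL0000.BMP')  ->  '1til0000.bmp' (if that
#   is how the file happens to be named on disk)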
def load_coordinates(ushort):
y = ushort >> 8
x = ushort & 0xFF
if x & 0x80:
x = -(0xFF-x)
if y & 0x80:
y = -(0xFF-y)
return (x, y)
def dump_coordinates(pair):
y = pair[1] & 0xFF
x = pair[0] & 0xFF
if x & 0x80:
x = 0xFF-x
if y & 0x80:
y = 0xFF-y
ushort = (x | (y << 8))
return ushort
_names = (
{},
{
1:'Yorp',
2:'Garg',
3:'Vorticon',
4:'ButlerBot',
5:'TankBot',
6:'Cannon up/right',
7:'Cannon up',
8:'Cannon down',
9:'Cannon up/left',
10:'Chain',
255:'Keen'
},
{
1:'Grunt',
2:'Youth',
3:'Elite',
4:'Scrub',
5:'GuardBot',
6:'Platform',
7:'Amoeba',
255:'Keen'
},
{
1:'Grunt',
2:'Youth',
3:'Mother',
4:'Meep',
5:'Vortininja',
6:'Foob',
7:'Ball',
8:'Jack',
9:'Horizontal Platform',
10:'Vertical Platform',
11:'Grunt Jumping',
12:'Spark',
13:'Heart',
14:'Turret Right',
15:'Turret Down',
16:'Arm',
17:'Left Leg',
18:'Right Leg',
255:'Keen'
}
)
def sprite_name(num, ep):
if num in _names[ep]:
return _names[ep][num]
return hex(num)
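# Illustrative examples of sprite_name(): it maps a raw sprite ID to a readable
# name for the given episode (1-3) and falls back to the hex value otherwise.
#   sprite_name(1, 1)   ->  'Yorp'
#   sprite_name(255, 2) ->  'Keen'
#   sprite_name(99, 3)  ->  '0x63'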
|
|
import numpy as np
from warnings import warn
from faps.calculate_geno_probs import calculate_geno_probs
class genotypeArray(object):
"""
Genotype information about a sample of individuals, the identity of any known
relatives, and metadata about the dataset.
Currently only SNP data are supported. Data are recorded as integers: zeros
and twos indicate opposing homozygous genotypes, and ones heterozygous
genotypes.
Parameters
----------
geno: array
3-dimensional array of genotype data indexing (1) individual, (2) locus,
and (3) chromosome pair.
geno_probs: array
Genotype probabilities for each individual at each locus, as returned by
calculate_geno_probs.
names: array-like
Unique identifiers for each individual.
mothers: array-like
Identifiers of the mother of each individual, if known.
fathers: array-like
Identifiers of the father of each individual, if known.
markers: array-like, optional
Marker names.
Returns
-------
size: int
Number of individuals.
nloci: int
Number of markers in the dataset
parents: array
Names of parent pairs for each individual.
families: array
List of unique full-sib families, based on the names of parents.
nfamilies: int
Number of full-sib families, based on the names of parents.
"""
def __init__(self, geno, geno_probs, names, mothers, fathers, markers=None):
self.geno = geno
self.geno_probs = geno_probs
self.names = names
self.mothers = mothers
self.fathers = fathers
self.markers = markers
self.size = self.geno.shape[0]
self.nloci = self.geno.shape[1]
self.parents = np.array([str(self.mothers[o]) + '/' + str(self.fathers[o]) for o in range(self.size)])
self.families = np.unique(self.parents)
self.nfamilies = len(self.families)
def allele_freqs(self):
"""
Allele frequencies of each locus. The reference allele is whichever is
labelled as 1.
"""
diploid = self.geno.sum(2) * 0.5
return np.nanmean(diploid, axis = 0)
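# Illustrative: allele_freqs() averages the two chromosome copies and then takes
# the mean over individuals, so it returns one frequency per locus.
#   freqs = my_genotypes.allele_freqs()   # shape (nloci,), values in [0, 1]
#   (`my_genotypes` stands for any genotypeArray instance.)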
def drop(self, individuals):
"""
Remove specific individuals from the genotype array.
Parameters
----------
individuals: an integer or list of integers indexing the individuals to be removed.
Returns
-------
A genotype array with the target individuals removed.
"""
# If indices are given
if isinstance(individuals, int):
if individuals > self.size or individuals < 0:
raise ValueError("The index for the individual to drop is greater than the number of individuals, or less than zero.")
individuals = [individuals]
# If names are given, find their positions in the list of names
if isinstance(individuals, str):
if individuals not in self.names:
raise ValueError("The name for the individual to drop is not present in the list of names for this genotypeArray object.")
individuals = [individuals]
if all(isinstance(x, str) for x in individuals):
if (~np.isin(individuals, self.names)).any():
raise ValueError("The name of one or more individuals to drop is not present in the list of names for this genotypeArray object.")
individuals = [np.where(self.names == x)[0][0] for x in individuals]
if any([i < 0 or i > self.size for i in individuals]):
raise ValueError("One or more indices for individuals to drop is greater than the total number of individuals, or less than zero.")
# Indices of candidates to keep.
new_index = ~np.isin(np.arange(self.size), individuals)
new_index = np.where(new_index)[0]
# create new genotypeArray.
output = genotypeArray(
geno = self.geno[new_index],
geno_probs = self.geno_probs[new_index],
names = self.names[new_index],
mothers = self.mothers[new_index],
fathers = self.fathers[new_index]
)
return output
def dropouts(self, dr):
"""
Add allelic dropouts to an array of genotypic data.
Parameters
----------
dr: float
Diploid dropout rate.
Returns
-------
A copy of the input genotype data, but with dropouts (shown as nan).
"""
# pick data points to dropout
vals = np.random.binomial(1, dr, self.size * self.nloci)
positions = np.reshape(vals, [ self.size, self.nloci])
positions = positions.astype("bool")
# make a copy of the genotype data, just in case
new_geno = np.copy(self.geno).astype(float)
new_geno_probs = np.copy(self.geno_probs).astype(float)
# insert missing data into parental genotypes
new_geno[positions] = np.nan
new_geno_probs[positions] = np.nan
output = genotypeArray(
geno = new_geno,
geno_probs = new_geno_probs,
names = self.names,
mothers = self.mothers,
fathers = self.fathers
)
return output
def heterozygosity(self, by='marker'):
"""
Mean heterozygosity, either averaged across markers for each individual,
or across individuals for each marker.
Parameters
----------
by: str
If 'individual', values are returned averaged across markers for each
individual. If 'marker', values are returned averaged across
individuals for each marker. Defaults to 'marker'.
Returns
-------
Vector of floats.
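Example
-------
A minimal sketch, assuming `make_parents` as elsewhere in this module.
from faps import *
import numpy as np
allele_freqs = np.random.uniform(0.3, 0.5, 10)
adults = make_parents(100, allele_freqs)
adults.heterozygosity(by='marker')      # one value per locus
adults.heterozygosity(by='individual')  # one value per individual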
"""
if by == 'marker' or by == 0:
return (self.geno.sum(2) == 1).mean(0)
elif by == 'individual' or by == 1:
return (self.geno.sum(2) == 1).mean(1)
else:
raise ValueError("`by` should be either 'marker' or 'individual'.")
def missing_data(self, by='marker'):
"""
Mean genotype drop-out rate, either averaged across markers for each individual,
or across individuals for each marker.
Parameters
----------
by: str
If 'individual', values are returned averaged across markers for each
individual. If 'marker', values are returned averaged across
individuals for each marker. Defaults to 'marker'.
Returns
-------
Vector of floats.
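Example
-------
A hedged sketch: simulate parents, add dropouts, then check missingness.
from faps import *
import numpy as np
allele_freqs = np.random.uniform(0.3, 0.5, 10)
adults = make_parents(5, allele_freqs).dropouts(0.05)
adults.missing_data(by='marker')      # one value per locus
adults.missing_data(by='individual')  # one value per individual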
"""
d = np.copy(self.geno).astype(float)
d[d == -9] = np.nan
if by == 'marker' or by == 0:
return np.isnan(d[:,:,0]).mean(0)
elif by == 'individual' or by == 1:
return np.isnan(d[:,:,0]).mean(1)
else:
raise ValueError("`by` should be either 'marker' or 'individual'.")
def mutations(self, mu):
"""
Introduce mutations at random to an array of genotype data for multiple individuals.
For all alleles present draw mutations given error rate mu, then swap zeroes and
ones in the array.
Parameters
----------
mu: float
Haploid genotyping error rate.
Returns
-------
A copy of the input genotype data, but with point mutations added.
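Example
-------
A minimal sketch: add genotyping errors at a rate of 0.0015 per haploid allele.
from faps import *
import numpy as np
allele_freqs = np.random.uniform(0.3, 0.5, 10)
adults = make_parents(5, allele_freqs)
noisy = adults.mutations(0.0015)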
"""
# make a copy of the data, and make it an integer
new_alleles = np.copy(self.geno)
# for an array of the same shape as newAlleles, draw mutations at each
# position with probability mu.
vals = np.random.binomial(1, mu, self.size * self.nloci * 2)
mutate = np.reshape(vals, [ self.size, self.nloci, 2])
mutate = (mutate == 1)
# swap zeroes and ones.
new_alleles[mutate] = 1 - new_alleles[mutate]
# Apply to geno_probs
new_geno_probs = calculate_geno_probs(new_alleles, mu=mu)
output = genotypeArray(
geno = new_alleles,
geno_probs = new_geno_probs,
names = self.names,
mothers= self.mothers,
fathers = self.fathers
)
return output
def parent_index(self, parent, parent_names):
"""
Finds the position of parental names in a vector of possible parental
names. This can be the name of the mother or the father.
This is essentially a convenient wrapper for np.where().
Parameters
----------
parent: str
A string indicating whether the offspring's mother or father is to
be located. Valid arguments are 'mother', 'father' and 'parents', or
equivalently 'm' and 'f' respectively.
parent_names: array
1-d array of parental names to be found in the lists of mothers,
father or parents.
Returns
-------
A list of positions of the parent for each entry in offs_names.
Example
-------
from faps import *
import numpy as np
# create genotypes
allele_freqs = np.random.uniform(0.3,0.5,10)
parents = make_parents(5, allele_freqs, family_name='my_population')
progeny = make_sibships(parents, 0, [1,2,3], 4, 'myprogeny')
progeny.parent_index('mother', parents.names) # position of the mother
progeny.parent_index('father', parents.names) # positions of the fathers
"""
if parent == 'mother' or parent == 'm':
return [np.where(parent_names == x)[0][0] for x in self.mothers]
if parent == 'father' or parent == 'f':
return [np.where(parent_names == x)[0][0] for x in self.fathers]
if parent == 'parents' or parent == 'p':
return [np.where(parent_names == x)[0][0] for x in self.parents]
else:
raise ValueError("parent must be 'mother', 'father', or 'parents'.")
def split(self, by, return_dict=True):
"""
Split up a genotypeArray into groups according to some grouping
factor. For example, divide an array containing genotype data for
multiple half-sibling arrays by the ID of their mothers.
Parameters
----------
by: array-like
Vector containing grouping labels for each individual
return_dict: logical
If True, the output is returned as a dictionary of genotypeArray
objects indexed by entries in `by`. If False, a list is returned.
Defaults to True.
Returns
-------
A dictionary of genotypeArray objects.
Examples
--------
from faps import *
import numpy as np
# Generate a population of adults
allele_freqs = np.random.uniform(0.3,0.5,50)
adults = make_parents(20, allele_freqs)
# Mate the first adult to the next three.
mother = adults.subset(0)
progeny = make_sibships(adults, 0, [1,2,3], 5, 'x')
# Split by fathers
progeny.split(progeny.fathers)
"""
groups = np.unique(by)
ix = [np.where(by == groups[i])[0] for i in range(len(groups))]
if return_dict:
output = {k:self.subset(i) for k,i in zip(groups, ix)}
else:
output = [self.subset(i) for i in ix]
return output
def subset(self, individuals=None, loci=None):
"""
Subset the genotype array by individual or number of loci.
To subset by both individuals and loci, call the function twice.
Parameters
----------
individuals: int, str, or a vector thereof
Either a list of individual names, or integers indexing those
individuals.
loci: int, str, or a vector thereof
Either a list of marker names, or integers indexing the positions of
those markers.
Returns
-------
A genotype array with only the target individuals included.
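Example
-------
A minimal sketch, assuming `make_parents` as in the examples above; subsetting
by loci additionally assumes that marker names were supplied.
from faps import *
import numpy as np
allele_freqs = np.random.uniform(0.3, 0.5, 10)
adults = make_parents(5, allele_freqs)
adults.subset(individuals=[0, 2])  # keep the first and third individuals
adults.subset(loci=[0, 1, 2])      # keep the first three loci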
"""
# if no subsetting indices are given, return the whole object.
if individuals is None and loci is None:
return self
# Subset individuals if necessary
if individuals is not None:
# If only a single individual is given, make it a list.
if isinstance(individuals, int):
individuals = [individuals]
if isinstance(individuals, str):
individuals = [individuals]
# If names are given, find their positions in the list of names
if all(isinstance(x, str) for x in individuals):
individuals = [np.where(self.names == x)[0][0] for x in individuals]
# Subset the genotypeArray
output = genotypeArray(
geno = self.geno[individuals],
geno_probs = self.geno_probs[individuals],
names = self.names[individuals],
mothers = self.mothers[individuals],
fathers = self.fathers[individuals],
markers = self.markers)
# Subset loci if necessary
if loci is not None:
# If only a single locus is given, make it a list.
if isinstance(loci, int):
loci = [loci]
if isinstance(loci, str):
loci = [loci]
# If marker names are given, find their positions in the list of marker names
if all(isinstance(x, str) for x in loci):
loci = [np.where(self.markers == x)[0][0] for x in loci]
# If an array of boolean values is given, convert it to a list of indices
if isinstance(loci, np.ndarray):
if np.result_type(loci) == 'bool':
loci = np.arange(len(loci))[loci]
loci = loci.tolist()
# Subset the genotypeArray
output = genotypeArray(
geno = self.geno[:,loci],
geno_probs = self.geno_probs[:,loci],
names = self.names,
mothers = self.mothers,
fathers = self.fathers,
markers = self.markers[loci])
return output
def true_partition(self):
"""
For families of known parentage (usually simulated data), create a full-sibship
partition vector from the identities of the known mothers and fathers stored in
the 'mothers' and 'fathers' attributes of a genotype array.
If one or more individuals have at least one missing parent, they will all be
assigned to the same full-sibship group.
Returns
-------
An array of integers with an entry for each offspring individual. Individuals are labelled
according to their full sibling group.
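Example
-------
A hedged sketch: three full-sib families of five offspring give three labels.
from faps import *
import numpy as np
allele_freqs = np.random.uniform(0.3, 0.5, 50)
adults = make_parents(10, allele_freqs)
progeny = make_sibships(adults, 0, [1, 2, 3], 5, 'x')
progeny.true_partition()  # array of 15 integers labelling the three families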
"""
if 'NA' in self.mothers or 'NA' in self.fathers:
warn('Warning: one or more individuals have at least one parent of unknown identity.')
warn('All such individuals will be assigned to the same sibship group.')
# concatenate mother and father names to create a vector of parent pairs.
#parentage = np.array([str(self.mothers[o]) + '/' + str(self.fathers[o]) for o in range(noffs)])
possible_families = np.unique(self.parents) # get a list of all unique parent pairs
partitions = np.zeros(self.size).astype('int') # empty vector of zeros.
for o in range(self.nfamilies):
# for every possible family label individuals with an identical integer.
partitions[self.parents == possible_families[o]] += o
return partitions
def write(self, filename, delimiter = ','):
"""
Write data in a genotypeArray to disk. Columns for known mothers and fathers
are included, even if these are all NA.
Parameters
----------
filename: str
System path to write to.
delimiter: str, optional
Column delimiter. Defaults to commas to generate CSV files.
Returns
-------
A text file at the specified path.
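Example
-------
A minimal sketch; the output path is illustrative, and marker names are
assumed to have been supplied when the array was created.
from faps import *
import numpy as np
allele_freqs = np.random.uniform(0.3, 0.5, 10)
adults = make_parents(5, allele_freqs)
adults.write('adults.csv')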
"""
# Names of individuals, plus mothers and fathers.
nms = np.column_stack([self.names, self.mothers, self.fathers])
# format genotype data as strings
output = self.geno.sum(2).astype('str')
output[output == '-18'] = 'NA' # coerce missing data (-9 per allele, so -18 after summing) to NA
output = np.concatenate([nms, output], axis=1)
header = 'ID,mother,father,' + ','.join(self.markers)
np.savetxt(filename, output, delimiter=delimiter, fmt="%s", header=header, comments='')
|
|
# vim: set fileencoding=utf-8
from collections import defaultdict
import re
import inflection
try:
key = ('(?i)(p)erson$', '\\1eople')
del inflection.PLURALS[inflection.PLURALS.index(key)]
except ValueError:
pass
from regparser.layer import def_finders
from regparser.layer.scope_finder import ScopeFinder
from regparser.layer.layer import Layer
from regparser.tree import struct
from regparser.tree.priority_stack import PriorityStack
import settings
MAX_TERM_LENGTH = 100
class ParentStack(PriorityStack):
"""Used to keep track of the parents while processing nodes to find
terms. This is needed as the definition may need to find its scope in
parents."""
def unwind(self):
"""No collapsing needs to happen."""
self.pop()
def parent_of(self, node):
level = self.peek_level(node.depth() - 1)
return level[-1] if level else None
class Terms(Layer):
shorthand = 'terms'
STARTS_WITH_WORDCHAR = re.compile(r'^\w.*$')
ENDS_WITH_WORDCHAR = re.compile(r'^.*\w$')
def __init__(self, *args, **kwargs):
Layer.__init__(self, *args, **kwargs)
self.layer['referenced'] = {}
# scope -> List[(term, definition_ref)]
self.scoped_terms = defaultdict(list)
self.scope_finder = ScopeFinder()
def look_for_defs(self, node, stack=None):
"""Check a node and recursively check its children for terms which are
being defined. Add these definitions to self.scoped_terms."""
stack = stack or ParentStack()
stack.add(node.depth(), node)
if node.node_type in (struct.Node.REGTEXT, struct.Node.SUBPART,
struct.Node.EMPTYPART):
included, excluded = self.node_definitions(node, stack)
if included:
for scope in self.scope_finder.determine_scope(stack):
self.scoped_terms[scope].extend(included)
self.scoped_terms['EXCLUDED'].extend(excluded)
for child in node.children:
self.look_for_defs(child, stack)
def pre_process(self):
"""Step through every node in the tree, finding definitions. Also keep
track of which subpart we are in. Finally, document all defined terms.
"""
self.scope_finder.add_subparts(self.tree)
self.look_for_defs(self.tree)
referenced = self.layer['referenced']
for scope in self.scoped_terms:
for ref in self.scoped_terms[scope]:
key = ref.term + ":" + ref.label
if (key not in referenced or # New term
# Or this term is earlier in the paragraph
ref.start < referenced[key]['position'][0]):
referenced[key] = {
'term': ref.term,
'reference': ref.label,
'position': ref.position
}
def applicable_terms(self, label):
"""Find all terms that might be applicable to nodes with this label.
Note that we don't have to deal with subparts as subpart_scope simply
applies the definition to all sections in a subpart"""
applicable_terms = {}
for segment_length in range(1, len(label) + 1):
scope = tuple(label[:segment_length])
for ref in self.scoped_terms.get(scope, []):
applicable_terms[ref.term] = ref # overwrites
return applicable_terms
def is_exclusion(self, term, node):
"""Some definitions are exceptions/exclusions of a previously
defined term. At the moment, we do not want to include these as they
would replace previous (correct) definitions. We also remove terms
which are inside an instance of the IGNORE_DEFINITIONS_IN setting"""
applicable_terms = self.applicable_terms(node.label)
if term in applicable_terms:
regex = 'the term .?' + re.escape(term) + '.? does not include'
if re.search(regex, node.text.lower()):
return True
for start, end in self.ignored_offsets(node.label[0], node.text):
if term in node.text[start:end]:
return True
return False
def node_definitions(self, node, stack=None):
"""Find defined terms in this node's text."""
references = []
stack = stack or ParentStack()
for finder in (def_finders.ExplicitIncludes(),
def_finders.SmartQuotes(stack),
def_finders.ScopeMatch(self.scope_finder),
def_finders.XMLTermMeans(references),
def_finders.DefinitionKeyterm(stack.parent_of(node))):
# Note that `extend` is very important as XMLTermMeans uses the
# list reference
references.extend(finder.find(node))
references = [r for r in references if len(r.term) <= MAX_TERM_LENGTH]
return (
[r for r in references if not self.is_exclusion(r.term, node)],
[r for r in references if self.is_exclusion(r.term, node)])
def process(self, node):
"""Determine which (if any) definitions would apply to this node,
then find if any of those terms appear in this node"""
applicable_terms = self.applicable_terms(node.label)
layer_el = []
# Remove any definitions defined in this paragraph
term_list = [
(term, ref) for term, ref in applicable_terms.iteritems()
if ref.label != node.label_id()]
exclusions = self.excluded_offsets(node)
matches = self.calculate_offsets(node.text, term_list, exclusions)
matches = sorted(matches, key=lambda(term, r, o): term)
for term, ref, offsets in matches:
layer_el.append({
"ref": ref.term + ':' + ref.label,
"offsets": offsets
})
return layer_el
def _word_matches(self, term, text):
"""Return the start and end indexes of the term within the text,
accounting for word boundaries"""
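# Illustrative behaviour (a hedged sketch, not part of the original code):
#   self._word_matches('act', 'the act requires action') -> [(4, 7)]
#   'action' is not matched because of the trailing \b word boundary.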
# @todo - this is rather slow -- probably want to memoize the results
regex = re.escape(term)
if self.STARTS_WITH_WORDCHAR.match(term):
regex = r'\b' + regex
if self.ENDS_WITH_WORDCHAR.match(term):
regex += r'\b'
regex = re.compile(regex)
return [(match.start(), match.end())
for match in regex.finditer(text)]
def ignored_offsets(self, cfr_part, text):
"""Return a list of offsets corresponding to the presence of an
"ignored" phrase in the text"""
ignored_phrases = (settings.IGNORE_DEFINITIONS_IN.get('ALL', []) +
settings.IGNORE_DEFINITIONS_IN.get(cfr_part, []))
positions = []
for phrase in ignored_phrases:
positions.extend(self._word_matches(phrase, text))
return positions
def excluded_offsets(self, node):
"""We explicitly exclude certain chunks of text (for example, words
we are defining shouldn't have links appear within the defined
term.) More will be added in the future"""
exclusions = []
for reflist in self.scoped_terms.values():
exclusions.extend(
ref.position for ref in reflist
if ref.label == node.label_id())
exclusions.extend(self.ignored_offsets(node.label[0], node.text))
return exclusions
def calculate_offsets(self, text, applicable_terms, exclusions=[],
inclusions=[]):
"""Search for defined terms in this text, including singular and
plural forms of these terms, with a preference for all larger
(i.e. containing) terms."""
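# Illustrative sketch of the expected input (an assumption, not from the
# original source): `applicable_terms` is an iterable of (term, ref) pairs,
# e.g. [('state', ref)]; both 'state' and 'states' are then searched for in
# `text`, with longer terms matched before shorter ones.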
# don't modify the original
exclusions = list(exclusions)
inclusions = list(inclusions)
# add singulars and plurals to search terms
search_terms = set((inflection.singularize(t[0]), t[1])
for t in applicable_terms)
search_terms |= set((inflection.pluralize(t[0]), t[1])
for t in applicable_terms)
# longer terms first
search_terms = sorted(search_terms, key=lambda x: len(x[0]),
reverse=True)
matches = []
for term, ref in search_terms:
re_term = ur'\b' + re.escape(term) + ur'\b'
offsets = [
(m.start(), m.end())
for m in re.finditer(re_term, text.lower())]
safe_offsets = []
for start, end in offsets:
# Start is contained in an existing def
if any(start >= e[0] and start <= e[1] for e in exclusions):
continue
# End is contained in an existing def
if any(end >= e[0] and end <= e[1] for e in exclusions):
continue
safe_offsets.append((start, end))
if not safe_offsets:
continue
exclusions.extend(safe_offsets)
matches.append((term, ref, safe_offsets))
return matches
|
|
# Copyright (C) 2015 Pure Storage, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import ddt
import mock
from oslo_utils import timeutils
from cinder import context as ctxt
from cinder.db.sqlalchemy import models
from cinder.image import cache as image_cache
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
@ddt.ddt
class ImageVolumeCacheTestCase(test.TestCase):
def setUp(self):
super(ImageVolumeCacheTestCase, self).setUp()
self.mock_db = mock.Mock()
self.mock_volume_api = mock.Mock()
self.context = ctxt.get_admin_context()
self.volume = models.Volume()
vol_params = {'id': fake.VOLUME_ID,
'host': 'foo@bar#whatever',
'cluster_name': 'cluster',
'size': 0}
self.volume.update(vol_params)
self.volume_ovo = objects.Volume(self.context, **vol_params)
def _build_cache(self, max_gb=0, max_count=0):
cache = image_cache.ImageVolumeCache(self.mock_db,
self.mock_volume_api,
max_gb,
max_count)
cache.notifier = self.notifier
return cache
def _build_entry(self, size=10):
entry = {
'id': 1,
'host': 'test@foo#bar',
'cluster_name': 'cluster@foo#bar',
'image_id': 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2',
'image_updated_at': timeutils.utcnow(with_timezone=True),
'volume_id': '70a599e0-31e7-49b7-b260-868f441e862b',
'size': size,
'last_used': timeutils.utcnow(with_timezone=True)
}
return entry
def test_get_by_image_volume(self):
cache = self._build_cache()
ret = {'id': 1}
volume_id = '70a599e0-31e7-49b7-b260-868f441e862b'
self.mock_db.image_volume_cache_get_by_volume_id.return_value = ret
entry = cache.get_by_image_volume(self.context, volume_id)
self.assertEqual(ret, entry)
self.mock_db.image_volume_cache_get_by_volume_id.return_value = None
entry = cache.get_by_image_volume(self.context, volume_id)
self.assertIsNone(entry)
def test_evict(self):
cache = self._build_cache()
entry = self._build_entry()
cache.evict(self.context, entry)
self.mock_db.image_volume_cache_delete.assert_called_once_with(
self.context,
entry['volume_id']
)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.evict', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(entry['host'], msg['payload']['host'])
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
@ddt.data(True, False)
def test_get_entry(self, clustered):
cache = self._build_cache()
entry = self._build_entry()
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
'properties': {
'virtual_size': '1.7'
},
'updated_at': entry['image_updated_at']
}
(self.mock_db.
image_volume_cache_get_and_update_last_used.return_value) = entry
if not clustered:
self.volume_ovo.cluster_name = None
expect = {'host': self.volume.host}
else:
expect = {'cluster_name': self.volume.cluster_name}
found_entry = cache.get_entry(self.context,
self.volume_ovo,
entry['image_id'],
image_meta)
self.assertDictEqual(entry, found_entry)
(self.mock_db.
image_volume_cache_get_and_update_last_used.assert_called_once_with)(
self.context,
entry['image_id'],
**expect
)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.hit', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(entry['host'], msg['payload']['host'])
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
def test_get_entry_not_exists(self):
cache = self._build_cache()
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
'properties': {
'virtual_size': '1.7'
},
'updated_at': timeutils.utcnow(with_timezone=True)
}
image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'
(self.mock_db.
image_volume_cache_get_and_update_last_used.return_value) = None
found_entry = cache.get_entry(self.context,
self.volume_ovo,
image_id,
image_meta)
self.assertIsNone(found_entry)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.miss', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(self.volume.host, msg['payload']['host'])
self.assertEqual(image_id, msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
@mock.patch('cinder.objects.Volume.get_by_id')
def test_get_entry_needs_update(self, mock_volume_by_id):
cache = self._build_cache()
entry = self._build_entry()
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
'properties': {
'virtual_size': '1.7'
},
'updated_at': entry['image_updated_at'] + timedelta(hours=2)
}
(self.mock_db.
image_volume_cache_get_and_update_last_used.return_value) = entry
mock_volume = mock.MagicMock()
mock_volume_by_id.return_value = mock_volume
found_entry = cache.get_entry(self.context,
self.volume_ovo,
entry['image_id'],
image_meta)
# Expect that the cache entry is not returned and the image-volume
# for it is deleted.
self.assertIsNone(found_entry)
self.mock_volume_api.delete.assert_called_with(self.context,
mock_volume)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.miss', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(self.volume.host, msg['payload']['host'])
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
def test_create_cache_entry(self):
cache = self._build_cache()
entry = self._build_entry()
image_meta = {
'updated_at': entry['image_updated_at']
}
self.mock_db.image_volume_cache_create.return_value = entry
created_entry = cache.create_cache_entry(self.context,
self.volume_ovo,
entry['image_id'],
image_meta)
self.assertEqual(entry, created_entry)
self.mock_db.image_volume_cache_create.assert_called_once_with(
self.context,
self.volume_ovo.host,
self.volume_ovo.cluster_name,
entry['image_id'],
entry['image_updated_at'].replace(tzinfo=None),
self.volume_ovo.id,
self.volume_ovo.size
)
def test_ensure_space_unlimited(self):
cache = self._build_cache(max_gb=0, max_count=0)
has_space = cache.ensure_space(self.context, self.volume)
self.assertTrue(has_space)
self.volume.size = 500
has_space = cache.ensure_space(self.context, self.volume)
self.assertTrue(has_space)
def test_ensure_space_no_entries(self):
cache = self._build_cache(max_gb=100, max_count=10)
self.mock_db.image_volume_cache_get_all.return_value = []
self.volume_ovo.size = 5
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.volume_ovo.size = 101
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertFalse(has_space)
def test_ensure_space_need_gb(self):
cache = self._build_cache(max_gb=30, max_count=10)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = []
entry1 = self._build_entry(size=12)
entries.append(entry1)
entry2 = self._build_entry(size=5)
entries.append(entry2)
entry3 = self._build_entry(size=10)
entries.append(entry3)
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 15
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(2, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
mock_delete.assert_any_call(self.context, entry3)
def test_ensure_space_need_count(self):
cache = self._build_cache(max_gb=30, max_count=2)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = []
entry1 = self._build_entry(size=10)
entries.append(entry1)
entry2 = self._build_entry(size=5)
entries.append(entry2)
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 12
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(1, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
def test_ensure_space_need_gb_and_count(self):
cache = self._build_cache(max_gb=30, max_count=3)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = []
entry1 = self._build_entry(size=10)
entries.append(entry1)
entry2 = self._build_entry(size=5)
entries.append(entry2)
entry3 = self._build_entry(size=12)
entries.append(entry3)
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 16
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(2, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
mock_delete.assert_any_call(self.context, entry3)
def test_ensure_space_cant_free_enough_gb(self):
cache = self._build_cache(max_gb=30, max_count=10)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = [self._build_entry(size=25)]
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 50
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertFalse(has_space)
mock_delete.assert_not_called()
|
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings, assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError, UndefinedMetricWarning
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import six
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(gamma='scale', kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_true(hasattr(clf, "coef_") == (k == 'linear'))
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(gamma='scale', kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(gamma='scale', kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVR() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVR() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(gamma='scale', kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM(gamma='scale')
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [-1, -1, -1])
assert_equal(pred.dtype, np.dtype('intp'))
assert_array_almost_equal(clf.intercept_, [-1.117], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.681, 0.139, 0.68, 0.14, 0.68, 0.68]],
decimal=3)
assert_raises(AttributeError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_oneclass_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf = svm.OneClassSVM(gamma=1).fit(X_train)
assert_array_equal(clf.score_samples([[2., 2.]]),
clf.decision_function([[2., 2.]]) + clf.offset_)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(gamma='scale', probability=True, random_state=0,
C=1.0), svm.NuSVC(gamma='scale', probability=True,
random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
# check shape of decision_function with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
def test_weight():
# Test class weights
clf = svm.SVC(gamma='scale', class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC(gamma="scale")):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC(gamma="scale")
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC(gamma="scale")
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22
@ignore_warnings(category=UndefinedMetricWarning)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test:
# class_weight="balanced"
# used to work only when the labels where a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(gamma='scale', C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(gamma='scale', nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(gamma="scale"), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
# error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC(gamma="scale")
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC(gamma="scale").fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC(gamma="scale")
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError
if six.PY2:
# Test unicode (same as str on python3)
clf = svm.SVC(kernel=u'linear', probability=True)
clf.fit(X, Y)
clf.predict_proba(T)
svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel=u'linear',
random_seed=0)
# Test default behavior on both versions
clf = svm.SVC(gamma='scale', kernel='linear', probability=True)
clf.fit(X, Y)
clf.predict_proba(T)
svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
# Check if Upper case notation raises error at _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(gamma='scale', kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(gamma='scale', kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC(gamma="scale")
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR(gamma='scale')
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svm_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(random_state=0, max_iter=2)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
lsvr = svm.LinearSVR(random_state=0, max_iter=2)
assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target)
assert_equal(lsvr.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(gamma='scale', probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(gamma='scale', probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(gamma='scale',
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
def test_ovr_decision_function():
# One point from each quadrant represents one class
X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
y_train = [0, 1, 2, 3]
# First point is closer to the decision boundaries than the second point
base_points = np.array([[5, 5], [10, 10]])
# For all the quadrants (classes)
X_test = np.vstack((
base_points * [1, 1], # Q1
base_points * [-1, 1], # Q2
base_points * [-1, -1], # Q3
base_points * [1, -1] # Q4
))
y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Test if the prediction is the same as y
assert_array_equal(y_pred, y_test)
deci_val = clf.decision_function(X_test)
# Assert that the predicted class has the maximum value
assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
# Get decision value at test points for the predicted class
pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
# Assert pred_class_deci_val > 0 here
assert_greater(np.min(pred_class_deci_val), 0.0)
# Test if the first point has lower decision value on every quadrant
# compared to the second point
assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))
def test_gamma_auto():
X, y = [[0.0, 1.2], [1.0, 1.3]], [0, 1]
msg = ("The default value of gamma will change from 'auto' to 'scale' in "
"version 0.22 to account better for unscaled features. Set gamma "
"explicitly to 'auto' or 'scale' to avoid this warning.")
assert_warns_message(FutureWarning, msg,
svm.SVC().fit, X, y)
assert_no_warnings(svm.SVC(kernel='linear').fit, X, y)
assert_no_warnings(svm.SVC(kernel='precomputed').fit, X, y)
def test_gamma_scale():
X, y = [[0.], [1.]], [0, 1]
clf = svm.SVC(gamma='scale')
assert_no_warnings(clf.fit, X, y)
assert_equal(clf._gamma, 2.)
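# Illustrative arithmetic for the assertion above (assuming gamma='scale' was computed
# as 1 / (n_features * X.std()) when this test was written): X.std() of [[0.], [1.]] is
# 0.5 and n_features is 1, so the expected gamma is 1 / (1 * 0.5) == 2.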
# X_std ~= 1 shouldn't raise a warning when gamma is not explicitly set.
X, y = [[1, 2], [3, 2 * np.sqrt(6) / 3 + 2]], [0, 1]
assert_no_warnings(clf.fit, X, y)
|
|
# MIT License
# Copyright (c) 2017 Tim Wentzlau
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Web socket ipc used by the web ui to communicate with Kervi """
import time
import inspect
import json
from kervi.spine import Spine
import kervi.utility.nethelper as nethelper
from kervi.core.authentication import Authorization
import kervi.utility.encryption as encryption
#from kervi.utility.kerviThread import KerviThread
from autobahn.asyncio.websocket import WebSocketServerProtocol
class _ObjectEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, "to_json"):
return self.default(obj.to_json())
elif hasattr(obj, "__dict__"):
data = dict(
(key, value)
for key, value in inspect.getmembers(obj)
if not key.startswith("__")
and not inspect.isabstract(value)
and not inspect.isbuiltin(value)
and not inspect.isfunction(value)
and not inspect.isgenerator(value)
and not inspect.isgeneratorfunction(value)
and not inspect.ismethod(value)
and not inspect.ismethoddescriptor(value)
and not inspect.isroutine(value)
)
return self.default(data)
return obj
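# A brief sketch of how this encoder is used further below (the component object here is
# hypothetical): objects exposing to_json() are serialized through that method, while other
# objects with a __dict__ are reduced to their plain, non-callable attributes, e.g.
#   json.dumps({"sensor": some_component}, cls=_ObjectEncoder)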
class _WebCommandHandler(object):
def __init__(self, command, protocol):
self.protocol = protocol
self.command = command
spine = Spine()
#print("rc", command)
spine.register_command_handler(command, self.on_command, injected="socketSpine")
def on_command(self, *args, **kwargs):
injected = kwargs.get("injected", "")
if not injected == "socketSpine":
jsonres = json.dumps({"messageType":"command", "command":self.command, "args":args}, ensure_ascii=False).encode('utf8')
self.protocol.sendMessage(jsonres, False)
class _WebQueryHandler(object):
def __init__(self, query, protocol):
self.protocol = protocol
self.query = query
spine = Spine()
#print("rq:", query)
spine.register_query_handler(query, self.on_query, injected="socketSpine")
def on_query(self, *args, **kwargs):
injected = kwargs.get("injected", "")
if not injected == "socketSpine":
jsonres=json.dumps({"messageType":"query", "query":self.query, "args":args}, ensure_ascii=False).encode('utf8')
self.protocol.sendMessage(jsonres, False)
class _WebEventHandler(object):
def __init__(self, event, id_event, protocol):
self.protocol = protocol
self.event = event
self.id_event = id_event
self.spine = Spine()
#print("re", event, id_event)
self.spine.register_event_handler(event, self.on_event, id_event, injected="socketSpine")
def on_event(self, id_event, *args, **kwargs):
injected = kwargs.get("injected", "")
groups = kwargs.get("groups", None)
process_id = kwargs.get("process_id", None)
self.spine.log.debug("WS relay event:{0} injected:{1}", self.event, injected)
authorized = True
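# The for/else below is a group check: if the user shares at least one group with the
# event the loop breaks and authorized stays True; the else branch runs only when the
# loop finishes without a break, i.e. no common group was found.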
if self.protocol.user != None and self.protocol.user["groups"] != None and groups != None and len(groups) > 0:
for group in groups:
if group in self.protocol.user["groups"]:
break
else:
authorized = False
if authorized and self.protocol.authenticated and not injected == "socketSpine":
cmd = {"messageType":"event", "event":self.event, "id":id_event, "args":args}
jsonres = json.dumps(cmd, cls=_ObjectEncoder, ensure_ascii=False).encode('utf8')
#if self.event=="userLogMessage":
# print("wum", id_event, process_id, injected, jsonres)
self.protocol.sendMessage(jsonres, False)
class _SpineProtocol(WebSocketServerProtocol):
def __init__(self):
self.spine = Spine()
WebSocketServerProtocol.__init__(self)
self.handlers = {"command":[], "query":[], "event":[]}
self.authenticated = False
self.session = None
self.user = None
self._authorization = Authorization()
def add_command_handler(self, command):
found = False
for command_handler in self.handlers["command"]:
if command_handler.command == command:
found = True
if not found:
self.handlers["command"] += [_WebCommandHandler(command, self)]
def add_query_handler(self, query):
found = False
for query_handler in self.handlers["query"]:
if query_handler.query == query:
found = True
if not found:
self.handlers["query"] += [_WebQueryHandler(query, self)]
def add_event_handler(self, event, id_event):
#print("ah", event, id_event)
found = False
for event_handler in self.handlers["event"]:
if event_handler.event == event and event_handler.id_event == id_event:
found = True
if not found:
self.handlers["event"] += [_WebEventHandler(event, id_event, self)]
def send_response(self, id, response, state="ok", message=""):
res = {
"id":id,
"messageType":"response",
"state":state,
"message":message,
"response":response
}
jsonres = json.dumps(res, ensure_ascii=False).encode('utf8')
self.sendMessage(jsonres, False)
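# Shape of the frame sent to the web client (the id and response values are illustrative):
#   {"id": 42, "messageType": "response", "state": "ok", "message": "", "response": {...}}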
def onConnect(self, request):
#print("Web socket Client connecting: {}".format(request.peer))
pass
def onOpen(self):
if self._authorization.active:
res = {
"messageType":"authenticate"
}
else:
self.authenticated = True
res = {
"messageType":"session_authenticated",
"session":"123456",
}
jsonres = json.dumps(res, ensure_ascii=False).encode('utf8')
self.sendMessage(jsonres, False)
def onMessage(self, payload, is_binary):
try:
obj = json.loads(payload.decode('utf8'))
if obj["messageType"] == "authenticate":
session, user = self._authorization.authorize(obj["userName"], obj["password"])
if session is None:
print("authorization failed for:", obj["userName"])
res = {
"messageType":"authentication_failed",
}
#self.close()
else:
self.session = session
self.user = user
self.authenticated = True
res = {
"messageType":"session_authenticated",
"session":session,
}
jsonres = json.dumps(res, ensure_ascii=False).encode('utf8')
self.sendMessage(jsonres, False)
elif obj["messageType"] == "logoff":
self.authenticated = False
self.session = None
self._authorization.remove_session(obj["session"])
res = {
"messageType":"session_logoff"
}
jsonres = json.dumps(res, ensure_ascii=False).encode('utf8')
self.sendMessage(jsonres, False)
else:
self.spine.log.debug("WS onMessage:{0}", obj)
if not self.authenticated:
pass
elif obj["messageType"] == "query":
res = self.spine.send_query(obj["query"], *obj["args"], injected="socketSpine", session=self.user)
self.spine.log.debug("query response:{0}", res)
self.send_response(obj["id"], res)
elif obj["messageType"] == "registerQueryHandler":
self.add_query_handler(obj["query"])
self.send_response(None, None)
elif obj["messageType"] == "command":
self.spine.send_command(obj["command"], *obj["args"], injected="socketSpine", session=self.user)
self.send_response(obj["id"], None)
elif obj["messageType"] == "registerCommandHandler":
self.add_command_handler(obj["command"])
self.send_response(obj["id"], None)
elif obj["messageType"] == "event":
self.spine.trigger_event(
obj["event"], obj["id"],
obj["args"],
injected="socketSpine"
)
self.send_response(obj["id"], None)
elif obj["messageType"] == "registerEventHandler":
self.add_event_handler(obj["event"], obj["eventId"])
self.send_response(obj["id"], None)
except:
self.spine.log.exception("WS onMessage exception")
#res={"execptionType":exc_type,"value":exc_value,"traceback":exc_traceback}
#self.sendResponse(res,"exception")
class SocketSpine:
def __init__(self, config):
coro = None
self._started = False
self._config = config
self._spine = Spine()
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio.websocket import WebSocketServerFactory
ssl_context = None
if encryption.enabled():
print("socket using ssl")
cert_file, key_file = encryption.get_cert()
try:
import ssl
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.load_cert_chain(cert_file, key_file)
print("socket ssl found")
except:
ssl_context = None
print("socket failed to use ssl")
self._spine.log.debug(
"start websocket on:{0}, port:{1}",
self._config.network.ip,
self._config.network.ws_port
)
print("start websocket: ", self._config.network.ip, self._config.network.ws_port)
self.factory = WebSocketServerFactory()
self.factory.protocol = _SpineProtocol
self.loop = asyncio.get_event_loop()
self.coro = self.loop.create_server(
self.factory,
self._config.network.ip,
self._config.network.ws_port,
ssl=ssl_context
)
def start_socket(self):
self.loop.run_until_complete(self.coro)
self._started = True
def step(self):
if self._started:
self.loop.run_until_complete(self.coro)
#loop.run_until_complete(coro_local)
time.sleep(.001)
|
|
# -*- coding: utf8 -*-
import mock
import os
import random
import string
import unittest
from .utils import placebo_session
from zappa.cli import ZappaCLI
from zappa.handler import LambdaHandler
from zappa.utilities import (add_event_source, remove_event_source)
from zappa.core import Zappa
def random_string(length):
return ''.join(random.choice(string.printable) for _ in range(length))
class TestZappa(unittest.TestCase):
def setUp(self):
self.sleep_patch = mock.patch('time.sleep', return_value=None)
# Tests expect us-east-1.
# If the user has set a different region in env variables, we set it aside for now and use us-east-1
self.users_current_region_name = os.environ.get('AWS_DEFAULT_REGION', None)
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.start()
def tearDown(self):
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.stop()
del os.environ['AWS_DEFAULT_REGION']
if self.users_current_region_name is not None:
# Give the user their AWS region back; we're done testing with us-east-1.
os.environ['AWS_DEFAULT_REGION'] = self.users_current_region_name
@placebo_session
def test_upload_remove_s3(self, session):
bucket_name = 'test_zappa_upload_s3'
z = Zappa(session)
zip_path = z.create_lambda_zip(minify=False)
res = z.upload_to_s3(zip_path, bucket_name)
self.assertTrue(res)
s3 = session.resource('s3')
# will throw ClientError with 404 if bucket doesn't exist
s3.meta.client.head_bucket(Bucket=bucket_name)
# will throw ClientError with 404 if object doesn't exist
s3.meta.client.head_object(
Bucket=bucket_name,
Key=zip_path,
)
res = z.remove_from_s3(zip_path, bucket_name)
self.assertTrue(res)
fail = z.upload_to_s3('/tmp/this_isnt_real', bucket_name)
self.assertFalse(fail)
# Will gracefully handle quirky S3 behavior on the 'us-east-1' region name
z.aws_region = 'us-east-1'
res = z.upload_to_s3(zip_path, bucket_name)
os.remove(zip_path)
self.assertTrue(res)
@placebo_session
def test_copy_on_s3(self, session):
bucket_name = 'test_zappa_upload_s3'
z = Zappa(session)
zip_path = z.create_lambda_zip(minify=False)
res = z.upload_to_s3(zip_path, bucket_name)
self.assertTrue(res)
s3 = session.resource('s3')
# will throw ClientError with 404 if bucket doesn't exist
s3.meta.client.head_bucket(Bucket=bucket_name)
# will throw ClientError with 404 if object doesn't exist
s3.meta.client.head_object(
Bucket=bucket_name,
Key=zip_path,
)
zp = 'copy_' + zip_path
res = z.copy_on_s3(zip_path, zp, bucket_name)
os.remove(zip_path)
self.assertTrue(res)
@placebo_session
def test_create_lambda_function_s3(self, session):
bucket_name = 'lmbda'
zip_path = 'Spheres-dev-1454694878.zip'
z = Zappa(session)
z.aws_region = 'us-east-1'
z.load_credentials(session)
z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
arn = z.create_lambda_function(
bucket=bucket_name,
s3_key=zip_path,
function_name='test_lmbda_function55',
handler='runme.lambda_handler'
)
arn = z.update_lambda_function(
bucket=bucket_name,
s3_key=zip_path,
function_name='test_lmbda_function55',
)
@placebo_session
def test_create_lambda_function_local(self, session):
bucket_name = 'lmbda'
local_file = 'Spheres-dev-1454694878.zip'
z = Zappa(session)
z.aws_region = 'us-east-1'
z.load_credentials(session)
z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
arn = z.create_lambda_function(
bucket=bucket_name,
local_zip=local_file,
function_name='test_lmbda_function55',
handler='runme.lambda_handler'
)
arn = z.update_lambda_function(
bucket=bucket_name,
local_zip=local_file,
function_name='test_lmbda_function55',
)
@placebo_session
def test_rollback_lambda_function_version(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution'
function_name = 'django-helloworld-unicode'
too_many_versions = z.rollback_lambda_function_version(function_name, 99999)
self.assertFalse(too_many_versions)
function_arn = z.rollback_lambda_function_version(function_name, 1)
@placebo_session
def test_invoke_lambda_function(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution'
function_name = 'django-helloworld-unicode'
payload = '{"event": "hello"}'
response = z.invoke_lambda_function(function_name, payload)
@placebo_session
def test_create_iam_roles(self, session):
z = Zappa(session)
arn, updated = z.create_iam_roles()
self.assertEqual(arn, "arn:aws:iam::123:role/{}".format(z.role_name))
@placebo_session
def test_get_api_url(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution'
url = z.get_api_url('Spheres-demonstration', 'demonstration')
@placebo_session
def test_fetch_logs(self, session):
z = Zappa(session)
z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
events = z.fetch_logs('Spheres-demonstration')
self.assertTrue(events is not None)
##
# Handler
##
@placebo_session
def test_handler(self, session):
# Init will test load_remote_settings
lh = LambdaHandler('test_settings', session=session)
# Annoyingly, this will fail during record, but
# the result will actually be okay to use in playback.
# See: https://github.com/garnaat/placebo/issues/48
self.assertEqual(os.environ['hello'], 'world')
event = {
"body": {},
"headers": {},
"params": {
"parameter_1": "asdf1",
"parameter_2": "asdf2",
},
"method": "GET",
"query": {}
}
lh.handler(event, None)
# Test scheduled event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'detail-type': u'Scheduled Event',
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
lh.handler(event, None)
# Test command event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'command': u'test_settings.command',
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
lh.handler(event, None)
# Test command for async event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'command': u'zappa.async.route_lambda_task',
u'task_path': u'tests.test_app.async_me',
u'args': [u'xxx'],
u'kwargs': {},
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
}
self.assertEqual('run async when on lambda xxx', lh.handler(event, None))
event[u'kwargs'] = {'foo': 'bar'}
self.assertEqual('run async when on lambda xxxbar', lh.handler(event, None))
# Test raw_command event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'raw_command': u'print("check one two")',
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
lh.handler(event, None)
# Test AWS S3 event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'Records': [{'s3': {'configurationId': 'test_project:test_settings.aws_s3_event'}}],
u'source': u'aws.events',
u'version': u'0',
u'time': u'2016-05-10T21:05:39Z',
u'id': u'0d6a6db0-d5e7-4755-93a0-750a8bf49d55',
u'resources': [u'arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me']
}
self.assertEqual("AWS S3 EVENT", lh.handler(event, None))
# Test AWS SNS event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'Records': [
{
u'EventVersion': u'1.0',
u'EventSource': u'aws:sns',
u'EventSubscriptionArn': u'arn:aws:sns:EXAMPLE',
u'Sns': {
u'SignatureVersion': u'1',
u'Timestamp': u'1970-01-01T00:00:00.000Z',
u'Signature': u'EXAMPLE',
u'SigningCertUrl': u'EXAMPLE',
u'MessageId': u'95df01b4-ee98-5cb9-9903-4c221d41eb5e',
u'Message': u'Hello from SNS!',
u'Subject': u'TestInvoke',
u'Type': u'Notification',
u'UnsubscribeUrl': u'EXAMPLE',
u'TopicArn': u'arn:aws:sns:1',
u'MessageAttributes': {
u'Test': {u'Type': u'String', u'Value': u'TestString'},
u'TestBinary': {u'Type': u'Binary', u'Value': u'TestBinary'}
}
}
}
]
}
self.assertEqual("AWS SNS EVENT", lh.handler(event, None))
# Test AWS SNS event
event = {
u'account': u'72333333333',
u'region': u'us-east-1',
u'detail': {},
u'Records': [
{
u'EventVersion': u'1.0',
u'EventSource': u'aws:sns',
u'EventSubscriptionArn': u'arn:aws:sns:EXAMPLE',
u'Sns': {
u'SignatureVersion': u'1',
u'Timestamp': u'1970-01-01T00:00:00.000Z',
u'Signature': u'EXAMPLE',
u'SigningCertUrl': u'EXAMPLE',
u'MessageId': u'95df01b4-ee98-5cb9-9903-4c221d41eb5e',
u'Message': u'{"args": ["arg1", "arg2"], "command": "zappa.async.route_sns_task", '
u'"task_path": "test_settings.aws_async_sns_event", "kwargs": {"arg3": "varg3"}}',
u'Subject': u'TestInvoke',
u'Type': u'Notification',
u'UnsubscribeUrl': u'EXAMPLE',
u'MessageAttributes': {
u'Test': {u'Type': u'String', u'Value': u'TestString'},
u'TestBinary': {u'Type': u'Binary', u'Value': u'TestBinary'}
}
}
}
]
}
self.assertEqual("AWS ASYNC SNS EVENT", lh.handler(event, None))
# Test AWS DynamoDB event
event = {
u'Records': [
{
u'eventID': u'1',
u'eventVersion': u'1.0',
u'dynamodb': {
u'Keys': {u'Id': {u'N': u'101'}},
u'NewImage': {u'Message': {u'S': u'New item!'}, u'Id': {u'N': u'101'}},
u'StreamViewType': u'NEW_AND_OLD_IMAGES',
u'SequenceNumber': u'111', u'SizeBytes': 26
},
u'awsRegion': u'us-west-2',
u'eventName': u'INSERT',
u'eventSourceARN': u'arn:aws:dynamodb:1',
u'eventSource': u'aws:dynamodb'
}
]
}
self.assertEqual("AWS DYNAMODB EVENT", lh.handler(event, None))
# Test AWS kinesis event
event = {
u'Records': [
{
u'eventID': u'shardId-000000000000:49545115243490985018280067714973144582180062593244200961',
u'eventVersion': u'1.0',
u'kinesis': {
u'partitionKey': u'partitionKey-3',
u'data': u'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=',
u'kinesisSchemaVersion': u'1.0',
u'sequenceNumber': u'49545115243490985018280067714973144582180062593244200961'
},
u'invokeIdentityArn': u'arn:aws:iam::EXAMPLE',
u'eventName': u'aws:kinesis:record',
u'eventSourceARN': u'arn:aws:kinesis:1',
u'eventSource': u'aws:kinesis',
u'awsRegion': u'us-east-1'
}
]
}
self.assertEqual("AWS KINESIS EVENT", lh.handler(event, None))
# Test AWS SQS event
event = {
u"Records": [
{
u"messageId": u"c80e8021-a70a-42c7-a470-796e1186f753",
u"receiptHandle": u"AQEBJQ+/u6NsnT5t8Q/VbVxgdUl4TMKZ5FqhksRdIQvLBhwNvADoBxYSOVeCBXdnS9P+erlTtwEALHsnBXynkfPLH3BOUqmgzP25U8kl8eHzq6RAlzrSOfTO8ox9dcp6GLmW33YjO3zkq5VRYyQlJgLCiAZUpY2D4UQcE5D1Vm8RoKfbE+xtVaOctYeINjaQJ1u3mWx9T7tork3uAlOe1uyFjCWU5aPX/1OHhWCGi2EPPZj6vchNqDOJC/Y2k1gkivqCjz1CZl6FlZ7UVPOx3AMoszPuOYZ+Nuqpx2uCE2MHTtMHD8PVjlsWirt56oUr6JPp9aRGo6bitPIOmi4dX0FmuMKD6u/JnuZCp+AXtJVTmSHS8IXt/twsKU7A+fiMK01NtD5msNgVPoe9JbFtlGwvTQ==",
u"body": u"{\"foo\":\"bar\"}",
u"attributes": {
u"ApproximateReceiveCount": u"3",
u"SentTimestamp": u"1529104986221",
u"SenderId": u"594035263019",
u"ApproximateFirstReceiveTimestamp": u"1529104986230"
},
u"messageAttributes": {},
u"md5OfBody": u"9bb58f26192e4ba00f01e2e7b136bbd8",
u"eventSource": u"aws:sqs",
u"eventSourceARN": u"arn:aws:sqs:1",
u"awsRegion": u"us-east-1"
}
]
}
self.assertEqual("AWS SQS EVENT", lh.handler(event, None))
# Test Authorizer event
event = {u'authorizationToken': u'hubtoken1', u'methodArn': u'arn:aws:execute-api:us-west-2:1234:xxxxx/dev/GET/v1/endpoint/param', u'type': u'TOKEN'}
self.assertEqual("AUTHORIZER_EVENT", lh.handler(event, None))
# Ensure Zappa does return 401 if no function was defined.
lh.settings.AUTHORIZER_FUNCTION = None
with self.assertRaisesRegexp(Exception, 'Unauthorized'):
lh.handler(event, None)
# Unhandled event
event = {
u'Records': [
{
u'eventID': u'shardId-000000000000:49545115243490985018280067714973144582180062593244200961',
u'eventVersion': u'1.0',
u'kinesis': {
u'partitionKey': u'partitionKey-3',
u'data': u'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=',
u'kinesisSchemaVersion': u'1.0',
u'sequenceNumber': u'49545115243490985018280067714973144582180062593244200961'
},
u'eventSourceARN': u'bad:arn:1',
}
]
}
self.assertIsNone(lh.handler(event, None))
##
# CLI
##
@placebo_session
def test_cli_aws(self, session):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.api_key_required = True
zappa_cli.authorization_type = 'NONE'
zappa_cli.load_settings('test_settings.json', session)
zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
zappa_cli.deploy()
zappa_cli.update()
zappa_cli.rollback(1)
zappa_cli.tail(since=0, filter_pattern='', keep_open=False)
zappa_cli.schedule()
zappa_cli.unschedule()
zappa_cli.undeploy(no_confirm=True, remove_logs=True)
@placebo_session
def test_cli_aws_status(self, session):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json', session)
zappa_cli.api_stage = 'devor'
zappa_cli.lambda_name = 'baby-flask-devor'
zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
resp = zappa_cli.status()
##
# Let's Encrypt / ACME
##
##
# Django
##
##
# Util / Misc
##
@placebo_session
def test_add_event_source(self, session):
event_source = {'arn': 'blah:blah:blah:blah', 'events': [
"s3:ObjectCreated:*"
]}
# Sanity. This should fail.
try:
es = add_event_source(event_source, 'blah:blah:blah:blah', 'test_settings.callback', session)
self.fail("Success should have failed.")
except ValueError:
pass
event_source = {'arn': 's3:s3:s3:s3', 'events': [
"s3:ObjectCreated:*"
]}
add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True)
remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True)
# get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True)
@placebo_session
def test_cognito_trigger(self, session):
z = Zappa(session)
z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test')
@placebo_session
def test_cognito_trigger_existing(self, session):
z = Zappa(session)
z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test')
@placebo_session
def test_cli_cognito_triggers(self, session):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.api_key_required = True
zappa_cli.load_settings('test_settings.json', session)
zappa_cli.lambda_arn = 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test'
zappa_cli.update_cognito_triggers()
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
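# Illustrative bitcoin.conf contents this parser expects (values are made up):
#   rpcuser=bitcoinrpc
#   rpcpassword=secret
#   rpcport=7888
#   testnet=0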
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 17888 if testnet else 7888
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
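# Shape of the returned summary (the address key is illustrative):
#   {"1ExampleAddr...": {"total": <sum of unspent vout values>,
#                        "outputs": [<listunspent entries>],
#                        "account": "<account name or ''>"}}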
def select_coins(needed, inputs):
# Feel free to improve this; it is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
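# Illustrative run (amounts are made up): with unspent inputs of 0.5, 0.3 and 0.4 BTC and
# needed == 0.6, the greedy loop above takes the first two outputs (0.5 + 0.3 = 0.8) and
# returns them together with the change, 0.8 - 0.6 = 0.2.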
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
# compute the fee once; the size/amount checks below also need it
fee = total_in - total_out
if fee > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util functions for the numpy module."""
import ctypes
from ..util import is_np_array, is_np_shape
from ..base import _LIB, check_call, string_types, c_str_array
from ..base import c_handle_array, c_str, mx_uint, NDArrayHandle, py_str
from ..dlpack import ndarray_to_dlpack_for_read, ndarray_to_dlpack_for_write
from ..dlpack import ndarray_from_dlpack, ndarray_from_numpy
from ..numpy import ndarray, array
__all__ = ['save', 'load', 'to_dlpack_for_read', 'to_dlpack_for_write',
'from_dlpack', 'from_numpy']
def save(file, arr):
"""Saves a list of `ndarray`s or a dict of `str`->`ndarray` to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 support)
- ``hdfs://path/to/file`` (if compiled with HDFS support)
Parameters
----------
file : str
Filename to which the data is saved.
arr : `ndarray` or list of `ndarray`s or dict of `str` to `ndarray`
The data to be saved.
Notes
-----
This function can only be called within numpy semantics, i.e., `npx.is_np_shape()`
and `npx.is_np_array()` must both return true.
"""
if not (is_np_shape() and is_np_array()):
raise ValueError('Cannot save `mxnet.numpy.ndarray` in legacy mode. Please activate'
' numpy semantics by calling `npx.set_np()` in the global scope'
' before calling this function.')
if isinstance(arr, ndarray):
arr = [arr]
if isinstance(arr, dict):
str_keys = arr.keys()
nd_vals = arr.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, ndarray) for v in nd_vals):
raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(arr, list):
if any(not isinstance(v, ndarray) for v in arr):
raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
keys = None
handles = c_handle_array(arr)
else:
raise ValueError("data needs to either be a ndarray, dict of (str, ndarray) pairs "
"or a list of ndarrays.")
check_call(_LIB.MXNDArraySave(c_str(file),
mx_uint(len(handles)),
handles,
keys))
def load(file):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
file : str
The filename.
Returns
-------
result : list of ndarrays or dict of str -> ndarray
Data stored in the file.
Notes
-----
This function can only be called within numpy semantics, i.e., `npx.is_np_shape()`
and `npx.is_np_array()` must both return true.
"""
if not (is_np_shape() and is_np_array()):
raise ValueError('Cannot load `mxnet.numpy.ndarray` in legacy mode. Please activate'
' numpy semantics by calling `npx.set_np()` in the global scope'
' before calling this function.')
if not isinstance(file, string_types):
raise TypeError('file required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(file),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [ndarray(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), ndarray(NDArrayHandle(handles[i])))
for i in range(out_size.value))
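# A minimal round-trip sketch (the file name is made up; it assumes these helpers are
# exposed as npx.save / npx.load alongside the other npx utilities documented below,
# and that numpy semantics were activated with npx.set_np()):
#   a = mx.np.ones((2, 3))
#   npx.save('my_arrays', {'a': a})      # also accepts a single ndarray or a list
#   loaded = npx.load('my_arrays')       # dict of str -> ndarray (list in, list out)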
from_dlpack = ndarray_from_dlpack(ndarray)
from_dlpack_doc = """Returns a np.ndarray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
np.ndarray
an ndarray backed by a dlpack tensor
Examples
--------
>>> x = mx.np.ones((2,3))
>>> y = mx.npx.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.npx.from_dlpack(y)
>>> type(z)
<class 'mxnet.numpy.ndarray'>
>>> z
array([[1., 1., 1.],
[1., 1., 1.]])
>>> w = mx.npx.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.npx.from_dlpack(w)
>>> u += 1
>>> x
array([[2., 2., 2.],
[2., 2., 2.]])
"""
from_dlpack.__doc__ = from_dlpack_doc
from_numpy = ndarray_from_numpy(ndarray, array)
from_numpy_doc = """Returns an MXNet's np.ndarray backed by numpy's ndarray.
When `zero_copy` is set to be true,
this API consumes numpy's ndarray and produces MXNet's np.ndarray
without having to copy the content. In this case, we disallow
users to modify the given numpy ndarray, and it is suggested
not to read the numpy ndarray as well for internal correctness.
Parameters
----------
ndarray: np.ndarray
input data
zero_copy: bool
Whether we use DLPack's zero-copy conversion to convert to MXNet's
np.ndarray.
This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOUS] == True.
Returns
-------
np.ndarray
a np.ndarray backed by a dlpack tensor
"""
from_numpy.__doc__ = from_numpy_doc
to_dlpack_for_read = ndarray_to_dlpack_for_read()
to_dlpack_for_read_doc = """Returns a reference view of np.ndarray that represents
as DLManagedTensor until all previous write operations on the current array are finished.
Parameters
----------
data: np.ndarray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of ndarray that represents as DLManagedTensor.
Examples
--------
>>> x = mx.np.ones((2,3))
>>> y = mx.npx.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.npx.from_dlpack(y)
>>> z
array([[1., 1., 1.],
[1., 1., 1.]])
"""
to_dlpack_for_read.__doc__ = to_dlpack_for_read_doc
to_dlpack_for_write = ndarray_to_dlpack_for_write()
to_dlpack_for_write_doc = """Returns a reference view of ndarray that represents
as DLManagedTensor until all previous read/write operations on the current array are finished.
Parameters
----------
data: np.ndarray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of np.ndarray that represents as DLManagedTensor.
Examples
--------
>>> x = mx.np.ones((2,3))
>>> w = mx.npx.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.npx.from_dlpack(w)
>>> u += 1
>>> x
array([[2., 2., 2.],
[2., 2., 2.]])
"""
to_dlpack_for_write.__doc__ = to_dlpack_for_write_doc
|
|
#
# (c) Christian Sommerfeldt OEien
# All rights reserved
import pygame
import sys, code, re
from exceptions import Exception
from traceback import format_exc
from numbers import Complex
from functools import wraps
WHITE = 255, 255, 255
GRAY = 50, 50, 50
BLACK = 0, 0, 0
RED = 255, 0, 0
GREEN = 0, 255, 0
BLUE = 0, 0, 255
class ConsoleError(Exception): pass
class EvalError(Exception): pass
class Settings(): pass
settings = Settings()
settings.prompt_pen = GREEN, BLACK
settings.input_pen = WHITE, BLACK
settings.string_pen = BLUE, BLACK
settings.number_pen = WHITE, GRAY
settings.repr_pen = GREEN, BLACK
settings.error_pen = RED, BLACK
settings.pagescroll = 10
def trimmed_exc():
t = format_exc().split("\n")
a = []
for s in t[3:]:
m = re.match(r'\s+File ".+?",\s*', s)
a.append(s[len(m.group(0)):] if m else s)
return "\n".join(a)
class Expression:
def __init__(self, source):
self.source = source
def __call__(self, locals):
try: return eval(self.source, locals)
except Exception as e: raise EvalError(trimmed_exc())
class Statement(Expression):
def __call__(self, locals):
try: exec(self.source, locals)
except Exception as e: raise EvalError(trimmed_exc())
class CommentTracker:
def __init__(self, notation):
self.inside = False
self.enter, self.leave = notation
def see(self, c):
if c == self.enter: self.inside = True
if c == self.leave: self.inside = False
outside = property(lambda s: not s.inside)
class QuoteTracker:
def __init__(self, quote):
self.quote = quote
self.inside = False
def see(self, c):
if c == self.quote:
self.inside = not self.inside
outside = property(lambda s: not s.inside)
class LoneTracker:
def __init__(self, e):
self.e = e
self.i = 0
self.found = 0
def see(self, c):
if self.e == c:
if not self.found: self.found = self.i
elif self.found == self.i - 1: self.found = 0
self.i += 1
class ParensTracker:
def __init__(self, *pairs):
self.pairs = pairs
self.stack = []
def see(self, c):
for p in self.pairs:
if c == p[0]: self.stack.append(c)
elif c == p[1]:
if not self.stack:
raise ConsoleError("- nah %s" % (c,))
d = self.stack.pop()
if d != p[0]:
raise ConsoleError("%s nah %s" % (d, c))
class Line:
def __init__(self, s):
assert not "\n" in s
self.chars = list(s)
def __str__(self):
return "".join(["%s" % (c,) for c in self.chars] + ["\n"])
class Source:
def __init__(self):
self.lines = []
def push(self, s):
self.lines.append(Line(s))
def clear(self):
self.lines = []
def __str__(self):
return "".join(["%s" % (l,) for l in self.lines])
def compile(self):
s = str(self)
assert s[-1] == "\n"
if s.isspace(): raise ConsoleError("- yes ?")
squotes = QuoteTracker("'")
dquotes = QuoteTracker('"')
eqalone = LoneTracker("=")
parenth = ParensTracker("()", "[]", "{}")
comment = CommentTracker("#\n")
for i, c in enumerate(s):
comment.see(c)
if comment.outside:
if not squotes.inside: dquotes.see(c)
if not dquotes.inside: squotes.see(c)
if squotes.outside and dquotes.outside:
eqalone.see(c)
parenth.see(c)
if parenth.stack:
return
if s[-2] == "\n":
return Statement(s)
if re.match("(if|for|while|def|class) ", s):
return
if re.match("(pass|import|from) ", s):
return Statement(s)
return (Statement if eqalone.found else Expression)(s)
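# Decision summary for the checks above: open parentheses/brackets/braces keep the source
# incomplete (None is returned), a trailing blank line compiles as a Statement, an
# if/for/while/def/class header keeps collecting lines, pass/import/from compiles as a
# Statement, and otherwise a lone '=' outside strings and comments selects Statement,
# else Expression.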
repr_writer = None
def media_collect(m):
if isinstance(m, Icon): c = "\v"
else: return m
repr_writer.avlist.append(m)
return c
def media(f):
return wraps(f)(lambda self: media_collect(f(self)))
class List(list):
separator = classmethod(lambda c: ", ")
def __repr__(self):
a = []
train = False
for e in self:
w = Writer()
w.media_repr(e)
icon = w.render()
if icon:
if train: a.append(self.separator())
a.append(media_collect(icon))
train = True
return "".join(a)
class HorizontalList(List):
@classmethod
def separator(c):
return media_collect(PIPE)
class VerticalList(List):
@classmethod
def separator(c):
return "\n%c\n" % (media_collect(PIPE),)
class Icon(pygame.Surface):
pass
class Selfrepr:
@media
def __repr__(self):
return self
class PipeFactory(Icon):
WIDTH = 1
HEIGHT = 1
COLOR = (255, 255, 255)
def __init__(self):
#note: Icon is just a camouflage
self.get_size = lambda: (
PipeFactory.WIDTH, PipeFactory.HEIGHT)
@staticmethod
def horizontal(width):
s = Icon((width, PipeFactory.HEIGHT))
s.fill(PipeFactory.COLOR)
return s
@staticmethod
def vertical(height):
s = Icon((PipeFactory.WIDTH, height))
s.fill(PipeFactory.COLOR)
return s
PIPE = PipeFactory()
r = pygame.Rect((0, 0), (8, 8))
emptyrow_icon = Icon(r.size)
pygame.draw.rect(emptyrow_icon, GRAY, r, 2)
class Row:
def __init__(self, y):
self.x = 0
self.y = y
self.icons = []
self.score = []
@property
def width(self):
return sum(i.get_size()[0] for i in self.icons) \
if self.icons else 0
@property
def height(self):
return max(i.get_size()[1] for i in self.icons) \
if self.icons else 0
def put_icon(self, icon):
self.icons.append(icon)
def unput_icon(self):
icon = self.icons.pop()
a, b = icon.get_size()
rubber = Icon((a, b))
rubber.fill((0, 0, 0))
return rubber
def draw(self, surface):
x = self.x
for icon in self.icons:
if icon is PIPE:
icon = PIPE.vertical(self.height)
a, _ = icon.get_size()
surface.blit(icon, (x, self.y))
x += a
def displaced(self, m):
assert self.x == 0, "displaced original"
a, b = m
r = Row(self.y + b)
r.x = a #note: since original is asserted
r.icons = self.icons #note: same one list
return r
class Writer:
def __init__(self):
self.rows = [Row(0)]
self.ink = (210, 255, 210)
self.paper = (0, 40, 0)
self.font = pygame.font.SysFont("monospace", 36, bold = True)
self.avlist = None
row = property(lambda s: s.rows[-1])
put_icon = lambda s, v: s.row.put_icon(v)
def set_color(self, ink, paper):
self.ink = ink
self.paper = paper
def put_text(self, t):
for u in t:
if u == "\n": self.new_row()
else: self.put_icon(self.glyph(u))
def new_row(self):
if not self.row.icons:
self.put_icon(emptyrow_icon)
b = self.row.height
y = self.row.y + b
self.rows.append(Row(y))
def glyph(self, u):
s = self.font.render(u, 1, self.ink)
i = Icon(s.get_size())
i.fill(self.paper)
i.blit(s, (0, 0))
return i
def media_repr(self, value):
if str == type(value): #todo: unicode
self.set_color(*settings.string_pen)
self.put_text(value)
elif isinstance(value, Complex):
self.set_color(*settings.number_pen)
self.put_text(repr(value))
else:
if type(value) == list: value = HorizontalList(value)
if type(value) == tuple: value = VerticalList(value)
self.set_color(*settings.repr_pen)
global repr_writer
prev = repr_writer
repr_writer = self
self.avlist = []
self.zip_media(repr(value), self.avlist)
repr_writer = prev
def zip_media(self, s, b):
a = []
t = []
z = 0
for i, c in enumerate(s):
if c in "\v":
a.append(s[z:i])
t.append(c)
z = i + 1
a.append(s[z:])
self.put_text(a[0])
for j, m in enumerate(b):
if isinstance(m, Icon):
assert t[j] == "\v"
self.put_icon(m)
else: print("%r?" % (m,))
self.put_text(a[j + 1])
def render(self):
width = max(r.width for r in self.rows)
height = sum(r.height for r in self.rows)
if not (width and height): return
icon = Icon((width, height))
for r in self.rows:
if r.icons and r.icons[0] is PIPE:
r.icons[0] = PIPE.horizontal(width)
r.draw(icon)
return icon
|
|
#!/usr/bin/python
import json
import argparse
import sys
import os
import siphash
import time
import binascii
from random import randrange
pusher_cfg = {}
intf_to_port_number = {}
port_number_to_mac = {}
# Utility function to read from configuration file the VLL to create
def read_conf_file():
global pusher_cfg
print "*** Read Configuration File For Vll Pusher"
path = "vll_pusher.cfg"
if os.path.exists(path):
conf = open(path,'r')
pusher_cfg = json.load(conf)
conf.close()
else:
print "No Configuration File Find In %s" % path
sys.exit(-2)
for vll in pusher_cfg['vlls']:
vll['lhs_dpid'] = vll['lhs_dpid'].replace(":","")
vll['rhs_dpid'] = vll['rhs_dpid'].replace(":","")
for pw in pusher_cfg['pws']:
pw['lhs_dpid'] = pw['lhs_dpid'].replace(":","")
pw['rhs_dpid'] = pw['rhs_dpid'].replace(":","")
pw['lhs_mac'] = pw['lhs_mac'].replace(":","")
pw['rhs_mac'] = pw['rhs_mac'].replace(":","")
print "*** PUSHER_CFG", json.dumps(pusher_cfg, sort_keys=True, indent=4)
# Utility function for the vlls persistence
def store_vll(name, dpid, table):
# Store created vll attributes in local ./vlls.json
datetime = time.asctime()
vllParams = {'name': name, 'Dpid':dpid, 'datetime':datetime, 'table_id':table}
stro = json.dumps(vllParams)
vllsDb = open('./sr_vlls.json','a+')
vllsDb.write(stro+"\n")
vllsDb.close()
# Utility function for the pws persistence
def store_pw(name, dpid, table):
# Store created pw attributes in local ./pws.json
datetime = time.asctime()
pwParams = {'name': name, 'Dpid':dpid, 'datetime':datetime, 'table_id':table}
stro = json.dumps(pwParams)
pwsDb = open('./sr_pws.json','a+')
pwsDb.write(stro+"\n")
pwsDb.close()
# Utility function to translate interface names to port numbers and collect each port's MAC address
def retrieve_port_number_and_mac(controllerRestIP):
global intf_to_port_number
global port_number_to_mac
command = "curl -s http://%s/v1.0/topology/switches | python -mjson.tool" % (controllerRestIP)
result = os.popen(command).read()
parsedResult = json.loads(result)
default = None
for vll in pusher_cfg['vlls']:
lhs_intf = vll['lhs_intf']
lhs_dpid = vll['lhs_dpid']
port_number = intf_to_port_number.get("%s-%s" % (lhs_dpid, lhs_intf), default)
if port_number == None :
for switch in parsedResult:
if switch["dpid"] == lhs_dpid:
for port in switch["ports"]:
if port["name"] == lhs_intf:
port_number = str(port["port_no"])
intf_to_port_number["%s-%s" % (lhs_dpid, lhs_intf)] = port_number
vll['lhs_intf'] = port_number
rhs_intf = vll['rhs_intf']
rhs_dpid = vll['rhs_dpid']
port_number = intf_to_port_number.get("%s-%s" % (rhs_dpid, rhs_intf), default)
if port_number == None :
for switch in parsedResult:
if switch["dpid"] == rhs_dpid:
for port in switch["ports"]:
if port["name"] == rhs_intf:
port_number = str(port["port_no"])
intf_to_port_number["%s-%s" % (rhs_dpid, rhs_intf)] = port_number
vll['rhs_intf'] = port_number
for pw in pusher_cfg['pws']:
lhs_intf = pw['lhs_intf']
lhs_dpid = pw['lhs_dpid']
port_number = intf_to_port_number.get("%s-%s" % (lhs_dpid, lhs_intf), default)
if port_number == None :
for switch in parsedResult:
if switch["dpid"] == lhs_dpid:
for port in switch["ports"]:
if port["name"] == lhs_intf:
port_number = str(port["port_no"])
intf_to_port_number["%s-%s" % (lhs_dpid, lhs_intf)] = port_number
pw['lhs_intf'] = port_number
rhs_intf = pw['rhs_intf']
rhs_dpid = pw['rhs_dpid']
port_number = intf_to_port_number.get("%s-%s" % (rhs_dpid, rhs_intf), default)
if port_number == None :
for switch in parsedResult:
if switch["dpid"] == rhs_dpid:
for port in switch["ports"]:
if port["name"] == rhs_intf:
port_number = str(port["port_no"])
intf_to_port_number["%s-%s" % (rhs_dpid, rhs_intf)] = port_number
pw['rhs_intf'] = port_number
for switch in parsedResult:
for port in switch["ports"]:
dpid = str(port["dpid"])
port_number = str(port["port_no"])
port_number_to_mac["%s-%s"%(dpid, port_number)] = str(port["hw_addr"])
print "*** PUSHER_CFG", json.dumps(pusher_cfg, sort_keys=True, indent=4)
print "*** INTFS", json.dumps(intf_to_port_number, sort_keys=True, indent=4)
print "*** MACS", json.dumps(port_number_to_mac, sort_keys=True, indent=4)
# Add Vlls Reading All the Information From Configuration File
def add_command(args):
print "*** Add Vlls From Configuration File"
print "*** Read Previous Vlls Inserted"
if os.path.exists('./sr_vlls.json'):
vllsDb = open('./sr_vlls.json','r')
vlllines = vllsDb.readlines()
vllsDb.close()
else:
vlllines={}
if os.path.exists('./sr_pws.json'):
pwsDb = open('./sr_pws.json','r')
pwlines = pwsDb.readlines()
pwsDb.close()
else:
pwlines={}
read_conf_file()
# We use SipHash-2-4 with a fixed key for the name generation
key = '0123456789ABCDEF'
sip = siphash.SipHash_2_4(key)
# Extract from cmd line options the controlller information
controllerRestIp = args.controllerRestIp
# Dictionary that stores the mapping port:next_label
# We allocate labels using a counter, associating each port used in this execution with the next usable label
# Persistence for the labels could be added in the future
sw_port_label = {}
retrieve_port_number_and_mac(controllerRestIp)
# The 3 most significant bits of the MPLS label identify the SR-VLL TC
# 0x40000 -> 010|0 0000 0000 0000 0000
default_label_value = 262144
# 0x5FFFF -> 010|1 1111 1111 1111 1111
max_label_value = 393215
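# That leaves 0x5FFFF - 0x40000 + 1 = 131072 (2^17) label values available in this TC.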
# We can have more than one vlls
for vll in pusher_cfg['vlls']:
# Retrieve the information
srcSwitch = vll['lhs_dpid']
srcPort = vll['lhs_intf']
dstSwitch = vll['rhs_dpid']
dstPort = vll['rhs_intf']
srcLabel = vll['lhs_label']
dstLabel = vll['rhs_label']
print "*** Generate Name From VLL (%s-%s-%s) - (%s-%s-%s)" % (srcSwitch, srcPort, srcLabel, dstSwitch, dstPort, dstLabel)
sip.update(srcSwitch + "$" + srcPort + "$" + dstSwitch + "$" + dstPort + "$" + srcLabel + "$" + dstLabel)
# Generate the name
cookie = sip.hash()
cookie = str(cookie)
print "*** Vll Name", cookie
vllExists = False
# if the vll exists in the vllDb, we don't insert the flow
for line in vlllines:
data = json.loads(line)
if data['name']==(cookie):
print "Vll %s exists already Skip" % cookie
vllExists = True
break
if vllExists == True:
continue
print "*** Create Vll:"
print "*** From Source Device OSHI-PE %s Port %s" % (srcSwitch, srcPort)
print "*** To Destination Device OSHI-PE %s port %s"% (dstSwitch, dstPort)
# Retrieving route from source to destination
# using Routing rest API
command = "curl -s http://%s/v1.0/topology/route/%s/%s/%s/%s | python -mjson.tool" % (controllerRestIp, srcSwitch, srcPort, dstSwitch, dstPort)
result = os.popen(command).read()
parsedResult = json.loads(result)
print
#print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
# Dictionary used for store the label of current vll
temp_sw_port_label = {}
if int(srcLabel) > max_label_value or int(dstLabel) > max_label_value:
print "Ingress or Egress Label Not Allowable"
sys.exit(-2)
# We generate the labels associated for the Ingress and Egress Nodes
for j in range(0, (len(parsedResult))):
# Ingress Nodes
if j == 0 and len(parsedResult) > 2:
value = sw_port_label.get(srcSwitch, default_label_value)
temp_sw_port_label[srcSwitch] = int(value)
value = value + 1
sw_port_label[srcSwitch] = value
# Egress Nodes
elif j == (len(parsedResult)-1) and len(parsedResult) > 2:
value = sw_port_label.get(dstSwitch, default_label_value)
temp_sw_port_label[dstSwitch] = int(value)
value = value + 1
sw_port_label[dstSwitch] = value
print "*** Current Route Tag:"
print json.dumps(temp_sw_port_label, sort_keys=True, indent=4)
print
print "*** Global Routes Tag:"
print json.dumps(sw_port_label, sort_keys=True, indent=4)
print
# Manage the special case of one hop
if len(parsedResult) == 2:
print "*** One Hop Route"
# The Switch, where we insert the rule
ap1Dpid = parsedResult[0]['switch']
# In port
ap1Port = str(parsedResult[0]['port'])
# ap1Dpid == ap2Dpid
ap2Dpid = parsedResult[1]['switch']
# Out port
ap2Port = str(parsedResult[1]['port'])
# Forward's Rule
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"in_port\":\"%s\"}, \"actions\":[{\"type\":\"OUTPUT\", \"port\":\"%s\"}]}' http://%s/stats/flowentry/add" % (int(ap1Dpid, 16), cookie, pusher_cfg['tableIP'], int(ap1Port, 16), int(ap2Port, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
# Reverse Forward's Rule
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"in_port\":\"%s\"}, \"actions\":[{\"type\":\"OUTPUT\", \"port\":\"%s\"}]}' http://%s/stats/flowentry/add" % (int(ap1Dpid, 16), cookie, pusher_cfg['tableIP'], int(ap2Port, 16), int(ap1Port, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
store_vll(cookie, ap1Dpid, pusher_cfg['tableIP'])
# see the image one_hop for details on the switching label procedure
        elif len(parsedResult) > 2:
print "*** %s Hop Route" % (len(parsedResult)/2)
ingressDpid = parsedResult[0]['switch']
ingressPort = parsedResult[0]['port']
egressDpid = parsedResult[len(parsedResult)-1]['switch']
egressPort = parsedResult[len(parsedResult)-1]['port']
routeDpid = []
for i in range(0, (len(parsedResult)-1)):
if parsedResult[i]['switch'] not in routeDpid:
routeDpid.append(parsedResult[i]['switch'])
index_fw_middle = randrange(len(routeDpid))
            middlefwDpid = routeDpid[index_fw_middle]
            print "*** Forward Path: %s -> %s -> %s" % (ingressDpid, middlefwDpid, egressDpid)
labelfw1 = temp_sw_port_label[egressDpid]
labelfw2 = get_vll_label_from_dpid(egressDpid)
labelfw3 = get_vll_label_from_dpid(middlefwDpid)
print "*** Forward Path Label Stack: |%s|%s|%s|" %(labelfw1, labelfw2, labelfw3)
index_rv_middle = randrange(len(routeDpid))
            middlervDpid = routeDpid[index_rv_middle]
            print "*** Reverse Path: %s -> %s -> %s" % (egressDpid, middlervDpid, ingressDpid)
labelrv1 = temp_sw_port_label[ingressDpid]
labelrv2 = get_vll_label_from_dpid(ingressDpid)
labelrv3 = get_vll_label_from_dpid(middlervDpid)
print "*** Reverse Path Label Stack: |%s|%s|%s|" %(labelrv1, labelrv2, labelrv3)
print
print "*** Install Ingress Rules (FW) - LHS"
# Ingress Rule For IP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"in_port\":\"%s\", \"eth_type\":\"%s\"}, \"actions\":[{\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"GOTO_TABLE\", \"table_id\":%d}]}' http://%s/stats/flowentry/add" % (int(ingressDpid, 16), cookie, pusher_cfg['tableIP'], int(ingressPort, 16), "2048", "34887", labelfw1, "34887", labelfw2, "34887", labelfw3, pusher_cfg['tableSBP'], controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
# Ingress Rule For ARP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"in_port\":\"%s\", \"eth_type\":\"%s\"}, \"actions\":[{\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"GOTO_TABLE\", \"table_id\":%d}]}' http://%s/stats/flowentry/add" % (int(ingressDpid, 16), cookie, pusher_cfg['tableIP'], int(ingressPort, 16), "2054", "34888", labelfw1, "34888", labelfw2, "34888", labelfw3, pusher_cfg['tableSBP'], controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
store_vll(cookie, ingressDpid, pusher_cfg['tableIP'])
print "Install Egress Rules (RV) - LHS"
# Rule For IP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"eth_type\":\"%s\", \"mpls_label\":\"%s\", \"mpls_bos\":\"1\"}, \"actions\":[{\"type\":\"POP_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"OUTPUT\", \"port\":\"%s\"}]}' http://%s/stats/flowentry/add" % (int(ingressDpid, 16), cookie, pusher_cfg['tableSBP'], "34887", labelrv1, "2048", int(ingressPort, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
# Rule For ARP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"eth_type\":\"%s\", \"mpls_label\":\"%s\", \"mpls_bos\":\"1\"}, \"actions\":[{\"type\":\"POP_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"OUTPUT\", \"port\":\"%s\"}]}' http://%s/stats/flowentry/add" % (int(ingressDpid, 16), cookie, pusher_cfg['tableSBP'], "34888", labelrv1, "2054", int(ingressPort, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
store_vll(cookie, ingressDpid, pusher_cfg['tableSBP'])
print "*** Install Ingress Rules (RV) - RHS"
# Ingress Rule For IP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"in_port\":\"%s\", \"eth_type\":\"%s\"}, \"actions\":[{\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"GOTO_TABLE\", \"table_id\":%d}]}' http://%s/stats/flowentry/add" % (int(egressDpid, 16), cookie, pusher_cfg['tableIP'], int(egressPort, 16), "2048", "34887", labelrv1, "34887", labelrv2, "34887", labelrv3, pusher_cfg['tableSBP'], controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
# Ingress Rule For ARP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"in_port\":\"%s\", \"eth_type\":\"%s\"}, \"actions\":[{\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"PUSH_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"SET_FIELD\", \"field\":\"mpls_label\", \"value\":%s}, {\"type\":\"GOTO_TABLE\", \"table_id\":%d}]}' http://%s/stats/flowentry/add" % (int(egressDpid, 16), cookie, pusher_cfg['tableIP'], int(egressPort, 16), "2054", "34888", labelrv1, "34888", labelrv2, "34888", labelrv3, pusher_cfg['tableSBP'], controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
store_vll(cookie, egressDpid, pusher_cfg['tableIP'])
print "Install Egress Rules (RV) - RHS"
# Rule For IP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"eth_type\":\"%s\", \"mpls_label\":\"%s\", \"mpls_bos\":\"1\"}, \"actions\":[{\"type\":\"POP_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"OUTPUT\", \"port\":\"%s\"}]}' http://%s/stats/flowentry/add" % (int(egressDpid, 16), cookie, pusher_cfg['tableSBP'], "34887", labelfw1, "2048", int(egressPort, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
# Rule For ARP
command = "curl -s -d '{\"dpid\": \"%s\", \"cookie\":\"%s\", \"priority\":\"32768\", \"table_id\":%d, \"match\":{\"eth_type\":\"%s\", \"mpls_label\":\"%s\", \"mpls_bos\":\"1\"}, \"actions\":[{\"type\":\"POP_MPLS\", \"ethertype\":\"%s\"}, {\"type\":\"OUTPUT\", \"port\":\"%s\"}]}' http://%s/stats/flowentry/add" % (int(egressDpid, 16), cookie, pusher_cfg['tableSBP'], "34888", labelfw1, "2054", int(egressPort, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
store_vll(cookie, egressDpid, pusher_cfg['tableSBP'])
print
def get_vll_label_from_dpid(dpid):
LABEL_MASK=0x0FFFF
LABEL_VLL=0x080000
temp = dpid.replace(":","")
temp = temp[8:]
loopback = int(temp,16)
label = (loopback & LABEL_MASK) | LABEL_VLL
return label
def get_pw_label_from_dpid(dpid):
LABEL_MASK=0x0FFFF
LABEL_PW=0x090000
temp = dpid.replace(":","")
temp = temp[8:]
loopback = int(temp,16)
label = (loopback & LABEL_MASK) | LABEL_PW
return label
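# Illustrative sketch (not part of the original script): how the two helpers
# above turn a dpid into an MPLS label. The sample dpid is an assumption about
# the format returned by the topology REST API (16 hex digits, ':'-separated);
# only the low 16 bits of the loopback half survive LABEL_MASK, while the
# 0x08/0x09 prefix distinguishes VLL labels from PW labels.
def _label_helpers_example():
    sample_dpid = "00:00:00:0a:00:00:00:05"  # hypothetical dpid
    vll_label = get_vll_label_from_dpid(sample_dpid)  # 0x080000 | 0x0005 = 524293
    pw_label = get_pw_label_from_dpid(sample_dpid)    # 0x090000 | 0x0005 = 589829
    return vll_label, pw_label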
def del_command(data):
print "*** Delete Saved Vlls and PWs"
print "*** Read Previous Vlls Inserted"
if os.path.exists('sr_vlls.json'):
vllsDb = open('sr_vlls.json','r')
lines = vllsDb.readlines()
vllsDb.close()
vllsDb = open('sr_vlls.json','w')
# Removing previously created flow from switches
# using StaticFlowPusher rest API
        # previously created vlls are recorded in the local file sr_vlls.json
        # with the vll name (cookie), the switch dpid and the flow table id
controllerRestIp = args.controllerRestIp
for line in lines:
data = json.loads(line)
sw = data['Dpid']
cookie = data['name']
table = data['table_id']
print "*** Deleting Vll: %s - Switch %s" % (cookie, sw)
command = "curl -s -d '{\"cookie\":\"%s\", \"cookie_mask\":\"%s\", \"table_id\":%d, \"dpid\":\"%s\"}' http://%s/stats/flowentry/delete 2> /dev/null" % (cookie, (-1 & 0xFFFFFFFFFFFFFFFF), table, int(sw, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
vllsDb.close()
else:
lines={}
print "*** No Vlls Inserted"
#return
print "*** Read Previous Pws Inserted"
if os.path.exists('sr_pws.json'):
pwsDb = open('sr_pws.json','r')
lines = pwsDb.readlines()
pwsDb.close()
pwsDb = open('sr_pws.json','w')
# Removing previously created flow from switches
# using StaticFlowPusher rest API
        # previously created pws are recorded in the local file sr_pws.json
        # with the pw name (cookie), the switch dpid and the flow table id
controllerRestIp = args.controllerRestIp
for line in lines:
data = json.loads(line)
sw = data['Dpid']
cookie = data['name']
table = data['table_id']
print "*** Deleting Pw: %s - Switch %s" % (cookie, sw)
command = "curl -s -d '{\"cookie\":\"%s\", \"cookie_mask\":\"%s\", \"table_id\":%d, \"dpid\":\"%s\"}' http://%s/stats/flowentry/delete 2> /dev/null" % (cookie, (-1 & 0xFFFFFFFFFFFFFFFF), table, int(sw, 16), controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
pwsDb.close()
else:
lines={}
print "*** No Pws Inserted"
#return
def run_command(data):
if args.action == 'add':
add_command(data)
elif args.action == 'delete':
del_command(data)
def parse_cmd_line():
parser = argparse.ArgumentParser(description='Segment Routing Virtual Leased Line Pusher')
parser.add_argument('--controller', dest='controllerRestIp', action='store', default='localhost:8080', help='controller IP:RESTport, e.g., localhost:8080 or A.B.C.D:8080')
parser.add_argument('--add', dest='action', action='store_const', const='add', default='add', help='action: add')
parser.add_argument('--delete', dest='action', action='store_const', const='delete', default='add', help='action: delete')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
return args
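# Sketch only (not used by the script above): every REST call here shells out
# to curl via os.popen. Under the assumption that the controller exposes the
# same /stats/flowentry/add endpoint used in add_command(), the POST could be
# issued directly from Python 2 with urllib2, for example:
def post_flow_entry(controllerRestIp, flow_entry):
    """POST a flow-entry dict to the controller REST API and return the raw response."""
    import urllib2
    url = "http://%s/stats/flowentry/add" % controllerRestIp
    request = urllib2.Request(url, json.dumps(flow_entry),
                              {"Content-Type": "application/json"})
    return urllib2.urlopen(request).read()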
if __name__ == '__main__':
args = parse_cmd_line()
run_command(args)
|
|
import torch
from torch.nn.parameter import Parameter
from torch.autograd import Variable, Function
import torch.nn as nn
import numpy as np
def _norm(x, dim, p=2):
"""Computes the norm over all dimensions except dim"""
if p == -1:
func = lambda x, dim: x.max(dim=dim)[0] - x.min(dim=dim)[0]
elif p == float('inf'):
func = lambda x, dim: x.max(dim=dim)[0]
else:
func = lambda x, dim: torch.norm(x, dim=dim, p=p)
if dim is None:
return x.norm(p=p)
elif dim == 0:
output_size = (x.size(0),) + (1,) * (x.dim() - 1)
return func(x.contiguous().view(x.size(0), -1), 1).view(*output_size)
elif dim == x.dim() - 1:
output_size = (1,) * (x.dim() - 1) + (x.size(-1),)
return func(x.contiguous().view(-1, x.size(-1)), 0).view(*output_size)
else:
        return _norm(x.transpose(0, dim), 0, p=p).transpose(0, dim)
def _mean(p, dim):
"""Computes the mean over all dimensions except dim"""
if dim is None:
return p.mean()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).mean(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).mean(dim=0).view(*output_size)
else:
return _mean(p.transpose(0, dim), 0).transpose(0, dim)
def _std(p, dim):
"""Computes the mean over all dimensions except dim"""
if dim is None:
return p.std()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).std(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).std(dim=0).view(*output_size)
else:
return _std(p.transpose(0, dim), 0).transpose(0, dim)
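# Minimal usage sketch for the reduction helpers above (illustration only, not
# part of the original module): for a tensor of shape
# (out_channels, in_channels, kH, kW), dim=0 yields one statistic per output
# channel, broadcast back to shape (out_channels, 1, 1, 1).
def _reduction_helpers_example():
    w = torch.randn(8, 3, 3, 3)
    per_channel_l2 = _norm(w, dim=0, p=2)  # shape (8, 1, 1, 1)
    per_channel_mean = _mean(w, dim=0)     # shape (8, 1, 1, 1)
    per_channel_std = _std(w, dim=0)       # shape (8, 1, 1, 1)
    return per_channel_l2, per_channel_mean, per_channel_std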
# L2
class LpBatchNorm2d(nn.Module):
# This is L2 Baseline
def __init__(self, num_features, dim=1, p=2, momentum=0.1, bias=True, eps=1e-5, noise=False):
super(LpBatchNorm2d, self).__init__()
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.zeros(num_features))
self.momentum = momentum
self.dim = dim
self.noise = noise
self.p = p
self.eps = eps
self.bias = Parameter(torch.Tensor(num_features))
self.weight = Parameter(torch.Tensor(num_features))
def forward(self, x):
p = self.p
if self.training:
mean = x.view(x.size(0), x.size(self.dim), -1).mean(-1).mean(0)
y = x.transpose(0, 1)
z = y.contiguous()
t = z.view(z.size(0), -1)
Var = (torch.abs((t.transpose(1, 0) - mean))**p).mean(0)
scale = (Var + self.eps)**(-1 / p)
self.running_mean.mul_(self.momentum).add_(
mean.data * (1 - self.momentum))
self.running_var.mul_(self.momentum).add_(
scale.data * (1 - self.momentum))
else:
mean = torch.autograd.Variable(self.running_mean)
scale = torch.autograd.Variable(self.running_var)
out = (x - mean.view(1, mean.size(0), 1, 1)) * \
scale.view(1, scale.size(0), 1, 1)
if self.noise and self.training:
std = 0.1 * _std(x, self.dim).data
ones = torch.ones_like(x.data)
std_noise = Variable(torch.normal(ones, ones) * std)
out = out * std_noise
if self.weight is not None:
out = out * self.weight.view(1, self.weight.size(0), 1, 1)
if self.bias is not None:
out = out + self.bias.view(1, self.bias.size(0), 1, 1)
return out
class TopkBatchNorm2d(nn.Module):
# this is normalized L_inf
def __init__(self, num_features, k=10, dim=1, momentum=0.1, bias=True, eps=1e-5, noise=False):
super(TopkBatchNorm2d, self).__init__()
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.zeros(num_features))
self.momentum = momentum
self.dim = dim
self.noise = noise
self.k = k
self.eps = eps
self.bias = Parameter(torch.Tensor(num_features))
self.weight = Parameter(torch.Tensor(num_features))
def forward(self, x):
if self.training:
mean = x.view(x.size(0), x.size(self.dim), -1).mean(-1).mean(0)
y = x.transpose(0, 1)
z = y.contiguous()
t = z.view(z.size(0), -1)
A = torch.abs(t.transpose(1, 0) - mean)
const = 0.5 * (1 + (np.pi * np.log(4)) ** 0.5) / \
((2 * np.log(A.size(0))) ** 0.5)
MeanTOPK = (torch.topk(A, self.k, dim=0)[0].mean(0)) * const
scale = 1 / (MeanTOPK + self.eps)
self.running_mean.mul_(self.momentum).add_(
mean.data * (1 - self.momentum))
self.running_var.mul_(self.momentum).add_(
scale.data * (1 - self.momentum))
else:
mean = torch.autograd.Variable(self.running_mean)
scale = torch.autograd.Variable(self.running_var)
out = (x - mean.view(1, mean.size(0), 1, 1)) * \
scale.view(1, scale.size(0), 1, 1)
if self.noise and self.training:
std = 0.1 * _std(x, self.dim).data
ones = torch.ones_like(x.data)
std_noise = Variable(torch.normal(ones, ones) * std)
out = out * std_noise
if self.weight is not None:
out = out * self.weight.view(1, self.weight.size(0), 1, 1)
if self.bias is not None:
out = out + self.bias.view(1, self.bias.size(0), 1, 1)
return out
# Top10
class GhostTopkBatchNorm2d(nn.Module):
# This is normalized Top10 batch norm
def __init__(self, num_features, k=10, dim=1, momentum=0.1, bias=True, eps=1e-5, beta=0.75, noise=False):
super(GhostTopkBatchNorm2d, self).__init__()
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.zeros(num_features))
self.momentum = momentum
self.dim = dim
        self.register_buffer('biasTOPK', torch.zeros(num_features))
self.noise = noise
self.k = k
        self.beta = beta
self.eps = eps
self.bias = Parameter(torch.Tensor(num_features))
self.weight = Parameter(torch.Tensor(num_features))
def forward(self, x):
# p=5
if self.training:
mean = x.view(x.size(0), x.size(self.dim), -1).mean(-1).mean(0)
y = x.transpose(0, 1)
z = y.contiguous()
t = z.view(z.size(0), -1)
A = torch.abs(t.transpose(1, 0) - mean)
            beta = self.beta
MeanTOPK = torch.topk(A, self.k, dim=0)[0].mean(0)
            meanTOPK = beta * Variable(self.biasTOPK) + (1 - beta) * MeanTOPK
const = 0.5 * (1 + (np.pi * np.log(4)) ** 0.5) / \
((2 * np.log(A.size(0))) ** 0.5)
meanTOPK = meanTOPK * const
# print(self.biasTOPK)
self.biasTOPK.copy_(meanTOPK.data)
# self.biasTOPK = MeanTOPK.data
scale = 1 / (meanTOPK + self.eps)
self.running_mean.mul_(self.momentum).add_(
mean.data * (1 - self.momentum))
self.running_var.mul_(self.momentum).add_(
scale.data * (1 - self.momentum))
else:
mean = torch.autograd.Variable(self.running_mean)
scale = torch.autograd.Variable(self.running_var)
out = (x - mean.view(1, mean.size(0), 1, 1)) * \
scale.view(1, scale.size(0), 1, 1)
# out = (x - mean.view(1, mean.size(0), 1, 1)) * final_scale.view(1, scale.size(0), 1, 1)
if self.noise and self.training:
std = 0.1 * _std(x, self.dim).data
ones = torch.ones_like(x.data)
std_noise = Variable(torch.normal(ones, ones) * std)
out = out * std_noise
if self.weight is not None:
out = out * self.weight.view(1, self.weight.size(0), 1, 1)
if self.bias is not None:
out = out + self.bias.view(1, self.bias.size(0), 1, 1)
return out
# L1
class L1BatchNorm2d(nn.Module):
    # This is normalized L1 batch norm; note the normalization term ((np.pi / 2) ** 0.5) applied to Var:
# scale = ((Var * (np.pi / 2) ** 0.5) + self.eps) ** (-1)
"""docstring for L1BatchNorm2d."""
def __init__(self, num_features, dim=1, momentum=0.1, bias=True, normalized=True, eps=1e-5, noise=False):
super(L1BatchNorm2d, self).__init__()
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.zeros(num_features))
self.momentum = momentum
self.dim = dim
self.noise = noise
self.bias = Parameter(torch.Tensor(num_features))
self.weight = Parameter(torch.Tensor(num_features))
self.eps = eps
if normalized:
self.weight_fix = (np.pi / 2) ** 0.5
else:
self.weight_fix = 1
def forward(self, x):
p = 1
if self.training:
mean = x.view(x.size(0), x.size(self.dim), -1).mean(-1).mean(0)
y = x.transpose(0, 1)
z = y.contiguous()
t = z.view(z.size(0), -1)
Var = (torch.abs((t.transpose(1, 0) - mean))).mean(0)
scale = (Var * self.weight_fix + self.eps) ** (-1)
self.running_mean.mul_(self.momentum).add_(
mean.data * (1 - self.momentum))
self.running_var.mul_(self.momentum).add_(
scale.data * (1 - self.momentum))
else:
mean = torch.autograd.Variable(self.running_mean)
scale = torch.autograd.Variable(self.running_var)
out = (x - mean.view(1, mean.size(0), 1, 1)) * \
scale.view(1, scale.size(0), 1, 1)
if self.noise and self.training:
std = 0.1 * _std(x, self.dim).data
ones = torch.ones_like(x.data)
std_noise = Variable(torch.normal(ones, ones) * std)
out = out * std_noise
if self.weight is not None:
out = out * self.weight.view(1, self.weight.size(0), 1, 1)
if self.bias is not None:
out = out + self.bias.view(1, self.bias.size(0), 1, 1)
return out
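# Usage sketch (an assumption about the intended calling convention, not part
# of the original module): `weight` and `bias` are created as uninitialized
# Parameters above, so a caller is expected to fill them before training.
def _l1_batchnorm_example():
    bn = L1BatchNorm2d(num_features=16)
    bn.weight.data.fill_(1.0)
    bn.bias.data.fill_(0.0)
    bn.train()
    x = torch.randn(8, 16, 32, 32)
    y = bn(x)  # normalized with the (pi / 2) ** 0.5 corrected L1 statistic
    return y.shape  # torch.Size([8, 16, 32, 32])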
|
|
# -*- coding: utf-8 -*-
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph.graphicsItems.GraphicsObject import GraphicsObject
import pyqtgraph.functions as fn
from .Terminal import *
from pyqtgraph.pgcollections import OrderedDict
from pyqtgraph.debug import *
import numpy as np
from .eq import *
def strDict(d):
return dict([(str(k), v) for k, v in d.items()])
class Node(QtCore.QObject):
"""
Node represents the basic processing unit of a flowchart.
A Node subclass implements at least:
    1) A list of input / output terminals and their properties
2) a process() function which takes the names of input terminals as keyword arguments and returns a dict with the names of output terminals as keys.
A flowchart thus consists of multiple instances of Node subclasses, each of which is connected
    to others by wires between their terminals. A flowchart is, itself, also a special subclass of Node.
This allows Nodes within the flowchart to connect to the input/output nodes of the flowchart itself.
    Optionally, a node class can implement the ctrlWidget() method, which must return a QWidget (usually containing other widgets) that will be displayed in the flowchart control panel. Some nodes implement fairly complex control widgets, but most nodes follow a simple form-like pattern: a list of parameter names and a single value (represented as a spin box, check box, etc.) for each parameter. To make this easier, the CtrlNode subclass allows you to instead define a simple data structure that CtrlNode will use to automatically generate the control widget. A minimal Node subclass sketch is shown after this class definition.
    """
sigOutputChanged = QtCore.Signal(object) # self
sigClosed = QtCore.Signal(object)
sigRenamed = QtCore.Signal(object, object)
sigTerminalRenamed = QtCore.Signal(object, object) # term, oldName
sigTerminalAdded = QtCore.Signal(object, object) # self, term
sigTerminalRemoved = QtCore.Signal(object, object) # self, term
def __init__(self, name, terminals=None, allowAddInput=False, allowAddOutput=False, allowRemove=True):
"""
============== ============================================================
Arguments
name The name of this specific node instance. It can be any
string, but must be unique within a flowchart. Usually,
we simply let the flowchart decide on a name when calling
Flowchart.addNode(...)
terminals Dict-of-dicts specifying the terminals present on this Node.
Terminal specifications look like::
'inputTerminalName': {'io': 'in'}
'outputTerminalName': {'io': 'out'}
There are a number of optional parameters for terminals:
multi, pos, renamable, removable, multiable, bypass. See
the Terminal class for more information.
allowAddInput bool; whether the user is allowed to add inputs by the
context menu.
allowAddOutput bool; whether the user is allowed to add outputs by the
context menu.
allowRemove bool; whether the user is allowed to remove this node by the
context menu.
============== ============================================================
"""
QtCore.QObject.__init__(self)
self._name = name
self._bypass = False
self.bypassButton = None ## this will be set by the flowchart ctrl widget..
self._graphicsItem = None
self.terminals = OrderedDict()
self._inputs = OrderedDict()
self._outputs = OrderedDict()
self._allowAddInput = allowAddInput ## flags to allow the user to add/remove terminals
self._allowAddOutput = allowAddOutput
self._allowRemove = allowRemove
self.exception = None
if terminals is None:
return
for name, opts in terminals.items():
self.addTerminal(name, **opts)
def nextTerminalName(self, name):
"""Return an unused terminal name"""
name2 = name
i = 1
while name2 in self.terminals:
name2 = "%s.%d" % (name, i)
i += 1
return name2
def addInput(self, name="Input", **args):
"""Add a new input terminal to this Node with the given name. Extra
keyword arguments are passed to Terminal.__init__.
This is a convenience function that just calls addTerminal(io='in', ...)"""
#print "Node.addInput called."
return self.addTerminal(name, io='in', **args)
def addOutput(self, name="Output", **args):
"""Add a new output terminal to this Node with the given name. Extra
keyword arguments are passed to Terminal.__init__.
This is a convenience function that just calls addTerminal(io='out', ...)"""
return self.addTerminal(name, io='out', **args)
def removeTerminal(self, term):
"""Remove the specified terminal from this Node. May specify either the
terminal's name or the terminal itself.
Causes sigTerminalRemoved to be emitted."""
if isinstance(term, Terminal):
name = term.name()
else:
name = term
term = self.terminals[name]
#print "remove", name
#term.disconnectAll()
term.close()
del self.terminals[name]
if name in self._inputs:
del self._inputs[name]
if name in self._outputs:
del self._outputs[name]
self.graphicsItem().updateTerminals()
self.sigTerminalRemoved.emit(self, term)
def terminalRenamed(self, term, oldName):
"""Called after a terminal has been renamed
Causes sigTerminalRenamed to be emitted."""
newName = term.name()
for d in [self.terminals, self._inputs, self._outputs]:
if oldName not in d:
continue
d[newName] = d[oldName]
del d[oldName]
self.graphicsItem().updateTerminals()
self.sigTerminalRenamed.emit(term, oldName)
def addTerminal(self, name, **opts):
"""Add a new terminal to this Node with the given name. Extra
keyword arguments are passed to Terminal.__init__.
Causes sigTerminalAdded to be emitted."""
name = self.nextTerminalName(name)
term = Terminal(self, name, **opts)
self.terminals[name] = term
if term.isInput():
self._inputs[name] = term
elif term.isOutput():
self._outputs[name] = term
self.graphicsItem().updateTerminals()
self.sigTerminalAdded.emit(self, term)
return term
def inputs(self):
"""Return dict of all input terminals.
Warning: do not modify."""
return self._inputs
def outputs(self):
"""Return dict of all output terminals.
Warning: do not modify."""
return self._outputs
def process(self, **kargs):
"""Process data through this node. This method is called any time the flowchart
wants the node to process data. It will be called with one keyword argument
corresponding to each input terminal, and must return a dict mapping the name
of each output terminal to its new value.
This method is also called with a 'display' keyword argument, which indicates
whether the node should update its display (if it implements any) while processing
this data. This is primarily used to disable expensive display operations
during batch processing.
"""
return {}
def graphicsItem(self):
"""Return the GraphicsItem for this node. Subclasses may re-implement
this method to customize their appearance in the flowchart."""
if self._graphicsItem is None:
self._graphicsItem = NodeGraphicsItem(self)
return self._graphicsItem
## this is just bad planning. Causes too many bugs.
def __getattr__(self, attr):
"""Return the terminal with the given name"""
if attr not in self.terminals:
raise AttributeError(attr)
else:
import traceback
traceback.print_stack()
print("Warning: use of node.terminalName is deprecated; use node['terminalName'] instead.")
return self.terminals[attr]
def __getitem__(self, item):
#return getattr(self, item)
"""Return the terminal with the given name"""
if item not in self.terminals:
raise KeyError(item)
else:
return self.terminals[item]
def name(self):
"""Return the name of this node."""
return self._name
def rename(self, name):
"""Rename this node. This will cause sigRenamed to be emitted."""
oldName = self._name
self._name = name
#self.emit(QtCore.SIGNAL('renamed'), self, oldName)
self.sigRenamed.emit(self, oldName)
def dependentNodes(self):
"""Return the list of nodes which provide direct input to this node"""
nodes = set()
for t in self.inputs().values():
nodes |= set([i.node() for i in t.inputTerminals()])
return nodes
#return set([t.inputTerminals().node() for t in self.listInputs().itervalues()])
def __repr__(self):
return "<Node %s @%x>" % (self.name(), id(self))
def ctrlWidget(self):
"""Return this Node's control widget.
By default, Nodes have no control widget. Subclasses may reimplement this
method to provide a custom widget. This method is called by Flowcharts
when they are constructing their Node list."""
return None
def bypass(self, byp):
"""Set whether this node should be bypassed.
When bypassed, a Node's process() method is never called. In some cases,
data is automatically copied directly from specific input nodes to
output nodes instead (see the bypass argument to Terminal.__init__).
This is usually called when the user disables a node from the flowchart
control panel.
"""
self._bypass = byp
if self.bypassButton is not None:
self.bypassButton.setChecked(byp)
self.update()
def isBypassed(self):
"""Return True if this Node is currently bypassed."""
return self._bypass
def setInput(self, **args):
"""Set the values on input terminals. For most nodes, this will happen automatically through Terminal.inputChanged.
This is normally only used for nodes with no connected inputs."""
changed = False
for k, v in args.items():
term = self._inputs[k]
oldVal = term.value()
if not eq(oldVal, v):
changed = True
term.setValue(v, process=False)
if changed and '_updatesHandled_' not in args:
self.update()
def inputValues(self):
"""Return a dict of all input values currently assigned to this node."""
vals = {}
for n, t in self.inputs().items():
vals[n] = t.value()
return vals
def outputValues(self):
"""Return a dict of all output values currently generated by this node."""
vals = {}
for n, t in self.outputs().items():
vals[n] = t.value()
return vals
def connected(self, localTerm, remoteTerm):
"""Called whenever one of this node's terminals is connected elsewhere."""
pass
def disconnected(self, localTerm, remoteTerm):
"""Called whenever one of this node's terminals is disconnected from another."""
pass
def update(self, signal=True):
"""Collect all input values, attempt to process new output values, and propagate downstream.
        Subclasses should call update() whenever their internal state has changed
(such as when the user interacts with the Node's control widget). Update
is automatically called when the inputs to the node are changed.
"""
vals = self.inputValues()
#print " inputs:", vals
try:
if self.isBypassed():
out = self.processBypassed(vals)
else:
out = self.process(**strDict(vals))
#print " output:", out
if out is not None:
if signal:
self.setOutput(**out)
else:
self.setOutputNoSignal(**out)
for n,t in self.inputs().items():
t.setValueAcceptable(True)
self.clearException()
except:
#printExc( "Exception while processing %s:" % self.name())
for n,t in self.outputs().items():
t.setValue(None)
self.setException(sys.exc_info())
if signal:
#self.emit(QtCore.SIGNAL('outputChanged'), self) ## triggers flowchart to propagate new data
self.sigOutputChanged.emit(self) ## triggers flowchart to propagate new data
def processBypassed(self, args):
"""Called when the flowchart would normally call Node.process, but this node is currently bypassed.
The default implementation looks for output terminals with a bypass connection and returns the
corresponding values. Most Node subclasses will _not_ need to reimplement this method."""
result = {}
for term in list(self.outputs().values()):
byp = term.bypassValue()
if byp is None:
result[term.name()] = None
else:
result[term.name()] = args.get(byp, None)
return result
def setOutput(self, **vals):
self.setOutputNoSignal(**vals)
#self.emit(QtCore.SIGNAL('outputChanged'), self) ## triggers flowchart to propagate new data
self.sigOutputChanged.emit(self) ## triggers flowchart to propagate new data
def setOutputNoSignal(self, **vals):
for k, v in vals.items():
term = self.outputs()[k]
term.setValue(v)
#targets = term.connections()
#for t in targets: ## propagate downstream
#if t is term:
#continue
#t.inputChanged(term)
term.setValueAcceptable(True)
def setException(self, exc):
self.exception = exc
self.recolor()
def clearException(self):
self.setException(None)
def recolor(self):
if self.exception is None:
self.graphicsItem().setPen(QtGui.QPen(QtGui.QColor(0, 0, 0)))
else:
self.graphicsItem().setPen(QtGui.QPen(QtGui.QColor(150, 0, 0), 3))
def saveState(self):
"""Return a dictionary representing the current state of this node
(excluding input / output values). This is used for saving/reloading
flowcharts. The default implementation returns this Node's position,
bypass state, and information about each of its terminals.
Subclasses may want to extend this method, adding extra keys to the returned
dict."""
pos = self.graphicsItem().pos()
state = {'pos': (pos.x(), pos.y()), 'bypass': self.isBypassed()}
termsEditable = self._allowAddInput | self._allowAddOutput
        for term in list(self._inputs.values()) + list(self._outputs.values()):
termsEditable |= term._renamable | term._removable | term._multiable
if termsEditable:
state['terminals'] = self.saveTerminals()
return state
def restoreState(self, state):
"""Restore the state of this node from a structure previously generated
by saveState(). """
pos = state.get('pos', (0,0))
self.graphicsItem().setPos(*pos)
self.bypass(state.get('bypass', False))
if 'terminals' in state:
self.restoreTerminals(state['terminals'])
def saveTerminals(self):
terms = OrderedDict()
for n, t in self.terminals.items():
terms[n] = (t.saveState())
return terms
def restoreTerminals(self, state):
for name in list(self.terminals.keys()):
if name not in state:
self.removeTerminal(name)
for name, opts in state.items():
if name in self.terminals:
term = self[name]
term.setOpts(**opts)
continue
try:
opts = strDict(opts)
self.addTerminal(name, **opts)
except:
printExc("Error restoring terminal %s (%s):" % (str(name), str(opts)))
def clearTerminals(self):
for t in self.terminals.values():
t.close()
self.terminals = OrderedDict()
self._inputs = OrderedDict()
self._outputs = OrderedDict()
def close(self):
"""Cleans up after the node--removes terminals, graphicsItem, widget"""
self.disconnectAll()
self.clearTerminals()
item = self.graphicsItem()
if item.scene() is not None:
item.scene().removeItem(item)
self._graphicsItem = None
w = self.ctrlWidget()
if w is not None:
w.setParent(None)
#self.emit(QtCore.SIGNAL('closed'), self)
self.sigClosed.emit(self)
def disconnectAll(self):
for t in self.terminals.values():
t.disconnectAll()
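## Minimal Node subclass sketch referenced from the Node docstring above.
## It is illustrative only (not part of the flowchart API): one input terminal,
## one output terminal, and a process() implementation returning a dict keyed
## by the output terminal name, as described in Node.process().
class _ExampleAddNode(Node):
    """Adds a constant offset to whatever arrives on its input terminal."""
    def __init__(self, name, offset=1.0):
        Node.__init__(self, name, terminals={
            'dataIn': {'io': 'in'},
            'dataOut': {'io': 'out'}})
        self.offset = offset
    def process(self, dataIn, display=True):
        ## must return {outputTerminalName: value}
        return {'dataOut': None if dataIn is None else dataIn + self.offset}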
#class NodeGraphicsItem(QtGui.QGraphicsItem):
class NodeGraphicsItem(GraphicsObject):
def __init__(self, node):
#QtGui.QGraphicsItem.__init__(self)
GraphicsObject.__init__(self)
#QObjectWorkaround.__init__(self)
#self.shadow = QtGui.QGraphicsDropShadowEffect()
#self.shadow.setOffset(5,5)
#self.shadow.setBlurRadius(10)
#self.setGraphicsEffect(self.shadow)
self.pen = fn.mkPen(0,0,0)
self.selectPen = fn.mkPen(200,200,200,width=2)
self.brush = fn.mkBrush(200, 200, 200, 150)
self.hoverBrush = fn.mkBrush(200, 200, 200, 200)
self.selectBrush = fn.mkBrush(200, 200, 255, 200)
self.hovered = False
self.node = node
flags = self.ItemIsMovable | self.ItemIsSelectable | self.ItemIsFocusable |self.ItemSendsGeometryChanges
#flags = self.ItemIsFocusable |self.ItemSendsGeometryChanges
self.setFlags(flags)
self.bounds = QtCore.QRectF(0, 0, 100, 100)
self.nameItem = QtGui.QGraphicsTextItem(self.node.name(), self)
self.nameItem.setDefaultTextColor(QtGui.QColor(50, 50, 50))
self.nameItem.moveBy(self.bounds.width()/2. - self.nameItem.boundingRect().width()/2., 0)
self.nameItem.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.updateTerminals()
#self.setZValue(10)
self.nameItem.focusOutEvent = self.labelFocusOut
self.nameItem.keyPressEvent = self.labelKeyPress
self.menu = None
self.buildMenu()
#self.node.sigTerminalRenamed.connect(self.updateActionMenu)
#def setZValue(self, z):
#for t, item in self.terminals.itervalues():
#item.setZValue(z+1)
#GraphicsObject.setZValue(self, z)
def labelFocusOut(self, ev):
QtGui.QGraphicsTextItem.focusOutEvent(self.nameItem, ev)
self.labelChanged()
def labelKeyPress(self, ev):
if ev.key() == QtCore.Qt.Key_Enter or ev.key() == QtCore.Qt.Key_Return:
self.labelChanged()
else:
QtGui.QGraphicsTextItem.keyPressEvent(self.nameItem, ev)
def labelChanged(self):
newName = str(self.nameItem.toPlainText())
if newName != self.node.name():
self.node.rename(newName)
### re-center the label
bounds = self.boundingRect()
self.nameItem.setPos(bounds.width()/2. - self.nameItem.boundingRect().width()/2., 0)
def setPen(self, pen):
self.pen = pen
self.update()
def setBrush(self, brush):
self.brush = brush
self.update()
def updateTerminals(self):
bounds = self.bounds
self.terminals = {}
inp = self.node.inputs()
dy = bounds.height() / (len(inp)+1)
y = dy
for i, t in inp.items():
item = t.graphicsItem()
item.setParentItem(self)
#item.setZValue(self.zValue()+1)
br = self.bounds
item.setAnchor(0, y)
self.terminals[i] = (t, item)
y += dy
out = self.node.outputs()
dy = bounds.height() / (len(out)+1)
y = dy
for i, t in out.items():
item = t.graphicsItem()
item.setParentItem(self)
item.setZValue(self.zValue())
br = self.bounds
item.setAnchor(bounds.width(), y)
self.terminals[i] = (t, item)
y += dy
#self.buildMenu()
def boundingRect(self):
return self.bounds.adjusted(-5, -5, 5, 5)
def paint(self, p, *args):
p.setPen(self.pen)
if self.isSelected():
p.setPen(self.selectPen)
p.setBrush(self.selectBrush)
else:
p.setPen(self.pen)
if self.hovered:
p.setBrush(self.hoverBrush)
else:
p.setBrush(self.brush)
p.drawRect(self.bounds)
def mousePressEvent(self, ev):
ev.ignore()
def mouseClickEvent(self, ev):
#print "Node.mouseClickEvent called."
if int(ev.button()) == int(QtCore.Qt.LeftButton):
ev.accept()
#print " ev.button: left"
sel = self.isSelected()
#ret = QtGui.QGraphicsItem.mousePressEvent(self, ev)
self.setSelected(True)
if not sel and self.isSelected():
#self.setBrush(QtGui.QBrush(QtGui.QColor(200, 200, 255)))
#self.emit(QtCore.SIGNAL('selected'))
#self.scene().selectionChanged.emit() ## for some reason this doesn't seem to be happening automatically
self.update()
#return ret
elif int(ev.button()) == int(QtCore.Qt.RightButton):
#print " ev.button: right"
ev.accept()
#pos = ev.screenPos()
self.raiseContextMenu(ev)
#self.menu.popup(QtCore.QPoint(pos.x(), pos.y()))
def mouseDragEvent(self, ev):
#print "Node.mouseDrag"
if ev.button() == QtCore.Qt.LeftButton:
ev.accept()
self.setPos(self.pos()+self.mapToParent(ev.pos())-self.mapToParent(ev.lastPos()))
def hoverEvent(self, ev):
if not ev.isExit() and ev.acceptClicks(QtCore.Qt.LeftButton):
ev.acceptDrags(QtCore.Qt.LeftButton)
self.hovered = True
else:
self.hovered = False
self.update()
def keyPressEvent(self, ev):
if ev.key() == QtCore.Qt.Key_Delete or ev.key() == QtCore.Qt.Key_Backspace:
ev.accept()
if not self.node._allowRemove:
return
self.node.close()
else:
ev.ignore()
def itemChange(self, change, val):
if change == self.ItemPositionHasChanged:
for k, t in self.terminals.items():
t[1].nodeMoved()
return GraphicsObject.itemChange(self, change, val)
def getMenu(self):
return self.menu
def getContextMenus(self, event):
return [self.menu]
def raiseContextMenu(self, ev):
menu = self.scene().addParentContextMenus(self, self.getMenu(), ev)
pos = ev.screenPos()
menu.popup(QtCore.QPoint(pos.x(), pos.y()))
def buildMenu(self):
self.menu = QtGui.QMenu()
self.menu.setTitle("Node")
a = self.menu.addAction("Add input", self.addInputFromMenu)
if not self.node._allowAddInput:
a.setEnabled(False)
a = self.menu.addAction("Add output", self.addOutputFromMenu)
if not self.node._allowAddOutput:
a.setEnabled(False)
a = self.menu.addAction("Remove node", self.node.close)
if not self.node._allowRemove:
a.setEnabled(False)
def addInputFromMenu(self): ## called when add input is clicked in context menu
self.node.addInput(renamable=True, removable=True, multiable=True)
def addOutputFromMenu(self): ## called when add output is clicked in context menu
self.node.addOutput(renamable=True, removable=True, multiable=False)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from novaclient.v1_1 import aggregates
from novaclient.v1_1 import availability_zones
from novaclient.v1_1 import certs
from novaclient.v1_1 import flavor_access
from novaclient.v1_1 import flavors
from novaclient.v1_1 import floating_ips
from novaclient.v1_1 import hypervisors
from novaclient.v1_1 import keypairs
from novaclient.v1_1 import quotas
from novaclient.v1_1 import security_group_rules as rules
from novaclient.v1_1 import security_groups as sec_groups
from novaclient.v1_1 import servers
from novaclient.v1_1 import services
from novaclient.v1_1 import usage
from novaclient.v1_1 import volume_snapshots as vol_snaps
from novaclient.v1_1 import volume_types
from novaclient.v1_1 import volumes
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas as usage_quotas
from openstack_dashboard.test.test_data import utils
SERVER_DATA = """
{
"server": {
"OS-EXT-SRV-ATTR:instance_name": "instance-00000005",
"OS-EXT-SRV-ATTR:host": "instance-host",
"OS-EXT-STS:task_state": null,
"addresses": {
"private": [
{
"version": 4,
"addr": "10.0.0.1"
}
]
},
"links": [
{
"href": "%(host)s/v1.1/%(tenant_id)s/servers/%(server_id)s",
"rel": "self"
},
{
"href": "%(host)s/%(tenant_id)s/servers/%(server_id)s",
"rel": "bookmark"
}
],
"image": {
"id": "%(image_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/images/%(image_id)s",
"rel": "bookmark"
}
]
},
"OS-EXT-STS:vm_state": "active",
"flavor": {
"id": "%(flavor_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/flavors/%(flavor_id)s",
"rel": "bookmark"
}
]
},
"id": "%(server_id)s",
"user_id": "%(user_id)s",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "",
"accessIPv6": "",
"progress": null,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "%(status)s",
"updated": "2012-02-28T19:51:27Z",
"hostId": "c461ea283faa0ab5d777073c93b126c68139e4e45934d4fc37e403c2",
"key_name": "%(key_name)s",
"name": "%(name)s",
"created": "2012-02-28T19:51:17Z",
"tenant_id": "%(tenant_id)s",
"metadata": {"someMetaLabel": "someMetaData",
"some<b>html</b>label": "<!--",
"empty": ""}
}
}
"""
USAGE_DATA = """
{
"total_memory_mb_usage": 64246.89777777778,
"total_vcpus_usage": 125.48222222222223,
"total_hours": 125.48222222222223,
"total_local_gb_usage": 0,
"tenant_id": "%(tenant_id)s",
"stop": "2012-01-31 23:59:59",
"start": "2012-01-01 00:00:00",
"server_usages": [
{
"memory_mb": %(flavor_ram)s,
"uptime": 442321,
"started_at": "2012-01-26 20:38:21",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 122.87361111111112,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
},
{
"memory_mb": %(flavor_ram)s,
"uptime": 9367,
"started_at": "2012-01-31 20:54:15",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 2.608611111111111,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
}
]
}
"""
def data(TEST):
TEST.servers = utils.TestDataContainer()
TEST.flavors = utils.TestDataContainer()
TEST.flavor_access = utils.TestDataContainer()
TEST.keypairs = utils.TestDataContainer()
TEST.security_groups = utils.TestDataContainer()
TEST.security_groups_uuid = utils.TestDataContainer()
TEST.security_group_rules = utils.TestDataContainer()
TEST.security_group_rules_uuid = utils.TestDataContainer()
TEST.volumes = utils.TestDataContainer()
TEST.quotas = utils.TestDataContainer()
TEST.quota_usages = utils.TestDataContainer()
TEST.floating_ips = utils.TestDataContainer()
TEST.floating_ips_uuid = utils.TestDataContainer()
TEST.usages = utils.TestDataContainer()
TEST.certs = utils.TestDataContainer()
TEST.volume_snapshots = utils.TestDataContainer()
TEST.volume_types = utils.TestDataContainer()
TEST.availability_zones = utils.TestDataContainer()
TEST.hypervisors = utils.TestDataContainer()
TEST.services = utils.TestDataContainer()
TEST.aggregates = utils.TestDataContainer()
    # Data returned by novaclient.
    # It is used when the API layer does data conversion.
TEST.api_floating_ips = utils.TestDataContainer()
TEST.api_floating_ips_uuid = utils.TestDataContainer()
# Volumes
volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="41023e92-8008-4c8b-8059-7f2293ff3775",
name='test_volume',
status='available',
size=40,
display_name='Volume name',
created_at='2012-04-01 10:30:00',
volume_type=None,
attachments=[]))
nameless_volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
name='',
status='in-use',
size=10,
display_name='',
display_description='',
device="/dev/hda",
created_at='2010-11-21 18:34:25',
volume_type='vol_type_1',
attachments=[{"id": "1", "server_id": '1',
"device": "/dev/hda"}]))
attached_volume = volumes.Volume(volumes.VolumeManager(None),
dict(id="8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
name='my_volume',
status='in-use',
size=30,
display_name='My Volume',
display_description='',
device="/dev/hdk",
created_at='2011-05-01 11:54:33',
volume_type='vol_type_2',
attachments=[{"id": "2", "server_id": '1',
"device": "/dev/hdk"}]))
TEST.volumes.add(volume)
TEST.volumes.add(nameless_volume)
TEST.volumes.add(attached_volume)
vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': 1,
'name': 'vol_type_1'})
vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': 2,
'name': 'vol_type_2'})
TEST.volume_types.add(vol_type1, vol_type2)
# Flavors
flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
{'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'name': 'm1.tiny',
'vcpus': 1,
'disk': 0,
'ram': 512,
'swap': 0,
'extra_specs': {},
'os-flavor-access:is_public': True,
'OS-FLV-EXT-DATA:ephemeral': 0})
flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
{'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'name': 'm1.massive',
'vcpus': 1000,
'disk': 1024,
'ram': 10000,
'swap': 0,
'extra_specs': {'Trusted': True, 'foo': 'bar'},
'os-flavor-access:is_public': True,
'OS-FLV-EXT-DATA:ephemeral': 2048})
flavor_3 = flavors.Flavor(flavors.FlavorManager(None),
{'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
'name': 'm1.secret',
'vcpus': 1000,
'disk': 1024,
'ram': 10000,
'swap': 0,
'extra_specs': {},
'os-flavor-access:is_public': False,
'OS-FLV-EXT-DATA:ephemeral': 2048})
TEST.flavors.add(flavor_1, flavor_2, flavor_3)
flavor_access_manager = flavor_access.FlavorAccessManager(None)
flavor_access_1 = flavor_access.FlavorAccess(flavor_access_manager,
{"tenant_id": "1",
"flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
flavor_access_2 = flavor_access.FlavorAccess(flavor_access_manager,
{"tenant_id": "2",
"flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
TEST.flavor_access.add(flavor_access_1, flavor_access_2)
# Keypairs
keypair = keypairs.Keypair(keypairs.KeypairManager(None),
dict(name='keyName'))
TEST.keypairs.add(keypair)
# Security Groups and Rules
def generate_security_groups(is_uuid=False):
def get_id(is_uuid):
if is_uuid:
return str(uuid.uuid4())
else:
get_id.current_int_id += 1
return get_id.current_int_id
get_id.current_int_id = 0
sg_manager = sec_groups.SecurityGroupManager(None)
rule_manager = rules.SecurityGroupRuleManager(None)
sec_group_1 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"default",
"description": u"default"})
sec_group_2 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"other_group",
"description": u"NotDefault."})
sec_group_3 = sec_groups.SecurityGroup(sg_manager,
{"rules": [],
"tenant_id": TEST.tenant.id,
"id": get_id(is_uuid),
"name": u"another_group",
"description": u"NotDefault."})
rule = {'id': get_id(is_uuid),
'group': {},
'ip_protocol': u"tcp",
'from_port': u"80",
'to_port': u"80",
'parent_group_id': sec_group_1.id,
'ip_range': {'cidr': u"0.0.0.0/32"}}
icmp_rule = {'id': get_id(is_uuid),
'group': {},
'ip_protocol': u"icmp",
'from_port': u"9",
'to_port': u"5",
'parent_group_id': sec_group_1.id,
'ip_range': {'cidr': u"0.0.0.0/32"}}
group_rule = {'id': 3,
'group': {},
'ip_protocol': u"tcp",
'from_port': u"80",
'to_port': u"80",
'parent_group_id': sec_group_1.id,
'source_group_id': sec_group_1.id}
rule_obj = rules.SecurityGroupRule(rule_manager, rule)
rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)
sec_group_1.rules = [rule_obj]
sec_group_2.rules = [rule_obj]
return {"rules": [rule_obj, rule_obj2, rule_obj3],
"groups": [sec_group_1, sec_group_2, sec_group_3]}
sg_data = generate_security_groups()
TEST.security_group_rules.add(*sg_data["rules"])
TEST.security_groups.add(*sg_data["groups"])
sg_uuid_data = generate_security_groups(is_uuid=True)
TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
TEST.security_groups_uuid.add(*sg_uuid_data["groups"])
# Quota Sets
quota_data = dict(metadata_items='1',
injected_file_content_bytes='1',
volumes='1',
gigabytes='1000',
ram=10000,
floating_ips='1',
fixed_ips='10',
instances='10',
injected_files='1',
cores='10',
security_groups='10',
security_group_rules='20')
quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
TEST.quotas.nova = base.QuotaSet(quota)
TEST.quotas.add(base.QuotaSet(quota))
# Quota Usages
quota_usage_data = {'gigabytes': {'used': 0,
'quota': 1000},
'instances': {'used': 0,
'quota': 10},
'ram': {'used': 0,
'quota': 10000},
'cores': {'used': 0,
'quota': 20},
'floating_ips': {'used': 0,
'quota': 10},
'volumes': {'used': 0,
'quota': 10}}
quota_usage = usage_quotas.QuotaUsage()
for k, v in quota_usage_data.items():
quota_usage.add_quota(base.Quota(k, v['quota']))
quota_usage.tally(k, v['used'])
TEST.quota_usages.add(quota_usage)
# Limits
limits = {"absolute": {"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 10000,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0}}
TEST.limits = limits
# Servers
tenant3 = TEST.tenants.list()[2]
vals = {"host": "http://nova.example.com:8774",
"name": "server_1",
"status": "ACTIVE",
"tenant_id": TEST.tenants.first().id,
"user_id": TEST.user.id,
"server_id": "1",
"flavor_id": flavor_1.id,
"image_id": TEST.images.first().id,
"key_name": keypair.name}
server_1 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
vals.update({"name": "server_2",
"status": "BUILD",
"server_id": "2"})
server_2 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
vals.update({"name": u'\u4e91\u89c4\u5219',
"status": "ACTIVE",
"tenant_id": tenant3.id,
"server_id": "3"})
server_3 = servers.Server(servers.ServerManager(None),
json.loads(SERVER_DATA % vals)['server'])
TEST.servers.add(server_1, server_2, server_3)
# VNC Console Data
console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
u'type': u'novnc'}}
TEST.servers.vnc_console_data = console
# SPICE Console Data
console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
u'type': u'spice'}}
TEST.servers.spice_console_data = console
# Floating IPs
def generate_fip(conf):
return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
conf)
fip_1 = {'id': 1,
'fixed_ip': '10.0.0.4',
'instance_id': server_1.id,
'ip': '58.58.58.58',
'pool': 'pool1'}
fip_2 = {'id': 2,
'fixed_ip': None,
'instance_id': None,
'ip': '58.58.58.58',
'pool': 'pool2'}
TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2))
TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
nova.FloatingIp(generate_fip(fip_2)))
# Floating IP with UUID id (for Floating IP with Neutron Proxy)
fip_3 = {'id': str(uuid.uuid4()),
'fixed_ip': '10.0.0.4',
'instance_id': server_1.id,
'ip': '58.58.58.58',
'pool': 'pool1'}
fip_4 = {'id': str(uuid.uuid4()),
'fixed_ip': None,
'instance_id': None,
'ip': '58.58.58.58',
'pool': 'pool2'}
TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))
TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
nova.FloatingIp(generate_fip(fip_4)))
# Usage
usage_vals = {"tenant_id": TEST.tenant.id,
"instance_name": server_1.name,
"flavor_name": flavor_1.name,
"flavor_vcpus": flavor_1.vcpus,
"flavor_disk": flavor_1.disk,
"flavor_ram": flavor_1.ram}
usage_obj = usage.Usage(usage.UsageManager(None),
json.loads(USAGE_DATA % usage_vals))
TEST.usages.add(usage_obj)
usage_2_vals = {"tenant_id": tenant3.id,
"instance_name": server_3.name,
"flavor_name": flavor_1.name,
"flavor_vcpus": flavor_1.vcpus,
"flavor_disk": flavor_1.disk,
"flavor_ram": flavor_1.ram}
usage_obj_2 = usage.Usage(usage.UsageManager(None),
json.loads(USAGE_DATA % usage_2_vals))
TEST.usages.add(usage_obj_2)
volume_snapshot = vol_snaps.Snapshot(vol_snaps.SnapshotManager(None),
{'id': '40f3fabf-3613-4f5e-90e5-6c9a08333fc3',
'display_name': 'test snapshot',
'display_description': 'vol snap!',
'size': 40,
'status': 'available',
'volume_id': '41023e92-8008-4c8b-8059-7f2293ff3775'})
TEST.volume_snapshots.add(volume_snapshot)
cert_data = {'private_key': 'private',
'data': 'certificate_data'}
certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
TEST.certs.add(certificate)
# Availability Zones
TEST.availability_zones.add(
availability_zones.AvailabilityZone(
availability_zones.AvailabilityZoneManager(None),
{
'zoneName': 'nova',
'zoneState': {'available': True},
'hosts': {
"host001": {
"nova-network": {
"active": True,
"available": True
}
}
}
}
)
)
# hypervisors
hypervisor_1 = hypervisors.Hypervisor(hypervisors.HypervisorManager(None),
{
"service": {"host": "devstack001", "id": 3},
"vcpus_used": 1,
"hypervisor_type": "QEMU",
"local_gb_used": 20,
"hypervisor_hostname": "devstack001",
"memory_mb_used": 1500,
"memory_mb": 2000,
"current_workload": 0,
"vcpus": 1,
"cpu_info": '{"vendor": "Intel", "model": "core2duo",'
'"arch": "x86_64", "features": ["lahf_lm"'
', "rdtscp"], "topology": {"cores": 1, "t'
'hreads": 1, "sockets": 1}}',
"running_vms": 1,
"free_disk_gb": 9,
"hypervisor_version": 1002000,
"disk_available_least": 6,
"local_gb": 29,
"free_ram_mb": 500,
"id": 1
}
)
TEST.hypervisors.add(hypervisor_1)
TEST.hypervisors.stats = {
"hypervisor_statistics": {
"count": 5,
"vcpus_used": 3,
"local_gb_used": 15,
"memory_mb": 483310,
"current_workload": 0,
"vcpus": 160,
"running_vms": 3,
"free_disk_gb": 12548,
"disk_available_least": 12556,
"local_gb": 12563,
"free_ram_mb": 428014,
"memory_mb_used": 55296
}
}
# Services
service_1 = services.Service(services.ServiceManager(None),
{
"status": "enabled",
"binary": "nova-conductor",
"zone": "internal",
"state": "up",
"updated_at": "2013-07-08T05:21:00.000000",
"host": "devstack001",
"disabled_reason": None
}
)
service_2 = services.Service(services.ServiceManager(None),
{
"status": "enabled",
"binary": "nova-compute",
"zone": "nova",
"state": "up",
"updated_at": "2013-07-08T05:20:51.000000",
"host": "devstack001",
"disabled_reason": None
}
)
TEST.services.add(service_1)
TEST.services.add(service_2)
# Aggregates
aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None),
{
"name": "foo",
"availability_zone": None,
"deleted": 0,
"created_at": "2013-07-04T13:34:38.000000",
"updated_at": None,
"hosts": ["foo", "bar"],
"deleted_at": None,
"id": 1,
"metadata": {
"foo": "testing",
"bar": "testing"
}
}
)
aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None),
{
"name": "bar",
"availability_zone": "testing",
"deleted": 0,
"created_at": "2013-07-04T13:34:38.000000",
"updated_at": None,
"hosts": ["foo", "bar"],
"deleted_at": None,
"id": 2,
"metadata": {
"foo": "testing",
"bar": "testing"
}
}
)
TEST.aggregates.add(aggregate_1)
TEST.aggregates.add(aggregate_2)
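# Consumption sketch (an assumption about typical usage, not part of this
# module): test cases that load this fixture read objects back through the
# same container helpers used above, e.g. first() and list().
def _example_read_back(TEST):
    server = TEST.servers.first()    # server_1 built in data()
    flavor = TEST.flavors.list()[0]  # m1.tiny
    return server.name, flavor.name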
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.basic_decoder."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import sys
# TODO(jart): #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes # pylint: disable=g-import-not-at-top
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
# pylint: disable=g-import-not-at-top
import numpy as np
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
class BasicDecoderTest(test.TestCase):
def _testStepWithTrainingHelper(self, use_output_layer):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = 10
output_layer_depth = 3
with self.test_session() as sess:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
cell = core_rnn_cell.LSTMCell(cell_depth)
helper = helper_py.TrainingHelper(
inputs, sequence_length, time_major=False)
if use_output_layer:
output_layer = layers_core.Dense(output_layer_depth, use_bias=False)
expected_output_depth = output_layer_depth
else:
output_layer = None
expected_output_depth = cell_depth
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size),
output_layer=output_layer)
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(expected_output_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, expected_output_depth),
step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
if use_output_layer:
# The output layer was accessed
self.assertEqual(len(output_layer.variables), 1)
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
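      # With sequence_length = [3, 4, 3, 1, 0]: the zero-length sequence is already
      # finished at initialization, and the length-1 sequence finishes after the
      # first step, which explains the two expectations below.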
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
self.assertAllEqual(
np.argmax(sess_results["step_outputs"].rnn_output, -1),
sess_results["step_outputs"].sample_id)
def testStepWithTrainingHelperNoOutputLayer(self):
self._testStepWithTrainingHelper(use_output_layer=False)
def testStepWithTrainingHelperWithOutputLayer(self):
self._testStepWithTrainingHelper(use_output_layer=True)
def testStepWithGreedyEmbeddingHelper(self):
batch_size = 5
vocabulary_size = 7
cell_depth = vocabulary_size # cell's logits must match vocabulary size
input_depth = 10
start_tokens = [0] * batch_size
end_token = 1
with self.test_session() as sess:
embeddings = np.random.randn(vocabulary_size,
input_depth).astype(np.float32)
cell = core_rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.GreedyEmbeddingHelper(embeddings, start_tokens,
end_token)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
expected_sample_ids = np.argmax(
sess_results["step_outputs"].rnn_output, -1)
expected_step_finished = (expected_sample_ids == end_token)
expected_step_next_inputs = embeddings[expected_sample_ids]
self.assertAllEqual([False, False, False, False, False],
sess_results["first_finished"])
self.assertAllEqual(expected_step_finished, sess_results["step_finished"])
self.assertAllEqual(expected_sample_ids,
sess_results["step_outputs"].sample_id)
self.assertAllEqual(expected_step_next_inputs,
sess_results["step_next_inputs"])
def testStepWithScheduledEmbeddingTrainingHelper(self):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
vocabulary_size = 10
with self.test_session() as sess:
inputs = np.random.randn(
batch_size, max_time, input_depth).astype(np.float32)
embeddings = np.random.randn(
vocabulary_size, input_depth).astype(np.float32)
half = constant_op.constant(0.5)
cell = core_rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.ScheduledEmbeddingTrainingHelper(
inputs=inputs,
sequence_length=sequence_length,
embedding=embeddings,
sampling_probability=half,
time_major=False)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(vocabulary_size,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, vocabulary_size),
step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, vocabulary_size),
first_state[0].get_shape())
self.assertEqual((batch_size, vocabulary_size),
first_state[1].get_shape())
self.assertEqual((batch_size, vocabulary_size),
step_state[0].get_shape())
self.assertEqual((batch_size, vocabulary_size),
step_state[1].get_shape())
self.assertEqual((batch_size, input_depth),
step_next_inputs.get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
sample_ids = sess_results["step_outputs"].sample_id
batch_where_not_sampling = np.where(sample_ids == -1)
batch_where_sampling = np.where(sample_ids > -1)
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_sampling],
embeddings[sample_ids[batch_where_sampling]])
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_not_sampling],
np.squeeze(inputs[batch_where_not_sampling, 1]))
if __name__ == "__main__":
test.main()
|
|
# Classes for scrAPI Harvesters
from __future__ import unicode_literals
import abc
import json
import logging
from datetime import timedelta, date
import six
from furl import furl
from lxml import etree
from scrapi import registry
from scrapi import settings
from scrapi.base.schemas import OAISCHEMA
from scrapi.linter.document import RawDocument, NormalizedDocument
from scrapi.base.transformer import XMLTransformer, JSONTransformer
from scrapi.base.helpers import (
updated_schema,
build_properties,
oai_get_records_and_token,
compose,
datetime_formatter,
null_on_error,
coerce_to_list
)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
etree.set_default_parser(etree.XMLParser(recover=True))
class HarvesterMeta(abc.ABCMeta):
def __init__(cls, name, bases, dct):
super(HarvesterMeta, cls).__init__(name, bases, dct)
if len(cls.__abstractmethods__) == 0 and cls.short_name not in settings.disabled:
registry[cls.short_name] = cls()
else:
logger.info('Class {} not added to registry'.format(cls.__name__))
@six.add_metaclass(HarvesterMeta)
class BaseHarvester(object):
""" This is a base class that all harvesters should inheret from
Defines the copy to unicode method, which is useful for getting standard
unicode out of xml results.
"""
@abc.abstractproperty
def short_name(self):
raise NotImplementedError
@abc.abstractproperty
def long_name(self):
raise NotImplementedError
@abc.abstractproperty
def url(self):
raise NotImplementedError
@abc.abstractproperty
def file_format(self):
raise NotImplementedError
@abc.abstractmethod
def harvest(self, start_date=None, end_date=None):
raise NotImplementedError
@abc.abstractmethod
def normalize(self, raw_doc):
raise NotImplementedError
@property
def run_at(self):
return {
'hour': 22,
'minute': 59,
'day_of_week': 'mon-sun',
}
class JSONHarvester(BaseHarvester, JSONTransformer):
file_format = 'json'
def normalize(self, raw_doc):
transformed = self.transform(json.loads(raw_doc['doc']), fail=settings.RAISE_IN_TRANSFORMER)
transformed['shareProperties'] = {
'source': self.short_name,
'docID': raw_doc['docID'],
'filetype': raw_doc['filetype']
}
return NormalizedDocument(transformed, clean=True)
class XMLHarvester(BaseHarvester, XMLTransformer):
file_format = 'xml'
def normalize(self, raw_doc):
transformed = self.transform(etree.XML(raw_doc['doc']), fail=settings.RAISE_IN_TRANSFORMER)
transformed['shareProperties'] = {
'source': self.short_name,
'docID': raw_doc['docID'],
'filetype': raw_doc['filetype']
}
return NormalizedDocument(transformed, clean=True)
class OAIHarvester(XMLHarvester):
""" Create a harvester with a oai_dc namespace, that will harvest
documents within a certain date range
Contains functions for harvesting from an OAI provider, normalizing,
and outputting in a way that scrapi can understand, in the most
generic terms possible.
    For more information, see the OAI-PMH specification:
http://www.openarchives.org/OAI/openarchivesprotocol.html
"""
record_encoding = None
DEFAULT_ENCODING = 'UTF-8'
RESUMPTION = '&resumptionToken='
RECORDS_URL = '?verb=ListRecords'
META_PREFIX_DATE = '&metadataPrefix=oai_dc&from={}&until={}'
    # Override these variables as required
namespaces = {
'dc': 'http://purl.org/dc/elements/1.1/',
'ns0': 'http://www.openarchives.org/OAI/2.0/',
'oai_dc': 'http://www.openarchives.org/OAI/2.0/',
}
timeout = 0.5
approved_sets = None
timezone_granularity = False
property_list = ['date', 'type']
force_request_update = False
verify = True
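    # Illustrative sketch only (hypothetical endpoint and dates), assuming a subclass
    # supplies base_url: harvest() below builds an initial request such as
    #   <base_url>?verb=ListRecords&metadataPrefix=oai_dc&from=2015-01-01&until=2015-01-08
    # and get_records() then pages with
    #   <base_url>?verb=ListRecords&resumptionToken=<token>
    # after dropping from/until/metadataPrefix.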
@property
def schema(self):
return self._schema
@property
def _schema(self):
return updated_schema(OAISCHEMA, self.formatted_properties)
@property
def formatted_properties(self):
return {
'otherProperties': build_properties(*map(self.format_property, self.property_list))
}
def format_property(self, property):
if property == 'date':
fn = compose(lambda x: map(null_on_error(datetime_formatter), x), coerce_to_list, self.resolve_property)
else:
fn = self.resolve_property
return (property, (
'//dc:{}/node()'.format(property),
'//ns0:{}/node()'.format(property),
fn)
)
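    # Illustration only (not executed): for the default property_list entry 'type',
    # format_property() returns
    #   ('type', ('//dc:type/node()', '//ns0:type/node()', self.resolve_property))
    # while 'date' gets the same XPaths with a list-coercing, datetime-formatting
    # wrapper around resolve_property.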
def resolve_property(self, dc, ns0):
ret = dc + ns0
return ret[0] if len(ret) == 1 else ret
def harvest(self, start_date=None, end_date=None):
start_date = (start_date or date.today() - timedelta(settings.DAYS_BACK)).isoformat()
end_date = (end_date or date.today()).isoformat()
if self.timezone_granularity:
start_date += 'T00:00:00Z'
end_date += 'T00:00:00Z'
url = furl(self.base_url)
url.args['verb'] = 'ListRecords'
url.args['metadataPrefix'] = 'oai_dc'
url.args['from'] = start_date
url.args['until'] = end_date
records = self.get_records(url.url, start_date, end_date)
return [
RawDocument({
'doc': etree.tostring(record, encoding=self.record_encoding),
'source': self.short_name,
'docID': record.xpath('ns0:header/ns0:identifier', namespaces=self.namespaces)[0].text,
'filetype': 'xml'
}) for record in records
]
def get_records(self, url, start_date, end_date=None):
url = furl(url)
all_records, token = oai_get_records_and_token(url.url, self.timeout, self.force_request_update, self.namespaces, self.verify)
while token:
url.remove('from')
url.remove('until')
url.remove('metadataPrefix')
url.args['resumptionToken'] = token[0]
records, token = oai_get_records_and_token(url.url, self.timeout, self.force_request_update, self.namespaces, self.verify)
all_records += records
return all_records
def normalize(self, raw_doc):
str_result = raw_doc.get('doc')
result = etree.XML(str_result)
if self.approved_sets:
set_spec = result.xpath(
'ns0:header/ns0:setSpec/node()',
namespaces=self.namespaces
)
# check if there's an intersection between the approved sets and the
# setSpec list provided in the record. If there isn't, don't normalize.
if not {x.replace('publication:', '') for x in set_spec}.intersection(self.approved_sets):
logger.info('Series {} not in approved list'.format(set_spec))
return None
status = result.xpath('ns0:header/@status', namespaces=self.namespaces)
if status and status[0] == 'deleted':
logger.info('Deleted record, not normalizing {}'.format(raw_doc['docID']))
return None
return super(OAIHarvester, self).normalize(raw_doc)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.calvinsys import CalvinSys
from calvin.actorstore.store import ActorStore
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities.calvin_callback import CalvinCB
_log = get_logger(__name__)
def log_callback(reply, **kwargs):
if reply:
_log.info("%s: %s" % (kwargs['prefix'], reply))
class ActorManager(object):
"""docstring for ActorManager"""
def __init__(self, node):
super(ActorManager, self).__init__()
self.actors = {}
self.node = node
def new(self, actor_type, args, state=None, prev_connections=None, connection_list=None, callback=None):
"""
        Instantiate an actor of type 'actor_type'. Parameters are passed in 'args';
        'name' is an optional parameter in 'args' specifying a human-readable name.
        Returns the actor id on success and raises an exception if anything goes wrong.
        Optionally applies a serialized state to the actor, in which case the supplied
        args are ignored and the args from the state are used instead.
        Optionally reconnects the ports, using either
        1) an unmodified connections structure obtained from the connections command,
           supplied as prev_connections, or
        2) a mangled list of tuples (in_node_id, in_port_id, out_node_id, out_port_id),
           supplied as connection_list
"""
# When renewing (e.g. after migrate) apply the args from the state
# instead of any directly supplied
_log.debug("class: %s args: %s state: %s", actor_type, args, state)
try:
if state:
a = self._new_from_state(actor_type, state)
else:
a = self._new(actor_type, args)
except Exception as e:
_log.exception("Actor creation failed")
raise(e)
self.actors[a.id] = a
self.node.storage.add_actor(a, self.node.id)
if prev_connections:
# Convert prev_connections to connection_list format
connection_list = self._prev_connections_to_connection_list(prev_connections)
if connection_list:
# Migrated actor
self.connect(a.id, connection_list, callback=callback)
else:
# Nothing to connect then we are OK
if callback:
callback(status='ACK', actor_id=a.id)
else:
return a.id
def _new_actor(self, actor_type):
"""Return a 'bare' actor of actor_type, raises an exception on failure."""
(found, is_primitive, class_) = ActorStore().lookup(actor_type)
if not found or not is_primitive:
_log.error("Requested actor %s is not available" % (actor_type))
raise Exception("ERROR_NOT_FOUND")
try:
# Create a 'bare' instance of the actor
a = class_(actor_type)
# FIXME: Resolve the required (calvin) APIs and attach them to the actor
# (if it has the required access rights)
a.attach_API("calvinsys", CalvinSys(self.node))
except Exception as e:
_log.exception("")
_log.error("The actor %s(%s) can't be instantiated." % (actor_type, class_.__init__))
raise(e)
return a
def _new(self, actor_type, args):
"""Return an initialized actor in PENDING state, raises an exception on failure."""
try:
a = self._new_actor(actor_type)
# Now that required APIs are attached we can call init() which may use the APIs
human_readable_name = args.pop('name', '')
a.name = human_readable_name
self.node.pm.add_ports_of_actor(a)
a.init(**args)
a.setup_complete()
except Exception as e:
#_log.exception(e)
raise(e)
return a
def _new_from_state(self, actor_type, state):
"""Return an restored actor in PENDING state, raises an exception on failure."""
try:
a = self._new_actor(actor_type)
a.set_state(state)
self.node.pm.add_ports_of_actor(a)
a.did_migrate()
a.setup_complete()
except Exception as e:
raise(e)
return a
def destroy(self, actor_id):
        # @TODO - check order here
a = self.actors[actor_id]
a.will_end()
self.node.pm.remove_ports_of_actor(a)
self.node.storage.delete_actor(actor_id)
del self.actors[actor_id]
    # DEPRECATED: Enabling of an actor is dependent on whether it's connected or not
def enable(self, actor_id):
if actor_id in self.actors:
self.actors[actor_id].enable()
    # DEPRECATED: Disabling of an actor is dependent on whether it's connected or not
def disable(self, actor_id):
if actor_id in self.actors:
self.actors[actor_id].disable()
else:
_log.info("!!!FAILED to disable %s", actor_id)
def migrate(self, actor_id, node_id, callback = None):
""" Migrate an actor actor_id to peer node node_id """
if actor_id not in self.actors:
return
actor = self.actors[actor_id]
actor.will_migrate()
actor_type = actor._type
ports = actor.connections(self.node.id)
        # Disconnect ports and continue in _migrate_disconnected
self.node.pm.disconnect(callback=CalvinCB(self._migrate_disconnected,
actor=actor,
actor_type=actor_type,
ports=ports,
node_id=node_id,
callback=callback),
actor_id=actor_id)
def _migrate_disconnected(self, actor, actor_type, ports, node_id, status=None, callback = None, **state):
""" Actor disconnected, continue migration """
if status == 'ACK':
state = actor.state()
self.destroy(actor.id)
self.node.proto.actor_new(node_id, callback, actor_type, state, ports)
else:
# FIXME handle errors!!!
if callback:
callback(status=status)
def peernew_to_local_cb(self, reply, **kwargs):
if kwargs['actor_id'] == reply:
            # Managed to set up, since new returned the same actor id
self.node.set_local_reply(kwargs['lmsg_id'], "OK")
else:
# Just pass on new cmd reply if it failed
self.node.set_local_reply(kwargs['lmsg_id'], reply)
def _prev_connections_to_connection_list(self, prev_connections):
"""Convert prev_connection format to connection_list format"""
cl = []
for in_port_id, out_id in prev_connections['inports'].iteritems():
cl.append((self.node.id, in_port_id, out_id[0], out_id[1]))
for out_port_id, in_list in prev_connections['outports'].iteritems():
for in_id in in_list:
cl.append((self.node.id, out_port_id, in_id[0], in_id[1]))
return cl
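    # Illustration only (hypothetical ids): given a prev_connections dict such as
    #   {'inports':  {'in1': ('peer-node', 'peer-out-port')},
    #    'outports': {'out1': [('peer-node', 'peer-in-port')]}}
    # the method above yields
    #   [(self.node.id, 'in1', 'peer-node', 'peer-out-port'),
    #    (self.node.id, 'out1', 'peer-node', 'peer-in-port')]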
def connect(self, actor_id, connection_list, callback=None):
"""
Reconnecting the ports can be done using a connection_list
of tuples (node_id i.e. our id, port_id, peer_node_id, peer_port_id)
"""
if actor_id not in self.actors:
return
peer_port_ids = [c[3] for c in connection_list]
for node_id, port_id, peer_node_id, peer_port_id in connection_list:
self.node.pm.connect(port_id=port_id,
peer_node_id=peer_node_id,
peer_port_id=peer_port_id,
callback=CalvinCB(self._actor_connected,
peer_port_id=peer_port_id,
actor_id=actor_id,
peer_port_ids=peer_port_ids,
_callback=callback))
def _actor_connected(self, status, peer_port_id, actor_id, peer_port_ids, _callback, **kwargs):
""" Get called for each of the actor's ports when connecting, but callback should only be called once
status: 'ACK'/'NACK'
_callback: original callback
peer_port_ids: list of port ids kept in context between calls when *changed* by this function,
do not replace it
"""
        # Send NACK if we have not already done so
if status == "NACK" and peer_port_ids:
if _callback:
del peer_port_ids[:]
_callback(status="NACK", actor_id=actor_id)
if peer_port_id in peer_port_ids:
# Remove this port from list
peer_port_ids.remove(peer_port_id)
# If all ports done send ACK
if not peer_port_ids:
if _callback:
_callback(status="ACK", actor_id=actor_id)
def connections(self, actor_id):
return self.actors.get(actor_id, None).connections(self.node.id)
def dump(self, actor_id):
actor = self.actors.get(actor_id, None)
if not actor:
raise Exception("Actor '%s' not found" % (actor_id,))
_log.debug("-----------")
_log.debug(actor)
_log.debug("-----------")
def set_port_property(self, actor_id, port_type, port_name, port_property, value):
try:
actor = self.actors[actor_id]
except Exception as e:
_log.exception("Actor '%s' not found" % (actor_id,))
raise e
success = actor.set_port_property(port_type, port_name, port_property, value)
return 'OK' if success else 'FAILURE'
def actor_type(self, actor_id):
actor = self.actors.get(actor_id, None)
return actor._type if actor else 'BAD ACTOR'
def report(self, actor_id):
return self.actors.get(actor_id, None).report()
def enabled_actors(self):
return [actor for actor in self.actors.values() if actor.enabled()]
def list_actors(self):
return self.actors.keys()
|
|
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource
from ..plotting import figure
from ..plotting_helpers import _get_select_tool
def cross(start, facets):
"""Creates a unique combination of provided facets.
A cross product of an initial set of starting facets with a new set of
facets.
Args:
start (list): List of lists of facets
facets (list): List of facets
Returns:
list: a list of lists of unique combinations of facets
"""
new = [[facet] for facet in facets]
result = []
for x in start:
for n in new:
result.append(x + n)
return result
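# Illustrative sketch only (hypothetical facet values), not executed:
#   cross([['2014'], ['2015']], ['east', 'west'])
#   -> [['2014', 'east'], ['2014', 'west'], ['2015', 'east'], ['2015', 'west']]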
def hide_axes(plot, axes=('x', 'y')):
"""Hides the axes of the plot by setting component alphas.
Args:
plot (Figure): a valid figure with x and y axes
        axes (tuple or list or str, optional): which axes to hide ('x', 'y', or both)
"""
if isinstance(axes, str):
axes = tuple(axes)
for label in axes:
axis = getattr(plot, label + 'axis')
axis = axis[0]
axis.major_label_text_alpha = 0.0
axis.major_label_text_font_size = '0pt'
axis.axis_line_alpha = 0.0
axis.major_tick_line_alpha = 0.0
axis.minor_tick_line_alpha = 0.0
plot.min_border = 0
def make_histogram_source(series):
"""Creates a ColumnDataSource containing the bins of the input series.
Args:
        series (:py:class:`~pandas.Series`): the continuous data to bin
Returns:
ColumnDataSource: includes bin centers with count of items in the bins
"""
counts, bins = np.histogram(series, bins=50)
centers = pd.rolling_mean(bins, 2)[1:]
return ColumnDataSource(data={'counts': counts, 'centers': centers})
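# Note (illustrative): np.histogram returns 50 counts and 51 bin edges here, and
# pd.rolling_mean(bins, 2)[1:] turns those edges into the 50 midpoints stored in
# the 'centers' column.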
def make_continuous_bar_source(df, x_field, y_field, agg):
"""Makes discrete, then creates representation of the bars to be plotted.
Args:
df (DataFrame): contains the data to be converted to a discrete form
x_field (str): the column in df that maps to the x dim of the plot
y_field (str): the column in df that maps to the y dim of the plot
agg (str): the type of aggregation to be used
Returns:
ColumnDataSource: aggregated, discrete form of x,y values
"""
# Generate dataframe required to use the categorical bar source function
labels, edges = pd.cut(x=df[x_field], bins=20, retbins=True, labels=False)
centers = pd.rolling_mean(edges, 2)[1:]
    # Replace each x value with the center of the bin it fell into
df[x_field] = centers[labels]
# After making it discrete, create the categorical bar source
return make_categorical_bar_source(df, x_field, y_field, agg)
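# Note (illustrative): pd.cut with labels=False returns an integer bin index per
# row plus the 21 bin edges; rolling the edges through a window-2 mean yields the
# 20 bin centers used to discretize x above.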
def make_categorical_bar_source(df, x_field, y_field, agg):
"""Creates representation of the bars to be plotted.
Args:
df (DataFrame): contains the data to be converted to a discrete form
x_field (str): the column in df that maps to the x dim of the plot
y_field (str): the column in df that maps to the y dim of the plot
agg (str): the type of aggregation to be used
Returns:
ColumnDataSource: aggregated, discrete form of x,y values
"""
# Get the y values after grouping by the x values
group = df.groupby(x_field)[y_field]
aggregate = getattr(group, agg)
# Convert back to a DataFrame on the aggregated data
result = aggregate().reset_index()
return ColumnDataSource(data=result)
def make_factor_source(series):
"""Generate data source that is based on the unique values in the series.
Args:
series (:py:class:`~pandas.Series`): contains categorical-like data
Returns:
ColumnDataSource: contains the unique values from the series
"""
return ColumnDataSource(data={'factors': series.unique()})
def make_bar_plot(datasource, counts_name="counts",
centers_name="centers",
bar_width=0.7,
x_range=None,
y_range=None,
plot_width=500, plot_height=500,
tools="pan,wheel_zoom,box_zoom,save,resize,box_select,reset",
title_text_font_size="12pt"):
"""Utility function to set/calculate default parameters of a bar plot.
Args:
datasource (ColumnDataSource): represents bars to plot
counts_name (str): column corresponding to height of the bars
centers_name (str): column corresponding to the location of the bars
bar_width (float): the width of the bars in the bar plot
x_range (list): list of two values, the min and max of the x axis range
plot_width (float): width of the plot in pixels
plot_height (float): height of the plot in pixels
tools (str): comma separated tool names to add to the plot
title_text_font_size (str): size of the plot title, e.g., '12pt'
Returns:
figure: plot generated from the provided parameters
"""
top = np.max(datasource.data[counts_name])
# Create the figure container
plot = figure(
title="", title_text_font_size=title_text_font_size,
plot_width=plot_width, plot_height=plot_height,
x_range=x_range, y_range=[0, top], tools=tools)
    # Rect glyphs are positioned by their center, so place each bar's center at half its height
y = [val/2.0 for val in datasource.data[counts_name]]
# Generate the bars in the figure
plot.rect(centers_name, y, bar_width, counts_name, source=datasource)
plot.min_border = 0
plot.h_symmetry = False
plot.v_symmetry = False
select_tool = _get_select_tool(plot)
if select_tool:
select_tool.dimensions = ['width']
return plot
def make_histogram(datasource,
counts_name="counts",
centers_name="centers",
x_range=None,
bar_width=0.7,
plot_width=500,
plot_height=500,
min_border=40,
tools=None,
title_text_font_size="12pt"):
"""Utility function to create a histogram figure.
This is used to create the filter widgets for continuous data in
CrossFilter.
Args:
datasource (ColumnDataSource): represents bars to plot
counts_name (str): column corresponding to height of the bars
centers_name (str): column corresponding to the location of the bars
x_range (list): list of two values, the min and max of the x axis range
bar_width (float): the width of the bars in the bar plot
plot_width (float): width of the plot in pixels
plot_height (float): height of the plot in pixels
min_border (float): minimum border width of figure in pixels
tools (str): comma separated tool names to add to the plot
title_text_font_size (str): size of the plot title, e.g., '12pt'
Returns:
figure: histogram plot generated from the provided parameters
"""
start = np.min(datasource.data[centers_name]) - bar_width
end = np.max(datasource.data[centers_name]) - bar_width
plot = make_bar_plot(
datasource, counts_name=counts_name, centers_name=centers_name,
x_range=[start, end], plot_width=plot_width, plot_height=plot_height,
tools=tools, title_text_font_size=title_text_font_size)
return plot
|
|
import py
from rpython.rlib.jit import JitDriver, hint, set_param, dont_look_inside,\
elidable
from rpython.rlib.objectmodel import compute_hash
from rpython.jit.metainterp.warmspot import ll_meta_interp, get_stats
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.metainterp import history
class LoopTest(object):
enable_opts = ''
automatic_promotion_result = {
'int_add' : 6, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1,
'guard_value' : 3
}
def meta_interp(self, f, args, policy=None, backendopt=False):
return ll_meta_interp(f, args, enable_opts=self.enable_opts,
policy=policy,
CPUClass=self.CPUClass,
backendopt=backendopt)
def run_directly(self, f, args):
return f(*args)
def test_simple_loop(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += x
y -= 1
return res * 2
res = self.meta_interp(f, [6, 7])
assert res == 84
self.check_trace_count(1)
def test_loop_with_delayed_setfield(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res', 'a'])
class A(object):
def __init__(self):
self.x = 3
def f(x, y):
res = 0
a = A()
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res, a=a)
myjitdriver.jit_merge_point(x=x, y=y, res=res, a=a)
a.x = y
if y < 3:
return a.x
res += a.x
y -= 1
return res * 2
res = self.meta_interp(f, [6, 13])
assert res == f(6, 13)
self.check_trace_count(1)
if self.enable_opts:
self.check_resops(setfield_gc=2, getfield_gc_i=0)
def test_loop_with_two_paths(self):
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def l(y, x, t):
llop.debug_print(lltype.Void, y, x, t)
def g(y, x, r):
if y <= 12:
res = x - 2
else:
res = x
l(y, x, r)
return res
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += g(y, x, res)
y -= 1
return res * 2
res = self.meta_interp(f, [6, 33], policy=StopAtXPolicy(l))
assert res == f(6, 33)
if self.enable_opts:
self.check_trace_count(2)
else:
self.check_trace_count(2)
def test_alternating_loops(self):
myjitdriver = JitDriver(greens = [], reds = ['pattern'])
def f(pattern):
while pattern > 0:
myjitdriver.can_enter_jit(pattern=pattern)
myjitdriver.jit_merge_point(pattern=pattern)
if pattern & 1:
pass
else:
pass
pattern >>= 1
return 42
self.meta_interp(f, [0xF0F0F0])
if self.enable_opts:
self.check_trace_count(3)
else:
self.check_trace_count(2)
def test_interp_simple(self):
myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y'])
bytecode = "bedca"
def f(x, y):
i = 0
while i < len(bytecode):
myjitdriver.can_enter_jit(i=i, x=x, y=y)
myjitdriver.jit_merge_point(i=i, x=x, y=y)
op = bytecode[i]
if op == 'a':
x += 3
elif op == 'b':
x += 1
elif op == 'c':
x -= y
elif op == 'd':
y += y
else:
y += 1
i += 1
return x
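        # f(100, 30) walks "bedca" once: b -> x=101, e -> y=31, d -> y=62,
        # c -> x=101-62=39, a -> x=39+3=42, which matches the expected result below.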
res = self.meta_interp(f, [100, 30])
assert res == 42
self.check_trace_count(0)
def test_green_prevents_loop(self):
myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y'])
bytecode = "+--+++++----"
def f(x, y):
i = 0
while i < len(bytecode):
myjitdriver.can_enter_jit(i=i, x=x, y=y)
myjitdriver.jit_merge_point(i=i, x=x, y=y)
op = bytecode[i]
if op == '+':
x += y
else:
y += 1
i += 1
return x
res = self.meta_interp(f, [100, 5])
assert res == f(100, 5)
self.check_trace_count(0)
def test_interp_single_loop(self):
myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y'])
bytecode = "abcd"
def f(x, y):
i = 0
while i < len(bytecode):
myjitdriver.jit_merge_point(i=i, x=x, y=y)
op = bytecode[i]
if op == 'a':
x += y
elif op == 'b':
y -= 1
elif op == 'c':
if y:
i = 0
myjitdriver.can_enter_jit(i=i, x=x, y=y)
continue
else:
x += 1
i += 1
return x
res = self.meta_interp(f, [5, 8])
assert res == 42
self.check_trace_count(1)
# the 'int_eq' and following 'guard' should be constant-folded
if 'unroll' in self.enable_opts:
self.check_resops(int_eq=0, guard_true=2, guard_false=0)
else:
self.check_resops(int_eq=0, guard_true=1, guard_false=0)
if self.basic:
found = 0
for op in get_stats().loops[0]._all_operations():
if op.getopname() == 'guard_true':
liveboxes = op.getfailargs()
assert len(liveboxes) == 2 # x, y (in some order)
assert liveboxes[0].type == 'i'
assert liveboxes[1].type == 'i'
found += 1
if 'unroll' in self.enable_opts:
assert found == 2
else:
assert found == 1
def test_interp_many_paths(self):
myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node'])
NODE = self._get_NODE()
bytecode = "xxxxxxxb"
def f(node):
x = 0
i = 0
while i < len(bytecode):
myjitdriver.jit_merge_point(i=i, x=x, node=node)
op = bytecode[i]
if op == 'x':
if not node:
break
if node.value < 100: # a pseudo-random choice
x += 1
node = node.next
elif op == 'b':
i = 0
myjitdriver.can_enter_jit(i=i, x=x, node=node)
continue
i += 1
return x
node1 = self.nullptr(NODE)
for i in range(300):
prevnode = self.malloc(NODE)
prevnode.value = pow(47, i, 199)
prevnode.next = node1
node1 = prevnode
expected = f(node1)
res = self.meta_interp(f, [node1])
assert res == expected
self.check_trace_count_at_most(19)
def test_interp_many_paths_2(self):
import sys
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(10000)
myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node'])
NODE = self._get_NODE()
bytecode = "xxxxxxxb"
def can_enter_jit(i, x, node):
myjitdriver.can_enter_jit(i=i, x=x, node=node)
def f(node):
x = 0
i = 0
while i < len(bytecode):
myjitdriver.jit_merge_point(i=i, x=x, node=node)
op = bytecode[i]
if op == 'x':
if not node:
break
if node.value < 100: # a pseudo-random choice
x += 1
node = node.next
elif op == 'b':
i = 0
can_enter_jit(i, x, node)
continue
i += 1
return x
node1 = self.nullptr(NODE)
for i in range(300):
prevnode = self.malloc(NODE)
prevnode.value = pow(47, i, 199)
prevnode.next = node1
node1 = prevnode
expected = f(node1)
res = self.meta_interp(f, [node1])
assert res == expected
self.check_trace_count_at_most(19)
finally:
sys.setrecursionlimit(oldlimit)
def test_nested_loops(self):
myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y'])
bytecode = "abc<de"
def f(x, y):
i = 0
op = '-'
while True:
myjitdriver.jit_merge_point(i=i, x=x, y=y)
op = bytecode[i]
if op == 'a':
x += 1
elif op == 'b':
x += y
elif op == 'c':
y -= 1
elif op == '<':
if y:
i -= 2
myjitdriver.can_enter_jit(i=i, x=x, y=y)
continue
elif op == 'd':
y = x
elif op == 'e':
if x > 1000:
break
else:
i = 0
myjitdriver.can_enter_jit(i=i, x=x, y=y)
continue
i += 1
return x
expected = f(2, 3)
res = self.meta_interp(f, [2, 3])
assert res == expected
def test_loop_in_bridge1(self):
myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y', 'res'])
bytecode = "abs>cxXyY"
def f(y):
res = x = 0
i = 0
op = '-'
while i < len(bytecode):
myjitdriver.jit_merge_point(i=i, x=x, y=y, res=res)
op = bytecode[i]
if op == 'a':
res += 1
elif op == 'b':
res += 10
elif op == 'c':
res += 10000
elif op == 's':
x = y
elif op == 'y':
y -= 1
elif op == 'Y':
if y:
i = 1
myjitdriver.can_enter_jit(i=i, x=x, y=y, res=res)
continue
elif op == 'x':
x -= 1
elif op == 'X':
if x > 0:
i -= 2
myjitdriver.can_enter_jit(i=i, x=x, y=y, res=res)
continue
elif op == '>':
if y > 6:
i += 4
continue
i += 1
return res
expected = f(12)
res = self.meta_interp(f, [12])
print res
assert res == expected
def test_nested_loops_discovered_by_bridge(self):
        # This is a bytecode implementation of the loop below. With
# threshold=3 the first trace produced will start with a failing
# test j <= i from the inner loop followed by one iteration of the
# outer loop followed by one iteration of the inner loop. A bridge
# is then created by tracing the inner loop again.
#
# i = j = x = 0
# while i < n:
# j = 0
# while j <= i:
# j = j + 1
# x = x + (i&j)
# i = i + 1
myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'j', 'n', 'x'])
bytecode = "IzJxji"
def f(n, threshold):
set_param(myjitdriver, 'threshold', threshold)
i = j = x = 0
pos = 0
op = '-'
while pos < len(bytecode):
myjitdriver.jit_merge_point(pos=pos, i=i, j=j, n=n, x=x)
op = bytecode[pos]
if op == 'z':
j = 0
elif op == 'i':
i += 1
pos = 0
myjitdriver.can_enter_jit(pos=pos, i=i, j=j, n=n, x=x)
continue
elif op == 'j':
j += 1
pos = 2
myjitdriver.can_enter_jit(pos=pos, i=i, j=j, n=n, x=x)
continue
elif op == 'I':
if not (i < n):
pos = 5
elif op == 'J':
if not (j <= i):
pos = 4
elif op == 'x':
x = x + (i&j)
pos += 1
return x
for th in (3, 1, 2, 4, 5): # Start with the interesting case
expected = f(25, th)
res = self.meta_interp(f, [25, th])
assert res == expected
def test_nested_loops_discovered_by_bridge_virtual(self):
# Same loop as above, but with virtuals
class A:
def __init__(self, val):
self.val = val
def add(self, val):
return A(self.val + val)
myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'j', 'n', 'x'])
bytecode = "IzJxji"
def f(nval, threshold):
set_param(myjitdriver, 'threshold', threshold)
i, j, x = A(0), A(0), A(0)
n = A(nval)
pos = 0
op = '-'
while pos < len(bytecode):
myjitdriver.jit_merge_point(pos=pos, i=i, j=j, n=n, x=x)
op = bytecode[pos]
if op == 'z':
j = A(0)
elif op == 'i':
i = i.add(1)
pos = 0
myjitdriver.can_enter_jit(pos=pos, i=i, j=j, n=n, x=x)
continue
elif op == 'j':
j = j.add(1)
pos = 2
myjitdriver.can_enter_jit(pos=pos, i=i, j=j, n=n, x=x)
continue
elif op == 'I':
if not (i.val < n.val):
pos = 5
elif op == 'J':
if not (j.val <= i.val):
pos = 4
elif op == 'x':
x = x.add(i.val & j.val)
pos += 1
return x.val
for th in (5, 3, 1, 2, 4): # Start with the interesting case
expected = f(25, th)
res = self.meta_interp(f, [25, th])
assert res == expected
def test_two_bridged_loops(self):
myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'n', 's', 'x'])
bytecode = "zI7izI8i"
def f(n, s):
i = x = 0
pos = 0
op = '-'
while pos < len(bytecode):
myjitdriver.jit_merge_point(pos=pos, i=i, n=n, s=s, x=x)
op = bytecode[pos]
if op == 'z':
i = 0
if op == 'i':
i += 1
pos -= 2
myjitdriver.can_enter_jit(pos=pos, i=i, n=n, s=s, x=x)
continue
elif op == 'I':
if not (i < n):
pos += 2
elif op == '7':
if s==1:
x = x + 7
else:
x = x + 2
elif op == '8':
if s==1:
x = x + 8
else:
x = x + 3
pos += 1
return x
def g(n, s):
sa = 0
for i in range(7):
sa += f(n, s)
return sa
assert self.meta_interp(g, [25, 1]) == g(25, 1)
def h(n):
return g(n, 1) + g(n, 2)
assert self.meta_interp(h, [25]) == h(25)
def test_two_bridged_loops_classes(self):
myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'n', 'x', 's'])
class A(object):
pass
bytecode = "I7i"
def f(n, s):
i = x = 0
pos = 0
op = '-'
while pos < len(bytecode):
myjitdriver.jit_merge_point(pos=pos, i=i, n=n, s=s, x=x)
op = bytecode[pos]
if op == 'i':
i += 1
pos -= 2
myjitdriver.can_enter_jit(pos=pos, i=i, n=n, s=s, x=x)
continue
elif op == 'I':
if not (i < n):
pos += 2
elif op == '7':
if s is not None:
x = x + 7
else:
x = x + 2
pos += 1
return x
def g(n, s):
if s == 2:
s = None
else:
s = A()
sa = 0
for i in range(7):
sa += f(n, s)
return sa
#assert self.meta_interp(g, [25, 1]) == g(25, 1)
def h(n):
return g(n, 1) + g(n, 2)
assert self.meta_interp(h, [25]) == h(25)
def test_three_nested_loops(self):
myjitdriver = JitDriver(greens = ['i'], reds = ['x'])
bytecode = ".+357"
def f(x):
assert x >= 0
i = 0
while i < len(bytecode):
myjitdriver.jit_merge_point(i=i, x=x)
op = bytecode[i]
if op == '+':
x += 1
elif op == '.':
pass
elif op == '3':
if x % 3 != 0:
i -= 1
myjitdriver.can_enter_jit(i=i, x=x)
continue
elif op == '5':
if x % 5 != 0:
i -= 2
myjitdriver.can_enter_jit(i=i, x=x)
continue
elif op == '7':
if x % 7 != 0:
i -= 4
myjitdriver.can_enter_jit(i=i, x=x)
continue
i += 1
return x
expected = f(0)
assert expected == 3*5*7
res = self.meta_interp(f, [0])
assert res == expected
def test_unused_loop_constant(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'z'])
def f(x, y, z):
while z > 0:
myjitdriver.can_enter_jit(x=x, y=y, z=z)
myjitdriver.jit_merge_point(x=x, y=y, z=z)
x += z
z -= 1
return x * y
expected = f(2, 6, 30)
res = self.meta_interp(f, [2, 6, 30])
assert res == expected
def test_loop_unicode(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
def f(n):
x = u''
while n > 13:
myjitdriver.can_enter_jit(n=n, x=x)
myjitdriver.jit_merge_point(n=n, x=x)
x += unichr(n)
n -= 1
return compute_hash(x)
expected = self.run_directly(f, [100])
res = self.meta_interp(f, [100])
assert res == expected
def test_loop_string(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
def f(n):
x = ''
while n > 13:
myjitdriver.can_enter_jit(n=n, x=x)
myjitdriver.jit_merge_point(n=n, x=x)
#print len(x), x
x += chr(n)
n -= 1
return compute_hash(x)
expected = self.run_directly(f, [100])
res = self.meta_interp(f, [100])
assert res == expected
def test_adapt_bridge_to_merge_point(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'z'])
class Z(object):
def __init__(self, elem):
self.elem = elem
def externfn(z):
pass
def f(x, y):
z = Z(y)
while x > 0:
myjitdriver.can_enter_jit(x=x, z=z)
myjitdriver.jit_merge_point(x=x, z=z)
if x % 5 != 0:
externfn(z)
z = Z(z.elem + 1)
x -= 1
return z.elem
expected = f(100, 5)
res = self.meta_interp(f, [100, 5], policy=StopAtXPolicy(externfn))
assert res == expected
if self.enable_opts:
self.check_trace_count(2)
self.check_jitcell_token_count(1) # 1 loop with bridge from interp
else:
self.check_trace_count(2)
self.check_jitcell_token_count(1) # 1 loop, callable from the interp
def test_example(self):
myjitdriver = JitDriver(greens = ['i'],
reds = ['res', 'a'])
CO_INCREASE = 0
CO_JUMP_BACK_3 = 1
CO_DECREASE = 2
code = [CO_INCREASE, CO_INCREASE, CO_INCREASE,
CO_JUMP_BACK_3, CO_INCREASE, CO_DECREASE]
def add(res, a):
return res + a
def sub(res, a):
return res - a
def main_interpreter_loop(a):
i = 0
res = 0
c = len(code)
while i < c:
myjitdriver.jit_merge_point(res=res, i=i, a=a)
elem = code[i]
if elem == CO_INCREASE:
res = add(res, a)
elif elem == CO_DECREASE:
res = sub(res, a)
else:
if res > 100:
pass
else:
i = i - 3
myjitdriver.can_enter_jit(res=res, i=i, a=a)
continue
i = i + 1
return res
res = self.meta_interp(main_interpreter_loop, [1])
assert res == 102
self.check_trace_count(1)
if 'unroll' in self.enable_opts:
self.check_resops({'int_add' : 6, 'int_gt' : 2,
'guard_false' : 2, 'jump' : 1})
else:
self.check_resops({'int_add' : 3, 'int_gt' : 1,
'guard_false' : 1, 'jump' : 1})
def test_automatic_promotion(self):
myjitdriver = JitDriver(greens = ['i'],
reds = ['res', 'a'])
CO_INCREASE = 0
CO_JUMP_BACK_3 = 1
code = [CO_INCREASE, CO_INCREASE, CO_INCREASE,
CO_JUMP_BACK_3, CO_INCREASE]
def add(res, a):
return res + a
def sub(res, a):
return res - a
def main_interpreter_loop(a):
i = 0
res = 0
c = len(code)
while True:
myjitdriver.jit_merge_point(res=res, i=i, a=a)
if i >= c:
break
elem = code[i]
if elem == CO_INCREASE:
i += a
res += a
else:
if res > 100:
i += 1
else:
i = i - 3
myjitdriver.can_enter_jit(res=res, i=i, a=a)
return res
res = self.meta_interp(main_interpreter_loop, [1])
assert res == main_interpreter_loop(1)
self.check_trace_count(1)
# These loops do different numbers of ops based on which optimizer we
# are testing with.
self.check_resops(self.automatic_promotion_result)
def test_can_enter_jit_outside_main_loop(self):
myjitdriver = JitDriver(greens=[], reds=['i', 'j', 'a'])
def done(a, j):
myjitdriver.can_enter_jit(i=0, j=j, a=a)
def main_interpreter_loop(a):
i = j = 0
while True:
myjitdriver.jit_merge_point(i=i, j=j, a=a)
i += 1
j += 3
if i >= 10:
a -= 1
if not a:
break
i = 0
done(a, j)
return j
assert main_interpreter_loop(5) == 5 * 10 * 3
res = self.meta_interp(main_interpreter_loop, [5])
assert res == 5 * 10 * 3
def test_outer_and_inner_loop(self):
jitdriver = JitDriver(greens = ['p', 'code'], reds = ['i', 'j',
'total'])
class Code:
def __init__(self, lst):
self.lst = lst
codes = [Code([]), Code([0, 0, 1, 1])]
def interpret(num):
code = codes[num]
p = 0
i = 0
j = 0
total = 0
while p < len(code.lst):
jitdriver.jit_merge_point(code=code, p=p, i=i, j=j, total=total)
total += i
e = code.lst[p]
if e == 0:
p += 1
elif e == 1:
if i < p * 20:
p = 3 - p
i += 1
jitdriver.can_enter_jit(code=code, p=p, j=j, i=i,
total=total)
else:
j += 1
i = j
p += 1
return total
res = self.meta_interp(interpret, [1])
assert res == interpret(1)
        # XXX it's unclear how many loops should be there
self.check_trace_count(2)
def test_path_with_operations_not_from_start(self):
jitdriver = JitDriver(greens = ['k'], reds = ['n', 'z'])
def f(n):
k = 0
z = 0
while n > 0:
jitdriver.can_enter_jit(n=n, k=k, z=z)
jitdriver.jit_merge_point(n=n, k=k, z=z)
k += 1
if k == 30:
if z == 0 or z == 1:
k = 4
z += 1
else:
k = 15
z = 0
n -= 1
return 42
res = self.meta_interp(f, [200])
def test_path_with_operations_not_from_start_2(self):
jitdriver = JitDriver(greens = ['k'], reds = ['n', 'z', 'stuff'])
class Stuff(object):
def __init__(self, n):
self.n = n
def some_fn(stuff, k, z):
jitdriver.can_enter_jit(n=stuff.n, k=k, z=z, stuff=stuff)
def f(n):
k = 0
z = 0
stuff = Stuff(0)
while n > 0:
jitdriver.jit_merge_point(n=n, k=k, z=z, stuff=stuff)
k += 1
if k == 30:
if z == 0 or z == 1:
k = 4
z += 1
else:
k = 15
z = 0
n -= 1
some_fn(Stuff(n), k, z)
return 0
res = self.meta_interp(f, [200])
def test_regular_pointers_in_short_preamble(self):
from rpython.rtyper.lltypesystem import lltype
BASE = lltype.GcStruct('BASE')
A = lltype.GcStruct('A', ('parent', BASE), ('val', lltype.Signed))
B = lltype.GcStruct('B', ('parent', BASE), ('charval', lltype.Char))
myjitdriver = JitDriver(greens = [], reds = ['n', 'm', 'i', 'j', 'sa', 'p'])
def f(n, m, j):
i = sa = 0
pa = lltype.malloc(A)
pa.val = 7
p = pa.parent
while i < n:
myjitdriver.jit_merge_point(n=n, m=m, i=i, j=j, sa=sa, p=p)
if i < m:
pa = lltype.cast_pointer(lltype.Ptr(A), p)
sa += pa.val
elif i == m:
pb = lltype.malloc(B)
pb.charval = 'y'
p = pb.parent
else:
pb = lltype.cast_pointer(lltype.Ptr(B), p)
sa += ord(pb.charval)
sa += 100
assert n>0 and m>0
i += j
return sa
# This is detected as invalid by the codewriter, for now
py.test.raises(NotImplementedError, self.meta_interp, f, [20, 10, 1])
def test_unerased_pointers_in_short_preamble(self):
from rpython.rlib.rerased import new_erasing_pair
from rpython.rtyper.lltypesystem import lltype
class A(object):
def __init__(self, val):
self.val = val
erase_A, unerase_A = new_erasing_pair('A')
erase_TP, unerase_TP = new_erasing_pair('TP')
TP = lltype.GcArray(lltype.Signed)
myjitdriver = JitDriver(greens = [], reds = ['n', 'm', 'i', 'j', 'sa', 'p'])
def f(n, m, j):
i = sa = 0
p = erase_A(A(7))
while i < n:
myjitdriver.jit_merge_point(n=n, m=m, i=i, j=j, sa=sa, p=p)
if i < m:
sa += unerase_A(p).val
elif i == m:
a = lltype.malloc(TP, 5)
a[0] = 42
p = erase_TP(a)
else:
sa += unerase_TP(p)[0]
sa += A(i).val
assert n>0 and m>0
i += j
return sa
res = self.meta_interp(f, [20, 10, 1])
assert res == f(20, 10, 1)
def test_boxed_unerased_pointers_in_short_preamble(self):
from rpython.rlib.rerased import new_erasing_pair
from rpython.rtyper.lltypesystem import lltype
class A(object):
def __init__(self, val):
self.val = val
def tst(self):
return self.val
class Box(object):
def __init__(self, val):
self.val = val
erase_A, unerase_A = new_erasing_pair('A')
erase_TP, unerase_TP = new_erasing_pair('TP')
TP = lltype.GcArray(lltype.Signed)
myjitdriver = JitDriver(greens = [], reds = ['n', 'm', 'i', 'sa', 'p'])
def f(n, m):
i = sa = 0
p = Box(erase_A(A(7)))
while i < n:
myjitdriver.jit_merge_point(n=n, m=m, i=i, sa=sa, p=p)
if i < m:
sa += unerase_A(p.val).tst()
elif i == m:
a = lltype.malloc(TP, 5)
a[0] = 42
p = Box(erase_TP(a))
else:
sa += unerase_TP(p.val)[0]
sa -= A(i).val
i += 1
return sa
res = self.meta_interp(f, [20, 10])
assert res == f(20, 10)
def test_unroll_issue_1(self):
class A(object):
_attrs_ = []
def checkcls(self):
raise NotImplementedError
class B(A):
def __init__(self, b_value):
self.b_value = b_value
def get_value(self):
return self.b_value
def checkcls(self):
return self.b_value
@dont_look_inside
def check(a):
return isinstance(a, B)
jitdriver = JitDriver(greens=[], reds='auto')
def f(a, xx):
i = 0
total = 0
while i < 10:
jitdriver.jit_merge_point()
if check(a):
if xx & 1:
total *= a.checkcls()
total += a.get_value()
i += 1
return total
def run(n):
bt = f(B(n), 1)
bt = f(B(n), 2)
at = f(A(), 3)
return at * 100000 + bt
assert run(42) == 420
res = self.meta_interp(run, [42], backendopt=True)
assert res == 420
def test_unroll_issue_2(self):
py.test.skip("decide")
class B(object):
def __init__(self, b_value):
self.b_value = b_value
class C(object):
pass
from rpython.rlib.rerased import new_erasing_pair
b_erase, b_unerase = new_erasing_pair("B")
c_erase, c_unerase = new_erasing_pair("C")
@elidable
def unpack_b(a):
return b_unerase(a)
jitdriver = JitDriver(greens=[], reds='auto')
def f(a, flag):
i = 0
total = 0
while i < 10:
jitdriver.jit_merge_point()
if flag:
total += unpack_b(a).b_value
flag += 1
i += 1
return total
def run(n):
res = f(b_erase(B(n)), 1)
f(c_erase(C()), 0)
return res
assert run(42) == 420
res = self.meta_interp(run, [42], backendopt=True)
assert res == 420
def test_unroll_issue_3(self):
py.test.skip("decide")
from rpython.rlib.rerased import new_erasing_pair
b_erase, b_unerase = new_erasing_pair("B") # list of ints
c_erase, c_unerase = new_erasing_pair("C") # list of Nones
@elidable
def unpack_b(a):
return b_unerase(a)
jitdriver = JitDriver(greens=[], reds='auto')
def f(a, flag):
i = 0
total = 0
while i < 10:
jitdriver.jit_merge_point()
if flag:
total += unpack_b(a)[0]
flag += 1
i += 1
return total
def run(n):
res = f(b_erase([n]), 1)
f(c_erase([None]), 0)
return res
assert run(42) == 420
res = self.meta_interp(run, [42], backendopt=True)
assert res == 420
def test_not_too_many_bridges(self):
jitdriver = JitDriver(greens = [], reds = 'auto')
def f(i):
s = 0
while i > 0:
jitdriver.jit_merge_point()
if i % 2 == 0:
s += 1
elif i % 3 == 0:
s += 1
elif i % 5 == 0:
s += 1
elif i % 7 == 0:
s += 1
i -= 1
return s
self.meta_interp(f, [30])
self.check_trace_count(3)
def test_sharing_guards(self):
py.test.skip("unimplemented")
driver = JitDriver(greens = [], reds = 'auto')
def f(i):
s = 0
while i > 0:
driver.jit_merge_point()
if s > 100:
raise Exception
if s > 9:
s += 1 # bridge
s += 1
i -= 1
self.meta_interp(f, [15])
# one guard_false got removed
self.check_resops(guard_false=4, guard_true=5)
class TestLLtype(LoopTest, LLJitMixin):
pass
|
|
import numpy
from einops import rearrange, parse_shape, reduce
from tests import collect_test_backends
from tests.test_ops import imp_op_backends
def test_rearrange_examples():
def test1(x):
# transpose
y = rearrange(x, 'b c h w -> b h w c')
assert y.shape == (10, 30, 40, 20)
return y
def test2(x):
# view / reshape
y = rearrange(x, 'b c h w -> b (c h w)')
assert y.shape == (10, 20 * 30 * 40)
return y
def test3(x):
# depth-to-space
y = rearrange(x, 'b (c h1 w1) h w -> b c (h h1) (w w1)', h1=2, w1=2)
assert y.shape == (10, 5, 30 * 2, 40 * 2)
return y
def test4(x):
# space-to-depth
y = rearrange(x, 'b c (h h1) (w w1) -> b (h1 w1 c) h w', h1=2, w1=2)
assert y.shape == (10, 20 * 4, 30 // 2, 40 // 2)
return y
def test5(x):
# simple transposition
y = rearrange(x, 'b1 sound b2 letter -> b1 b2 sound letter')
assert y.shape == (10, 30, 20, 40)
return y
def test6(x):
# parsing parameters
t = rearrange(x, 'b c h w -> (b h w) c')
t = t[:, ::2] # replacement for dot-product, just changes size of second axis
assert t.shape == (10 * 30 * 40, 10)
y = rearrange(t, '(b h w) c2 -> b c2 h w', **parse_shape(x, 'b _ h w'))
assert y.shape == (10, 10, 30, 40)
return y
def test7(x):
# split of embedding into groups
y1, y2 = rearrange(x, 'b (c g) h w -> g b c h w', g=2)
assert y1.shape == (10, 10, 30, 40)
assert y2.shape == (10, 10, 30, 40)
return y1 + y2 # only one tensor is expected in output
def test8(x):
# max-pooling
y = reduce(x, 'b c (h h1) (w w1) -> b c h w', reduction='max', h1=2, w1=2)
assert y.shape == (10, 20, 30 // 2, 40 // 2)
return y
def test9(x):
# squeeze - unsqueeze
y = reduce(x, 'b c h w -> b c () ()', reduction='max')
assert y.shape == (10, 20, 1, 1)
y = rearrange(y, 'b c () () -> c b')
assert y.shape == (20, 10)
return y
def test10(x):
# stack
tensors = list(x + 0) # 0 is needed https://github.com/tensorflow/tensorflow/issues/23185
tensors = rearrange(tensors, 'b c h w -> b h w c')
assert tensors.shape == (10, 30, 40, 20)
return tensors
def test11(x):
# concatenate
tensors = list(x + 0) # 0 is needed https://github.com/tensorflow/tensorflow/issues/23185
tensors = rearrange(tensors, 'b c h w -> h (b w) c')
assert tensors.shape == (30, 10 * 40, 20)
return tensors
def shufflenet(x, convolve, c1, c2):
# shufflenet reordering example
x = convolve(x)
x = rearrange(x, 'b (c1 c2) h w-> b (c2 c1) h w', c1=c1, c2=c2)
x = convolve(x)
return x
def convolve_strided_1d(x, stride, usual_convolution):
x = rearrange(x, 'b c t1 t2 -> b c (t1 t2)') # reduce dimensionality
x = rearrange(x, 'b c (t stride) -> (stride b) c t', stride=stride)
x = usual_convolution(x)
x = rearrange(x, '(stride b) c t -> b c (t stride)', stride=stride)
return x
def convolve_strided_2d(x, h_stride, w_stride, usual_convolution):
x = rearrange(x, 'b c (h hs) (w ws) -> (hs ws b) c h w', hs=h_stride, ws=w_stride)
x = usual_convolution(x)
x = rearrange(x, '(hs ws b) c h w -> b c (h hs) (w ws)', hs=h_stride, ws=w_stride)
return x
def unet_like_1d(x, usual_convolution):
# u-net like steps for increasing / reducing dimensionality
x = rearrange(x, 'b c t1 t2 -> b c (t1 t2)') # reduce dimensionality
y = rearrange(x, 'b c (t dt) -> b (dt c) t', dt=2)
y = usual_convolution(y)
x = x + rearrange(y, 'b (dt c) t -> b c (t dt)', dt=2)
return x
# mock for convolution (works for all backends)
convolve_mock = lambda x: x
tests = [test1, test2, test3, test4, test5, test6, test7, test8, test9, test10, test11,
lambda x: shufflenet(x, convolve=convolve_mock, c1=4, c2=5),
lambda x: convolve_strided_1d(x, stride=2, usual_convolution=convolve_mock),
lambda x: convolve_strided_2d(x, h_stride=2, w_stride=2, usual_convolution=convolve_mock),
lambda x: unet_like_1d(x, usual_convolution=convolve_mock),
]
for backend in imp_op_backends:
print('testing source_examples for ', backend.framework_name)
for test in tests:
x = numpy.arange(10 * 20 * 30 * 40).reshape([10, 20, 30, 40])
result1 = test(x)
result2 = backend.to_numpy(test(backend.from_numpy(x)))
assert numpy.array_equal(result1, result2)
# now with strides
x = numpy.arange(10 * 2 * 20 * 3 * 30 * 1 * 40).reshape([10 * 2, 20 * 3, 30 * 1, 40 * 1])
# known torch bug - torch doesn't support negative steps
last_step = -1 if backend.framework_name != 'torch' else 1
indexing_expression = numpy.index_exp[::2, ::3, ::1, ::last_step]
result1 = test(x[indexing_expression])
result2 = backend.to_numpy(test(backend.from_numpy(x)[indexing_expression]))
assert numpy.array_equal(result1, result2)
def tensor_train_example_numpy():
    # kept here just as part of the collection; only tested with numpy
# https://arxiv.org/pdf/1509.06569.pdf, (5)
x = numpy.ones([3, 4, 5, 6])
rank = 4
if numpy.__version__ < '1.15.0':
# numpy.einsum fails here, skip test
return
# creating appropriate Gs
Gs = [numpy.ones([d, d, rank, rank]) for d in x.shape]
Gs[0] = Gs[0][:, :, :1, :]
Gs[-1] = Gs[-1][:, :, :, :1]
# einsum way
y = x.reshape((1,) + x.shape)
for G in Gs:
# taking partial results left-to-right
# y = numpy.einsum('i j alpha beta, alpha i ... -> beta ... j', G, y)
y = numpy.einsum('i j a b, a i ... -> b ... j', G, y)
y1 = y.reshape(-1)
# alternative way
y = x.reshape(-1)
for G in Gs:
i, j, alpha, beta = G.shape
y = rearrange(y, '(i rest alpha) -> rest (alpha i)', alpha=alpha, i=i)
y = y @ rearrange(G, 'i j alpha beta -> (alpha i) (j beta)')
y = rearrange(y, 'rest (beta j) -> (beta rest j)', beta=beta, j=j)
y2 = y
assert numpy.allclose(y1, y2)
# yet another way
y = x
for G in Gs:
i, j, alpha, beta = G.shape
y = rearrange(y, 'i ... (j alpha) -> ... j (alpha i)', alpha=alpha, i=i)
y = y @ rearrange(G, 'i j alpha beta -> (alpha i) (j beta)')
y3 = y.reshape(-1)
assert numpy.allclose(y1, y3)
def test_pytorch_yolo_fragment():
if not any(b.framework_name == 'torch' for b in collect_test_backends(symbolic=False, layers=False)):
return
import torch
def old_way(input, num_classes, num_anchors, anchors, stride_h, stride_w):
# https://github.com/BobLiu20/YOLOv3_PyTorch/blob/c6b483743598b5f64d520d81e7e5f47ba936d4c9/nets/yolo_loss.py#L28-L44
bs = input.size(0)
in_h = input.size(2)
in_w = input.size(3)
scaled_anchors = [(a_w / stride_w, a_h / stride_h) for a_w, a_h in anchors]
prediction = input.view(bs, num_anchors,
5 + num_classes, in_h, in_w).permute(0, 1, 3, 4, 2).contiguous()
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# https://github.com/BobLiu20/YOLOv3_PyTorch/blob/c6b483743598b5f64d520d81e7e5f47ba936d4c9/nets/yolo_loss.py#L70-L92
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
# Calculate offsets for each grid
grid_x = torch.linspace(0, in_w - 1, in_w).repeat(in_w, 1).repeat(
bs * num_anchors, 1, 1).view(x.shape).type(FloatTensor)
grid_y = torch.linspace(0, in_h - 1, in_h).repeat(in_h, 1).t().repeat(
bs * num_anchors, 1, 1).view(y.shape).type(FloatTensor)
# Calculate anchor w, h
anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))
anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))
anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(w.shape)
anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(h.shape)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + grid_x
pred_boxes[..., 1] = y.data + grid_y
pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
# Results
_scale = torch.Tensor([stride_w, stride_h] * 2).type(FloatTensor)
output = torch.cat((pred_boxes.view(bs, -1, 4) * _scale,
conf.view(bs, -1, 1), pred_cls.view(bs, -1, num_classes)), -1)
return output
def new_way(input, num_classes, num_anchors, anchors, stride_h, stride_w):
raw_predictions = rearrange(input, ' b (anchor prediction) h w -> prediction b anchor h w', anchor=num_anchors)
anchors = torch.FloatTensor(anchors).to(input.device)
anchor_sizes = rearrange(anchors, 'anchor dim -> dim () anchor () ()')
_, _, _, in_h, in_w = raw_predictions.shape
grid_h = rearrange(torch.arange(in_h).float(), 'h -> () () h ()').to(input.device)
grid_w = rearrange(torch.arange(in_w).float(), 'w -> () () () w').to(input.device)
predicted_bboxes = torch.zeros_like(raw_predictions)
predicted_bboxes[0] = (raw_predictions[0].sigmoid() + grid_h) * stride_h # center y
predicted_bboxes[1] = (raw_predictions[1].sigmoid() + grid_w) * stride_w # center x
predicted_bboxes[2:4] = (raw_predictions[2:4].exp()) * anchor_sizes # bbox width and height
predicted_bboxes[4] = raw_predictions[4].sigmoid() # confidence
predicted_bboxes[5:] = raw_predictions[5:].sigmoid() # class predictions
# only to match results of original code, not needed
return rearrange(predicted_bboxes, 'prediction b anchor h w -> b anchor h w prediction')
stride_h = 4
stride_w = 4
batch_size = 5
num_classes = 12
anchors = [[50, 100], [100, 50], [75, 75]]
num_anchors = len(anchors)
input = torch.randn([batch_size, num_anchors * (5 + num_classes), 1, 1])
result1 = old_way(input=input, num_anchors=num_anchors, num_classes=num_classes,
stride_h=stride_h, stride_w=stride_w, anchors=anchors)
result2 = new_way(input=input, num_anchors=num_anchors, num_classes=num_classes,
stride_h=stride_h, stride_w=stride_w, anchors=anchors)
result1 = result1.reshape(result2.shape)
assert torch.allclose(result1, result2)
|
|
#!/usr/bin/env python
# -*- encoding: utf8 -*-
"""
Central interfaces for ``Pyadjoint``.
:copyright:
Lion Krischer ([email protected]), 2015
:license:
BSD 3-Clause ("BSD New" or "BSD Simplified")
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import inspect
import matplotlib.pylab as plt
import numpy as np
import obspy
import os
import pkgutil
import warnings
from . import PyadjointError, PyadjointWarning
class AdjointSource(object):
    # Dictionary of available adjoint sources. The key is the name, the value
    # a tuple of (function, verbose name, description, additional parameters).
_ad_srcs = {}
def __init__(self, adj_src_type, misfit, dt, min_period, max_period,
component, adjoint_source=None, network=None, station=None):
"""
Class representing an already calculated adjoint source.
:param adj_src_type: The type of adjoint source.
:type adj_src_type: str
:param misfit: The misfit value.
:type misfit: float
        :param dt: The sample spacing (time increment) of the adjoint source
            in seconds.
:type dt: float
:param min_period: The minimum period of the spectral content
of the data.
:type min_period: float
:param max_period: The maximum period of the spectral content
of the data.
:type max_period: float
:param component: The adjoint source component, usually ``"Z"``,
``"N"``, ``"E"``, ``"R"``, or ``"T"``.
:type component: str
:param adjoint_source: The actual adjoint source.
:type adjoint_source: :class:`numpy.ndarray`
:param network: The network code of the station.
:type network: str
:param station: The station code of the station.
:type station: str
"""
if adj_src_type not in self._ad_srcs:
raise ValueError("Unknown adjoint source type '%s'." %
adj_src_type)
self.adj_src_type = adj_src_type
self.adj_src_name = self._ad_srcs[adj_src_type][1]
self.misfit = misfit
self.dt = dt
self.min_period = min_period
self.max_period = max_period
self.component = component
self.network = network
self.station = station
self.adjoint_source = adjoint_source
def __str__(self):
if self.network and self.station:
station = " at station %s.%s" % (self.network, self.station)
else:
station = ""
if self.adjoint_source is not None:
adj_src_status = "available with %i samples" % (len(
self.adjoint_source))
else:
adj_src_status = "has not been calculated"
return (
"{name} Adjoint Source for component {component}{station}\n"
" Misfit: {misfit:.4g}\n"
" Adjoint source {adj_src_status}"
).format(
name=self.adj_src_name,
component=self.component,
station=station,
misfit=self.misfit,
adj_src_status=adj_src_status
)
def write(self, filename, format, **kwargs):
"""
Write the adjoint source to a file.
:param filename: Determines where the adjoint source is saved.
:type filename: str, open file, or file-like object
:param format: The format of the adjoint source. Currently available
are: ``"SPECFEM"``
:type format: str
.. rubric:: SPECFEM
SPECFEM requires one additional parameter: the temporal offset of the
first sample in seconds. The following example sets the time of the
first sample in the adjoint source to ``-10``.
>>> adj_src.write("NET.STA.CHAN.adj", format="SPECFEM",
... time_offset=-10) # doctest: +SKIP
"""
if self.adjoint_source is None:
raise ValueError("Can only write adjoint sources if the adjoint "
"source has been calculated.")
format = format.upper()
available_formats = ["SPECFEM"]
if format not in available_formats:
raise ValueError("format '%s' not known. Available formats: %s" %
(format, ", ".join(available_formats)))
if not hasattr(filename, "write"):
with open(filename, "wb") as fh:
self._write(fh, format=format, **kwargs)
else:
self._write(filename, format=format, **kwargs)
def _write(self, buf, format, **kwargs):
if format == "SPECFEM":
self._write_specfem(buf=buf, time_offset=kwargs["time_offset"])
else:
raise NotImplementedError
def _write_specfem(self, buf, time_offset):
"""
Write the adjoint source for SPECFEM.
"""
l = len(self.adjoint_source)
to_write = np.empty((l, 2))
to_write[:, 0] = np.linspace(0, (l - 1) * self.dt, l)
to_write[:, 0] += time_offset
# SPECFEM expects non-time reversed adjoint sources.
        to_write[:, 1] = self.adjoint_source[::-1]
np.savetxt(buf, to_write)
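        # The result is a plain two-column ASCII table: column 0 holds the time axis
        # time_offset + i * dt and column 1 the time-reversed adjoint source, which is
        # the layout SPECFEM reads for adjoint source (``*.adj``) files, as in the
        # doctest of write().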
def write_to_asdf(self, ds, time_offset, coordinates=None, **kwargs):
"""
Writes the adjoint source to an ASDF file.
        Note: for now it is assumed that SPECFEM will be the consumer of the
        adjoint source.
        :param ds: The ASDF data structure read in using pyasdf.
        :type ds: :class:`pyasdf.ASDFDataSet`
:param time_offset: The temporal offset of the first sample in seconds.
This is required if using the adjoint source as input to SPECFEM.
:type time_offset: float
:param coordinates: If given, the coordinates of the adjoint source.
The 'latitude', 'longitude', and 'elevation_in_m' of the adjoint
source must be defined.
        :type coordinates: dict
.. rubric:: SPECFEM
SPECFEM requires one additional parameter: the temporal offset of the
first sample in seconds. The following example sets the time of the
first sample in the adjoint source to ``-10``.
>>> adj_src.write_to_asdf(ds, time_offset=-10,
... coordinates={'latitude':19.2,
... 'longitude':13.4,
... 'elevation_in_m':2.0})
"""
# Import here to not have a global dependency on pyasdf
from pyasdf.exceptions import NoStationXMLForStation
# Convert the adjoint source to SPECFEM format
l = len(self.adjoint_source)
specfem_adj_source = np.empty((l, 2))
specfem_adj_source[:, 0] = np.linspace(0, (l - 1) * self.dt, l)
        specfem_adj_source[:, 0] += time_offset
specfem_adj_source[:, 1] = self.adjoint_source[::-1]
tag = "%s_%s_%s" % (self.network, self.station, self.component)
min_period = self.min_period
max_period = self.max_period
component = self.component
station_id = "%s.%s" % (self.network, self.station)
if coordinates:
# If given, all three coordinates must be present
if {"latitude", "longitude", "elevation_in_m"}.difference(
set(coordinates.keys())):
raise ValueError(
"'latitude', 'longitude', and 'elevation_in_m'"
" must be given")
else:
try:
coordinates = ds.waveforms[
"%s.%s" % (self.network, self.station)].coordinates
except NoStationXMLForStation:
raise ValueError("Coordinates must either be given "
"directly or already be part of the "
"ASDF file")
# Safeguard against funny types in the coordinates dictionary
latitude = float(coordinates["latitude"])
longitude = float(coordinates["longitude"])
elevation_in_m = float(coordinates["elevation_in_m"])
parameters = {"dt": self.dt, "misfit_value": self.misfit,
"adjoint_source_type": self.adj_src_type,
"min_period": min_period, "max_period": max_period,
"latitude": latitude, "longitude": longitude,
"elevation_in_m": elevation_in_m,
"station_id": station_id, "component": component,
"units": "m"}
# Use pyasdf to add auxiliary data to the ASDF file
ds.add_auxiliary_data(data=specfem_adj_source,
data_type="AdjointSource", path=tag,
parameters=parameters)
def calculate_adjoint_source(adj_src_type, observed, synthetic, min_period,
max_period, left_window_border,
right_window_border, adjoint_src=True,
plot=False, plot_filename=None, **kwargs):
"""
Central function of Pyadjoint used to calculate adjoint sources and misfit.
This function uses the notion of observed and synthetic data to offer a
nomenclature most users are familiar with. Please note that it is
nonetheless independent of what the two data arrays actually represent.
The function tapers the data from ``left_window_border`` to
``right_window_border``, both in seconds since the first sample in the
data arrays.
:param adj_src_type: The type of adjoint source to calculate.
:type adj_src_type: str
:param observed: The observed data.
:type observed: :class:`obspy.core.trace.Trace`
:param synthetic: The synthetic data.
:type synthetic: :class:`obspy.core.trace.Trace`
:param min_period: The minimum period of the spectral content of the data.
:type min_period: float
:param max_period: The maximum period of the spectral content of the data.
:type max_period: float
:param left_window_border: Left border of the window to be tapered in
seconds since the first sample in the data arrays.
:type left_window_border: float
:param right_window_border: Right border of the window to be tapered in
seconds since the first sample in the data arrays.
:type right_window_border: float
:param adjoint_src: Only calculate the misfit or also derive
the adjoint source.
:type adjoint_src: bool
:param plot: Also produce a plot of the adjoint source. This will force
the adjoint source to be calculated regardless of the value of
``adjoint_src``.
:type plot: bool or empty :class:`matplotlib.figure.Figure` instance
:param plot_filename: If given, the plot of the adjoint source will be
saved there. Only used if ``plot`` is ``True``.
:type plot_filename: str
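    .. rubric:: Example
    A minimal usage sketch; the adjoint source type and the window and period
    values are purely illustrative:
    >>> adj_src = calculate_adjoint_source(
    ...     adj_src_type="waveform_misfit", observed=observed,
    ...     synthetic=synthetic, min_period=20.0, max_period=100.0,
    ...     left_window_border=600.0,
    ...     right_window_border=900.0)  # doctest: +SKIP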
"""
observed, synthetic = _sanity_checks(observed, synthetic)
# Get number of samples now as the adjoint source calculation function
# are allowed to mess with the trace objects.
npts = observed.stats.npts
if adj_src_type not in AdjointSource._ad_srcs:
raise PyadjointError(
"Adjoint Source type '%s' is unknown. Available types: %s" % (
adj_src_type, ", ".join(
sorted(AdjointSource._ad_srcs.keys()))))
fct = AdjointSource._ad_srcs[adj_src_type][0]
if plot:
# The plot kwargs overwrites the adjoint_src kwarg.
adjoint_src = True
if plot is True:
figure = plt.figure(figsize=(12, 6))
else:
# Assume plot is a preexisting figure instance
figure = plot
else:
figure = None
try:
ret_val = fct(observed=observed, synthetic=synthetic,
min_period=min_period, max_period=max_period,
left_window_border=left_window_border,
right_window_border=right_window_border,
adjoint_src=adjoint_src, figure=figure, **kwargs)
if plot and plot_filename:
figure.savefig(plot_filename)
elif plot is True:
plt.show()
finally:
# Assure the figure is closed. Otherwise matplotlib will leak
# memory. If the figure has been created outside of Pyadjoint,
# it will not be closed.
if plot is True:
plt.close()
    # Get the misfit and warn if it is negative.
misfit = float(ret_val["misfit"])
if misfit < 0.0:
warnings.warn("The misfit value is negative. Be cautious!",
PyadjointWarning)
if adjoint_src and "adjoint_source" not in ret_val:
raise PyadjointError("The actual adjoint source was not calculated "
"by the underlying function although it was "
"requested.")
# Be very defensive. This assures future adjoint source types can be
# integrated smoothly.
if adjoint_src:
adjoint_source = ret_val["adjoint_source"]
# Raise if wrong type.
if not isinstance(adjoint_source, np.ndarray) or \
adjoint_source.dtype != np.float64:
raise PyadjointError("The adjoint source calculated by the "
"underlying function is no numpy array with "
"a `float64` dtype.")
if len(adjoint_source.shape) != 1:
raise PyadjointError(
"The underlying function returned at adjoint source with "
"shape %s. It must return a one-dimensional array." % str(
adjoint_source.shape))
if len(adjoint_source) != npts:
raise PyadjointError(
"The underlying function returned an adjoint source with %i "
"samples. It must return a function with %i samples which is "
"the sample count of the input data." % (
len(adjoint_source), npts))
# Make sure the data returned has no infs or NaNs.
if not np.isfinite(adjoint_source).all():
raise PyadjointError(
"The underlying function returned an adjoint source with "
"either NaNs or Inf values. This must not be.")
else:
adjoint_source = None
return AdjointSource(adj_src_type, misfit=misfit,
adjoint_source=adjoint_source,
dt=observed.stats.delta,
min_period=min_period, max_period=max_period,
network=observed.stats.network,
station=observed.stats.station,
component=observed.stats.channel[-1])
def _sanity_checks(observed, synthetic):
"""
Perform a number of basic sanity checks to assure the data is valid
in a certain sense.
It checks the types of both, the start time, sampling rate, number of
samples, ...
:param observed: The observed data.
:type observed: :class:`obspy.core.trace.Trace`
:param synthetic: The synthetic data.
:type synthetic: :class:`obspy.core.trace.Trace`
:raises: :class:`~pyadjoint.PyadjointError`
"""
if not isinstance(observed, obspy.Trace):
# Also accept Stream objects.
if isinstance(observed, obspy.Stream) and \
len(observed) == 1:
observed = observed[0]
else:
raise PyadjointError(
"Observed data must be an ObsPy Trace object.")
if not isinstance(synthetic, obspy.Trace):
if isinstance(synthetic, obspy.Stream) and \
len(synthetic) == 1:
synthetic = synthetic[0]
else:
raise PyadjointError(
"Synthetic data must be an ObsPy Trace object.")
if observed.stats.npts != synthetic.stats.npts:
raise PyadjointError("Observed and synthetic data must have the same "
"number of samples.")
sr1 = observed.stats.sampling_rate
sr2 = synthetic.stats.sampling_rate
if abs(sr1 - sr2) / sr1 >= 1E-5:
raise PyadjointError("Observed and synthetic data must have the same "
"sampling rate.")
# Make sure data and synthetics start within half a sample interval.
if abs(observed.stats.starttime - synthetic.stats.starttime) > \
observed.stats.delta * 0.5:
raise PyadjointError("Observed and synthetic data must have the same "
"starttime.")
ptp = sorted([observed.data.ptp(), synthetic.data.ptp()])
if ptp[1] / ptp[0] >= 5:
warnings.warn("The amplitude difference between data and "
"synthetic is fairly large.", PyadjointWarning)
# Also check the components of the data to avoid silly mistakes of
# users.
if len(set([observed.stats.channel[-1].upper(),
synthetic.stats.channel[-1].upper()])) != 1:
warnings.warn("The orientation code of synthetic and observed "
"data is not equal.")
observed = observed.copy()
synthetic = synthetic.copy()
observed.data = np.require(observed.data, dtype=np.float64,
requirements=["C"])
synthetic.data = np.require(synthetic.data, dtype=np.float64,
requirements=["C"])
return observed, synthetic
def _discover_adjoint_sources():
"""
Discovers the available adjoint sources. This should work no matter if
pyadjoint is checked out from git, packaged as .egg or for any other
possibility.
"""
from . import adjoint_source_types
AdjointSource._ad_srcs = {}
FCT_NAME = "calculate_adjoint_source"
NAME_ATTR = "VERBOSE_NAME"
DESC_ATTR = "DESCRIPTION"
ADD_ATTR = "ADDITIONAL_PARAMETERS"
path = os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())),
"adjoint_source_types")
for importer, modname, _ in pkgutil.iter_modules(
[path], prefix=adjoint_source_types.__name__ + "."):
m = importer.find_module(modname).load_module(modname)
if not hasattr(m, FCT_NAME):
continue
fct = getattr(m, FCT_NAME)
if not callable(fct):
continue
name = modname.split('.')[-1]
if not hasattr(m, NAME_ATTR):
raise PyadjointError(
"Adjoint source '%s' does not have a variable named %s." %
(name, NAME_ATTR))
if not hasattr(m, DESC_ATTR):
raise PyadjointError(
"Adjoint source '%s' does not have a variable named %s." %
(name, DESC_ATTR))
        # Add tuple of (function, verbose name, description, additional parameters).
AdjointSource._ad_srcs[name] = (
fct,
getattr(m, NAME_ATTR),
getattr(m, DESC_ATTR),
getattr(m, ADD_ATTR) if hasattr(m, ADD_ATTR) else None)
_discover_adjoint_sources()
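# A module dropped into ``adjoint_source_types`` is picked up by the discovery above
# as long as it defines the expected attributes. Minimal sketch of such a module
# (all names and strings below are illustrative only):
#
#     VERBOSE_NAME = "My Misfit"
#     DESCRIPTION = "One-line description of the misfit."
#     ADDITIONAL_PARAMETERS = ""  # optional
#
#     def calculate_adjoint_source(observed, synthetic, min_period, max_period,
#                                  left_window_border, right_window_border,
#                                  adjoint_src=True, figure=None, **kwargs):
#         # must return the misfit as a float and, if requested, a float64
#         # array with as many samples as the input data
#         return {"misfit": 0.0, "adjoint_source": np.zeros(observed.stats.npts)}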
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod, ABCMeta
from pyspark import since, keyword_only
from pyspark.ml.wrapper import JavaParams
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol, HasRawPredictionCol, \
HasFeaturesCol
from pyspark.ml.common import inherit_doc
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
__all__ = ['Evaluator', 'BinaryClassificationEvaluator', 'RegressionEvaluator',
'MulticlassClassificationEvaluator', 'ClusteringEvaluator']
@inherit_doc
class Evaluator(Params):
"""
Base class for evaluators that compute metrics from predictions.
.. versionadded:: 1.4.0
"""
__metaclass__ = ABCMeta
@abstractmethod
def _evaluate(self, dataset):
"""
Evaluates the output.
:param dataset: a dataset that contains labels/observations and
predictions
:return: metric
"""
raise NotImplementedError()
@since("1.4.0")
def evaluate(self, dataset, params=None):
"""
Evaluates the output with optional parameters.
:param dataset: a dataset that contains labels/observations and
predictions
:param params: an optional param map that overrides embedded
params
:return: metric
"""
if params is None:
params = dict()
if isinstance(params, dict):
if params:
return self.copy(params)._evaluate(dataset)
else:
return self._evaluate(dataset)
else:
raise ValueError("Params must be a param map but got %s." % type(params))
@since("1.5.0")
def isLargerBetter(self):
"""
Indicates whether the metric returned by :py:meth:`evaluate` should be maximized
(True, default) or minimized (False).
A given evaluator may support multiple metrics which may be maximized or minimized.
"""
return True
@inherit_doc
class JavaEvaluator(JavaParams, Evaluator):
"""
Base class for :py:class:`Evaluator`s that wrap Java/Scala
implementations.
"""
__metaclass__ = ABCMeta
def _evaluate(self, dataset):
"""
Evaluates the output.
:param dataset: a dataset that contains labels/observations and predictions.
:return: evaluation metric
"""
self._transfer_params_to_java()
return self._java_obj.evaluate(dataset._jdf)
def isLargerBetter(self):
self._transfer_params_to_java()
return self._java_obj.isLargerBetter()
@inherit_doc
class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for binary classification, which expects two input columns: rawPrediction and label.
The rawPrediction column can be of type double (binary 0/1 prediction, or probability of label
1) or of type vector (length-2 vector of raw predictions, scores, or label probabilities).
>>> from pyspark.ml.linalg import Vectors
>>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
... [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
>>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
...
>>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
>>> evaluator.evaluate(dataset)
0.70...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
0.83...
>>> bce_path = temp_path + "/bce"
>>> evaluator.save(bce_path)
>>> evaluator2 = BinaryClassificationEvaluator.load(bce_path)
>>> str(evaluator2.getRawPredictionCol())
'raw'
.. versionadded:: 1.4.0
"""
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation (areaUnderROC|areaUnderPR)",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
metricName="areaUnderROC"):
"""
__init__(self, rawPredictionCol="rawPrediction", labelCol="label", \
metricName="areaUnderROC")
"""
super(BinaryClassificationEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid)
self._setDefault(metricName="areaUnderROC")
kwargs = self._input_kwargs
self._set(**kwargs)
@since("1.4.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("1.4.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
@since("1.4.0")
def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
metricName="areaUnderROC"):
"""
setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \
metricName="areaUnderROC")
Sets params for binary classification evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for Regression, which expects two input
columns: prediction and label.
>>> scoreAndLabels = [(-28.98343821, -27.0), (20.21491975, 21.5),
... (-25.98418959, -22.0), (30.69731842, 33.0), (74.69283752, 71.0)]
>>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
...
>>> evaluator = RegressionEvaluator(predictionCol="raw")
>>> evaluator.evaluate(dataset)
2.842...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "r2"})
0.993...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "mae"})
2.649...
>>> re_path = temp_path + "/re"
>>> evaluator.save(re_path)
>>> evaluator2 = RegressionEvaluator.load(re_path)
>>> str(evaluator2.getPredictionCol())
'raw'
.. versionadded:: 1.4.0
"""
metricName = Param(Params._dummy(), "metricName",
"""metric name in evaluation - one of:
rmse - root mean squared error (default)
mse - mean squared error
r2 - r^2 metric
mae - mean absolute error.""",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, predictionCol="prediction", labelCol="label",
metricName="rmse"):
"""
__init__(self, predictionCol="prediction", labelCol="label", \
metricName="rmse")
"""
super(RegressionEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid)
self._setDefault(metricName="rmse")
kwargs = self._input_kwargs
self._set(**kwargs)
@since("1.4.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("1.4.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
@since("1.4.0")
def setParams(self, predictionCol="prediction", labelCol="label",
metricName="rmse"):
"""
setParams(self, predictionCol="prediction", labelCol="label", \
metricName="rmse")
Sets params for regression evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for Multiclass Classification, which expects two input
columns: prediction and label.
>>> scoreAndLabels = [(0.0, 0.0), (0.0, 1.0), (0.0, 0.0),
... (1.0, 0.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)]
>>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"])
...
>>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
>>> evaluator.evaluate(dataset)
0.66...
>>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
0.66...
>>> mce_path = temp_path + "/mce"
>>> evaluator.save(mce_path)
>>> evaluator2 = MulticlassClassificationEvaluator.load(mce_path)
>>> str(evaluator2.getPredictionCol())
'prediction'
.. versionadded:: 1.5.0
"""
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation "
"(f1|weightedPrecision|weightedRecall|accuracy)",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, predictionCol="prediction", labelCol="label",
metricName="f1"):
"""
__init__(self, predictionCol="prediction", labelCol="label", \
metricName="f1")
"""
super(MulticlassClassificationEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator", self.uid)
self._setDefault(metricName="f1")
kwargs = self._input_kwargs
self._set(**kwargs)
@since("1.5.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("1.5.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
@since("1.5.0")
def setParams(self, predictionCol="prediction", labelCol="label",
metricName="f1"):
"""
setParams(self, predictionCol="prediction", labelCol="label", \
metricName="f1")
Sets params for multiclass classification evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class ClusteringEvaluator(JavaEvaluator, HasPredictionCol, HasFeaturesCol,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Evaluator for Clustering results, which expects two input
columns: prediction and features. The metric computes the Silhouette
measure using the squared Euclidean distance.
The Silhouette is a measure for the validation of the consistency
within clusters. It ranges between 1 and -1, where a value close to
1 means that the points in a cluster are close to the other points
in the same cluster and far from the points of the other clusters.
>>> from pyspark.ml.linalg import Vectors
>>> featureAndPredictions = map(lambda x: (Vectors.dense(x[0]), x[1]),
... [([0.0, 0.5], 0.0), ([0.5, 0.0], 0.0), ([10.0, 11.0], 1.0),
... ([10.5, 11.5], 1.0), ([1.0, 1.0], 0.0), ([8.0, 6.0], 1.0)])
>>> dataset = spark.createDataFrame(featureAndPredictions, ["features", "prediction"])
...
>>> evaluator = ClusteringEvaluator(predictionCol="prediction")
>>> evaluator.evaluate(dataset)
0.9079...
>>> ce_path = temp_path + "/ce"
>>> evaluator.save(ce_path)
>>> evaluator2 = ClusteringEvaluator.load(ce_path)
>>> str(evaluator2.getPredictionCol())
'prediction'
.. versionadded:: 2.3.0
"""
metricName = Param(Params._dummy(), "metricName",
"metric name in evaluation (silhouette)",
typeConverter=TypeConverters.toString)
distanceMeasure = Param(Params._dummy(), "distanceMeasure", "The distance measure. " +
"Supported options: 'squaredEuclidean' and 'cosine'.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, predictionCol="prediction", featuresCol="features",
metricName="silhouette", distanceMeasure="squaredEuclidean"):
"""
__init__(self, predictionCol="prediction", featuresCol="features", \
metricName="silhouette", distanceMeasure="squaredEuclidean")
"""
super(ClusteringEvaluator, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.evaluation.ClusteringEvaluator", self.uid)
self._setDefault(metricName="silhouette", distanceMeasure="squaredEuclidean")
kwargs = self._input_kwargs
self._set(**kwargs)
@since("2.3.0")
def setMetricName(self, value):
"""
Sets the value of :py:attr:`metricName`.
"""
return self._set(metricName=value)
@since("2.3.0")
def getMetricName(self):
"""
Gets the value of metricName or its default value.
"""
return self.getOrDefault(self.metricName)
@keyword_only
@since("2.3.0")
def setParams(self, predictionCol="prediction", featuresCol="features",
metricName="silhouette", distanceMeasure="squaredEuclidean"):
"""
setParams(self, predictionCol="prediction", featuresCol="features", \
metricName="silhouette", distanceMeasure="squaredEuclidean")
Sets params for clustering evaluator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
        Gets the value of :py:attr:`distanceMeasure` or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
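    # Illustrative usage of the param above (not part of the doctests):
    #   ClusteringEvaluator(distanceMeasure="cosine").getDistanceMeasure() == 'cosine'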
if __name__ == "__main__":
import doctest
import tempfile
import pyspark.ml.evaluation
from pyspark.sql import SparkSession
globs = pyspark.ml.evaluation.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.evaluation tests")\
.getOrCreate()
globs['spark'] = spark
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim
arg_scope = contrib_framework.arg_scope
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
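# Worked example: calc_reduction_layers(num_cells=18, num_reduction_layers=2)
# returns [6, 12], i.e. reductions sit at roughly 1/3 and 2/3 of the network depth.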
@contrib_framework.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@contrib_framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@contrib_framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(x, [1, 2])
else:
return tf.reduce_mean(x, [2, 3])
@contrib_framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert output_filters % 2 == 0, (
'Need even number of filters when using this factorized reduction.')
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.nn.avg_pool(
net, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path2 = slim.conv2d(path2, int(output_filters / 2), 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
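# Shape sketch (NHWC, illustrative sizes): an input of [N, 32, 32, 64] with stride=2
# and output_filters=128 comes out as [N, 16, 16, 128]; each skip path average-pools
# with stride 2 and projects to output_filters / 2 channels, and the two paths are
# concatenated along the channel axis before the final batch norm.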
@contrib_framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(net)[0]
noise_shape = [batch_size, 1, 1, 1]
keep_prob = tf.cast(keep_prob, dtype=net.dtype)
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=net.dtype)
binary_tensor = tf.floor(random_tensor)
net = tf.div(net, keep_prob) * binary_tensor
return net
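# Numerical intuition for drop_path (illustrative): with keep_prob=0.8 the mask
# floor(0.8 + U[0, 1)) equals 1 with probability 0.8 and 0 otherwise, and surviving
# examples are rescaled by 1 / 0.8 so the expected activation stays unchanged.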
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
  An example would be 'separable_3x3_4' -> (4, 3).
Args:
operation: String that corresponds to convolution operation.
Returns:
    Tuple of (num layers, filter shape).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
net_type = net.dtype
net = tf.cast(net, tf.float32) if net_type == tf.float16 else net
for layer_num in range(num_layers - 1):
net = tf.nn.relu(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = tf.nn.relu(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
net = tf.cast(net, net_type)
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
def _pooling(net, stride, operation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
return net
class NasNetABaseCell(object): # pylint: disable=g-classes-have-attributes
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
# Set the prev layer to the current layer if it is none
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
if curr_filter_shape != prev_filter_shape:
prev_layer = tf.nn.relu(prev_layer)
prev_layer = factorized_reduction(prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = tf.nn.relu(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
# Check to be sure prev layer stuff is setup correctly
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
split_axis = get_channel_index()
net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
for split in net:
      assert int(split.shape[split_axis]) == int(
          self._num_conv_filters * self._filter_scaling)
net.append(prev_layer)
return net
def __call__(self,
net,
scope=None,
filter_scaling=1,
stride=1,
prev_layer=None,
cell_num=-1):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i], self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i + 1]
i += 2
# Apply conv operations
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left, stride,
original_input_left)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right, stride,
original_input_right)
# Combine hidden states using 'add'.
with tf.variable_scope('combine'):
h = h1 + h2
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation, stride,
is_from_original_input):
"""Applies the predicted conv operation to net."""
# Dont stride if this is not one of the original hiddenstates
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size)
elif operation in ['none']:
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
elif 'pool' in operation:
net = _pooling(net, stride, operation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(net[idx], final_num_filters, stride)
states_to_combine = ([
h for h, is_used in zip(net, used_hiddenstates) if not is_used
])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@contrib_framework.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self,
net,
current_step=None,
use_summaries=True,
drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
defaults to tf.train.get_or_create_global_step() properly casted.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
# The added 2 is for the reduction cells
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1) / float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
if not current_step:
current_step = tf.cast(tf.train.get_or_create_global_step(),
tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
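  # Worked example of the schedule above (illustrative numbers): with
  # drop_path_keep_prob=0.6, the sixth of twelve cells gives layer_ratio=0.5 and a
  # layer-scaled keep prob of 1 - 0.5 * 0.4 = 0.8; halfway through training
  # (current_ratio=0.5) the effective keep prob is 1 - 0.5 * 0.2 = 0.9, decreasing
  # toward 0.8 as training completes.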
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps):
operations = [
'separable_5x5_2', 'separable_3x3_2', 'separable_5x5_2',
'separable_3x3_2', 'avg_pool_3x3', 'none', 'avg_pool_3x3',
'avg_pool_3x3', 'separable_3x3_2', 'none'
]
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps):
operations = [
'separable_5x5_2', 'separable_7x7_2', 'max_pool_3x3', 'separable_7x7_2',
'avg_pool_3x3', 'separable_5x5_2', 'none', 'avg_pool_3x3',
'separable_3x3_2', 'max_pool_3x3'
]
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps)
|
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sys
import re
import os
import time
import signal
from twisted.trial import unittest
from twisted.internet import task, defer, reactor
from twisted.python import runtime, util, log
from buildslave.test.util.misc import nl, BasedirMixin
from buildslave.test.util import compat
from buildslave.test.fake.slavebuilder import FakeSlaveBuilder
from buildslave.exceptions import AbandonChain
from buildslave import runprocess
def stdoutCommand(output):
return [sys.executable, '-c', 'import sys; sys.stdout.write("%s\\n")' % output]
def stderrCommand(output):
return [sys.executable, '-c', 'import sys; sys.stderr.write("%s\\n")' % output]
def sleepCommand(dur):
return [sys.executable, '-c', 'import time; time.sleep(%d)' % dur]
def scriptCommand(function, *args):
runprocess_scripts = util.sibpath(__file__, 'runprocess-scripts.py')
return [sys.executable, runprocess_scripts, function] + list(args)
# windows returns rc 1, because exit status cannot indicate "signalled";
# posix returns rc -1 for "signalled"
FATAL_RC = -1
if runtime.platformType == 'win32':
FATAL_RC = 1
# We would like to see debugging output in the test.log
runprocess.RunProcessPP.debug = True
class TestRunProcess(BasedirMixin, unittest.TestCase):
def setUp(self):
self.setUpBasedir()
def tearDown(self):
self.tearDownBasedir()
def testStart(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir)
d = s.start()
def check(ign):
self.failUnless({'stdout': nl('hello\n')} in b.updates, b.show())
self.failUnless({'rc': 0} in b.updates, b.show())
d.addCallback(check)
return d
def testNoStdout(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir, sendStdout=False)
d = s.start()
def check(ign):
self.failIf({'stdout': nl('hello\n')} in b.updates, b.show())
self.failUnless({'rc': 0} in b.updates, b.show())
d.addCallback(check)
return d
def testKeepStdout(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir, keepStdout=True)
d = s.start()
def check(ign):
self.failUnless({'stdout': nl('hello\n')} in b.updates, b.show())
self.failUnless({'rc': 0} in b.updates, b.show())
self.failUnlessEquals(s.stdout, nl('hello\n'))
d.addCallback(check)
return d
def testStderr(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stderrCommand("hello"), self.basedir)
d = s.start()
def check(ign):
self.failIf({'stderr': nl('hello\n')} not in b.updates, b.show())
self.failUnless({'rc': 0} in b.updates, b.show())
d.addCallback(check)
return d
def testNoStderr(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stderrCommand("hello"), self.basedir, sendStderr=False)
d = s.start()
def check(ign):
self.failIf({'stderr': nl('hello\n')} in b.updates, b.show())
self.failUnless({'rc': 0} in b.updates, b.show())
d.addCallback(check)
return d
def testKeepStderr(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stderrCommand("hello"), self.basedir, keepStderr=True)
d = s.start()
def check(ign):
self.failUnless({'stderr': nl('hello\n')} in b.updates, b.show())
self.failUnless({'rc': 0} in b.updates, b.show())
self.failUnlessEquals(s.stderr, nl('hello\n'))
d.addCallback(check)
return d
def testStringCommand(self):
b = FakeSlaveBuilder(False, self.basedir)
# careful! This command must execute the same on windows and UNIX
s = runprocess.RunProcess(b, 'echo hello', self.basedir)
d = s.start()
def check(ign):
self.failUnless({'stdout': nl('hello\n')} in b.updates, b.show())
self.failUnless({'rc': 0} in b.updates, b.show())
d.addCallback(check)
return d
def testCommandTimeout(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, sleepCommand(10), self.basedir, timeout=5)
clock = task.Clock()
s._reactor = clock
d = s.start()
def check(ign):
self.failUnless({'stdout': nl('hello\n')} not in b.updates, b.show())
self.failUnless({'rc': FATAL_RC} in b.updates, b.show())
d.addCallback(check)
clock.advance(6)
return d
def testCommandMaxTime(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, sleepCommand(10), self.basedir, maxTime=5)
clock = task.Clock()
s._reactor = clock
d = s.start()
def check(ign):
self.failUnless({'stdout': nl('hello\n')} not in b.updates, b.show())
self.failUnless({'rc': FATAL_RC} in b.updates, b.show())
d.addCallback(check)
clock.advance(6) # should knock out maxTime
return d
def test_stdin_closed(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b,
scriptCommand('assert_stdin_closed'),
self.basedir,
usePTY=False, # if usePTY=True, stdin is never closed
logEnviron=False)
d = s.start()
def check(ign):
self.failUnless({'rc': 0} in b.updates, b.show())
d.addCallback(check)
return d
if runtime.platformType != "posix":
test_stdin_closed.skip = "not a POSIX platform"
@compat.usesFlushLoggedErrors
def testBadCommand(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, ['command_that_doesnt_exist.exe'], self.basedir)
s.workdir = 1 # cause an exception
d = s.start()
def check(err):
err.trap(AbandonChain)
stderr = []
# Here we're checking that the exception starting up the command
            # actually gets propagated back to the master.
for u in b.updates:
if 'stderr' in u:
stderr.append(u['stderr'])
stderr = "".join(stderr)
self.failUnless("TypeError" in stderr, stderr)
d.addBoth(check)
d.addBoth(lambda _ : self.flushLoggedErrors())
return d
def testLogEnviron(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir,
environ={"FOO": "BAR"})
d = s.start()
def check(ign):
headers = "".join([update.values()[0] for update in b.updates if update.keys() == ["header"] ])
self.failUnless("FOO=BAR" in headers, "got:\n" + headers)
d.addCallback(check)
return d
def testNoLogEnviron(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir,
environ={"FOO": "BAR"}, logEnviron=False)
d = s.start()
def check(ign):
headers = "".join([update.values()[0] for update in b.updates if update.keys() == ["header"] ])
self.failUnless("FOO=BAR" not in headers, "got:\n" + headers)
d.addCallback(check)
return d
def testEnvironExpandVar(self):
b = FakeSlaveBuilder(False, self.basedir)
environ = {"EXPND": "-${PATH}-",
"DOESNT_EXPAND": "-${---}-",
"DOESNT_FIND": "-${DOESNT_EXISTS}-"}
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir, environ=environ)
d = s.start()
def check(ign):
headers = "".join([update.values()[0] for update in b.updates if update.keys() == ["header"] ])
self.failUnless("EXPND=-$" not in headers, "got:\n" + headers)
self.failUnless("DOESNT_FIND=--" in headers, "got:\n" + headers)
self.failUnless("DOESNT_EXPAND=-${---}-" in headers, "got:\n" + headers)
d.addCallback(check)
return d
def testUnsetEnvironVar(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir,
environ={"PATH":None})
d = s.start()
def check(ign):
headers = "".join([update.values()[0] for update in b.updates if update.keys() == ["header"] ])
            self.failUnless(not re.search(r'\bPATH=', headers), "got:\n" + headers)
d.addCallback(check)
return d
class TestPOSIXKilling(BasedirMixin, unittest.TestCase):
if runtime.platformType != "posix":
skip = "not a POSIX platform"
def setUp(self):
self.pidfiles = []
self.setUpBasedir()
def tearDown(self):
# make sure all of the subprocesses are dead
for pidfile in self.pidfiles:
if not os.path.exists(pidfile): continue
pid = open(pidfile).read()
if not pid: return
pid = int(pid)
try: os.kill(pid, signal.SIGKILL)
except OSError: pass
# and clean up leftover pidfiles
for pidfile in self.pidfiles:
if os.path.exists(pidfile):
os.unlink(pidfile)
self.tearDownBasedir()
def newPidfile(self):
pidfile = os.path.abspath("test-%d.pid" % len(self.pidfiles))
if os.path.exists(pidfile):
os.unlink(pidfile)
self.pidfiles.append(pidfile)
return pidfile
def waitForPidfile(self, pidfile):
# wait for a pidfile, and return the pid via a Deferred
until = time.time() + 10
d = defer.Deferred()
def poll():
if reactor.seconds() > until:
d.errback(RuntimeError("pidfile %s never appeared" % pidfile))
return
if os.path.exists(pidfile):
try:
pid = int(open(pidfile).read())
except:
pid = None
if pid is not None:
d.callback(pid)
return
reactor.callLater(0.01, poll)
poll() # poll right away
return d
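    # Illustrative usage (a sketch, not part of the original file):
    # waitForPidfile polls via reactor.callLater instead of blocking, so a
    # test chains on the returned Deferred, e.g.:
    #
    #   pidfile_d = self.waitForPidfile(pidfile)
    #   pidfile_d.addCallback(lambda pid: self.assertAlive(pid))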
def assertAlive(self, pid):
try:
os.kill(pid, 0)
except OSError:
self.fail("pid %d still alive" % (pid,))
def assertDead(self, pid, timeout=5):
log.msg("checking pid %r" % (pid,))
def check():
try:
os.kill(pid, 0)
except OSError:
return True # dead
return False # alive
# check immediately
if check(): return
        # poll every hundredth of a second; this allows us to test for
# processes that have been killed, but where the signal hasn't
# been delivered yet
until = time.time() + timeout
while time.time() < until:
time.sleep(0.01)
if check():
return
self.fail("pid %d still alive after %ds" % (pid, timeout))
# tests
def test_simple(self):
# test a simple process that just sleeps waiting to die
pidfile = self.newPidfile()
self.pid = None
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b,
scriptCommand('write_pidfile_and_sleep', pidfile),
self.basedir)
runproc_d = s.start()
pidfile_d = self.waitForPidfile(pidfile)
def check_alive(pid):
self.pid = pid # for use in check_dead
# test that the process is still alive
self.assertAlive(pid)
# and tell the RunProcess object to kill it
s.kill("diaf")
pidfile_d.addCallback(check_alive)
def check_dead(_):
self.assertDead(self.pid)
runproc_d.addCallback(check_dead)
return defer.gatherResults([pidfile_d, runproc_d])
def test_pgroup_usePTY(self):
return self.do_test_pgroup(usePTY=True)
def test_pgroup_no_usePTY(self):
return self.do_test_pgroup(usePTY=False)
def test_pgroup_no_usePTY_no_pgroup(self):
# note that this configuration is not *used*, but that it is
# still supported, and correctly fails to kill the child process
return self.do_test_pgroup(usePTY=False, useProcGroup=False,
expectChildSurvival=True)
def do_test_pgroup(self, usePTY, useProcGroup=True,
expectChildSurvival=False):
# test that a process group gets killed
parent_pidfile = self.newPidfile()
self.parent_pid = None
child_pidfile = self.newPidfile()
self.child_pid = None
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b,
scriptCommand('spawn_child', parent_pidfile, child_pidfile),
self.basedir,
usePTY=usePTY,
useProcGroup=useProcGroup)
runproc_d = s.start()
# wait for both processes to start up, then call s.kill
parent_pidfile_d = self.waitForPidfile(parent_pidfile)
child_pidfile_d = self.waitForPidfile(child_pidfile)
pidfiles_d = defer.gatherResults([parent_pidfile_d, child_pidfile_d])
def got_pids(pids):
self.parent_pid, self.child_pid = pids
pidfiles_d.addCallback(got_pids)
def kill(_):
s.kill("diaf")
pidfiles_d.addCallback(kill)
# check that both processes are dead after RunProcess is done
d = defer.gatherResults([pidfiles_d, runproc_d])
def check_dead(_):
self.assertDead(self.parent_pid)
if expectChildSurvival:
self.assertAlive(self.child_pid)
else:
self.assertDead(self.child_pid)
d.addCallback(check_dead)
return d
def test_double_fork_usePTY(self):
return self.do_test_double_fork(usePTY=True)
def test_double_fork_no_usePTY(self):
return self.do_test_double_fork(usePTY=False)
def test_double_fork_no_usePTY_no_pgroup(self):
# note that this configuration is not *used*, but that it is
# still supported, and correctly fails to kill the child process
return self.do_test_double_fork(usePTY=False, useProcGroup=False,
expectChildSurvival=True)
def do_test_double_fork(self, usePTY, useProcGroup=True,
expectChildSurvival=False):
# when a spawned process spawns another process, and then dies itself
# (either intentionally or accidentally), we should be able to clean up
# the child.
parent_pidfile = self.newPidfile()
self.parent_pid = None
child_pidfile = self.newPidfile()
self.child_pid = None
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b,
scriptCommand('double_fork', parent_pidfile, child_pidfile),
self.basedir,
usePTY=usePTY,
useProcGroup=useProcGroup)
runproc_d = s.start()
# wait for both processes to start up, then call s.kill
parent_pidfile_d = self.waitForPidfile(parent_pidfile)
child_pidfile_d = self.waitForPidfile(child_pidfile)
pidfiles_d = defer.gatherResults([parent_pidfile_d, child_pidfile_d])
def got_pids(pids):
self.parent_pid, self.child_pid = pids
pidfiles_d.addCallback(got_pids)
def kill(_):
s.kill("diaf")
pidfiles_d.addCallback(kill)
# check that both processes are dead after RunProcess is done
d = defer.gatherResults([pidfiles_d, runproc_d])
def check_dead(_):
self.assertDead(self.parent_pid)
if expectChildSurvival:
self.assertAlive(self.child_pid)
else:
self.assertDead(self.child_pid)
d.addCallback(check_dead)
return d
class TestLogging(BasedirMixin, unittest.TestCase):
def setUp(self):
self.setUpBasedir()
def tearDown(self):
self.tearDownBasedir()
def testSendStatus(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir)
s.sendStatus({'stdout': nl('hello\n')})
self.failUnlessEqual(b.updates, [{'stdout': nl('hello\n')}], b.show())
def testSendBuffered(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir)
s._addToBuffers('stdout', 'hello ')
s._addToBuffers('stdout', 'world')
s._sendBuffers()
self.failUnlessEqual(b.updates, [{'stdout': 'hello world'}], b.show())
def testSendBufferedInterleaved(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir)
s._addToBuffers('stdout', 'hello ')
s._addToBuffers('stderr', 'DIEEEEEEE')
s._addToBuffers('stdout', 'world')
s._sendBuffers()
self.failUnlessEqual(b.updates, [
{'stdout': 'hello '},
{'stderr': 'DIEEEEEEE'},
{'stdout': 'world'},
])
def testSendChunked(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir)
data = "x" * (runprocess.RunProcess.CHUNK_LIMIT * 3 / 2)
s._addToBuffers('stdout', data)
s._sendBuffers()
self.failUnlessEqual(len(b.updates), 2)
def testSendNotimeout(self):
b = FakeSlaveBuilder(False, self.basedir)
s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir)
data = "x" * (runprocess.RunProcess.BUFFER_SIZE + 1)
s._addToBuffers('stdout', data)
self.failUnlessEqual(len(b.updates), 1)
class TestLogFileWatcher(BasedirMixin, unittest.TestCase):
def setUp(self):
self.setUpBasedir()
def tearDown(self):
self.tearDownBasedir()
def makeRP(self):
b = FakeSlaveBuilder(False, self.basedir)
rp = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir)
return rp
def test_statFile_missing(self):
rp = self.makeRP()
if os.path.exists('statfile.log'):
os.remove('statfile.log')
lf = runprocess.LogFileWatcher(rp, 'test', 'statfile.log', False)
self.assertFalse(lf.statFile(), "statfile.log doesn't exist")
def test_statFile_exists(self):
rp = self.makeRP()
        with open('statfile.log', 'w') as f:
            f.write('hi')
lf = runprocess.LogFileWatcher(rp, 'test', 'statfile.log', False)
st = lf.statFile()
self.assertEqual(st and st[2], 2, "statfile.log exists and size is correct")
os.remove('statfile.log')
# orm/strategies.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from sqlalchemy import exc as sa_exc
from sqlalchemy import sql, util, log, event
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql import visitors
from sqlalchemy.orm import attributes, interfaces, exc as orm_exc
from sqlalchemy.orm.mapper import _none_set
from sqlalchemy.orm.interfaces import (
LoaderStrategy, StrategizedOption, MapperOption, PropertyOption,
StrategizedProperty
)
from sqlalchemy.orm import session as sessionlib, unitofwork
from sqlalchemy.orm import util as mapperutil
from sqlalchemy.orm.query import Query
import itertools
def _register_attribute(strategy, mapper, useobject,
compare_function=None,
typecallable=None,
copy_function=None,
mutable_scalars=False,
uselist=False,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
prop = strategy.parent_property
attribute_ext = list(util.to_list(prop.extension, default=[]))
listen_hooks = []
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, include_removes = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: mapperutil._validator_events(desc,
prop.key, fn, include_removes)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
backref = kw.pop('backref', None)
if backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(desc,
backref,
uselist)
)
for m in mapper.self_and_descendants:
if prop is m._props.get(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
mutable_scalars=mutable_scalars,
uselist=uselist,
copy_function=copy_function,
compare_function=compare_function,
useobject=useobject,
extension=attribute_ext,
trackparent=useobject and (prop.single_parent or prop.direction is interfaces.ONETOMANY),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
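# Illustrative note (a sketch, not part of the original source): every loader
# strategy's init_class_attribute() funnels through _register_attribute() above.
# For example, ColumnLoader passes useobject=False along with the column type's
# compare/copy functions, while LazyLoader passes useobject=True and
# callable_=self._load_for_state so that attribute access triggers a lazy load.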
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent the a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
def init(self):
self.columns = self.parent_property.columns
def setup_query(self, context, entity, path, reduced_path, adapter,
column_collection=None, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(self, context, path, reduced_path, mapper, row, adapter):
return None, None, None
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
def init(self):
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, 'composite_class')
def setup_query(self, context, entity, path, reduced_path,
adapter, column_collection, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = self.parent_property.active_history or \
self.columns[0].primary_key
_register_attribute(self, mapper, useobject=False,
compare_function=coltype.compare_values,
copy_function=coltype.copy_value,
mutable_scalars=self.columns[0].type.is_mutable(),
active_history = active_history
)
def create_row_processor(self, context, path, reduced_path,
mapper, row, adapter):
key = self.key
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
if col is not None and col in row:
def fetch_col(state, dict_, row):
dict_[key] = row[col]
return fetch_col, None, None
else:
def expire_for_non_present_col(state, dict_, row):
state.expire_attribute_pre_commit(dict_, key)
return expire_for_non_present_col, None, None
log.class_logger(ColumnLoader)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
def create_row_processor(self, context, path, reduced_path, mapper, row, adapter):
col = self.columns[0]
if adapter:
col = adapter.columns[col]
key = self.key
if col in row:
return self.parent_property._get_strategy(ColumnLoader).\
create_row_processor(
context, path, reduced_path, mapper, row, adapter)
elif not self.is_class_level:
def set_deferred_for_local_state(state, dict_, row):
state.set_callable(dict_, key, LoadDeferredColumns(state, key))
return set_deferred_for_local_state, None, None
else:
def reset_col_for_deferred(state, dict_, row):
# reset state on the key so that deferred callables
# fire off on next access.
state.reset(dict_, key)
return reset_col_for_deferred, None, None
def init(self):
if hasattr(self.parent_property, 'composite_class'):
raise NotImplementedError("Deferred loading for composite "
"types not implemented yet")
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(self, mapper, useobject=False,
compare_function=self.columns[0].type.compare_values,
copy_function=self.columns[0].type.copy_value,
mutable_scalars=self.columns[0].type.is_mutable(),
callable_=self._load_for_state,
expire_missing=False
)
def setup_query(self, context, entity, path, reduced_path, adapter,
only_load_props=None, **kwargs):
if (
self.group is not None and
context.attributes.get(('undefer', self.group), False)
) or (only_load_props and self.key in only_load_props):
self.parent_property._get_strategy(ColumnLoader).\
setup_query(context, entity,
path, reduced_path, adapter, **kwargs)
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key for p in
localparent.iterate_properties
if isinstance(p, StrategizedProperty) and
isinstance(p.strategy, DeferredColumnLoader) and
p.group==self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = sessionlib._state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed" %
(mapperutil.state_str(state), self.key)
)
query = session.query(localparent)
if query._load_on_ident(state.key,
only_load_props=group, refresh_state=state) is None:
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
log.class_logger(DeferredColumnLoader)
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, state, key):
self.state = state
self.key = key
def __call__(self, passive=attributes.PASSIVE_OFF):
state, key = self.state, self.key
localparent = state.manager.mapper
prop = localparent._props[key]
strategy = prop._strategies[DeferredColumnLoader]
return strategy._load_for_state(state, passive)
class DeferredOption(StrategizedOption):
propagate_to_loaders = True
def __init__(self, key, defer=False):
super(DeferredOption, self).__init__(key)
self.defer = defer
def get_strategy_class(self):
if self.defer:
return DeferredColumnLoader
else:
return ColumnLoader
class UndeferGroupOption(MapperOption):
propagate_to_loaders = True
def __init__(self, group):
self.group = group
def process_query(self, query):
query._attributes[('undefer', self.group)] = True
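# Illustrative sketch (not part of the original module): DeferredOption and
# UndeferGroupOption back the public defer()/undefer()/undefer_group() query
# options.  Assuming a mapped class ``User`` with a large deferred ``bio``
# column (names chosen purely for illustration):
#
#   from sqlalchemy.orm import defer, undefer_group
#
#   session.query(User).options(defer('bio'))           # -> DeferredColumnLoader
#   session.query(User).options(undefer_group('docs'))  # -> sets ('undefer', 'docs')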
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
def init(self):
self.mapper = self.parent_property.mapper
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(self, mapper,
useobject=True,
uselist=self.parent_property.uselist,
typecallable = self.parent_property.collection_class,
)
def create_row_processor(self, context, path, reduced_path, mapper, row, adapter):
def invoke_no_load(state, dict_, row):
state.initialize(self.key)
return invoke_no_load, None, None
log.class_logger(NoLoader)
class LazyLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
def init(self):
super(LazyLoader, self).init()
self._lazywhere, \
self._bind_to_col, \
self._equated_columns = self._create_lazy_clause(self.parent_property)
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns = self._create_lazy_clause(
self.parent_property,
reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
#from sqlalchemy.orm import query
self.use_get = not self.uselist and \
self.mapper._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns
)
if self.use_get:
for col in self._equated_columns.keys():
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info("%s will use query.get() to "
"optimize instance loads" % self)
def init_class_attribute(self, mapper):
self.is_class_level = True
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(self,
mapper,
useobject=True,
callable_=self._load_for_state,
uselist = self.parent_property.uselist,
backref = self.parent_property.back_populates,
typecallable = self.parent_property.collection_class,
active_history = \
self.parent_property.active_history or \
self.parent_property.direction is not \
interfaces.MANYTOONE or \
not self.use_get,
)
def lazy_clause(self, state, reverse_direction=False,
alias_secondary=False,
adapt_source=None):
if state is None:
return self._lazy_none_clause(
reverse_direction,
adapt_source=adapt_source)
if not reverse_direction:
criterion, bind_to_col, rev = \
self._lazywhere, \
self._bind_to_col, \
self._equated_columns
else:
criterion, bind_to_col, rev = \
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns
if reverse_direction:
mapper = self.parent_property.mapper
else:
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
# use the "committed state" only if we're in a flush
# for this state.
sess = sessionlib._state_session(state)
if sess is not None and sess._flushing:
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = \
lambda: mapper._get_committed_state_attr_by_column(
state, dict_,
bind_to_col[bindparam._identifying_key])
else:
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = \
lambda: mapper._get_state_attr_by_column(
state, dict_,
bind_to_col[bindparam._identifying_key])
if self.parent_property.secondary is not None and alias_secondary:
criterion = sql_util.ClauseAdapter(
self.parent_property.secondary.alias()).\
traverse(criterion)
criterion = visitors.cloned_traverse(
criterion, {}, {'bindparam':visit_bindparam})
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col, rev = \
self._lazywhere, \
self._bind_to_col,\
self._equated_columns
else:
criterion, bind_to_col, rev = \
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns
criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _load_for_state(self, state, passive):
if not state.key and \
(not self.parent_property.load_on_pending or not state.session_id):
return attributes.ATTR_EMPTY
pending = not state.key
ident_key = None
if (
(passive is attributes.PASSIVE_NO_FETCH or \
passive is attributes.PASSIVE_NO_FETCH_RELATED) and
not self.use_get
) or (
passive is attributes.PASSIVE_ONLY_PERSISTENT and
pending
):
return attributes.PASSIVE_NO_RESULT
session = sessionlib._state_session(state)
if not session:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed" %
(mapperutil.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
ident = self._get_ident_for_use_get(
session,
state,
passive
)
if attributes.PASSIVE_NO_RESULT in ident:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in ident:
return attributes.NEVER_SET
if _none_set.issuperset(ident):
return None
ident_key = self.mapper.identity_key_from_primary_key(ident)
instance = Query._get_from_identity(session, ident_key, passive)
if instance is not None:
return instance
elif passive is attributes.PASSIVE_NO_FETCH or \
passive is attributes.PASSIVE_NO_FETCH_RELATED:
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(session, state, ident_key)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if session._flushing:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
if passive is attributes.PASSIVE_NO_FETCH_RELATED:
attr_passive = attributes.PASSIVE_OFF
else:
attr_passive = passive
return [
get_attr(
state,
dict_,
self._equated_columns[pk],
passive=attr_passive)
for pk in self.mapper.primary_key
]
def _emit_lazyload(self, session, state, ident_key):
q = session.query(self.mapper)._adapt_all_clauses()
q = q._with_invoke_all_eagers(False)
pending = not state.key
# don't autoflush on pending
if pending:
q = q.autoflush(False)
if state.load_path:
q = q._with_current_path(state.load_path + (self.key,))
if state.load_options:
q = q._conditional_options(*state.load_options)
if self.use_get:
return q._load_on_ident(ident_key)
if self.parent_property.order_by:
q = q.order_by(*util.to_list(self.parent_property.order_by))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, LazyLoader):
q = q.options(EagerLazyOption((rev.key,), lazy='select'))
lazy_clause = self.lazy_clause(state)
if pending:
bind_values = sql_util.bind_values(lazy_clause)
if None in bind_values:
return None
q = q.filter(lazy_clause)
result = q.all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
def create_row_processor(self, context, path, reduced_path,
mapper, row, adapter):
key = self.key
if not self.is_class_level:
def set_lazy_callable(state, dict_, row):
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
state.set_callable(dict_, key, LoadLazyAttribute(state, key))
return set_lazy_callable, None, None
else:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state.reset(dict_, key)
return reset_for_lazy_callable, None, None
@classmethod
def _create_lazy_clause(cls, prop, reverse_direction=False):
binds = util.column_dict()
lookup = util.column_dict()
equated_columns = util.column_dict()
if reverse_direction and prop.secondaryjoin is None:
for l, r in prop.local_remote_pairs:
_list = lookup.setdefault(r, [])
_list.append((r, l))
equated_columns[l] = r
else:
for l, r in prop.local_remote_pairs:
_list = lookup.setdefault(l, [])
_list.append((l, r))
equated_columns[r] = l
def col_to_bind(col):
if col in lookup:
for tobind, equated in lookup[col]:
if equated in binds:
return None
if col not in binds:
binds[col] = sql.bindparam(None, None, type_=col.type, unique=True)
return binds[col]
return None
lazywhere = prop.primaryjoin
if prop.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind)
if prop.secondaryjoin is not None:
secondaryjoin = prop.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = dict((binds[col].key, col) for col in binds)
return lazywhere, bind_to_col, equated_columns
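# Illustrative note (a sketch, not part of the original source): for a simple
# relationship whose primaryjoin is ``parent.id == child.parent_id``,
# _create_lazy_clause() replaces the local side with an anonymous bind
# parameter, yielding roughly:
#
#   lazywhere        ->  :param_1 == child.parent_id
#   bind_to_col      ->  {'param_1': parent.id}
#   equated_columns  ->  {child.parent_id: parent.id}
#
# lazy_clause() later fills that parameter from the parent row's current (or,
# during a flush, committed) value.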
log.class_logger(LazyLoader)
class LoadLazyAttribute(object):
"""serializable loader object used by LazyLoader"""
def __init__(self, state, key):
self.state = state
self.key = key
def __call__(self, passive=attributes.PASSIVE_OFF):
state, key = self.state, self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[LazyLoader]
return strategy._load_for_state(state, passive)
class ImmediateLoader(AbstractRelationshipLoader):
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy(LazyLoader).\
init_class_attribute(mapper)
def setup_query(self, context, entity,
path, reduced_path, adapter, column_collection=None,
parentmapper=None, **kwargs):
pass
def create_row_processor(self, context, path, reduced_path,
mapper, row, adapter):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
return None, None, load_immediate
class SubqueryLoader(AbstractRelationshipLoader):
def init(self):
super(SubqueryLoader, self).init()
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy(LazyLoader).\
init_class_attribute(mapper)
def setup_query(self, context, entity,
path, reduced_path, adapter,
column_collection=None,
parentmapper=None, **kwargs):
if not context.query._enable_eagerloads:
return
path = path + (self.key, )
reduced_path = reduced_path + (self.key, )
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
subq_path = context.attributes.get(('subquery_path', None), ())
subq_path = subq_path + path
# join-depth / recursion check
if ("loaderstrategy", reduced_path) not in context.attributes:
if self.join_depth:
if len(path) / 2 > self.join_depth:
return
else:
if self.mapper.base_mapper in \
interfaces._reduce_path(subq_path):
return
subq_mapper, leftmost_mapper, leftmost_attr = \
self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader),
context.query)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query, leftmost_mapper,
leftmost_attr, subq_path
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(self.mapper)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
('subquery_path', None) : subq_path
}
q = q._enable_single_crit(False)
to_join, local_attr, parent_alias = \
self._prep_for_joins(left_alias, subq_path)
q = q.order_by(*local_attr)
q = q.add_columns(*local_attr)
q = self._apply_joins(q, to_join, left_alias, parent_alias)
q = self._setup_options(q, subq_path, orig_query)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
context.attributes[('subquery', reduced_path)] = q
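    # Illustrative note (sketch only, not from the original source): ``path``
    # and ``subq_path`` interleave mappers and relationship keys, e.g. a
    # hypothetical User -> addresses -> dingalings chain produces
    # (UserMapper, 'addresses', AddressMapper, 'dingalings'); that is why the
    # join-depth check above divides len(path) by 2 to count relationship hops.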
def _get_leftmost(self, subq_path):
subq_mapper = mapperutil._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if self.parent.isa(subq_mapper) and self.key==subq_path[1]:
leftmost_mapper, leftmost_prop = \
self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = \
subq_mapper, \
subq_mapper._props[subq_path[1]]
leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop)
leftmost_attr = [
leftmost_mapper._columntoproperty[c].class_attribute
for c in leftmost_cols
]
return subq_mapper, leftmost_mapper, leftmost_attr
def _generate_from_original_query(self,
orig_query, leftmost_mapper,
leftmost_attr, subq_path
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone()
        # TODO: why does polymorphic etc. require hardcoding
# into _adapt_col_list ? Does query.add_columns(...) work
# with polymorphic loading ?
q._set_entities(q._adapt_col_list(leftmost_attr))
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = mapperutil.AliasedClass(leftmost_mapper, embed_q)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = [
(subq_path[i], subq_path[i+1])
for i in xrange(0, len(subq_path), 2)
]
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
elif subq_path[-2].isa(self.parent):
# In the case of multiple levels, retrieve
# it from subq_path[-2]. This is the same as self.parent
# in the vast majority of cases, and [ticket:2014]
# illustrates a case where sub_path[-2] is a subclass
# of self.parent
parent_alias = mapperutil.AliasedClass(subq_path[-2])
else:
# if of_type() were used leading to this relationship,
# self.parent is more specific than subq_path[-2]
parent_alias = mapperutil.AliasedClass(self.parent)
local_cols, remote_cols = \
self._local_remote_columns(self.parent_property)
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(self, q, to_join, left_alias, parent_alias):
for i, (mapper, key) in enumerate(to_join):
# we need to use query.join() as opposed to
# orm.join() here because of the
# rich behavior it brings when dealing with
# "with_polymorphic" mappers. "aliased"
# and "from_joinpoint" take care of most of
# the chaining and aliasing for us.
first = i == 0
middle = i < len(to_join) - 1
second_to_last = i == len(to_join) - 2
if first:
attr = getattr(left_alias, key)
else:
attr = key
if second_to_last:
q = q.join(parent_alias, attr, from_joinpoint=True)
else:
q = q.join(attr, aliased=middle, from_joinpoint=True)
return q
def _local_remote_columns(self, prop):
if prop.secondary is None:
return zip(*prop.local_remote_pairs)
else:
return \
[p[0] for p in prop.synchronize_pairs],\
[
p[0] for p in prop.
secondary_synchronize_pairs
]
def _setup_options(self, q, subq_path, orig_query):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = \
eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
q = q.order_by(*eager_order_by)
return q
def create_row_processor(self, context, path, reduced_path,
mapper, row, adapter):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self)
reduced_path = reduced_path + (self.key,)
if ('subquery', reduced_path) not in context.attributes:
return None, None, None
local_cols, remote_cols = self._local_remote_columns(self.parent_property)
q = context.attributes[('subquery', reduced_path)]
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
if ('collections', reduced_path) in context.attributes:
collections = context.attributes[('collections', reduced_path)]
else:
collections = context.attributes[('collections', reduced_path)] = dict(
(k, [v[0] for v in v])
for k, v in itertools.groupby(
q,
lambda x:x[1:]
))
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
return self._create_collection_loader(collections, local_cols)
else:
return self._create_scalar_loader(collections, local_cols)
def _create_collection_loader(self, collections, local_cols):
def load_collection_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
()
)
state.get_impl(self.key).\
set_committed_value(state, dict_, collection)
return load_collection_from_subq, None, None
def _create_scalar_loader(self, collections, local_cols):
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
(None,)
)
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
scalar = collection[0]
state.get_impl(self.key).\
set_committed_value(state, dict_, scalar)
return load_scalar_from_subq, None, None
log.class_logger(SubqueryLoader)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
def init(self):
super(JoinedLoader, self).init()
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy(LazyLoader).init_class_attribute(mapper)
def setup_query(self, context, entity, path, reduced_path, adapter, \
column_collection=None, parentmapper=None,
allow_innerjoin=True,
**kwargs):
"""Add a left outer join to the statement thats being constructed."""
if not context.query._enable_eagerloads:
return
path = path + (self.key,)
reduced_path = reduced_path + (self.key,)
if ("user_defined_eager_row_processor", reduced_path) in\
context.attributes:
clauses, adapter, add_to_collection = \
self._get_user_defined_adapter(
context, entity, reduced_path, adapter
)
else:
# check for join_depth or basic recursion,
# if the current path was not explicitly stated as
# a desired "loaderstrategy" (i.e. via query.options())
if ("loaderstrategy", reduced_path) not in context.attributes:
if self.join_depth:
if len(path) / 2 > self.join_depth:
return
else:
if self.mapper.base_mapper in reduced_path:
return
clauses, adapter, add_to_collection, \
allow_innerjoin = self._generate_row_adapter(
context, entity, path, reduced_path, adapter,
column_collection, parentmapper, allow_innerjoin
)
path += (self.mapper,)
reduced_path += (self.mapper.base_mapper,)
for value in self.mapper._polymorphic_properties:
value.setup(
context,
entity,
path,
reduced_path,
clauses,
parentmapper=self.mapper,
column_collection=add_to_collection,
allow_innerjoin=allow_innerjoin)
def _get_user_defined_adapter(self, context, entity,
reduced_path, adapter):
clauses = context.attributes[
("user_defined_eager_row_processor",
reduced_path)]
adapter = entity._get_entity_clauses(context.query, context)
if adapter and clauses:
context.attributes[
("user_defined_eager_row_processor",
reduced_path)] = clauses = clauses.wrap(adapter)
elif adapter:
context.attributes[
("user_defined_eager_row_processor",
reduced_path)] = clauses = adapter
add_to_collection = context.primary_columns
return clauses, adapter, add_to_collection
def _generate_row_adapter(self,
context, entity, path, reduced_path, adapter,
column_collection, parentmapper, allow_innerjoin
):
clauses = mapperutil.ORMAdapter(
mapperutil.AliasedClass(self.mapper),
equivalents=self.mapper._equivalent_columns,
adapt_required=True)
if self.parent_property.direction != interfaces.MANYTOONE:
context.multi_row_eager_loaders = True
innerjoin = allow_innerjoin and context.attributes.get(
("eager_join_type", path),
self.parent_property.innerjoin)
if not innerjoin:
# if this is an outer join, all eager joins from
# here must also be outer joins
allow_innerjoin = False
context.create_eager_joins.append(
(self._create_eager_join, context,
entity, path, adapter,
parentmapper, clauses, innerjoin)
)
add_to_collection = context.secondary_columns
context.attributes[
("eager_row_processor", reduced_path)
] = clauses
return clauses, adapter, add_to_collection, allow_innerjoin
def _create_eager_join(self, context, entity,
path, adapter, parentmapper,
clauses, innerjoin):
if parentmapper is None:
localparent = entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = context.multi_row_eager_loaders and \
context.query._should_nest_selectable
entity_key = None
if entity not in context.eager_joins and \
not should_nest_selectable and \
context.from_clause:
index, clause = \
sql_util.find_join_source(
context.from_clause, entity.selectable)
if clause is not None:
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
entity_key, default_towrap = index, clause
if entity_key is None:
entity_key, default_towrap = entity, entity.selectable
towrap = context.eager_joins.setdefault(entity_key, default_towrap)
join_to_left = False
if adapter:
if getattr(adapter, 'aliased_class', None):
onclause = getattr(
adapter.aliased_class, self.key,
self.parent_property)
else:
onclause = getattr(
mapperutil.AliasedClass(
self.parent,
adapter.selectable
),
self.key, self.parent_property
)
if onclause is self.parent_property:
# TODO: this is a temporary hack to
# account for polymorphic eager loads where
# the eagerload is referencing via of_type().
join_to_left = True
else:
onclause = self.parent_property
context.eager_joins[entity_key] = eagerjoin = \
mapperutil.join(
towrap,
clauses.aliased_class,
onclause,
join_to_left=join_to_left,
isouter=not innerjoin
)
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = entity.selectable
if self.parent_property.secondary is None and \
not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util.find_columns(
self.parent_property.primaryjoin):
if localparent.mapped_table.c.contains_column(col):
if adapter:
col = adapter.columns[col]
context.primary_columns.append(col)
if self.parent_property.order_by:
context.eager_order_by += \
eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
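    # Illustrative note (a sketch, not from the original source): for a
    # hypothetical User.addresses relationship loaded with joinedload(), the
    # join assembled above renders along the lines of
    #
    #   SELECT users.*, addresses_1.*
    #   FROM users LEFT OUTER JOIN addresses AS addresses_1
    #        ON users.id = addresses_1.user_id
    #
    # with an INNER JOIN emitted instead when innerjoin is allowed.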
def _create_eager_adapter(self, context, row, adapter, path, reduced_path):
if ("user_defined_eager_row_processor", reduced_path) in \
context.attributes:
decorator = context.attributes[
("user_defined_eager_row_processor",
reduced_path)]
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
elif ("eager_row_processor", reduced_path) in context.attributes:
decorator = context.attributes[
("eager_row_processor", reduced_path)]
else:
return False
try:
self.mapper.identity_key_from_row(row, decorator)
return decorator
except KeyError:
            # no identity key - don't return a row processor;
            # this will cause a degrade to lazy loading
return False
def create_row_processor(self, context, path, reduced_path, mapper, row, adapter):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self)
our_path = path + (self.key,)
our_reduced_path = reduced_path + (self.key,)
eager_adapter = self._create_eager_adapter(
context,
row,
adapter, our_path,
our_reduced_path)
if eager_adapter is not False:
key = self.key
_instance = self.mapper._instance_processor(
context,
our_path + (self.mapper,),
our_reduced_path + (self.mapper.base_mapper,),
eager_adapter)
if not self.uselist:
return self._create_scalar_loader(context, key, _instance)
else:
return self._create_collection_loader(context, key, _instance)
else:
return self.parent_property.\
_get_strategy(LazyLoader).\
create_row_processor(
context, path,
reduced_path,
mapper, row, adapter)
def _create_collection_loader(self, context, key, _instance):
def load_collection_from_joined_new_row(state, dict_, row):
collection = attributes.init_state_collection(
state, dict_, key)
result_list = util.UniqueAppender(collection,
'append_without_event')
context.attributes[(state, key)] = result_list
_instance(row, result_list)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(state,
dict_, key)
result_list = util.UniqueAppender(
collection,
'append_without_event')
context.attributes[(state, key)] = result_list
_instance(row, result_list)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row, None)
return load_collection_from_joined_new_row, \
load_collection_from_joined_existing_row, \
None, load_collection_from_joined_exec
def _create_scalar_loader(self, context, key, _instance):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row, None)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row, None)
if existing is not None \
and key in dict_ \
and existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row, None)
return load_scalar_from_joined_new_row, \
load_scalar_from_joined_existing_row, \
None, load_scalar_from_joined_exec
EagerLoader = JoinedLoader
"""Deprecated, use JoinedLoader"""
log.class_logger(JoinedLoader)
class EagerLazyOption(StrategizedOption):
def __init__(self, key, lazy=True, chained=False,
propagate_to_loaders=True
):
if isinstance(key[0], basestring) and key[0] == '*':
if len(key) != 1:
raise sa_exc.ArgumentError(
"Wildcard identifier '*' must "
"be specified alone.")
key = ("relationship:*",)
propagate_to_loaders = False
super(EagerLazyOption, self).__init__(key)
self.lazy = lazy
self.chained = self.lazy in (False, 'joined', 'subquery') and chained
self.propagate_to_loaders = propagate_to_loaders
self.strategy_cls = factory(lazy)
def get_strategy_class(self):
return self.strategy_cls
def factory(identifier):
if identifier is False or identifier == 'joined':
return JoinedLoader
elif identifier is None or identifier == 'noload':
return NoLoader
    elif identifier is True or identifier == 'select':
return LazyLoader
elif identifier == 'subquery':
return SubqueryLoader
elif identifier == 'immediate':
return ImmediateLoader
else:
return LazyLoader
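# Illustrative mapping (a sketch, not part of the original source): the lazy
# value handed to EagerLazyOption ends up in factory() and resolves roughly as:
#
#   factory('joined')  or factory(False)  -> JoinedLoader    (eager JOIN)
#   factory('subquery')                   -> SubqueryLoader
#   factory('select')  or factory(True)   -> LazyLoader      (load on access)
#   factory('noload')  or factory(None)   -> NoLoader
#   factory('immediate')                  -> ImmediateLoader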
class EagerJoinOption(PropertyOption):
def __init__(self, key, innerjoin, chained=False):
super(EagerJoinOption, self).__init__(key)
self.innerjoin = innerjoin
self.chained = chained
def process_query_property(self, query, paths, mappers):
if self.chained:
for path in paths:
query._attributes[("eager_join_type", path)] = self.innerjoin
else:
query._attributes[("eager_join_type", paths[-1])] = self.innerjoin
class LoadEagerFromAliasOption(PropertyOption):
def __init__(self, key, alias=None, chained=False):
super(LoadEagerFromAliasOption, self).__init__(key)
if alias is not None:
if not isinstance(alias, basestring):
m, alias, is_aliased_class = mapperutil._entity_info(alias)
self.alias = alias
self.chained = chained
def process_query_property(self, query, paths, mappers):
if self.chained:
for path in paths[0:-1]:
(root_mapper, propname) = path[-2:]
prop = root_mapper._props[propname]
adapter = query._polymorphic_adapters.get(prop.mapper, None)
query._attributes.setdefault(
("user_defined_eager_row_processor",
interfaces._reduce_path(path)), adapter)
if self.alias is not None:
if isinstance(self.alias, basestring):
(root_mapper, propname) = paths[-1][-2:]
prop = root_mapper._props[propname]
self.alias = prop.target.alias(self.alias)
query._attributes[
("user_defined_eager_row_processor",
interfaces._reduce_path(paths[-1]))
] = sql_util.ColumnAdapter(self.alias)
else:
(root_mapper, propname) = paths[-1][-2:]
prop = root_mapper._props[propname]
adapter = query._polymorphic_adapters.get(prop.mapper, None)
query._attributes[
("user_defined_eager_row_processor",
interfaces._reduce_path(paths[-1]))] = adapter
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent." %
(mapperutil.instance_str(value), state.class_, prop)
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(desc, 'append', append, raw=True, retval=True, active_history=True)
event.listen(desc, 'set', set_, raw=True, retval=True, active_history=True)
import itertools
import unittest
import uuid
from mock import MagicMock
from hazelcast.protocol.codec import sql_execute_codec, sql_close_codec, sql_fetch_codec
from hazelcast.protocol.client_message import _OUTBOUND_MESSAGE_MESSAGE_TYPE_OFFSET
from hazelcast.serialization import LE_INT
from hazelcast.sql import (
SqlService,
SqlColumnMetadata,
SqlColumnType,
_SqlPage,
SqlRowMetadata,
_InternalSqlService,
HazelcastSqlError,
_SqlErrorCode,
_SqlError,
_SqlStatement,
SqlExpectedResultType,
)
from hazelcast.util import try_to_get_enum_value
EXPECTED_ROWS = ["result", "result2"]
EXPECTED_UPDATE_COUNT = 42
class SqlMockTest(unittest.TestCase):
def setUp(self):
self.connection = MagicMock()
connection_manager = MagicMock(client_uuid=uuid.uuid4())
connection_manager.get_random_connection_for_sql = MagicMock(return_value=self.connection)
serialization_service = MagicMock()
serialization_service.to_object.side_effect = lambda arg: arg
serialization_service.to_data.side_effect = lambda arg: arg
self.invocation_registry = {}
correlation_id_counter = itertools.count()
invocation_service = MagicMock()
def invoke(invocation):
self.invocation_registry[next(correlation_id_counter)] = invocation
invocation_service.invoke.side_effect = invoke
self.internal_service = _InternalSqlService(
connection_manager, serialization_service, invocation_service
)
self.service = SqlService(self.internal_service)
self.result = self.service.execute("SOME QUERY")
def test_iterator_with_rows(self):
self.set_execute_response_with_rows()
result = self.result.result()
self.assertEqual(-1, result.update_count())
self.assertTrue(result.is_row_set())
self.assertIsInstance(result.get_row_metadata(), SqlRowMetadata)
self.assertEqual(EXPECTED_ROWS, self.get_rows_from_iterator(result))
def test_blocking_iterator_with_rows(self):
self.set_execute_response_with_rows()
result = self.result.result()
self.assertEqual(-1, result.update_count())
self.assertTrue(result.is_row_set())
self.assertIsInstance(result.get_row_metadata(), SqlRowMetadata)
self.assertEqual(EXPECTED_ROWS, self.get_rows_from_blocking_iterator(result))
def test_iterator_with_update_count(self):
self.set_execute_response_with_update_count()
result = self.result.result()
self.assertEqual(EXPECTED_UPDATE_COUNT, result.update_count())
self.assertFalse(result.is_row_set())
with self.assertRaises(ValueError):
result.get_row_metadata()
with self.assertRaises(ValueError):
result.iterator()
def test_blocking_iterator_with_update_count(self):
self.set_execute_response_with_update_count()
result = self.result.result()
self.assertEqual(EXPECTED_UPDATE_COUNT, result.update_count())
self.assertFalse(result.is_row_set())
with self.assertRaises(ValueError):
result.get_row_metadata()
with self.assertRaises(ValueError):
for _ in result:
pass
def test_execute_error(self):
self.set_execute_error(RuntimeError("expected"))
with self.assertRaises(HazelcastSqlError) as cm:
result = self.result.result()
iter(result)
self.assertEqual(_SqlErrorCode.GENERIC, cm.exception._code)
def test_execute_error_when_connection_is_not_live(self):
self.connection.live = False
self.set_execute_error(RuntimeError("expected"))
with self.assertRaises(HazelcastSqlError) as cm:
result = self.result.result()
iter(result)
self.assertEqual(_SqlErrorCode.CONNECTION_PROBLEM, cm.exception._code)
def test_close_when_close_request_fails(self):
self.set_execute_response_with_rows(is_last=False)
result = self.result.result()
future = result.close()
self.set_close_error(HazelcastSqlError(None, _SqlErrorCode.PARSING, "expected", None))
with self.assertRaises(HazelcastSqlError) as cm:
future.result()
self.assertEqual(_SqlErrorCode.PARSING, cm.exception._code)
def test_fetch_error(self):
self.set_execute_response_with_rows(is_last=False)
result = self.result.result()
rows = []
i = result.iterator()
# First page contains two rows
rows.append(next(i).result().get_object_with_index(0))
rows.append(next(i).result().get_object_with_index(0))
self.assertEqual(EXPECTED_ROWS, rows)
# initiate the fetch request
future = next(i)
self.set_fetch_error(RuntimeError("expected"))
with self.assertRaises(HazelcastSqlError) as cm:
future.result()
self.assertEqual(_SqlErrorCode.GENERIC, cm.exception._code)
def test_fetch_server_error(self):
self.set_execute_response_with_rows(is_last=False)
result = self.result.result()
rows = []
i = result.iterator()
# First page contains two rows
rows.append(next(i).result().get_object_with_index(0))
rows.append(next(i).result().get_object_with_index(0))
self.assertEqual(EXPECTED_ROWS, rows)
# initiate the fetch request
future = next(i)
self.set_fetch_response_with_error()
with self.assertRaises(HazelcastSqlError) as cm:
future.result()
self.assertEqual(_SqlErrorCode.PARSING, cm.exception._code)
def test_close_in_between_fetches(self):
self.set_execute_response_with_rows(is_last=False)
result = self.result.result()
rows = []
i = result.iterator()
# First page contains two rows
rows.append(next(i).result().get_object_with_index(0))
rows.append(next(i).result().get_object_with_index(0))
self.assertEqual(EXPECTED_ROWS, rows)
# initiate the fetch request
future = next(i)
result.close()
with self.assertRaises(HazelcastSqlError) as cm:
future.result()
self.assertEqual(_SqlErrorCode.CANCELLED_BY_USER, cm.exception._code)
def set_fetch_response_with_error(self):
response = {
"row_page": None,
"error": _SqlError(_SqlErrorCode.PARSING, "expected", None, None, ""),
}
self.set_future_result_or_exception(response, sql_fetch_codec._REQUEST_MESSAGE_TYPE)
def set_fetch_error(self, error):
self.set_future_result_or_exception(error, sql_fetch_codec._REQUEST_MESSAGE_TYPE)
def set_close_error(self, error):
self.set_future_result_or_exception(error, sql_close_codec._REQUEST_MESSAGE_TYPE)
def set_close_response(self):
self.set_future_result_or_exception(None, sql_close_codec._REQUEST_MESSAGE_TYPE)
def set_execute_response_with_update_count(self):
self.set_execute_response(EXPECTED_UPDATE_COUNT, None, None, None)
@staticmethod
def get_rows_from_blocking_iterator(result):
return [row.get_object_with_index(0) for row in result]
@staticmethod
def get_rows_from_iterator(result):
rows = []
for row_future in result.iterator():
try:
row = row_future.result()
rows.append(row.get_object_with_index(0))
except StopIteration:
break
return rows
def set_execute_response_with_rows(self, is_last=True):
self.set_execute_response(
-1,
[SqlColumnMetadata("name", SqlColumnType.VARCHAR, True, True)],
_SqlPage([SqlColumnType.VARCHAR], [EXPECTED_ROWS], is_last),
None,
)
def set_execute_response(self, update_count, row_metadata, row_page, error):
response = {
"update_count": update_count,
"row_metadata": row_metadata,
"row_page": row_page,
"error": error,
}
self.set_future_result_or_exception(response, sql_execute_codec._REQUEST_MESSAGE_TYPE)
def set_execute_error(self, error):
self.set_future_result_or_exception(error, sql_execute_codec._REQUEST_MESSAGE_TYPE)
def get_message_type(self, invocation):
return LE_INT.unpack_from(invocation.request.buf, _OUTBOUND_MESSAGE_MESSAGE_TYPE_OFFSET)[0]
def set_future_result_or_exception(self, value, message_type):
for invocation in self.invocation_registry.values():
if self.get_message_type(invocation) == message_type:
if isinstance(value, Exception):
invocation.future.set_exception(value)
else:
invocation.future.set_result(value)
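# Illustrative note (not part of the original tests): the SqlMockTest plumbing
# above never reaches a real cluster member.  Each execute/fetch/close
# invocation is captured in invocation_registry, keyed by a correlation-id
# counter, and a test later completes the matching future by message type:
#
#   self.set_execute_response_with_rows()        # resolve the pending execute
#   self.set_fetch_error(RuntimeError("boom"))   # fail a pending fetch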
class SqlInvalidInputTest(unittest.TestCase):
def test_statement_sql(self):
valid_inputs = ["a", " a", " a "]
for valid in valid_inputs:
statement = _SqlStatement(valid, [])
self.assertEqual(valid, statement.sql)
invalid_inputs = ["", " ", None, 1]
for invalid in invalid_inputs:
with self.assertRaises((ValueError, AssertionError)):
_SqlStatement(invalid, [])
def test_statement_timeout(self):
valid_inputs = [-1, 0, 15, 1.5]
for valid in valid_inputs:
statement = _SqlStatement("sql", [])
statement.timeout = valid
self.assertEqual(valid, statement.timeout)
invalid_inputs = [-10, -100, "hey", None]
for invalid in invalid_inputs:
statement = _SqlStatement("sql", [])
with self.assertRaises((ValueError, AssertionError)):
statement.timeout = invalid
def test_statement_cursor_buffer_size(self):
valid_inputs = [1, 10, 999999]
for valid in valid_inputs:
statement = _SqlStatement("something", [])
statement.cursor_buffer_size = valid
self.assertEqual(valid, statement.cursor_buffer_size)
invalid_inputs = [0, -10, -99999, "hey", None, 1.0]
for invalid in invalid_inputs:
statement = _SqlStatement("something", [])
with self.assertRaises((ValueError, AssertionError)):
statement.cursor_buffer_size = invalid
def test_statement_expected_result_type(self):
valid_inputs = [
SqlExpectedResultType.ROWS,
SqlExpectedResultType.UPDATE_COUNT,
"ROWS",
"ANY",
]
for valid in valid_inputs:
statement = _SqlStatement("something", [])
statement.expected_result_type = valid
self.assertEqual(
try_to_get_enum_value(valid, SqlExpectedResultType), statement.expected_result_type
)
invalid_inputs = [None, 123, "hey"]
for invalid in invalid_inputs:
with self.assertRaises(TypeError):
statement = _SqlStatement("something")
statement.expected_result_type = invalid
def test_row_metadata_get_column(self):
row_metadata = self._create_row_metadata()
valid_inputs = [0, 1, 2]
for valid in valid_inputs:
column_metadata = row_metadata.get_column(valid)
self.assertEqual(str(valid), column_metadata.name)
invalid_inputs = [4, 5, "6", None]
for invalid in invalid_inputs:
with self.assertRaises((IndexError, AssertionError)):
row_metadata.get_column(invalid)
def test_row_metadata_find_column(self):
row_metadata = self._create_row_metadata()
valid_inputs = ["0", "1", "2", "-1"]
for valid in valid_inputs:
index = row_metadata.find_column(valid)
self.assertEqual(int(valid), index)
invalid_inputs = [6, None]
for invalid in invalid_inputs:
with self.assertRaises((IndexError, AssertionError)):
                row_metadata.find_column(invalid)
@staticmethod
def _create_row_metadata():
return SqlRowMetadata(
[
SqlColumnMetadata("0", SqlColumnType.VARCHAR, True, True),
SqlColumnMetadata("1", SqlColumnType.TINYINT, True, True),
SqlColumnMetadata("2", SqlColumnType.OBJECT, True, True),
]
)
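# Note (illustrative, not part of the original tests): the "-1" entry in
# test_row_metadata_find_column works because no column is named "-1", so
# find_column returns the not-found sentinel (-1), which happens to equal
# int("-1").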